[2018-06-07 15:33:38,765] INFO park.jobserver.JobManager$ [] [] - Starting JobManager named jobManager-32-9e8a-eb53f2d71aa5 with config {
    # system properties
    "ui" : {
        # system properties
        "killEnabled" : "true"
    },
    # system properties
    "app" : {
        # system properties
        "name" : "spark.jobserver.JobManager"
    },
    # application.conf: 5
    # spark web UI port
    "webUrlPort" : 8080,
    # system properties
    "submit" : {
        # system properties
        "deployMode" : "client"
    },
    # system properties
    "serializer" : "org.apache.spark.serializer.KryoSerializer",
    # system properties
    "executor" : {
        # system properties
        "extraLibraryPath" : "/opt/cloudera/parcels/CDH-5.8.2-1.cdh5.8.2.p0.3/lib/hadoop/lib/native",
        # system properties
        "uri" : "",
        # system properties
        "extraJavaOptions" : "-Dlog4j.configuration=file:/opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/log4j-server.properties\n -DLOG_DIR=/var/log/spark-job-server/jobserver-spark-job-server6094165701522702640"
    },
    # merge of /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 25,application.conf: 7
    # #########################################################################
    # Job server settings
    # #########################################################################
    "jobserver" : {
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 27
        # TCP port that the job server listens on for HTTP requests
        "port" : 8090,
        # merge of /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 37,application.conf: 40
        "sqldao" : {
            # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 38
            "rootdir" : "/tmp/spark-job-server/sqldao/data",
            # application.conf: 42
            # Slick database driver, full classpath
            "slick-driver" : "scala.slick.driver.H2Driver",
            # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 41
            "dbcp" : {
                # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 45
                "maxidle" : 10,
                # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 43
                "maxactive" : 20,
                # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 42
                "connectonstart" : true,
                # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 44
                "minidle" : 1,
                # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 46
                "initialsize" : 10
            },
            # application.conf: 45
            # JDBC driver, full classpath
            "jdbc-driver" : "org.h2.Driver",
            # merge of /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 39,application.conf: 52
            # Full JDBC URL / init string, along with username and password. Sorry, needs to match above.
            # Substitutions may be used to launch job-server, but leave it out here in the default or tests won't pass
            "jdbc" : {
                # application.conf: 55
                "password" : "",
                # application.conf: 54
                "user" : "",
                # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 39
                "url" : "jdbc:h2:/tmp/spark-job-server/sqldao/data/h2-db;AUTO_SERVER=TRUE;AUTO_RECONNECT=TRUE"
            }
        },
        # application.conf: 87
        # spark broadcst factory in yarn deployment
        # Versions prior to 1.1.0, spark default broadcast factory is org.apache.spark.broadcast.HttpBroadcastFactory.
        # Can't start multiple sparkContexts in the same JVM with HttpBroadcastFactory.
        "yarn-broadcast-factory" : "org.apache.spark.broadcast.TorrentBroadcastFactory",
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 52
        # Storage directory for files that are uploaded to the server
        # via POST/data commands
        "datadao" : {
            # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 53
            "rootdir" : "/tmp/spark-job-server/upload"
        },
        # application.conf: 9
        "bind-address" : "0.0.0.0",
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 65
        # Timeouts for Spark context creation in seconds. In "yarn-client" mode, use
        # the first option, in all other modes, use the second one. If context
        # creation takes longer than the timeout, the jobserver closes the context.
        # Especially in yarn-client mode, context creation includes container
        # allocation, which can take a while.
        "yarn-context-creation-timeout" : "60 s",
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 66
        "context-creation-timeout" : "60 s",
        # application.conf: 69
        # The ask pattern timeout for Api
        "short-timeout" : "3 s",
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 58
        # Number of jobs that can be run simultaneously per context. If not set,
        # defaults to number of cores on machine where Spark job server is running.
        "max-jobs-per-context" : 100,
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 36
        # Class to use to persist data such as jars, applications, jobs, etc.
        # spark.jobserver.io.JobFileDAO uses the file system for persistence
        # spark.jobserver.io.JobSqlDAO uses an SQL database for persistence
        #
        "jobdao" : "spark.jobserver.io.JobSqlDAO",
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 30
        # Directory where the job server stores uploaded jar files
        "jar-store-rootdir" : "/tmp/spark-job-server/jars",
        # application.conf: 24
        "filedao" : {
            # application.conf: 25
            "rootdir" : "/tmp/spark-jobserver/filedao/data"
        },
        # application.conf: 15
        # Number of job results to keep per JobResultActor/context
        "job-result-cache-size" : 5000,
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 71
        # If true, a separate JVM is forked for each Spark context.
        # KERBEROS NOTE: In a Kerberos-enabled environment, you should set this to true
        # (as well as shiro authentication, see later in this file).
        "context-per-jvm" : true,
        # application.conf: 75
        # Time out for job server to wait while creating named objects
        "named-object-creation-timeout" : "60 s"
    },
    # merge of /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 90,application.conf: 103
    # Default settings for Spark contexts. These settings can be overridden on a
    # per-context basis. Please consult the Spark documentation for more details
    # on available settings.
    # Default settings for ad hoc as well as manually created contexts
    # You can add any Spark config params here, for example, spark.mesos.coarse = true
    "context-settings" : {
        # application.conf: 111
        "streaming" : {
            # application.conf: 116
            # if true, stops gracefully by waiting for the processing of all received data to be completed
            "stopGracefully" : true,
            # application.conf: 120
            # if true, stops the SparkContext with the StreamingContext. The underlying SparkContext will be
            # stopped regardless of whether the StreamingContext has been started.
            "stopSparkContext" : true,
            # application.conf: 113
            # Default batch interval for Spark Streaming contexts in milliseconds
            "batch_interval" : 1000
        },
        # application.conf: 127
        # Timeout for SupervisorActor to wait for forked (separate JVM) contexts to initialize
        "context-init-timeout" : "60s",
        # application.conf: 109
        # A zero-arg class implementing spark.jobserver.context.SparkContextFactory
        # Determines the type of jobs that can run in a SparkContext
        "context-factory" : "spark.jobserver.context.DefaultSparkContextFactory",
        # application.conf: 129
        "passthrough" : {
            # application.conf: 130
            "spark" : {
                # application.conf: 130
                "driver" : {
                    # application.conf: 130
                    "allowMultipleContexts" : true
                }
            }
        },
        # application.conf: 105
        # Number of cores to allocate. Required.
        "memory-per-node" : "512m",
        # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 98
        # Required setting, that sets "spark.cores.max" in the SparkConf. This sets
        # the maximum amount of CPU cores to request for the Spark on the cluster
        # (not from each machine).
        #
        # IMPORTANT: Note that although required by job-server, this setting only
        # has an effect in Standalone and Mesos clusters.
        "num-cpu-cores" : 2
    },
    # /opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf: 82
    # Sets the "spark.master" property in the SparkConf. KNIME recommends
    # "yarn-client" for production use and "local[4]" for debugging purposes.
    # master = "local[4]" # Run Spark locally with 4 worker threads
    "master" : "yarn-client",
    # system properties
    "dynamicAllocation" : {
        # system properties
        "enabled" : "true",
        # system properties
        "minExecutors" : "0",
        # system properties
        "executorIdleTimeout" : "60",
        # system properties
        "schedulerBacklogTimeout" : "1"
    },
    # system properties
    "authenticate" : "false",
    # system properties
    "shuffle" : {
        # system properties
        "service" : {
            # system properties
            "port" : "7337",
            # system properties
            "enabled" : "true"
        }
    },
    # system properties
    "eventLog" : {
        # system properties
        "enabled" : "true",
        # system properties
        "dir" : "hdfs://cluster-01.example.com:8020/user/spark/applicationHistory"
    },
    # system properties
    "driver" : {
        # system properties
        "extraLibraryPath" : "/opt/cloudera/parcels/CDH-5.8.2-1.cdh5.8.2.p0.3/lib/hadoop/lib/native",
        # system properties
        "extraJavaOptions" : "-XX:+UseConcMarkSweepGC\n -verbose:gc -XX:+PrintGCTimeStamps -Xloggc:/opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/gc.out\n -XX:MaxPermSize=512m\n -XX:+CMSClassUnloadingEnabled -XX:MaxDirectMemorySize=512M\n -XX:+HeapDumpOnOutOfMemoryError -Djava.net.preferIPv4Stack=true -Dlog4j.configuration=file:/opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/log4j-server.properties\n -DLOG_DIR=/var/log/spark-job-server/jobserver-spark-job-server6094165701522702640 -Dspark.executor.uri= ",
        # system properties
        "memory" : "2G"
    },
    # application.conf: 93
    # predefined Spark contexts
    # Below is an example, but do not uncomment it. Everything defined here is carried over to
    # deploy-time configs, so they will be created in all environments. :(
    "contexts" : {},
    # system properties
    "yarn" : {
        # system properties
        "am" : {
            # system properties
            "extraLibraryPath" : "/opt/cloudera/parcels/CDH-5.8.2-1.cdh5.8.2.p0.3/lib/hadoop/lib/native"
        },
        # system properties
        "jar" : "local:/opt/cloudera/parcels/CDH-5.8.2-1.cdh5.8.2.p0.3/lib/spark/lib/spark-assembly.jar",
        # system properties
        "config" : {
            # system properties
            "gatewayPath" : "/opt/cloudera/parcels",
            # system properties
            "replacementPath" : "{{HADOOP_COMMON_HOME}}/../../.."
        },
        # system properties
        "historyServer" : {
            # system properties
            "allowTracking" : "true",
            # system properties
            "address" : "http://cluster-01.example.com:18088"
        }
    },
    # system properties
    "jars" : "file:/opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/spark-job-server.jar"
}
[2018-06-07 15:33:38,767] INFO park.jobserver.JobManager$ [] [] - ..and context config: {
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "streaming" : {
        # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
        "stopGracefully" : true,
        # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
        "stopSparkContext" : true,
        # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
        "batch_interval" : 1000
    },
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "context-init-timeout" : "60s",
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "is-adhoc" : "false",
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "context-factory" : "spark.jobserver.context.DefaultSparkContextFactory",
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "passthrough" : {
        # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
        "spark" : {
            # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
            "driver" : {
                # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
                "allowMultipleContexts" : true
            }
        }
    },
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "memory-per-node" : "512m",
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "num-cpu-cores" : 2,
    # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
    "context" : {
        # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
        "name" : "spark-job-server",
        # /var/log/spark-job-server/jobserver-spark-job-server6094165701522702640/context.conf: 1
        "actorname" : "jobManager-32-9e8a-eb53f2d71aa5"
    }
}
[2018-06-07 15:33:38,972] INFO ka.event.slf4j.Slf4jLogger [] [] - Slf4jLogger started
[2018-06-07 15:33:39,034] INFO Remoting [] [Remoting] - Starting remoting
[2018-06-07 15:33:39,150] INFO Remoting [] [Remoting] - Remoting started; listening on addresses :[akka.tcp://JobServer@127.0.0.1:56067]
[2018-06-07 15:33:39,151] INFO Remoting [] [Remoting] - Remoting now listens on addresses: [akka.tcp://JobServer@127.0.0.1:56067]
[2018-06-07 15:33:39,169] INFO Cluster(akka://JobServer) [] [Cluster(akka://JobServer)] - Cluster Node [akka.tcp://JobServer@127.0.0.1:56067] - Starting up...
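The origin comments in the two dumps above ("# system properties", "# application.conf: 5", "# merge of ...environment.conf: 25,application.conf: 7") are Typesafe Config provenance annotations: the job server resolves its settings by layering JVM system properties over the site-specific environment.conf, which falls back to the defaults bundled in application.conf. A minimal sketch of reproducing that merged view with the plain Typesafe Config API (the environment.conf path is taken from the log; the object name and everything else here is illustrative, not job-server code):

```scala
import java.io.File
import com.typesafe.config.{ConfigFactory, ConfigRenderOptions}

// Illustrative sketch: rebuild the merged view that JobManager logs above.
// System properties win over environment.conf, which wins over the
// application.conf defaults on the classpath.
object RenderMergedConfig {
  def main(args: Array[String]): Unit = {
    val merged = ConfigFactory.defaultOverrides() // -D system properties
      .withFallback(ConfigFactory.parseFile(
        new File("/opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/environment.conf")))
      .withFallback(ConfigFactory.defaultApplication()) // bundled application.conf
      .resolve()
    // Render with origin comments, which is where the "# application.conf: 5"
    // style annotations in the log come from.
    val opts = ConfigRenderOptions.defaults().setOriginComments(true).setJson(false)
    println(merged.getConfig("spark").root().render(opts))
  }
}
```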
[2018-06-07 15:33:39,232] INFO Cluster(akka://JobServer) [] [Cluster(akka://JobServer)] - Cluster Node [akka.tcp://JobServer@127.0.0.1:56067] - Registered cluster JMX MBean [akka:type=Cluster]
[2018-06-07 15:33:39,233] INFO Cluster(akka://JobServer) [] [Cluster(akka://JobServer)] - Cluster Node [akka.tcp://JobServer@127.0.0.1:56067] - Started up successfully
[2018-06-07 15:33:39,245] INFO Cluster(akka://JobServer) [] [Cluster(akka://JobServer)] - Cluster Node [akka.tcp://JobServer@127.0.0.1:56067] - No seed-nodes configured, manual cluster join required
[2018-06-07 15:33:39,246] INFO park.jobserver.JobManager$ [] [] - Joining cluster at address akka.tcp://JobServer@127.0.0.1:33139
[2018-06-07 15:33:39,255] INFO kka.actor.ProductionReaper [] [akka://JobServer/user/$a] - Starting actor ooyala.common.akka.actor.ProductionReaper
[2018-06-07 15:33:39,255] INFO .jobserver.JobManagerActor [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Starting actor spark.jobserver.JobManagerActor
[2018-06-07 15:33:39,284] INFO kka.actor.ProductionReaper [] [akka://JobServer/user/$a] - Watching actor Actor[akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5#-95460231]
[2018-06-07 15:33:39,398] INFO Cluster(akka://JobServer) [] [Cluster(akka://JobServer)] - Cluster Node [akka.tcp://JobServer@127.0.0.1:56067] - Welcome from [akka.tcp://JobServer@127.0.0.1:33139]
[2018-06-07 15:33:40,195] INFO k.jobserver.JobStatusActor [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5/$a] - Starting actor spark.jobserver.JobStatusActor
[2018-06-07 15:33:40,242] INFO parkContextFactory$$anon$1 [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Running Spark version 1.6.0
[2018-06-07 15:33:41,014] INFO ache.spark.SecurityManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Changing view acls to: root
[2018-06-07 15:33:41,014] INFO ache.spark.SecurityManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Changing modify acls to: root
[2018-06-07 15:33:41,015] INFO ache.spark.SecurityManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(root); users with modify permissions: Set(root)
[2018-06-07 15:33:41,156] INFO rg.apache.spark.util.Utils [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Successfully started service 'sparkDriver' on port 51291.
[2018-06-07 15:33:41,222] INFO ka.event.slf4j.Slf4jLogger [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Slf4jLogger started
[2018-06-07 15:33:41,227] INFO Remoting [] [Remoting] - Starting remoting
[2018-06-07 15:33:41,239] INFO Remoting [] [Remoting] - Remoting started; listening on addresses :[akka.tcp://sparkDriverActorSystem@10.10.10.100:52008]
[2018-06-07 15:33:41,239] INFO Remoting [] [Remoting] - Remoting now listens on addresses: [akka.tcp://sparkDriverActorSystem@10.10.10.100:52008]
[2018-06-07 15:33:41,240] INFO rg.apache.spark.util.Utils [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Successfully started service 'sparkDriverActorSystem' on port 52008.
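Two details in the SecurityManager entries above matter for the failure that follows: authentication is disabled, and the view/modify acls are set to root, i.e. the job server JVM runs as the root OS user. Without Kerberos, HDFS simply trusts the client-side identity that Hadoop's UserGroupInformation reports. A tiny illustrative check (my own snippet, not part of the job server) that could be run in the same environment:

```scala
import org.apache.hadoop.security.UserGroupInformation

// Illustrative only: print the identity Hadoop presents to the NameNode.
// In the environment of this log it would report user "root" with SIMPLE
// authentication, which is exactly the user the NameNode rejects further down.
object WhoAmI {
  def main(args: Array[String]): Unit = {
    val ugi = UserGroupInformation.getCurrentUser
    println(s"user=${ugi.getShortUserName}, auth=${ugi.getAuthenticationMethod}")
  }
}
```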
[2018-06-07 15:33:41,294] INFO org.apache.spark.SparkEnv [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Registering MapOutputTracker
[2018-06-07 15:33:41,307] INFO org.apache.spark.SparkEnv [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Registering BlockManagerMaster
[2018-06-07 15:33:41,316] INFO k.storage.DiskBlockManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Created local directory at /tmp/blockmgr-35d90ebc-a326-4b1f-ad1b-df5ee8cd2d25
[2018-06-07 15:33:41,326] INFO .spark.storage.MemoryStore [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - MemoryStore started with capacity 1069.1 MB
[2018-06-07 15:33:41,460] INFO org.apache.spark.SparkEnv [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Registering OutputCommitCoordinator
[2018-06-07 15:33:41,567] INFO roject.jetty.server.Server [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - jetty-8.y.z-SNAPSHOT
[2018-06-07 15:33:41,597] INFO y.server.AbstractConnector [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Started SelectChannelConnector@0.0.0.0:39595
[2018-06-07 15:33:41,597] INFO rg.apache.spark.util.Utils [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Successfully started service 'SparkUI' on port 39595.
[2018-06-07 15:33:41,598] INFO rg.apache.spark.ui.SparkUI [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Started SparkUI at http://10.10.10.100:39595
[2018-06-07 15:33:41,619] INFO parkContextFactory$$anon$1 [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Added JAR file:/opt/spark-job-server-0.6.2.2-KNIME_cdh-5.11/spark-job-server.jar at spark://10.10.10.100:51291/jars/spark-job-server.jar with timestamp 1528400021619
[2018-06-07 15:33:41,748] INFO hadoop.yarn.client.RMProxy [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Connecting to ResourceManager at cluster-01.example.com/10.10.10.100:8032
[2018-06-07 15:33:41,914] INFO e.spark.deploy.yarn.Client [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Requesting a new application from cluster with 3 NodeManagers
[2018-06-07 15:33:41,927] INFO e.spark.deploy.yarn.Client [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Verifying our application has not requested more than the maximum memory capability of the cluster (8192 MB per container)
[2018-06-07 15:33:41,927] INFO e.spark.deploy.yarn.Client [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Will allocate AM container, with 896 MB memory including 384 MB overhead
[2018-06-07 15:33:41,927] INFO e.spark.deploy.yarn.Client [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Setting up container launch context for our AM
[2018-06-07 15:33:41,929] INFO e.spark.deploy.yarn.Client [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Setting up the launch environment for our AM container
[2018-06-07 15:33:41,940] INFO e.spark.deploy.yarn.Client [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Preparing resources for our AM container
[2018-06-07 15:33:42,372] ERROR parkContextFactory$$anon$1 [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Error initializing SparkContext.
org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="/user":hdfs:supergroup:drwxr-xr-x
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:281)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:262)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:242)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:169)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:152)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6631)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6613)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:6565)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:4360)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInt(FSNamesystem.java:4330)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:4303)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:869)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.mkdirs(AuthorizationProviderProxyClientProtocol.java:323)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:608)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2086)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2082)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2080)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
    at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:3104)
    at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:3069)
    at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:957)
    at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:953)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:953)
    at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:946)
    at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1861)
    at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:609)
    at org.apache.spark.deploy.yarn.Client.prepareLocalResources(Client.scala:357)
    at org.apache.spark.deploy.yarn.Client.createContainerLaunchContext(Client.scala:724)
    at org.apache.spark.deploy.yarn.Client.submitApplication(Client.scala:143)
    at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:57)
    at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:144)
    at org.apache.spark.SparkContext.<init>(SparkContext.scala:541)
    at spark.jobserver.context.DefaultSparkContextFactory$$anon$1.<init>(SparkContextFactory.scala:53)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:53)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:48)
    at spark.jobserver.context.SparkContextFactory$class.makeContext(SparkContextFactory.scala:37)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:48)
    at spark.jobserver.JobManagerActor.createContextFromConfig(JobManagerActor.scala:386)
    at spark.jobserver.JobManagerActor$$anonfun$wrappedReceive$1.applyOrElse(JobManagerActor.scala:129)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.ActorStack$$anonfun$receive$1.applyOrElse(ActorStack.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.Slf4jLogging$$anonfun$receive$1$$anonfun$applyOrElse$1.apply$mcV$sp(Slf4jLogging.scala:26)
    at ooyala.common.akka.Slf4jLogging$class.ooyala$common$akka$Slf4jLogging$$withAkkaSourceLogging(Slf4jLogging.scala:35)
    at ooyala.common.akka.Slf4jLogging$$anonfun$receive$1.applyOrElse(Slf4jLogging.scala:25)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.ActorMetrics$$anonfun$receive$1.applyOrElse(ActorMetrics.scala:24)
    at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
    at akka.actor.ActorCell.invoke(ActorCell.scala:456)
    at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
    at akka.dispatch.Mailbox.run(Mailbox.scala:219)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
    at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
    at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
    at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=root, access=WRITE, inode="/user":hdfs:supergroup:drwxr-xr-x
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:281)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:262)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:242)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:169)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:152)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6631)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6613)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:6565)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:4360)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInt(FSNamesystem.java:4330)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:4303)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:869)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.mkdirs(AuthorizationProviderProxyClientProtocol.java:323)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:608)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2086)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2082)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2080)
    at org.apache.hadoop.ipc.Client.call(Client.java:1471)
    at org.apache.hadoop.ipc.Client.call(Client.java:1408)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
    at com.sun.proxy.$Proxy20.mkdirs(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:549)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:256)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:104)
    at com.sun.proxy.$Proxy21.mkdirs(Unknown Source)
    at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:3102)
    ... 44 more
[2018-06-07 15:33:42,399] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/stages/stage/kill,null}
[2018-06-07 15:33:42,399] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/api,null}
[2018-06-07 15:33:42,399] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/static,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/executors/threadDump/json,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/executors/threadDump,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/executors/json,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/executors,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/environment/json,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/environment,null}
[2018-06-07 15:33:42,400] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/storage/rdd/json,null}
[2018-06-07 15:33:42,401] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/storage/rdd,null}
[2018-06-07 15:33:42,401] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/storage/json,null}
[2018-06-07 15:33:42,401] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/storage,null}
[2018-06-07 15:33:42,401] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/stages/pool/json,null}
[2018-06-07 15:33:42,401] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/stages/pool,null}
[2018-06-07 15:33:42,401] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/stages/stage/json,null}
[2018-06-07 15:33:42,402] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/stages/stage,null}
[2018-06-07 15:33:42,402] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/stages/json,null}
[2018-06-07 15:33:42,402] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/stages,null}
[2018-06-07 15:33:42,402] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/jobs/job/json,null}
[2018-06-07 15:33:42,402] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/jobs/job,null}
[2018-06-07 15:33:42,402] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/jobs/json,null}
[2018-06-07 15:33:42,402] INFO ver.handler.ContextHandler [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - stopped o.s.j.s.ServletContextHandler{/jobs,null}
[2018-06-07 15:33:42,464] INFO rg.apache.spark.ui.SparkUI [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Stopped Spark web UI at http://10.10.10.100:39595
[2018-06-07 15:33:42,468] INFO YarnClientSchedulerBackend [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Stopped
[2018-06-07 15:33:42,472] INFO utputTrackerMasterEndpoint [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - MapOutputTrackerMasterEndpoint stopped!
[2018-06-07 15:33:42,474] ERROR rg.apache.spark.util.Utils [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Uncaught exception in thread JobServer-akka.actor.default-dispatcher-2
java.lang.NullPointerException
    at org.apache.spark.network.shuffle.ExternalShuffleClient.close(ExternalShuffleClient.java:152)
    at org.apache.spark.storage.BlockManager.stop(BlockManager.scala:1231)
    at org.apache.spark.SparkEnv.stop(SparkEnv.scala:96)
    at org.apache.spark.SparkContext$$anonfun$stop$12.apply$mcV$sp(SparkContext.scala:1767)
    at org.apache.spark.util.Utils$.tryLogNonFatalError(Utils.scala:1230)
    at org.apache.spark.SparkContext.stop(SparkContext.scala:1766)
    at org.apache.spark.SparkContext.<init>(SparkContext.scala:613)
    at spark.jobserver.context.DefaultSparkContextFactory$$anon$1.<init>(SparkContextFactory.scala:53)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:53)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:48)
    at spark.jobserver.context.SparkContextFactory$class.makeContext(SparkContextFactory.scala:37)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:48)
    at spark.jobserver.JobManagerActor.createContextFromConfig(JobManagerActor.scala:386)
    at spark.jobserver.JobManagerActor$$anonfun$wrappedReceive$1.applyOrElse(JobManagerActor.scala:129)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.ActorStack$$anonfun$receive$1.applyOrElse(ActorStack.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.Slf4jLogging$$anonfun$receive$1$$anonfun$applyOrElse$1.apply$mcV$sp(Slf4jLogging.scala:26)
    at ooyala.common.akka.Slf4jLogging$class.ooyala$common$akka$Slf4jLogging$$withAkkaSourceLogging(Slf4jLogging.scala:35)
    at ooyala.common.akka.Slf4jLogging$$anonfun$receive$1.applyOrElse(Slf4jLogging.scala:25)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.ActorMetrics$$anonfun$receive$1.applyOrElse(ActorMetrics.scala:24)
    at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
    at akka.actor.ActorCell.invoke(ActorCell.scala:456)
    at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
    at akka.dispatch.Mailbox.run(Mailbox.scala:219)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
    at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
    at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
    at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
[2018-06-07 15:33:42,475] INFO parkContextFactory$$anon$1 [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Successfully stopped SparkContext
[2018-06-07 15:33:42,475] ERROR .jobserver.JobManagerActor [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Failed to create context spark-job-server, shutting down actor
org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="/user":hdfs:supergroup:drwxr-xr-x
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:281)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:262)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:242)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:169)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:152)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6631)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6613)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:6565)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:4360)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInt(FSNamesystem.java:4330)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:4303)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:869)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.mkdirs(AuthorizationProviderProxyClientProtocol.java:323)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:608)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2086)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2082)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2080)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
    at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:3104)
    at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:3069)
    at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:957)
    at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:953)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:953)
    at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:946)
    at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1861)
    at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:609)
    at org.apache.spark.deploy.yarn.Client.prepareLocalResources(Client.scala:357)
    at org.apache.spark.deploy.yarn.Client.createContainerLaunchContext(Client.scala:724)
    at org.apache.spark.deploy.yarn.Client.submitApplication(Client.scala:143)
    at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:57)
    at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:144)
    at org.apache.spark.SparkContext.<init>(SparkContext.scala:541)
    at spark.jobserver.context.DefaultSparkContextFactory$$anon$1.<init>(SparkContextFactory.scala:53)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:53)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:48)
    at spark.jobserver.context.SparkContextFactory$class.makeContext(SparkContextFactory.scala:37)
    at spark.jobserver.context.DefaultSparkContextFactory.makeContext(SparkContextFactory.scala:48)
    at spark.jobserver.JobManagerActor.createContextFromConfig(JobManagerActor.scala:386)
    at spark.jobserver.JobManagerActor$$anonfun$wrappedReceive$1.applyOrElse(JobManagerActor.scala:129)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.ActorStack$$anonfun$receive$1.applyOrElse(ActorStack.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.Slf4jLogging$$anonfun$receive$1$$anonfun$applyOrElse$1.apply$mcV$sp(Slf4jLogging.scala:26)
    at ooyala.common.akka.Slf4jLogging$class.ooyala$common$akka$Slf4jLogging$$withAkkaSourceLogging(Slf4jLogging.scala:35)
    at ooyala.common.akka.Slf4jLogging$$anonfun$receive$1.applyOrElse(Slf4jLogging.scala:25)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply$mcVL$sp(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:33)
    at scala.runtime.AbstractPartialFunction$mcVL$sp.apply(AbstractPartialFunction.scala:25)
    at ooyala.common.akka.ActorMetrics$$anonfun$receive$1.applyOrElse(ActorMetrics.scala:24)
    at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
    at akka.actor.ActorCell.invoke(ActorCell.scala:456)
    at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
    at akka.dispatch.Mailbox.run(Mailbox.scala:219)
    at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
    at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
    at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
    at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
    at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=root, access=WRITE, inode="/user":hdfs:supergroup:drwxr-xr-x
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:281)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:262)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:242)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:169)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:152)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6631)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6613)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:6565)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:4360)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInt(FSNamesystem.java:4330)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:4303)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:869)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.mkdirs(AuthorizationProviderProxyClientProtocol.java:323)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:608)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2086)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2082)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2080)
    at org.apache.hadoop.ipc.Client.call(Client.java:1471)
    at org.apache.hadoop.ipc.Client.call(Client.java:1408)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
    at com.sun.proxy.$Proxy20.mkdirs(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.mkdirs(ClientNamenodeProtocolTranslatorPB.java:549)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:256)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:104)
    at com.sun.proxy.$Proxy21.mkdirs(Unknown Source)
    at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:3102)
    ... 44 more
[2018-06-07 15:33:42,514] INFO .jobserver.JobManagerActor [] [] - Shutting down SparkContext spark-job-server
[2018-06-07 15:33:42,516] INFO .actor.RepointableActorRef [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Message [akka.actor.PoisonPill$] from Actor[akka.tcp://JobServer@127.0.0.1:33139/user/context-supervisor#114661079] to Actor[akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5#-95460231] was not delivered. [1] dead letters encountered. This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' and 'akka.log-dead-letters-during-shutdown'.
[2018-06-07 15:33:42,516] INFO kka.actor.ProductionReaper [] [akka://JobServer/user/$a] - Actor Actor[akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5#-95460231] terminated
[2018-06-07 15:33:42,517] WARN kka.actor.ProductionReaper [] [akka://JobServer/user/$a] - Shutting down actor system because all actors have terminated
[2018-06-07 15:33:42,517] WARN kka.actor.ProductionReaper [] [] - Shutting down ooyala.common.akka.actor.ProductionReaper
[2018-06-07 15:33:42,519] INFO rovider$RemotingTerminator [] [akka://JobServer/system/remoting-terminator] - Shutting down remote daemon.
[2018-06-07 15:33:42,520] INFO rovider$RemotingTerminator [] [akka://JobServer/system/remoting-terminator] - Remote daemon shut down; proceeding with flushing remote transports.
[2018-06-07 15:33:42,528] INFO akka.actor.LocalActorRef [] [akka://JobServer/system/transports/akkaprotocolmanager.tcp0/akkaProtocol-tcp%3A%2F%2FJobServer%40127.0.0.1%3A33139-1] - Message [akka.remote.transport.AssociationHandle$Disassociated] from Actor[akka://JobServer/deadLetters] to Actor[akka://JobServer/system/transports/akkaprotocolmanager.tcp0/akkaProtocol-tcp%3A%2F%2FJobServer%40127.0.0.1%3A33139-1#1612033340] was not delivered. [2] dead letters encountered. This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' and 'akka.log-dead-letters-during-shutdown'.
[2018-06-07 15:33:42,529] INFO akka.actor.LocalActorRef [] [akka://JobServer/system/endpointManager/reliableEndpointWriter-akka.tcp%3A%2F%2FJobServer%40127.0.0.1%3A33139-0/endpointWriter] - Message [akka.actor.Terminated] from Actor[akka://JobServer/system/endpointManager/reliableEndpointWriter-akka.tcp%3A%2F%2FJobServer%40127.0.0.1%3A33139-0/endpointWriter/endpointReader-akka.tcp%3A%2F%2FJobServer%40127.0.0.1%3A33139-0#1703670904] to Actor[akka://JobServer/system/endpointManager/reliableEndpointWriter-akka.tcp%3A%2F%2FJobServer%40127.0.0.1%3A33139-0/endpointWriter#1198496148] was not delivered. [3] dead letters encountered. This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' and 'akka.log-dead-letters-during-shutdown'.
[2018-06-07 15:33:42,541] INFO Remoting [] [Remoting] - Remoting shut down
[2018-06-07 15:33:42,542] INFO rovider$RemotingTerminator [] [akka://JobServer/system/remoting-terminator] - Remoting shut down.
[2018-06-07 15:33:42,555] INFO k.storage.DiskBlockManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Shutdown hook called
[2018-06-07 15:33:42,555] INFO k.util.ShutdownHookManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Shutdown hook called
[2018-06-07 15:33:42,556] INFO k.util.ShutdownHookManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Deleting directory /tmp/spark-80b5321b-2473-464a-bea5-4b6fda206f77
[2018-06-07 15:33:42,557] INFO k.util.ShutdownHookManager [] [akka://JobServer/user/jobManager-32-9e8a-eb53f2d71aa5] - Deleting directory /tmp/spark-80b5321b-2473-464a-bea5-4b6fda206f77/userFiles-9aed4c0b-d6bb-4328-8016-1f754f16acec
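Everything from the first ERROR onward traces back to a single root cause with cascading noise. In yarn-client mode, org.apache.spark.deploy.yarn.Client.prepareLocalResources must create a staging directory under the submitting user's HDFS home (here /user/root/.sparkStaging) before it can launch the AM, but /user is owned by hdfs:supergroup with mode drwxr-xr-x, so the root user the job server runs as (see the SecurityManager acl entries earlier) is denied WRITE. The subsequent NullPointerException in ExternalShuffleClient.close and the dead-letter warnings are just fallout from tearing down a half-constructed SparkContext. A minimal remediation sketch follows; it assumes HDFS superuser rights, and the object name CreateStagingHome is invented for illustration (the usual shell equivalent is shown in the comments):

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

// Sketch only: give the job-server user an HDFS home directory so the YARN
// submission can create /user/root/.sparkStaging. Must run with HDFS
// superuser rights; the shell equivalent would be:
//   sudo -u hdfs hdfs dfs -mkdir -p /user/root
//   sudo -u hdfs hdfs dfs -chown root:root /user/root
object CreateStagingHome {
  def main(args: Array[String]): Unit = {
    val fs   = FileSystem.get(new Configuration()) // picks up core-site.xml/hdfs-site.xml from the classpath
    val home = new Path("/user/root")
    if (!fs.exists(home)) {
      fs.mkdirs(home)                   // the mkdirs call the NameNode rejected above
      fs.setOwner(home, "root", "root") // hand ownership to the job-server user
    }
    fs.close()
  }
}
```

Running the job server as a dedicated non-root user that already owns an HDFS home directory achieves the same result and is the more common production setup.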