deploy {
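  # Script the job server runs to launch the manager process for each context
  # (relevant when context-per-jvm = true below).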
  manager-start-cmd = "/app/manager_start.sh"
}

spark {
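  # The default value is given first; the optional ${?ENV} substitution on the
  # following line overrides it only if that environment variable is set. The
  # same pattern is used for port, home, and hostname below.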
  master = "local[4]"
  master = ${?SPARK_MASTER}

  jobserver {
    port = 8090
    port = ${?PORT}
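    # DAO used to persist uploaded jars and job metadata; JobSqlDAO stores them
    # in the SQL database configured under sqldao below.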
    jobdao = spark.jobserver.io.JobSqlDAO
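    # Run each SparkContext in its own JVM process rather than inside the job
    # server's JVM, giving contexts isolation from one another.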
    context-per-jvm = true
    sqldao {
      # Directory where the default H2 driver stores its data. Only needed for H2.
      rootdir = /mnt/mesos/sandbox
      # Full JDBC URL / init string. Needs to match rootdir above.
      # Substitutions may be used to launch job-server, but leave them out here in the default or tests won't pass.
      jdbc.url = "jdbc:h2:file:/mnt/mesos/sandbox/h2-db"
    }
  }

  # Predefined Spark contexts
  # contexts {
  #   my-low-latency-context {
  #     num-cpu-cores = 1         # Number of cores to allocate. Required.
  #     memory-per-node = 512m    # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.
  #   }
  #   # define additional contexts here
  # }

  # Universal context configuration. These settings can be overridden, see README.md
  context-settings {
    num-cpu-cores = 2             # Number of cores to allocate. Required.
    memory-per-node = 512m        # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.

    # In case the Spark distribution should be accessed from HDFS
    # (as opposed to being installed on every Mesos slave)
    # spark.executor.uri = ${EXECUTOR_URI}

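    # Mesos settings: coarse-grained mode launches one long-running Mesos task
    # per node instead of one Mesos task per Spark task; executors run inside
    # the Docker image below, and spark.mesos.uris lists files the Mesos fetcher
    # downloads into each sandbox (here, a credentials tarball).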
    spark.mesos.coarse = true
    spark.mesos.executor.home = ${?SPARK_HOME}
    spark.mesos.executor.docker.image = "323604001645.dkr.ecr.us-east-1.amazonaws.com/pandata-dcos:srr23"
    spark.mesos.uris = "http://dcos-bootstrap-us-east-1.s3.amazonaws.com/docker/pandata/creds.tar.gz"

    # URIs of jars to be loaded into the classpath for this context. Either a
    # string list or a single string of URIs separated by commas ','.
    # dependent-jar-uris = ["file:///some/path/present/in/each/mesos/slave/somepackage.jar"]
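    # Equivalent comma-separated string form (paths are illustrative):
    # dependent-jar-uris = "file:///some/path/one.jar,file:///some/path/two.jar"
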
    # If you wish to pass any settings directly to the SparkConf as-is, add them
    # here in passthrough, such as Hadoop connection settings that don't use the
    # "spark." prefix.
    passthrough {
      # es.nodes = "192.1.1.1"
    }
  }

  # This needs to match SPARK_HOME for cluster SparkContexts to be created successfully
  home = "/usr/local/spark"
  home = ${?SPARK_HOME}
}

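# Akka remoting settings used by the job server to talk to the per-context JVMs;
# HOSTNAME, if set, overrides the bind address, and port = 0 lets Akka pick any
# free port.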
akka {
  remote {
    netty.tcp {
      hostname = "0.0.0.0"
      hostname = ${?HOSTNAME}
      port = 0
    }
  }
}