1.local单机模式,结果在xshell终端(driver控制台)中可见:
./bin/spark-submit --class org.apache.spark.examples.SparkPi --master local[1] ./lib/spark-examples-1.6.0-hadoop2.4.0.jar 100
2.standalone集群模式之client模式:
conf/spark-env.sh添加
export JAVA_HOME=/root/install/jdk1.7.0_21
export SPARK_MASTER_IP=node1
export SPARK_MASTER_PORT=7077
export SPARK_WORKER_CORES=1
export SPARK_WORKER_INSTANCES=1
export SPARK_WORKER_MEMORY=1g
vi conf/slaves
添加
node2
node3
rm -rf slaves.template
rm -rf spark-env.sh.template
结果xshell可见:
./bin/spark-submit --class org.apache.spark.examples.SparkPi --master spark://node1:7077 --executor-memory 1G --total-executor-cores 2 ./lib/spark-examples-1.6.0-hadoop2.4.0.jar 100
3.standalone集群模式之cluster模式:
结果在Master的Web UI(node1:8080)里面可见!
./bin/spark-submit --class org.apache.spark.examples.SparkPi --master spark://node1:7077 --deploy-mode cluster --supervise --executor-memory 1G --total-executor-cores 1 ./lib/spark-examples-1.6.0-hadoop2.4.0.jar 100
4.Yarn集群模式,结果在YARN ResourceManager的Web UI(node1:8088)里面可见:
在conf/spark-env.sh里添加export HADOOP_CONF_DIR=/home/install/hadoop-2.5/etc/hadoop
./bin/spark-submit --class org.apache.spark.examples.SparkPi --master yarn-cluster --executor-memory 1G --num-executors 1 ./lib/spark-examples-1.6.0-hadoop2.4.0.jar 100
com.spark.study.MySparkPi
./bin/spark-submit --class com.spark.study.MySparkPi --master yarn-client --executor-memory 1G --num-executors 1 ./data/spark_pagerank_pi.jar 100