1. Check the JDK version
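For example, verify on every node that the expected JDK is installed and on the PATH:
java -version
echo $JAVA_HOME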
2. Synchronize the clocks on all nodes
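A minimal sketch, assuming the ntp package is available via yum and pool.ntp.org is reachable (run on node1, node2, node3, node4):
yum install -y ntp
ntpdate pool.ntp.org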
3. Set up passwordless SSH login
#Generate a key pair (node1,node2,node3,node4)
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
cd ~/.ssh/
ls
#Notes
##id_dsa      private key
##id_dsa.pub  public key
#Append the public key to the local authorized_keys file (node1,node2,node3,node4)
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
ssh 192.168.2.136
#Last login: Sun Jul 9 14:14:35 2017 from 192.168.2.136 -- the login succeeded and no password was required
exit
#logout -- leave the SSH session
#Copy node1's public key to node2, node3 and node4
scp ./id_dsa.pub [email protected]:/opt/
scp ./id_dsa.pub [email protected]:/opt/
scp ./id_dsa.pub [email protected]:/opt/
#Append /opt/id_dsa.pub to the authorized_keys file on node2, node3 and node4
cat /opt/id_dsa.pub >> ~/.ssh/authorized_keys
4. Configure node1 as the NameNode and node2, node3, node4 as DataNodes
#Upload the Hadoop tarball to /root on node1
tar -zxvf hadoop-2.5.1_x64.tar.gz
mv hadoop-2.5.1 /home/
cd /home/
ls
cd hadoop-2.5.1
ls
cd etc/hadoop
#Config 1: edit hadoop-env.sh
vi hadoop-env.sh
#Point it at the JDK installation directory
export JAVA_HOME=/usr/java/jdk1.7.0_79
#Config 2: edit core-site.xml
vi core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.2.136:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop-2.5</value>
  </property>
</configuration>
#Config 3: edit hdfs-site.xml
vi hdfs-site.xml
<configuration>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>192.168.2.137:50090</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.https-address</name>
    <value>192.168.2.137:50091</value>
  </property>
</configuration>
#Config 4: edit slaves (list the DataNodes)
vi slaves
192.168.2.137
192.168.2.138
192.168.2.139
#Config 5: edit masters (the SecondaryNameNode)
vi masters
192.168.2.137
5. Copy the Hadoop directory to the other nodes
scp -r hadoop-2.5.1/ [email protected]:/home/
scp -r hadoop-2.5.1/ [email protected]:/home/
scp -r hadoop-2.5.1/ [email protected]:/home/
6. Configure the Hadoop environment variables
vi ~/.bash_profile
export HADOOP_HOME=/home/hadoop-2.5.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
7. Copy the profile to the other nodes
scp ~/.bash_profile [email protected]:/root/
scp ~/.bash_profile [email protected]:/root/
scp ~/.bash_profile [email protected]:/root/
8. Reload ~/.bash_profile
source ~/.bash_profile
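To quickly confirm the variables took effect, for example:
echo $HADOOP_HOME
which hdfs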
9. Format the HDFS file system; this must be run only on the NameNode (node1)
hdfs namenode -format
10. Check the generated fsimage file
cd /opt/hadoop-2.5/dfs/name/current
ls -l
11. Start the HDFS daemons
start-dfs.sh
#alternatively: start-all.sh
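To verify the daemons are running, jps (shipped with the JDK) can be run on each node; with the configuration above, node1 should show NameNode, node2 should show DataNode and SecondaryNameNode, and node3/node4 should show DataNode:
jps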
12. Disable the firewall
service iptables stop
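If the firewall should also stay off after a reboot (CentOS 6 style service management), optionally:
chkconfig iptables off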
13. Access the monitoring web UI
http://192.168.2.136:50070/dfshealth.html#tab-overview
14. Note: make sure the entries in /etc/hosts are correct
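For reference, the hostname-to-IP mapping implied by the addresses used above would look like this in /etc/hosts on every node:
192.168.2.136 node1
192.168.2.137 node2
192.168.2.138 node3
192.168.2.139 node4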