# Node roles for this walkthrough:
# ZooKeeper ensemble: node1, node2, node3 (for installation, see http://suyanzhu.blog.51cto.com/8050189/1946580; a zoo.cfg sketch follows this list)
# NameNodes (nn1/nn2): node1, node4
# JournalNodes: node2, node3, node4
# DataNodes: node2, node3, node4 (start a single DataNode with: hadoop-daemon.sh start datanode)
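In case the linked guide is unavailable, here is a minimal zoo.cfg sketch for this three-node ensemble. The dataDir path is an assumption; each host also needs a myid file in dataDir containing its server number (1, 2 or 3).
# conf/zoo.cfg (sketch; dataDir is an assumption, clientPort matches ha.zookeeper.quorum below)
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/opt/zookeeper/data
clientPort=2181
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888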
# hdfs-site.xml
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>yunshuocluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.yunshuocluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.yunshuocluster.nn1</name>
    <value>node1:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.yunshuocluster.nn2</name>
    <value>node4:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.yunshuocluster.nn1</name>
    <value>node1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.yunshuocluster.nn2</name>
    <value>node4:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://node2:8485;node3:8485;node4:8485/yunshuocluster</value>
  </property>
  <property>
    <!-- the suffix of this property name must match the dfs.nameservices value -->
    <name>dfs.client.failover.proxy.provider.yunshuocluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_dsa</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/journalnode/</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
</configuration>
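Note that sshfence can only fence a failed NameNode if the two NameNode hosts can SSH to each other without a password, using the private key configured above. A minimal sketch, assuming root logins between node1 and node4 are permitted:
ssh-keygen -t dsa -P '' -f /root/.ssh/id_dsa     # run on node1 and on node4
ssh-copy-id -i /root/.ssh/id_dsa.pub root@node4  # run on node1; repeat from node4 towards node1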
# core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://yunshuocluster</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop-2.5</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
</configuration>
# etc/hadoop/slaves (one DataNode host per line)
node2
node3
node4
# Start ZooKeeper on node1, node2 and node3
zkServer.sh start
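A quick way to verify the ensemble (status is a stock zkServer.sh subcommand): one node should report leader, the other two follower.
zkServer.sh status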
# Start the JournalNodes on node2, node3 and node4 (stop with: hadoop-daemon.sh stop journalnode)
hadoop-daemon.sh start journalnode
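To confirm the daemons actually came up, jps (shipped with the JDK) should list a JournalNode process on node2, node3 and node4:
jps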
# If a JournalNode fails to start, inspect its log (example from node2)
cd /home/hadoop-2.5.1/logs
ls
tail -200 hadoop-root-journalnode-node2.log
# Format HDFS on one NameNode (node4 here, since the copy targets node1), then sync the metadata to the other
hdfs namenode -format
cd /opt/hadoop-2.5
scp -r /opt/hadoop-2.5/* root@node1:/opt/hadoop-2.5/    # the two NameNodes are now in sync
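For reference, Hadoop 2.x can perform this metadata sync without scp: start the freshly formatted NameNode, then bootstrap the other one from it. A sketch, assuming node4 was the one formatted:
hadoop-daemon.sh start namenode    # on node4, the formatted NameNode
hdfs namenode -bootstrapStandby    # on node1, the other NameNode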
# Initialize the HA state znode in ZooKeeper (run once, on one of the NameNodes)
hdfs zkfc -formatZK
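To confirm the initialization, the parent znode that -formatZK creates (by default /hadoop-ha) should now exist in ZooKeeper; a sketch using the stock zkCli.sh client:
zkCli.sh -server node1:2181 ls /hadoop-ha    # should list the nameservice, yunshuocluster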
# Start the whole HDFS HA cluster (stop it with stop-dfs.sh)
start-dfs.sh
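Finally, verify the processes and the HA state; nn1 and nn2 are the logical NameNode IDs defined in hdfs-site.xml above.
jps                                  # node1 and node4 should also show DFSZKFailoverController
hdfs haadmin -getServiceState nn1    # reports "active" or "standby"
hdfs haadmin -getServiceState nn2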