Install Hadoop
Upload the installation package to the /opt/software directory and extract it
[bigdata@node101 software]$ tar -zxvf hadoop-3.3.5.tar.gz -C /opt/services/
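If the extraction succeeded, the new hadoop-3.3.5 directory should now be visible under /opt/services alongside the existing JDK and ZooKeeper installations referenced in the environment variables below:
[bigdata@node101 software]$ ls /opt/services/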
Configure environment variables
[bigdata@node101 ~]$ sudo vim /etc/profile.d/bigdata_env.sh
export JAVA_HOME=/opt/services/jdk1.8.0_161
export ZK_HOME=/opt/services/zookeeper-3.5.7
export HADOOP_HOME=/opt/services/hadoop-3.3.5
export PATH=$PATH:$JAVA_HOME/bin:$ZK_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Distribute the environment variable file
[bigdata@node101 bin]$ sudo ./xsync /etc/profile.d/bigdata_env.sh
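xsync here is assumed to be a custom cluster-distribution script prepared earlier; if it is not available, a minimal equivalent is a plain rsync loop (a sketch only: adjust the host list to your own nodes, and it assumes the bigdata user has passwordless SSH and passwordless sudo on each target node):
for host in node102 node103 node104 node105; do
    rsync -av --rsync-path="sudo rsync" /etc/profile.d/bigdata_env.sh ${host}:/etc/profile.d/
done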
Refresh the environment variables (run on all 5 machines)
[bigdata@node101 ~]$ source /etc/profile
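After sourcing the profile, a quick sanity check on each node is to confirm that the Hadoop binaries resolve from PATH; hadoop version should report 3.3.5:
[bigdata@node101 ~]$ hadoop version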
Configure core-site.xml
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/services/hadoop-3.3.5/data</value>
</property>
<property>
    <name>hadoop.http.staticuser.user</name>
    <value>bigdata</value>
</property>
<property>
    <name>fs.trash.interval</name>
    <value>1440</value>
</property>
<property>
    <name>fs.trash.checkpoint.interval</name>
    <value>1440</value>
</property>
<property>
    <name>ha.zookeeper.quorum</name>
    <value>node101:2181,node102:2181,node103:2181</value>
</property>
<property>
    <name>hadoop.proxyuser.bigdata.hosts</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.proxyuser.bigdata.groups</name>
    <value>*</value>
</property>
<property>
    <name>hadoop.proxyuser.bigdata.users</name>
    <value>*</value>
</property>
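With fs.trash.interval and fs.trash.checkpoint.interval set to 1440 minutes, files removed through the HDFS shell are kept in the per-user trash for 24 hours before being purged, and the hadoop.proxyuser.bigdata.* entries let the bigdata user impersonate other users, which downstream services such as HiveServer2 rely on. As an illustration (the file path here is hypothetical), a deleted file lands in the trash and can be restored, or the trash can be emptied early:
[bigdata@node101 ~]$ hdfs dfs -rm /tmp/example.txt
[bigdata@node101 ~]$ hdfs dfs -mv /user/bigdata/.Trash/Current/tmp/example.txt /tmp/
[bigdata@node101 ~]$ hdfs dfs -expunge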
Configure hdfs-site.xml
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file://${hadoop.tmp.dir}/name</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file://${hadoop.tmp.dir}/data1,file://${hadoop.tmp.dir}/data2</value>
</property>
<property>
    <name>dfs.journalnode.edits.dir</name>
    <value>${hadoop.tmp.dir}/journal/</value>
</property>
<property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
</property>
<property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>node101:8020</value>
</property>
<property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>node102:8020</value>
</property>
<property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>node101:9870</value>
</property>
<property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>node102:9870</value>
</property>
<property><name>dfs.namenode.shared.edits.dir</name><va