hadoop节点上下线
# Launch three CentOS containers (init as PID 1) to serve as the Hadoop nodes.
# hd1: publishes 8888 (matches dfs.namenode.http-address hadoop1:8888) and SSH on 2222.
# hd2: publishes 8889 (matches dfs.namenode.secondary.http-address hadoop2:8889).
# hd3: worker only, no published ports.
# NOTE(review): assumes Docker assigns 172.17.0.2/3/4 in launch order — matches
# the /etc/hosts entries below; verify on the actual host.
docker run -d --name hd1 -p 8888:8888 -p 2222:22 centos:basic init
docker run -d --name hd2 -p 8889:8889 centos:basic init
docker run -d --name hd3 centos:basic init
# hosts: make all three nodes resolvable by name.
# FIX: the original fused the comment onto the redirection target
# (">>/etc/hosts# 免密"), so the entries were appended to a file literally
# named "/etc/hosts#".
echo "172.17.0.2 hadoop1
172.17.0.3 hadoop2
172.17.0.4 hadoop3" >>/etc/hosts

# Passwordless SSH: generate one key pair, push it to every node (self included).
# FIX: the original fused "# 部署目录" onto the last target, copying the key
# to nonexistent host "hadoop3#".
ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa -q
for host in hadoop1 hadoop2 hadoop3; do
  ssh-copy-id -i /root/.ssh/id_rsa.pub "root@${host}"
done
# Deployment directory shared by the JDK and Hadoop.
# FIX: the original ran "mkdir /data# jdk", creating a directory named "/data#".
mkdir -p /data

# JDK 8: download, unpack, install under /data/java, and export JAVA_HOME.
wget https://repo.huaweicloud.com/java/jdk/8u202-b08/jdk-8u202-linux-x64.tar.gz
tar xf jdk-8u202-linux-x64.tar.gz
mv jdk1.8.0_202/ /data/java
echo "export JAVA_HOME=/data/java
export PATH=\$PATH:\$JAVA_HOME/bin" >>/etc/profile.d/hadoop_env.sh

# Hadoop 3.2.4: download, unpack, install under /data/hadoop, export HADOOP_HOME.
wget https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.2.4/hadoop-3.2.4.tar.gz
tar xf hadoop-3.2.4.tar.gz
# BUG FIX: the original never moved the extracted tree, so HADOOP_HOME
# (/data/hadoop) pointed at a directory that does not exist (compare the
# jdk step above, which does the mv).
mv hadoop-3.2.4/ /data/hadoop
echo "export HADOOP_HOME=/data/hadoop
export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin" >>/etc/profile.d/hadoop_env.sh
配置更改
1)配置 core-site.xml
cat /data/hadoop/etc/hadoop/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- NameNode RPC address -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop1:9000</value>
  </property>
  <!-- Base directory for Hadoop data (default: /tmp/hadoop-${user.name}) -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/hadoop/data</value>
    <description>A base for other temporary directories.</description>
  </property>
  <!-- Static user for the HDFS web UI -->
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>hadoopuser</value>
  </property>
  <!-- Hosts / groups / users that hadoopuser (superuser) may proxy for -->
  <property>
    <name>hadoop.proxyuser.hadoopuser.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hadoopuser.groups</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.hadoopuser.users</name>
    <value>*</value>
  </property>
  <!-- DataNode decommission (exclude) list.
       FIX: the path was /data/etc/hadoop/excludes, which exists nowhere; it
       must match the file used everywhere else in this guide
       (/data/hadoop/etc/hadoop/excludes).
       NOTE(review): dfs.hosts.exclude is an HDFS property and is already set
       in hdfs-site.xml; the copy here is redundant but harmless. -->
  <property>
    <name>dfs.hosts.exclude</name>
    <value>/data/hadoop/etc/hadoop/excludes</value>
  </property>
</configuration>
2)配置 hdfs-site.xml
vi /data/hadoop/etc/hadoop/hdfs-site.xml
cat /data/hadoop/etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- NameNode web UI address -->
  <property>
    <name>dfs.namenode.http-address</name>
    <value>hadoop1:8888</value>
  </property>
  <!-- SecondaryNameNode web UI address -->
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop2:8889</value>
  </property>
  <!-- Test environment: keep only 2 HDFS replicas -->
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
  <!-- DataNode decommission (exclude) list -->
  <property>
    <name>dfs.hosts.exclude</name>
    <value>/data/hadoop/etc/hadoop/excludes</value>
  </property>
</configuration>
3)配置 mapred-site.xml
vi /data/hadoop/etc/hadoop/mapred-site.xml
cat /data/hadoop/etc/hadoop/mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- Run MapReduce jobs on YARN -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- NOTE(review): mapred.hosts.exclude is a Hadoop 1.x JobTracker property
       and has no effect on a Hadoop 3 / YARN cluster. NodeManager
       decommissioning is driven by yarn.resourcemanager.nodes.exclude-path
       in yarn-site.xml. Kept only to match the original guide. -->
  <property>
    <name>mapred.hosts.exclude</name>
    <value>/data/hadoop/etc/hadoop/excludes</value>
  </property>
</configuration>
4)配置 yarn-site.xml
vi /data/hadoop/etc/hadoop/yarn-site.xml
cat /data/hadoop/etc/hadoop/yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- ResourceManager host -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop1</value>
  </property>
  <!-- NodeManager auxiliary service required by MapReduce shuffle -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- Memory available to each NodeManager (MB) -->
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>8192</value>
  </property>
  <!-- Min/max memory per container request (MB) -->
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>8192</value>
  </property>
  <!-- Min/max virtual cores per container request -->
  <property>
    <name>yarn.scheduler.minimum-allocation-vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-vcores</name>
    <value>4</value>
  </property>
  <!-- FIX: node decommissioning on YARN needs the ResourceManager exclude
       file; the original only configured the HDFS side (dfs.hosts.exclude),
       so NodeManagers were never retired by `yarn rmadmin -refreshNodes`.
       Points at the same excludes file used by HDFS. -->
  <property>
    <name>yarn.resourcemanager.nodes.exclude-path</name>
    <value>/data/hadoop/etc/hadoop/excludes</value>
  </property>
</configuration>
4)修改 workers
vi /data/hadoop/etc/hadoop/workers
[root@d9bef3a9e577 hadoop]# cat /data/hadoop/etc/hadoop/workers
hadoop1
hadoop2
hadoop3
5)修改Hadoop默认启动、关闭脚本,添加root执行权限
# Hadoop's bundled start/stop scripts refuse to run as root unless the
# *_USER variables are set; inject them right after the shebang (line 1)
# of every start-*.sh / stop-*.sh.
cd /data/hadoop/sbin/
# FIXES vs. original: iterate the glob directly instead of parsing `ls`
# output, quote "$i" so odd filenames cannot split, and drop the stray
# backslash that preceded YARN_NODEMANAGER_USER in the sed text
# (GNU sed `a` with \n escapes, as in the original).
for i in start*.sh stop*.sh; do
  sed -i "1a\HDFS_DATANODE_USER=root\nHDFS_DATANODE_SECURE_USER=root\nHDFS_NAMENODE_USER=root\nHDFS_SECONDARYNAMENODE_USER=root\nYARN_RESOURCEMANAGER_USER=root\nYARN_NODEMANAGER_USER=root" "$i"
done
# Sync the whole deployment (/data: JDK, Hadoop, config) to the other nodes,
# then verify over SSH that the Java install is usable there.
rsync -a /data/ hadoop2:/data
rsync -a /data/ hadoop3:/data
ssh hadoop2 "source /etc/profile.d/hadoop_env.sh;java -version"
ssh hadoop3 "source /etc/profile.d/hadoop_env.sh;java -version"
4、启动hadoop
在启动hadoop之前,我们需要做一步非常关键的步骤,需要在Namenode上执行初始化命令,初始化name目录和数据目录。
# Initialize the cluster: format the NameNode metadata directory.
# Run once, on the NameNode only -- reformatting destroys existing HDFS metadata.
# FIX: the original fused a comment onto the command ("-format#启动所有服务;"),
# so hdfs received the garbage argument "-format#启动所有服务" instead of -format.
/data/hadoop/bin/hdfs namenode -format
# Start all HDFS and YARN daemons.
# FIX: the original likewise fused "# 查看" onto the script path, invoking the
# nonexistent file "start-all.sh#".
/data/hadoop/sbin/start-all.sh
[root@hadoop1 hadoop]# jps
8370 NodeManager
7636 DataNode
8042 ResourceManager
8571 Jps
7469 NameNode
[root@hadoop2 hadoop]# jps
2354 SecondaryNameNode
2434 NodeManager
2243 DataNode
2559 Jps
[root@hadoop3 hadoop]# jps
2370 Jps
2141 DataNode
2254 NodeManager
访问 http://172.17.0.2:8888 http://172.17.0.3:8889
# Inspect the service processes.
ps -ef | grep -aiE hadoop
# Inspect the listening ports.
netstat -ntpl
# List the JVM processes.
jps
# Follow the Hadoop logs.
# FIX: the original fused "#停止所有服务;" onto the glob, turning it into
# "/data/hadoop/logs/*.log#停止所有服务", which matches nothing.
tail -fn 100 /data/hadoop/logs/*.log
# Stop all services.
/data/hadoop/sbin/stop-all.sh
# Last-resort stop: kill any remaining Hadoop JVMs.
# NOTE(review): -9 (SIGKILL) skips shutdown hooks; prefer plain kill (TERM)
# first and fall back to -9 only if processes linger.
ps -ef | grep hadoop | grep java | grep -v grep | awk '{print $2}' | xargs kill -9
sleep 2
添加节点
# namenode
[root@hadoop1 ~]# echo "172.17.0.5 hadoop4">>/etc/hosts # 所有节点,新节点配置所有hosts
[root@hadoop1 ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub root@hadoop4
[root@hadoop1 ~]# scp /etc/profile.d/hadoop_env.sh root@hadoop4:/etc/profile.d/
[root@hadoop1 ~]# ssh hadoop4 "mkdir /data;source /etc/profile.d/hadoop_env.sh;yum -y install rsync"
[root@hadoop1 ~]# vi /data/hadoop/etc/hadoop/workers
hadoop4 #新增
# 数据同步
[root@hadoop1 ~]# rsync -a /data root@hadoop4:/ --exclude=/data/hadoop/{data,logs} --bwlimit=5000
# hadoop4
[root@hadoop4 ~]# hdfs --daemon start datanode
[root@hadoop4 ~]# yarn --daemon start nodemanager
# namenode
[root@hadoop1 ~]# hdfs dfsadmin -refreshNodes
[root@hadoop1 ~]# hdfs dfsadmin -report | grep Live
Live datanodes (4):
[root@hadoop1 ~]# hdfs dfsadmin -setBalancerBandwidth 10485760 # 10M
[root@hadoop1 ~]# hdfs balancer -threshold 1 # 表示以阈值1%运行(默认值10%) 参数设置的越小,整个集群就越平衡
在到 web 管理页面查看:
下线节点
[root@hadoop1 ~]# vi /data/hadoop/etc/hadoop/excludes  # 必须与 dfs.hosts.exclude 配置的绝对路径一致
hadoop4
[root@hadoop1 ~]# hdfs dfsadmin -refreshNodes # 刷新驱逐
Refresh nodes successful
[root@hadoop1 ~]# hdfs dfsadmin -report | grep -i status -B 2
--
Name: 172.17.0.5:9866 (hadoop4)
Hostname: 19287e3cce1b
Decommission Status : Decommissioned # 当节点处于Decommissioned,表示关闭成功
[root@hadoop4 ~]# hdfs --daemon stop datanode #停止服务