# Install a specific, pinned Docker build from the CentOS repos.
yum install -y docker-1.8.2-10.el7.centos
# Remove this lvm2 package first — presumably it conflicts with the
# docker-1.8.2 dependency set; TODO confirm on a clean host.
rpm -e lvm2-7:2.02.105-14.el7.x86_64
# Start the Docker daemon.
service docker start
# Stop and disable firewalld so the containers can reach each other.
systemctl stop firewalld
systemctl disable firewalld
# NOTE: after running the commands above, reboot the system: reboot -h
# Create the build context for the SSH base image and open its Dockerfile.
# (commands are lowercase — the original "Mkdir"/"Cd"/"Vi" would fail)
mkdir centos-ssh-root
cd centos-ssh-root
vi Dockerfile
# Use an existing CentOS image as the base.
# NOTE(review): consider pinning a tag (e.g. centos:7) — an untagged FROM
# floats with :latest and is not reproducible.
FROM centos

# Image author (LABEL replaces the deprecated MAINTAINER instruction).
LABEL maintainer="crxy"

# Install openssh-server/clients and sudo in one layer, and set sshd's
# UsePAM parameter to "no" (with PAM enabled, sshd logins fail inside
# the container).
RUN yum install -y openssh-server openssh-clients sudo \
    && yum clean all \
    && sed -i 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config

# Create test user root with password "root" and add it to sudoers.
RUN echo "root:root" | chpasswd \
    && echo "root ALL=(ALL) ALL" >> /etc/sudoers

# These two steps are required on CentOS 6 — without pre-generated host
# keys, sshd in the resulting container cannot accept logins.
# -N '' supplies an empty passphrase so the build never prompts.
RUN ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -N '' \
    && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''

# Runtime directory sshd expects, plus the documented SSH port.
RUN mkdir /var/run/sshd
EXPOSE 22

# Run sshd in the foreground as PID 1 (exec form).
CMD ["/usr/sbin/sshd", "-D"]
# Build the SSH-enabled base image.
# (straight quoting — the original curly quotes ”…” break the shell)
docker build -t crxy/centos-ssh-root .
# Create the JDK image's build context and copy the JDK tarball in.
# (filename uses lowercase "linux" to match the Dockerfile's ADD line)
mkdir centos-ssh-root-jdk
cd centos-ssh-root-jdk
cp ../jdk-7u75-linux-x64.tar.gz .
vi Dockerfile
# Layer JDK 7 on top of the SSH-enabled CentOS base image.
FROM crxy/centos-ssh-root

# ADD auto-extracts the local tarball into /usr/local/.
ADD jdk-7u75-linux-x64.tar.gz /usr/local/
RUN mv /usr/local/jdk1.7.0_75 /usr/local/jdk1.7

# Export JAVA_HOME and put the JDK on PATH (key=value ENV form).
ENV JAVA_HOME=/usr/local/jdk1.7
ENV PATH=$JAVA_HOME/bin:$PATH
# Build the JDK image.
# (straight quoting — the original curly quotes ”…” break the shell)
docker build -t crxy/centos-ssh-root-jdk .
# Create the Hadoop image's build context and copy the Hadoop tarball in.
mkdir centos-ssh-root-jdk-hadoop
cd centos-ssh-root-jdk-hadoop
cp ../hadoop-2.4.1.tar.gz .
vi Dockerfile
# Layer Hadoop 2.4.1 on top of the JDK image.
FROM crxy/centos-ssh-root-jdk

# ADD auto-extracts the local tarball into /usr/local.
ADD hadoop-2.4.1.tar.gz /usr/local
RUN mv /usr/local/hadoop-2.4.1 /usr/local/hadoop

# Export HADOOP_HOME and put the Hadoop binaries on PATH.
ENV HADOOP_HOME=/usr/local/hadoop
ENV PATH=$HADOOP_HOME/bin:$PATH
# Build the Hadoop image.
# (straight quoting — the original curly quotes ”…” break the shell)
docker build -t crxy/centos-ssh-root-jdk-hadoop .
# Master node: publish the NameNode (50070) and ResourceManager (8088) web UIs.
docker run --name hadoop0 --hostname hadoop0 -d -P -p 50070:50070 -p 8088:8088 crxy/centos-ssh-root-jdk-hadoop
# Slave nodes: automatic port publishing (-P) is sufficient.
docker run --name hadoop1 --hostname hadoop1 -d -P crxy/centos-ssh-root-jdk-hadoop
docker run --name hadoop2 --hostname hadoop2 -d -P crxy/centos-ssh-root-jdk-hadoop
# Install pipework, which assigns static IPs to running containers.
unzip pipework-master.zip
mv pipework-master pipework
cp -rp pipework/pipework /usr/local/bin/

# bridge-utils provides the brctl command used below.
yum -y install bridge-utils

# Create a bridge and give the host an address on the container subnet.
brctl addbr br0
ip link set dev br0 up
ip addr add 192.168.2.1/24 dev br0

# Attach each container to the bridge with a fixed IP.
pipework br0 hadoop0 192.168.2.10/24
pipework br0 hadoop1 192.168.2.11/24
pipework br0 hadoop2 192.168.2.12/24
docker exec -it hadoop0 /bin/bash
192.168.2.10 hadoop0
192.168.2.11 hadoop1
192.168.2.12 hadoop2
# On hadoop0: generate a key pair and distribute it to every node.
cd ~
mkdir .ssh
cd .ssh
ssh-keygen -t rsa    # press Enter at every prompt
ssh-copy-id -i localhost
ssh-copy-id -i hadoop0
ssh-copy-id -i hadoop1
ssh-copy-id -i hadoop2

# On hadoop1:
cd ~
cd .ssh
ssh-keygen -t rsa    # press Enter at every prompt
ssh-copy-id -i localhost
ssh-copy-id -i hadoop1

# On hadoop2:
cd ~
cd .ssh
ssh-keygen -t rsa    # press Enter at every prompt
ssh-copy-id -i localhost
ssh-copy-id -i hadoop2
export JAVA_HOME=/usr/local/jdk1.7
<!-- core-site.xml: Hadoop core settings shared by HDFS and YARN. -->
<configuration>
<property>
<!-- Default filesystem: the NameNode RPC endpoint on hadoop0, port 9000. -->
<name>fs.defaultFS</name>
<value>hdfs://hadoop0:9000</value>
</property>
<property>
<!-- Base directory for Hadoop's temporary/working files. -->
<name>hadoop.tmp.dir</name>
<value>/usr/local/hadoop/tmp</value>
</property>
<property>
<!-- Keep deleted files in the HDFS trash for 1440 minutes (24 h). -->
<name>fs.trash.interval</name>
<value>1440</value>
</property>
</configuration>
<!-- hdfs-site.xml: HDFS settings. -->
<configuration>
<property>
<!-- One replica per block: fine for this test cluster, not for production. -->
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<!-- Disable HDFS permission checks (test-environment convenience). -->
<name>dfs.permissions</name>
<value>false</value>
</property>
</configuration>
<!-- yarn-site.xml: YARN NodeManager/ResourceManager settings. -->
<configuration>
<property>
<!-- Auxiliary service required for MapReduce's shuffle phase. -->
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<!-- Aggregate container logs after an application finishes. -->
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
</configuration>
<!-- mapred-site.xml: run MapReduce jobs on YARN rather than locally. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
bin/hdfs namenode -format
yum install -y which
sbin/start-all.sh
[root@hadoop0 hadoop]# jps
3267 SecondaryNameNode
3003 NameNode
3664 Jps
3397 ResourceManager
3090 DataNode
3487 NodeManager
sbin/stop-all.sh
<!-- yarn-site.xml addition: pin the ResourceManager to hadoop0 so the
     slave nodes know where to connect. -->
<property>
  <description>The hostname of the RM.</description>
  <name>yarn.resourcemanager.hostname</name>
  <value>hadoop0</value>
</property>
hadoop1
hadoop2
# Copy the configured Hadoop installation to both slave nodes.
scp -rq /usr/local/hadoop hadoop1:/usr/local
scp -rq /usr/local/hadoop hadoop2:/usr/local
yum install -y which
sbin/start-all.sh
[root@hadoop0 hadoop]# jps
4643 Jps
4073 NameNode
4216 SecondaryNameNode
4381 ResourceManager

[root@hadoop1 hadoop]# jps
715 NodeManager
849 Jps
645 DataNode

[root@hadoop2 hadoop]# jps
456 NodeManager
589 Jps
388 DataNode
# Create a small input file for the wordcount test (two lines).
# The original "vi a.txt hello you hello me" fused the editor command
# with the intended file content and is not runnable as written.
printf 'hello you\nhello me\n' > a.txt
hdfs dfs -put a.txt /
cd /usr/local/hadoop/share/hadoop/mapreduce
# Run the example wordcount job against /a.txt, writing results to /out.
hadoop jar hadoop-mapreduce-examples-2.4.1.jar wordcount /a.txt /out
# Stop all three cluster containers.
docker stop hadoop0
docker stop hadoop1
docker stop hadoop2

# Start them again (e.g. after a host reboot).
docker start hadoop0
docker start hadoop1
docker start hadoop2

# Re-attach the static IPs — pipework assignments do not survive a restart.
pipework br0 hadoop0 192.168.2.10/24
pipework br0 hadoop1 192.168.2.11/24
pipework br0 hadoop2 192.168.2.12/24
#!/bin/bash
# Append the fixed cluster IP/hostname mappings to /etc/hosts.
# Guard against duplicate entries: this script is re-run after every
# container restart, and the original unconditionally appended each time.
for entry in "192.168.2.10 hadoop0" "192.168.2.11 hadoop1" "192.168.2.12 hadoop2"; do
    grep -qF "$entry" /etc/hosts || echo "$entry" >> /etc/hosts
done
chmod +x runhosts.sh
# Distribute the hosts script to the slave nodes.
scp runhosts.sh hadoop1:~
scp runhosts.sh hadoop2:~
./runhosts.sh
sbin/start-all.sh
机械节能产品生产企业官网模板...
大气智能家居家具装修装饰类企业通用网站模板...
礼品公司网站模板
宽屏简约大气婚纱摄影影楼模板...
蓝白WAP手机综合医院类整站源码(独立后台)...苏ICP备2024110244号-2 苏公网安备32050702011978号 增值电信业务经营许可证编号:苏B2-20251499 | Copyright 2018 - 2025 源码网商城 (www.ymwmall.com) 版权所有