CentOS-6.6 hadoop-2.7.1 java-1.7.0_75
一、基础环境部署及检查
[root@hadoop-node1 ~]# uname -r
2.6.32-504.12.2.el6.x86_64
[root@hadoop-node1 ~]# cat /etc/redhat-release
CentOS release 6.6 (Final)
[root@hadoop-node1 ~]# hostname
hadoop-node1.csoftintl.com
[root@hadoop-node1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether fa:16:3e:2c:86:da brd ff:ff:ff:ff:ff:ff
inet 192.168.7.39/24 brd 192.168.7.255 scope global eth0
inet6 fe80::f816:3eff:fe2c:86da/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether fa:16:3e:94:14:f6 brd ff:ff:ff:ff:ff:ff
inet 172.16.1.39/24 brd 172.16.1.255 scope global eth1
inet6 fe80::f816:3eff:fe94:14f6/64 scope link
valid_lft forever preferred_lft forever
[root@hadoop-node1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.1.39 hadoop-node1.csoftintl.com
[root@hadoop-node1 tools]# ll
total 270260
-rw-r--r-- 1 root root 138656756 Jul 18 2014 hadoop-2.7.1.tar.gz
-rw-r--r-- 1 root root 138082565 Jul 10 16:06 jdk-7u79-linux-x64.rpm
必要的软件和安装包
[root@hadoop-node1 ~]# rpm -qa bzip2-devel snappy-devel openssl-devel zlib-devel cmake
zlib-devel-1.2.3-29.el6.x86_64
snappy-devel-1.1.0-1.el6.x86_64
cmake-2.8.12.2-4.el6.x86_64
bzip2-devel-1.0.5-7.el6_0.x86_64
openssl-devel-1.0.1e-30.el6.11.x86_64
[root@hadoop-node1 ~]# ll apache-maven-3.3.3-bin.tar.gz protobuf-2.5.0.tar.gz
-rw-r--r-- 1 root root 8042383 Apr 28 23:12 apache-maven-3.3.3-bin.tar.gz
-rw-r--r-- 1 root root 2401901 Jul 15 13:57 protobuf-2.5.0.tar.gz
二、安装部署
1、安装jdk
[root@hadoop-node1 tools]# rpm -ivh jdk-7u79-linux-x64.rpm
Preparing... ########################################### [100%]
1:jdk ########################################### [100%]
Unpacking JAR files...
rt.jar...
jsse.jar...
charsets.jar...
tools.jar...
localedata.jar...
jfxrt.jar...
[root@hadoop-node1 ~]# ln -s /usr/java/jdk1.7.0_79/ /usr/java/jdk
[root@hadoop-node1 ~]# ll /usr/java/jdk
lrwxrwxrwx 1 root root 22 Jul 10 16:25 /usr/java/jdk -> /usr/java/jdk1.7.0_79/
[root@hadoop-node1 tools]# vim /etc/profile
[root@hadoop-node1 tools]# tail -3 /etc/profile
export JAVA_HOME=/usr/java/jdk
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar
[root@hadoop-node1 tools]# source /etc/profile
[root@hadoop-node1 tools]# java -version
java version "1.7.0_79"
Java(TM) SE Runtime Environment (build 1.7.0_79-b15)
Java HotSpot(TM) 64-Bit Server VM (build 24.79-b02, mixed mode)
三、部署hadoop
1、创建用户,编译hadoop
[root@hadoop-node1 tools]# groupadd hadoop
[root@hadoop-node1 tools]# useradd hadoop -g hadoop
[hadoop@hadoop-node1 tools]$ cd hadoop-2.7.1-src
[hadoop@hadoop-node1 hadoop-2.7.1-src]$ nohup mvn clean package -Pdist,native -DskipTests -Dtar
nohup: ignoring input and appending output to `nohup.out'
2、拷贝编译好的文件到安装目录并解压,调整环境变量并测试
[hadoop@hadoop-node1 hadoop-2.7.1-src]$ tail -74 nohup.out
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO]
[INFO] Apache Hadoop Main ................................. SUCCESS [ 2.608 s]
[INFO] Apache Hadoop Project POM .......................... SUCCESS [ 1.884 s]
[INFO] Apache Hadoop Annotations .......................... SUCCESS [ 6.388 s]
[INFO] Apache Hadoop Assemblies ........................... SUCCESS [ 0.253 s]
[INFO] Apache Hadoop Project Dist POM ..................... SUCCESS [ 2.066 s]
[INFO] Apache Hadoop Maven Plugins ........................ SUCCESS [ 4.423 s]
[INFO] Apache Hadoop MiniKDC .............................. SUCCESS [ 4.157 s]
[INFO] Apache Hadoop Auth ................................. SUCCESS [ 6.289 s]
[INFO] Apache Hadoop Auth Examples ........................ SUCCESS [ 4.995 s]
[INFO] Apache Hadoop Common ............................... SUCCESS [02:13 min]
[INFO] Apache Hadoop NFS .................................. SUCCESS [ 18.014 s]
[INFO] Apache Hadoop KMS .................................. SUCCESS [ 14.853 s]
[INFO] Apache Hadoop Common Project ....................... SUCCESS [ 1.199 s]
[INFO] Apache Hadoop HDFS ................................. SUCCESS [03:56 min]
[INFO] Apache Hadoop HttpFS ............................... SUCCESS [ 41.761 s]
[INFO] Apache Hadoop HDFS BookKeeper Journal .............. SUCCESS [ 7.424 s]
[INFO] Apache Hadoop HDFS-NFS ............................. SUCCESS [ 4.790 s]
[INFO] Apache Hadoop HDFS Project ......................... SUCCESS [ 0.071 s]
[INFO] hadoop-yarn ........................................ SUCCESS [ 0.062 s]
[INFO] hadoop-yarn-api .................................... SUCCESS [ 46.512 s]
[INFO] hadoop-yarn-common ................................. SUCCESS [ 40.779 s]
[INFO] hadoop-yarn-server ................................. SUCCESS [ 0.051 s]
[INFO] hadoop-yarn-server-common .......................... SUCCESS [ 12.650 s]
[INFO] hadoop-yarn-server-nodemanager ..................... SUCCESS [ 22.826 s]
[INFO] hadoop-yarn-server-web-proxy ....................... SUCCESS [ 4.278 s]
[INFO] hadoop-yarn-server-applicationhistoryservice ....... SUCCESS [ 8.284 s]
[INFO] hadoop-yarn-server-resourcemanager ................. SUCCESS [ 24.365 s]
[INFO] hadoop-yarn-server-tests ........................... SUCCESS [ 6.071 s]
[INFO] hadoop-yarn-client ................................. SUCCESS [ 7.911 s]
[INFO] hadoop-yarn-server-sharedcachemanager .............. SUCCESS [ 4.327 s]
[INFO] hadoop-yarn-applications ........................... SUCCESS [ 0.043 s]
[INFO] hadoop-yarn-applications-distributedshell .......... SUCCESS [ 3.613 s]
[INFO] hadoop-yarn-applications-unmanaged-am-launcher ..... SUCCESS [ 2.570 s]
[INFO] hadoop-yarn-site ................................... SUCCESS [ 0.053 s]
[INFO] hadoop-yarn-registry ............................... SUCCESS [ 6.645 s]
[INFO] hadoop-yarn-project ................................ SUCCESS [ 9.353 s]
[INFO] hadoop-mapreduce-client ............................ SUCCESS [ 0.062 s]
[INFO] hadoop-mapreduce-client-core ....................... SUCCESS [ 23.674 s]
[INFO] hadoop-mapreduce-client-common ..................... SUCCESS [ 21.104 s]
[INFO] hadoop-mapreduce-client-shuffle .................... SUCCESS [ 4.265 s]
[INFO] hadoop-mapreduce-client-app ........................ SUCCESS [ 10.053 s]
[INFO] hadoop-mapreduce-client-hs ......................... SUCCESS [ 6.908 s]
[INFO] hadoop-mapreduce-client-jobclient .................. SUCCESS [ 11.029 s]
[INFO] hadoop-mapreduce-client-hs-plugins ................. SUCCESS [ 2.541 s]
[INFO] Apache Hadoop MapReduce Examples ................... SUCCESS [ 7.050 s]
[INFO] hadoop-mapreduce ................................... SUCCESS [ 3.871 s]
[INFO] Apache Hadoop MapReduce Streaming .................. SUCCESS [ 7.371 s]
[INFO] Apache Hadoop Distributed Copy ..................... SUCCESS [ 13.228 s]
[INFO] Apache Hadoop Archives ............................. SUCCESS [ 3.766 s]
[INFO] Apache Hadoop Rumen ................................ SUCCESS [ 7.782 s]
[INFO] Apache Hadoop Gridmix .............................. SUCCESS [ 6.200 s]
[INFO] Apache Hadoop Data Join ............................ SUCCESS [ 3.685 s]
[INFO] Apache Hadoop Ant Tasks ............................ SUCCESS [ 2.809 s]
[INFO] Apache Hadoop Extras ............................... SUCCESS [ 4.489 s]
[INFO] Apache Hadoop Pipes ................................ SUCCESS [ 16.950 s]
[INFO] Apache Hadoop OpenStack support .................... SUCCESS [ 5.690 s]
[INFO] Apache Hadoop Amazon Web Services support .......... SUCCESS [ 4.586 s]
[INFO] Apache Hadoop Azure support ........................ SUCCESS [ 5.308 s]
[INFO] Apache Hadoop Client ............................... SUCCESS [ 10.547 s]
[INFO] Apache Hadoop Mini-Cluster ......................... SUCCESS [ 0.074 s]
[INFO] Apache Hadoop Scheduler Load Simulator ............. SUCCESS [ 4.904 s]
[INFO] Apache Hadoop Tools Dist ........................... SUCCESS [ 25.184 s]
[INFO] Apache Hadoop Tools ................................ SUCCESS [ 0.049 s]
[INFO] Apache Hadoop Distribution ......................... SUCCESS [01:36 min]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 16:46 min
[INFO] Finished at: 2015-07-15T15:56:43+08:00
[INFO] Final Memory: 123M/407M
[INFO] ------------------------------------------------------------------------
[hadoop@hadoop-node1 hadoop-2.7.1-src]$ cp hadoop-dist/target/hadoop-2.7.1.tar.gz /home/hadoop/application/
[hadoop@hadoop-node1 hadoop-2.7.1-src]$ cd /home/hadoop/application/
[hadoop@hadoop-node1 application]$ tar xf hadoop-2.7.1.tar.gz
[hadoop@hadoop-node1 application]$ ll
total 190292
lrwxrwxrwx 1 hadoop hadoop 12 Jul 15 16:03 hadoop -> hadoop-2.7.1
drwxrwxr-x 9 hadoop hadoop 4096 Jul 15 15:55 hadoop-2.7.1
-rw-rw-r-- 1 hadoop hadoop 194854446 Jul 15 16:02 hadoop-2.7.1.tar.gz
[root@hadoop-node1 ~]# vim /etc/profile
export HADOOP_HOME=/home/hadoop/application/hadoop
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:/application/maven/bin
[hadoop@hadoop-node1 application]$ source /etc/profile
[hadoop@hadoop-node1 application]$ hadoop version
Hadoop 2.7.1
Subversion Unknown -r Unknown
Compiled by hadoop on 2015-07-15T07:40Z
Compiled with protoc 2.5.0
From source with checksum fc0a1a23fc1868e4d5ee7fa2b28a58a
This command was run using /home/hadoop/application/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar
[hadoop@hadoop-node1 application]$ hadoop checknative -a
15/07/15 16:05:33 INFO bzip2.Bzip2Factory: Successfully loaded & initialized native-bzip2 library system-native
15/07/15 16:05:33 INFO zlib.ZlibFactory: Successfully loaded & initialized native-zlib library
Native library checking:
hadoop: true /home/hadoop/application/hadoop-2.7.1/lib/native/libhadoop.so.1.0.0
zlib: true /lib64/libz.so.1
snappy: true /usr/lib64/libsnappy.so.1
lz4: true revision:99
bzip2: true /lib64/libbz2.so.1
openssl: true /usr/lib64/libcrypto.so
3、配置文件,所有节点配置文件一样
[hadoop@hadoop-node1 hadoop-2.7.1]$ cd etc/hadoop/
[hadoop@hadoop-node1 hadoop]$ ll
total 124
-rw-r--r-- 1 hadoop hadoop 3589 Jun 21 2014 capacity-scheduler.xml
-rw-r--r-- 1 hadoop hadoop 1335 Jun 21 2014 configuration.xsl
-rw-r--r-- 1 hadoop hadoop 318 Jun 21 2014 container-executor.cfg
-rw-r--r-- 1 hadoop hadoop 774 Jun 21 2014 core-site.xml
-rw-r--r-- 1 hadoop hadoop 3589 Jun 21 2014 hadoop-env.cmd
-rw-r--r-- 1 hadoop hadoop 3494 Jun 21 2014 hadoop-env.sh
-rw-r--r-- 1 hadoop hadoop 2490 Jun 21 2014 hadoop-metrics.properties
-rw-r--r-- 1 hadoop hadoop 1774 Jun 21 2014 hadoop-metrics2.properties
-rw-r--r-- 1 hadoop hadoop 9257 Jun 21 2014 hadoop-policy.xml
-rw-r--r-- 1 hadoop hadoop 775 Jun 21 2014 hdfs-site.xml
-rw-r--r-- 1 hadoop hadoop 1449 Jun 21 2014 httpfs-env.sh
-rw-r--r-- 1 hadoop hadoop 1657 Jun 21 2014 httpfs-log4j.properties
-rw-r--r-- 1 hadoop hadoop 21 Jun 21 2014 httpfs-signature.secret
-rw-r--r-- 1 hadoop hadoop 620 Jun 21 2014 httpfs-site.xml
-rw-r--r-- 1 hadoop hadoop 11169 Jun 21 2014 log4j.properties
-rw-r--r-- 1 hadoop hadoop 918 Jun 21 2014 mapred-env.cmd
-rw-r--r-- 1 hadoop hadoop 1383 Jun 21 2014 mapred-env.sh
-rw-r--r-- 1 hadoop hadoop 4113 Jun 21 2014 mapred-queues.xml.template
-rw-r--r-- 1 hadoop hadoop 758 Jun 21 2014 mapred-site.xml.template
-rw-r--r-- 1 hadoop hadoop 10 Jun 21 2014 slaves
-rw-r--r-- 1 hadoop hadoop 2316 Jun 21 2014 ssl-client.xml.example
-rw-r--r-- 1 hadoop hadoop 2268 Jun 21 2014 ssl-server.xml.example
-rw-r--r-- 1 hadoop hadoop 2178 Jun 21 2014 yarn-env.cmd
-rw-r--r-- 1 hadoop hadoop 4564 Jun 21 2014 yarn-env.sh
-rw-r--r-- 1 hadoop hadoop 690 Jun 21 2014 yarn-site.xml
1)[hadoop@hadoop-node1 hadoop]$ vim hadoop-env.sh
27行:
# The java implementation to use.
#export JAVA_HOME=${JAVA_HOME}
export JAVA_HOME="/usr/java/jdk"
2) [hadoop@hadoop-node1 hadoop]$ vim core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop-node1.csoftintl.com:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/hadoop/hdfs</value>
</property>
</configuration>
3) [hadoop@hadoop-node1 hadoop]$ vim hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop-node1.csoftintl.com:9001</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
4)配置mapreduce
[hadoop@hadoop-node1 hadoop]$ cp -a mapred-site.xml.template mapred-site.xml
[hadoop@hadoop-node1 hadoop]$ vim mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
5)[hadoop@hadoop-node1 hadoop]$ vim yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop-node1.csoftintl.com</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
6)[hadoop@hadoop-node1 ~]$ vim app/hadoop-2.7.1/etc/hadoop/slaves
这里是所有datanode的节点,要提前做好规划,一般跑namenode的节点,不跑datanode
hadoop-node2.csoftintl.com
7)最后配置一下HADOOP_HOME环境变量
[root@hadoop-node1 ~]# vim /etc/profile
export HADOOP_HOME=/hadoop/hadoop
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@hadoop-node1 ~]# source /etc/profile
8)查看一下Hadoop的版本,确认配置
[hadoop@hadoop-node1 ~]$ hadoop version
Hadoop 2.7.1
Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r 15ecc87ccf4a0228f35af08fc56de536e6ce657a
Compiled by jenkins on 2015-06-29T06:04Z
Compiled with protoc 2.5.0
From source with checksum fc0a1a23fc1868e4d5ee7fa2b28a58a
This command was run using /home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar
4、格式化HDFS(在namenode的主机工作目录下)
[hadoop@hadoop-node1 ~]$ hdfs namenode -format
15/07/13 16:01:45 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = hadoop-node1.csoftintl.com/172.16.1.39
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 2.7.1
STARTUP_MSG: classpath = /home/hadoop/app/hadoop-2.7.1/etc/hadoop:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-math3-3.1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-auth-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/mockito-all-1.8.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpclient-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/gson-2.2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsp-api-2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-httpclient-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpcore-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1
/share/hadoop/common/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-configuration-1.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsch-0.1.42.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-net-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-recipes-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-digester-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib
/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jets3t-0.9.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-framework-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-
io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-api
-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-client-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.
1/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-api-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-registry-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-sharedcachemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/home/hadoop/app
/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/contrib/capacity-scheduler/*.jar
STARTUP_MSG: build = https://git-wip-us.apache.org/repos/asf/hadoop.git -r 15ecc87ccf4a0228f35af08fc56de536e6ce657a; compiled by 'jenkins' on 2015-06-29T06:04Z
STARTUP_MSG: java = 1.7.0_79
************************************************************/
15/07/13 16:01:45 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
15/07/13 16:01:45 INFO namenode.NameNode: createNameNode [-format]
15/07/13 16:01:46 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Formatting using clusterid: CID-baec9f0b-46b2-405e-815e-2603ae5493be
15/07/13 16:01:47 INFO namenode.FSNamesystem: No KeyProvider found.
15/07/13 16:01:47 INFO namenode.FSNamesystem: fsLock is fair:true
15/07/13 16:01:47 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
15/07/13 16:01:47 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
15/07/13 16:01:47 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
15/07/13 16:01:47 INFO blockmanagement.BlockManager: The block deletion will start around 2015 Jul 13 16:01:47
15/07/13 16:01:47 INFO util.GSet: Computing capacity for map BlocksMap
15/07/13 16:01:47 INFO util.GSet: VM type = 64-bit
15/07/13 16:01:47 INFO util.GSet: 2.0% max memory 910.5 MB = 18.2 MB
15/07/13 16:01:47 INFO util.GSet: capacity = 2^21 = 2097152 entries
15/07/13 16:01:47 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
15/07/13 16:01:47 INFO blockmanagement.BlockManager: defaultReplication = 1
15/07/13 16:01:47 INFO blockmanagement.BlockManager: maxReplication = 512
15/07/13 16:01:47 INFO blockmanagement.BlockManager: minReplication = 1
15/07/13 16:01:47 INFO blockmanagement.BlockManager: maxReplicationStreams = 2
15/07/13 16:01:47 INFO blockmanagement.BlockManager: shouldCheckForEnoughRacks = false
15/07/13 16:01:47 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
15/07/13 16:01:47 INFO blockmanagement.BlockManager: encryptDataTransfer = false
15/07/13 16:01:47 INFO blockmanagement.BlockManager: maxNumBlocksToLog = 1000
15/07/13 16:01:47 INFO namenode.FSNamesystem: fsOwner = hadoop (auth:SIMPLE)
15/07/13 16:01:47 INFO namenode.FSNamesystem: supergroup = supergroup
15/07/13 16:01:47 INFO namenode.FSNamesystem: isPermissionEnabled = true
15/07/13 16:01:47 INFO namenode.FSNamesystem: HA Enabled: false
15/07/13 16:01:47 INFO namenode.FSNamesystem: Append Enabled: true
15/07/13 16:01:48 INFO util.GSet: Computing capacity for map INodeMap
15/07/13 16:01:48 INFO util.GSet: VM type = 64-bit
15/07/13 16:01:48 INFO util.GSet: 1.0% max memory 910.5 MB = 9.1 MB
15/07/13 16:01:48 INFO util.GSet: capacity = 2^20 = 1048576 entries
15/07/13 16:01:48 INFO namenode.FSDirectory: ACLs enabled? false
15/07/13 16:01:48 INFO namenode.FSDirectory: XAttrs enabled? true
15/07/13 16:01:48 INFO namenode.FSDirectory: Maximum size of an xattr: 16384
15/07/13 16:01:48 INFO namenode.NameNode: Caching file names occuring more than 10 times
15/07/13 16:01:48 INFO util.GSet: Computing capacity for map cachedBlocks
15/07/13 16:01:48 INFO util.GSet: VM type = 64-bit
15/07/13 16:01:48 INFO util.GSet: 0.25% max memory 910.5 MB = 2.3 MB
15/07/13 16:01:48 INFO util.GSet: capacity = 2^18 = 262144 entries
15/07/13 16:01:48 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
15/07/13 16:01:48 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
15/07/13 16:01:48 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000
15/07/13 16:01:48 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
15/07/13 16:01:48 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
15/07/13 16:01:48 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
15/07/13 16:01:48 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
15/07/13 16:01:48 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
15/07/13 16:01:48 INFO util.GSet: Computing capacity for map NameNodeRetryCache
15/07/13 16:01:48 INFO util.GSet: VM type = 64-bit
15/07/13 16:01:48 INFO util.GSet: 0.029999999329447746% max memory 910.5 MB = 279.7 KB
15/07/13 16:01:48 INFO util.GSet: capacity = 2^15 = 32768 entries
15/07/13 16:01:48 INFO namenode.FSImage: Allocated new BlockPoolId: BP-69428073-172.16.1.39-1436774508204
15/07/13 16:01:48 INFO common.Storage: Storage directory /home/hadoop/app/hadoop-2.7.1/tmp/dfs/name has been successfully formatted.
15/07/13 16:01:48 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
15/07/13 16:01:48 INFO util.ExitUtil: Exiting with status 0
15/07/13 16:01:48 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at hadoop-node1.csoftintl.com/172.16.1.39
************************************************************/
5、配置免密钥认证
[hadoop@hadoop-node1 ~]$ ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
Generating public/private rsa key pair.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
ba:4f:f7:87:4e:73:7f:e8:f1:56:8d:12:6b:23:ab:f1 hadoop@hadoop-node1.csoftintl.com
The key's randomart image is:
+--[ RSA 2048]----+
| |
| |
| |
| . |
| S o ..|
| . . = . o|
| . o .+o+o..|
| o +.o.oo+o|
| ..o.E.oo..+|
+-----------------+
[hadoop@hadoop-node1 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node1.csoftintl.com
hadoop@hadoop-node1.csoftintl.com's password:
Now try logging into the machine, with "ssh 'hadoop@hadoop-node1.csoftintl.com'", and check in:
.ssh/authorized_keys
to make sure we haven't added extra keys that you weren't expecting.
[hadoop@hadoop-node1 ~]$ ssh hadoop@hadoop-node1.csoftintl.com
Last login: Sat Jul 11 02:32:42 2015 from ::1
6、启动HDFS分布式文件系统
[hadoop@hadoop-node1 hadoop]$ start-dfs.sh
Starting namenodes on [hadoop-node1.csoftintl.com]
hadoop-node1.csoftintl.com: starting namenode, logging to /home/hadoop/application/hadoop-2.7.1/logs/hadoop-hadoop-namenode-hadoop-node1.csoftintl.com.out
hadoop-node2.csoftintl.com: starting datanode, logging to /home/hadoop/application/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node2.csoftintl.com.out
Starting secondary namenodes [hadoop-node1.csoftintl.com]
hadoop-node1.csoftintl.com: starting secondarynamenode, logging to /home/hadoop/application/hadoop-2.7.1/logs/hadoop-hadoop-secondarynamenode-hadoop-node1.csoftintl.com.out
[hadoop@hadoop-node1 hadoop]$ jps
24786 SecondaryNameNode
24901 Jps
24583 NameNode
[hadoop@hadoop-node2 hadoop]$ jps
4147 Jps
4060 DataNode
7、测试分布式文件系统
[hadoop@hadoop-node1 ~]$ hadoop fs
Usage: hadoop fs [generic options]
[-appendToFile <localsrc> ... <dst>]
[-cat [-ignoreCrc] <src> ...]
[-checksum <src> ...]
[-chgrp [-R] GROUP PATH...]
[-chmod [-R] <MODE[,MODE]... | OCTALMODE> PATH...]
[-chown [-R] [OWNER][:[GROUP]] PATH...]
[-copyFromLocal [-f] [-p] <localsrc> ... <dst>]
[-copyToLocal [-p] [-ignoreCrc] [-crc] <src> ... <localdst>]
[-count [-q] <path> ...]
[-cp [-f] [-p] <src> ... <dst>]
[-createSnapshot <snapshotDir> [<snapshotName>]]
[-deleteSnapshot <snapshotDir> <snapshotName>]
[-df [-h] [<path> ...]]
[-du [-s] [-h] <path> ...]
[-expunge]
[-get [-p] [-ignoreCrc] [-crc] <src> ... <localdst>]
[-getfacl [-R] <path>]
[-getmerge [-nl] <src> <localdst>]
[-help [cmd ...]]
[-ls [-d] [-h] [-R] [<path> ...]]
[-mkdir [-p] <path> ...]
[-moveFromLocal <localsrc> ... <dst>]
[-moveToLocal <src> <localdst>]
[-mv <src> ... <dst>]
[-put [-f] [-p] <localsrc> ... <dst>]
[-renameSnapshot <snapshotDir> <oldName> <newName>]
[-rm [-f] [-r|-R] [-skipTrash] <src> ...]
[-rmdir [--ignore-fail-on-non-empty] <dir> ...]
[-setfacl [-R] [{-b|-k} {-m|-x <acl_spec>} <path>]|[--set <acl_spec> <path>]]
[-setrep [-R] [-w] <rep> <path> ...]
[-stat [format] <path> ...]
[-tail [-f] <file>]
[-test -[defsz] <path>]
[-text [-ignoreCrc] <src> ...]
[-touchz <path> ...]
[-usage [cmd ...]]
1)向HDFS里增加文件
[hadoop@hadoop-node1 ~]$ hadoop fs -put hadoop-2.7.1.tar.gz /
15/07/13 16:03:55 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
[hadoop@hadoop-node1 ~]$ hadoop fs -ls /
15/07/13 16:04:09 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Found 1 items
-rw-r--r-- 1 hadoop supergroup 210606807 2015-07-13 16:03 /hadoop-2.7.1.tar.gz
2)删除文件
[hadoop@hadoop-node1 ~]$ hadoop fs -rm /hadoop-2.7.1.tar.gz
15/07/13 16:09:47 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
15/07/13 16:09:48 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 0 minutes, Emptier interval = 0 minutes.
Deleted /hadoop-2.7.1.tar.gz
[hadoop@hadoop-node1 ~]$ hadoop fs -ls /
15/07/13 16:09:58 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
8、启动yarn作业管理集群
[hadoop@hadoop-node1 ~]$ start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /home/hadoop/application/hadoop-2.7.1/logs/yarn-hadoop-resourcemanager-hadoop-node1.csoftintl.com.out
hadoop-node2.csoftintl.com: starting nodemanager, logging to /home/hadoop/application/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node2.csoftintl.com.out
[hadoop@hadoop-node1 ~]$ jps
24786 SecondaryNameNode
7781 Jps
24583 NameNode
7533 ResourceManager
[hadoop@hadoop-node2 ~]$ jps
4060 DataNode
14812 Jps
14654 NodeManager
这里可以看到,namenode的节点,启动了一个ResourceManager的进程,而datanode的节点,启动了一个NodeManager的进程。
9、测试mapreduce和yarn框架的工作情况
[hadoop@hadoop-node1 ~]$ hadoop fs -ls /flow/srcdata
Found 1 items
-rw-r--r-- 1 hadoop supergroup 2229 2015-07-17 16:04 /flow/srcdata/HTTP_20130313143750.dat
[hadoop@hadoop-node1 ~]$ hadoop jar flow.jar cn.itheima.bigdata.hadoop.mr.flowcount.FlowCount /flow/srcdata /flow/output
15/07/20 11:48:25 INFO client.RMProxy: Connecting to ResourceManager at hadoop-node1.csoftintl.com/172.16.1.39:8032
Exception in thread "main" org.apache.hadoop.mapred.FileAlreadyExistsException: Output directory hdfs://hadoop-node1.csoftintl.com:9000/flow/output already exists
at org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.checkOutputSpecs(FileOutputFormat.java:146)
at org.apache.hadoop.mapreduce.JobSubmitter.checkSpecs(JobSubmitter.java:266)
at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:139)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
at cn.itheima.bigdata.hadoop.mr.flowcount.FlowCount.main(FlowCount.java:101)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
[hadoop@hadoop-node1 ~]$ hadoop fs -rm -r /flow/output
15/07/20 11:48:46 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 0 minutes, Emptier interval = 0 minutes.
Deleted /flow/output
[hadoop@hadoop-node1 ~]$ hadoop jar flow.jar cn.itheima.bigdata.hadoop.mr.flowcount.FlowCount /flow/srcdata /flow/output
15/07/20 11:48:52 INFO client.RMProxy: Connecting to ResourceManager at hadoop-node1.csoftintl.com/172.16.1.39:8032
15/07/20 11:48:53 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
15/07/20 11:48:53 INFO input.FileInputFormat: Total input paths to process : 1
15/07/20 11:48:53 INFO mapreduce.JobSubmitter: number of splits:1
15/07/20 11:48:54 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1437360105422_0002
15/07/20 11:48:54 INFO impl.YarnClientImpl: Submitted application application_1437360105422_0002
15/07/20 11:48:54 INFO mapreduce.Job: The url to track the job: http://hadoop-node1.csoftintl.com:8088/proxy/application_1437360105422_0002/
15/07/20 11:48:54 INFO mapreduce.Job: Running job: job_1437360105422_0002
15/07/20 11:49:04 INFO mapreduce.Job: Job job_1437360105422_0002 running in uber mode : false
15/07/20 11:49:04 INFO mapreduce.Job: map 0% reduce 0%
15/07/20 11:49:16 INFO mapreduce.Job: map 100% reduce 0%
15/07/20 11:49:24 INFO mapreduce.Job: map 100% reduce 100%
15/07/20 11:49:24 INFO mapreduce.Job: Job job_1437360105422_0002 completed successfully
15/07/20 11:49:24 INFO mapreduce.Job: Counters: 49
File System Counters
FILE: Number of bytes read=1122
FILE: Number of bytes written=234001
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=2369
HDFS: Number of bytes written=551
HDFS: Number of read operations=6
HDFS: Number of large read operations=0
HDFS: Number of write operations=2
Job Counters
Launched map tasks=1
Launched reduce tasks=1
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=9612
Total time spent by all reduces in occupied slots (ms)=5644
Total time spent by all map tasks (ms)=9612
Total time spent by all reduce tasks (ms)=5644
Total vcore-seconds taken by all map tasks=9612
Total vcore-seconds taken by all reduce tasks=5644
Total megabyte-seconds taken by all map tasks=9842688
Total megabyte-seconds taken by all reduce tasks=5779456
Map-Reduce Framework
Map input records=22
Map output records=22
Map output bytes=1072
Map output materialized bytes=1122
Input split bytes=140
Combine input records=0
Combine output records=0
Reduce input groups=21
Reduce shuffle bytes=1122
Reduce input records=22
Reduce output records=21
Spilled Records=44
Shuffled Maps =1
Failed Shuffles=0
Merged Map outputs=1
GC time elapsed (ms)=515
CPU time spent (ms)=3880
Physical memory (bytes) snapshot=432300032
Virtual memory (bytes) snapshot=1365823488
Total committed heap usage (bytes)=275251200
Shuffle Errors
BAD_ID=0
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Input Format Counters
Bytes Read=2229
File Output Format Counters
Bytes Written=551
[hadoop@hadoop-node1 ~]$ hadoop fs -ls /flow/output
Found 2 items
-rw-r--r-- 1 hadoop supergroup 0 2015-07-20 11:49 /flow/output/_SUCCESS
-rw-r--r-- 1 hadoop supergroup 551 2015-07-20 11:49 /flow/output/part-r-00000
[hadoop@hadoop-node1 ~]$ hadoop fs -cat /flow/output/part-r-00000
13480253104 180 180 360
13502468823 7335 110349 117684
13560436666 1116 954 2070
13560439658 2034 5892 7926
13602846565 1938 2910 4848
13660577991 6960 690 7650
13719199419 240 0 240
13726230503 2481 24681 27162
13726238888 2481 24681 27162
13760778710 120 120 240
13826544101 264 0 264
13922314466 3008 3720 6728
13925057413 11058 48243 59301
13926251106 240 0 240
13926435656 132 1512 1644
15013685858 3659 3538 7197
15920133257 3156 2936 6092
15989002119 1938 180 2118
18211575961 1527 2106 3633
18320173382 9531 2412 11943
84138413 4116 1432 5548
三、Mapreduce的框架和基础算法
四、Hadoop的HA机制和Federation
1、为什么需要HA
HDFS的NameNode一旦宕机,整个集群无法提供服务。
2、怎么实现的
基于Zookeeper实现的
3、什么是Zookeeper
为分布式集群提供协调服务,作为第三方,管理一些共享数据。Zookeeper本身非常可靠,它自身就是一个分布式高可用集群。
4、Zookeeper的典型应用场景
1)统一命名服务
分布式应用中,通常需要有一套完整的命名规则,既能产生唯一的名称又便于人识别和记住。Name Service是Zookeeper内置功能,只要调用API就能实现。
2)配置管理
配置管理在分布式应用环境中很常见,例如同一个应用系统需要多台Server运行,但它们的某些配置项是相同的,如果要修改这些相同的配置项,就必须同时修改每台运行这个应用的Server,这样非常麻烦而且容易出错。可以将配置信息保存在Zookeeper的某个目录节点中,然后让所有需要修改配置的应用机器监控配置信息的状态,一旦配置信息发生变化,每台应用机器就会收到Zookeeper的通知,从Zookeeper获取新的配置信息并应用到系统中。
3)集群管理
Zookeeper能够很容易的实现集群管理功能,如果有多台Server组成一个服务集群,那么必须有一个“总管”知道当前集群中每台服务器的服务状态,一旦有机器不能提供服务,集群中的“总管”必须知道,从而做出调整重新分配服务策略。同样当增加集群的服务能力时,增加一台或多台Server,同样也必须让“总管”知道。Zookeeper不仅能够维护当前集群中所有节点的服务状态,而且能够选出一个“总管”,让这个总管来管理集群,这就是Zookeeper的另一个主要功能:Leader Election
4)共享锁
共享锁在一个进程中很容易实现,但是在跨进程或者在不同Server之间就不好实现了。Zookeeper 却很容易实现这个功能。
5)队列管理
五、Hadoop集群部署及启动
1、集群规划
两台namenode,两台resourcemanager,三台datanode(附带跑zookeeper和qjournalmanager)
2、salt部署
略
主要配置文件:
[root@linux-node0 files]# vim core-site.xml
<configuration>
<!-- 指定hdfs的nameservice为ns1 -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<!-- 指定hadoop临时目录 -->
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/app/hadoop/tmp</value>
</property>
<!-- 指定zookeeper地址 -->
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181</value>
</property>
</configuration>
[root@linux-node0 files]# vim hdfs-site.xml
<configuration>
<!-- 指定hdfs的nameservice为ns1,需要和core-site.xml中的保持一致 -->
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- ns1下面有两个NameNode,分别是nn1,nn2 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- nn1的RPC通信地址 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>hadoop-node1:9000</value>
</property>
<!-- nn1的http通信地址 -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>hadoop-node1:50070</value>
</property>
<!-- nn2的RPC通信地址 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>hadoop-node2:9000</value>
</property>
<!-- nn2的http通信地址 -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>hadoop-node2:50070</value>
</property>
<!-- 指定NameNode的元数据在JournalNode上的存放位置 -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://hadoop-node5:8485;hadoop-node6:8485;hadoop-node7:8485/ns1</value>
</property>
<!-- 指定JournalNode在本地磁盘存放数据的位置 -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/hadoop/app/hadoop/journaldata</value>
</property>
<!-- 开启NameNode失败自动切换 -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- 配置失败自动切换实现方式 -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- 配置隔离机制方法,多个机制用换行分割,即每个机制暂用一行 -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence
shell(/bin/true)</value>
</property>
<!-- 使用sshfence隔离机制时需要ssh免登陆 -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<!-- 配置sshfence隔离机制超时时间 -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
</configuration>
[root@linux-node0 files]# vim mapred-site.xml
<configuration>
<!-- 指定mr框架为yarn方式 -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
[root@linux-node0 files]# vim yarn-site.xml
<configuration>
<!-- 开启RM高可用 -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- 指定RM的cluster id -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yrc</value>
</property>
<!-- 指定RM的名字 -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<!-- 分别指定RM的地址 -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>hadoop-node3</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>hadoop-node4</value>
</property>
<!-- 指定zk集群地址 -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
[root@linux-node0 files]# vim zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/home/hadoop/app/zookeeper/data
# the port at which the clients will connect
clientPort=2181
server.1=192.168.7.45:2888:3888
server.2=192.168.7.46:2888:3888
server.3=192.168.7.47:2888:3888
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
[root@linux-node0 files]# vim slaves
hadoop-node5
hadoop-node6
hadoop-node7
3、启动集群步骤
1)先启动三个zookeeper集群,规划跑在三台datanode上
[hadoop@hadoop-node5 ~]$ ./app/zookeeper/bin/zkServer.sh start
JMX enabled by default
Using config: /home/hadoop/app/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@hadoop-node5 ~]$ jps
29422 Jps
29398 QuorumPeerMain
[hadoop@hadoop-node6 ~]$ ./app/zookeeper/bin/zkServer.sh start
JMX enabled by default
Using config: /home/hadoop/app/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@hadoop-node6 ~]$ jps
28582 Jps
28555 QuorumPeerMain
[hadoop@hadoop-node7 ~]$ ./app/zookeeper/bin/zkServer.sh start
JMX enabled by default
Using config: /home/hadoop/app/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@hadoop-node7 ~]$ jps
28037 QuorumPeerMain
28064 Jps
2)启动JournalNode
[hadoop@hadoop-node5 ~]$ hadoop-daemon.sh start journalnode
starting journalnode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-journalnode-hadoop-node5.csoftintl.com.out
[hadoop@hadoop-node5 ~]$ jps
29509 JournalNode
29398 QuorumPeerMain
29560 Jps
[hadoop@hadoop-node6 ~]$ hadoop-daemon.sh start journalnode
starting journalnode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-journalnode-hadoop-node6.csoftintl.com.out
[hadoop@hadoop-node6 ~]$ jps
28706 Jps
28655 JournalNode
28555 QuorumPeerMain
[hadoop@hadoop-node7 ~]$ hadoop-daemon.sh start journalnode
starting journalnode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-journalnode-hadoop-node7.csoftintl.com.out
[hadoop@hadoop-node7 ~]$ jps
28037 QuorumPeerMain
28169 Jps
28115 JournalNode
3)格式化HDFS的namenode
[hadoop@hadoop-node1 ~]$ hdfs namenode -format15/07/21 17:48:22 INFO namenode.NameNode: STARTUP_MSG:/************************************************************STARTUP_MSG: Starting NameNodeSTARTUP_MSG: host = hadoop-node1.csoftintl.com/172.16.1.41STARTUP_MSG: args = [-format]STARTUP_MSG: version = 2.7.1STARTUP_MSG: classpath = /home/hadoop/app/hadoop-2.7.1/etc/hadoop:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-math3-3.1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-auth-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/mockito-all-1.8.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpclient-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/gson-2.2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsp-api-2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-httpclient-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpcore-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/s
hare/hadoop/common/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-configuration-1.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsch-0.1.42.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-net-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-recipes-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/shar
e/hadoop/common/lib/commons-digester-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jets3t-0.9.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-framework-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib
/xml-apis-1.3.04.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/
yarn/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-client-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/home/hadoop
/app/hadoop-2.7.1/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-api-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-registry-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-sharedcachemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1
/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/m
apreduce/hadoop-mapreduce-client-jobclient-2.7.1-tests.jar:/home/hadoop/app/hadoop/contrib/capacity-scheduler/*.jarSTARTUP_MSG: build = Unknown -r Unknown; compiled by \'hadoop\' on 2015-07-20T01:43ZSTARTUP_MSG: java = 1.7.0_75************************************************************/15/07/21 17:48:22 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]15/07/21 17:48:22 INFO namenode.NameNode: createNameNode [-format]Formatting using clusterid: CID-e3ea5761-53de-4f50-a3f3-608074105da915/07/21 17:48:24 INFO namenode.FSNamesystem: No KeyProvider found.15/07/21 17:48:24 INFO namenode.FSNamesystem: fsLock is fair:true15/07/21 17:48:24 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=100015/07/21 17:48:24 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true15/07/21 17:48:24 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.00015/07/21 17:48:24 INFO blockmanagement.BlockManager: The block deletion will start around 2015 Jul 21 17:48:2415/07/21 17:48:24 INFO util.GSet: Computing capacity for map BlocksMap15/07/21 17:48:24 INFO util.GSet: VM type = 64-bit15/07/21 17:48:24 INFO util.GSet: 2.0% max memory 889 MB = 17.8 MB15/07/21 17:48:24 INFO util.GSet: capacity = 2^21 = 2097152 entries15/07/21 17:48:24 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false15/07/21 17:48:24 INFO blockmanagement.BlockManager: defaultReplication = 315/07/21 17:48:24 INFO blockmanagement.BlockManager: maxReplication = 51215/07/21 17:48:24 INFO blockmanagement.BlockManager: minReplication = 115/07/21 17:48:24 INFO blockmanagement.BlockManager: maxReplicationStreams = 215/07/21 17:48:24 INFO blockmanagement.BlockManager: shouldCheckForEnoughRacks = false15/07/21 17:48:24 INFO blockmanagement.BlockManager: replicationRecheckInterval = 300015/07/21 17:48:24 INFO blockmanagement.BlockManager: encryptDataTransfer = false15/07/21 17:48:24 INFO 
blockmanagement.BlockManager: maxNumBlocksToLog = 100015/07/21 17:48:24 INFO namenode.FSNamesystem: fsOwner = hadoop (auth:SIMPLE)15/07/21 17:48:24 INFO namenode.FSNamesystem: supergroup = supergroup15/07/21 17:48:24 INFO namenode.FSNamesystem: isPermissionEnabled = true15/07/21 17:48:24 INFO namenode.FSNamesystem: Determined nameservice ID: ns115/07/21 17:48:24 INFO namenode.FSNamesystem: HA Enabled: true15/07/21 17:48:24 INFO namenode.FSNamesystem: Append Enabled: true15/07/21 17:48:25 INFO util.GSet: Computing capacity for map INodeMap15/07/21 17:48:25 INFO util.GSet: VM type = 64-bit15/07/21 17:48:25 INFO util.GSet: 1.0% max memory 889 MB = 8.9 MB15/07/21 17:48:25 INFO util.GSet: capacity = 2^20 = 1048576 entries15/07/21 17:48:25 INFO namenode.FSDirectory: ACLs enabled? false15/07/21 17:48:25 INFO namenode.FSDirectory: XAttrs enabled? true15/07/21 17:48:25 INFO namenode.FSDirectory: Maximum size of an xattr: 1638415/07/21 17:48:25 INFO namenode.NameNode: Caching file names occuring more than 10 times15/07/21 17:48:25 INFO util.GSet: Computing capacity for map cachedBlocks15/07/21 17:48:25 INFO util.GSet: VM type = 64-bit15/07/21 17:48:25 INFO util.GSet: 0.25% max memory 889 MB = 2.2 MB15/07/21 17:48:25 INFO util.GSet: capacity = 2^18 = 262144 entries15/07/21 17:48:25 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.999000012874603315/07/21 17:48:25 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 015/07/21 17:48:25 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 3000015/07/21 17:48:25 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 1015/07/21 17:48:25 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 1015/07/21 17:48:25 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,2515/07/21 17:48:25 INFO namenode.FSNamesystem: Retry cache on namenode is enabled15/07/21 17:48:25 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry 
cache entry expiry time is 600000 millis15/07/21 17:48:25 INFO util.GSet: Computing capacity for map NameNodeRetryCache15/07/21 17:48:25 INFO util.GSet: VM type = 64-bit15/07/21 17:48:25 INFO util.GSet: 0.029999999329447746% max memory 889 MB = 273.1 KB15/07/21 17:48:25 INFO util.GSet: capacity = 2^15 = 32768 entries15/07/21 17:48:27 INFO namenode.FSImage: Allocated new BlockPoolId: BP-180122279-172.16.1.41-143747210722815/07/21 17:48:27 INFO common.Storage: Storage directory /home/hadoop/app/hadoop/tmp/dfs/name has been successfully formatted.15/07/21 17:48:27 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 015/07/21 17:48:28 INFO util.ExitUtil: Exiting with status 015/07/21 17:48:28 INFO namenode.NameNode: SHUTDOWN_MSG:/************************************************************SHUTDOWN_MSG: Shutting down NameNode at hadoop-node1.csoftintl.com/172.16.1.41************************************************************/
4)拷贝HDFS主namenode节点的数据到高可用节点的目录中
[hadoop@hadoop-node1 hadoop]$ scp -r tmp/ hadoop-node2:~/app/hadoop/seen_txid 100% 2 0.0KB/s 00:00VERSION 100% 202 0.2KB/s 00:00fsimage_0000000000000000000 100% 353 0.3KB/s 00:00fsimage_0000000000000000000.md5 100% 62 0.1KB/s 00:00
5)格式化ZKFC(ZooKeeper故障转移控制器)
[hadoop@hadoop-node1 ~]$ hdfs zkfc -formatZK15/07/21 18:10:16 INFO tools.DFSZKFailoverController: Failover controller configured for NameNode NameNode at hadoop-node1/172.16.1.41:900015/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:host.name=hadoop-node1.csoftintl.com15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.version=1.7.0_7515/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.vendor=Oracle Corporation15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.home=/usr/java/jdk1.7.0_75/jre15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.class.path=/home/hadoop/app/hadoop-2.7.1/etc/hadoop:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-math3-3.1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-auth-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/mockito-all-1.8.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpclient-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/gson-2.2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsp-api-2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/co
mmon/lib/commons-httpclient-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpcore-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-configuration-1.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsch-0.1.42.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/
commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-net-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-recipes-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-digester-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jets3t-0.9.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-framework-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/
commons-daemon-1.0.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/servlet-ap
i-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-client-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/home/hadoop/app/
hadoop-2.7.1/share/hadoop/yarn/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-api-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-registry-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-sharedcachemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-com
press-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-
client-hs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1-tests.jar:/home/hadoop/app/hadoop/contrib/capacity-scheduler/*.jar15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.library.path=/home/hadoop/app/hadoop-2.7.1/lib/native15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.compiler=<NA>15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:os.name=Linux15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:os.arch=amd6415/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:os.version=2.6.32-504.23.4.el6.x86_6415/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:user.name=hadoop15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:user.home=/home/hadoop15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:user.dir=/home/hadoop15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181 sessionTimeout=5000 watcher=org.apache.hadoop.ha.ActiveStandbyElector$WatcherWithClientRef@1d6a602715/07/21 18:10:16 INFO zookeeper.ClientCnxn: Opening socket connection to server hadoop-node6.csoftintl.com/172.16.1.46:2181. 
Will not attempt to authenticate using SASL (unknown error)15/07/21 18:10:16 INFO zookeeper.ClientCnxn: Socket connection established to hadoop-node6.csoftintl.com/172.16.1.46:2181, initiating session15/07/21 18:10:16 INFO zookeeper.ClientCnxn: Session establishment complete on server hadoop-node6.csoftintl.com/172.16.1.46:2181, sessionid = 0x24eb010cc5b0000, negotiated timeout = 500015/07/21 18:10:16 INFO ha.ActiveStandbyElector: Session connected.15/07/21 18:10:16 INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/ns1 in ZK.15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Session: 0x24eb010cc5b0000 closed15/07/21 18:10:16 INFO zookeeper.ClientCnxn: EventThread shut down
6)启动hdfs(在hadoop-node1上执行启动)
[hadoop@hadoop-node1 ~]$ start-dfs.shStarting namenodes on [hadoop-node1 hadoop-node2]hadoop-node1: starting namenode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-namenode-hadoop-node1.csoftintl.com.outhadoop-node2: starting namenode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-namenode-hadoop-node2.csoftintl.com.outhadoop-node7: starting datanode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node7.csoftintl.com.outhadoop-node5: starting datanode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node5.csoftintl.com.outhadoop-node6: starting datanode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node6.csoftintl.com.outStarting journal nodes [hadoop-node5 hadoop-node6 hadoop-node7]hadoop-node5: journalnode running as process 29509. Stop it first.hadoop-node6: journalnode running as process 28655. Stop it first.hadoop-node7: journalnode running as process 28115. Stop it first.Starting ZK Failover Controllers on NN hosts [hadoop-node1 hadoop-node2]hadoop-node1: starting zkfc, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-zkfc-hadoop-node1.csoftintl.com.outhadoop-node2: starting zkfc, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-zkfc-hadoop-node2.csoftintl.com.out
7)启动yarn(在hadoop-node3上启动,hadoop-node4启动resourcemanager)
[hadoop@hadoop-node3 logs]$ start-yarn.shstarting yarn daemonsstarting resourcemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-resourcemanager-hadoop-node3.csoftintl.com.outhadoop-node5: starting nodemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node5.csoftintl.com.outhadoop-node6: starting nodemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node6.csoftintl.com.outhadoop-node7: starting nodemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node7.csoftintl.com.out[hadoop@hadoop-node3 logs]$ jps29011 Jps28938 ResourceManager[hadoop@hadoop-node4 hadoop]$ yarn-daemon.sh resourcemanager
8)最后检查一下所有节点的服务启动情况
[hadoop@hadoop-node1 ~]$ jps23383 NameNode23874 Jps23693 DFSZKFailoverController[hadoop@hadoop-node2 ~]$ jps16930 DFSZKFailoverController16991 Jps16823 NameNode[hadoop@hadoop-node3 ~]$ jps29011 Jps28938 ResourceManager[hadoop@hadoop-node4 ~]$ jps27233 ResourceManager27370 Jps[hadoop@hadoop-node5 ~]$ jps29509 JournalNode31094 NodeManager29398 QuorumPeerMain31218 Jps29618 DataNode[hadoop@hadoop-node6 ~]$ jps30339 Jps28655 JournalNode28555 QuorumPeerMain30215 NodeManager28764 DataNode[hadoop@hadoop-node7 ~]$ jps28037 QuorumPeerMain28115 JournalNode29675 NodeManager28224 DataNode29799 Jps
9)测试各组件功能
1、HDFS
[hadoop@hadoop-node1 ~]$ hadoop fs -put HTTP_20130313143750.dat /[hadoop@hadoop-node1 ~]$ hadoop fs -ls /Found 1 items-rw-r--r-- 3 hadoop supergroup 2229 2015-07-21 18:50 /HTTP_20130313143750.dat[hadoop@hadoop-node1 ~]$ hdfs haadmin -getServiceState nn1active[hadoop@hadoop-node1 ~]$ hdfs haadmin -getServiceState nn2standby
管理页面:
http://192.168.7.41:50070
2、yarn
[hadoop@hadoop-node1 ~]$ cp app/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar .Error: Could not find or load main class hadoop-mapreduce-examples-2.7.1.jar[hadoop@hadoop-node1 ~]$ hadoop jar hadoop-mapreduce-examples-2.7.1.jar pi 5 5Number of Maps = 5Samples per Map = 5Wrote input for Map #0Wrote input for Map #1Wrote input for Map #2Wrote input for Map #3Wrote input for Map #4Starting Job15/07/21 19:20:36 INFO input.FileInputFormat: Total input paths to process : 515/07/21 19:20:37 INFO mapreduce.JobSubmitter: number of splits:515/07/21 19:20:38 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1437475325594_000115/07/21 19:20:38 INFO impl.YarnClientImpl: Submitted application application_1437475325594_000115/07/21 19:20:38 INFO mapreduce.Job: The url to track the job: http://hadoop-node3:8088/proxy/application_1437475325594_0001/15/07/21 19:20:38 INFO mapreduce.Job: Running job: job_1437475325594_000115/07/21 19:20:52 INFO mapreduce.Job: Job job_1437475325594_0001 running in uber mode : false15/07/21 19:20:52 INFO mapreduce.Job: map 0% reduce 0%15/07/21 19:21:15 INFO mapreduce.Job: map 100% reduce 0%15/07/21 19:21:29 INFO mapreduce.Job: map 100% reduce 100%15/07/21 19:21:30 INFO mapreduce.Job: Job job_1437475325594_0001 completed successfully15/07/21 19:21:30 INFO mapreduce.Job: Counters: 49File System CountersFILE: Number of bytes read=116FILE: Number of bytes written=709755FILE: Number of read operations=0FILE: Number of large read operations=0FILE: Number of write operations=0HDFS: Number of bytes read=1275HDFS: Number of bytes written=215HDFS: Number of read operations=23HDFS: Number of large read operations=0HDFS: Number of write operations=3Job CountersLaunched map tasks=5Launched reduce tasks=1Data-local map tasks=5Total time spent by all maps in occupied slots (ms)=100795Total time spent by all reduces in occupied slots (ms)=11156Total time spent by all map tasks (ms)=100795Total time spent by all reduce tasks (ms)=11156Total 
vcore-seconds taken by all map tasks=100795Total vcore-seconds taken by all reduce tasks=11156Total megabyte-seconds taken by all map tasks=103214080Total megabyte-seconds taken by all reduce tasks=11423744Map-Reduce FrameworkMap input records=5Map output records=10Map output bytes=90Map output materialized bytes=140Input split bytes=685Combine input records=0Combine output records=0Reduce input groups=2Reduce shuffle bytes=140Reduce input records=10Reduce output records=0Spilled Records=20Shuffled Maps =5Failed Shuffles=0Merged Map outputs=5GC time elapsed (ms)=358CPU time spent (ms)=5740Physical memory (bytes) snapshot=1323433984Virtual memory (bytes) snapshot=4129165312Total committed heap usage (bytes)=846725120Shuffle ErrorsBAD_ID=0CONNECTION=0IO_ERROR=0WRONG_LENGTH=0WRONG_MAP=0WRONG_REDUCE=0File Input Format CountersBytes Read=590File Output Format CountersBytes Written=97Job Finished in 55.254 secondsEstimated value of Pi is 3.68000000000000000000
管理页面:
http://192.168.7.43:8088/cluster
六、HIVE的安装配置与使用
1、下载并部署
[hadoop@hadoop-node2 tools]$ wget http://apache.communilink.net/hive/hive-1.2.1/apache-hive-1.2.1-bin.tar.gz--2015-07-22 14:03:05-- http://apache.communilink.net/hive/hive-1.2.1/apache-hive-1.2.1-bin.tar.gzResolving apache.communilink.net... 203.124.11.85Connecting to apache.communilink.net|203.124.11.85|:80... connected.HTTP request sent, awaiting response... 200 OKLength: 92834839 (89M) [application/x-gzip]Saving to: `apache-hive-1.2.1-bin.tar.gz\'100%[===============================================================>] 92,834,839 974K/s in 98s2015-07-22 14:04:43 (928 KB/s) - `apache-hive-1.2.1-bin.tar.gz\' saved [92834839/92834839]
[hadoop@hadoop-node2 tools]$ tar xfvz apache-hive-1.2.1-bin.tar.gz -C ../app[hadoop@hadoop-node2 app]$ ln -s apache-hive-1.2.1-bin/ hive[hadoop@hadoop-node2 app]$ lltotal 8drwxrwxr-x 9 hadoop hadoop 4096 Jul 22 14:07 apache-hive-1.2.1-binlrwxrwxrwx 1 hadoop hadoop 30 Jul 21 13:08 hadoop -> /home/hadoop/app/hadoop-2.7.1/drwxrwxr-x 11 hadoop hadoop 4096 Jul 21 18:11 hadoop-2.7.1lrwxrwxrwx 1 hadoop hadoop 22 Jul 22 14:06 hive -> apache-hive-1.2.1-bin/
[hadoop@hadoop-node2 app]$ hive/bin/hiveLogging initialized using configuration in jar:file:/home/hadoop/app/apache-hive-1.2.1-bin/lib/hive-common-1.2.1.jar!/hive-log4j.propertieshive> exit;[hadoop@hadoop-node2 app]$ lltotal 36drwxrwxr-x 9 hadoop hadoop 4096 Jul 22 14:07 apache-hive-1.2.1-bin-rw-rw-r-- 1 hadoop hadoop 21071 Jul 22 14:27 derby.loglrwxrwxrwx 1 hadoop hadoop 30 Jul 21 13:08 hadoop -> /home/hadoop/app/hadoop-2.7.1/drwxrwxr-x 11 hadoop hadoop 4096 Jul 21 18:11 hadoop-2.7.1lrwxrwxrwx 1 hadoop hadoop 22 Jul 22 14:06 hive -> apache-hive-1.2.1-bin/drwxrwxr-x 5 hadoop hadoop 4096 Jul 22 14:27 metastore_db
2、建库建表
hive> show databases;OKdefaultTime taken: 1.294 seconds, Fetched: 1 row(s)hive> create database csoft;OKTime taken: 0.432 secondshive> use csoft;OKTime taken: 0.026 secondshive> create table t_mygirls(id int,name string,age int,size string)> row format delimited> fields terminated by "\t";OKTime taken: 0.552 seconds
3、上传数据
[hadoop@hadoop-node2 hive]$ vim mygirls.dat1 baby 18 36C2 yifei 19 37B3 yangmi 22 36A4 zhiling 36 36C5 fengjie 29 39A[hadoop@hadoop-node2 hive]$ hadoop fs -put mygirls.dat /user/hive/warehouse/csoft.db/t_mygirls
4、测试查询
hive> use csoft;OKhive> select * from t_mygirls;OK1 baby 18 36C2 yifei 19 37B3 yangmi 22 36A4 zhiling 36 36C5 fengjie 29 39ATime taken: 1.502 seconds, Fetched: 5 row(s)hive> select count(*) from t_mygirls;Query ID = hadoop_20150722160910_1d513448-3c28-4c74-aed1-2a26bcfe852cTotal jobs = 1Launching Job 1 out of 1Number of reduce tasks determined at compile time: 1In order to change the average load for a reducer (in bytes):set hive.exec.reducers.bytes.per.reducer=<number>In order to limit the maximum number of reducers:set hive.exec.reducers.max=<number>In order to set a constant number of reducers:set mapreduce.job.reduces=<number>Starting Job = job_1437530701036_0005, Tracking URL = http://hadoop-node3:8088/proxy/application_1437530701036_0005/Kill Command = /home/hadoop/app/hadoop-2.7.1/bin/hadoop job -kill job_1437530701036_0005Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 12015-07-22 16:09:27,490 Stage-1 map = 0%, reduce = 0%2015-07-22 16:09:38,131 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.83 sec2015-07-22 16:09:52,022 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 6.54 secMapReduce Total cumulative CPU time: 6 seconds 540 msecEnded Job = job_1437530701036_0005MapReduce Jobs Launched:Stage-Stage-1: Map: 1 Reduce: 1 Cumulative CPU: 6.54 sec HDFS Read: 6605 HDFS Write: 2 SUCCESSTotal MapReduce CPU Time Spent: 6 seconds 540 msecOK5Time taken: 43.983 seconds, Fetched: 1 row(s)
5、修改hive配置文件,与mysql结合
[hadoop@hadoop-node2 hive]$ cp conf/hive-default.xml.template conf/hive-site.xml[hadoop@hadoop-node2 hive]$ vim conf/hive-site.xml394行:<property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:mysql://192.168.7.10:3306/hive?createDatabaseIfNotExist=true</value><description>JDBC connect string for a JDBC metastore</description></property>789行:<property><name>javax.jdo.option.ConnectionDriverName</name><value>com.mysql.jdbc.Driver</value><description>Driver class name for a JDBC metastore</description></property>814行:<property><name>javax.jdo.option.ConnectionUserName</name><value>hive</value><description>Username to use against metastore database</description></property>380行:<property><name>javax.jdo.option.ConnectionPassword</name><value>hive</value><description>password to use against metastore database</description></property>
注意要在mysql里把hive元数据库的字符集改为latin1，不然后续建表时会报索引键过长的错误（类似 "Specified key was too long; max key length is 767 bytes"）
mysql> alter database hive character set latin1;Query OK, 1 row affected (0.02 sec)
6、导入mysql连接包
[hadoop@hadoop-node2 conf]$ cd ../lib/[hadoop@hadoop-node2 lib]$ ls -ltrtotal 96144-rw-r--r-- 1 hadoop hadoop 875336 Jan 9 2014 mysql-connector-java-5.1.28.jar[hadoop@hadoop-node2 hive]$ hiveLogging initialized using configuration in jar:file:/home/hadoop/app/apache-hive-1.2.1-bin/lib/hive-common-1.2.1.jar!/hive-log4j.propertieshive> show databases;OKdefaultTime taken: 1.585 seconds, Fetched: 1 row(s)
mysql> create database hive;Query OK, 1 row affected (0.08 sec)mysql> grant all on hive.* to \'hive\'@\'192.168.7.%\' identified by "hive";Query OK, 0 rows affected (0.00 sec)mysql> select user,host from mysql.user;+----------+-------------+| user | host |+----------+-------------+| hive | 192.168.7.% |+----------+-------------+9 rows in set (0.02 sec)
7、建库建表
hive> create table t_order(id int,name string,money double)> row format delimited> fields terminated by \',\';OKTime taken: 24.432 secondshive> show tables;OKt_orderTime taken: 0.037 seconds, Fetched: 1 row(s)mysql> select * from TBLS\G*************************** 1. row ***************************TBL_ID: 1CREATE_TIME: 1437556284DB_ID: 1LAST_ACCESS_TIME: 0OWNER: hadoopRETENTION: 0SD_ID: 1TBL_NAME: t_orderTBL_TYPE: MANAGED_TABLEVIEW_EXPANDED_TEXT: NULLVIEW_ORIGINAL_TEXT: NULL1 row in set (0.00 sec)mysql> select * from COLUMNS_V2\G*************************** 1. row ***************************CD_ID: 1COMMENT: NULLCOLUMN_NAME: idTYPE_NAME: intINTEGER_IDX: 0*************************** 2. row ***************************CD_ID: 1COMMENT: NULLCOLUMN_NAME: moneyTYPE_NAME: doubleINTEGER_IDX: 2*************************** 3. row ***************************CD_ID: 1COMMENT: NULLCOLUMN_NAME: nameTYPE_NAME: stringINTEGER_IDX: 13 rows in set (0.00 sec)
8、导入数据
[hadoop@hadoop-node2 ~]$ vim order.data[hadoop@hadoop-node2 ~]$ ll order.data-rw-rw-r-- 1 hadoop hadoop 54 Jul 22 17:39 order.datahive> load data local inpath \'/home/hadoop/order.data\' into table t_order;Loading data to table default.t_orderTable default.t_order stats: [numFiles=1, totalSize=54]OKTime taken: 1.03 secondshive> select * from t_order;OK1 iphone 6888.02 xiaomi 2399.03 meizu 2499.04 mate7 4398.0Time taken: 0.474 seconds, Fetched: 4 row(s)
[hadoop@hadoop-node2 ~]$ hadoop fs -put order.data /order.data.2[hadoop@hadoop-node2 ~]$ hadoop fs -ls /Found 4 itemsdrwxr-xr-x - hadoop supergroup 0 2015-07-21 18:51 /flow-rw-r--r-- 3 hadoop supergroup 54 2015-07-22 17:41 /order.data.2drwx------ - hadoop supergroup 0 2015-07-22 14:07 /tmpdrwxr-xr-x - hadoop supergroup 0 2015-07-22 17:03 /userhive> load data inpath \'/order.data.2\' into table t_order;Loading data to table default.t_orderTable default.t_order stats: [numFiles=2, totalSize=108]OKTime taken: 0.489 secondshive> select * from t_order;OK1 iphone 6888.02 xiaomi 2399.03 meizu 2499.04 mate7 4398.01 iphone 6888.02 xiaomi 2399.03 meizu 2499.04 mate7 4398.0Time taken: 0.188 seconds, Fetched: 8 row(s)
9、创建外部表
hive> create external table t_order_ex(id int,name string,money double)> row format delimited> fields terminated by \',\'> location \'/hive-tmp/order\';OKTime taken: 0.151 secondshive> load data local inpath \'/home/hadoop/order.data\' into table t_order_ex;Loading data to table default.t_order_exTable default.t_order_ex stats: [numFiles=0, totalSize=0]OKTime taken: 0.524 secondshive> select * from t_order_ex;OK1 iphone 6888.02 xiaomi 2399.03 meizu 2499.04 mate7 4398.0Time taken: 0.182 seconds, Fetched: 4 row(s)mysql> select * from TBLS\G*************************** 1. row ***************************TBL_ID: 1CREATE_TIME: 1437556284DB_ID: 1LAST_ACCESS_TIME: 0OWNER: hadoopRETENTION: 0SD_ID: 1TBL_NAME: t_orderTBL_TYPE: MANAGED_TABLEVIEW_EXPANDED_TEXT: NULLVIEW_ORIGINAL_TEXT: NULL*************************** 2. row ***************************TBL_ID: 2CREATE_TIME: 1437558341DB_ID: 1LAST_ACCESS_TIME: 0OWNER: hadoopRETENTION: 0SD_ID: 2TBL_NAME: t_order_exTBL_TYPE: EXTERNAL_TABLEVIEW_EXPANDED_TEXT: NULLVIEW_ORIGINAL_TEXT: NULL2 rows in set (0.00 sec)
内部表和外部表的区别：对内部表（MANAGED_TABLE）执行drop table会同时删除元数据和HDFS上的数据；对外部表（EXTERNAL_TABLE）执行drop table只删除表结构和元数据，HDFS上的数据文件仍然保留。
hive> drop table t_order_ex;OKTime taken: 32.076 seconds[hadoop@hadoop-node2 ~]$ hadoop fs -ls /hive-tmp/orderFound 1 items-rwxr-xr-x 3 hadoop supergroup 54 2015-07-22 17:46 /hive-tmp/order/order.data
10、从已有数据表创建表 select语句
hive> create table t_order_sample> as> select name,money from t_order;Query ID = hadoop_20150723115105_2a68366f-ce9a-40a3-810b-1634acab2bc4Total jobs = 3Launching Job 1 out of 3Number of reduce tasks is set to 0 since there\'s no reduce operatorStarting Job = job_1437530701036_0008, Tracking URL = http://hadoop-node3:8088/proxy/application_1437530701036_0008/Kill Command = /home/hadoop/app/hadoop-2.7.1/bin/hadoop job -kill job_1437530701036_0008Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 02015-07-23 11:51:25,498 Stage-1 map = 0%, reduce = 0%2015-07-23 11:51:37,143 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.58 secMapReduce Total cumulative CPU time: 2 seconds 580 msecEnded Job = job_1437530701036_0008Stage-4 is selected by condition resolver.Stage-3 is filtered out by condition resolver.Stage-5 is filtered out by condition resolver.Moving data to: hdfs://ns1/user/hive/warehouse/.hive-staging_hive_2015-07-23_11-51-05_828_639578264040662267-1/-ext-10001Moving data to: hdfs://ns1/user/hive/warehouse/t_order_sampleTable default.t_order_sample stats: [numFiles=1, numRows=8, totalSize=108, rawDataSize=100]MapReduce Jobs Launched:Stage-Stage-1: Map: 1 Cumulative CPU: 2.58 sec HDFS Read: 3088 HDFS Write: 187 SUCCESSTotal MapReduce CPU Time Spent: 2 seconds 580 msecOKTime taken: 40.675 seconds
hive> select * from t_order_sample;OKiphone 6888.0xiaomi 2399.0meizu 2499.0mate7 4398.0iphone 6888.0xiaomi 2399.0meizu 2499.0mate7 4398.0Time taken: 0.236 seconds, Fetched: 8 row(s)
11、插入数据的方法 insert语句
[hadoop@hadoop-node2 hive]$ vim order.data.25,caomei,256,xianggua,27,chengzi,48,apple,8~hive> load data local inpath \'/home/hadoop/app/hive/order.data.2\' into table t_order;Loading data to table default.t_orderTable default.t_order stats: [numFiles=3, totalSize=155]OKTime taken: 0.717 secondshive> select * from t_order;OK1 iphone 6888.02 xiaomi 2399.03 meizu 2499.04 mate7 4398.01 iphone 6888.02 xiaomi 2399.03 meizu 2499.04 mate7 4398.05 caomei 25.06 xianggua 2.07 chengzi 4.08 apple 8.0Time taken: 0.208 seconds, Fetched: 12 row(s)hive> insert into table t_order_sample> select name,money from t_order> where id>4;Query ID = hadoop_20150723120112_b0728032-db77-4026-9828-3ac468d7647eTotal jobs = 3Launching Job 1 out of 3Number of reduce tasks is set to 0 since there\'s no reduce operatorStarting Job = job_1437530701036_0009, Tracking URL = http://hadoop-node3:8088/proxy/application_1437530701036_0009/Kill Command = /home/hadoop/app/hadoop-2.7.1/bin/hadoop job -kill job_1437530701036_0009Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 02015-07-23 12:01:33,570 Stage-1 map = 0%, reduce = 0%2015-07-23 12:01:45,170 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 4.01 secMapReduce Total cumulative CPU time: 4 seconds 10 msecEnded Job = job_1437530701036_0009Stage-4 is selected by condition resolver.Stage-3 is filtered out by condition resolver.Stage-5 is filtered out by condition resolver.Moving data to: hdfs://ns1/user/hive/warehouse/t_order_sample/.hive-staging_hive_2015-07-23_12-01-12_051_7473768812803361164-1/-ext-10000Loading data to table default.t_order_sampleTable default.t_order_sample stats: [numFiles=2, numRows=12, totalSize=155, rawDataSize=143]MapReduce Jobs Launched:Stage-Stage-1: Map: 1 Cumulative CPU: 4.01 sec HDFS Read: 4118 HDFS Write: 125 SUCCESSTotal MapReduce CPU Time Spent: 4 seconds 10 msecOKTime taken: 40.232 seconds
hive> select * from t_order_sample;OKiphone 6888.0xiaomi 2399.0meizu 2499.0mate7 4398.0iphone 6888.0xiaomi 2399.0meizu 2499.0mate7 4398.0caomei 25.0xianggua 2.0chengzi 4.0apple 8.0Time taken: 0.221 seconds, Fetched: 12 row(s)
七、Hbase
1、安装部署
[hadoop@hadoop-node1 tools]$ wget http://apache.01link.hk/hbase/1.1.1/hbase-1.1.1-bin.tar.gz--2015-07-23 16:12:34-- http://apache.01link.hk/hbase/1.1.1/hbase-1.1.1-bin.tar.gzResolving apache.01link.hk... 101.78.134.82Connecting to apache.01link.hk|101.78.134.82|:80... connected.HTTP request sent, awaiting response... 200 OKLength: 102487389 (98M) [application/x-gzip]Saving to: `hbase-1.1.1-bin.tar.gz\'100%[===============================================================>] 102,487,389 1.70M/s in 63s2015-07-23 16:13:38 (1.54 MB/s) - `hbase-1.1.1-bin.tar.gz\' saved [102487389/102487389]
[hadoop@hadoop-node1 tools]$ tar xf hbase-1.1.1-bin.tar.gz -C ../app/[hadoop@hadoop-node1 tools]$ ln -s ../app/hbase-1.1.1/ ../app/hbase[hadoop@hadoop-node1 app]$ lltotal 8lrwxrwxrwx 1 hadoop hadoop 30 Jul 21 13:08 hadoop -> /home/hadoop/app/hadoop-2.7.1/drwxrwxr-x 11 hadoop hadoop 4096 Jul 21 18:11 hadoop-2.7.1lrwxrwxrwx 1 hadoop hadoop 19 Jul 23 16:16 hbase -> ../app/hbase-1.1.1/drwxrwxr-x 7 hadoop hadoop 4096 Jul 23 16:15 hbase-1.1.1
2、配置文件
1)[hadoop@hadoop-node1 app]$ vim hbase/conf/hbase-env.sh
29行：export JAVA_HOME=/usr/java/jdk/　　131行：export HBASE_MANAGES_ZK=false
2)[hadoop@hadoop-node1 app]$ vim hbase/conf/hbase-site.xml
<configuration><property><name>hbase.rootdir</name><value>hdfs://ns1/hbase</value></property><property><name>hbase.cluster.distributed</name><value>true</value></property><property><name>hbase.zookeeper.quorum</name><value>hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181</value></property></configuration>
3)[hadoop@hadoop-node1 app]$ vim hbase/conf/regionservers
hadoop-node5hadoop-node6hadoop-node7
4)拷贝hadoop的部分配置文件
[hadoop@hadoop-node1 app]$ cp hadoop/etc/hadoop/core-site.xml hadoop/etc/hadoop/hdfs-site.xml hbase/conf/
5)分发部署包
scp -r hbase-1.1.1/ hadoop@hadoop-node2:/home/hadoop/appscp -r hbase-1.1.1/ hadoop@hadoop-node5:/home/hadoop/appscp -r hbase-1.1.1/ hadoop@hadoop-node6:/home/hadoop/appscp -r hbase-1.1.1/ hadoop@hadoop-node7:/home/hadoop/app
在各节点分别做好软链接后，就可以启动HBase集群了
3、启动集群
[hadoop@hadoop-node1 app]$ hbase/bin/start-hbase.shstarting master, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-master-hadoop-node1.csoftintl.com.outhadoop-node6: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node6.csoftintl.com.outhadoop-node5: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node5.csoftintl.com.outhadoop-node7: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node7.csoftintl.com.out[hadoop@hadoop-node1 app]$ jps22040 Jps21770 HMaster12417 NameNode11236 DFSZKFailoverController[hadoop@hadoop-node2 app]$ hbase/bin/start-hbase.shstarting master, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-master-hadoop-node2.csoftintl.com.outhadoop-node5: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node5.csoftintl.com.outhadoop-node7: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node7.csoftintl.com.outhadoop-node6: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node6.csoftintl.com.out[hadoop@hadoop-node2 app]$ jps20818 Jps20697 HMaster15587 DFSZKFailoverController25736 NameNode[hadoop@hadoop-node5 app]$ jps3991 HRegionServer22260 QuorumPeerMain22452 NodeManager22561 JournalNode4133 Jps22339 DataNode[hadoop@hadoop-node6 app]$ jps8360 JournalNode22731 HRegionServer22903 Jps8171 DataNode8284 NodeManager8090 QuorumPeerMain[hadoop@hadoop-node7 app]$ jps19648 QuorumPeerMain19741 DataNode32436 HRegionServer19930 JournalNode19854 NodeManager32578 Jps
管理网页:
http://192.168.7.41:16010/master-status
4、命令行客户端及管理命令
[root@hadoop-node1 ~]# vim /etc/profileexport PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:/home/hadoop/app/hive/bin:/home/hadoop/app/hbase/bin[root@hadoop-node1 ~]# su - hadoop[hadoop@hadoop-node1 ~]$ source /etc/profile[hadoop@hadoop-node1 ~]$ hbase shellSLF4J: Class path contains multiple SLF4J bindings.SLF4J: Found binding in [jar:file:/home/hadoop/app/hbase-1.1.1/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]SLF4J: Found binding in [jar:file:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]HBase Shell; enter \'help<RETURN>\' for list of supported commands.Type "exit<RETURN>" to leave the HBase ShellVersion 1.1.1, rd0a115a7267f54e01c72c603ec53e91ec418292f, Tue Jun 23 14:44:07 PDT 2015hbase(main):001:0>
hbase(main):017:0> helpHBase Shell, version 1.1.1, rd0a115a7267f54e01c72c603ec53e91ec418292f, Tue Jun 23 14:44:07 PDT 2015Type \'help "COMMAND"\', (e.g. \'help "get"\' -- the quotes are necessary) for help on a specific command.Commands are grouped. Type \'help "COMMAND_GROUP"\', (e.g. \'help "general"\') for help on a command group.COMMAND GROUPS:Group name: generalCommands: status, table_help, version, whoamiGroup name: ddlCommands: alter, alter_async, alter_status, create, describe, disable, disable_all, drop, drop_all, enable, enable_all, exists, get_table, is_disabled, is_enabled, list, show_filtersGroup name: namespaceCommands: alter_namespace, create_namespace, describe_namespace, drop_namespace, list_namespace, list_namespace_tablesGroup name: dmlCommands: append, count, delete, deleteall, get, get_counter, get_splits, incr, put, scan, truncate, truncate_preserveGroup name: toolsCommands: assign, balance_switch, balancer, balancer_enabled, catalogjanitor_enabled, catalogjanitor_run, catalogjanitor_switch, close_region, compact, compact_rs, flush, major_compact, merge_region, move, split, trace, unassign, wal_roll, zk_dumpGroup name: replicationCommands: add_peer, append_peer_tableCFs, disable_peer, disable_table_replication, enable_peer, enable_table_replication, list_peers, list_replicated_tables, remove_peer, remove_peer_tableCFs, set_peer_tableCFs, show_peer_tableCFsGroup name: snapshotsCommands: clone_snapshot, delete_all_snapshot, delete_snapshot, list_snapshots, restore_snapshot, snapshotGroup name: configurationCommands: update_all_config, update_configGroup name: quotasCommands: list_quotas, set_quotaGroup name: securityCommands: grant, revoke, user_permissionGroup name: visibility labelsCommands: add_labels, clear_auths, get_auths, list_labels, set_auths, set_visibilitySHELL USAGE:Quote all names in HBase Shell such as table and column names. Commas delimitcommand parameters. 
Type <RETURN> after entering a command to run it.Dictionaries of configuration used in the creation and alteration of tables areRuby Hashes. They look like this:{\'key1\' => \'value1\', \'key2\' => \'value2\', ...}and are opened and closed with curley-braces. Key/values are delimited by the\'=>\' character combination. Usually keys are predefined constants such asNAME, VERSIONS, COMPRESSION, etc. Constants do not need to be quoted. Type\'Object.constants\' to see a (messy) list of all constants in the environment.If you are using binary keys or values and need to enter them in the shell, usedouble-quote\'d hexadecimal representation. For example:hbase> get \'t1\', "key\x03\x3f\xcd"hbase> get \'t1\', "key\003\023\011"hbase> put \'t1\', "test\xef\xff", \'f1:\', "\x01\x33\x40"The HBase shell is the (J)Ruby IRB with the above HBase-specific commands added.For more on the HBase Shell, see http://hbase.apache.org/book.html
创建表:
hbase(main):020:0> create \'user_info\',{NAME => \'base_info\',VERSIONS => 3},{NAME => \'extra_info\'}0 row(s) in 4.5880 seconds=> Hbase::Table - user_infohbase(main):022:0> listTABLEuser_info1 row(s) in 0.0130 seconds=> ["user_info"]
插入数据:
hbase(main):024:0> put \'user_info\' ,\'001\',\'base_info:name\',\'fengjie\'0 row(s) in 0.1610 secondshbase(main):025:0> put \'user_info\' ,\'001\',\'base_info:age\',\'28\'0 row(s) in 0.0200 secondshbase(main):026:0> put \'user_info\' ,\'001\',\'base_info:addr\',\'New York\'0 row(s) in 0.0150 secondshbase(main):027:0> put \'user_info\' ,\'001\',\'extra_info:sex\',\'female\'0 row(s) in 0.0170 secondshbase(main):028:0> put \'user_info\' ,\'001\',\'base_info:phonenbr\',\'13813838888\'0 row(s) in 0.0160 seconds
查询数据:
hbase(main):029:0> scan \'user_info\'ROW COLUMN+CELL001 column=base_info:addr, timestamp=1437644997783, value=New York001 column=base_info:age, timestamp=1437644967365, value=28001 column=base_info:name, timestamp=1437644932773, value=fengjie001 column=base_info:phonenbr, timestamp=1437645075989, value=13813838888001 column=extra_info:sex, timestamp=1437645025106, value=female1 row(s) in 0.0520 secondshbase(main):030:0> get \'user_info\',\'001\'COLUMN CELLbase_info:addr timestamp=1437644997783, value=New Yorkbase_info:age timestamp=1437644967365, value=28base_info:name timestamp=1437644932773, value=fengjiebase_info:phonenbr timestamp=1437645075989, value=13813838888extra_info:sex timestamp=1437645025106, value=female5 row(s) in 0.0660 seconds
修改数据:
hbase(main):031:0> put \'user_info\',\'001\',\'base_info:name\',\'luoyufeng\'0 row(s) in 0.0130 secondshbase(main):032:0> get \'user_info\',\'001\'COLUMN CELLbase_info:addr timestamp=1437644997783, value=New Yorkbase_info:age timestamp=1437644967365, value=28base_info:name timestamp=1437645350820, value=luoyufengbase_info:phonenbr timestamp=1437645075989, value=13813838888extra_info:sex timestamp=1437645025106, value=female5 row(s) in 0.0340 seconds
hbase(main):033:0> get \'user_info\',\'001\',{COLUMN => \'base_info:name\',VERSIONS =>10}COLUMN CELLbase_info:name timestamp=1437645350820, value=luoyufengbase_info:name timestamp=1437644932773, value=fengjie2 row(s) in 0.0400 seconds
scan和get的区别（注意：下面scan命令里的 VERSION => 5 写法有误，正确的参数名是 VERSIONS，写错会被HBase shell忽略，所以scan仍只返回最新版本）：
hbase(main):038:0> scan \'user_info\',{RAW => true,VERSION => 5}ROW COLUMN+CELL001 column=base_info:addr, timestamp=1437644997783, value=New York001 column=base_info:age, timestamp=1437644967365, value=28001 column=base_info:name, timestamp=1437645602346, value=fengbaobao001 column=base_info:phonenbr, timestamp=1437645075989, value=13813838888001 column=extra_info:sex, timestamp=1437645025106, value=female1 row(s) in 0.0260 secondshbase(main):037:0> get \'user_info\',\'001\',{COLUMN => \'base_info:name\',VERSIONS =>10}COLUMN CELLbase_info:name timestamp=1437645602346, value=fengbaobaobase_info:name timestamp=1437645498636, value=fengfengbase_info:name timestamp=1437645350820, value=luoyufeng3 row(s) in 0.0200 seconds
下载Cloudera Manager安装程序（用于部署CDH版Hadoop）
wget http://archive.cloudera.com/cm5/installer/latest/cloudera-manager-installer.bin