Summary

CentOS-6.6 hadoop-2.7.1 java-1.7.0_79

I. Base Environment Setup and Checks

    [root@hadoop-node1 ~]# uname -r
    2.6.32-504.12.2.el6.x86_64
    [root@hadoop-node1 ~]# cat /etc/redhat-release
    CentOS release 6.6 (Final)
    [root@hadoop-node1 ~]# hostname
    hadoop-node1.csoftintl.com
    [root@hadoop-node1 ~]# ip addr
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
    valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether fa:16:3e:2c:86:da brd ff:ff:ff:ff:ff:ff
    inet 192.168.7.39/24 brd 192.168.7.255 scope global eth0
    inet6 fe80::f816:3eff:fe2c:86da/64 scope link
    valid_lft forever preferred_lft forever
    3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether fa:16:3e:94:14:f6 brd ff:ff:ff:ff:ff:ff
    inet 172.16.1.39/24 brd 172.16.1.255 scope global eth1
    inet6 fe80::f816:3eff:fe94:14f6/64 scope link
    valid_lft forever preferred_lft forever
    [root@hadoop-node1 ~]# cat /etc/hosts
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    172.16.1.39 hadoop-node1.csoftintl.com
    [root@hadoop-node1 tools]# ll
    total 270260
    -rw-r--r-- 1 root root 138656756 Jul 18 2014 hadoop-2.7.1.tar.gz
    -rw-r--r-- 1 root root 138082565 Jul 10 16:06 jdk-7u79-linux-x64.rpm
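
Note that the /etc/hosts above only resolves hadoop-node1. Since hadoop-node2 joins the cluster as a datanode later on, each node presumably also needs entries for all cluster members; a sketch (hadoop-node2's IP is an assumption, not from the original):

    # append to /etc/hosts on every node; hadoop-node2's address is assumed for illustration
    172.16.1.40 hadoop-node2.csoftintl.com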

Required software and packages

    [root@hadoop-node1 ~]# rpm -qa bzip2-devel snappy-devel openssl-devel zlib-devel cmake
    zlib-devel-1.2.3-29.el6.x86_64
    snappy-devel-1.1.0-1.el6.x86_64
    cmake-2.8.12.2-4.el6.x86_64
    bzip2-devel-1.0.5-7.el6_0.x86_64
    openssl-devel-1.0.1e-30.el6.11.x86_64

    [root@hadoop-node1 ~]# ll apache-maven-3.3.3-bin.tar.gz protobuf-2.5.0.tar.gz
    -rw-r--r-- 1 root root 8042383 Apr 28 23:12 apache-maven-3.3.3-bin.tar.gz
    -rw-r--r-- 1 root root 2401901 Jul 15 13:57 protobuf-2.5.0.tar.gz
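
The native build in the next section needs Maven on the PATH and protoc 2.5.0 installed, but the original only lists the tarballs. A minimal install sketch (the /application prefix is taken from the PATH entry added to /etc/profile later in this document; everything else is an assumption):

    # Maven ships as a binary tarball: extract it and link a stable path
    [root@hadoop-node1 ~]# mkdir -p /application
    [root@hadoop-node1 ~]# tar xf apache-maven-3.3.3-bin.tar.gz -C /application
    [root@hadoop-node1 ~]# ln -s /application/apache-maven-3.3.3 /application/maven
    # protobuf 2.5.0 builds from source with the usual autotools flow
    [root@hadoop-node1 ~]# tar xf protobuf-2.5.0.tar.gz && cd protobuf-2.5.0
    [root@hadoop-node1 protobuf-2.5.0]# ./configure && make && make install
    [root@hadoop-node1 protobuf-2.5.0]# protoc --version
    libprotoc 2.5.0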

II. Installation and Deployment
1. Install the JDK

    [root@hadoop-node1 tools]# rpm -ivh jdk-7u79-linux-x64.rpm
    Preparing... ########################################### [100%]
    1:jdk ########################################### [100%]
    Unpacking JAR files...
    rt.jar...
    jsse.jar...
    charsets.jar...
    tools.jar...
    localedata.jar...
    jfxrt.jar...
    [root@hadoop-node1 ~]# ln -s /usr/java/jdk1.7.0_79/ /usr/java/jdk
    [root@hadoop-node1 ~]# ll /usr/java/jdk
    lrwxrwxrwx 1 root root 22 Jul 10 16:25 /usr/java/jdk -> /usr/java/jdk1.7.0_79/
    [root@hadoop-node1 tools]# vim /etc/profile
    [root@hadoop-node1 tools]# tail -3 /etc/profile
    export JAVA_HOME=/usr/java/jdk
    export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
    export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar
    [root@hadoop-node1 tools]# source /etc/profile
    [root@hadoop-node1 tools]# java -version
    java version "1.7.0_79"
    Java(TM) SE Runtime Environment (build 1.7.0_79-b15)
    Java HotSpot(TM) 64-Bit Server VM (build 24.79-b02, mixed mode)

III. Deploying Hadoop
1. Create the user and compile Hadoop

    [root@hadoop-node1 tools]# groupadd hadoop
    [root@hadoop-node1 tools]# useradd hadoop -g hadoop
    [hadoop@hadoop-node1 tools]$ cd hadoop-2.7.1-src
    [hadoop@hadoop-node1 hadoop-2.7.1-src]$ nohup mvn clean package -Pdist,native -DskipTests -Dtar
    nohup: ignoring input and appending output to `nohup.out'

2. Copy the compiled tarball to the installation directory, extract it, adjust the environment variables, and test

    [hadoop@hadoop-node1 hadoop-2.7.1-src]$ tail -74 nohup.out
    [INFO] ------------------------------------------------------------------------
    [INFO] Reactor Summary:
    [INFO]
    [INFO] Apache Hadoop Main ................................. SUCCESS [ 2.608 s]
    [INFO] Apache Hadoop Project POM .......................... SUCCESS [ 1.884 s]
    [INFO] Apache Hadoop Annotations .......................... SUCCESS [ 6.388 s]
    [INFO] Apache Hadoop Assemblies ........................... SUCCESS [ 0.253 s]
    [INFO] Apache Hadoop Project Dist POM ..................... SUCCESS [ 2.066 s]
    [INFO] Apache Hadoop Maven Plugins ........................ SUCCESS [ 4.423 s]
    [INFO] Apache Hadoop MiniKDC .............................. SUCCESS [ 4.157 s]
    [INFO] Apache Hadoop Auth ................................. SUCCESS [ 6.289 s]
    [INFO] Apache Hadoop Auth Examples ........................ SUCCESS [ 4.995 s]
    [INFO] Apache Hadoop Common ............................... SUCCESS [02:13 min]
    [INFO] Apache Hadoop NFS .................................. SUCCESS [ 18.014 s]
    [INFO] Apache Hadoop KMS .................................. SUCCESS [ 14.853 s]
    [INFO] Apache Hadoop Common Project ....................... SUCCESS [ 1.199 s]
    [INFO] Apache Hadoop HDFS ................................. SUCCESS [03:56 min]
    [INFO] Apache Hadoop HttpFS ............................... SUCCESS [ 41.761 s]
    [INFO] Apache Hadoop HDFS BookKeeper Journal .............. SUCCESS [ 7.424 s]
    [INFO] Apache Hadoop HDFS-NFS ............................. SUCCESS [ 4.790 s]
    [INFO] Apache Hadoop HDFS Project ......................... SUCCESS [ 0.071 s]
    [INFO] hadoop-yarn ........................................ SUCCESS [ 0.062 s]
    [INFO] hadoop-yarn-api .................................... SUCCESS [ 46.512 s]
    [INFO] hadoop-yarn-common ................................. SUCCESS [ 40.779 s]
    [INFO] hadoop-yarn-server ................................. SUCCESS [ 0.051 s]
    [INFO] hadoop-yarn-server-common .......................... SUCCESS [ 12.650 s]
    [INFO] hadoop-yarn-server-nodemanager ..................... SUCCESS [ 22.826 s]
    [INFO] hadoop-yarn-server-web-proxy ....................... SUCCESS [ 4.278 s]
    [INFO] hadoop-yarn-server-applicationhistoryservice ....... SUCCESS [ 8.284 s]
    [INFO] hadoop-yarn-server-resourcemanager ................. SUCCESS [ 24.365 s]
    [INFO] hadoop-yarn-server-tests ........................... SUCCESS [ 6.071 s]
    [INFO] hadoop-yarn-client ................................. SUCCESS [ 7.911 s]
    [INFO] hadoop-yarn-server-sharedcachemanager .............. SUCCESS [ 4.327 s]
    [INFO] hadoop-yarn-applications ........................... SUCCESS [ 0.043 s]
    [INFO] hadoop-yarn-applications-distributedshell .......... SUCCESS [ 3.613 s]
    [INFO] hadoop-yarn-applications-unmanaged-am-launcher ..... SUCCESS [ 2.570 s]
    [INFO] hadoop-yarn-site ................................... SUCCESS [ 0.053 s]
    [INFO] hadoop-yarn-registry ............................... SUCCESS [ 6.645 s]
    [INFO] hadoop-yarn-project ................................ SUCCESS [ 9.353 s]
    [INFO] hadoop-mapreduce-client ............................ SUCCESS [ 0.062 s]
    [INFO] hadoop-mapreduce-client-core ....................... SUCCESS [ 23.674 s]
    [INFO] hadoop-mapreduce-client-common ..................... SUCCESS [ 21.104 s]
    [INFO] hadoop-mapreduce-client-shuffle .................... SUCCESS [ 4.265 s]
    [INFO] hadoop-mapreduce-client-app ........................ SUCCESS [ 10.053 s]
    [INFO] hadoop-mapreduce-client-hs ......................... SUCCESS [ 6.908 s]
    [INFO] hadoop-mapreduce-client-jobclient .................. SUCCESS [ 11.029 s]
    [INFO] hadoop-mapreduce-client-hs-plugins ................. SUCCESS [ 2.541 s]
    [INFO] Apache Hadoop MapReduce Examples ................... SUCCESS [ 7.050 s]
    [INFO] hadoop-mapreduce ................................... SUCCESS [ 3.871 s]
    [INFO] Apache Hadoop MapReduce Streaming .................. SUCCESS [ 7.371 s]
    [INFO] Apache Hadoop Distributed Copy ..................... SUCCESS [ 13.228 s]
    [INFO] Apache Hadoop Archives ............................. SUCCESS [ 3.766 s]
    [INFO] Apache Hadoop Rumen ................................ SUCCESS [ 7.782 s]
    [INFO] Apache Hadoop Gridmix .............................. SUCCESS [ 6.200 s]
    [INFO] Apache Hadoop Data Join ............................ SUCCESS [ 3.685 s]
    [INFO] Apache Hadoop Ant Tasks ............................ SUCCESS [ 2.809 s]
    [INFO] Apache Hadoop Extras ............................... SUCCESS [ 4.489 s]
    [INFO] Apache Hadoop Pipes ................................ SUCCESS [ 16.950 s]
    [INFO] Apache Hadoop OpenStack support .................... SUCCESS [ 5.690 s]
    [INFO] Apache Hadoop Amazon Web Services support .......... SUCCESS [ 4.586 s]
    [INFO] Apache Hadoop Azure support ........................ SUCCESS [ 5.308 s]
    [INFO] Apache Hadoop Client ............................... SUCCESS [ 10.547 s]
    [INFO] Apache Hadoop Mini-Cluster ......................... SUCCESS [ 0.074 s]
    [INFO] Apache Hadoop Scheduler Load Simulator ............. SUCCESS [ 4.904 s]
    [INFO] Apache Hadoop Tools Dist ........................... SUCCESS [ 25.184 s]
    [INFO] Apache Hadoop Tools ................................ SUCCESS [ 0.049 s]
    [INFO] Apache Hadoop Distribution ......................... SUCCESS [01:36 min]
    [INFO] ------------------------------------------------------------------------
    [INFO] BUILD SUCCESS
    [INFO] ------------------------------------------------------------------------
    [INFO] Total time: 16:46 min
    [INFO] Finished at: 2015-07-15T15:56:43+08:00
    [INFO] Final Memory: 123M/407M
    [INFO] ------------------------------------------------------------------------
    [hadoop@hadoop-node1 hadoop-2.7.1-src]$ cp hadoop-dist/target/hadoop-2.7.1.tar.gz /home/hadoop/application/
    [hadoop@hadoop-node1 hadoop-2.7.1-src]$ cd /home/hadoop/application/
    [hadoop@hadoop-node1 application]$ tar xf hadoop-2.7.1.tar.gz
    [hadoop@hadoop-node1 application]$ ll
    total 190292
    lrwxrwxrwx 1 hadoop hadoop 12 Jul 15 16:03 hadoop -> hadoop-2.7.1
    drwxrwxr-x 9 hadoop hadoop 4096 Jul 15 15:55 hadoop-2.7.1
    -rw-rw-r-- 1 hadoop hadoop 194854446 Jul 15 16:02 hadoop-2.7.1.tar.gz
    [root@hadoop-node1 ~]# vim /etc/profile
    export HADOOP_HOME=/home/hadoop/application/hadoop
    export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:/application/maven/bin
    [hadoop@hadoop-node1 application]$ source /etc/profile
    [hadoop@hadoop-node1 application]$ hadoop version
    Hadoop 2.7.1
    Subversion Unknown -r Unknown
    Compiled by hadoop on 2015-07-15T07:40Z
    Compiled with protoc 2.5.0
    From source with checksum fc0a1a23fc1868e4d5ee7fa2b28a58a
    This command was run using /home/hadoop/application/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar
    [hadoop@hadoop-node1 application]$ hadoop checknative -a
    15/07/15 16:05:33 INFO bzip2.Bzip2Factory: Successfully loaded & initialized native-bzip2 library system-native
    15/07/15 16:05:33 INFO zlib.ZlibFactory: Successfully loaded & initialized native-zlib library
    Native library checking:
    hadoop: true /home/hadoop/application/hadoop-2.7.1/lib/native/libhadoop.so.1.0.0
    zlib: true /lib64/libz.so.1
    snappy: true /usr/lib64/libsnappy.so.1
    lz4: true revision:99
    bzip2: true /lib64/libbz2.so.1
    openssl: true /usr/lib64/libcrypto.so

3. Configuration files (identical on all nodes)

    [hadoop@hadoop-node1 hadoop-2.7.1]$ cd etc/hadoop/
    [hadoop@hadoop-node1 hadoop]$ ll
    total 124
    -rw-r--r-- 1 hadoop hadoop 3589 Jun 21 2014 capacity-scheduler.xml
    -rw-r--r-- 1 hadoop hadoop 1335 Jun 21 2014 configuration.xsl
    -rw-r--r-- 1 hadoop hadoop 318 Jun 21 2014 container-executor.cfg
    -rw-r--r-- 1 hadoop hadoop 774 Jun 21 2014 core-site.xml
    -rw-r--r-- 1 hadoop hadoop 3589 Jun 21 2014 hadoop-env.cmd
    -rw-r--r-- 1 hadoop hadoop 3494 Jun 21 2014 hadoop-env.sh
    -rw-r--r-- 1 hadoop hadoop 2490 Jun 21 2014 hadoop-metrics.properties
    -rw-r--r-- 1 hadoop hadoop 1774 Jun 21 2014 hadoop-metrics2.properties
    -rw-r--r-- 1 hadoop hadoop 9257 Jun 21 2014 hadoop-policy.xml
    -rw-r--r-- 1 hadoop hadoop 775 Jun 21 2014 hdfs-site.xml
    -rw-r--r-- 1 hadoop hadoop 1449 Jun 21 2014 httpfs-env.sh
    -rw-r--r-- 1 hadoop hadoop 1657 Jun 21 2014 httpfs-log4j.properties
    -rw-r--r-- 1 hadoop hadoop 21 Jun 21 2014 httpfs-signature.secret
    -rw-r--r-- 1 hadoop hadoop 620 Jun 21 2014 httpfs-site.xml
    -rw-r--r-- 1 hadoop hadoop 11169 Jun 21 2014 log4j.properties
    -rw-r--r-- 1 hadoop hadoop 918 Jun 21 2014 mapred-env.cmd
    -rw-r--r-- 1 hadoop hadoop 1383 Jun 21 2014 mapred-env.sh
    -rw-r--r-- 1 hadoop hadoop 4113 Jun 21 2014 mapred-queues.xml.template
    -rw-r--r-- 1 hadoop hadoop 758 Jun 21 2014 mapred-site.xml.template
    -rw-r--r-- 1 hadoop hadoop 10 Jun 21 2014 slaves
    -rw-r--r-- 1 hadoop hadoop 2316 Jun 21 2014 ssl-client.xml.example
    -rw-r--r-- 1 hadoop hadoop 2268 Jun 21 2014 ssl-server.xml.example
    -rw-r--r-- 1 hadoop hadoop 2178 Jun 21 2014 yarn-env.cmd
    -rw-r--r-- 1 hadoop hadoop 4564 Jun 21 2014 yarn-env.sh
    -rw-r--r-- 1 hadoop hadoop 690 Jun 21 2014 yarn-site.xml

1)[hadoop@hadoop-node1 hadoop]$ vim hadoop-env.sh

    Line 27:
    # The java implementation to use.
    #export JAVA_HOME=${JAVA_HOME}
    export JAVA_HOME="/usr/java/jdk"
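
The same edit can be scripted, which helps when it has to be repeated on every node; a sketch:

    # replace the (possibly commented-out) default with an explicit JAVA_HOME
    [hadoop@hadoop-node1 hadoop]$ sed -i 's|^#\?export JAVA_HOME=.*|export JAVA_HOME=/usr/java/jdk|' hadoop-env.sh
    [hadoop@hadoop-node1 hadoop]$ grep '^export JAVA_HOME' hadoop-env.sh
    export JAVA_HOME=/usr/java/jdk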

2) [hadoop@hadoop-node1 hadoop]$ vim core-site.xml

    <configuration>
        <property>
            <name>fs.defaultFS</name>
            <value>hdfs://hadoop-node1.csoftintl.com:9000</value>
        </property>
        <property>
            <name>hadoop.tmp.dir</name>
            <value>/hadoop/hdfs</value>
        </property>
    </configuration>

3) [hadoop@hadoop-node1 hadoop]$ vim hdfs-site.xml

    <configuration>
        <property>
            <name>dfs.namenode.name.dir</name>
            <value>file:/hadoop/dfs/name</value>
        </property>
        <property>
            <name>dfs.datanode.data.dir</name>
            <value>file:/hadoop/dfs/data</value>
        </property>
        <property>
            <name>dfs.namenode.secondary.http-address</name>
            <value>hadoop-node1.csoftintl.com:9001</value>
        </property>
        <property>
            <name>dfs.replication</name>
            <value>1</value>
        </property>
    </configuration>
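
Both files point at directories under /hadoop, which must exist and be writable by the hadoop user before HDFS is formatted; a sketch (run as root on every node):

    [root@hadoop-node1 ~]# mkdir -p /hadoop/hdfs /hadoop/dfs/name /hadoop/dfs/data
    [root@hadoop-node1 ~]# chown -R hadoop:hadoop /hadoop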

4) Configure MapReduce

    [hadoop@hadoop-node1 hadoop]$ cp -a mapred-site.xml.template mapred-site.xml
    [hadoop@hadoop-node1 hadoop]$ vim mapred-site.xml
    <configuration>
        <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
        </property>
    </configuration>

5)[hadoop@hadoop-node1 hadoop]$ vim yarn-site.xml

    <configuration>
        <property>
            <name>yarn.resourcemanager.hostname</name>
            <value>hadoop-node1.csoftintl.com</value>
        </property>
        <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
        </property>
    </configuration>

6)[hadoop@hadoop-node1 ~]$ vim app/hadoop-2.7.1/etc/hadoop/slaves 
This file lists all of the datanode hosts, so plan the layout in advance; as a rule, the node that runs the namenode does not also run a datanode.

    hadoop-node2.csoftintl.com
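
Since the configuration must be identical on every node (see step 3 above), the finished etc/hadoop directory can simply be synced to each datanode; a sketch assuming the same install path everywhere, relying on the SSH trust set up in step 5 below:

    [hadoop@hadoop-node1 ~]$ rsync -av /home/hadoop/application/hadoop/etc/hadoop/ \
        hadoop@hadoop-node2.csoftintl.com:/home/hadoop/application/hadoop/etc/hadoop/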

7) Finally, configure the HADOOP_HOME environment variable

    [root@hadoop-node1 ~]# vim /etc/profile
    export HADOOP_HOME=/hadoop/hadoop
    export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
    [root@hadoop-node1 ~]# source /etc/profile

8) Check the Hadoop version to confirm the configuration

    [hadoop@hadoop-node1 ~]$ hadoop version
    Hadoop 2.7.1
    Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r 15ecc87ccf4a0228f35af08fc56de536e6ce657a
    Compiled by jenkins on 2015-06-29T06:04Z
    Compiled with protoc 2.5.0
    From source with checksum fc0a1a23fc1868e4d5ee7fa2b28a58a
    This command was run using /home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar

4. Format HDFS (run on the namenode host)

    [hadoop@hadoop-node1 ~]$ hdfs namenode -format
    15/07/13 16:01:45 INFO namenode.NameNode: STARTUP_MSG:
    /************************************************************
    STARTUP_MSG: Starting NameNode
    STARTUP_MSG: host = hadoop-node1.csoftintl.com/172.16.1.39
    STARTUP_MSG: args = [-format]
    STARTUP_MSG: version = 2.7.1
    STARTUP_MSG: classpath = /home/hadoop/app/hadoop-2.7.1/etc/hadoop:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-math3-3.1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-auth-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/mockito-all-1.8.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpclient-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/gson-2.2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsp-api-2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-httpclient-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpcore-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-configuration-1.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsch-0.1.42.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-net-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/comm
on/lib/curator-recipes-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-digester-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jets3t-0.9.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-framework-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/
hadoop-hdfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-client-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-api-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-registry-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-serve
r-sharedcachemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/contrib/capacity-scheduler/*.jar
    STARTUP_MSG: build = https://git-wip-us.apache.org/repos/asf/hadoop.git -r 15ecc87ccf4a0228f35af08fc56de536e6ce657a; compiled by 'jenkins' on 2015-06-29T06:04Z
    STARTUP_MSG: java = 1.7.0_79
    ************************************************************/
    15/07/13 16:01:45 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
    15/07/13 16:01:45 INFO namenode.NameNode: createNameNode [-format]
    15/07/13 16:01:46 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
    Formatting using clusterid: CID-baec9f0b-46b2-405e-815e-2603ae5493be
    15/07/13 16:01:47 INFO namenode.FSNamesystem: No KeyProvider found.
    15/07/13 16:01:47 INFO namenode.FSNamesystem: fsLock is fair:true
    15/07/13 16:01:47 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
    15/07/13 16:01:47 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: The block deletion will start around 2015 Jul 13 16:01:47
    15/07/13 16:01:47 INFO util.GSet: Computing capacity for map BlocksMap
    15/07/13 16:01:47 INFO util.GSet: VM type = 64-bit
    15/07/13 16:01:47 INFO util.GSet: 2.0% max memory 910.5 MB = 18.2 MB
    15/07/13 16:01:47 INFO util.GSet: capacity = 2^21 = 2097152 entries
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: defaultReplication = 1
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: maxReplication = 512
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: minReplication = 1
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: maxReplicationStreams = 2
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: shouldCheckForEnoughRacks = false
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: encryptDataTransfer = false
    15/07/13 16:01:47 INFO blockmanagement.BlockManager: maxNumBlocksToLog = 1000
    15/07/13 16:01:47 INFO namenode.FSNamesystem: fsOwner = hadoop (auth:SIMPLE)
    15/07/13 16:01:47 INFO namenode.FSNamesystem: supergroup = supergroup
    15/07/13 16:01:47 INFO namenode.FSNamesystem: isPermissionEnabled = true
    15/07/13 16:01:47 INFO namenode.FSNamesystem: HA Enabled: false
    15/07/13 16:01:47 INFO namenode.FSNamesystem: Append Enabled: true
    15/07/13 16:01:48 INFO util.GSet: Computing capacity for map INodeMap
    15/07/13 16:01:48 INFO util.GSet: VM type = 64-bit
    15/07/13 16:01:48 INFO util.GSet: 1.0% max memory 910.5 MB = 9.1 MB
    15/07/13 16:01:48 INFO util.GSet: capacity = 2^20 = 1048576 entries
    15/07/13 16:01:48 INFO namenode.FSDirectory: ACLs enabled? false
    15/07/13 16:01:48 INFO namenode.FSDirectory: XAttrs enabled? true
    15/07/13 16:01:48 INFO namenode.FSDirectory: Maximum size of an xattr: 16384
    15/07/13 16:01:48 INFO namenode.NameNode: Caching file names occuring more than 10 times
    15/07/13 16:01:48 INFO util.GSet: Computing capacity for map cachedBlocks
    15/07/13 16:01:48 INFO util.GSet: VM type = 64-bit
    15/07/13 16:01:48 INFO util.GSet: 0.25% max memory 910.5 MB = 2.3 MB
    15/07/13 16:01:48 INFO util.GSet: capacity = 2^18 = 262144 entries
    15/07/13 16:01:48 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
    15/07/13 16:01:48 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
    15/07/13 16:01:48 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000
    15/07/13 16:01:48 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
    15/07/13 16:01:48 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
    15/07/13 16:01:48 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
    15/07/13 16:01:48 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
    15/07/13 16:01:48 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
    15/07/13 16:01:48 INFO util.GSet: Computing capacity for map NameNodeRetryCache
    15/07/13 16:01:48 INFO util.GSet: VM type = 64-bit
    15/07/13 16:01:48 INFO util.GSet: 0.029999999329447746% max memory 910.5 MB = 279.7 KB
    15/07/13 16:01:48 INFO util.GSet: capacity = 2^15 = 32768 entries
    15/07/13 16:01:48 INFO namenode.FSImage: Allocated new BlockPoolId: BP-69428073-172.16.1.39-1436774508204
    15/07/13 16:01:48 INFO common.Storage: Storage directory /home/hadoop/app/hadoop-2.7.1/tmp/dfs/name has been successfully formatted.
    15/07/13 16:01:48 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
    15/07/13 16:01:48 INFO util.ExitUtil: Exiting with status 0
    15/07/13 16:01:48 INFO namenode.NameNode: SHUTDOWN_MSG:
    /************************************************************
    SHUTDOWN_MSG: Shutting down NameNode at hadoop-node1.csoftintl.com/172.16.1.39
    ************************************************************/
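
After a successful format the name directory holds a fresh, empty fsimage; a quick sanity check (using the directory the run above reported):

    [hadoop@hadoop-node1 ~]$ ls /home/hadoop/app/hadoop-2.7.1/tmp/dfs/name/current
    # expect: VERSION  fsimage_0000000000000000000  fsimage_0000000000000000000.md5  seen_txid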

5. Configure passwordless SSH authentication

    [hadoop@hadoop-node1 ~]$ ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
    Generating public/private rsa key pair.
    Your identification has been saved in /home/hadoop/.ssh/id_rsa.
    Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
    The key fingerprint is:
    ba:4f:f7:87:4e:73:7f:e8:f1:56:8d:12:6b:23:ab:f1 hadoop@hadoop-node1.csoftintl.com
    The key's randomart image is:
    +--[ RSA 2048]----+
    | |
    | |
    | |
    | . |
    | S o ..|
    | . . = . o|
    | . o .+o+o..|
    | o +.o.oo+o|
    | ..o.E.oo..+|
    +-----------------+

    [hadoop@hadoop-node1 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node1.csoftintl.com
    hadoop@hadoop-node1.csoftintl.com's password:
    Now try logging into the machine, with "ssh 'hadoop@hadoop-node1.csoftintl.com'", and check in:
    .ssh/authorized_keys
    to make sure we haven't added extra keys that you weren't expecting.

    [hadoop@hadoop-node1 ~]$ ssh hadoop@hadoop-node1.csoftintl.com
    Last login: Sat Jul 11 02:32:42 2015 from ::1

6. Start the HDFS distributed file system

    [hadoop@hadoop-node1 hadoop]$ start-dfs.sh
    Starting namenodes on [hadoop-node1.csoftintl.com]
    hadoop-node1.csoftintl.com: starting namenode, logging to /home/hadoop/application/hadoop-2.7.1/logs/hadoop-hadoop-namenode-hadoop-node1.csoftintl.com.out
    hadoop-node2.csoftintl.com: starting datanode, logging to /home/hadoop/application/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node2.csoftintl.com.out
    Starting secondary namenodes [hadoop-node1.csoftintl.com]
    hadoop-node1.csoftintl.com: starting secondarynamenode, logging to /home/hadoop/application/hadoop-2.7.1/logs/hadoop-hadoop-secondarynamenode-hadoop-node1.csoftintl.com.out
    [hadoop@hadoop-node1 hadoop]$ jps
    24786 SecondaryNameNode
    24901 Jps
    24583 NameNode
    [hadoop@hadoop-node2 hadoop]$ jps
    4147 Jps
    4060 DataNode

7. Test the distributed file system

    [hadoop@hadoop-node1 ~]$ hadoop fs
    Usage: hadoop fs [generic options]
    [-appendToFile <localsrc> ... <dst>]
    [-cat [-ignoreCrc] <src> ...]
    [-checksum <src> ...]
    [-chgrp [-R] GROUP PATH...]
    [-chmod [-R] <MODE[,MODE]... | OCTALMODE> PATH...]
    [-chown [-R] [OWNER][:[GROUP]] PATH...]
    [-copyFromLocal [-f] [-p] <localsrc> ... <dst>]
    [-copyToLocal [-p] [-ignoreCrc] [-crc] <src> ... <localdst>]
    [-count [-q] <path> ...]
    [-cp [-f] [-p] <src> ... <dst>]
    [-createSnapshot <snapshotDir> [<snapshotName>]]
    [-deleteSnapshot <snapshotDir> <snapshotName>]
    [-df [-h] [<path> ...]]
    [-du [-s] [-h] <path> ...]
    [-expunge]
    [-get [-p] [-ignoreCrc] [-crc] <src> ... <localdst>]
    [-getfacl [-R] <path>]
    [-getmerge [-nl] <src> <localdst>]
    [-help [cmd ...]]
    [-ls [-d] [-h] [-R] [<path> ...]]
    [-mkdir [-p] <path> ...]
    [-moveFromLocal <localsrc> ... <dst>]
    [-moveToLocal <src> <localdst>]
    [-mv <src> ... <dst>]
    [-put [-f] [-p] <localsrc> ... <dst>]
    [-renameSnapshot <snapshotDir> <oldName> <newName>]
    [-rm [-f] [-r|-R] [-skipTrash] <src> ...]
    [-rmdir [--ignore-fail-on-non-empty] <dir> ...]
    [-setfacl [-R] [{-b|-k} {-m|-x <acl_spec>} <path>]|[--set <acl_spec> <path>]]
    [-setrep [-R] [-w] <rep> <path> ...]
    [-stat [format] <path> ...]
    [-tail [-f] <file>]
    [-test -[defsz] <path>]
    [-text [-ignoreCrc] <src> ...]
    [-touchz <path> ...]
    [-usage [cmd ...]]

1) Add a file to HDFS

    [hadoop@hadoop-node1 ~]$ hadoop fs -put hadoop-2.7.1.tar.gz /
    15/07/13 16:03:55 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
    [hadoop@hadoop-node1 ~]$ hadoop fs -ls /
    15/07/13 16:04:09 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
    Found 1 items
    -rw-r--r-- 1 hadoop supergroup 210606807 2015-07-13 16:03 /hadoop-2.7.1.tar.gz

2) Delete the file

    [hadoop@hadoop-node1 ~]$ hadoop fs -rm /hadoop-2.7.1.tar.gz
    15/07/13 16:09:47 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
    15/07/13 16:09:48 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 0 minutes, Emptier interval = 0 minutes.
    Deleted /hadoop-2.7.1.tar.gz
    [hadoop@hadoop-node1 ~]$ hadoop fs -ls /
    15/07/13 16:09:58 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable

8. Start the YARN job-management cluster

    [hadoop@hadoop-node1 ~]$ start-yarn.sh
    starting yarn daemons
    starting resourcemanager, logging to /home/hadoop/application/hadoop-2.7.1/logs/yarn-hadoop-resourcemanager-hadoop-node1.csoftintl.com.out
    hadoop-node2.csoftintl.com: starting nodemanager, logging to /home/hadoop/application/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node2.csoftintl.com.out
    [hadoop@hadoop-node1 ~]$ jps
    24786 SecondaryNameNode
    7781 Jps
    24583 NameNode
    7533 ResourceManager

    [hadoop@hadoop-node2 ~]$ jps
    4060 DataNode
    14812 Jps
    14654 NodeManager

As shown above, the namenode host now also runs a ResourceManager process, while the datanode host runs a NodeManager process.
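
The registration can also be confirmed from the ResourceManager side; a sketch:

    [hadoop@hadoop-node1 ~]$ yarn node -list
    # expect one RUNNING entry for hadoop-node2.csoftintl.com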

9. Test that MapReduce and the YARN framework are working

    [hadoop@hadoop-node1 ~]$ hadoop fs -ls /flow/srcdata
    Found 1 items
    -rw-r--r-- 1 hadoop supergroup 2229 2015-07-17 16:04 /flow/srcdata/HTTP_20130313143750.dat

    [hadoop@hadoop-node1 ~]$ hadoop jar flow.jar cn.itheima.bigdata.hadoop.mr.flowcount.FlowCount /flow/srcdata /flow/output
    15/07/20 11:48:25 INFO client.RMProxy: Connecting to ResourceManager at hadoop-node1.csoftintl.com/172.16.1.39:8032
    Exception in thread "main" org.apache.hadoop.mapred.FileAlreadyExistsException: Output directory hdfs://hadoop-node1.csoftintl.com:9000/flow/output already exists
    at org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.checkOutputSpecs(FileOutputFormat.java:146)
    at org.apache.hadoop.mapreduce.JobSubmitter.checkSpecs(JobSubmitter.java:266)
    at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:139)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
    at cn.itheima.bigdata.hadoop.mr.flowcount.FlowCount.main(FlowCount.java:101)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
    [hadoop@hadoop-node1 ~]$ hadoop fs -rm -r /flow/output
    15/07/20 11:48:46 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 0 minutes, Emptier interval = 0 minutes.
    Deleted /flow/output
    [hadoop@hadoop-node1 ~]$ hadoop jar flow.jar cn.itheima.bigdata.hadoop.mr.flowcount.FlowCount /flow/srcdata /flow/output
    15/07/20 11:48:52 INFO client.RMProxy: Connecting to ResourceManager at hadoop-node1.csoftintl.com/172.16.1.39:8032
    15/07/20 11:48:53 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
    15/07/20 11:48:53 INFO input.FileInputFormat: Total input paths to process : 1
    15/07/20 11:48:53 INFO mapreduce.JobSubmitter: number of splits:1
    15/07/20 11:48:54 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1437360105422_0002
    15/07/20 11:48:54 INFO impl.YarnClientImpl: Submitted application application_1437360105422_0002
    15/07/20 11:48:54 INFO mapreduce.Job: The url to track the job: http://hadoop-node1.csoftintl.com:8088/proxy/application_1437360105422_0002/
    15/07/20 11:48:54 INFO mapreduce.Job: Running job: job_1437360105422_0002
    15/07/20 11:49:04 INFO mapreduce.Job: Job job_1437360105422_0002 running in uber mode : false
    15/07/20 11:49:04 INFO mapreduce.Job: map 0% reduce 0%
    15/07/20 11:49:16 INFO mapreduce.Job: map 100% reduce 0%
    15/07/20 11:49:24 INFO mapreduce.Job: map 100% reduce 100%
    15/07/20 11:49:24 INFO mapreduce.Job: Job job_1437360105422_0002 completed successfully
    15/07/20 11:49:24 INFO mapreduce.Job: Counters: 49
    File System Counters
    FILE: Number of bytes read=1122
    FILE: Number of bytes written=234001
    FILE: Number of read operations=0
    FILE: Number of large read operations=0
    FILE: Number of write operations=0
    HDFS: Number of bytes read=2369
    HDFS: Number of bytes written=551
    HDFS: Number of read operations=6
    HDFS: Number of large read operations=0
    HDFS: Number of write operations=2
    Job Counters
    Launched map tasks=1
    Launched reduce tasks=1
    Data-local map tasks=1
    Total time spent by all maps in occupied slots (ms)=9612
    Total time spent by all reduces in occupied slots (ms)=5644
    Total time spent by all map tasks (ms)=9612
    Total time spent by all reduce tasks (ms)=5644
    Total vcore-seconds taken by all map tasks=9612
    Total vcore-seconds taken by all reduce tasks=5644
    Total megabyte-seconds taken by all map tasks=9842688
    Total megabyte-seconds taken by all reduce tasks=5779456
    Map-Reduce Framework
    Map input records=22
    Map output records=22
    Map output bytes=1072
    Map output materialized bytes=1122
    Input split bytes=140
    Combine input records=0
    Combine output records=0
    Reduce input groups=21
    Reduce shuffle bytes=1122
    Reduce input records=22
    Reduce output records=21
    Spilled Records=44
    Shuffled Maps =1
    Failed Shuffles=0
    Merged Map outputs=1
    GC time elapsed (ms)=515
    CPU time spent (ms)=3880
    Physical memory (bytes) snapshot=432300032
    Virtual memory (bytes) snapshot=1365823488
    Total committed heap usage (bytes)=275251200
    Shuffle Errors
    BAD_ID=0
    CONNECTION=0
    IO_ERROR=0
    WRONG_LENGTH=0
    WRONG_MAP=0
    WRONG_REDUCE=0
    File Input Format Counters
    Bytes Read=2229
    File Output Format Counters
    Bytes Written=551

    [hadoop@hadoop-node1 ~]$ hadoop fs -ls /flow/output
    Found 2 items
    -rw-r--r-- 1 hadoop supergroup 0 2015-07-20 11:49 /flow/output/_SUCCESS
    -rw-r--r-- 1 hadoop supergroup 551 2015-07-20 11:49 /flow/output/part-r-00000
    [hadoop@hadoop-node1 ~]$ hadoop fs -cat /flow/output/part-r-00000
    13480253104 180 180 360
    13502468823 7335 110349 117684
    13560436666 1116 954 2070
    13560439658 2034 5892 7926
    13602846565 1938 2910 4848
    13660577991 6960 690 7650
    13719199419 240 0 240
    13726230503 2481 24681 27162
    13726238888 2481 24681 27162
    13760778710 120 120 240
    13826544101 264 0 264
    13922314466 3008 3720 6728
    13925057413 11058 48243 59301
    13926251106 240 0 240
    13926435656 132 1512 1644
    15013685858 3659 3538 7197
    15920133257 3156 2936 6092
    15989002119 1938 180 2118
    18211575961 1527 2106 3633
    18320173382 9531 2412 11943
    84138413 4116 1432 5548
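
Each result line is keyed by phone number; the three values are presumably upstream traffic, downstream traffic, and their sum (the third column is indeed the total of the first two), which is what the FlowCount job computes.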

IV. The MapReduce Framework and Basic Algorithms

V. Hadoop's HA Mechanism and Federation
1. Why HA is needed
If the HDFS NameNode goes down, the entire cluster can no longer provide service.

2. How it is implemented
Automatic failover is built on Zookeeper; as the configuration below shows, the active and standby NameNodes also share their edit log through a JournalNode quorum.

3. What Zookeeper is
Zookeeper provides coordination services for distributed clusters, acting as a trusted third party that manages shared data. Zookeeper itself is highly reliable: it is a distributed, highly available cluster in its own right.

4. Typical Zookeeper use cases
1) Unified naming service
A distributed application usually needs a complete set of naming rules that both generates unique names and keeps them easy for people to recognize and remember. A name service is a built-in Zookeeper feature; it only takes a call to the API.
2) Configuration management
Configuration management is common in distributed environments: the same application runs on many servers, and some configuration items are identical across them. Changing such an item would otherwise mean editing every server that runs the application, which is tedious and error-prone. Instead, the configuration is stored in a Zookeeper znode, and every machine that needs it watches that node; as soon as the configuration changes, each machine is notified and fetches the new settings (a zkCli sketch follows this list).
3) Cluster management
Zookeeper makes cluster management straightforward. When several servers form a service cluster, some "manager" must know the service state of every machine; if a machine stops serving, the manager must learn of it and re-allocate the service load, and it must likewise be told whenever capacity is added by bringing in one or more servers. Zookeeper not only tracks the service state of every node in the current cluster, it can also elect the "manager" that runs the cluster. That is Zookeeper's other major feature: leader election.
4) Shared locks
A shared lock is easy to implement within a single process but hard across processes or across servers. Zookeeper implements it easily.
5) Queue management
(Distributed queues can likewise be built from Zookeeper's sequential znodes.)
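
As a concrete illustration of the configuration-management case, a znode can be created and watched from the zkCli.sh shell that ships with Zookeeper; a minimal sketch (the znode path and its values are hypothetical; the install path matches the Zookeeper deployment used later in this document):

    [hadoop@hadoop-node5 ~]$ ./app/zookeeper/bin/zkCli.sh -server hadoop-node5:2181
    # store a shared configuration value in a znode (path and value are made up)
    [zk: hadoop-node5:2181(CONNECTED) 0] create /myapp-config "timeout=30"
    # read it back and register a one-shot watch (Zookeeper 3.4 CLI syntax)
    [zk: hadoop-node5:2181(CONNECTED) 1] get /myapp-config watch
    # any update now fires a NodeDataChanged event on every watching client,
    # which then re-reads the znode to pick up the new value
    [zk: hadoop-node5:2181(CONNECTED) 2] set /myapp-config "timeout=60"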

VI. Hadoop Cluster Deployment and Startup
1. Cluster plan
Two namenodes, two resourcemanagers, and three datanodes (the datanodes additionally run the Zookeeper ensemble and the qjournal JournalNodes).
2. Deployment with Salt
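
A hedged sketch of pushing the configuration from the salt master, assuming minions named hadoop-node* and a state tree named hadoop that manages the files below (both names are assumptions):

    [root@linux-node0 ~]# salt 'hadoop-node*' test.ping        # confirm every minion responds
    [root@linux-node0 ~]# salt 'hadoop-node*' state.sls hadoop # distribute the config files below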
 
The main configuration files:

[root@linux-node0 files]# vim core-site.xml

    <configuration>
        <!-- Set the HDFS nameservice to ns1 -->
        <property>
            <name>fs.defaultFS</name>
            <value>hdfs://ns1</value>
        </property>
        <!-- Hadoop temp directory -->
        <property>
            <name>hadoop.tmp.dir</name>
            <value>/home/hadoop/app/hadoop/tmp</value>
        </property>
        <!-- Zookeeper ensemble addresses -->
        <property>
            <name>ha.zookeeper.quorum</name>
            <value>hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181</value>
        </property>
    </configuration>

[root@linux-node0 files]# vim hdfs-site.xml

    <configuration>
        <!-- Set the HDFS nameservice to ns1; it must match core-site.xml -->
        <property>
            <name>dfs.nameservices</name>
            <value>ns1</value>
        </property>
        <!-- ns1 has two NameNodes: nn1 and nn2 -->
        <property>
            <name>dfs.ha.namenodes.ns1</name>
            <value>nn1,nn2</value>
        </property>
        <!-- RPC address of nn1 -->
        <property>
            <name>dfs.namenode.rpc-address.ns1.nn1</name>
            <value>hadoop-node1:9000</value>
        </property>
        <!-- HTTP address of nn1 -->
        <property>
            <name>dfs.namenode.http-address.ns1.nn1</name>
            <value>hadoop-node1:50070</value>
        </property>
        <!-- RPC address of nn2 -->
        <property>
            <name>dfs.namenode.rpc-address.ns1.nn2</name>
            <value>hadoop-node2:9000</value>
        </property>
        <!-- HTTP address of nn2 -->
        <property>
            <name>dfs.namenode.http-address.ns1.nn2</name>
            <value>hadoop-node2:50070</value>
        </property>
        <!-- Where the NameNode's shared edit log is stored on the JournalNodes -->
        <property>
            <name>dfs.namenode.shared.edits.dir</name>
            <value>qjournal://hadoop-node5:8485;hadoop-node6:8485;hadoop-node7:8485/ns1</value>
        </property>
        <!-- Where each JournalNode keeps its data on local disk -->
        <property>
            <name>dfs.journalnode.edits.dir</name>
            <value>/home/hadoop/app/hadoop/journaldata</value>
        </property>
        <!-- Enable automatic failover on NameNode failure -->
        <property>
            <name>dfs.ha.automatic-failover.enabled</name>
            <value>true</value>
        </property>
        <!-- Failover proxy provider used by clients -->
        <property>
            <name>dfs.client.failover.proxy.provider.ns1</name>
            <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <!-- Fencing methods; multiple methods are separated by newlines, one per line -->
        <property>
            <name>dfs.ha.fencing.methods</name>
            <value>
                sshfence
                shell(/bin/true)
            </value>
        </property>
        <!-- The sshfence method requires passwordless SSH -->
        <property>
            <name>dfs.ha.fencing.ssh.private-key-files</name>
            <value>/home/hadoop/.ssh/id_rsa</value>
        </property>
        <!-- Connect timeout for the sshfence method -->
        <property>
            <name>dfs.ha.fencing.ssh.connect-timeout</name>
            <value>30000</value>
        </property>
    </configuration>

[root@linux-node0 files]# vim mapred-site.xml

    <configuration>
        <!-- Run the MapReduce framework on YARN -->
        <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
        </property>
    </configuration>

[root@linux-node0 files]# vim yarn-site.xml

    <configuration>
        <!-- Enable ResourceManager HA -->
        <property>
            <name>yarn.resourcemanager.ha.enabled</name>
            <value>true</value>
        </property>
        <!-- Cluster id of the RM pair -->
        <property>
            <name>yarn.resourcemanager.cluster-id</name>
            <value>yrc</value>
        </property>
        <!-- Logical names of the RMs -->
        <property>
            <name>yarn.resourcemanager.ha.rm-ids</name>
            <value>rm1,rm2</value>
        </property>
        <!-- Address of each RM -->
        <property>
            <name>yarn.resourcemanager.hostname.rm1</name>
            <value>hadoop-node3</value>
        </property>
        <property>
            <name>yarn.resourcemanager.hostname.rm2</name>
            <value>hadoop-node4</value>
        </property>
        <!-- Zookeeper ensemble addresses -->
        <property>
            <name>yarn.resourcemanager.zk-address</name>
            <value>hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181</value>
        </property>
        <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
        </property>
    </configuration>

[root@linux-node0 files]# vim zoo.cfg

    # The number of milliseconds of each tick
    tickTime=2000
    # The number of ticks that the initial
    # synchronization phase can take
    initLimit=10
    # The number of ticks that can pass between
    # sending a request and getting an acknowledgement
    syncLimit=5
    # the directory where the snapshot is stored.
    # do not use /tmp for storage, /tmp here is just
    # example sakes.
    dataDir=/home/hadoop/app/zookeeper/data
    # the port at which the clients will connect
    clientPort=2181
    server.1=192.168.7.45:2888:3888
    server.2=192.168.7.46:2888:3888
    server.3=192.168.7.47:2888:3888
    # the maximum number of client connections.
    # increase this if you need to handle more clients
    #maxClientCnxns=60
    #
    # Be sure to read the maintenance section of the
    # administrator guide before turning on autopurge.
    #
    # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
    #
    # The number of snapshots to retain in dataDir
    #autopurge.snapRetainCount=3
    # Purge task interval in hours
    # Set to "0" to disable auto purge feature
    #autopurge.purgeInterval=1

[root@linux-node0 files]# vim slaves

    hadoop-node5
    hadoop-node6
    hadoop-node7

3. Cluster startup steps
1) First start the three Zookeeper instances, which by the plan above run on the three datanodes

    [hadoop@hadoop-node5 ~]$ ./app/zookeeper/bin/zkServer.sh start
    JMX enabled by default
    Using config: /home/hadoop/app/zookeeper/bin/../conf/zoo.cfg
    Starting zookeeper ... STARTED
    [hadoop@hadoop-node5 ~]$ jps
    29422 Jps
    29398 QuorumPeerMain
    [hadoop@hadoop-node6 ~]$ ./app/zookeeper/bin/zkServer.sh start
    JMX enabled by default
    Using config: /home/hadoop/app/zookeeper/bin/../conf/zoo.cfg
    Starting zookeeper ... STARTED
    [hadoop@hadoop-node6 ~]$ jps
    28582 Jps
    28555 QuorumPeerMain
    [hadoop@hadoop-node7 ~]$ ./app/zookeeper/bin/zkServer.sh start
    JMX enabled by default
    Using config: /home/hadoop/app/zookeeper/bin/../conf/zoo.cfg
    Starting zookeeper ... STARTED
    [hadoop@hadoop-node7 ~]$ jps
    28037 QuorumPeerMain
    28064 Jps
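
Whether the three instances actually formed a quorum can be checked on each node; a sketch:

    [hadoop@hadoop-node5 ~]$ ./app/zookeeper/bin/zkServer.sh status
    JMX enabled by default
    Using config: /home/hadoop/app/zookeeper/bin/../conf/zoo.cfg
    Mode: follower    # exactly one of the three nodes reports Mode: leader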

2) Start the JournalNodes

    [hadoop@hadoop-node5 ~]$ hadoop-daemon.sh start journalnode
    starting journalnode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-journalnode-hadoop-node5.csoftintl.com.out
    [hadoop@hadoop-node5 ~]$ jps
    29509 JournalNode
    29398 QuorumPeerMain
    29560 Jps
    [hadoop@hadoop-node6 ~]$ hadoop-daemon.sh start journalnode
    starting journalnode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-journalnode-hadoop-node6.csoftintl.com.out
    [hadoop@hadoop-node6 ~]$ jps
    28706 Jps
    28655 JournalNode
    28555 QuorumPeerMain
    [hadoop@hadoop-node7 ~]$ hadoop-daemon.sh start journalnode
    starting journalnode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-journalnode-hadoop-node7.csoftintl.com.out
    [hadoop@hadoop-node7 ~]$ jps
    28037 QuorumPeerMain
    28169 Jps
    28115 JournalNode

3) Format the HDFS namenode

    [hadoop@hadoop-node1 ~]$ hdfs namenode -format
    15/07/21 17:48:22 INFO namenode.NameNode: STARTUP_MSG:
    /************************************************************
    STARTUP_MSG: Starting NameNode
    STARTUP_MSG: host = hadoop-node1.csoftintl.com/172.16.1.41
    STARTUP_MSG: args = [-format]
    STARTUP_MSG: version = 2.7.1
    STARTUP_MSG: classpath = /home/hadoop/app/hadoop-2.7.1/etc/hadoop:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-math3-3.1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-auth-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/mockito-all-1.8.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpclient-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/gson-2.2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsp-api-2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-httpclient-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpcore-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-configuration-1.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsch-0.1.42.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-net-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/com
mon/lib/curator-recipes-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-digester-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jets3t-0.9.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-framework-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs
/hadoop-hdfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-client-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-api-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-registry-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-serv
er-sharedcachemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1-tests.jar:/home/hadoop/app/hadoop/contrib/capacity-scheduler/*.jar
  9. STARTUP_MSG: build = Unknown -r Unknown; compiled by 'hadoop' on 2015-07-20T01:43Z
  10. STARTUP_MSG: java = 1.7.0_75
  11. ************************************************************/
  12. 15/07/21 17:48:22 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
  13. 15/07/21 17:48:22 INFO namenode.NameNode: createNameNode [-format]
  14. Formatting using clusterid: CID-e3ea5761-53de-4f50-a3f3-608074105da9
  15. 15/07/21 17:48:24 INFO namenode.FSNamesystem: No KeyProvider found.
  16. 15/07/21 17:48:24 INFO namenode.FSNamesystem: fsLock is fair:true
  17. 15/07/21 17:48:24 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000
  18. 15/07/21 17:48:24 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
  19. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
  20. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: The block deletion will start around 2015 Jul 21 17:48:24
  21. 15/07/21 17:48:24 INFO util.GSet: Computing capacity for map BlocksMap
  22. 15/07/21 17:48:24 INFO util.GSet: VM type = 64-bit
  23. 15/07/21 17:48:24 INFO util.GSet: 2.0% max memory 889 MB = 17.8 MB
  24. 15/07/21 17:48:24 INFO util.GSet: capacity = 2^21 = 2097152 entries
  25. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
  26. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: defaultReplication = 3
  27. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: maxReplication = 512
  28. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: minReplication = 1
  29. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: maxReplicationStreams = 2
  30. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: shouldCheckForEnoughRacks = false
  31. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
  32. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: encryptDataTransfer = false
  33. 15/07/21 17:48:24 INFO blockmanagement.BlockManager: maxNumBlocksToLog = 1000
  34. 15/07/21 17:48:24 INFO namenode.FSNamesystem: fsOwner = hadoop (auth:SIMPLE)
  35. 15/07/21 17:48:24 INFO namenode.FSNamesystem: supergroup = supergroup
  36. 15/07/21 17:48:24 INFO namenode.FSNamesystem: isPermissionEnabled = true
  37. 15/07/21 17:48:24 INFO namenode.FSNamesystem: Determined nameservice ID: ns1
  38. 15/07/21 17:48:24 INFO namenode.FSNamesystem: HA Enabled: true
  39. 15/07/21 17:48:24 INFO namenode.FSNamesystem: Append Enabled: true
  40. 15/07/21 17:48:25 INFO util.GSet: Computing capacity for map INodeMap
  41. 15/07/21 17:48:25 INFO util.GSet: VM type = 64-bit
  42. 15/07/21 17:48:25 INFO util.GSet: 1.0% max memory 889 MB = 8.9 MB
  43. 15/07/21 17:48:25 INFO util.GSet: capacity = 2^20 = 1048576 entries
  44. 15/07/21 17:48:25 INFO namenode.FSDirectory: ACLs enabled? false
  45. 15/07/21 17:48:25 INFO namenode.FSDirectory: XAttrs enabled? true
  46. 15/07/21 17:48:25 INFO namenode.FSDirectory: Maximum size of an xattr: 16384
  47. 15/07/21 17:48:25 INFO namenode.NameNode: Caching file names occuring more than 10 times
  48. 15/07/21 17:48:25 INFO util.GSet: Computing capacity for map cachedBlocks
  49. 15/07/21 17:48:25 INFO util.GSet: VM type = 64-bit
  50. 15/07/21 17:48:25 INFO util.GSet: 0.25% max memory 889 MB = 2.2 MB
  51. 15/07/21 17:48:25 INFO util.GSet: capacity = 2^18 = 262144 entries
  52. 15/07/21 17:48:25 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
  53. 15/07/21 17:48:25 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0
  54. 15/07/21 17:48:25 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000
  55. 15/07/21 17:48:25 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
  56. 15/07/21 17:48:25 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
  57. 15/07/21 17:48:25 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
  58. 15/07/21 17:48:25 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
  59. 15/07/21 17:48:25 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
  60. 15/07/21 17:48:25 INFO util.GSet: Computing capacity for map NameNodeRetryCache
  61. 15/07/21 17:48:25 INFO util.GSet: VM type = 64-bit
  62. 15/07/21 17:48:25 INFO util.GSet: 0.029999999329447746% max memory 889 MB = 273.1 KB
  63. 15/07/21 17:48:25 INFO util.GSet: capacity = 2^15 = 32768 entries
  64. 15/07/21 17:48:27 INFO namenode.FSImage: Allocated new BlockPoolId: BP-180122279-172.16.1.41-1437472107228
  65. 15/07/21 17:48:27 INFO common.Storage: Storage directory /home/hadoop/app/hadoop/tmp/dfs/name has been successfully formatted.
  66. 15/07/21 17:48:27 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
  67. 15/07/21 17:48:28 INFO util.ExitUtil: Exiting with status 0
  68. 15/07/21 17:48:28 INFO namenode.NameNode: SHUTDOWN_MSG:
  69. /************************************************************
  70. SHUTDOWN_MSG: Shutting down NameNode at hadoop-node1.csoftintl.com/172.16.1.41
  71. ************************************************************/

4) Copy the primary NameNode's metadata to the same directory on the HA standby node

  1. [hadoop@hadoop-node1 hadoop]$ scp -r tmp/ hadoop-node2:~/app/hadoop/
  2. seen_txid 100% 2 0.0KB/s 00:00
  3. VERSION 100% 202 0.2KB/s 00:00
  4. fsimage_0000000000000000000 100% 353 0.3KB/s 00:00
  5. fsimage_0000000000000000000.md5 100% 62 0.1KB/s 00:00
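
An alternative worth noting: instead of copying the metadata with scp, the standby NameNode can pull it itself. A minimal sketch, assuming hadoop-node2 already has the same Hadoop configuration in place (this is not the step actually run here):

    [hadoop@hadoop-node2 ~]$ hdfs namenode -bootstrapStandby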

5) Format ZKFC (initialize the HA state znode in ZooKeeper)

  1. [hadoop@hadoop-node1 ~]$ hdfs zkfc -formatZK
  2. 15/07/21 18:10:16 INFO tools.DFSZKFailoverController: Failover controller configured for NameNode NameNode at hadoop-node1/172.16.1.41:9000
  3. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT
  4. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:host.name=hadoop-node1.csoftintl.com
  5. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.version=1.7.0_75
  6. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.vendor=Oracle Corporation
  7. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.home=/usr/java/jdk1.7.0_75/jre
  8. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.class.path=/home/hadoop/app/hadoop-2.7.1/etc/hadoop:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-math3-3.1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-auth-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/mockito-all-1.8.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpclient-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/gson-2.2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsp-api-2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-httpclient-3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/httpcore-4.2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-configuration-1.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jsch-0.1.42.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-net-
3.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-recipes-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/commons-digester-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jets3t-0.9.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/curator-framework-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-api-1.7.10.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/hadoop-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/htrace-core-3.1.0-incubating.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1-tes
ts.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/servlet-api-2.5.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-collections-3.2.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-lang-2.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/activation-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jetty-6.1.26.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-client-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guava-11.0.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-cli-1.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/commons-codec-1.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-json-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/lib/jettison-1.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-api-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-registry-2.7.1.jar:/home/hado
op/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-sharedcachemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-client-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/asm-3.2.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/javax.inject-1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/xz-1.0.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/junit-4.11.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.1.jar:/home/hadoop/app/hadoop-2.7.1/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1-tests.jar:/home/hadoop/app/hadoop/contrib/capacity-scheduler/*.jar
  9. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.library.path=/home/hadoop/app/hadoop-2.7.1/lib/native
  10. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp
  11. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:java.compiler=<NA>
  12. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:os.name=Linux
  13. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:os.arch=amd64
  14. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:os.version=2.6.32-504.23.4.el6.x86_64
  15. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:user.name=hadoop
  16. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:user.home=/home/hadoop
  17. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Client environment:user.dir=/home/hadoop
  18. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181 sessionTimeout=5000 watcher=org.apache.hadoop.ha.ActiveStandbyElector$WatcherWithClientRef@1d6a6027
  19. 15/07/21 18:10:16 INFO zookeeper.ClientCnxn: Opening socket connection to server hadoop-node6.csoftintl.com/172.16.1.46:2181. Will not attempt to authenticate using SASL (unknown error)
  20. 15/07/21 18:10:16 INFO zookeeper.ClientCnxn: Socket connection established to hadoop-node6.csoftintl.com/172.16.1.46:2181, initiating session
  21. 15/07/21 18:10:16 INFO zookeeper.ClientCnxn: Session establishment complete on server hadoop-node6.csoftintl.com/172.16.1.46:2181, sessionid = 0x24eb010cc5b0000, negotiated timeout = 5000
  22. 15/07/21 18:10:16 INFO ha.ActiveStandbyElector: Session connected.
  23. 15/07/21 18:10:16 INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/ns1 in ZK.
  24. 15/07/21 18:10:16 INFO zookeeper.ZooKeeper: Session: 0x24eb010cc5b0000 closed
  25. 15/07/21 18:10:16 INFO zookeeper.ClientCnxn: EventThread shut down
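
To double-check that the znode was created, you can query ZooKeeper directly. A quick sketch, assuming the stock zkCli.sh client is on the PATH on one of the ZooKeeper nodes (the session shown is illustrative, not from the original run):

    [hadoop@hadoop-node5 ~]$ zkCli.sh -server hadoop-node5:2181
    [zk: hadoop-node5:2181(CONNECTED) 0] ls /hadoop-ha
    [ns1]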

6) Start HDFS (run on hadoop-node1)

  1. [hadoop@hadoop-node1 ~]$ start-dfs.sh
  2. Starting namenodes on [hadoop-node1 hadoop-node2]
  3. hadoop-node1: starting namenode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-namenode-hadoop-node1.csoftintl.com.out
  4. hadoop-node2: starting namenode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-namenode-hadoop-node2.csoftintl.com.out
  5. hadoop-node7: starting datanode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node7.csoftintl.com.out
  6. hadoop-node5: starting datanode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node5.csoftintl.com.out
  7. hadoop-node6: starting datanode, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-datanode-hadoop-node6.csoftintl.com.out
  8. Starting journal nodes [hadoop-node5 hadoop-node6 hadoop-node7]
  9. hadoop-node5: journalnode running as process 29509. Stop it first.
  10. hadoop-node6: journalnode running as process 28655. Stop it first.
  11. hadoop-node7: journalnode running as process 28115. Stop it first.
  12. Starting ZK Failover Controllers on NN hosts [hadoop-node1 hadoop-node2]
  13. hadoop-node1: starting zkfc, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-zkfc-hadoop-node1.csoftintl.com.out
  14. hadoop-node2: starting zkfc, logging to /home/hadoop/app/hadoop-2.7.1/logs/hadoop-hadoop-zkfc-hadoop-node2.csoftintl.com.out

7) Start YARN (run start-yarn.sh on hadoop-node3; start the second ResourceManager on hadoop-node4)

  1. [hadoop@hadoop-node3 logs]$ start-yarn.sh
  2. starting yarn daemons
  3. starting resourcemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-resourcemanager-hadoop-node3.csoftintl.com.out
  4. hadoop-node5: starting nodemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node5.csoftintl.com.out
  5. hadoop-node6: starting nodemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node6.csoftintl.com.out
  6. hadoop-node7: starting nodemanager, logging to /home/hadoop/app/hadoop-2.7.1/logs/yarn-hadoop-nodemanager-hadoop-node7.csoftintl.com.out
  7. [hadoop@hadoop-node3 logs]$ jps
  8. 29011 Jps
  9. 28938 ResourceManager
  10. [hadoop@hadoop-node4 hadoop]$ yarn-daemon.sh start resourcemanager

8) Finally, verify the running services on every node

  1. [hadoop@hadoop-node1 ~]$ jps
  2. 23383 NameNode
  3. 23874 Jps
  4. 23693 DFSZKFailoverController
  5. [hadoop@hadoop-node2 ~]$ jps
  6. 16930 DFSZKFailoverController
  7. 16991 Jps
  8. 16823 NameNode
  9. [hadoop@hadoop-node3 ~]$ jps
  10. 29011 Jps
  11. 28938 ResourceManager
  12. [hadoop@hadoop-node4 ~]$ jps
  13. 27233 ResourceManager
  14. 27370 Jps
  15. [hadoop@hadoop-node5 ~]$ jps
  16. 29509 JournalNode
  17. 31094 NodeManager
  18. 29398 QuorumPeerMain
  19. 31218 Jps
  20. 29618 DataNode
  21. [hadoop@hadoop-node6 ~]$ jps
  22. 30339 Jps
  23. 28655 JournalNode
  24. 28555 QuorumPeerMain
  25. 30215 NodeManager
  26. 28764 DataNode
  27. [hadoop@hadoop-node7 ~]$ jps
  28. 28037 QuorumPeerMain
  29. 28115 JournalNode
  30. 29675 NodeManager
  31. 28224 DataNode
  32. 29799 Jps

9) Test each component's functionality
1、HDFS

  1. [hadoop@hadoop-node1 ~]$ hadoop fs -put HTTP_20130313143750.dat /
  2. [hadoop@hadoop-node1 ~]$ hadoop fs -ls /
  3. Found 1 items
  4. -rw-r--r-- 3 hadoop supergroup 2229 2015-07-21 18:50 /HTTP_20130313143750.dat
  5. [hadoop@hadoop-node1 ~]$ hdfs haadmin -getServiceState nn1
  6. active
  7. [hadoop@hadoop-node1 ~]$ hdfs haadmin -getServiceState nn2
  8. standby
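
With the ZKFCs running, automatic failover can be exercised by killing the active NameNode and watching the standby take over. A hedged sketch using the PID from the jps listing above (expected output, not captured from the original session):

    [hadoop@hadoop-node1 ~]$ kill -9 23383                      # the active NameNode's PID from jps
    [hadoop@hadoop-node1 ~]$ hdfs haadmin -getServiceState nn2
    active
    [hadoop@hadoop-node1 ~]$ hadoop-daemon.sh start namenode    # restart nn1; it rejoins as standby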

Web UI:

  1. http://192.168.7.41:50070

2、YARN

  1. [hadoop@hadoop-node1 ~]$ cp app/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar .
  2. Error: Could not find or load main class hadoop-mapreduce-examples-2.7.1.jar (this error typically means the jar was passed to "hadoop" without the "jar" subcommand; the correct invocation follows)
  3. [hadoop@hadoop-node1 ~]$ hadoop jar hadoop-mapreduce-examples-2.7.1.jar pi 5 5
  4. Number of Maps = 5
  5. Samples per Map = 5
  6. Wrote input for Map #0
  7. Wrote input for Map #1
  8. Wrote input for Map #2
  9. Wrote input for Map #3
  10. Wrote input for Map #4
  11. Starting Job
  12. 15/07/21 19:20:36 INFO input.FileInputFormat: Total input paths to process : 5
  13. 15/07/21 19:20:37 INFO mapreduce.JobSubmitter: number of splits:5
  14. 15/07/21 19:20:38 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1437475325594_0001
  15. 15/07/21 19:20:38 INFO impl.YarnClientImpl: Submitted application application_1437475325594_0001
  16. 15/07/21 19:20:38 INFO mapreduce.Job: The url to track the job: http://hadoop-node3:8088/proxy/application_1437475325594_0001/
  17. 15/07/21 19:20:38 INFO mapreduce.Job: Running job: job_1437475325594_0001
  18. 15/07/21 19:20:52 INFO mapreduce.Job: Job job_1437475325594_0001 running in uber mode : false
  19. 15/07/21 19:20:52 INFO mapreduce.Job: map 0% reduce 0%
  20. 15/07/21 19:21:15 INFO mapreduce.Job: map 100% reduce 0%
  21. 15/07/21 19:21:29 INFO mapreduce.Job: map 100% reduce 100%
  22. 15/07/21 19:21:30 INFO mapreduce.Job: Job job_1437475325594_0001 completed successfully
  23. 15/07/21 19:21:30 INFO mapreduce.Job: Counters: 49
  24. File System Counters
  25. FILE: Number of bytes read=116
  26. FILE: Number of bytes written=709755
  27. FILE: Number of read operations=0
  28. FILE: Number of large read operations=0
  29. FILE: Number of write operations=0
  30. HDFS: Number of bytes read=1275
  31. HDFS: Number of bytes written=215
  32. HDFS: Number of read operations=23
  33. HDFS: Number of large read operations=0
  34. HDFS: Number of write operations=3
  35. Job Counters
  36. Launched map tasks=5
  37. Launched reduce tasks=1
  38. Data-local map tasks=5
  39. Total time spent by all maps in occupied slots (ms)=100795
  40. Total time spent by all reduces in occupied slots (ms)=11156
  41. Total time spent by all map tasks (ms)=100795
  42. Total time spent by all reduce tasks (ms)=11156
  43. Total vcore-seconds taken by all map tasks=100795
  44. Total vcore-seconds taken by all reduce tasks=11156
  45. Total megabyte-seconds taken by all map tasks=103214080
  46. Total megabyte-seconds taken by all reduce tasks=11423744
  47. Map-Reduce Framework
  48. Map input records=5
  49. Map output records=10
  50. Map output bytes=90
  51. Map output materialized bytes=140
  52. Input split bytes=685
  53. Combine input records=0
  54. Combine output records=0
  55. Reduce input groups=2
  56. Reduce shuffle bytes=140
  57. Reduce input records=10
  58. Reduce output records=0
  59. Spilled Records=20
  60. Shuffled Maps =5
  61. Failed Shuffles=0
  62. Merged Map outputs=5
  63. GC time elapsed (ms)=358
  64. CPU time spent (ms)=5740
  65. Physical memory (bytes) snapshot=1323433984
  66. Virtual memory (bytes) snapshot=4129165312
  67. Total committed heap usage (bytes)=846725120
  68. Shuffle Errors
  69. BAD_ID=0
  70. CONNECTION=0
  71. IO_ERROR=0
  72. WRONG_LENGTH=0
  73. WRONG_MAP=0
  74. WRONG_REDUCE=0
  75. File Input Format Counters
  76. Bytes Read=590
  77. File Output Format Counters
  78. Bytes Written=97
  79. Job Finished in 55.254 seconds
  80. Estimated value of Pi is 3.68000000000000000000
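
The estimate is coarse because the job sampled only 5 maps x 5 samples = 25 points; the quasi-Monte Carlo estimator tightens as the sample count grows. For example (an illustrative re-run, not from the original session):

    [hadoop@hadoop-node1 ~]$ hadoop jar hadoop-mapreduce-examples-2.7.1.jar pi 10 10000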

Web UI:

  1. http://192.168.7.43:8088/cluster

六、Hive installation, configuration, and usage
1、Download and deploy

  1. [hadoop@hadoop-node2 tools]$ wget http://apache.communilink.net/hive/hive-1.2.1/apache-hive-1.2.1-bin.tar.gz
  2. --2015-07-22 14:03:05-- http://apache.communilink.net/hive/hive-1.2.1/apache-hive-1.2.1-bin.tar.gz
  3. Resolving apache.communilink.net... 203.124.11.85
  4. Connecting to apache.communilink.net|203.124.11.85|:80... connected.
  5. HTTP request sent, awaiting response... 200 OK
  6. Length: 92834839 (89M) [application/x-gzip]
  7. Saving to: `apache-hive-1.2.1-bin.tar.gz'
  8. 100%[===============================================================>] 92,834,839 974K/s in 98s
  9. 2015-07-22 14:04:43 (928 KB/s) - `apache-hive-1.2.1-bin.tar.gz' saved [92834839/92834839]
  1. [hadoop@hadoop-node2 tools]$ tar xfvz apache-hive-1.2.1-bin.tar.gz -C ../app
  2. [hadoop@hadoop-node2 app]$ ln -s apache-hive-1.2.1-bin/ hive
  3. [hadoop@hadoop-node2 app]$ ll
  4. total 8
  5. drwxrwxr-x 9 hadoop hadoop 4096 Jul 22 14:07 apache-hive-1.2.1-bin
  6. lrwxrwxrwx 1 hadoop hadoop 30 Jul 21 13:08 hadoop -> /home/hadoop/app/hadoop-2.7.1/
  7. drwxrwxr-x 11 hadoop hadoop 4096 Jul 21 18:11 hadoop-2.7.1
  8. lrwxrwxrwx 1 hadoop hadoop 22 Jul 22 14:06 hive -> apache-hive-1.2.1-bin/
  1. [hadoop@hadoop-node2 app]$ hive/bin/hive
  2. Logging initialized using configuration in jar:file:/home/hadoop/app/apache-hive-1.2.1-bin/lib/hive-common-1.2.1.jar!/hive-log4j.properties
  3. hive> exit;
  4. [hadoop@hadoop-node2 app]$ ll
  5. total 36
  6. drwxrwxr-x 9 hadoop hadoop 4096 Jul 22 14:07 apache-hive-1.2.1-bin
  7. -rw-rw-r-- 1 hadoop hadoop 21071 Jul 22 14:27 derby.log
  8. lrwxrwxrwx 1 hadoop hadoop 30 Jul 21 13:08 hadoop -> /home/hadoop/app/hadoop-2.7.1/
  9. drwxrwxr-x 11 hadoop hadoop 4096 Jul 21 18:11 hadoop-2.7.1
  10. lrwxrwxrwx 1 hadoop hadoop 22 Jul 22 14:06 hive -> apache-hive-1.2.1-bin/
  11. drwxrwxr-x 5 hadoop hadoop 4096 Jul 22 14:27 metastore_db

2、Create a database and a table

  1. hive> show databases;
  2. OK
  3. default
  4. Time taken: 1.294 seconds, Fetched: 1 row(s)
  5. hive> create database csoft;
  6. OK
  7. Time taken: 0.432 seconds
  8. hive> use csoft;
  9. OK
  10. Time taken: 0.026 seconds
  11. hive> create table t_mygirls(id int,name string,age int,size string)
  12. > row format delimited
  13. > fields terminated by "\t";
  14. OK
  15. Time taken: 0.552 seconds
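
The schema can be verified with DESCRIBE; and since the table is declared with fields terminated by "\t", the data file uploaded in the next step must be tab-delimited. Expected output (illustrative):

    hive> describe t_mygirls;
    OK
    id      int
    name    string
    age     int
    size    string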

3、Upload the data

  1. [hadoop@hadoop-node2 hive]$ vim mygirls.dat
  2. 1 baby 18 36C
  3. 2 yifei 19 37B
  4. 3 yangmi 22 36A
  5. 4 zhiling 36 36C
  6. 5 fengjie 29 39A
  7. [hadoop@hadoop-node2 hive]$ hadoop fs -put mygirls.dat /user/hive/warehouse/csoft.db/t_mygirls

4、Test queries

  1. hive> use csoft;
  2. OK
  3. hive> select * from t_mygirls;
  4. OK
  5. 1 baby 18 36C
  6. 2 yifei 19 37B
  7. 3 yangmi 22 36A
  8. 4 zhiling 36 36C
  9. 5 fengjie 29 39A
  10. Time taken: 1.502 seconds, Fetched: 5 row(s)
  11. hive> select count(*) from t_mygirls;
  12. Query ID = hadoop_20150722160910_1d513448-3c28-4c74-aed1-2a26bcfe852c
  13. Total jobs = 1
  14. Launching Job 1 out of 1
  15. Number of reduce tasks determined at compile time: 1
  16. In order to change the average load for a reducer (in bytes):
  17. set hive.exec.reducers.bytes.per.reducer=<number>
  18. In order to limit the maximum number of reducers:
  19. set hive.exec.reducers.max=<number>
  20. In order to set a constant number of reducers:
  21. set mapreduce.job.reduces=<number>
  22. Starting Job = job_1437530701036_0005, Tracking URL = http://hadoop-node3:8088/proxy/application_1437530701036_0005/
  23. Kill Command = /home/hadoop/app/hadoop-2.7.1/bin/hadoop job -kill job_1437530701036_0005
  24. Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
  25. 2015-07-22 16:09:27,490 Stage-1 map = 0%, reduce = 0%
  26. 2015-07-22 16:09:38,131 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.83 sec
  27. 2015-07-22 16:09:52,022 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 6.54 sec
  28. MapReduce Total cumulative CPU time: 6 seconds 540 msec
  29. Ended Job = job_1437530701036_0005
  30. MapReduce Jobs Launched:
  31. Stage-Stage-1: Map: 1 Reduce: 1 Cumulative CPU: 6.54 sec HDFS Read: 6605 HDFS Write: 2 SUCCESS
  32. Total MapReduce CPU Time Spent: 6 seconds 540 msec
  33. OK
  34. 5
  35. Time taken: 43.983 seconds, Fetched: 1 row(s)

5、Edit the Hive config file to use MySQL as the metastore

  1. [hadoop@hadoop-node2 hive]$ cp conf/hive-default.xml.template conf/hive-site.xml
  2. [hadoop@hadoop-node2 hive]$ vim conf/hive-site.xml
  3. Line 394:
  4. <property>
  5. <name>javax.jdo.option.ConnectionURL</name>
  6. <value>jdbc:mysql://192.168.7.10:3306/hive?createDatabaseIfNotExist=true</value>
  7. <description>JDBC connect string for a JDBC metastore</description>
  8. </property>
  9. Line 789:
  10. <property>
  11. <name>javax.jdo.option.ConnectionDriverName</name>
  12. <value>com.mysql.jdbc.Driver</value>
  13. <description>Driver class name for a JDBC metastore</description>
  14. </property>
  15. Line 814:
  16. <property>
  17. <name>javax.jdo.option.ConnectionUserName</name>
  18. <value>hive</value>
  19. <description>Username to use against metastore database</description>
  20. </property>
  21. Line 380:
  22. <property>
  23. <name>javax.jdo.option.ConnectionPassword</name>
  24. <value>hive</value>
  25. <description>password to use against metastore database</description>
  26. </property>
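
An alternative that avoids editing the large template in place is a minimal hive-site.xml containing only the four overridden properties (values as above); Hive falls back to its built-in defaults for everything else:

    <configuration>
      <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://192.168.7.10:3306/hive?createDatabaseIfNotExist=true</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>hive</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>hive</value>
      </property>
    </configuration>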

Note: change the character set of the hive metastore database in MySQL first, otherwise the metastore will throw errors (typically "Specified key was too long; max key length is 767 bytes" when the database is utf8):

  1. mysql> alter database hive character set latin1;
  2. Query OK, 1 row affected (0.02 sec)

6、Install the MySQL JDBC connector jar

  1. [hadoop@hadoop-node2 conf]$ cd ../lib/
  2. [hadoop@hadoop-node2 lib]$ ls -ltr
  3. total 96144
  4. -rw-r--r-- 1 hadoop hadoop 875336 Jan 9 2014 mysql-connector-java-5.1.28.jar
  5. [hadoop@hadoop-node2 hive]$ hive
  6. Logging initialized using configuration in jar:file:/home/hadoop/app/apache-hive-1.2.1-bin/lib/hive-common-1.2.1.jar!/hive-log4j.properties
  7. hive> show databases;
  8. OK
  9. default
  10. Time taken: 1.585 seconds, Fetched: 1 row(s)
  1. mysql> create database hive;
  2. Query OK, 1 row affected (0.08 sec)
  3. mysql> grant all on hive.* to 'hive'@'192.168.7.%' identified by "hive";
  4. Query OK, 0 rows affected (0.00 sec)
  5. mysql> select user,host from mysql.user;
  6. +----------+-------------+
  7. | user | host |
  8. +----------+-------------+
  9. | hive | 192.168.7.% |
  10. +----------+-------------+
  11. 9 rows in set (0.02 sec)

7、Create a table and verify it in the metastore

  1. hive> create table t_order(id int,name string,money double)
  2. > row format delimited
  3. > fields terminated by ',';
  4. OK
  5. Time taken: 24.432 seconds
  6. hive> show tables;
  7. OK
  8. t_order
  9. Time taken: 0.037 seconds, Fetched: 1 row(s)
  10. mysql> select * from TBLS\G
  11. *************************** 1. row ***************************
  12. TBL_ID: 1
  13. CREATE_TIME: 1437556284
  14. DB_ID: 1
  15. LAST_ACCESS_TIME: 0
  16. OWNER: hadoop
  17. RETENTION: 0
  18. SD_ID: 1
  19. TBL_NAME: t_order
  20. TBL_TYPE: MANAGED_TABLE
  21. VIEW_EXPANDED_TEXT: NULL
  22. VIEW_ORIGINAL_TEXT: NULL
  23. 1 row in set (0.00 sec)
  24. mysql> select * from COLUMNS_V2\G
  25. *************************** 1. row ***************************
  26. CD_ID: 1
  27. COMMENT: NULL
  28. COLUMN_NAME: id
  29. TYPE_NAME: int
  30. INTEGER_IDX: 0
  31. *************************** 2. row ***************************
  32. CD_ID: 1
  33. COMMENT: NULL
  34. COLUMN_NAME: money
  35. TYPE_NAME: double
  36. INTEGER_IDX: 2
  37. *************************** 3. row ***************************
  38. CD_ID: 1
  39. COMMENT: NULL
  40. COLUMN_NAME: name
  41. TYPE_NAME: string
  42. INTEGER_IDX: 1
  43. 3 rows in set (0.00 sec)

8、Load data

  1. [hadoop@hadoop-node2 ~]$ vim order.data
  2. [hadoop@hadoop-node2 ~]$ ll order.data
  3. -rw-rw-r-- 1 hadoop hadoop 54 Jul 22 17:39 order.data
  4. hive> load data local inpath '/home/hadoop/order.data' into table t_order;
  5. Loading data to table default.t_order
  6. Table default.t_order stats: [numFiles=1, totalSize=54]
  7. OK
  8. Time taken: 1.03 seconds
  9. hive> select * from t_order;
  10. OK
  11. 1 iphone 6888.0
  12. 2 xiaomi 2399.0
  13. 3 meizu 2499.0
  14. 4 mate7 4398.0
  15. Time taken: 0.474 seconds, Fetched: 4 row(s)
  1. [hadoop@hadoop-node2 ~]$ hadoop fs -put order.data /order.data.2
  2. [hadoop@hadoop-node2 ~]$ hadoop fs -ls /
  3. Found 4 items
  4. drwxr-xr-x - hadoop supergroup 0 2015-07-21 18:51 /flow
  5. -rw-r--r-- 3 hadoop supergroup 54 2015-07-22 17:41 /order.data.2
  6. drwx------ - hadoop supergroup 0 2015-07-22 14:07 /tmp
  7. drwxr-xr-x - hadoop supergroup 0 2015-07-22 17:03 /user
  8. hive> load data inpath '/order.data.2' into table t_order;
  9. Loading data to table default.t_order
  10. Table default.t_order stats: [numFiles=2, totalSize=108]
  11. OK
  12. Time taken: 0.489 seconds
  13. hive> select * from t_order;
  14. OK
  15. 1 iphone 6888.0
  16. 2 xiaomi 2399.0
  17. 3 meizu 2499.0
  18. 4 mate7 4398.0
  19. 1 iphone 6888.0
  20. 2 xiaomi 2399.0
  21. 3 meizu 2499.0
  22. 4 mate7 4398.0
  23. Time taken: 0.188 seconds, Fetched: 8 row(s)
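
Note that LOAD DATA INPATH (without LOCAL) moves the HDFS file into the table's warehouse directory rather than copying it, which is why /order.data.2 disappears from / afterwards. Expected check (illustrative, not from the original session):

    [hadoop@hadoop-node2 ~]$ hadoop fs -ls /order.data.2
    ls: `/order.data.2': No such file or directory
    [hadoop@hadoop-node2 ~]$ hadoop fs -ls /user/hive/warehouse/t_order    # the file now lives here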

9、Create an external table

  1. hive> create external table t_order_ex(id int,name string,money double)
  2. > row format delimited
  3. > fields terminated by ','
  4. > location '/hive-tmp/order';
  5. OK
  6. Time taken: 0.151 seconds
  7. hive> load data local inpath '/home/hadoop/order.data' into table t_order_ex;
  8. Loading data to table default.t_order_ex
  9. Table default.t_order_ex stats: [numFiles=0, totalSize=0]
  10. OK
  11. Time taken: 0.524 seconds
  12. hive> select * from t_order_ex;
  13. OK
  14. 1 iphone 6888.0
  15. 2 xiaomi 2399.0
  16. 3 meizu 2499.0
  17. 4 mate7 4398.0
  18. Time taken: 0.182 seconds, Fetched: 4 row(s)
  19. mysql> select * from TBLS\G
  20. *************************** 1. row ***************************
  21. TBL_ID: 1
  22. CREATE_TIME: 1437556284
  23. DB_ID: 1
  24. LAST_ACCESS_TIME: 0
  25. OWNER: hadoop
  26. RETENTION: 0
  27. SD_ID: 1
  28. TBL_NAME: t_order
  29. TBL_TYPE: MANAGED_TABLE
  30. VIEW_EXPANDED_TEXT: NULL
  31. VIEW_ORIGINAL_TEXT: NULL
  32. *************************** 2. row ***************************
  33. TBL_ID: 2
  34. CREATE_TIME: 1437558341
  35. DB_ID: 1
  36. LAST_ACCESS_TIME: 0
  37. OWNER: hadoop
  38. RETENTION: 0
  39. SD_ID: 2
  40. TBL_NAME: t_order_ex
  41. TBL_TYPE: EXTERNAL_TABLE
  42. VIEW_EXPANDED_TEXT: NULL
  43. VIEW_ORIGINAL_TEXT: NULL
  44. 2 rows in set (0.00 sec)

The difference between managed (internal) and external tables: after DROP TABLE, an external table's data files are still in HDFS; only the table definition and its metastore entry are gone.

  1. hive> drop table t_order_ex;
  2. OK
  3. Time taken: 32.076 seconds
  4. [hadoop@hadoop-node2 ~]$ hadoop fs -ls /hive-tmp/order
  5. Found 1 items
  6. -rwxr-xr-x 3 hadoop supergroup 54 2015-07-22 17:46 /hive-tmp/order/order.data
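
Because the data files survive the drop, simply re-declaring the external table at the same location brings the rows straight back; a sketch (expected behavior, not from the original session):

    hive> create external table t_order_ex(id int,name string,money double)
        > row format delimited
        > fields terminated by ','
        > location '/hive-tmp/order';
    OK
    hive> select * from t_order_ex;    -- returns the same 4 rows again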

10、Create a table from an existing table: CREATE TABLE ... AS SELECT

  1. hive> create table t_order_sample
  2. > as
  3. > select name,money from t_order;
  4. Query ID = hadoop_20150723115105_2a68366f-ce9a-40a3-810b-1634acab2bc4
  5. Total jobs = 3
  6. Launching Job 1 out of 3
  7. Number of reduce tasks is set to 0 since there's no reduce operator
  8. Starting Job = job_1437530701036_0008, Tracking URL = http://hadoop-node3:8088/proxy/application_1437530701036_0008/
  9. Kill Command = /home/hadoop/app/hadoop-2.7.1/bin/hadoop job -kill job_1437530701036_0008
  10. Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
  11. 2015-07-23 11:51:25,498 Stage-1 map = 0%, reduce = 0%
  12. 2015-07-23 11:51:37,143 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.58 sec
  13. MapReduce Total cumulative CPU time: 2 seconds 580 msec
  14. Ended Job = job_1437530701036_0008
  15. Stage-4 is selected by condition resolver.
  16. Stage-3 is filtered out by condition resolver.
  17. Stage-5 is filtered out by condition resolver.
  18. Moving data to: hdfs://ns1/user/hive/warehouse/.hive-staging_hive_2015-07-23_11-51-05_828_639578264040662267-1/-ext-10001
  19. Moving data to: hdfs://ns1/user/hive/warehouse/t_order_sample
  20. Table default.t_order_sample stats: [numFiles=1, numRows=8, totalSize=108, rawDataSize=100]
  21. MapReduce Jobs Launched:
  22. Stage-Stage-1: Map: 1 Cumulative CPU: 2.58 sec HDFS Read: 3088 HDFS Write: 187 SUCCESS
  23. Total MapReduce CPU Time Spent: 2 seconds 580 msec
  24. OK
  25. Time taken: 40.675 seconds
  1. hive> select * from t_order_sample;
  2. OK
  3. iphone 6888.0
  4. xiaomi 2399.0
  5. meizu 2499.0
  6. mate7 4398.0
  7. iphone 6888.0
  8. xiaomi 2399.0
  9. meizu 2499.0
  10. mate7 4398.0
  11. Time taken: 0.236 seconds, Fetched: 8 row(s)

11、Inserting data: the INSERT ... SELECT statement

  1. [hadoop@hadoop-node2 hive]$ vim order.data.2
  2. 5,caomei,25
  3. 6,xianggua,2
  4. 7,chengzi,4
  5. 8,apple,8
  6. ~
  7. hive> load data local inpath '/home/hadoop/app/hive/order.data.2' into table t_order;
  8. Loading data to table default.t_order
  9. Table default.t_order stats: [numFiles=3, totalSize=155]
  10. OK
  11. Time taken: 0.717 seconds
  12. hive> select * from t_order;
  13. OK
  14. 1 iphone 6888.0
  15. 2 xiaomi 2399.0
  16. 3 meizu 2499.0
  17. 4 mate7 4398.0
  18. 1 iphone 6888.0
  19. 2 xiaomi 2399.0
  20. 3 meizu 2499.0
  21. 4 mate7 4398.0
  22. 5 caomei 25.0
  23. 6 xianggua 2.0
  24. 7 chengzi 4.0
  25. 8 apple 8.0
  26. Time taken: 0.208 seconds, Fetched: 12 row(s)
  27. hive> insert into table t_order_sample
  28. > select name,money from t_order
  29. > where id>4;
  30. Query ID = hadoop_20150723120112_b0728032-db77-4026-9828-3ac468d7647e
  31. Total jobs = 3
  32. Launching Job 1 out of 3
  33. Number of reduce tasks is set to 0 since there's no reduce operator
  34. Starting Job = job_1437530701036_0009, Tracking URL = http://hadoop-node3:8088/proxy/application_1437530701036_0009/
  35. Kill Command = /home/hadoop/app/hadoop-2.7.1/bin/hadoop job -kill job_1437530701036_0009
  36. Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
  37. 2015-07-23 12:01:33,570 Stage-1 map = 0%, reduce = 0%
  38. 2015-07-23 12:01:45,170 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 4.01 sec
  39. MapReduce Total cumulative CPU time: 4 seconds 10 msec
  40. Ended Job = job_1437530701036_0009
  41. Stage-4 is selected by condition resolver.
  42. Stage-3 is filtered out by condition resolver.
  43. Stage-5 is filtered out by condition resolver.
  44. Moving data to: hdfs://ns1/user/hive/warehouse/t_order_sample/.hive-staging_hive_2015-07-23_12-01-12_051_7473768812803361164-1/-ext-10000
  45. Loading data to table default.t_order_sample
  46. Table default.t_order_sample stats: [numFiles=2, numRows=12, totalSize=155, rawDataSize=143]
  47. MapReduce Jobs Launched:
  48. Stage-Stage-1: Map: 1 Cumulative CPU: 4.01 sec HDFS Read: 4118 HDFS Write: 125 SUCCESS
  49. Total MapReduce CPU Time Spent: 4 seconds 10 msec
  50. OK
  51. Time taken: 40.232 seconds
  1. hive> select * from t_order_sample;
  2. OK
  3. iphone 6888.0
  4. xiaomi 2399.0
  5. meizu 2499.0
  6. mate7 4398.0
  7. iphone 6888.0
  8. xiaomi 2399.0
  9. meizu 2499.0
  10. mate7 4398.0
  11. caomei 25.0
  12. xianggua 2.0
  13. chengzi 4.0
  14. apple 8.0
  15. Time taken: 0.221 seconds, Fetched: 12 row(s)

七、HBase
1、Install and deploy

  1. [hadoop@hadoop-node1 tools]$ wget http://apache.01link.hk/hbase/1.1.1/hbase-1.1.1-bin.tar.gz
  2. --2015-07-23 16:12:34-- http://apache.01link.hk/hbase/1.1.1/hbase-1.1.1-bin.tar.gz
  3. Resolving apache.01link.hk... 101.78.134.82
  4. Connecting to apache.01link.hk|101.78.134.82|:80... connected.
  5. HTTP request sent, awaiting response... 200 OK
  6. Length: 102487389 (98M) [application/x-gzip]
  7. Saving to: `hbase-1.1.1-bin.tar.gz'
  8. 100%[===============================================================>] 102,487,389 1.70M/s in 63s
  9. 2015-07-23 16:13:38 (1.54 MB/s) - `hbase-1.1.1-bin.tar.gz' saved [102487389/102487389]
  1. [hadoop@hadoop-node1 tools]$ tar xf hbase-1.1.1-bin.tar.gz -C ../app/
  2. [hadoop@hadoop-node1 tools]$ ln -s ../app/hbase-1.1.1/ ../app/hbase
  3. [hadoop@hadoop-node1 app]$ ll
  4. total 8
  5. lrwxrwxrwx 1 hadoop hadoop 30 Jul 21 13:08 hadoop -> /home/hadoop/app/hadoop-2.7.1/
  6. drwxrwxr-x 11 hadoop hadoop 4096 Jul 21 18:11 hadoop-2.7.1
  7. lrwxrwxrwx 1 hadoop hadoop 19 Jul 23 16:16 hbase -> ../app/hbase-1.1.1/
  8. drwxrwxr-x 7 hadoop hadoop 4096 Jul 23 16:15 hbase-1.1.1

2、Configuration files
1)[hadoop@hadoop-node1 app]$ vim hbase/conf/hbase-env.sh

  1. Line 29:
  2. export JAVA_HOME=/usr/java/jdk/
  3. Line 131:
  4. export HBASE_MANAGES_ZK=false

2)[hadoop@hadoop-node1 app]$ vim hbase/conf/hbase-site.xml

  1. <configuration>
  2. <property>
  3. <name>hbase.rootdir</name>
  4. <value>hdfs://ns1/hbase</value>
  5. </property>
  6. <property>
  7. <name>hbase.cluster.distributed</name>
  8. <value>true</value>
  9. </property>
  10. <property>
  11. <name>hbase.zookeeper.quorum</name>
  12. <value>hadoop-node5:2181,hadoop-node6:2181,hadoop-node7:2181</value>
  13. </property>
  14. </configuration>

3)[hadoop@hadoop-node1 app]$ vim hbase/conf/regionservers

  1. hadoop-node5
  2. hadoop-node6
  3. hadoop-node7

4) Copy the needed Hadoop config files (so HBase can resolve the ns1 nameservice used in hbase.rootdir)

  1. [hadoop@hadoop-node1 app]$ cp hadoop/etc/hadoop/core-site.xml hadoop/etc/hadoop/hdfs-site.xml hbase/conf/

5) Distribute the HBase package

  1. scp -r hbase-1.1.1/ hadoop@hadoop-node2:/home/hadoop/app
  2. scp -r hbase-1.1.1/ hadoop@hadoop-node5:/home/hadoop/app
  3. scp -r hbase-1.1.1/ hadoop@hadoop-node6:/home/hadoop/app
  4. scp -r hbase-1.1.1/ hadoop@hadoop-node7:/home/hadoop/app

After the hbase symlink is created on each node (see the sketch below), the HBase cluster can be started.
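
For example, on each of the other nodes (absolute paths assumed):

    [hadoop@hadoop-node2 ~]$ ln -s /home/hadoop/app/hbase-1.1.1 /home/hadoop/app/hbase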

3、Start the cluster

    [hadoop@hadoop-node1 app]$ hbase/bin/start-hbase.sh
    starting master, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-master-hadoop-node1.csoftintl.com.out
    hadoop-node6: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node6.csoftintl.com.out
    hadoop-node5: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node5.csoftintl.com.out
    hadoop-node7: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node7.csoftintl.com.out
    [hadoop@hadoop-node1 app]$ jps
    22040 Jps
    21770 HMaster
    12417 NameNode
    11236 DFSZKFailoverController
    [hadoop@hadoop-node2 app]$ hbase/bin/start-hbase.sh
    starting master, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-master-hadoop-node2.csoftintl.com.out
    hadoop-node5: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node5.csoftintl.com.out
    hadoop-node7: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node7.csoftintl.com.out
    hadoop-node6: starting regionserver, logging to /home/hadoop/app/hbase/bin/../logs/hbase-hadoop-regionserver-hadoop-node6.csoftintl.com.out
    [hadoop@hadoop-node2 app]$ jps
    20818 Jps
    20697 HMaster
    15587 DFSZKFailoverController
    25736 NameNode
    [hadoop@hadoop-node5 app]$ jps
    3991 HRegionServer
    22260 QuorumPeerMain
    22452 NodeManager
    22561 JournalNode
    4133 Jps
    22339 DataNode
    [hadoop@hadoop-node6 app]$ jps
    8360 JournalNode
    22731 HRegionServer
    22903 Jps
    8171 DataNode
    8284 NodeManager
    8090 QuorumPeerMain
    [hadoop@hadoop-node7 app]$ jps
    19648 QuorumPeerMain
    19741 DataNode
    32436 HRegionServer
    19930 JournalNode
    19854 NodeManager
    32578 Jps

Management web UI:

    http://192.168.7.41:16010/master-status
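A scripted reachability check, assuming curl is available on the node; an HTTP 200 means the HMaster page is serving:

    # Expect "200" when the master-status page is up
    curl -s -o /dev/null -w '%{http_code}\n' http://192.168.7.41:16010/master-status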

4. Command-line client and administration commands

    [root@hadoop-node1 ~]# vim /etc/profile
    export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:/home/hadoop/app/hive/bin:/home/hadoop/app/hbase/bin
    [root@hadoop-node1 ~]# su - hadoop
    [hadoop@hadoop-node1 ~]$ source /etc/profile
    [hadoop@hadoop-node1 ~]$ hbase shell
    SLF4J: Class path contains multiple SLF4J bindings.
    SLF4J: Found binding in [jar:file:/home/hadoop/app/hbase-1.1.1/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
    SLF4J: Found binding in [jar:file:/home/hadoop/app/hadoop-2.7.1/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
    SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
    SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
    HBase Shell; enter 'help<RETURN>' for list of supported commands.
    Type "exit<RETURN>" to leave the HBase Shell
    Version 1.1.1, rd0a115a7267f54e01c72c603ec53e91ec418292f, Tue Jun 23 14:44:07 PDT 2015
    hbase(main):001:0>

    hbase(main):017:0> help
    HBase Shell, version 1.1.1, rd0a115a7267f54e01c72c603ec53e91ec418292f, Tue Jun 23 14:44:07 PDT 2015
    Type 'help "COMMAND"', (e.g. 'help "get"' -- the quotes are necessary) for help on a specific command.
    Commands are grouped. Type 'help "COMMAND_GROUP"', (e.g. 'help "general"') for help on a command group.
    COMMAND GROUPS:
    Group name: general
    Commands: status, table_help, version, whoami
    Group name: ddl
    Commands: alter, alter_async, alter_status, create, describe, disable, disable_all, drop, drop_all, enable, enable_all, exists, get_table, is_disabled, is_enabled, list, show_filters
    Group name: namespace
    Commands: alter_namespace, create_namespace, describe_namespace, drop_namespace, list_namespace, list_namespace_tables
    Group name: dml
    Commands: append, count, delete, deleteall, get, get_counter, get_splits, incr, put, scan, truncate, truncate_preserve
    Group name: tools
    Commands: assign, balance_switch, balancer, balancer_enabled, catalogjanitor_enabled, catalogjanitor_run, catalogjanitor_switch, close_region, compact, compact_rs, flush, major_compact, merge_region, move, split, trace, unassign, wal_roll, zk_dump
    Group name: replication
    Commands: add_peer, append_peer_tableCFs, disable_peer, disable_table_replication, enable_peer, enable_table_replication, list_peers, list_replicated_tables, remove_peer, remove_peer_tableCFs, set_peer_tableCFs, show_peer_tableCFs
    Group name: snapshots
    Commands: clone_snapshot, delete_all_snapshot, delete_snapshot, list_snapshots, restore_snapshot, snapshot
    Group name: configuration
    Commands: update_all_config, update_config
    Group name: quotas
    Commands: list_quotas, set_quota
    Group name: security
    Commands: grant, revoke, user_permission
    Group name: visibility labels
    Commands: add_labels, clear_auths, get_auths, list_labels, set_auths, set_visibility
    SHELL USAGE:
    Quote all names in HBase Shell such as table and column names. Commas delimit
    command parameters. Type <RETURN> after entering a command to run it.
    Dictionaries of configuration used in the creation and alteration of tables are
    Ruby Hashes. They look like this:
    {'key1' => 'value1', 'key2' => 'value2', ...}
    and are opened and closed with curley-braces. Key/values are delimited by the
    '=>' character combination. Usually keys are predefined constants such as
    NAME, VERSIONS, COMPRESSION, etc. Constants do not need to be quoted. Type
    'Object.constants' to see a (messy) list of all constants in the environment.
    If you are using binary keys or values and need to enter them in the shell, use
    double-quote'd hexadecimal representation. For example:
    hbase> get 't1', "key\x03\x3f\xcd"
    hbase> get 't1', "key\003\023\011"
    hbase> put 't1', "test\xef\xff", 'f1:', "\x01\x33\x40"
    The HBase shell is the (J)Ruby IRB with the above HBase-specific commands added.
    For more on the HBase Shell, see http://hbase.apache.org/book.html
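The shell also runs non-interactively, which is convenient for scripting; a small sketch using two commands from the "general" group listed above:

    # "status" summarizes servers and regions, "whoami" shows the connecting user
    printf "status\nwhoami\n" | hbase shell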

Create a table (the base_info family is declared with VERSIONS => 3, so HBase retains up to three versions of each of its cells; extra_info keeps the default of one version):

    hbase(main):020:0> create 'user_info',{NAME => 'base_info',VERSIONS => 3},{NAME => 'extra_info'}
    0 row(s) in 4.5880 seconds
    => Hbase::Table - user_info
    hbase(main):022:0> list
    TABLE
    user_info
    1 row(s) in 0.0130 seconds
    => ["user_info"]
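To confirm the per-family settings took effect, describe the table; VERSIONS should read '3' for base_info and the default '1' for extra_info (a hedged check, output omitted):

    # Print the table schema, including VERSIONS on each column family
    echo "describe 'user_info'" | hbase shell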

Insert data (all five puts target the same rowkey '001', so together they build one row spanning both column families):

    hbase(main):024:0> put 'user_info','001','base_info:name','fengjie'
    0 row(s) in 0.1610 seconds
    hbase(main):025:0> put 'user_info','001','base_info:age','28'
    0 row(s) in 0.0200 seconds
    hbase(main):026:0> put 'user_info','001','base_info:addr','New York'
    0 row(s) in 0.0150 seconds
    hbase(main):027:0> put 'user_info','001','extra_info:sex','female'
    0 row(s) in 0.0170 seconds
    hbase(main):028:0> put 'user_info','001','base_info:phonenbr','13813838888'
    0 row(s) in 0.0160 seconds
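Since every cell above shares rowkey '001', the table still holds exactly one row; counting is a quick way to verify that (a sketch, not part of the original session):

    # Expect a count of 1, because all puts targeted rowkey '001'
    echo "count 'user_info'" | hbase shell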

Query data:

    hbase(main):029:0> scan 'user_info'
    ROW COLUMN+CELL
    001 column=base_info:addr, timestamp=1437644997783, value=New York
    001 column=base_info:age, timestamp=1437644967365, value=28
    001 column=base_info:name, timestamp=1437644932773, value=fengjie
    001 column=base_info:phonenbr, timestamp=1437645075989, value=13813838888
    001 column=extra_info:sex, timestamp=1437645025106, value=female
    1 row(s) in 0.0520 seconds
    hbase(main):030:0> get 'user_info','001'
    COLUMN CELL
    base_info:addr timestamp=1437644997783, value=New York
    base_info:age timestamp=1437644967365, value=28
    base_info:name timestamp=1437644932773, value=fengjie
    base_info:phonenbr timestamp=1437645075989, value=13813838888
    extra_info:sex timestamp=1437645025106, value=female
    5 row(s) in 0.0660 seconds

Update data (a put to an existing cell writes a new version rather than overwriting in place, so the old value stays retrievable within the family's VERSIONS limit):

    hbase(main):031:0> put 'user_info','001','base_info:name','luoyufeng'
    0 row(s) in 0.0130 seconds
    hbase(main):032:0> get 'user_info','001'
    COLUMN CELL
    base_info:addr timestamp=1437644997783, value=New York
    base_info:age timestamp=1437644967365, value=28
    base_info:name timestamp=1437645350820, value=luoyufeng
    base_info:phonenbr timestamp=1437645075989, value=13813838888
    extra_info:sex timestamp=1437645025106, value=female
    5 row(s) in 0.0340 seconds
    hbase(main):033:0> get 'user_info','001',{COLUMN => 'base_info:name',VERSIONS => 10}
    COLUMN CELL
    base_info:name timestamp=1437645350820, value=luoyufeng
    base_info:name timestamp=1437644932773, value=fengjie
    2 row(s) in 0.0400 seconds

The difference between scan and get: get fetches a single row by key, while scan iterates over a range of rows; with RAW => true, a scan also returns not-yet-compacted cell versions and delete markers. Note that the option below should be VERSIONS; as typed, VERSION is ignored, which is why the raw scan shows only the latest version of each cell while the get with VERSIONS => 10 returns three:

    hbase(main):038:0> scan 'user_info',{RAW => true,VERSION => 5}
    ROW COLUMN+CELL
    001 column=base_info:addr, timestamp=1437644997783, value=New York
    001 column=base_info:age, timestamp=1437644967365, value=28
    001 column=base_info:name, timestamp=1437645602346, value=fengbaobao
    001 column=base_info:phonenbr, timestamp=1437645075989, value=13813838888
    001 column=extra_info:sex, timestamp=1437645025106, value=female
    1 row(s) in 0.0260 seconds
    hbase(main):037:0> get 'user_info','001',{COLUMN => 'base_info:name',VERSIONS => 10}
    COLUMN CELL
    base_info:name timestamp=1437645602346, value=fengbaobao
    base_info:name timestamp=1437645498636, value=fengfeng
    base_info:name timestamp=1437645350820, value=luoyufeng
    3 row(s) in 0.0200 seconds

Download the CDH version of Hadoop (Cloudera's packaged distribution, deployed through Cloudera Manager):

    wget http://archive.cloudera.com/cm5/installer/latest/cloudera-manager-installer.bin
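The usual next step, not shown in the original transcript, is to make the installer executable and run it as root:

    # Launch the interactive Cloudera Manager installer (requires root)
    chmod +x cloudera-manager-installer.bin
    sudo ./cloudera-manager-installer.bin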
