liuyuqi-dellpc committed 8fa1094794 (8 years ago)

+ 353 - 0
conf/core-site.xml

@@ -0,0 +1,353 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+<property>
+<name>hadoop.proxyuser.solr.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.http.server.MaxRequests</name>
+<value>2000</value>
+</property>
+<property>
+<name>hadoop.ssl.require.client.cert</name>
+<value>false</value>
+</property>
+<property>
+<name>hadoop.http.authentication.kerberos.keytab</name>
+<value>/opt/huawei/Bigdata/FusionInsight/FusionInsight-Hadoop-2.7.2/hadoop/sbin/mapred.keytab</value>
+</property>
+<property>
+<name>hadoop.proxyuser.mapred.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>fs.nas.task.nodes</name>
+<value></value>
+</property>
+<property>
+<name>hadoop.security.auth_to_local</name>
+<value>RULE:[1:$1@$0](^.*@$)s/^(.*)@$/$1/g
+                    RULE:[2:$1@$0](^.*@$)s/^(.*)@$/$1/g
+                    DEFAULT</value>
+</property>
+<property>
+<name>ipc.ping.interval</name>
+<value>60000</value>
+</property>
+<property>
+<name>hadoop.proxyuser.impala.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>fs.viewfs.mounttable.ClusterX.link</name>
+<value></value>
+</property>
+<property>
+<name>hadoop.security.authorization</name>
+<value>true</value>
+</property>
+<property>
+<name>hadoop.proxyuser.solr.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.HTTP.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark3.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.loader.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>ipc.client.connect.timeout</name>
+<value>20000</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark2.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark4.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.loader.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.security.authentication</name>
+<value>kerberos</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark4.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hue.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase1.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase1.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase3.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hiveadmin.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.http.kerberos.internal.spnego.principal</name>
+<value>HTTP/hd08@HADOOP.COM</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hiveadmin.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.mapred.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.security.group.mapping</name>
+<value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hue.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase4.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.security.crypto.implementation.class</name>
+<value>com.huawei.hadoop.datasight.security.FMHadoopCryptAdapter</value>
+</property>
+<property>
+<name>hadoop.ssl.keystores.factory.class</name>
+<value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.http.filter.initializers</name>
+<value>com.huawei.hadoop.datasight.FlowCtrlFilter,com.huawei.hadoop.datasight.XSSFilterInitializer,com.huawei.hadoop.datasight.InternalSpnegoFilterForYARN,com.huawei.hadoop.datasight.CASClientFilter</value>
+</property>
+<property>
+<name>hadoop.proxyuser.miner.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.http.authentication.center</name>
+<value>https://172.19.23.150:20027/cas/</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive3.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>fs.defaultFS</name>
+<value>hdfs://hacluster</value>
+</property>
+<property>
+<name>hadoop.proxyuser.impala.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.miner.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.ssl.client.conf</name>
+<value>ssl-client.xml</value>
+</property>
+<property>
+<name>fs.nas.mount.dir</name>
+<value>/mnt/nfsdata0</value>
+</property>
+<property>
+<name>hadoop.proxyuser.omm.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark1.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>fs.AbstractFileSystem.nas.impl</name>
+<value>com.huawei.nasfilesystem.WushanFs</value>
+</property>
+<property>
+<name>hadoop.ssl.hostname.verifier</name>
+<value>ALLOW_ALL</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase3.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.http.kerberos.internal.spnego.keytab</name>
+<value>/opt/huawei/Bigdata/FusionInsight/FusionInsight-Hadoop-2.7.2/hadoop/sbin/mapred.keytab</value>
+</property>
+<property>
+<name>hadoop.http.authentication.logout</name>
+<value>https://172.19.23.150:20027/cas/logout</value>
+</property>
+<property>
+<name>hadoop.http.authentication.center.listener</name>
+<value>org.jasig.cas.client.session.SingleSignOutHttpSessionListener</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive4.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive3.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive2.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>yarn.core-site.customized.configs</name>
+<value></value>
+</property>
+<property>
+<name>hadoop.ssl.server.conf</name>
+<value>ssl-server.xml</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive2.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>ipc.client.rpc.timeout</name>
+<value>300000</value>
+</property>
+<property>
+<name>fs.viewfs.mounttable.ClusterX.homedir</name>
+<value>/</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive1.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.http.authentication.kerberos.principal</name>
+<value>HTTP/hd08@HADOOP.COM</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive4.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark3.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.spnego.allowed.ips</name>
+<value>.*</value>
+</property>
+<property>
+<name>ipc.client.fallback-to-simple-auth-allowed</name>
+<value>true</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase2.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.omm.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>resourcemanager.core-site.customized.configs</name>
+<value></value>
+</property>
+<property>
+<name>io.file.buffer.size</name>
+<value>131072</value>
+</property>
+<property>
+<name>ipc.client.ping</name>
+<value>true</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase4.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.http.server.name</name>
+<value>https://hd08:26001</value>
+</property>
+<property>
+<name>hadoop.proxyuser.oozie.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hive1.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>fs.nas.impl</name>
+<value>com.huawei.nasfilesystem.ShareNASFileSystem</value>
+</property>
+<property>
+<name>hadoop.rpc.protection</name>
+<value>privacy</value>
+</property>
+<property>
+<name>hadoop.proxyuser.hbase2.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.oozie.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark2.hosts</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.spark1.groups</name>
+<value>*</value>
+</property>
+<property>
+<name>hadoop.proxyuser.HTTP.groups</name>
+<value>*</value>
+</property>
+</configuration>

+ 27 - 0
conf/distcp-default.xml

@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+<property>
+<name>mapred.job.reduce.memory.mb</name>
+<value>4096</value>
+</property>
+<property>
+<name>mapred.reducer.new-api</name>
+<value>true</value>
+</property>
+<property>
+<name>distcp.dynamic.strategy.impl</name>
+<value>org.apache.hadoop.tools.mapred.lib.DynamicInputFormat</value>
+</property>
+<property>
+<name>mapred.job.map.memory.mb</name>
+<value>4096</value>
+</property>
+<property>
+<name>distcp.static.strategy.impl</name>
+<value>org.apache.hadoop.tools.mapred.UniformSizeInputFormat</value>
+</property>
+<property>
+<name>mapreduce.reduce.class</name>
+<value>org.apache.hadoop.mapreduce.Reducer</value>
+</property>
+</configuration>

+ 175 - 0
conf/hdfs-site.xml

@@ -0,0 +1,175 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+<property>
+<name>dfs.client.read.shortcircuit.skip.checksum</name>
+<value>true</value>
+</property>
+<property>
+<name>dfs.namenode.kerberos.principal.pattern</name>
+<value>*</value>
+</property>
+<property>
+<name>dfs.nameservices.mappings</name>
+<value>[{"name":"hacluster","roleInstances":["21","22"]}]</value>
+</property>
+<property>
+<name>dfs.client.https.need-auth</name>
+<value>false</value>
+</property>
+<property>
+<name>dfs.replication</name>
+<value>3</value>
+</property>
+<property>
+<name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
+<value>true</value>
+</property>
+<property>
+<name>dfs.nameservices</name>
+<value>hacluster,haclusterX</value>
+</property>
+<property>
+<name>dfs.datanode.kerberos.https.principal</name>
+<value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>dfs.namenode.kerberos.https.principal</name>
+<value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>dfs.client.file-block-storage-locations.timeout.millis</name>
+<value>600000</value>
+</property>
+<property>
+<name>dfs.client.failover.connection.retries.on.timeouts</name>
+<value>0</value>
+</property>
+<property>
+<name>dfs.client.close.ack-timeout</name>
+<value>900000</value>
+</property>
+<property>
+<name>dfs.namenode.rpc-address.haclusterX.remotenn2</name>
+<value></value>
+</property>
+<property>
+<name>oi.dfs.colocation.zookeeper.quorum</name>
+<value>hd09:24002,hd08:24002,hd07:24002</value>
+</property>
+<property>
+<name>dfs.namenode.rpc-address.haclusterX.remotenn1</name>
+<value></value>
+</property>
+<property>
+<name>dfs.web.authentication.kerberos.principal</name>
+<value>HTTP/_HOST@HADOOP.COM</value>
+</property>
+<property>
+<name>dfs.client.socket-timeout</name>
+<value>600000</value>
+</property>
+<property>
+<name>dfs.client.socketcache.expiryMsec</name>
+<value>900</value>
+</property>
+<property>
+<name>dfs.datanode.socket.write.timeout</name>
+<value>600000</value>
+</property>
+<property>
+<name>dfs.client.failover.proxy.provider.haclusterX</name>
+<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+</property>
+<property>
+<name>dfs.client.failover.connection.retries</name>
+<value>0</value>
+</property>
+<property>
+<name>dfs.http.policy</name>
+<value>HTTPS_ONLY</value>
+</property>
+<property>
+<name>dfs.client.failover.proxy.provider.hacluster</name>
+<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+</property>
+<property>
+<name>dfs.datanode.kerberos.principal</name>
+<value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>dfs.domain.socket.path</name>
+<value>/var/run/FusionInsight-HDFS/dn_socket</value>
+</property>
+<property>
+<name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
+<value>DEFAULT</value>
+</property>
+<property>
+<name>dfs.client.read.shortcircuit</name>
+<value>true</value>
+</property>
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>dfs.namenode.rpc-address.hacluster.21</name>
+<value>hd08:25000</value>
+</property>
+<property>
+<name>dfs.namenode.rpc-address.hacluster.22</name>
+<value>hd09:25000</value>
+</property>
+<property>
+<name>dfs.ha.namenodes.hacluster</name>
+<value>21,22</value>
+</property>
+<property>
+<name>yarn.hdfs-site.customized.configs</name>
+<value></value>
+</property>
+<property>
+<name>ipc.client.connect.max.retries.on.timeouts</name>
+<value>45</value>
+</property>
+<property>
+<name>dfs.client.socketcache.capacity</name>
+<value>0</value>
+</property>
+<property>
+<name>dfs.blocksize</name>
+<value>134217728</value>
+</property>
+<property>
+<name>dfs.datanode.address</name>
+<value>hd08:25009</value>
+</property>
+<property>
+<name>dfs.distcp</name>
+<value>haclusterX</value>
+</property>
+<property>
+<name>dfs.ha.namenodes.haclusterX</name>
+<value>remotenn1,remotenn2</value>
+</property>
+<property>
+<name>yarn.distcp.fs-limits.max-directory-items</name>
+<value>10000000</value>
+</property>
+<property>
+<name>dfs.datanode.socket.reuse.keepalive</name>
+<value>-1</value>
+</property>
+<property>
+<name>dfs.client.failover.max.attempts</name>
+<value>10</value>
+</property>
+<property>
+<name>dfs.datanode.http.address</name>
+<value>hd08:25010</value>
+</property>
+<property>
+<name>dfs.client.block.write.replace-datanode-on-failure.replication</name>
+<value>2</value>
+</property>
+</configuration>

+ 38 - 0
conf/krb5.conf

@@ -0,0 +1,38 @@
+[kdcdefaults]
+kdc_ports = 21732
+
+[libdefaults]
+default_realm = HADOOP.COM
+kdc_timeout = 2500
+clockskew = 300
+use_dns_lookup = 0
+udp_preference_limit = 1465
+max_retries = 5
+dns_lookup_kdc = false
+dns_lookup_realm = false
+renewable = false
+forwardable = false
+renew_lifetime = 0m
+max_renewable_life = 30m
+
+[realms]
+HADOOP.COM = {
+kdc = 172.19.23.93:21732
+kdc = 172.19.23.92:21732
+admin_server = 172.19.23.93:21730
+admin_server = 172.19.23.92:21730
+kpasswd_server = 172.19.23.93:21731
+kpasswd_server = 172.19.23.92:21731
+renewable = false
+forwardable = false
+renew_lifetime = 0m
+max_renewable_life = 30m
+}
+
+[domain_realm]
+.hadoop.com = HADOOP.COM
+
+[logging]
+kdc = SYSLOG:INFO:DAEMON
+admin_server = SYSLOG:INFO:DAEMON
+default = SYSLOG:NOTICE:DAEMON

+ 7 - 0
conf/log4j.properties

@@ -0,0 +1,7 @@
+# This sets the global logging level and specifies the appenders
+log4j.rootLogger=INFO, theConsoleAppender
+ 
+# settings for the console appender
+log4j.appender.theConsoleAppender=org.apache.log4j.ConsoleAppender
+log4j.appender.theConsoleAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.theConsoleAppender.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

+ 307 - 0
conf/mapred-site.xml

@@ -0,0 +1,307 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+<property>
+<name>mapreduce.map.speculative</name>
+<value>false</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.keytab</name>
+<value>/opt/huawei/Bigdata/FusionInsight/FusionInsight-Hadoop-2.7.2/hadoop/sbin/mapred.keytab</value>
+</property>
+<property>
+<name>mapreduce.cluster.temp.dir</name>
+<value>/mapred/temp</value>
+</property>
+<property>
+<name>mapred.child.java.opts</name>
+<value>-Xms1024M -Xmx3584M -Xloggc:/tmp/@taskid@.gc</value>
+</property>
+<property>
+<name>mapreduce.shuffle.port</name>
+<value>26008</value>
+</property>
+<property>
+<name>mapreduce.client.submit.file.replication</name>
+<value>10</value>
+</property>
+<property>
+<name>mapreduce.job.counters.max</name>
+<value>120</value>
+</property>
+<property>
+<name>mapreduce.reduce.log.level</name>
+<value>INFO</value>
+</property>
+<property>
+<name>mapreduce.job.ubertask.maxreduces</name>
+<value>1</value>
+</property>
+<property>
+<name>mapreduce.container.reuse.enforce.strict-locality</name>
+<value>false</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.principal</name>
+<value>mapred/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>mapreduce.job.maxtaskfailures.per.tracker</name>
+<value>3</value>
+</property>
+<property>
+<name>mapreduce.shuffle.max.connections</name>
+<value>0</value>
+</property>
+<property>
+<name>mapreduce.map.log.level</name>
+<value>INFO</value>
+</property>
+<property>
+<name>mapreduce.reduce.merge.inmem.threshold</name>
+<value>1000</value>
+</property>
+<property>
+<name>mapreduce.job.speculative.slowtaskthreshold</name>
+<value>1.0</value>
+</property>
+<property>
+<name>mapreduce.job.reduces</name>
+<value>1</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.staging-dir</name>
+<value>/tmp/hadoop-yarn/staging</value>
+</property>
+<property>
+<name>mapreduce.job.ubertask.maxbytes</name>
+<value></value>
+</property>
+<property>
+<name>mapreduce.reduce.shuffle.merge.percent</name>
+<value>0.66</value>
+</property>
+<property>
+<name>mapreduce.job.ubertask.maxmaps</name>
+<value>9</value>
+</property>
+<property>
+<name>mapreduce.output.fileoutputformat.compress</name>
+<value>false</value>
+</property>
+<property>
+<name>mapreduce.job.reduce.slowstart.completedmaps</name>
+<value>1.0</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.http.policy</name>
+<value>HTTPS_ONLY</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.max-age-ms</name>
+<value>1296000000</value>
+</property>
+<property>
+<name>mapreduce.application.classpath</name>
+<value></value>
+</property>
+<property>
+<name>mapreduce.reduce.java.opts</name>
+<value>-Xmx3276M -Djava.net.preferIPv4Stack=true</value>
+</property>
+<property>
+<name>mapreduce.map.output.compress</name>
+<value>true</value>
+</property>
+<property>
+<name>mapreduce.map.output.compress.codec</name>
+<value>org.apache.hadoop.io.compress.SnappyCodec</value>
+</property>
+<property>
+<name>mapreduce.reduce.input.buffer.percent</name>
+<value>0.0</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.cleaner.interval-ms</name>
+<value>86400000</value>
+</property>
+<property>
+<name>yarn.mapred-site.customized.configs</name>
+<value></value>
+</property>
+<property>
+<name>mapreduce.jobhistory.intermediate-done-dir</name>
+<value>/mr-history/tmp</value>
+</property>
+<property>
+<name>resourcemanager.mapred-site.customized.configs</name>
+<value></value>
+</property>
+<property>
+<name>mapreduce.input.fileinputformat.split.minsize</name>
+<value>0</value>
+</property>
+<property>
+<name>mapreduce.job.split.metainfo.maxsize</name>
+<value>10000000</value>
+</property>
+<property>
+<name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+<value>0.70</value>
+</property>
+<property>
+<name>mapreduce.reduce.speculative</name>
+<value>false</value>
+</property>
+<property>
+<name>mapreduce.input.fileinputformat.split.maxsize</name>
+<value></value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
+<value>1</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.client.jobstatus.max-retries</name>
+<value>3</value>
+</property>
+<property>
+<name>mapreduce.output.fileoutputformat.compress.type</name>
+<value>RECORD</value>
+</property>
+<property>
+<name>ipc.server.read.threadpool.size</name>
+<value>1</value>
+</property>
+<property>
+<name>mapreduce.task.io.sort.mb</name>
+<value>512</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.shuffle.log.limit.kb</name>
+<value>51200</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.client.jobstatus.max-retries.interval</name>
+<value>5000</value>
+</property>
+<property>
+<name>mapreduce.container.reuse.enabled</name>
+<value>false</value>
+</property>
+<property>
+<name>mapreduce.task.get-task.retries</name>
+<value>5</value>
+</property>
+<property>
+<name>mapreduce.reduce.cpu.vcores</name>
+<value>1</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.address</name>
+<value>hd09:26013</value>
+</property>
+<property>
+<name>mapreduce.job.ubertask.enable</name>
+<value>false</value>
+</property>
+<property>
+<name>mapreduce.map.sort.spill.percent</name>
+<value>0.8</value>
+</property>
+<property>
+<name>mapreduce.map.memory.mb</name>
+<value>4096</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.job.client.port-range</name>
+<value>27100-27999</value>
+</property>
+<property>
+<name>mapreduce.jobtracker.jobhistory.location</name>
+<value></value>
+</property>
+<property>
+<name>mapreduce.task.timeout</name>
+<value>600000</value>
+</property>
+<property>
+<name>mapreduce.admin.user.env</name>
+<value></value>
+</property>
+<property>
+<name>mapreduce.reduce.memory.mb</name>
+<value>4096</value>
+</property>
+<property>
+<name>mapreduce.framework.name</name>
+<value>yarn</value>
+</property>
+<property>
+<name>mapreduce.map.cpu.vcores</name>
+<value>1</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.admin.address</name>
+<value>hd09:26015</value>
+</property>
+<property>
+<name>mapreduce.reduce.shuffle.parallelcopies</name>
+<value>10</value>
+</property>
+<property>
+<name>mapreduce.output.fileoutputformat.compress.codec</name>
+<value>org.apache.hadoop.io.compress.DefaultCodec</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.webapp.https.address</name>
+<value>hd09:26014</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.done-dir</name>
+<value>/mr-history/done</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.webapp.address</name>
+<value>hd09:26014</value>
+</property>
+<property>
+<name>mapreduce.task.userlog.limit.kb</name>
+<value>51200</value>
+</property>
+<property>
+<name>mapreduce.task.io.sort.factor</name>
+<value>64</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.task.container.log.backups</name>
+<value>10</value>
+</property>
+<property>
+<name>mapreduce.jobhistory.cleaner.enable</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.container.log.backups</name>
+<value>20</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.command-opts</name>
+<value>-Xmx1024m -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSCompactAtFullCollection -verbose:gc</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.shuffle.log.backups</name>
+<value>10</value>
+</property>
+<property>
+<name>mapreduce.map.java.opts</name>
+<value>-Xmx2048M -Djava.net.preferIPv4Stack=true</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.container.log.limit.kb</name>
+<value>51200</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.resource.mb</name>
+<value>1536</value>
+</property>
+</configuration>

+ 6 - 0
conf/part-r-00000

@@ -0,0 +1,6 @@
+HBase	2
+Hadoop	2
+Hello	3
+Hive	1
+MapReduce	2
+World	1

+ 5 - 0
conf/test.txt

@@ -0,0 +1,5 @@
+Hello Hadoop
+Hello World
+Hello HBase
+HBase Hive MapReduce
+Hadoop MapReduce

+ 19 - 0
conf/user-mapred.xml

@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<configuration>
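+    <!-- Demo-specific properties read by WordCount/LocalRunner via conf.get():
+         the HDFS input and output paths and the comma-separated list of local
+         files under conf/ that are uploaded before the job runs. -->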
+    <property>
+        <name>username.client.mapred.input</name>
+        <value>/user/user01/mapred/example/input/</value>
+    </property>
+    <property>
+        <name>username.client.mapred.output</name>
+        <value>/user/user01/mapred/example/output/</value>
+    </property>
+    <property>
+        <name>username.client.filelist.conf</name>
+        <value>test.txt</value>
+    </property>
+    <property>
+        <name>mapreduce.app-submission.cross-platform</name>
+        <value>true</value>
+    </property>
+</configuration>

BIN
conf/user.keytab


+ 491 - 0
conf/yarn-site.xml

@@ -0,0 +1,491 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+<property>
+<name>yarn.resourcemanager.scheduler.monitor.policies</name>
+<value>org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy</value>
+</property>
+<property>
+<name>yarn.resourcemanager.scheduler.address.34</name>
+<value>hd08:26002</value>
+</property>
+<property>
+<name>yarn.resourcemanager.zk-address</name>
+<value>hd09:24002,hd08:24002,hd07:24002</value>
+</property>
+<property>
+<name>yarn.client.failover-proxy-provider</name>
+<value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
+</property>
+<property>
+<name>yarn.application.classpath</name>
+<value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$HADOOP_YARN_HOME/share/hadoop/mapreduce/*,$HADOOP_YARN_HOME/share/hadoop/mapreduce/lib/*,$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*</value>
+</property>
+<property>
+<name>yarn.resourcemanager.resource-tracker.address.port</name>
+<value>26003</value>
+</property>
+<property>
+<name>yarn.nodemanager.webapp.https.address</name>
+<value>hd08:26010</value>
+</property>
+<property>
+<name>yarn.resourcemanager.ha.id</name>
+<value>34</value>
+</property>
+<property>
+<name>yarn.admin.acl</name>
+<value>mapred,hive supergroup,System_administrator_186</value>
+</property>
+<property>
+<name>yarn.resourcemanager.scheduler.address.35</name>
+<value>hd09:26002</value>
+</property>
+<property>
+<name>yarn.nodemanager.address</name>
+<value>hd08:26009</value>
+</property>
+<property>
+<name>yarn.resourcemanager.applicationmasters.heartbeat-min-interval-ms</name>
+<value>100</value>
+</property>
+<property>
+<name>yarn.resourcemanager.resource-tracker.address.35</name>
+<value>hd09:26003</value>
+</property>
+<property>
+<name>yarn.node-labels.manager-class</name>
+<value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager</value>
+</property>
+<property>
+<name>yarn.scheduler.maximum-allocation-vcores</name>
+<value>32</value>
+</property>
+<property>
+<name>yarn.resourcemanager.resource-tracker.address.34</name>
+<value>hd08:26003</value>
+</property>
+<property>
+<name>yarn.resourcemanager.cluster-id</name>
+<value>yarncluster</value>
+</property>
+<property>
+<name>yarn.nodemanager.aux-services</name>
+<value>mapreduce_shuffle</value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.https.address.35</name>
+<value>hd09:26001</value>
+</property>
+<property>
+<name>resourcemanager.yarn-site.customized.configs</name>
+<value></value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.https.address.34</name>
+<value>hd08:26001</value>
+</property>
+<property>
+<name>yarn.resourcemanager.address.34</name>
+<value>hd08:26004</value>
+</property>
+<property>
+<name>yarn.resourcemanager.address.35</name>
+<value>hd09:26004</value>
+</property>
+<property>
+<name>yarn.log-aggregation.retain-check-interval-seconds</name>
+<value>86400</value>
+</property>
+<property>
+<name>yarn.web-proxy.keytab</name>
+<value>/opt/huawei/Bigdata/FusionInsight/FusionInsight-Hadoop-2.7.2/hadoop/sbin/mapred.keytab</value>
+</property>
+<property>
+<name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name>
+<value>1000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.pagination.enable</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.client.nodemanager-connect.max-wait-ms</name>
+<value>30000</value>
+</property>
+<property>
+<name>yarn.http.policy</name>
+<value>HTTPS_ONLY</value>
+</property>
+<property>
+<name>yarn.resourcemanager.hostname.35</name>
+<value>hd09</value>
+</property>
+<property>
+<name>yarn.resourcemanager.zk-state-store.parent-path</name>
+<value>/rmstore</value>
+</property>
+<property>
+<name>yarn.application.priority-labels</name>
+<value>VERY_LOW,LOW,NORMAL,HIGH,VERY_HIGH</value>
+</property>
+<property>
+<name>yarn.nodemanager.keytab</name>
+<value>/opt/huawei/Bigdata/FusionInsight/FusionInsight-Hadoop-2.7.2/hadoop/sbin/mapred.keytab</value>
+</property>
+<property>
+<name>yarn.resourcemanager.hostname.34</name>
+<value>hd08</value>
+</property>
+<property>
+<name>yarn.resourcemanager.monitor.capacity.preemption.max_ignored_over_capacity</name>
+<value>0</value>
+</property>
+<property>
+<name>yarn.client.application-master-protocol.event-based-heartbeat.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.resourcemanager.am.max-retries</name>
+<value>2</value>
+</property>
+<property>
+<name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
+<value>2000,500</value>
+</property>
+<property>
+<name>yarn.resourcemanager.nodemanagers.heartbeat-min-interval-ms</name>
+<value>100</value>
+</property>
+<property>
+<name>yarn.client.failover-sleep-base-ms</name>
+<value>30000</value>
+</property>
+<property>
+<name>yarn.log-aggregation.retain-seconds</name>
+<value>1296000</value>
+</property>
+<property>
+<name>yarn.node-labels.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>clientPort</name>
+<value>24002</value>
+</property>
+<property>
+<name>yarn.resourcemanager.admin.address.port</name>
+<value>26005</value>
+</property>
+<property>
+<name>yarn.resourcemanager.zk-num-retries</name>
+<value>1000</value>
+</property>
+<property>
+<name>yarn.scheduler.minimum-allocation-vcores</name>
+<value>1</value>
+</property>
+<property>
+<name>yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill</name>
+<value>15000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.admin.client.thread-count</name>
+<value>1</value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.pagination.threshold</name>
+<value>5000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.scheduler.class</name>
+<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+</property>
+<property>
+<name>yarn.resourcemanager.applicationmasters.heartbeat-interval-ms</name>
+<value>1000</value>
+</property>
+<property>
+<name>yarn.nodemanager.webapp.port</name>
+<value>26006</value>
+</property>
+<property>
+<name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name>
+<value>600000</value>
+</property>
+<property>
+<name>_initial_.leafqueue.acl_administer_queue</name>
+<value>mapred supergroup,System_administrator</value>
+</property>
+<property>
+<name>yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval</name>
+<value>3000</value>
+</property>
+<property>
+<name>yarn.nodemanager.webapp.https.port</name>
+<value>26010</value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.address.35</name>
+<value>hd09:26000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.address.34</name>
+<value>hd08:26000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.fs.state-store.uri</name>
+<value>/tmp/hadoop-omm/yarn/system/rmstore</value>
+</property>
+<property>
+<name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.pagination.threshold</name>
+<value>1000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.recovery.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.node-labels.configuration-type</name>
+<value>centralized</value>
+</property>
+<property>
+<name>yarn.resourcemanager.nodemanagers.heartbeat-interval-ms</name>
+<value>1000</value>
+</property>
+<property>
+<name>yarn.log.server.url</name>
+<value>https://hd09:26014/jobhistory/logs</value>
+</property>
+<property>
+<name>yarn.resourcemanager.scheduler.monitor.enable</name>
+<value>false</value>
+</property>
+<property>
+<name>yarn.resourcemanager.resource-tracker.client.thread-count</name>
+<value>50</value>
+</property>
+<property>
+<name>yarn.nodemanager.remote-app-log-archive-dir</name>
+<value>/tmp/archived</value>
+</property>
+<property>
+<name>yarn.resourcemanager.keytab</name>
+<value>/opt/huawei/Bigdata/FusionInsight/FusionInsight-Hadoop-2.7.2/hadoop/sbin/mapred.keytab</value>
+</property>
+<property>
+<name>yarn.nodemanager.principal</name>
+<value>mapred/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>yarn.resourcemanager.proxy-user-privileges.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.acl.enable</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.resourcemanager.ha.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.log-aggregation.archive-check-interval-seconds</name>
+<value>0</value>
+</property>
+<property>
+<name>yarn.resourcemanager.admin.address.35</name>
+<value>hd09:26005</value>
+</property>
+<property>
+<name>yarn.nm.liveness-monitor.expiry-interval-ms</name>
+<value>600000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.admin.address.34</name>
+<value>hd08:26005</value>
+</property>
+<property>
+<name>yarn.client.nodemanager-connect.retry-interval-ms</name>
+<value>300000</value>
+</property>
+<property>
+<name>yarn.am.liveness-monitor.expiry-interval-ms</name>
+<value>600000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.client.thread-count</name>
+<value>50</value>
+</property>
+<property>
+<name>yarn.resourcemanager.port</name>
+<value>26004</value>
+</property>
+<property>
+<name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
+<value>1</value>
+</property>
+<property>
+<name>_initial_.yarn.admin.acl</name>
+<value>mapred,hive supergroup,System_administrator_186</value>
+</property>
+<property>
+<name>yarn.resourcemanager.strict-view.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.resourcemanager.zk-timeout-ms</name>
+<value>45000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.max-completed-applications</name>
+<value>10000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.am.max-attempts</name>
+<value>2</value>
+</property>
+<property>
+<name>yarn.app.mapreduce.am.pagination.enable</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.node-labels.fs-store.root-dir</name>
+<value>hdfs://hacluster/user/mapred/node-labels/</value>
+</property>
+<property>
+<name>yarn.nodemanager.webapp.address</name>
+<value>hd08:26006</value>
+</property>
+<property>
+<name>yarn.node-labels.fs-store.retry-policy-spec</name>
+<value>1000,20</value>
+</property>
+<property>
+<name>yarn.yarn-site.customized.configs</name>
+<value></value>
+</property>
+<property>
+<name>yarn.resourcemanager.monitor.capacity.preemption.observe_only</name>
+<value>false</value>
+</property>
+<property>
+<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+<value>org.apache.hadoop.mapred.ShuffleHandler</value>
+</property>
+<property>
+<name>yarn.log-aggregation-enable</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
+<value>true</value>
+</property>
+<property>
+<name>yarn.resourcemanager.store.class</name>
+<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+</property>
+<property>
+<name>_initial_.leafqueue.acl_submit_applications</name>
+<value>mapred supergroup,System_administrator</value>
+</property>
+<property>
+<name>yarn.nodemanager.log.retain-seconds</name>
+<value>10800</value>
+</property>
+<property>
+<name>yarn.resourcemanager.scheduler.port</name>
+<value>26002</value>
+</property>
+<property>
+<name>yarn.resourcemanager.ha.rm-ids</name>
+<value>35,34</value>
+</property>
+<property>
+<name>yarn.log-aggregation.archive.files.minimum</name>
+<value>5000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.scheduler.client.thread-count</name>
+<value>50</value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.https.port</name>
+<value>26001</value>
+</property>
+<property>
+<name>yarn.resourcemanager.nodemanagers.heartbeat-max-interval-ms</name>
+<value>60000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
+<value>0.1</value>
+</property>
+<property>
+<name>yarn.client.application-master-protocol.event-based-heartbeat-interval-ms</name>
+<value>50</value>
+</property>
+<property>
+<name>yarn.scheduler.minimum-allocation-mb</name>
+<value>512</value>
+</property>
+<property>
+<name>yarn.resourcemanager.principal</name>
+<value>mapred/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>yarn.nodemanager.port</name>
+<value>26009</value>
+</property>
+<property>
+<name>yarn.resourcemanager.nodes.include-path</name>
+<value></value>
+</property>
+<property>
+<name>yarn.resourcemanager.zk-retry-interval-ms</name>
+<value>1000</value>
+</property>
+<property>
+<name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name>
+<value>1000</value>
+</property>
+<property>
+<name>yarn.nodemanager.remote-app-log-dir</name>
+<value>/tmp/logs</value>
+</property>
+<property>
+<name>yarn.scheduler.maximum-allocation-mb</name>
+<value>65536</value>
+</property>
+<property>
+<name>yarn.resourcemanager.applicationmasters.heartbeat-max-interval-ms</name>
+<value>60000</value>
+</property>
+<property>
+<name>yarn.rm.app.timeout.thread.interval</name>
+<value>30</value>
+</property>
+<property>
+<name>yarn.resourcemanager.throttle-heartbeat.enabled</name>
+<value>false</value>
+</property>
+<property>
+<name>yarn.resourcemanager.webapp.port</name>
+<value>26000</value>
+</property>
+<property>
+<name>yarn.web-proxy.principal</name>
+<value>mapred/hadoop.hadoop.com@HADOOP.COM</value>
+</property>
+<property>
+<name>yarn.resourcemanager.rm.container-allocation.expiry-interval-ms</name>
+<value>600000</value>
+</property>
+</configuration>

+ 152 - 0
src/com/huawei/bigdata/mapreduce/demo/WordCount.java

@@ -0,0 +1,152 @@
+package com.huawei.bigdata.mapreduce.demo;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.StringTokenizer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+
+import com.huawei.bigdata.mapreduce.local.LocalRunner;
+import com.huawei.bigdata.mapreduce.tools.LoginUtil;
+import com.huawei.bigdata.mapreduce.tools.TarManager;
+
+
+public class WordCount {
+	
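+	// Kerberos identity used for the security login: the principal plus the user.keytab
+	// and krb5.conf files resolved from the classpath (shipped under conf/ in this
+	// project, assuming conf/ is on the runtime classpath).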
+	public static final String PRINCIPAL= "user01";
+	public static final String KEYTAB = WordCount.class.getClassLoader().getResource("user.keytab").getPath();
+	public static final String KRB = WordCount.class.getClassLoader().getResource("krb5.conf").getPath();
+	
+	// Initialize the environment and load the XML configuration files
+    public static Configuration conf = LocalRunner.getConfiguration();
+
+	// The local source file list (comma-separated file names under conf/)
+    public static String filelist = conf.get("username.client.filelist.conf");
+
+    // Input file path (the first parameter)
+    public static String inputPath = conf.get("username.client.mapred.input");
+
+    // MR output path (the second parameter)
+    public static String outputPath = conf.get("username.client.mapred.output");
+	
+	public static class Map extends Mapper<Object,Text,Text,IntWritable>{
+		private static IntWritable one = new IntWritable(1);
+		
+		private Text word = new Text();
+		public void map(Object key,Text value,Context context) throws IOException,InterruptedException
+		{
+			StringTokenizer st = new StringTokenizer(value.toString());
+			while(st.hasMoreTokens()){
+				word.set(st.nextToken());
+				context.write(word, one);
+			}
+		}
+	}
+	
+	public static class Reduce extends Reducer<Text,IntWritable,Text,IntWritable>{
+		private static IntWritable result = new IntWritable();
+		
+		public void reduce(Text key,Iterable<IntWritable> values,Context context) throws IOException,InterruptedException
+		{
+			int sum = 0;
+			for(IntWritable val:values)
+			{
+				sum += val.get();
+			}
+			
+			result.set(sum);
+			System.out.print(key);
+			System.out.print(result);
+			context.write(key, result);
+		}
+	}
+	
+	public static void main(String[] args) throws Exception {
+		
+		// Create JAR
+	    TarManager.createJar();
+		
+		// Security login
+	    /**************************DIY Code start**************************************/
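+	    // A minimal sketch of what typically goes here, mirroring the Kerberos login
+	    // done in LocalRunner.main with the keytab bundled under conf/:
+	    //
+	    //     LoginUtil.login(PRINCIPAL, KEYTAB, KRB, conf);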
+	    
+	    
+	    
+	    /**************************DIY Code End****************************************/
+	    
+	    // Obtain input parameters.
+	    String[] otherArgs = new GenericOptionsParser(conf, args)
+	        .getRemainingArgs();
+	    if (otherArgs.length >= 1) {
+	        inputPath = otherArgs[0];
+	    }
+
+	    if (otherArgs.length >= 2) {
+	        outputPath = otherArgs[1];
+	    }
+	      
+	    // Get current path
+	    String dir = System.getProperty("user.dir");
+
+	    // Local directory
+	    String localPath = dir + File.separator + "conf";
+
+	    // Put source files to server
+	    FileSystem fileSystem = FileSystem.get(conf);
+	    
+	    // Delete input path's files
+	    if (fileSystem.exists(new Path(inputPath))) {
+	      fileSystem.delete(new Path(inputPath), true);
+	    }
+	    
+	    LocalRunner.putFiles(fileSystem, filelist,
+	        localPath, inputPath);
+	    
+	    // Delete output path's files
+	    if (fileSystem.exists(new Path(outputPath))) {
+	      fileSystem.delete(new Path(outputPath), true);
+	    }
+	    
+	    // Initialize the job object.
+	    @SuppressWarnings("deprecation")
+	    Job job = new Job(conf, "Word Count");
+	    job.setJar(dir + File.separator + "mapreduce-demo.jar");
+        job.setJarByClass(WordCount.class);
+
+	    // Set map and reduce classes to be executed, or specify the map and reduce classes using configuration files.
+	    /**************************DIY Code start**************************************/
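+	    // A minimal sketch of what typically goes here, mirroring the wiring in
+	    // LocalRunner.main:
+	    //
+	    //     job.setMapperClass(Map.class);
+	    //     job.setCombinerClass(Reduce.class);
+	    //     job.setReducerClass(Reduce.class);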
+	    
+        
+        
+	    /**************************DIY Code End******************************************/
+
+	    // Set the output type of the job.
+	    job.setOutputKeyClass(Text.class);
+	    job.setOutputValueClass(IntWritable.class);
+	    
+	    FileInputFormat.addInputPath(job, new Path(inputPath));
+	    FileOutputFormat.setOutputPath(job, new Path(outputPath));
+
+	    // Submit the job to a remote environment for execution.
+	    job.waitForCompletion(true);
+	    
+	    // Read the result file and download it to the local conf directory
+	    String FILE_NAME = "part-r-00000";
+	    String dstPath = outputPath + File.separator + FILE_NAME;
+	    LocalRunner.readFile(fileSystem,dstPath);
+	    LocalRunner.downFiles(fileSystem,localPath,dstPath);
+	    
+	    System.exit(0);
+	
+	}
+
+}

+ 234 - 0
src/com/huawei/bigdata/mapreduce/local/LocalRunner.java

@@ -0,0 +1,234 @@
+package com.huawei.bigdata.mapreduce.local;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import com.huawei.bigdata.mapreduce.tools.FileUploader;
+import com.huawei.bigdata.mapreduce.tools.LoginUtil;
+import com.huawei.bigdata.mapreduce.tools.TarManager;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.GenericOptionsParser;
+
+import com.huawei.bigdata.mapreduce.demo.WordCount;
+import com.huawei.bigdata.mapreduce.demo.WordCount.Map;
+import com.huawei.bigdata.mapreduce.demo.WordCount.Reduce;
+
+/**
+ * run MR application
+ */
+public class LocalRunner {
+	// Initialize the environment and load the XML configuration files
+    public static Configuration conf = getConfiguration();
+
+	// The local source file list (comma-separated file names under conf/)
+    public static String filelist = conf.get("username.client.filelist.conf");
+
+    // Input file path (the first parameter)
+    public static String inputPath = conf.get("username.client.mapred.input");
+
+    // MR output path (the second parameter)
+    public static String outputPath = conf.get("username.client.mapred.output");
+    
+  /**
+   * Main entry point. It first builds the application JAR via TarManager.createJar().
+   *
+   * @param args String[] : index 0 is the input directory, index 1 is the output directory
+   * @throws Exception
+   */
+  public static void main(String[] args) throws Exception {
+
+    // Create JAR
+    TarManager.createJar();
+
+    // Security login
+    LoginUtil.login(WordCount.PRINCIPAL, WordCount.KEYTAB, WordCount.KRB, conf);
+
+    // If input parameters are given, use them; otherwise use the defaults
+    String[] parArgs = new GenericOptionsParser(conf, args)
+        .getRemainingArgs();
+
+    if (parArgs.length >= 1) {
+      inputPath = parArgs[0];
+    }
+
+    if (parArgs.length >= 2) {
+      outputPath = parArgs[1];
+    }
+
+    // Get current path
+    String dir = System.getProperty("user.dir");
+
+    // Local directory
+    String localPath = dir + File.separator + "conf";
+
+    // Put source files to server
+    FileSystem fileSystem = FileSystem.get(conf);
+    putFiles(fileSystem, filelist,
+        localPath, inputPath);
+    
+    // Delete output path's files
+    if (fileSystem.exists(new Path(outputPath))) {
+      fileSystem.delete(new Path(outputPath), true);
+    }
+    
+    // Initialize the job object.
+    @SuppressWarnings("deprecation")
+    Job job = new Job(conf, "Word Count");
+
+    // Set the jar file and main class to execute
+    job.setJar(dir + File.separator + "mapreduce-demo.jar");
+    job.setJarByClass(WordCount.class);
+
+    // Set map and reduce classes to be executed, or specify the map and reduce classes using configuration files.
+    job.setMapperClass(Map.class);
+    job.setCombinerClass(Reduce.class);
+    job.setReducerClass(Reduce.class);
+
+    // Set the output type of the job.
+    job.setOutputKeyClass(Text.class);
+    job.setOutputValueClass(IntWritable.class);
+    
+    // Set the HDFS input and output paths
+    FileInputFormat.addInputPath(job, new Path(inputPath));
+    FileOutputFormat.setOutputPath(job, new Path(outputPath));
+    
+    // Submit the job to a remote environment for execution.
+    job.waitForCompletion(true);
+    
+    // Read the result file and download it to the local conf directory
+    String FILE_NAME = "part-r-00000";
+    String dstPath = outputPath + File.separator + FILE_NAME;
+    readFile(fileSystem,dstPath);
+    downFiles(fileSystem,localPath,dstPath);
+    
+    System.exit(0);
+  }
+
+  /**
+   * get conf object
+   *
+   * @return Configuration
+   */
+  public static Configuration getConfiguration() {
+    // Default load from conf directory
+    Configuration conf = new Configuration();
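+    // Resources added later override earlier ones for the same key, so the
+    // demo-specific settings in user-mapred.xml take precedence.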
+    conf.addResource(LocalRunner.class.getClassLoader().getResourceAsStream("core-site.xml"));
+    conf.addResource(LocalRunner.class.getClassLoader().getResourceAsStream("yarn-site.xml"));
+    conf.addResource(LocalRunner.class.getClassLoader().getResourceAsStream("mapred-site.xml"));
+    conf.addResource(LocalRunner.class.getClassLoader().getResourceAsStream("hdfs-site.xml"));
+    conf.addResource(LocalRunner.class.getClassLoader().getResourceAsStream("user-mapred.xml"));
+    return conf;
+  }
+
+  /**
+   * Upload local files to the remote file system
+   *
+   * @param fileSystem FileSystem : file system
+   * @param fileConfig String : comma-separated names of the files to be uploaded
+   * @param localPath  String : local file directory
+   * @param inputPath  String : remote target path
+   * @throws Exception if no files are specified or the upload fails
+   */
+  public static void putFiles(FileSystem fileSystem,
+                                 final String fileConfig, final String localPath,
+                                 final String inputPath) throws Exception {
+
+    // local files which are to be uploaded
+    String[] filenames = fileConfig.split(",");
+
+    if (filenames == null || filenames.length <= 0) {
+      throw new Exception("The files to be uploaded are not specified.");
+    }
+
+    // file loader to hdfs
+    FileUploader fileLoader = null;
+
+    for (int i = 0; i < filenames.length; i++) {
+      if (filenames[i] == null || "".equals(filenames[i])) {
+        continue;
+      }
+
+      // Execute the upload to HDFS
+      fileLoader = new FileUploader(fileSystem, inputPath,
+          filenames[i], localPath + File.separator + filenames[i]);
+      fileLoader.upload();
+    }
+  }
+  
+  /**
+   * Download a file from HDFS to the local file system
+   * @param fileSystem the HDFS file system
+   * @param localPath  the local target directory
+   * @param remote     the remote source path
+   * @throws Exception
+   */
+  public static void downFiles(FileSystem fileSystem,final String localPath,
+          final String remote) throws Exception {
+	  //String FILE_NAME = "count.txt";
+	  Path path = new Path(remote + File.separator);
+	  //System.out.println("path: "+path.toString());
+	  //FileSystem fs = FileSystem.get(URI.create(uri), conf);  
+	  fileSystem.copyToLocalFile(false,path, new Path(localPath),true);
+	  
+	  //System.out.println("download: from" + remote + " to " + localPath);  
+	  //fileSystem.close();
+  }
+  
+  /**
+   * Read a result file from HDFS and print its content
+   *
+   * @param fileSystem the HDFS file system
+   * @param filePath   the HDFS path of the file to read
+   * @throws Exception
+   */
+  public static void readFile(FileSystem fileSystem, String filePath) throws Exception {
+    Path path = new Path(filePath);
+    FSDataInputStream in = null;
+    BufferedReader reader = null;
+    StringBuffer strBuffer = new StringBuffer();
+
+    try {
+      in = fileSystem.open(path);
+      reader = new BufferedReader(new InputStreamReader(in));
+      String sTempOneLine;
+
+      // read the file line by line
+      while ((sTempOneLine = reader.readLine()) != null) {
+        strBuffer.append(sTempOneLine);
+        strBuffer.append("\n");
+      }
+
+      System.out.println("Word Count result is : ");
+      System.out.println(strBuffer.toString());
+    } finally {
+      // make sure the streams are closed; guard against a failed open()
+      if (reader != null) {
+        reader.close();
+      }
+      if (in != null) {
+        in.close();
+      }
+    }
+  }
+  
+  
+}

+ 171 - 0
src/com/huawei/bigdata/mapreduce/tools/FileUploader.java

@@ -0,0 +1,171 @@
+package com.huawei.bigdata.mapreduce.tools;
+
+import java.io.BufferedOutputStream;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Used to upload a local file to HDFS.
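+ *
+ * Typical usage, as in LocalRunner.putFiles: one uploader per file, e.g.
+ * new FileUploader(fs, inputPath, "test.txt", localDir + File.separator + "test.txt").upload();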
+ */
+public class FileUploader {
+
+  /**
+   * filesystem
+   */
+  private FileSystem fileSystem;
+
+  /**
+   * target path
+   */
+  private String destPath;
+
+  /**
+   * target file name
+   */
+  private String fileName;
+
+  /**
+   * absolute path of the source file
+   */
+  private String sourcePath;
+
+  /**
+   * to HDFS outstream
+   */
+  private FSDataOutputStream hdfsOutStream;
+
+  /**
+   * HDFS BufferedOutputStream
+   */
+  private BufferedOutputStream bufferOutStream;
+
+  /**
+   * Constructor
+   *
+   * @param fileSystem FileSystem : file system
+   * @param destPath   String : target path(remote file server)
+   * @param fileName   String : target file name
+   * @param sourcePath String : source file(absolute path native file)
+   */
+  public FileUploader(FileSystem fileSystem, String destPath,
+                      String fileName, String sourcePath) {
+    this.fileSystem = fileSystem;
+    this.destPath = destPath;
+    this.fileName = fileName;
+    this.sourcePath = sourcePath;
+  }
+
+  /**
+   * write to hdfs
+   *
+   * @param inputStream InputStream : inputStream
+   * @throws IOException , ParameterException
+   */
+  public void doWrite(InputStream inputStream) throws IOException {
+    // Initialize
+    setWriteResource();
+    try {
+      // Write to hdfs
+      outputToHDFS(inputStream);
+    } finally {
+      closeResource();
+    }
+  }
+
+  /**
+   * write to the target directory
+   *
+   * @param inputStream InputStream
+   * @throws IOException
+   */
+  private void outputToHDFS(InputStream inputStream) throws IOException {
+    final int countForOneRead = 1024; // 1024 Bytes each time
+    final byte buff[] = new byte[countForOneRead];
+    int count;
+    while ((count = inputStream.read(buff, 0, countForOneRead)) > 0) {
+      bufferOutStream.write(buff, 0, count);
+    }
+
+    bufferOutStream.flush();
+    hdfsOutStream.hflush();
+  }
+
+  /**
+   * init object
+   *
+   * @throws IOException
+   */
+  private void setWriteResource() throws IOException {
+    Path filepath = new Path(destPath + File.separator + fileName);
+    hdfsOutStream = fileSystem.create(filepath);
+    bufferOutStream = new BufferedOutputStream(hdfsOutStream);
+  }
+
+  /**
+   * close resource
+   */
+  private void closeResource() throws IOException {
+    // Close bufferOutStream first so any buffered data is flushed to hdfsOutStream
+    if (bufferOutStream != null) {
+      bufferOutStream.close();
+    }
+
+    // Close hdfsOutStream
+    if (hdfsOutStream != null) {
+      hdfsOutStream.close();
+    }
+  }
+
+  /**
+   * Upload the source file to the target HDFS directory
+   *
+   * @throws IOException if the upload fails
+   */
+  public void upload() throws IOException {
+    // Create target file directory
+    Path destFilePath = new Path(destPath);
+    createPath(destFilePath);
+
+    // Inputstream
+    FileInputStream input = null;
+    try {
+      input = new FileInputStream(sourcePath);
+      doWrite(input);
+    } finally {
+      close(input);
+    }
+  }
+
+  /**
+   * Create the target directory if it does not exist
+   *
+   * @param filePath Path : directory path
+   * @throws IOException if the directory cannot be created
+   */
+  private void createPath(Path filePath) throws IOException {
+    if (!fileSystem.exists(filePath)) {
+      // Create this directory
+      fileSystem.mkdirs(filePath);
+    }
+  }
+
+  /**
+   * close stream
+   *
+   * @param stream Closeable : stream object
+   */
+  private void close(Closeable stream) throws IOException {
+    if (stream == null) {
+      return;
+    }
+
+    stream.close();
+  }
+}

+ 451 - 0
src/com/huawei/bigdata/mapreduce/tools/LoginUtil.java

@@ -0,0 +1,451 @@
+package com.huawei.bigdata.mapreduce.tools;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.log4j.Logger;
+
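+/**
+ * Kerberos login helper for the secure cluster. Typical usage, as in LocalRunner.main:
+ *
+ *     LoginUtil.login("user01", userKeytabPath, krb5ConfPath, conf);
+ */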
+public class LoginUtil
+{
+    
+    private static final Logger LOG = Logger.getLogger(LoginUtil.class);
+    
+    private static final String JAVA_SECURITY_KRB5_CONF_KEY = "java.security.krb5.conf";
+       
+    private static final String LOGIN_FAILED_CAUSE_PASSWORD_WRONG =
+        "(wrong password) the keytab file and the user do not match; you can run 'kinit -k -t <keytab> <user>' on the client server to check";
+    
+    private static final String LOGIN_FAILED_CAUSE_TIME_WRONG =
+        "(clock skew) the time on the local server and the remote server does not match; please check NTP synchronization with the remote server";
+    
+    private static final String LOGIN_FAILED_CAUSE_AES256_WRONG =
+        "(aes256 not supported) AES256 is not supported by the default JDK/JRE; copy local_policy.jar and US_export_policy.jar from the remote server path /opt/huawei/Bigdata/jdk/jre/lib/security";
+    
+    private static final String LOGIN_FAILED_CAUSE_PRINCIPAL_WRONG =
+        "(no rule) the principal format is not supported by default; add the property hadoop.security.auth_to_local (in core-site.xml) with value RULE:[1:$1] RULE:[2:$1]";
+    
+    private static final String LOGIN_FAILED_CAUSE_TIME_OUT =
+        "(time out) cannot connect to the KDC server, or there is a firewall in the network";
+    
+    private static final boolean IS_IBM_JDK = System.getProperty("java.vendor").contains("IBM");
+    
+    public synchronized static void login(String userPrincipal, String userKeytabPath, String krb5ConfPath, Configuration conf)
+        throws IOException
+    {
+        // 1.check input parameters
+        if ((userPrincipal == null) || (userPrincipal.length() <= 0))
+        {
+            LOG.error("input userPrincipal is invalid.");
+            throw new IOException("input userPrincipal is invalid.");
+        }
+        
+        if ((userKeytabPath == null) || (userKeytabPath.length() <= 0))
+        {
+            LOG.error("input userKeytabPath is invalid.");
+            throw new IOException("input userKeytabPath is invalid.");
+        }
+                
+        if ((krb5ConfPath == null) || (krb5ConfPath.length() <= 0))
+        {
+            LOG.error("input krb5ConfPath is invalid.");
+            throw new IOException("input krb5ConfPath is invalid.");
+        }
+        
+        if ((conf == null))
+        {
+            LOG.error("input conf is invalid.");
+            throw new IOException("input conf is invalid.");
+        }
+        
+        // 2. check that the keytab and krb5.conf files exist
+        File userKeytabFile = new File(userKeytabPath);
+        if (!userKeytabFile.exists())
+        {
+            LOG.error("userKeytabFile(" + userKeytabFile.getAbsolutePath() + ") does not exsit.");
+            throw new IOException("userKeytabFile(" + userKeytabFile.getAbsolutePath() + ") does not exsit.");
+        }
+        if (!userKeytabFile.isFile())
+        {
+            LOG.error("userKeytabFile(" + userKeytabFile.getAbsolutePath() + ") is not a file.");
+            throw new IOException("userKeytabFile(" + userKeytabFile.getAbsolutePath() + ") is not a file.");
+        }
+        
+        File krb5ConfFile = new File(krb5ConfPath);
+        if (!krb5ConfFile.exists())
+        {
+            LOG.error("krb5ConfFile(" + krb5ConfFile.getAbsolutePath() + ") does not exsit.");
+            throw new IOException("krb5ConfFile(" + krb5ConfFile.getAbsolutePath() + ") does not exsit.");
+        }
+        if (!krb5ConfFile.isFile())
+        {
+            LOG.error("krb5ConfFile(" + krb5ConfFile.getAbsolutePath() + ") is not a file.");
+            throw new IOException("krb5ConfFile(" + krb5ConfFile.getAbsolutePath() + ") is not a file.");
+        }
+        
+        // 3.set and check krb5config
+        setKrb5Config(krb5ConfFile.getAbsolutePath());        
+        setConfiguration(conf);
+        
+        // 4.login and check for hadoop
+        loginHadoop(userPrincipal, userKeytabFile.getAbsolutePath());
+        LOG.info("Login success!!!!!!!!!!!!!!");
+    }
+    
+    private static void setConfiguration(Configuration conf) throws IOException {
+        UserGroupInformation.setConfiguration(conf);
+    }
+        
+    private static boolean checkNeedLogin(String principal)
+        throws IOException
+    {
+        if (!UserGroupInformation.isSecurityEnabled())
+        {
+            LOG.error("UserGroupInformation is not SecurityEnabled, please check if core-site.xml exists in classpath.");
+            throw new IOException(
+                "UserGroupInformation is not SecurityEnabled, please check if core-site.xml exists in classpath.");
+        }
+        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+        if ((currentUser != null) && (currentUser.hasKerberosCredentials()))
+        {
+            if (checkCurrentUserCorrect(principal))
+            {
+                LOG.info("current user is " + currentUser + "has logined.");
+                if (!currentUser.isFromKeytab())
+                {
+                    LOG.error("current user is not from keytab.");
+                    throw new IOException("current user is not from keytab.");
+                }
+                return false;
+            }
+            else
+            {
+                LOG.error("current user is " + currentUser + "has logined. please check your enviroment , especially when it used IBM JDK or kerberos for OS count login!!");
+                throw new IOException("current user is " + currentUser + " has logined. And please check your enviroment!!");
+            }
+        }
+        
+        return true;
+    }
+    
+    private static void setKrb5Config(String krb5ConfFile)
+        throws IOException
+    {
+        System.setProperty(JAVA_SECURITY_KRB5_CONF_KEY, krb5ConfFile);
+        String ret = System.getProperty(JAVA_SECURITY_KRB5_CONF_KEY);
+        if (ret == null)
+        {
+            LOG.error(JAVA_SECURITY_KRB5_CONF_KEY + " is null.");
+            throw new IOException(JAVA_SECURITY_KRB5_CONF_KEY + " is null.");
+        }
+        if (!ret.equals(krb5ConfFile))
+        {
+            LOG.error(JAVA_SECURITY_KRB5_CONF_KEY + " is " + ret + ", not " + krb5ConfFile + ".");
+            throw new IOException(JAVA_SECURITY_KRB5_CONF_KEY + " is " + ret + ", not " + krb5ConfFile + ".");
+        }
+    }
+    
+    public static void setJaasConf(String loginContextName, String principal, String keytabFile)
+        throws IOException
+    {
+        if ((loginContextName == null) || (loginContextName.length() <= 0))
+        {
+            LOG.error("input loginContextName is invalid.");
+            throw new IOException("input loginContextName is invalid.");
+        }
+        
+        if ((principal == null) || (principal.length() <= 0))
+        {
+            LOG.error("input principal is invalid.");
+            throw new IOException("input principal is invalid.");
+        }
+        
+        if ((keytabFile == null) || (keytabFile.length() <= 0))
+        {
+            LOG.error("input keytabFile is invalid.");
+            throw new IOException("input keytabFile is invalid.");
+        }
+        
+        File userKeytabFile = new File(keytabFile);
+        if (!userKeytabFile.exists())
+        {
+            LOG.error("userKeytabFile(" + userKeytabFile.getAbsolutePath() + ") does not exsit.");
+            throw new IOException("userKeytabFile(" + userKeytabFile.getAbsolutePath() + ") does not exsit.");
+        }
+    	
+        javax.security.auth.login.Configuration.setConfiguration(new JaasConfiguration(loginContextName, principal,
+            userKeytabFile.getAbsolutePath()));
+        
+        javax.security.auth.login.Configuration conf = javax.security.auth.login.Configuration.getConfiguration();
+        if (!(conf instanceof JaasConfiguration))
+        {
+            LOG.error("javax.security.auth.login.Configuration is not JaasConfiguration.");
+            throw new IOException("javax.security.auth.login.Configuration is not JaasConfiguration.");
+        }
+        
+    	AppConfigurationEntry[] entrys = conf.getAppConfigurationEntry(loginContextName);
+        if (entrys == null)
+        {
+            LOG.error("javax.security.auth.login.Configuration has no AppConfigurationEntry named " + loginContextName
+                + ".");
+            throw new IOException("javax.security.auth.login.Configuration has no AppConfigurationEntry named "
+                + loginContextName + ".");
+        }
+        
+        boolean checkPrincipal = false;
+        boolean checkKeytab = false;        
+        for (int i = 0; i < entrys.length; i++)
+        {
+            if (entrys[i].getOptions().get("principal").equals(principal))
+            {
+                checkPrincipal = true;
+            }
+            
+            if (IS_IBM_JDK)
+            {
+                if (entrys[i].getOptions().get("useKeytab").equals(keytabFile))
+                {
+                    checkKeytab = true;
+                }
+            }
+            else 
+            {
+                if (entrys[i].getOptions().get("keyTab").equals(keytabFile))
+                {
+                    checkKeytab = true;
+                }
+            }
+
+        }
+        
+        if (!checkPrincipal)
+        {
+            LOG.error("AppConfigurationEntry named " + loginContextName + " does not have principal value of "
+                + principal + ".");
+            throw new IOException("AppConfigurationEntry named " + loginContextName
+                + " does not have principal value of " + principal + ".");
+        }
+        
+        if (!checkKeytab)
+        {
+            LOG.error("AppConfigurationEntry named " + loginContextName + " does not have keyTab value of "
+                + keytabFile + ".");
+            throw new IOException("AppConfigurationEntry named " + loginContextName + " does not have keyTab value of "
+                + keytabFile + ".");
+        }
+        
+    }
+        
+    public static void setZookeeperServerPrincipal(String zkServerPrincipalKey, String zkServerPrincipal)
+        throws IOException
+    {
+        System.setProperty(zkServerPrincipalKey, zkServerPrincipal);
+        String ret = System.getProperty(zkServerPrincipalKey);
+        if (ret == null)
+        {
+            LOG.error(zkServerPrincipalKey + " is null.");
+            throw new IOException(zkServerPrincipalKey + " is null.");
+        }
+        if (!ret.equals(zkServerPrincipal))
+        {
+            LOG.error(zkServerPrincipalKey + " is " + ret + ", not " + zkServerPrincipal + ".");
+            throw new IOException(zkServerPrincipalKey + " is " + ret + ", not " + zkServerPrincipal + ".");
+        }
+    }
+    
+    private static void loginHadoop(String principal, String keytabFile)
+        throws IOException
+    {
+        try
+        {
+            UserGroupInformation.loginUserFromKeytab(principal, keytabFile);
+        }
+        catch (IOException e)
+        {
+            LOG.error("login failed with " + principal + " and " + keytabFile + ".");
+            LOG.error("perhaps cause 1 is " + LOGIN_FAILED_CAUSE_PASSWORD_WRONG + ".");
+            LOG.error("perhaps cause 2 is " + LOGIN_FAILED_CAUSE_TIME_WRONG + ".");
+            LOG.error("perhaps cause 3 is " + LOGIN_FAILED_CAUSE_AES256_WRONG + ".");
+            LOG.error("perhaps cause 4 is " + LOGIN_FAILED_CAUSE_PRINCIPAL_WRONG + ".");
+            LOG.error("perhaps cause 5 is " + LOGIN_FAILED_CAUSE_TIME_OUT + ".");
+            
+            throw e;
+        }
+    }
+    
+    private static void checkAuthenticateOverKrb()
+        throws IOException
+    {
+        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
+        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+        if (loginUser == null)
+        {
+            LOG.error("current user is " + currentUser + ", but loginUser is null.");
+            throw new IOException("current user is " + currentUser + ", but loginUser is null.");
+        }
+        if (!loginUser.equals(currentUser))
+        {
+            LOG.error("current user is " + currentUser + ", but loginUser is " + loginUser + ".");
+            throw new IOException("current user is " + currentUser + ", but loginUser is " + loginUser + ".");
+        }
+        if (!loginUser.hasKerberosCredentials())
+        {
+            LOG.error("current user is " + currentUser + " has no Kerberos Credentials.");
+            throw new IOException("current user is " + currentUser + " has no Kerberos Credentials.");
+        }
+        if (!UserGroupInformation.isLoginKeytabBased())
+        {
+            LOG.error("current user is " + currentUser + " is not Login Keytab Based.");
+            throw new IOException("current user is " + currentUser + " is not Login Keytab Based.");
+        }
+    }
+    
+    private static boolean checkCurrentUserCorrect(String principal)
+        throws IOException
+    {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        if (ugi == null)
+        {
+            LOG.error("current user still null.");
+            throw new IOException("current user still null.");
+        }
+        
+        String defaultRealm = null;
+        try {
+            defaultRealm = KerberosUtil.getDefaultRealm();
+        } catch (Exception e) {
+            LOG.warn("getDefaultRealm failed.");
+            throw new IOException(e);
+        }
+
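+        // If the principal carries no realm, append the default realm before comparing with the current UGI user name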
+        if ((defaultRealm != null) && (defaultRealm.length() > 0))
+        {
+            StringBuilder realm = new StringBuilder();
+            StringBuilder principalWithRealm = new StringBuilder();
+            realm.append("@").append(defaultRealm);
+            if (!principal.endsWith(realm.toString()))
+            {
+                principalWithRealm.append(principal).append(realm);
+                principal = principalWithRealm.toString();
+            }
+        }
+        
+        return principal.equals(ugi.getUserName());
+    }
+    
+    /**
+     * Copied from HBase ZKUtil (0.94 &amp; 0.98). A JAAS configuration that defines the login modules
+     * that we want to use for login.
+     */
+    private static class JaasConfiguration extends javax.security.auth.login.Configuration
+    {        
+        private static final Map<String, String> BASIC_JAAS_OPTIONS = new HashMap<String, String>();
+        static
+        {
+            String jaasEnvVar = System.getenv("HBASE_JAAS_DEBUG");
+            if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar))
+            {
+                BASIC_JAAS_OPTIONS.put("debug", "true");
+            }
+        }
+        
+        private static final Map<String, String> KEYTAB_KERBEROS_OPTIONS = new HashMap<String, String>();
+        static
+        {
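+            // The IBM JDK Krb5LoginModule and the Oracle/OpenJDK module use different option names,
+            // so the keytab-related options are populated per JDK vendor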
+            if (IS_IBM_JDK)
+            {
+                KEYTAB_KERBEROS_OPTIONS.put("credsType", "both");
+            }
+            else {
+                KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
+                KEYTAB_KERBEROS_OPTIONS.put("useTicketCache", "false");
+                KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
+                KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
+            }
+        	
+            KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
+        }
+        
+        
+        
+        private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN = new AppConfigurationEntry(
+            KerberosUtil.getKrb5LoginModuleName(), LoginModuleControlFlag.REQUIRED, KEYTAB_KERBEROS_OPTIONS);
+        
+        private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF =
+            new AppConfigurationEntry[] {KEYTAB_KERBEROS_LOGIN};
+        
+        private javax.security.auth.login.Configuration baseConfig;
+        
+        private final String loginContextName;
+        
+        private final boolean useTicketCache;
+        
+        private final String keytabFile;
+        
+        private final String principal;
+        
+        
+        public JaasConfiguration(String loginContextName, String principal, String keytabFile) throws IOException
+        {
+            this(loginContextName, principal, keytabFile, keytabFile == null || keytabFile.length() == 0);
+        }
+        
+        private JaasConfiguration(String loginContextName, String principal, String keytabFile, boolean useTicketCache) throws IOException
+        {
+            try
+            {
+                this.baseConfig = javax.security.auth.login.Configuration.getConfiguration();
+            }
+            catch (SecurityException e)
+            {
+                this.baseConfig = null;
+            }
+            this.loginContextName = loginContextName;
+            this.useTicketCache = useTicketCache;
+            this.keytabFile = keytabFile;
+            this.principal = principal;
+            
+            initKerberosOption();
+            LOG.info("JaasConfiguration loginContextName=" + loginContextName + " principal=" + principal
+                + " useTicketCache=" + useTicketCache + " keytabFile=" + keytabFile);
+        }
+        
+        private void initKerberosOption() throws IOException
+        {
+            if (!useTicketCache)
+            {
+                if (IS_IBM_JDK)
+                {
+                    KEYTAB_KERBEROS_OPTIONS.put("useKeytab", keytabFile);
+                }
+                else
+                {
+                    KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
+                    KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
+                    KEYTAB_KERBEROS_OPTIONS.put("useTicketCache", useTicketCache ? "true" : "false");
+                }
+            }
+            KEYTAB_KERBEROS_OPTIONS.put("principal", principal);            
+        }
+        
+        @Override
+        public AppConfigurationEntry[] getAppConfigurationEntry(String appName)
+        {
+            if (loginContextName.equals(appName))
+            {
+                return KEYTAB_KERBEROS_CONF;
+            }
+            if (baseConfig != null)
+            {
+                return baseConfig.getAppConfigurationEntry(appName);
+            }
+            return null;
+        }
+    }
+}
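In a security-mode cluster, LoginUtil.login is typically called once in the driver before any HDFS or MapReduce API is used. The fragment below is a minimal sketch of such a call site; the principal, keytab path, and krb5.conf path are placeholders, not values taken from this commit.

// Minimal sketch (placeholder principal and paths): authenticate before using Hadoop APIs.
import org.apache.hadoop.conf.Configuration;

import com.huawei.bigdata.mapreduce.tools.LoginUtil;

public class LoginSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();       // expects the cluster's core-site.xml on the classpath
    String userPrincipal = "developuser";           // placeholder principal
    String userKeytab = "conf/user.keytab";         // placeholder keytab path
    String krb5Conf = "conf/krb5.conf";             // placeholder krb5.conf path

    LoginUtil.login(userPrincipal, userKeytab, krb5Conf, conf);
    // After a successful login, FileSystem.get(conf) and Job.getInstance(conf) run as this user.

    // Only needed when the job also talks to ZooKeeper; the context name and principal here are placeholders.
    // LoginUtil.setJaasConf("Client", userPrincipal, userKeytab);
    // LoginUtil.setZookeeperServerPrincipal("zookeeper.server.principal", "zookeeper/hadoop");
  }
}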

+ 117 - 0
src/com/huawei/bigdata/mapreduce/tools/TarManager.java

@@ -0,0 +1,117 @@
+package com.huawei.bigdata.mapreduce.tools;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.jar.Attributes;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
+
+public class TarManager {
+
+  /**
+   * Main class recorded in the JAR manifest
+   */
+  private static String MAIN_CLASS = "com.huawei.bigdata.mapreduce.demo.WordCount";
+
+  /**
+   * Name of the JAR file to create
+   */
+  private static String JAR_NAME = "mapreduce-demo.jar";
+
+  /**
+   * Add a file (recursing into directories) to the JAR
+   *
+   * @param source File : source file
+   * @param target JarOutputStream : OutputStream
+   * @throws IOException
+   */
+  private static void add(File source, JarOutputStream target)
+      throws IOException {
+    BufferedInputStream in = null;
+    try {
+      if (source.isDirectory()) {
+
+        for (File nestedFile : source.listFiles())
+          add(nestedFile, target);
+
+        return;
+      }
+
+      // strip the leading bin directory (JAR entries start from the com directory)
+      String path = source.getPath();
+      int index = path.indexOf("com");
+      path = path.substring(index);
+
+      JarEntry entry = new JarEntry(path.replace("\\", "/"));
+      entry.setTime(source.lastModified());
+      target.putNextEntry(entry);
+      in = new BufferedInputStream(new FileInputStream(source));
+
+      // Copy
+      byte[] buffer = new byte[1024];
+      while (true) {
+        int count = in.read(buffer);
+        if (count == -1)
+          break;
+        target.write(buffer, 0, count);
+      }
+      target.closeEntry();
+    } finally {
+      if (in != null)
+        in.close();
+    }
+  }
+
+  /**
+   * Create the project JAR; it must exist before the job is run.
+   *
+   * @throws Exception if the project has not been compiled or the JAR cannot be written
+   */
+  public static void createJar() throws Exception {
+    // Relative path
+    String classPath = "bin" + File.separator
+        + "com";
+
+    // Check that the project has already been compiled
+    File cpFile = new File(classPath);
+    if (!cpFile.exists()) {
+      throw new IOException("the class path does not exist.");
+    }
+
+    String[] child = cpFile.list();
+    if (child == null || child.length <= 0) {
+      throw new Exception("Please complie the project,then do this.");
+    }
+
+    // Delete JAR
+    File jarFile = new File(JAR_NAME);
+    if (jarFile.exists()) {
+      jarFile.delete();
+    }
+
+    // Create JAR
+    File sourceFile = new File(classPath);
+    File[] files = {sourceFile};
+
+    Manifest manifest = new Manifest();
+    manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION,
+        "1.0");
+    manifest.getMainAttributes().put(Attributes.Name.MAIN_CLASS, MAIN_CLASS);
+
+    // JAR name
+    JarOutputStream target = new JarOutputStream(new FileOutputStream(
+        JAR_NAME), manifest);
+
+    // Add the compiled classes to the JAR
+    for (int i = 0; i < files.length; i++) {
+      add(files[i], target);
+    }
+
+    target.close();
+  }
+}
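TarManager.createJar() packages the compiled classes under bin/com into mapreduce-demo.jar, so a driver has to invoke it (and point the job at the resulting JAR) before submitting. The fragment below is a minimal sketch of that sequence; the job name and the input/output paths are placeholders, not part of this commit.

// Minimal sketch (placeholder job name and paths): build the JAR, then hand it to the job.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.huawei.bigdata.mapreduce.tools.TarManager;

public class SubmitSketch {
  public static void main(String[] args) throws Exception {
    TarManager.createJar();                        // writes mapreduce-demo.jar into the working directory

    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "word count sketch");
    job.setJar("mapreduce-demo.jar");              // ship the freshly built JAR instead of setJarByClass
    FileInputFormat.addInputPath(job, new Path("/tmp/input"));    // placeholder input path
    FileOutputFormat.setOutputPath(job, new Path("/tmp/output")); // placeholder output path
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}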