While setting up HA, one command kept the whole build from ever running successfully:
[root@master src]# chown -R hadoop:hadoop /usr/local/src/java
chown: invalid user: "hadoop:hadoop"
If I don't want to add that user, which configuration files do I need to change to get the setup working? The configurations are given below.
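From what I can tell, the error itself just means that no hadoop user or group exists on the node yet, so there seem to be two ways out (a minimal sketch; the path is the one from my configs):

# Option 1: create the user and group, then the chown works
groupadd hadoop
useradd -g hadoop hadoop
chown -R hadoop:hadoop /usr/local/src/java

# Option 2: keep running everything as root and skip the chown entirely;
# nothing in the XML files below depends on a hadoop user existing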
The profile configuration file:
export JAVA_HOME=/usr/local/src/java
export PATH=$PATH:$JAVA_HOME/bin
export ZK_HOME=/usr/local/src/zookeeper
export PATH=$PATH:$ZK_HOME/bin
export HADOOP_HOME=/usr/local/src/hadoop
export HADOOP_PREFIX=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_OPTS="-Djava.library.path=$HADOOP_INSTALL/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
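After editing the profile it has to be re-read, otherwise the hadoop and zkServer.sh commands are not on PATH; this is how I verify it (assuming the three install directories above actually exist):

source /etc/profile
java -version        # should report the JDK under /usr/local/src/java
hadoop version       # should report the Hadoop release
echo $HADOOP_HOME    # should print /usr/local/src/hadoop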
The core-site.xml configuration file:
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/src/hadoop/tmp</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
<name>ha.zookeeper.session-timeout.ms</name>
<value>30000</value>
<description>ms</description>
</property>
<property>
<name>fs.trash.interval</name>
<value>1440</value>
</property>
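Two notes on core-site.xml: fs.defaultFS is case-sensitive (a misspelled key is silently ignored), and hdfs://mycluster has to match dfs.nameservices in hdfs-site.xml below. The tmp directory is also not created automatically, so on every node:

mkdir -p /usr/local/src/hadoop/tmp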
The mapred-site.xml configuration file:
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
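The two addresses above only answer once the history server is actually running; on Hadoop 2.x it is a separate daemon, started on master:

mr-jobhistory-daemon.sh start historyserver
# the web UI should then come up on http://master:19888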
The hdfs-site.xml configuration file:
<property>
<name>dfs.qjournal.start-segment.timeout.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>master,slave1</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.master</name>
<value>master:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.slave1</name>
<value>slave1:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.master</name>
<value>master:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.slave1</name>
<value>slave1:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://master:8485;slave1:8485;slave2:8485/mycluster</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.support.append</name>
<value>true</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/usr/local/src/hadoop/tmp/hdfs/nn</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/usr/local/src/hadoop/tmp/hdfs/dn</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/local/src/hadoop/tmp/hdfs/jn</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<property>
<name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
<value>60000</value>
</property>
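Since the nn/dn/jn directories above are not created automatically and an HA cluster has a strict first-start order, this is the bring-up sequence as I understand it (a sketch; ZooKeeper is assumed to be installed on all three nodes as configured):

# on master, slave1 and slave2: data directories and ZooKeeper
mkdir -p /usr/local/src/hadoop/tmp/hdfs/{nn,dn,jn}
zkServer.sh start
# on all three nodes: JournalNodes must be up before formatting
hadoop-daemon.sh start journalnode
# on master only: format HDFS and the failover znode
hdfs namenode -format
hdfs zkfc -formatZK
hadoop-daemon.sh start namenode
# on slave1: pull the formatted metadata instead of formatting again
hdfs namenode -bootstrapStandby
# back on master: start everything else (datanodes, zkfc, standby namenode)
start-dfs.sh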
The yarn-site.xml configuration file:
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>master</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>slave1</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>86400</value>
</property>
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
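One property seems to be missing here: with yarn.resourcemanager.ha.enabled set, YARN also requires a yarn.resourcemanager.cluster-id (any stable string, e.g. yarn-ha). Once that is in, this is how I would start and check both resource managers (rm1/rm2 are the IDs configured above; on Hadoop 2.x start-yarn.sh does not start the second RM by itself):

start-yarn.sh                          # on master: starts rm1 and the nodemanagers
yarn-daemon.sh start resourcemanager   # on slave1: rm2 has to be started by hand
yarn rmadmin -getServiceState rm1      # expect one active and one standby
yarn rmadmin -getServiceState rm2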
The hadoop-env.sh file:
export JAVA_HOME=/usr/local/src/java
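Every daemon reads hadoop-env.sh, so JAVA_HOME has to point at a real JDK directory or they exit right at startup; a quick sanity check:

ls /usr/local/src/java/bin/java   # must exist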
Those are roughly all the main files; if I've missed anything, I'd appreciate it if someone knowledgeable could point it out.