输入命令时 出现如下报错
hdfs namenode -bootstrapStandby
22/07/27 10:15:27 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
22/07/27 10:15:27 INFO namenode.NameNode: createNameNode [-bootstrapStandby]
22/07/27 10:15:27 ERROR namenode.NameNode: Failed to start namenode.
java.io.IOException: org.apache.hadoop.HadoopIllegalArgumentException: Shared edits storage is not enabled for this namenode.
at org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby.run(BootstrapStandby.java:460)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1680)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1782)
Caused by: org.apache.hadoop.HadoopIllegalArgumentException: Shared edits storage is not enabled for this namenode.
at org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby.parseConfAndFindOtherNN(BootstrapStandby.java:411)
at org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby.run(BootstrapStandby.java:107)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:90)
at org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby.run(BootstrapStandby.java:455)
... 2 more
22/07/27 10:15:27 INFO util.ExitUtil: Exiting with status 1: java.io.IOException: org.apache.hadoop.HadoopIllegalArgumentException: Shared edits storage is not enabled for this namenode.
22/07/27 10:15:27 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at hadoop02/192.168.110.249
************************************************************/
hdfs-core.xml配置如下
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.block.size</name>
<value>134217728</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///usr/local/hadoop/data/hdfs/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///usr/local/hadoop/data/hdfs/datanode</value>
</property>
<property>
<name>dfs.namenode.edits.dir</name>
<value>file:///usr/local/hadoop/data/hdfs/nn/edits</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>ns</value>
</property>
<property>
<name>dfs.ha.namenodes.ns</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns.nn1</name>
<value>hadoop01:9000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns.nn2</name>
<value>hadoop02:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.ns.nn1</name>
<value>hadoop01:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.ns.nn2</name>
<value>hadoop02:50070</value>
</property>
<property>
<name>dfs.namenode.shard.edits.dir</name>
<value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/ns</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop01:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled.ns</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>~/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.ns</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
</configuration>