2018-05-29 18:42:42,749 ERROR namenode.NameNode: Failed to start namenode.
java.lang.IllegalArgumentException: URI has an authority component
at java.io.File.&lt;init&gt;(File.java:423)
at org.apache.hadoop.hdfs.server.namenode.NNStorage.getStorageDirectory(NNStorage.java:341)
at org.apache.hadoop.hdfs.server.namenode.FSEditLog.initJournals(FSEditLog.java:288)
at org.apache.hadoop.hdfs.server.namenode.FSEditLog.initJournalsForWrite(FSEditLog.java:259)
at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1169)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1631)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1741)
hdfs-site.xml文件
**cat hdfs-site.xml**
<!--指定hdfs的nameservice为ns1,需要和core-site.xml中的保持一致 -->
dfs.replication
3
dfs.nameservices
ns1
<!-- ns1下面有三个NameNode,分别是nn1,nn2,nn3 -->
dfs.ha.namenodes.ns1
nn1,nn2,nn3
<!-- nn1的RPC通信地址 -->
dfs.namenode.rpc-address.ns1.nn1
pinpoint-1:9000
<!-- nn1的http通信地址 -->
dfs.namenode.http-address.ns1.nn1
pinpoint-1:50070
<!-- nn2的RPC通信地址 -->
dfs.namenode.rpc-address.ns1.nn2
pinpoint-2:9000
dfs.namenode.http-address.ns1.nn2
pinpoint-2:50070
<!-- nn3的RPC通信地址 -->
dfs.namenode.rpc-address.ns1.nn3
pinpoint-3:9000
dfs.namenode.http-address.ns1.nn3
pinpoint-3:50070
<!-- 指定NameNode的元数据在JournalNode上的存放位置 -->
dfs.namenode.shared.edits.dir
qjournal://pinpoint-1:8485;pinpoint-2:8485;pinpoint-3:8485/ns1
<!-- 指定JournalNode在本地磁盘存放数据的位置 -->
dfs.journalnode.edits.dir
/pinpoint/data/zookeeper/hadoop/journaldata
dfs.datanode.data.dir
/pinpoint/data/zookeeper/hadoop/dfs/data
<!-- 开启NameNode失败自动切换 -->
dfs.ha.automatic-failover.enabled
true
<!-- 配置失败自动切换实现方式 -->
dfs.client.failover.proxy.provider.ns1
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
<!-- 配置隔离机制方法,多个机制用换行分割,即每个机制暂用一行-->
dfs.ha.fencing.methods
sshfence
shell(/bin/true)
<!-- 使用sshfence隔离机制时需要ssh免登陆 -->
dfs.ha.fencing.ssh.private-key-files
/root/.ssh/id_rsa
<!-- 配置sshfence隔离机制超时时间 -->
dfs.ha.fencing.ssh.connect-timeout
30000
core-site.xml 文件
cat core-site.xml
<!--指定namenode的地址;HA模式下必须使用nameservice逻辑名,不能带端口号-->
fs.defaultFS
hdfs://ns1
<!--用来指定使用hadoop时产生文件的存放目录;必须是本地路径,不能带file://前缀,
    否则默认的dfs.namenode.name.dir(file://${hadoop.tmp.dir}/dfs/name)会拼出带
    authority的URI,导致上面的"URI has an authority component"错误-->
hadoop.tmp.dir
/pinpoint/data/hadoop/tmp
<!--用来设置检查点备份日志的最长时间-->
fs.checkpoint.period
3600