flink on yarn - HA连接不到zookeeper
报错如下:
日志显示客户端在用 SASL 方式连接 ZooKeeper,但我并没有配置 SASL 认证(怀疑是环境里的 Hadoop/Kerberos 配置被 Flink 继承了)。这是我的 Flink 配置文件:
# Flink config.yaml — indentation restored (the paste had flattened all nesting,
# which is invalid YAML: duplicate top-level keys, flat keys that must be nested).
blob:
  server:
    port: '6124'

historyserver:
  web:
    address: 0.0.0.0
    port: 8082
  archive:
    fs:
      dir: hdfs://master:9000/flink/flink-logs
      refresh-interval: 10000

high-availability:
  type: zookeeper
  zookeeper:
    path:
      root: /flink
    # quoted: contains ':' and could otherwise trip YAML 1.1 sexagesimal parsing
    quorum: '172.18.0.4:2181'
  storageDir: hdfs://master:9000/flink/ha

jobmanager:
  execution:
    failover-strategy: region
  rpc:
    address: jobmanager
    port: 6123
  bind-host: 0.0.0.0
  archive:
    fs:
      dir: hdfs://master:9000/flink/flink-logs
  memory:
    process:
      size: 1600m

restart-strategy:
  type: fixed-delay
  fixed-delay:
    attempts: 3
    # quoted: value contains a space
    delay: '10 s'

state:
  savepoints:
    dir: hdfs://master:9000/flink/savepoints
  backend: filesystem
  checkpoints:
    dir: hdfs://master:9000/flink/checkpoints

query:
  server:
    port: '6125'

parallelism:
  default: 1

taskmanager:
  numberOfTaskSlots: 4
  memory:
    process:
      size: 1728m
  bind-host: 0.0.0.0

cluster:
  evenly-spread-out-slots: true

env:
  java:
    opts:
      all: --add-exports=java.base/sun.net.util=ALL-UNNAMED --add-exports=java.rmi/sun.rmi.registry=ALL-UNNAMED --add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED --add-exports=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED --add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED --add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED --add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED --add-exports=java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.text=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED --add-opens=java.base/java.util.concurrent.locks=ALL-UNNAMED

rest:
  bind-address: 0.0.0.0
  address: 0.0.0.0

classloader:
  # fixed key: the Flink option is 'classloader.check-leaked-classloader';
  # 'check leaked classloader' (with spaces) is not a recognized key
  check-leaked-classloader: false

fs:
  hdfs:
    hadoopconf: /hadoop/etc/hadoop/
ZooKeeper 配置文件(zoo.cfg)如下:
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data
clientPort=2181
# Maximum number of concurrent connections a single client IP may open
maxClientCnxns=60
# Number of snapshot files to retain when autopurge runs
autopurge.snapRetainCount=3
# Interval between automatic purges of snapshot files
autopurge.purgeInterval=1
# Enable standalone mode: ZooKeeper can run with a single server
standaloneEnabled=true
# Enable the admin server for remote management of the ZooKeeper server
admin.enableServer=true
# Listen on all IP addresses, not only the address in this config file
quorumListenOnAllIPs=true
# NOTE(review): the server.N lines below are commented out, so this ZooKeeper
# runs standalone — a single node, not a quorum. For real HA, uncomment them
# and create matching myid files on each node — verify against your deployment.
#server.1=172.18.0.3:2888:3888;2181
#server.2=172.18.0.4:2888:3888;2181
#server.3=172.18.0.5:2888:3888;2181
请问这种情况该怎么排查和处理?