Dinky 0.7.1 + Flink CDC 2.3.0 + Flink 1.14.5 + Kerberos: whole-database sync writes the data to HDFS, but the metastore connection fails, so no table structure is created?
Kerberos is configured on all three HDFS machines. The Hive service principal is hive/hadoop102@ZHT.COM, and the Hadoop principal is hadoop/hadoop102@ZHT.COM.
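To rule out the principals and keytab, this is the standalone connectivity check I would run against the metastore first, outside of Flink (a minimal sketch: the keytab path and class name are placeholders, and the property names assume Hive 3.1.2):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.security.UserGroupInformation;

public class MetastoreKerberosCheck {
    public static void main(String[] args) throws Exception {
        Configuration hadoopConf = new Configuration();
        hadoopConf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(hadoopConf);
        // Keytab path is an assumption; substitute the real one on the cluster.
        UserGroupInformation.loginUserFromKeytab(
                "hive/hadoop102@ZHT.COM", "/etc/security/keytab/hive.service.keytab");

        HiveConf hiveConf = new HiveConf();
        hiveConf.set("hive.metastore.uris", "thrift://hadoop102:9083");
        // Without these two settings the client opens a plain (non-SASL)
        // Thrift connection, which a Kerberized metastore rejects.
        hiveConf.set("hive.metastore.sasl.enabled", "true");
        hiveConf.set("hive.metastore.kerberos.principal", "hive/hadoop102@ZHT.COM");

        HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
        System.out.println("databases: " + client.getAllDatabases());
        client.close();
    }
}
```

If this check succeeds, the principals are fine and the problem is in the configuration the Flink job picks up.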
The Hive metastore log shows the following error:
2023-03-05 21:26:13,327 ERROR [pool-6-thread-77] server.TThreadPoolServer (TThreadPoolServer.java:run(297)) - Error occurred during processing of message.
java.lang.RuntimeException: org.apache.thrift.transport.TTransportException: Invalid status -128
at org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:219)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:694)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory$1.run(HadoopThriftAuthBridge.java:691)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:360)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1709)
at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory.getTransport(HadoopThriftAuthBridge.java:691)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:269)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.thrift.transport.TTransportException: Invalid status -128
at org.apache.thrift.transport.TSaslTransport.sendAndThrowMessage(TSaslTransport.java:232)
at org.apache.thrift.transport.TSaslTransport.receiveSaslMessage(TSaslTransport.java:184)
at org.apache.thrift.transport.TSaslServerTransport.handleSaslStartMessage(TSaslServerTransport.java:125)
at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:271)
at org.apache.thrift.transport.TSaslServerTransport.open(TSaslServerTransport.java:41)
at org.apache.thrift.transport.TSaslServerTransport$Factory.getTransport(TSaslServerTransport.java:216)
The Flink CDC job (the Hudi hive-sync thread) reports the corresponding client-side error:
2023-03-05 20:53:14,097 INFO org.apache.hadoop.hive.metastore.HiveMetaStoreClient [] - Closed a connection to metastore, current connections: 5
2023-03-05 20:53:14,097 INFO org.apache.hadoop.hive.metastore.HiveMetaStoreClient [] - Trying to connect to metastore with URI thrift://hadoop102:9083
2023-03-05 20:53:14,097 INFO org.apache.hadoop.hive.metastore.HiveMetaStoreClient [] - Opened a connection to metastore, current connections: 6
2023-03-05 20:53:14,107 WARN org.apache.hadoop.hive.metastore.HiveMetaStoreClient [] - set_ugi() not successful, Likely cause: new client talking to old server. Continuing without it.
org.apache.thrift.transport.TTransportException: null
at org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.thrift.protocol.TBinaryProtocol.readStringBody(TBinaryProtocol.java:380) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:230) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.thrift.TServiceClient.receiveBase(TServiceClient.java:77) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.recv_set_ugi(ThriftHiveMetastore.java:4787) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Client.set_ugi(ThriftHiveMetastore.java:4773) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.open(HiveMetaStoreClient.java:534) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.reconnect(HiveMetaStoreClient.java:379) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient$1.run(RetryingMetaStoreClient.java:187) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0_212]
at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0_212]
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1898) ~[flink-shaded-hadoop-3-uber-3.1.1.7.2.9.0-173-9.0.jar:3.1.1.7.2.9.0-173-9.0]
at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.invoke(RetryingMetaStoreClient.java:183) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at com.sun.proxy.$Proxy72.getAllFunctions(Unknown Source) ~[?:?]
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0_212]
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0_212]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_212]
at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_212]
at org.apache.hadoop.hive.metastore.HiveMetaStoreClient$SynchronizedHandler.invoke(HiveMetaStoreClient.java:2773) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at com.sun.proxy.$Proxy72.getAllFunctions(Unknown Source) ~[?:?]
at org.apache.hadoop.hive.ql.metadata.Hive.getAllFunctions(Hive.java:4603) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.ql.metadata.Hive.reloadFunctions(Hive.java:291) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.ql.metadata.Hive.registerAllFunctionsOnce(Hive.java:274) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.ql.metadata.Hive.<init>(Hive.java:435) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.ql.metadata.Hive.create(Hive.java:375) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.ql.metadata.Hive.getInternal(Hive.java:355) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hadoop.hive.ql.metadata.Hive.get(Hive.java:331) ~[flink-sql-connector-hive-3.1.2_2.12-1.14.5.jar:1.14.5]
at org.apache.hudi.hive.ddl.HMSDDLExecutor.<init>(HMSDDLExecutor.java:78) ~[hudi-flink1.14-bundle-0.12.0.jar:0.12.0]
at org.apache.hudi.hive.HoodieHiveSyncClient.<init>(HoodieHiveSyncClient.java:79) ~[hudi-flink1.14-bundle-0.12.0.jar:0.12.0]
at org.apache.hudi.hive.HiveSyncTool.initSyncClient(HiveSyncTool.java:101) ~[hudi-flink1.14-bundle-0.12.0.jar:0.12.0]
at org.apache.hudi.hive.HiveSyncTool.<init>(HiveSyncTool.java:95) ~[hudi-flink1.14-bundle-0.12.0.jar:0.12.0]
at org.apache.hudi.sink.utils.HiveSyncContext.hiveSyncTool(HiveSyncContext.java:79) ~[hudi-flink1.14-bundle-0.12.0.jar:0.12.0]
at org.apache.hudi.sink.StreamWriteOperatorCoordinator.doSyncHive(StreamWriteOperatorCoordinator.java:335) ~[hudi-flink1.14-bundle-0.12.0.jar:0.12.0]
at org.apache.hudi.sink.utils.NonThrownExecutor.lambda$wrapAction$0(NonThrownExecutor.java:130) ~[hudi-flink1.14-bundle-0.12.0.jar:0.12.0]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_212]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_212]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_212]
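My reading of the two traces: the metastore rejects a non-SASL handshake ("Invalid status -128"), and the client then falls back to a plain set_ugi() call, which suggests the HiveConf that the Hudi sync builds inside the Flink job does not have hive.metastore.sasl.enabled set. Below is a sketch of how I would reproduce the failing path, the same Hive.get entry point shown in the HMSDDLExecutor trace, under an explicit keytab login with SASL enabled (keytab path is a placeholder; property names assume Hive 3.1.2):

```java
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.security.UserGroupInformation;

public class HiveSyncLoginSketch {
    public static void main(String[] args) throws Exception {
        Configuration hadoopConf = new Configuration();
        hadoopConf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(hadoopConf);

        // Hypothetical keytab path; the principal matches my cluster.
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "hive/hadoop102@ZHT.COM", "/etc/security/keytab/hive.service.keytab");

        HiveConf hiveConf = new HiveConf();
        hiveConf.set("hive.metastore.uris", "thrift://hadoop102:9083");
        hiveConf.set("hive.metastore.sasl.enabled", "true"); // forces the SASL/Kerberos transport
        hiveConf.set("hive.metastore.kerberos.principal", "hive/hadoop102@ZHT.COM");

        // Same call the stack trace shows HMSDDLExecutor making,
        // but executed under the logged-in UGI.
        ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
            Hive.get(hiveConf);
            return null;
        });
        System.out.println("Hive.get succeeded under the Kerberos login");
    }
}
```

Is this the right direction, or does Dinky/Hudi expect the Kerberos login to come from somewhere else (for example Flink's own security settings) for whole-database sync?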