HBase starts but hbase shell reports an error

Please help, experts!
HBase itself starts OK.
hbase shell has the following problem:

./bin/hbase shell

2016-04-05 08:53:06,328 ERROR [main] zookeeper.RecoverableZooKeeper: ZooKeeper exists failed after 4 attempts
2016-04-05 08:53:06,331 WARN [main] zookeeper.ZKUtil: hconnection-0x1f6917fb0x0, quorum=salve1:2181,master:2181,salve2:2181, baseZNode=/hbase Unable to set watcher on znode (/hbase/hbaseid)
org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /hbase/hbaseid
at org.apache.zookeeper.KeeperException.create(KeeperException.java:99)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)
at org.apache.zookeeper.ZooKeeper.exists(ZooKeeper.java:1045)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.exists(RecoverableZooKeeper.java:221)
at org.apache.hadoop.hbase.zookeeper.ZKUtil.checkExists(ZKUtil.java:482)
at org.apache.hadoop.hbase.zookeeper.ZKClusterId.readClusterIdZNode(ZKClusterId.java:65)
at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:86)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:833)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:623)
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:422)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:238)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:218)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:119)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at org.jruby.javasupport.JavaMethod.invokeDirectWithExceptionHandling(JavaMethod.java:450)
at org.jruby.javasupport.JavaMethod.invokeStaticDirect(JavaMethod.java:362)
at org.jruby.java.invokers.StaticMethodInvoker.call(StaticMethodInvoker.java:58)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:312)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:169)
at org.jruby.ast.CallOneArgNode.interpret(CallOneArgNode.java:57)
at org.jruby.ast.InstAsgnNode.interpret(InstAsgnNode.java:95)
at org.jruby.ast.NewlineNode.interpret(NewlineNode.java:104)
at org.jruby.ast.BlockNode.interpret(BlockNode.java:71)
at org.jruby.evaluator.ASTInterpreter.INTERPRET_METHOD(ASTInterpreter.java:74)
at org.jruby.internal.runtime.methods.InterpretedMethod.call(InterpretedMethod.java:169)
at org.jruby.internal.runtime.methods.DefaultMethod.call(DefaultMethod.java:191)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:302)
at org.jruby.runtime.callsite.CachingCallSite.callBlock(CachingCallSite.java:144)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:148)
at org.jruby.RubyClass.newInstance(RubyClass.java:822)
at org.jruby.RubyClass$i$newInstance.call(RubyClass$i$newInstance.gen:65535)
at org.jruby.internal.runtime.methods.JavaMethod$JavaMethodZeroOrNBlock.call(JavaMethod.java:249)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:292)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:135)
at usr.local.hadoop.hbase_minus_1_dot_0_dot_3.bin.hirb.__file__(/usr/local/hadoop/hbase-1.0.3/bin/hirb.rb:118)
at usr.local.hadoop.hbase_minus_1_dot_0_dot_3.bin.hirb.load(/usr/local/hadoop/hbase-1.0.3/bin/hirb.rb)
at org.jruby.Ruby.runScript(Ruby.java:697)
at org.jruby.Ruby.runScript(Ruby.java:690)
at org.jruby.Ruby.runNormally(Ruby.java:597)
at org.jruby.Ruby.runFromMain(Ruby.java:446)
at org.jruby.Main.doRunFromMain(Main.java:369)
at org.jruby.Main.internalRun(Main.java:258)
at org.jruby.Main.run(Main.java:224)
at org.jruby.Main.run(Main.java:208)
at org.jruby.Main.main(Main.java:188)
2016-04-05 08:53:06,338 ERROR [main] zookeeper.ZooKeeperWatcher: hconnection-0x1f6917fb0x0, quorum=salve1:2181,master:2181,salve2:2181, baseZNode=/hbase Received unexpected KeeperException, re-throwing exception
org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /hbase/hbaseid
at org.apache.zookeeper.KeeperException.create(KeeperException.java:99)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)
at org.apache.zookeeper.ZooKeeper.exists(ZooKeeper.java:1045)
at org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.exists(RecoverableZooKeeper.java:221)
at org.apache.hadoop.hbase.zookeeper.ZKUtil.checkExists(ZKUtil.java:482)
at org.apache.hadoop.hbase.zookeeper.ZKClusterId.readClusterIdZNode(ZKClusterId.java:65)
at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:86)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.retrieveClusterId(ConnectionManager.java:833)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.<init>(ConnectionManager.java:623)
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:422)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:238)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:218)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:119)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at org.jruby.javasupport.JavaMethod.invokeDirectWithExceptionHandling(JavaMethod.java:450)
at org.jruby.javasupport.JavaMethod.invokeStaticDirect(JavaMethod.java:362)
at org.jruby.java.invokers.StaticMethodInvoker.call(StaticMethodInvoker.java:58)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:312)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:169)
at org.jruby.ast.CallOneArgNode.interpret(CallOneArgNode.java:57)
at org.jruby.ast.InstAsgnNode.interpret(InstAsgnNode.java:95)
at org.jruby.ast.NewlineNode.interpret(NewlineNode.java:104)
at org.jruby.ast.BlockNode.interpret(BlockNode.java:71)
at org.jruby.evaluator.ASTInterpreter.INTERPRET_METHOD(ASTInterpreter.java:74)
at org.jruby.internal.runtime.methods.InterpretedMethod.call(InterpretedMethod.java:169)
at org.jruby.internal.runtime.methods.DefaultMethod.call(DefaultMethod.java:191)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:302)
at org.jruby.runtime.callsite.CachingCallSite.callBlock(CachingCallSite.java:144)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:148)
at org.jruby.RubyClass.newInstance(RubyClass.java:822)
at org.jruby.RubyClass$i$newInstance.call(RubyClass$i$newInstance.gen:65535)
at org.jruby.internal.runtime.methods.JavaMethod$JavaMethodZeroOrNBlock.call(JavaMethod.java:249)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:292)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:135)
at usr.local.hadoop.hbase_minus_1_dot_0_dot_3.bin.hirb.__file__(/usr/local/hadoop/hbase-1.0.3/bin/hirb.rb:118)
at usr.local.hadoop.hbase_minus_1_dot_0_dot_3.bin.hirb.load(/usr/local/hadoop/hbase-1.0.3/bin/hirb.rb)
at org.jruby.Ruby.runScript(Ruby.java:697)
at org.jruby.Ruby.runScript(Ruby.java:690)
at org.jruby.Ruby.runNormally(Ruby.java:597)
at org.jruby.Ruby.runFromMain(Ruby.java:446)
at org.jruby.Main.doRunFromMain(Main.java:369)
at org.jruby.Main.internalRun(Main.java:258)
at org.jruby.Main.run(Main.java:224)
at org.jruby.Main.run(Main.java:208)
at org.jruby.Main.main(Main.java:188)
HBase Shell; enter 'help' for list of supported commands.
Type "exit" to leave the HBase Shell
Version 1.0.3, rf1e1312f9790a7c40f6a4b5a1bab2ea1dd559890, Tue Jan 19 19:26:53 PST 2016

1 answer

I have the same problem. Have you solved it?
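
For what it's worth, `ConnectionLoss for /hbase/hbaseid` means the shell's ZooKeeper client never reached the quorum at all. Note also that the quorum in the log reads `salve1`/`salve2`; if those machines are actually named slave1/slave2, that misspelling in hbase-site.xml or /etc/hosts would by itself produce exactly this failure. A minimal connectivity check from the machine running the shell, using the quorum hosts exactly as logged:

```
# Probe each ZooKeeper quorum member with the "ruok" four-letter command;
# each healthy server answers "imok". A hang or connection refusal here
# reproduces the ConnectionLoss above.
for h in master salve1 salve2; do
  echo "== $h"
  echo ruok | nc "$h" 2181
  echo
done
```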

Other related questions
hbase shell won't start, please help

I installed HBase 1.3.1, Hadoop 2.8.2, and JDK 9.0.1. HBase itself starts without problems, but starting hbase shell gives the following error:

```
root@Master bin]# ./hbase shell
Java HotSpot(TM) 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
WARNING: An illegal reflective access operation has occurred
WARNING: Illegal reflective access by org.jruby.java.invokers.RubyToJavaInvoker (file:/usr/local/hadoop/hbase-1.2.6/lib/jruby-complete-1.6.8.jar) to method java.lang.Object.registerNatives()
WARNING: Please consider reporting this to the maintainers of org.jruby.java.invokers.RubyToJavaInvoker
WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
WARNING: All illegal access operations will be denied in a future release
ArgumentError: wrong number of arguments (0 for 1)
  method_added at file:/usr/local/hadoop/hbase-1.2.6/lib/jruby-complete-1.6.8.jar!/builtin/javasupport/core_ext/object.rb:10
  method_added at file:/usr/local/hadoop/hbase-1.2.6/lib/jruby-complete-1.6.8.jar!/builtin/javasupport/core_ext/object.rb:129
  Pattern at file:/usr/local/hadoop/hbase-1.2.6/lib/jruby-complete-1.6.8.jar!/builtin/java/java.util.regex.rb:2
  (root) at file:/usr/local/hadoop/hbase-1.2.6/lib/jruby-complete-1.6.8.jar!/builtin/java/java.util.regex.rb:1
  require at org/jruby/RubyKernel.java:1062
  (root) at file:/usr/local/hadoop/hbase-1.2.6/lib/jruby-complete-1.6.8.jar!/builtin/java/java.util.regex.rb:42
  (root) at /usr/local/hadoop/hbase-1.2.6/bin/hirb.rb:38
```

Line 38 of hirb.rb is `include JAVA`, and my environment variables are all configured correctly, so I don't know what else could be wrong.
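
The `ArgumentError` thrown out of jruby-complete-1.6.8.jar is what the HBase 1.x shell does under Java 9: the bundled JRuby only runs on JDK 7/8. A minimal fix sketch, assuming a JDK 8 installed at the path shown (path hypothetical):

```
# In conf/hbase-env.sh, pin HBase to a JDK 8 installation:
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64

# then restart HBase and retry the shell
bin/stop-hbase.sh && bin/start-hbase.sh
bin/hbase shell
```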

Error running the status command after HBase starts normally

client.HConnectionManager$HConnectionImplementation: The node /hbase is not in Zookeeper! ![error when running the status command](https://img-ask.csdn.net/upload/201505/13/1431504319_531298.png) I get the error shown in the image. I have already configured hbase-site.xml as shown here: ![configuration in hbase-site.xml](https://img-ask.csdn.net/upload/201505/13/1431504468_206666.png) My HBase is configured in standalone mode. Please help!
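
"The node /hbase is not in Zookeeper" usually means the client is looking under a different parent znode than the one the server registered. A quick check, assuming the default paths:

```
# See which parent znode the running HBase actually created:
bin/hbase zkcli ls /

# If it is not /hbase (e.g. /hbase-unsecure on some distributions), point the
# client at it by adding to conf/hbase-site.xml:
#   <property>
#     <name>zookeeper.znode.parent</name>
#     <value>/hbase-unsecure</value>
#   </property>
```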

hbase mapreduce error: java.lang.NullPointerException

http://bbs.csdn.net/topics/390865764 The error here is similar to that post; hoping the experts can advise.

```
2017-09-15 23:19:15 [WARN]-[] Your hostname, admin-PC resolves to a loopback/non-reachable address: fe80:0:0:0:0:5efe:c0a8:164%23, but we couldn't find any external IP address!
2017-09-15 23:19:15 [INFO]-[org.apache.hadoop.conf.Configuration.deprecation] session.id is deprecated. Instead, use dfs.metrics.session-id
2017-09-15 23:19:15 [INFO]-[org.apache.hadoop.metrics.jvm.JvmMetrics] Initializing JVM Metrics with processName=JobTracker, sessionId=
Exception in thread "main" java.lang.NullPointerException
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:1010)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:487)
	at org.apache.hadoop.util.Shell.run(Shell.java:460)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:720)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:813)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:796)
	at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:656)
	at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:444)
	at org.apache.hadoop.fs.FilterFileSystem.mkdirs(FilterFileSystem.java:308)
	at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:133)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:147)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1307)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1304)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1671)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1304)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1325)
	at TestOnlyMapper.main(TestOnlyMapper.java:35)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at com.intellij.rt.execution.application.AppMain.main(AppMain.java:144)
```

The code:

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import java.io.IOException;

/**
 * Created by admin on 2017/9/15.
 */
public class TestOnlyMapper {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.rootdir", "hdfs://hadoop.master:8020/hdfs/hbase");
        conf.set("hbase.zookeeper.quorum", "hadoop.master,hadoop.slave11,hadoop.slave12");
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        Job job = new Job(conf, "test");
        job.setJarByClass(TestOnlyMapper.class);
        Scan scan = new Scan();
        job.setMapSpeculativeExecution(false);
        job.setReduceSpeculativeExecution(false);
        TableMapReduceUtil.initTableMapperJob("test11", scan, OMapper.class, null, null, job);
        job.setOutputFormatClass(NullOutputFormat.class);
        job.waitForCompletion(true);
    }
}

class OMapper extends TableMapper<Text, LongWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
        for (Cell cell : value.listCells()) {
            System.out.println("---------------------");
            System.out.println("cell.getQualifier()= " + cell.getQualifier().toString());
            System.out.println("---------------------");
        }
    }
}
```
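
The NullPointerException in `Shell.runCommand` while submitting from IntelliJ on Windows (note the `admin-PC` hostname warning) is the classic missing-winutils problem: the Hadoop client shells out to `%HADOOP_HOME%\bin\winutils.exe`, and without it the command path is null. A sketch of the usual fix, with hypothetical paths:

```
# The Hadoop client on Windows needs %HADOOP_HOME%\bin\winutils.exe; grab a
# build matching your Hadoop version, unpack it to e.g. C:\hadoop, then either:
#
#   setx HADOOP_HOME C:\hadoop          (Windows, persistent)
#
# or add a VM option to the IntelliJ run configuration:
#
#   -Dhadoop.home.dir=C:\hadoop
#
# Restart the IDE so the new environment variable is picked up.
```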

HBase starts, but the log file shows an error. Why is that?

```
ERROR org.apache.hadoop.hbase.master.HMasterCommandLine: Failed to start master
java.lang.RuntimeException: Failed construction of Master: class org.apache.hadoop.hbase.master.HMaster
	at org.apache.hadoop.hbase.master.HMaster.constructMaster(HMaster.java:2115)
	at org.apache.hadoop.hbase.master.HMasterCommandLine.startMaster(HMasterCommandLine.java:152)
	at org.apache.hadoop.hbase.master.HMasterCommandLine.run(HMasterCommandLine.java:104)
	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
	at org.apache.hadoop.hbase.util.ServerCommandLine.doMain(ServerCommandLine.java:76)
	at org.apache.hadoop.hbase.master.HMaster.main(HMaster.java:2129)
Caused by: org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /hbase
	at org.apache.zookeeper.KeeperException.create(KeeperException.java:99)
	at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)
	at org.apache.zookeeper.ZooKeeper.exists(ZooKeeper.java:1041)
```
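
`ConnectionLoss for /hbase` during master construction means the master could not reach ZooKeeper at all. A basic sanity check, assuming an external ensemble (path hypothetical):

```
# Run on each quorum host: is ZooKeeper actually up and in a quorum?
$ZOOKEEPER_HOME/bin/zkServer.sh status

# Then confirm hbase.zookeeper.quorum and the clientPort in hbase-site.xml
# match the ensemble defined in zoo.cfg before restarting HBase.
```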

Running start-hbase.sh keeps failing with: Is a directory

![图片说明](https://img-ask.csdn.net/upload/201910/03/1570069310_68396.png)

Error importing a csv file from HDFS into an HBase table; please help

On my local VMs I set up a hadoop-2.6.0 + zookeeper-3.4.6 + hbase-1.0.3 cluster with three nodes, following a tutorial. The environment itself is fine: hadoop, zookeeper, and hbase all start successfully and their web UIs are reachable. But when I import csv data into an HBase table, the import command reports:

```
zookeeper.ClientCnxn: Opening socket connection to server node2/192.168.220.102:2181. Will not attempt to authenticate using SASL (unknown error)
```

The csv file:

```
Debugo,Beijing
Le,Shanghai
Sariel,Beijing
Elvis,Beijing
```

The import command:

```
hbase org.apache.hadoop.hbase.mapreduce.ImportTsv -Dimporttsv.separator=',' -Dimporttsv.columns=HBASE_ROW_KEY,city user_info /user/yarn/user.csv
```

The hbase-site.xml configuration: ![hbase-site.xml](https://img-ask.csdn.net/upload/201611/16/1479268072_593384.png) The zoo.cfg configuration: ![zoo.cfg](https://img-ask.csdn.net/upload/201611/16/1479268009_329304.png)
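
The SASL line by itself is only a notice (it appears whenever a client connects without Kerberos); the import stalls when the ZooKeeper ensemble cannot be reached from wherever ImportTsv runs. A quick probe, using the host from the log:

```
# Expect "imok" back; anything else explains the stalled connection attempt.
echo ruok | nc node2 2181

# "stat" shows whether this node is serving and in which mode:
echo stat | nc node2 2181 | head -3
```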

Trying to create table1 in HBase fails

I'm new to HBase. I installed HBase backed by the local file system, without Hadoop/HDFS. I started it with bin/start-hbase.sh; ps -ef shows the HMaster process, but `jps | grep HMaster` shows nothing. Then in bin/hbase shell, trying to create a table fails with: ERROR: Can't get master address from ZooKeeper; znode data == null How should I deal with this? Thanks!
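
Seeing HMaster in `ps -ef` but not in `jps` often means two Java installations: jps lists only JVMs belonging to the JDK it ships with. And `znode data == null` says the master never published its address, so its log is the place to look. A small check sketch:

```
# Which java binary is actually running HMaster? (run as the process owner)
ls -l /proc/"$(pgrep -f HMaster | head -n1)"/exe

# Why no master address was published -- look at the tail of the master log:
tail -n 50 logs/hbase-*-master-*.log
```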

HBase reports an error at startup; can anyone help?

HBase reports an error at startup; can anyone help? The startup log:
Sat Apr 18 00:33:38 PDT 2020 Starting master on hadoop01 core file size (blocks, -c) 0 data seg size (kbytes, -d) unlimited scheduling priority (-e) 0 file size (blocks, -f) unlimited pending signals (-i) 3804 max locked memory (kbytes, -l) 64 max memory size (kbytes, -m) unlimited open files (-n) 1024 pipe size (512 bytes, -p) 8 POSIX message queues (bytes, -q) 819200 real-time priority (-r) 0 stack size (kbytes, -s) 10240 cpu time (seconds, -t) unlimited max user processes (-u) 3804 virtual memory (kbytes, -v) unlimited file locks (-x) unlimited 2020-04-18 00:33:40,217 INFO [main] util.VersionInfo: HBase 1.2.1 2020-04-18 00:33:40,218 INFO [main] util.VersionInfo: Source code repository git://asf-dev/home/busbey/projects/hbase revision=8d8a7107dc4ccbf36a92f64675dc60392f85c015 2020-04-18 00:33:40,218 INFO [main] util.VersionInfo: Compiled by busbey on Wed Mar 30 11:19:21 CDT 2016 2020-04-18 00:33:40,218 INFO [main] util.VersionInfo: From source with checksum f4bb4a14bb4e0b72b46f729dae98a772 2020-04-18 00:33:41,174 INFO [main] util.ServerCommandLine: env:HBASE_LOGFILE=hbase-root-master-hadoop01.log 2020-04-18 00:33:41,213 INFO [main] util.ServerCommandLine: env:PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/export/servers/jdk/bin:/export/servers/hadoop-2.9.2/bin:/export/servers/hadoop-2.9.2/sbin:/export/servers/apache-hive-1.2.1-bin/bin:/export/servers/jdk/bin:/export/servers/hadoop-2.9.2/bin:/export/servers/hadoop-2.9.2/sbin:/export/servers/zookeeper-3.4.10/bin:HBASE_CLASSPATH/bin:/root/bin:/export/servers/jdk/bin:/export/servers/hadoop-2.9.2/bin:/export/servers/hadoop-2.9.2/sbin:/export/servers/apache-hive-1.2.1-bin/bin:/export/servers/jdk/bin:/export/servers/hadoop-2.9.2/bin:/export/servers/hadoop-2.9.2/sbin:/export/servers/zookeeper-3.4.10/bin:HBASE_HIVE/bin:/export/servers/jdk/bin:/export/servers/hadoop-2.9.2/bin:/export/servers/hadoop-2.9.2/sbin:/export/servers/apache-hive-1.2.1-bin/bin:/export/servers/jdk/bin:/export/servers/hadoop-2.9.2/bin:/export/servers/hadoop-2.9.2/sbin:/export/servers/zookeeper-3.4.10/bin:HBASE_HIVE/bin 2020-04-18 00:33:41,213 INFO [main] util.ServerCommandLine: env:HISTCONTROL=ignoredups 2020-04-18 00:33:41,213 INFO [main] util.ServerCommandLine: env:HISTSIZE=1000 2020-04-18 00:33:41,213 INFO [main] util.ServerCommandLine: env:HBASE_REGIONSERVER_OPTS= -XX:PermSize=128m -XX:MaxPermSize=128m 2020-04-18 00:33:41,213 INFO [main] util.ServerCommandLine: env:JAVA_HOME=/export/servers/jdk 2020-04-18 00:33:41,214 INFO [main] util.ServerCommandLine: env:TERM=vt100 2020-04-18 00:33:41,214 INFO [main] util.ServerCommandLine: env:LANG=en_US.UTF-8 2020-04-18 00:33:41,214 INFO [main] util.ServerCommandLine: env:G_BROKEN_FILENAMES=1 2020-04-18 00:33:41,214 INFO [main] util.ServerCommandLine: env:SELINUX_LEVEL_REQUESTED= 2020-04-18 00:33:41,214 INFO [main] util.ServerCommandLine: env:SELINUX_ROLE_REQUESTED= 2020-04-18 00:33:41,214 INFO [main] util.ServerCommandLine: env:MAIL=/var/spool/mail/root 2020-04-18 00:33:41,214 INFO [main] util.ServerCommandLine: env:LD_LIBRARY_PATH=:/export/servers/hadoop-2.9.2/lib/native 2020-04-18 00:33:41,220 INFO [main] util.ServerCommandLine: env:LOGNAME=root 2020-04-18 00:33:41,220 INFO [main] util.ServerCommandLine: env:HBASE_REST_OPTS= 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:PWD=/export/servers/hbase-1.2.1/bin 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:HBASE_ROOT_LOGGER=INFO,RFA 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine:
env:LESSOPEN=||/usr/bin/lesspipe.sh %s 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:SHELL=/bin/bash 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:ZK_HOME=/export/servers/zookeeper-3.4.10 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:SELINUX_USE_CURRENT_RANGE= 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:HBASE_ENV_INIT=true 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:HBASE_IDENT_STRING=root 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:HBASE_ZNODE_FILE=/tmp/hbase-root-master.znode 2020-04-18 00:33:41,221 INFO [main] util.ServerCommandLine: env:SSH_TTY=/dev/pts/1 2020-04-18 00:33:41,222 INFO [main] util.ServerCommandLine: env:SSH_CLIENT=192.168.121.1 50359 22 2020-04-18 00:33:41,222 INFO [main] util.ServerCommandLine: env:HIVE_HOME=/export/servers/apache-hive-1.2.1-bin 2020-04-18 00:33:41,245 INFO [main] util.ServerCommandLine: env:HBASE_LOG_PREFIX=hbase-root-master-hadoop01 2020-04-18 00:33:41,246 INFO [main] util.ServerCommandLine: env:HBASE_LOG_DIR=/export/servers/hbase-1.2.1/logs 2020-04-18 00:33:41,247 INFO [main] util.ServerCommandLine: env:USER=root 2020-04-18 00:33:41,248 INFO [main] util.ServerCommandLine: env:CLASSPATH=/export/servers/hbase-1.2.1/conf:/export/servers/jdk/lib/tools.jar:/export/servers/hbase-1.2.1:/export/servers/hbase-1.2.1/lib/activation-1.1.jar:/export/servers/hbase-1.2.1/lib/asm-3.1.jar:/export/servers/hbase-1.2.1/lib/avro-1.7.4.jar:/export/servers/hbase-1.2.1/lib/commons-beanutils-1.7.0.jar:/export/servers/hbase-1.2.1/lib/commons-beanutils-core-1.7.0.jar:/export/servers/hbase-1.2.1/lib/commons-cli-1.2.jar:/export/servers/hbase-1.2.1/lib/commons-codec-1.9.jar:/export/servers/hbase-1.2.1/lib/commons-collections-3.2.2.jar:/export/servers/hbase-1.2.1/lib/commons-configuration-1.6.jar:/export/servers/hbase-1.2.1/lib/commons-digester-1.8.jar:/export/servers/hbase-1.2.1/lib/commons-el-1.0.jar:/export/servers/hbase-1.2.1/lib/commons-httpclient-3.1.jar:/export/servers/hbase-1.2.1/lib/commons-io-2.4.jar:/export/servers/hbase-1.2.1/lib/commons-lang-2.6.jar:/export/servers/hbase-1.2.1/lib/commons-logging-1.2.jar:/export/servers/hbase-1.2.1/lib/commons-math3-3.1.1.jar:/export/servers/hbase-1.2.1/lib/commons-net-3.1.jar:/export/servers/hbase-1.2.1/lib/findbugs-annotations-1.3.9-1.jar:/export/servers/hbase-1.2.1/lib/guava-12.0.1.jar:/export/servers/hbase-1.2.1/lib/hadoop-annotations-2.5.1.jar:/export/servers/hbase-1.2.1/lib/hadoop-common-2.5.1.jar:/export/servers/hbase-1.2.1/lib/hbase-annotations-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-annotations-1.2.1-tests.jar:/export/servers/hbase-1.2.1/lib/hbase-client-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-common-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-common-1.2.1-tests.jar:/export/servers/hbase-1.2.1/lib/hbase-examples-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-external-blockcache-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-hadoop2-compat-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-hadoop-compat-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-it-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-it-1.2.1-tests.jar:/export/servers/hbase-1.2.1/lib/hbase-prefix-tree-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-procedure-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-protocol-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-rest-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-server-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-server-1.2.1-tests.jar:/export/servers/hbase-1.2.1/lib/hbase-she
ll-1.2.1.jar:/export/servers/hbase-1.2.1/lib/hbase-thrift-1.2.1.jar:/export/servers/hbase-1.2.1/lib/htrace-core-3.1.0-incubating.jar:/export/servers/hbase-1.2.1/lib/httpclient-4.2.5.jar:/export/servers/hbase-1.2.1/lib/httpcore-4.4.1.jar:/export/servers/hbase-1.2.1/lib/jackson-core-asl-1.9.13.jar:/export/servers/hbase-1.2.1/lib/jackson-jaxrs-1.9.13.jar:/export/servers/hbase-1.2.1/lib/jackson-mapper-asl-1.9.13.jar:/export/servers/hbase-1.2.1/lib/jackson-xc-1.9.13.jar:/export/servers/hbase-1.2.1/lib/jasper-compiler-5.5.23.jar:/export/servers/hbase-1.2.1/lib/jasper-runtime-5.5.23.jar:/export/servers/hbase-1.2.1/lib/java-xmlbuilder-0.4.jar:/export/servers/hbase-1.2.1/lib/jaxb-api-2.2.2.jar:/export/servers/hbase-1.2.1/lib/jaxb-impl-2.2.3-1.jar:/export/servers/hbase-1.2.1/lib/jersey-core-1.9.jar:/export/servers/hbase-1.2.1/lib/jersey-json-1.9.jar:/export/servers/hbase-1.2.1/lib/jersey-server-1.9.jar:/export/servers/hbase-1.2.1/lib/jets3t-0.9.0.jar:/export/servers/hbase-1.2.1/lib/jettison-1.3.3.jar:/export/servers/hbase-1.2.1/lib/jetty-6.1.26.jar:/export/servers/hbase-1.2.1/lib/jetty-util-6.1.26.jar:/export/servers/hbase-1.2.1/lib/jsr305-1.3.9.jar:/export/servers/hbase-1.2.1/lib/junit-4.12.jar:/export/servers/hbase-1.2.1/lib/log4j-1.2.17.jar:/export/servers/hbase-1.2.1/lib/paranamer-2.3.jar:/export/servers/hbase-1.2.1/lib/protobuf-java-2.5.0.jar:/export/servers/hbase-1.2.1/lib/slf4j-api-1.7.7.jar:/export/servers/hbase-1.2.1/lib/slf4j-log4j12-1.7.5.jar:/export/servers/hbase-1.2.1/lib/snappy-java-1.0.4.1.jar:/export/servers/hbase-1.2.1/lib/xmlenc-0.52.jar:/export/servers/hadoop-2.9.2/etc/hadoop:/export/servers/hadoop-2.9.2/share/hadoop/common/lib/*:/export/servers/hadoop-2.9.2/share/hadoop/common/*:/export/servers/hadoop-2.9.2/share/hadoop/hdfs:/export/servers/hadoop-2.9.2/share/hadoop/hdfs/lib/*:/export/servers/hadoop-2.9.2/share/hadoop/hdfs/*:/export/servers/hadoop-2.9.2/share/hadoop/yarn:/export/servers/hadoop-2.9.2/share/hadoop/yarn/lib/*:/export/servers/hadoop-2.9.2/share/hadoop/yarn/*:/export/servers/hadoop-2.9.2/share/hadoop/mapreduce/lib/*:/export/servers/hadoop-2.9.2/share/hadoop/mapreduce/*:/export/servers/hadoop-2.9.2/contrib/capacity-scheduler/*.jar 2020-04-18 00:33:41,248 INFO [main] util.ServerCommandLine: env:HBASE_MASTER_OPTS= -XX:PermSize=128m -XX:MaxPermSize=128m 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:HBASE_MANAGES_ZK=false 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:SSH_ASKPASS=/usr/libexec/openssh/gnome-ssh-askpass 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:SSH_CONNECTION=192.168.121.1 50359 192.168.121.134 22 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:HOSTNAME=hadoop01 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:HADOOP_HOME=/export/servers/hadoop-2.9.2 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:HBASE_NICENESS=0 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:HBASE_OPTS=-XX:+UseConcMarkSweepGC -XX:PermSize=128m -XX:MaxPermSize=128m -Dhbase.log.dir=/export/servers/hbase-1.2.1/logs -Dhbase.log.file=hbase-root-master-hadoop01.log -Dhbase.home.dir=/export/servers/hbase-1.2.1 -Dhbase.id.str=root -Dhbase.root.logger=INFO,RFA -Djava.library.path=/export/servers/hadoop-2.9.2/lib/native -Dhbase.security.logger=INFO,RFAS 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:HBASE_START_FILE=/tmp/hbase-root-master.autorestart 2020-04-18 00:33:41,249 INFO [main] util.ServerCommandLine: env:HBASE_SECURITY_LOGGER=INFO,RFAS 2020-04-18 
00:33:41,250 INFO [main] util.ServerCommandLine: env:HBASE_THRIFT_OPTS= 2020-04-18 00:33:41,250 INFO [main] util.ServerCommandLine: env:HBASE_HOME=/export/servers/hbase-1.2.1 2020-04-18 00:33:41,250 INFO [main] util.ServerCommandLine: env:LS_COLORS=rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=01;05;37;41:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lz=01;31:*.xz=01;31:*.bz2=01;31:*.tbz=01;31:*.tbz2=01;31:*.bz=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.rar=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.axv=01;35:*.anx=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=01;36:*.au=01;36:*.flac=01;36:*.mid=01;36:*.midi=01;36:*.mka=01;36:*.mp3=01;36:*.mpc=01;36:*.ogg=01;36:*.ra=01;36:*.wav=01;36:*.axa=01;36:*.oga=01;36:*.spx=01;36:*.xspf=01;36: 2020-04-18 00:33:41,250 INFO [main] util.ServerCommandLine: env:HOME=/root 2020-04-18 00:33:41,250 INFO [main] util.ServerCommandLine: env:SHLVL=4 2020-04-18 00:33:41,250 INFO [main] util.ServerCommandLine: env:MALLOC_ARENA_MAX=4 2020-04-18 00:33:41,254 INFO [main] util.ServerCommandLine: vmName=Java HotSpot(TM) 64-Bit Server VM, vmVendor=Oracle Corporation, vmVersion=25.161-b12 2020-04-18 00:33:41,590 INFO [main] util.ServerCommandLine: vmInputArguments=[-Dproc_master, -XX:OnOutOfMemoryError=kill -9 %p, -XX:+UseConcMarkSweepGC, -XX:PermSize=128m, -XX:MaxPermSize=128m, -Dhbase.log.dir=/export/servers/hbase-1.2.1/logs, -Dhbase.log.file=hbase-root-master-hadoop01.log, -Dhbase.home.dir=/export/servers/hbase-1.2.1, -Dhbase.id.str=root, -Dhbase.root.logger=INFO,RFA, -Djava.library.path=/export/servers/hadoop-2.9.2/lib/native, -Dhbase.security.logger=INFO,RFAS] 2020-04-18 00:33:42,227 WARN [main] util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2020-04-18 00:33:45,999 INFO [main] regionserver.RSRpcServices: master/hadoop01/192.168.121.134:16000 server-side HConnection retries=350 2020-04-18 00:33:46,311 INFO [main] ipc.SimpleRpcScheduler: Using deadline as user call queue, count=3 2020-04-18 00:33:46,371 INFO [main] ipc.RpcServer: master/hadoop01/192.168.121.134:16000: started 10 reader(s) listening on port=16000 2020-04-18 00:33:46,473 INFO [main] impl.MetricsConfig: loaded properties from hadoop-metrics2-hbase.properties 2020-04-18 00:33:46,674 INFO [main] impl.MetricsSystemImpl: Scheduled snapshot period at 10 second(s). 2020-04-18 00:33:46,674 INFO [main] impl.MetricsSystemImpl: HBase metrics system started 2020-04-18 00:33:46,746 ERROR [main] master.HMasterCommandLine: Master exiting java.lang.RuntimeException: Failed construction of Master: class org.apache.hadoop.hbase.master.HMaster. 
at org.apache.hadoop.hbase.master.HMaster.constructMaster(HMaster.java:2401) at org.apache.hadoop.hbase.master.HMasterCommandLine.startMaster(HMasterCommandLine.java:232) at org.apache.hadoop.hbase.master.HMasterCommandLine.run(HMasterCommandLine.java:138) at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70) at org.apache.hadoop.hbase.util.ServerCommandLine.doMain(ServerCommandLine.java:126) at org.apache.hadoop.hbase.master.HMaster.main(HMaster.java:2411) Caused by: java.lang.NoClassDefFoundError: com/yammer/metrics/stats/Sample at org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry.newTimeHistogram(DynamicMetricsRegistry.java:305) at org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceImpl.<init>(MetricsHBaseServerSourceImpl.java:99) at org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.getSource(MetricsHBaseServerSourceFactoryImpl.java:48) at org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.create(MetricsHBaseServerSourceFactoryImpl.java:38) at org.apache.hadoop.hbase.ipc.MetricsHBaseServer.<init>(MetricsHBaseServer.java:39) at org.apache.hadoop.hbase.ipc.RpcServer.<init>(RpcServer.java:2032) at org.apache.hadoop.hbase.regionserver.RSRpcServices.<init>(RSRpcServices.java:923) at org.apache.hadoop.hbase.master.MasterRpcServices.<init>(MasterRpcServices.java:230) at org.apache.hadoop.hbase.master.HMaster.createRpcServices(HMaster.java:517) at org.apache.hadoop.hbase.regionserver.HRegionServer.<init>(HRegionServer.java:535) at org.apache.hadoop.hbase.master.HMaster.<init>(HMaster.java:364) at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.lang.reflect.Constructor.newInstance(Constructor.java:423) at org.apache.hadoop.hbase.master.HMaster.constructMaster(HMaster.java:2394) ... 5 more Caused by: java.lang.ClassNotFoundException: com.yammer.metrics.stats.Sample at java.net.URLClassLoader.findClass(URLClassLoader.java:381) at java.lang.ClassLoader.loadClass(ClassLoader.java:424) at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:338) at java.lang.ClassLoader.loadClass(ClassLoader.java:357) ... 21 more
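
`NoClassDefFoundError: com/yammer/metrics/stats/Sample` points at a missing metrics-core jar: HBase 1.2 ships one in its lib directory, and the long CLASSPATH dump above notably has no metrics-core entry. A check-and-repair sketch, with paths taken from the log:

```
# Is the jar present where the classpath is built from?
ls /export/servers/hbase-1.2.1/lib | grep -i metrics

# If missing, restore it from the HBase 1.2.1 distribution tarball
# (the bundled jar version is assumed to be 2.2.0):
# cp metrics-core-2.2.0.jar /export/servers/hbase-1.2.1/lib/
```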

HBase table creation fails with: ERROR: The procedure 22330 is still running

After a power outage, HBase hit missing-block exceptions. Investigation showed two blocks under the WALs in a split state. After several repair commands got nowhere, I used hdfs fsck -delete to remove the corrupted ones. After that, data in table ppr could no longer be queried, and dropping the table normally failed, so I deleted it by force: removed the table's data dir on HDFS, removed its records from the meta table, and deleted /hbase/table/ppr in the zookeeper cli, then restarted the cluster. After the restart, list in hbase shell no longer shows ppr, but creating it again gives the following error:
```
hbase(main):001:0> create "ppr" ,"f"

ERROR: The procedure 22330 is still running

For usage try 'help "create"'

Took 669.0389 seconds
```
Digging further, Regions in Transition contains one region of that table: 273f6a209c5e2bcad57db5e0e18b4028 ppr OFFLINE 19064 WAITING_TIMEOUT But how do I delete this region, given the table is already gone from HDFS? Experts, please advise.
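
With the table gone from HDFS, meta, and ZooKeeper, the blocker is the old CreateTableProcedure still sitting in the master's procedure store. Two commonly used ways out, both hedged and destructive, so back up first:

```
# Option 1: sideline the master procedure WALs and restart the HMaster
# (path assumes the default rootdir layout):
hdfs dfs -mv /hbase/MasterProcWALs /hbase/MasterProcWALs.bak

# Option 2 (HBase 2.x): bypass the stuck procedure with HBCK2, then retry:
# hbase hbck -j hbase-hbck2-<version>.jar bypass -o 22330
```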

HBase's datanode keeps dying; looking for a solution

After HBase starts, the datanode dies. Also, once started, my HBase cannot create a new pid file, so it cannot be shut down. After starting HBase and entering hbase shell, running status reports an error. ![screenshot](https://img-ask.csdn.net/upload/201712/07/1512636854_641235.png)
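
On the pid-file half of this: HBase writes pids to /tmp by default, and if the OS cleans /tmp the stop scripts can no longer find the processes. A sketch of the usual workaround (directory hypothetical):

```
# In conf/hbase-env.sh, keep pid files somewhere that survives /tmp cleanup:
export HBASE_PID_DIR=/var/hadoop/pids

# For the dying datanode, its own log is the authoritative trail:
tail -n 100 $HADOOP_HOME/logs/hadoop-*-datanode-*.log
```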

HBase RegionServer fails to start

replace_conf_dir + find /var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER -type f '!' -path '/var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER/logs/*' '!' -name '*.log' '!' -name '*.keytab' '!' -name '*jceks' -exec perl -pi -e 's#{{CMF_CONF_DIR}}#/var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER#g' '{}' ';' **Can't open /var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER/supervisor.conf: Permission denied. **+ acquire_kerberos_tgt hbase.keytab + '[' -z hbase.keytab ']' + '[' -n '' ']' + export 'HBASE_OPTS=-Djava.net.preferIPv4Stack=true ' + HBASE_OPTS='-Djava.net.preferIPv4Stack=true ' + locate_hbase_script + '[' 5 -ge 5 ']' + export BIGTOP_DEFAULTS_DIR= + BIGTOP_DEFAULTS_DIR= + HBASE_BIN=/opt/cloudera/parcels/CDH-5.7.2-1.cdh5.7.2.p0.18/lib/hbase/../../bin/hbase + '[' upgrade = regionserver ']' + '[' region_mover = regionserver ']' + '[' toggle_balancer = regionserver ']' + '[' shell = regionserver ']' + '[' hfileCheck = regionserver ']' + '[' remoteSnapshotTool = regionserver ']' + '[' '' '!=' '' ']' + '[' regionserver = regionserver -a -n '' ']' + '[' start = start -a 5 -gt 4 ']' + '[' regionserver = regionserver -o master = regionserver ']' + export HBASE_ZNODE_FILE=/var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER/znode59836 + HBASE_ZNODE_FILE=/var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER/znode59836 + exec /opt/cloudera/parcels/CDH-5.7.2-1.cdh5.7.2.p0.18/lib/hbase/../../bin/hbase --config /var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER regionserver start + znode_cleanup regionserver ++ date + echo 'Thu Aug 10 15:01:12 CST 2017 Starting znode cleanup thread with HBASE_ZNODE_FILE=/var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER/znode59836 for regionserver' ++ replace_pid -Djava.net.preferIPv4Stack=true ++ sed 's#{{PID}}#59836#g' ++ echo -Djava.net.preferIPv4Stack=true + HBASE_OPTS=-Djava.net.preferIPv4Stack=true + '[' '' '!=' '' ']' + LOG_FILE=/var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER/logs/znode_cleanup.log + set +x
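
The interesting line in that trace is `Can't open .../supervisor.conf: Permission denied`: the launch script cannot read the process directory the Cloudera agent generated. A hedged check, using the path from the trace:

```
# Compare ownership/permissions of the generated process directory:
ls -ld /var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER
ls -l  /var/run/cloudera-scm-agent/process/469-hbase-REGIONSERVER/supervisor.conf

# The agent owns and regenerates these directories; restarting it is often
# enough to rebuild them with correct permissions:
# service cloudera-scm-agent restart
```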

Testing an HBase connection from Java: cannot connect, times out

I installed standalone HBase 1.2.6 on Linux, using HBase's own zookeeper. The Linux firewall has ports 16010 and 2181 open. hbase shell works fine against HBase, and the browser can also reach the HBase UI, but when I test a connection from Java it never connects. Then, when I turn the Linux firewall off, Java can connect to and access HBase normally. What is going on? Does the Linux firewall need other ports opened?
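
The shell works because it runs on the server itself and never crosses the firewall; a remote Java client also talks to the master and region server RPC ports, which default to 16000 and 16020 in HBase 1.x (16010 is only the web UI). A sketch assuming firewalld:

```
sudo firewall-cmd --permanent --add-port=16000/tcp   # HMaster RPC
sudo firewall-cmd --permanent --add-port=16020/tcp   # RegionServer RPC
sudo firewall-cmd --reload
```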

shell error: command not found

My login shell is csh, and the script's interpreter line is #!/bin/bash, but a variable assignment a=b defined below it reports an error. The script has been run through dos2unix, so it should not be caused by ^M. Can anyone advise?
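
A likely culprit: the `#!/bin/bash` line is only honored when the script is executed as a program; if it is sourced from csh, csh itself parses it, and `a=b` is not a csh assignment. A minimal illustration:

```
# Execute the script so the shebang picks bash (works even from a csh login):
chmod +x myscript.sh      # hypothetical script name
./myscript.sh             # bash runs it; a=b is fine (no spaces around =)

# Sourcing it from csh makes csh interpret it instead, and a=b fails there;
# the csh spelling of an assignment is:
#   set a = b
```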

Error inserting data into an HBase-backed table from Hive

In pseudo-distributed mode I integrated Hadoop 2.2.0 (compiled myself on 64-bit Ubuntu) + HBase 0.98 + Hive 0.14. Everything else works, but inserting data into an HBase-backed table from Hive fails. I tried many fixes found online, none of which helped. The exact error:

```
java.lang.IllegalArgumentException: Can not create a Path from an empty string
	at org.apache.hadoop.fs.Path.checkPathArg(Path.java:127)
	at org.apache.hadoop.fs.Path.<init>(Path.java:135)
	at org.apache.hadoop.mapreduce.JobSubmitter.copyAndConfigureFiles(JobSubmitter.java:213)
	at org.apache.hadoop.mapreduce.JobSubmitter.copyAndConfigureFiles(JobSubmitter.java:300)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:387)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1268)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1265)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1491)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1265)
	at org.apache.hadoop.mapred.JobClient$1.run(JobClient.java:562)
	at org.apache.hadoop.mapred.JobClient$1.run(JobClient.java:557)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1491)
	at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:557)
	at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:548)
	at org.apache.hadoop.hive.ql.exec.mr.ExecDriver.execute(ExecDriver.java:429)
	at org.apache.hadoop.hive.ql.exec.mr.MapRedTask.execute(MapRedTask.java:137)
	at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:160)
	at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:85)
	at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:1604)
	at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:1364)
	at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:1177)
	at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1004)
	at org.apache.hadoop.hive.ql.Driver.run(Driver.java:994)
	at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:247)
	at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:199)
	at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:410)
	at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:783)
	at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:677)
	at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:616)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.util.RunJar.main(RunJar.java:212)
Job Submission failed with exception 'java.lang.IllegalArgumentException(Can not create a Path from an empty string)'
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask
```

Has anyone run into the same problem?
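
"Can not create a Path from an empty string" during job submission in a Hive-plus-HBase setup is commonly triggered by an empty or malformed element in the auxiliary-jars setting, since each entry there becomes a Path. A hedged sketch (jar names and paths are hypothetical; match your installed versions):

```
# List the handler and client jars explicitly, with no empty elements:
export HIVE_AUX_JARS_PATH=/usr/local/hive/lib/hive-hbase-handler-0.14.0.jar,/usr/local/hbase/lib/hbase-client-0.98.9-hadoop2.jar

hive --hiveconf hive.aux.jars.path=$HIVE_AUX_JARS_PATH
```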

Custom HBase coprocessor never fires

The code:

```
public class Util {
    public static String getRegNo(String callerId, String callTime) {
        // region 00-99
        int hash = (callerId + callTime.substring(0, 6)).hashCode();
        hash = (hash & Integer.MAX_VALUE) % 100;
        // hash region number
        DecimalFormat df = new DecimalFormat();
        df.applyPattern("00");
        String regNo = df.format(hash);
        return regNo;
    }
}

public class CalleeLogRegionObserver extends BaseRegionObserver {
    public void postPut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
                        Durability durability) throws IOException {
        FileWriter fw = new FileWriter("/home/centos/kkk.txt", true);
        super.postPut(e, put, edit, durability);
        //
        String tableName0 = TableName.valueOf("ns1:calllogs").getNameAsString();
        // get the current TableName object
        String tableName1 = e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
        fw.write(tableName1 + "\r\n");
        if (!tableName0.equals(tableName1)) {
            return;
        }
        // caller rowkey: xx, callerid, time, direction, calleeid, duration
        // callee rowkey: calleeid, time, ...
        String rowkey = Bytes.toString(put.getRow());
        String[] arr = rowkey.split(",");
        if (arr[3].equals("1")) {
            return;
        }
        String hash = Util.getRegNo(arr[4], arr[2]);
        // hash
        String newRowKey = hash + "," + arr[4] + "," + arr[2] + ",1," + arr[1] + "," + arr[5];
        Put newPut = new Put(Bytes.toBytes(newRowKey));
        newPut.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("dummy"), Bytes.toBytes("no"));
        TableName tn = TableName.valueOf("ns1:calllogs");
        Table t = e.getEnvironment().getTable(tn);
        // fw.write(t.getName().getNameAsString() + "\r\n");
        t.put(newPut);
        fw.close();
    }

    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
                         List<Cell> results) throws IOException {
        super.preGetOp(e, get, results);
    }
}
```

The configuration:

```
<!-- enable fully distributed mode -->
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
<!-- where HBase data lives on HDFS -->
<property>
  <name>hbase.rootdir</name>
  <value>hdfs://mycluster/hbase</value>
</property>
<!-- zk quorum -->
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>s101:2181,s102:2181,s103:2181</value>
</property>
<!-- zk local data directory -->
<property>
  <name>hbase.zookeeper.property.dataDir</name>
  <value>/home/centos/zookeeper</value>
</property>
<!-- coprocessor -->
<property>
  <name>hbase.coprocessor.region.classes</name>
  <value>com.tjx.hbasedemo.coprocessor.CalleeLogRegionObserver</value>
</property>
```

When I restart HBase, no kkk.txt file is generated in the home directory and there is no error; the coprocessor simply never gets invoked.

The insert code:

```
@Test
public void testPut() throws Exception {
    // create the conf object
    Configuration conf = HBaseConfiguration.create();
    // create a connection via the connection factory
    Connection conn = ConnectionFactory.createConnection(conf);
    // look up the TableName through the connection
    TableName tname = TableName.valueOf("ns1:calllogs");
    // get the table
    Table table = conn.getTable(tname);
    // caller
    String callerId = "13845456767";
    // callee
    String calleeId = "13989898787";
    SimpleDateFormat sdf = new SimpleDateFormat();
    sdf.applyPattern("yyyyMMddHHmmss");
    // call time
    String callTime = sdf.format(new Date());
    // call duration
    int duration = 100;
    DecimalFormat dff = new DecimalFormat();
    dff.applyPattern("00000");
    String durStr = dff.format(duration);
    // hash region 00 -- 99
    int hash = (calleeId + callTime.substring(0, 6)).hashCode();
    hash = hash & Integer.MAX_VALUE % 100;
    DecimalFormat df = new DecimalFormat();
    df.applyPattern("00");
    String regNo = df.format(hash);
    // assemble the rowkey
    String rowkey = regNo + "," + callerId + "," + callTime + "," + "0" + "," + calleeId + "," + durStr;
    byte[] rowid = Bytes.toBytes(rowkey);
    // create the Put
    Put put = new Put(rowid);
    put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("callerPos"), Bytes.toBytes("河北"));
    put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("calleePos"), Bytes.toBytes("河南"));
    // execute the insert
    table.put(put);
    System.out.println("over");
}
```

A question about HBase list_namespace!!! Please advise

Why does running list_namespace in hbase print only NAMESPACE and nothing else?

```
hbase(main):005:0> list_namespace
NAMESPACE

ERROR: Call id=27, waitTime=60002, operationTimeout=60000 expired.

Here is some help for this command:
List all namespaces in hbase. Optional regular expression parameter could
be used to filter the output. Examples:

  hbase> list_namespace
  hbase> list_namespace 'abc.*'
```
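
A call timeout on `list_namespace` usually means the master has not finished initializing, often because hbase:meta or the namespace table cannot be assigned, so the command itself is fine. A place to look, log name hypothetical:

```
# What is the master waiting on?
grep -iE "namespace|hbase:meta|initializ" $HBASE_HOME/logs/hbase-*-master-*.log | tail -n 20

# If meta is readable, the problem is narrowed to the namespace table:
echo "scan 'hbase:meta', {LIMIT => 5}" | hbase shell
```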

hbase error: ERROR: Can't get master address from ZooKeeper; znode data == null

Environment: hadoop + zookeeper + hbase. The hadoop and zookeeper clusters are both fine, but after starting HBase, checking its status reports the error: ![screenshot](https://img-ask.csdn.net/upload/201903/07/1551957383_266120.jpg) I have tried all the usual online advice (restarting hbase, restarting services, editing config files) and nothing fixes it. Please advise.

/etc/profile:

```
export JAVA_HOME=/opt/java/jdk1.8.0_201
export HADOOP_HOME=/opt/hadoop/hadoop-2.8.0
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_HOME}/lib/native
export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib"
#export HIVE_HOME=/opt/hive/apache-hive-2.1.1-bin
#export HIVE_CONF_DIR=${HIVE_HOME}/conf
#export SQOOP_HOME=/opt/sqoop/sqoop-1.4.6.bin__hadoop-2.0.4-alpha
export HBASE_HOME=/opt/hbase/hbase-1.4.9
export ZK_HOME=/opt/zookeeper/zookeeper-3.4.13
export CLASS_PATH=.:${JAVA_HOME}/lib:${HIVE_HOME}/lib:$CLASS_PATH
export PATH=.:${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:${SPARK_HOME}/bin:${ZOOKEEPER_HOME}/bin:${HIVE_HOME}/bin:${SQOOP_HOME}/bin:${HBASE_HOME}:${ZK_HOME}/bin:$PATH
```

hbase/conf/hbase-site.xml:

```
<configuration>
<property>
  <name>hbase.rootdir</name>
  <value>hdfs://hadoop1:9000/hbase</value>
  <description>The directory shared byregion servers.</description>
</property>
<property>
  <name>hbase.zookeeper.property.dataDir</name>
  <value>/opt/hbase/zk_data</value>
</property>
<property>
  <name>hbase.zookeeper.property.clientPort</name>
  <value>2181</value>
  <description>Property from ZooKeeper'sconfig zoo.cfg. The port at which the clients will connect. </description>
</property>
<property>
  <name>zookeeper.session.timeout</name>
  <value>120000</value>
</property>
<property>
  <name>hbase.zookeeper.property.tickTime</name>
  <value>6000</value>
</property>
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>hadoop1,hadoop2,hadoop3</value>
</property>
<property>
  <name>hbase.tmp.dir</name>
  <value>/root/hbase/tmp</value>
</property>
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
</configuration>
```

hbase/conf/hbase-env.sh:

```
export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
#export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m"
#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m"
export JAVA_HOME=/opt/java/jdk1.8.0_201
export HADOOP_HOME=/opt/hadoop/hadoop-2.8.0
export HBASE_HOME=/opt/hbase/hbase-1.4.9
export HBASE_CLASSPATH=/opt/hadoop/hadoop-2.8.0/etc/hadoop
export HBASE_PID_DIR=/root/hbase/pids
export HBASE_MANAGES_ZK=false
```

zookeeper/zoo.cfg:

```
tickTime=2000
initLimit=10
syncLimit=5
clientPort=2181
dataDir=/opt/zookeeper/data
dataLogDir=/opt/zookeeper/dataLog
server.1=hadoop1:2886:3881
server.2=hadoop2:2887:3882
server.3=hadoop3:2888:3883
quorumListenOnAllIPs=true
```

/opt/hadoop/hadoop-2.8.0/etc/hadoop/core-site.xml:

```
<configuration>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/root/hadoop/tmp</value>
  <description>Abase for other temporary directories.</description>
</property>
<property>
  <name>fs.default.name</name>
  <value>hdfs://hadoop1:9000</value>
</property>
</configuration>
```
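
`znode data == null` means no active master ever registered in ZooKeeper, which almost always means HMaster exited right after start; its log will say why. A check sketch against the configuration above:

```
jps                                               # is HMaster still alive?
tail -n 50 $HBASE_HOME/logs/hbase-*-master-*.log  # if not, why it exited

# Confirm the master znode in the ensemble HBase is configured against:
$ZK_HOME/bin/zkCli.sh -server hadoop1:2181 get /hbase/master
```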

Problems connecting to HBase remotely from Java. Everything online says to edit the local hosts file; is there a way to connect to HBase and use the Admin API without changing local configuration?

```
Configuration configuration = HBaseConfiguration.create();
configuration.set("hbase.rootdir", "hdfs://远程hbaseIP:9820/hbase");
configuration.set("hbase.zookeeper.quorum", "远程hbaseIP");
configuration.set("hbase.zookeeper.property.clientPort", "2181");
Connection conn = ConnectionFactory.createConnection(configuration);
Admin admin = conn.getAdmin();
NamespaceDescriptor.Builder builder = NamespaceDescriptor.create("SpaceName");
```

The error:

```
WARN [org.apache.hadoop.util.Shell] - Did not find winutils.exe: java.io.FileNotFoundException: java.io.FileNotFoundException: HADOOP_HOME and hadoop.home.dir are unset. -see https://wiki.apache.org/hadoop/WindowsProblems
WARN [org.apache.hadoop.util.NativeCodeLoader] - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.security.HadoopKerberosName.setRuleMechanism(Ljava/lang/String;)V
```
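
Independent of the hosts-file question, the `NoSuchMethodError` on `HadoopKerberosName.setRuleMechanism` says the client mixes Hadoop jar versions: that method exists only in newer hadoop-auth, so hadoop-common and hadoop-auth on the classpath disagree. A hedged check, assuming a Maven build:

```
# Both artifacts must resolve to the same Hadoop version (ideally the
# cluster's); look for a mismatch:
mvn dependency:tree | grep -E "hadoop-(auth|common)"
```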

Error running Hive SQL through Hue

When I run Hive SQL through Hue, the system reports an error:

```
java.net.SocketTimeoutException:callTimeout=60000, callDuration=68043: row 'log,,00000000000000' on table 'hbase:meta' at region=hbase:meta,,1.1588230740, hostname=node4,16020,1476410081203, seqNum=0:5:1", 'org.apache.hadoop.hbase.client.RpcRetryingCaller:callWithRetries:RpcRetryingCaller.java:159', 'org.apache.hadoop.hbase.client.ResultBoundedCompletionService$QueueingFuture:run:ResultBoundedCompletionService.java:64', '*org.apache.hadoop.hbase.exceptions.ConnectionClosingException:Call to node4/192.168.127.1:16020 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosingException: Connection to node4/192.168.127.1:16020 is closing. Call id=9, waitTime=1:16:11', 'org.apache.hadoop.hbase.ipc.RpcClientImpl:wrapException:RpcClientImpl.java:1239', 'org.apache.hadoop.hbase.ipc.RpcClientImpl:call:RpcClientImpl.java:1210', 'org.apache.hadoop.hbase.ipc.AbstractRpcClient:callBlockingMethod:AbstractRpcClient.java:213', 'org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation:callBlockingMethod:AbstractRpcClient.java:287', 'org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$BlockingStub:scan:ClientProtos.java:32651', 'org.apache.hadoop.hbase.client.ScannerCallable:openScanner:ScannerCallable.java:372', 'org.apache.hadoop.hbase.client.ScannerCallable:call:ScannerCallable.java:199', 'org.apache.hadoop.hbase.client.ScannerCallable:call:ScannerCallable.java:62', 'org.apache.hadoop.hbase.client.RpcRetryingCaller:callWithoutRetries:RpcRetryingCaller.java:200', 'org.apache.hadoop.hbase.client.ScannerCallableWithReplicas$RetryingRPC:call:ScannerCallableWithReplicas.java:369', 'org.apache.hadoop.hbase.client.ScannerCallableWithReplicas$RetryingRPC:call:ScannerCallableWithReplicas.java:343', 'org.apache.hadoop.hbase.client.RpcRetryingCaller:callWithRetries:RpcRetryingCaller.java:126', '*org.apache.hadoop.hbase.exceptions.ConnectionClosingException:Connection to node4/192.168.127.1:16020 is closing. Call id=9, waitTime=1:3:2', 'org.apache.hadoop.hbase.ipc.RpcClientImpl$Connection:cleanupCalls:RpcClientImpl.java:1037', 'org.apache.hadoop.hbase.ipc.RpcClientImpl$Connection:close:RpcClientImpl.java:844', 'org.apache.hadoop.hbase.ipc.RpcClientImpl$Connection:run:RpcClientImpl.java:572'], statusCode=3), results=None, hasMoreRows=None)
```

But when I run the same SQL from the hive shell, everything works fine.
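
The trace shows the meta scan going to `node4/192.168.127.1` and the connection closing; if 192.168.127.1 is not node4's real address (it looks like a VM host-only adapter), the host running HiveServer2/Hue is resolving the region server name wrongly, while the hive shell presumably runs somewhere that resolves it correctly. A check sketch:

```
# On the host running HiveServer2/Hue:
getent hosts node4        # does this print the region server's real IP?

# If not, fix it in /etc/hosts (address hypothetical):
#   192.168.127.134  node4
```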
