Hadoop: job.setOutputFormatClass(SequenceFileOutputFormat.class) reports an error when setting the output file format

Hadoop version: 2.9.2
When setting the output file format, this works:
job.setOutputFormatClass(TextOutputFormat.class); // the default output format
But job.setOutputFormatClass(SequenceFileOutputFormat.class); reports an error. According to other people's blog posts, it should be possible to set it this way.

Error message:

The method setOutputFormatClass(Class<? extends OutputFormat>) in the type Job is not applicable for the arguments (Class<SequenceFileOutputFormat>)
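The error says the class being passed is not a subtype of org.apache.hadoop.mapreduce.OutputFormat. Looking at the imports in the code below, the most likely cause is that SequenceFileOutputFormat comes from org.apache.hadoop.mapred (the old MapReduce API) instead of org.apache.hadoop.mapreduce.lib.output (the new API that Job belongs to); the old-API class does not extend the new-API OutputFormat, so the compiler rejects it. A minimal sketch of the fix, assuming only the import needs to change (the class name OutputFormatFix is illustrative):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
// New-API class: it extends org.apache.hadoop.mapreduce.OutputFormat,
// which is the bound that Job.setOutputFormatClass expects.
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
// The old-API class that triggers the compile error; do NOT import this one:
// import org.apache.hadoop.mapred.SequenceFileOutputFormat;

public class OutputFormatFix {
    public static void main(String[] args) throws IOException {
        Job job = Job.getInstance(new Configuration());
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Compiles once the new-API class is the one imported:
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
    }
}
```

The full source is below for reference: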

package cn.edu360.mr.indexSequance;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
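// NOTE: the import above is from the old mapred API; it is what makes
// job.setOutputFormatClass(SequenceFileOutputFormat.class) fail to compile (see the analysis above the code)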
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;


/**
 * Build an inverted index; example result: hello a.txt->3 b.txt->4
 * 
 * Two steps:
 * Step 1
 *    map: emit K: word+document   V: count (1)
 *    reduce: emit K: word+document  V: total count
 *    
 * Step 2
 *    map: emit K: word    V: document and the word's count in that document
 *    reduce: output the word followed by (document, count in that document) pairs
 *    
 *    
 * @author Administrator
 *
 */
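/*
 * A hypothetical worked example of step 1 (the class below):
 *   input a.txt: "hello tom hello jerry hello"
 *   map emits:    (hello-a.txt, 1) (tom-a.txt, 1) (hello-a.txt, 1) (jerry-a.txt, 1) (hello-a.txt, 1)
 *   reduce emits: (hello-a.txt, 3) (jerry-a.txt, 1) (tom-a.txt, 1)
 */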
public class IndexStep1 {


    /**
     * Split each line into words and
     * emit: word+document      count 1
     * @author Administrator
     *
     */
    public static class IndexMapper1 extends Mapper<LongWritable, Text, Text, IntWritable> {

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            // Get the context of the split this map task is processing: file path and offset range.
            // (If the input source were a database, this would be: database name, table name, row range.)
            InputSplit inputSplit = context.getInputSplit();  // InputSplit is an abstract class

            FileSplit fileSplit = (FileSplit) inputSplit;  // cast to FileSplit, the file-oriented implementation of InputSplit

            String filename = fileSplit.getPath().getName(); // get the file name

            String line = value.toString();
            String[] words = line.split(" ");
            for (String word : words) {

                word = format(word);
                if (word.length() > 2) {
                    context.write(new Text(word + "-" + filename), new IntWritable(1));
                }

            }
        }

    }


    /**
     * Sum the counts for each word+document key.
     * 
     * Output: K: word+document   V: total number of times the word occurs in that document
     * @author Administrator
     *
     */
    public static class IndexReduce1 extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {

            int count = 0;  
            for (IntWritable value : values) {
                count = count + value.get();
            }   
            context.write(key, new IntWritable(count));
        }

    }





    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        Configuration configuration = new Configuration();

        JobConf jobConf = new JobConf(configuration);  // unused; a leftover from the old mapred API that can be removed

        Job job = Job.getInstance(configuration);

        job.setMapperClass(IndexMapper1.class);
        job.setReducerClass(IndexReduce1.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);


        File file = new File("d:\\testOut");  // delete the local output directory if it already exists
        if (file.exists()) {
            deleteDir(file);
        }


        job.setOutputFormatClass(TextOutputFormat.class);  // the default output format; this call compiles

        job.setOutputFormatClass(SequenceFileOutputFormat.class);  // this call fails to compile because of the mapred-API import above



        FileInputFormat.setInputPaths(job, new Path("d:\\test1"));
        FileOutputFormat.setOutputPath(job, new Path("d:\\testOut"));

        job.waitForCompletion(true);




    }


    // Use a regular expression to strip punctuation, and convert upper case to lower case
    public static String format(String s) {
        String str = s.replaceAll("\\pP|\\pS", "");
        return str.toLowerCase(); // upper case to lower case
    }


    /**
     * Recursively delete a directory and all files under it
     * @param dir the directory (or file) to delete
     * @return true if the deletion succeeded
     */
    private static boolean deleteDir(File dir) {
        if (dir.isDirectory()) {
            String[] children = dir.list();
            // recursively delete every child of the directory first
            for (int i=0; i<children.length; i++) {
                boolean success = deleteDir(new File(dir, children[i]));
                if (!success) {
                    return false;
                }
            }
        }
        // the directory is now empty and can be deleted
        return dir.delete();
    }

}
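Once the job runs with the new-API SequenceFileOutputFormat, a quick way to check the result is to read the SequenceFile back. A minimal sketch, assuming the default single-reducer output name part-r-00000 under d:\testOut and the job's Text/IntWritable pairs (the class name ReadIndexOutput is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class ReadIndexOutput {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Default output file of the single reducer; adjust if the path differs.
        Path path = new Path("d:\\testOut\\part-r-00000");
        try (SequenceFile.Reader reader =
                     new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
            Text key = new Text();
            IntWritable value = new IntWritable();
            while (reader.next(key, value)) {
                System.out.println(key + "\t" + value);
            }
        }
    }
}
```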

Csdn user default icon
上传中...
上传图片
插入图片
抄袭、复制答案,以达到刷声望分或其他目的的行为,在CSDN问答是严格禁止的,一经发现立刻封号。是时候展现真正的技术了!
其他相关推荐
运行mapredurce出现Method threw 'java.lang.IllegalStateException' exception. Cannot evaluate org.apache.hadoop.mapreduce.Job.toString()
执行下述代码后在,创建job后会有上述异常,但是可以执行到最后,但是job没有提交上去执行,在历史里也看不到有执行记录求帮助新手o(╥﹏╥)o。 package MapReducer; import com.sun.org.apache.bcel.internal.generic.RETURN; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import java.io.File; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.StringTokenizer; /** * @Describe MapReducer第一个读取文档并计数 * @Author zhanglei * @Date 2019/11/18 22:53 **/ public class WordCountApp extends Configured implements Tool { public int run(String[] strings) throws Exception { String input_path="hdfs://192.168.91.130:8020/data/wc.txt"; String output_path="hdfs://192.168.91.130:8020/data/outputwc"; Configuration configuration = getConf(); final FileSystem fileSystem = FileSystem.get(new URI(input_path),configuration); if(fileSystem.exists(new Path(output_path))){ fileSystem.delete(new Path(output_path),true); } Job job = Job.getInstance(configuration,"WordCountApp"); job.setJarByClass(WordCountApp.class); job.setMapperClass(WordCountMapper.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); job.setReducerClass(WordCountReducer.class); job.setInputFormatClass(TextInputFormat.class); Path inpath = new Path(input_path); FileInputFormat.addInputPath(job,inpath); job.setOutputFormatClass(TextOutputFormat.class); Path outpath = new Path(output_path); FileOutputFormat.setOutputPath(job,outpath); return job.waitForCompletion(true) ? 0:1; } //继承 public static class WordCountMapper extends Mapper<Object,Text,Text,IntWritable>{ private final static IntWritable one= new IntWritable(1); private Text word = new Text(); public void map(Object key,Text value,Context context) throws IOException, InterruptedException { Text t = value; StringTokenizer itr = new StringTokenizer(value.toString()); while(itr.hasMoreTokens()){ word.set(itr.nextToken()); context.write(word,one); } } } public static class WordCountReducer extends Reducer<Object,Text,Text,IntWritable>{ private final static IntWritable res= new IntWritable(1); public void reduce(Text key,Iterable<IntWritable> values,Context context) throws IOException, InterruptedException { int sum = 0; for(IntWritable val:values){ sum+=val.get(); } res.set(sum); context.write(key,res); } } public static void main(String[] args) throws Exception { int exitCode = ToolRunner.run(new WordCountApp(), args); System.exit(exitCode); } }
hadoop 报java.lang.InstantiationException
package mapreduce; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner; public class WordCountApp { static final String INPUT_PATH = "hdfs://chaoren:9000/hello"; static final String OUTPUT_PATH = "hdfs://chaoren:9000/hello_statics"; public static void main(String[] args) throws Exception { Configuration conf = new Configuration(); Job job = new Job(conf, WordCountApp.class.getSimpleName()); //1。1输入的目录在哪里 FileInputFormat.setInputPaths(job, INPUT_PATH); //指定对输入的数据进行格式化处理 job.setInputFormatClass(TextInputFormat.class); //1.2指定自定义的mapper类 job.setMapperClass(MyMapper.class); //指定map输出的<key,value>类型 job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(LongWritable.class); //1.3 分区 job.setPartitionerClass(HashPartitioner.class); job.setNumReduceTasks(1); //1.4 TODO 排序分组 //1.5 TODO <可选>规约 //2.2指定自定义的Reducer类 job.setReducerClass(MyReducer.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(LongWritable.class); //2.2指定输出路径 FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH)); //2.3指定输出的格式化类 job.setOutputFormatClass(FileOutputFormat.class); //把作业提交jobTracker运行 job.waitForCompletion(true); } /** * KEYIN 即k1 表示每一行的起始位置<偏移量> * VALUEIN 即v1 表示每一行的文本内容 * KEYOUT 即k1 表示每一行中的单词 * VALUEOUT 即v1 表示每一行中,每个单词出现的次数 * @author Administrator * */ static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable> { @Override protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException { String[] splited = value.toString().split("\t"); for(String word : splited) { context.write(new Text(word), new LongWritable(1)); } } } /** * KEYIN 即k2 表示每一行中每个单词 * VALUEIN 即v2 表示每一行中每个单词出现的次数 * KEYOUT 即k3 表示整个文件中的不同单词 * VALUEOUT 即v3 表示整个文件中的不同单词的出现总数 * @author Administrator * */ static class MyReducer extends Reducer<Text,LongWritable, Text, LongWritable>{ protected void reduce(Text k2, Iterable<LongWritable> v2s, Context context) throws IOException, InterruptedException { long sum = 0l; for(LongWritable v2 : v2s){ sum+=v2.get(); } context.write(k2, new LongWritable(sum)); } } }
Hadoop+Hbase报错java.net.UnknownHostException:
问题:java.net.UnknownHostException: Invalid host name: local host is: (unknown); destination host is: "master":9000; java.net.UnknownHostException; log错误日志 2017-07-13 21:26:45,915 FATAL [master:16000.activeMasterManager] master.HMaster: Failed to become active master java.net.UnknownHostException: Invalid host name: local host is: (unknown); destination host is: "master":9000; java.net.UnknownHostException; For more details see: http://wiki.apache.org/hadoop/UnknownHost at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57) at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.lang.reflect.Constructor.newInstance(Constructor.java:526) at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792) at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:744) at org.apache.hadoop.ipc.Client$Connection.<init>(Client.java:409) at org.apache.hadoop.ipc.Client.getConnection(Client.java:1518) at org.apache.hadoop.ipc.Client.call(Client.java:1451) at org.apache.hadoop.ipc.Client.call(Client.java:1412) at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229) at com.sun.proxy.$Proxy18.setSafeMode(Unknown Source) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.setSafeMode(ClientNamenodeProtocolTranslatorPB.java:666) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) at com.sun.proxy.$Proxy19.setSafeMode(Unknown Source) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279) at com.sun.proxy.$Proxy20.setSafeMode(Unknown Source) at org.apache.hadoop.hdfs.DFSClient.setSafeMode(DFSClient.java:2596) at org.apache.hadoop.hdfs.DistributedFileSystem.setSafeMode(DistributedFileSystem.java:1223) at org.apache.hadoop.hdfs.DistributedFileSystem.setSafeMode(DistributedFileSystem.java:1207) at org.apache.hadoop.hbase.util.FSUtils.isInSafeMode(FSUtils.java:525) at org.apache.hadoop.hbase.util.FSUtils.waitOnSafeMode(FSUtils.java:971) at org.apache.hadoop.hbase.master.MasterFileSystem.checkRootDir(MasterFileSystem.java:429) at org.apache.hadoop.hbase.master.MasterFileSystem.createInitialFileSystemLayout(MasterFileSystem.java:153) at org.apache.hadoop.hbase.master.MasterFileSystem.<init>(MasterFileSystem.java:128) at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:693) at org.apache.hadoop.hbase.master.HMaster.access$600(HMaster.java:189) at org.apache.hadoop.hbase.master.HMaster$2.run(HMaster.java:1803) at java.lang.Thread.run(Thread.java:745) Caused by: java.net.UnknownHostException ... 32 more 2017-07-13 21:26:45,924 FATAL [master:16000.activeMasterManager] master.HMaster: Unhandled exception. Starting shutdown. 
java.net.UnknownHostException: Invalid host name: local host is: (unknown); destination host is: "master":9000; java.net.UnknownHostException; For more details see: http://wiki.apache.org/hadoop/UnknownHost at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57) at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.lang.reflect.Constructor.newInstance(Constructor.java:526) at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792) at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:744) at org.apache.hadoop.ipc.Client$Connection.<init>(Client.java:409) at org.apache.hadoop.ipc.Client.getConnection(Client.java:1518) at org.apache.hadoop.ipc.Client.call(Client.java:1451) at org.apache.hadoop.ipc.Client.call(Client.java:1412) at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229) at com.sun.proxy.$Proxy18.setSafeMode(Unknown Source) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.setSafeMode(ClientNamenodeProtocolTranslatorPB.java:666) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) at com.sun.proxy.$Proxy19.setSafeMode(Unknown Source) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279) at com.sun.proxy.$Proxy20.setSafeMode(Unknown Source) at org.apache.hadoop.hdfs.DFSClient.setSafeMode(DFSClient.java:2596) at org.apache.hadoop.hdfs.DistributedFileSystem.setSafeMode(DistributedFileSystem.java:1223) at org.apache.hadoop.hdfs.DistributedFileSystem.setSafeMode(DistributedFileSystem.java:1207) at org.apache.hadoop.hbase.util.FSUtils.isInSafeMode(FSUtils.java:525) at org.apache.hadoop.hbase.util.FSUtils.waitOnSafeMode(FSUtils.java:971) at org.apache.hadoop.hbase.master.MasterFileSystem.checkRootDir(MasterFileSystem.java:429) at org.apache.hadoop.hbase.master.MasterFileSystem.createInitialFileSystemLayout(MasterFileSystem.java:153) at org.apache.hadoop.hbase.master.MasterFileSystem.<init>(MasterFileSystem.java:128) at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:693) at org.apache.hadoop.hbase.master.HMaster.access$600(HMaster.java:189) at org.apache.hadoop.hbase.master.HMaster$2.run(HMaster.java:1803) at java.lang.Thread.run(Thread.java:745) Caused by: java.net.UnknownHostException ... 32 more 2017-07-13 21:26:45,925 INFO [master:16000.activeMasterManager] regionserver.HRegionServer: STOPPED: Unhandled exception. Starting shutdown. 
问题补充 1、防火墙均已关闭、root最高权限 2、hadoop启动正常jps查看已启动,通过浏览器访问50070,8088无任何问题 3、Zookeeper启动正常jps查看已启动 4、已删除hbase/lib下所有关于hadoop的jar并将 hadoop/share所有关于Hadoop的jar拷贝到hbase/lib下,并添加aws-java-sdk-core-1.11.158.jar和aws-java-sdk-s3-1.11.155.jar 版本说明 1、hadoop 2.7.2 2、hbase 1.2.6 3、zookeeper 3.4.2
hadoop单词统计报错Job job_1581768459583_0001 failed
3个节点hadoop01、hadoop02、hadoop03 hadoop01是主节点 hadoop01、hadoop02、hadoop03是从节点,目前集群已搭建好,jps查看三个节点运行都很正常,而且UI也能正常显示,但是使用hadoop自带的hadoop-mapreduce-examples-2.7.4.jar的wordcount进行单词统计时报错如下,请高人指点,看不懂呀: ```[root@hadoop01 mapreduce]# hadoop jar hadoop-mapreduce-examples-2.7.4.jar wordcount /wordcount/input /wordcount/output 20/02/15 20:14:25 INFO client.RMProxy: Connecting to ResourceManager at hadoop01/192.168.233.132:8032 20/02/15 20:14:27 INFO input.FileInputFormat: Total input paths to process : 1 20/02/15 20:14:27 INFO mapreduce.JobSubmitter: number of splits:1 20/02/15 20:14:28 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1581768459583_0001 20/02/15 20:14:28 INFO impl.YarnClientImpl: Submitted application application_1581768459583_0001 20/02/15 20:14:28 INFO mapreduce.Job: The url to track the job: http://hadoop01:8088/proxy/application_1581768459583_0001/ 20/02/15 20:14:28 INFO mapreduce.Job: Running job: job_1581768459583_0001 20/02/15 20:15:38 INFO mapreduce.Job: Job job_1581768459583_0001 running in uber mode : false 20/02/15 20:15:38 INFO mapreduce.Job: map 0% reduce 0% 20/02/15 20:15:38 INFO mapreduce.Job: Job job_1581768459583_0001 failed with state FAILED due to: Application application_1581768459583_0001 failed 2 times due to Error launching appattempt_1581768459583_0001_000002. Got exception: java.io.IOException: Failed on local exception: java.io.IOException: java.io.IOException: Connection reset by peer; Host Details : local host is: "hadoop01.com/79.124.78.101"; destination host is: "79.124.78.101":43276; at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:776) at org.apache.hadoop.ipc.Client.call(Client.java:1480) at org.apache.hadoop.ipc.Client.call(Client.java:1413) at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229) at com.sun.proxy.$Proxy83.startContainers(Unknown Source) at org.apache.hadoop.yarn.api.impl.pb.client.ContainerManagementProtocolPBClientImpl.startContainers(ContainerManagementProtocolPBClientImpl.java:96) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191) at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) at com.sun.proxy.$Proxy84.startContainers(Unknown Source) at org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher.launch(AMLauncher.java:119) at org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher.run(AMLauncher.java:250) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Caused by: java.io.IOException: java.io.IOException: Connection reset by peer at org.apache.hadoop.ipc.Client$Connection$1.run(Client.java:688) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1746) at org.apache.hadoop.ipc.Client$Connection.handleSaslConnectionFailure(Client.java:651) at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:738) at org.apache.hadoop.ipc.Client$Connection.access$2900(Client.java:376) at 
org.apache.hadoop.ipc.Client.getConnection(Client.java:1529) at org.apache.hadoop.ipc.Client.call(Client.java:1452) ... 16 more Caused by: java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.read0(Native Method) at sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:39) at sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223) at sun.nio.ch.IOUtil.read(IOUtil.java:197) at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:380) at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142) at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) at java.io.FilterInputStream.read(FilterInputStream.java:133) at java.io.BufferedInputStream.fill(BufferedInputStream.java:246) at java.io.BufferedInputStream.read(BufferedInputStream.java:265) at java.io.DataInputStream.readInt(DataInputStream.java:387) at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:367) at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:561) at org.apache.hadoop.ipc.Client$Connection.access$1900(Client.java:376) at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:730) at org.apache.hadoop.ipc.Client$Connection$2.run(Client.java:726) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1746) at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:726) ... 19 more . Failing the application. 20/02/15 20:15:38 INFO mapreduce.Job: Counters: 0 ```
ubuntu下hadoop-2.6.0测试用例运行失败
Results : Failed tests: TestTableMapping.testClearingCachedMappings:144 expected:</[rack1]> but was:</[default-rack]> TestTableMapping.testTableCaching:79 expected:</[rack1]> but was:</[default-rack]> TestTableMapping.testResolve:56 expected:</[rack1]> but was:</[default-rack]> TestDecayRpcScheduler.testAccumulate:136 expected:<3> but was:<2> TestDecayRpcScheduler.testPriority:203 expected:<2> but was:<1> Tests run: 2723, Failures: 5, Errors: 0, Skipped: 91 [INFO] ------------------------------------------------------------------------ [INFO] Reactor Summary: [INFO] [INFO] Apache Hadoop Main ................................ SUCCESS [4.300s] [INFO] Apache Hadoop Project POM ......................... SUCCESS [2.250s] [INFO] Apache Hadoop Annotations ......................... SUCCESS [7.805s] [INFO] Apache Hadoop Assemblies .......................... SUCCESS [1.006s] [INFO] Apache Hadoop Project Dist POM .................... SUCCESS [8.227s] [INFO] Apache Hadoop Maven Plugins ....................... SUCCESS [9.390s] [INFO] Apache Hadoop MiniKDC ............................. SUCCESS [22.836s] [INFO] Apache Hadoop Auth ................................ SUCCESS [40.704s] [INFO] Apache Hadoop Auth Examples ....................... SUCCESS [4.181s] [INFO] Apache Hadoop Common .............................. FAILURE [27:26.889s] [INFO] Apache Hadoop NFS ................................. SKIPPED [INFO] Apache Hadoop KMS ................................. SKIPPED [INFO] Apache Hadoop Common Project ...................... SKIPPED [INFO] Apache Hadoop HDFS ................................ SKIPPED [INFO] Apache Hadoop HttpFS .............................. SKIPPED [INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED [INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED [INFO] Apache Hadoop HDFS Project ........................ SKIPPED [INFO] hadoop-yarn ....................................... SKIPPED [INFO] hadoop-yarn-api ................................... SKIPPED [INFO] hadoop-yarn-common ................................ SKIPPED [INFO] hadoop-yarn-server ................................ SKIPPED [INFO] hadoop-yarn-server-common ......................... SKIPPED [INFO] hadoop-yarn-server-nodemanager .................... SKIPPED [INFO] hadoop-yarn-server-web-proxy ...................... SKIPPED [INFO] hadoop-yarn-server-applicationhistoryservice ...... SKIPPED [INFO] hadoop-yarn-server-resourcemanager ................ SKIPPED [INFO] hadoop-yarn-server-tests .......................... SKIPPED [INFO] hadoop-yarn-client ................................ SKIPPED [INFO] hadoop-yarn-applications .......................... SKIPPED [INFO] hadoop-yarn-applications-distributedshell ......... SKIPPED [INFO] hadoop-yarn-applications-unmanaged-am-launcher .... SKIPPED [INFO] hadoop-yarn-site .................................. SKIPPED [INFO] hadoop-yarn-registry .............................. SKIPPED [INFO] hadoop-yarn-project ............................... SKIPPED [INFO] hadoop-mapreduce-client ........................... SKIPPED [INFO] hadoop-mapreduce-client-core ...................... SKIPPED [INFO] hadoop-mapreduce-client-common .................... SKIPPED [INFO] hadoop-mapreduce-client-shuffle ................... SKIPPED [INFO] hadoop-mapreduce-client-app ....................... SKIPPED [INFO] hadoop-mapreduce-client-hs ........................ SKIPPED [INFO] hadoop-mapreduce-client-jobclient ................. SKIPPED [INFO] hadoop-mapreduce-client-hs-plugins ................ 
SKIPPED [INFO] Apache Hadoop MapReduce Examples .................. SKIPPED [INFO] hadoop-mapreduce .................................. SKIPPED [INFO] Apache Hadoop MapReduce Streaming ................. SKIPPED [INFO] Apache Hadoop Distributed Copy .................... SKIPPED [INFO] Apache Hadoop Archives ............................ SKIPPED [INFO] Apache Hadoop Rumen ............................... SKIPPED [INFO] Apache Hadoop Gridmix ............................. SKIPPED [INFO] Apache Hadoop Data Join ........................... SKIPPED [INFO] Apache Hadoop Ant Tasks ........................... SKIPPED [INFO] Apache Hadoop Extras .............................. SKIPPED [INFO] Apache Hadoop Pipes ............................... SKIPPED [INFO] Apache Hadoop OpenStack support ................... SKIPPED [INFO] Apache Hadoop Amazon Web Services support ......... SKIPPED [INFO] Apache Hadoop Client .............................. SKIPPED [INFO] Apache Hadoop Mini-Cluster ........................ SKIPPED [INFO] Apache Hadoop Scheduler Load Simulator ............ SKIPPED [INFO] Apache Hadoop Tools Dist .......................... SKIPPED [INFO] Apache Hadoop Tools ............................... SKIPPED [INFO] Apache Hadoop Distribution ........................ SKIPPED [INFO] ------------------------------------------------------------------------ [INFO] BUILD FAILURE [INFO] ------------------------------------------------------------------------ [INFO] Total time: 29:15.906s [INFO] Finished at: Tue Jun 09 01:10:59 CST 2015 [INFO] Final Memory: 65M/202M [INFO] ------------------------------------------------------------------------ [ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.16:test (default-test) on project hadoop-common: There are test failures. [ERROR] [ERROR] Please refer to /home/cj/workspace/hadoop-2.6.0-src/hadoop-common-project/hadoop-common/target/surefire-reports for the individual test results. [ERROR] -> [Help 1] [ERROR] [ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch. [ERROR] Re-run Maven using the -X switch to enable full debug logging. [ERROR] [ERROR] For more information about the errors and possible solutions, please read the following articles: [ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException [ERROR] [ERROR] After correcting the problems, you can resume the build with the command [ERROR] mvn <goals> -rf :hadoop-common
求教,hadoop-2.2.0升级hadoop-2.6.0。
最近需要升级hadoop,从hadoop-2.2.0升级到hadoop-2.6.0,根据http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/HdfsRollingUpgrade.html#dfsadmin_-rollingUpgrade 提示的来,第一步:./bin/hdfs dfsadmin -rollingUPgrade prepare 就出现了:PREPARE rolling upgrade ... rollingUpgrade: Unknown method rollingUpgrade called on org.apache.hadoop.hdfs.protocol.ClientProtocol protocol.
hadoop使用yarn运行jar 报java.lang.ClassNotFoundException 找不到类 (找不到的不是主类)
1、写了一个数据分析的程序,用idea打成jar包,依赖jar都打进去了 ![图片说明](https://img-ask.csdn.net/upload/201911/03/1572779664_439750.png) 已经设置了 job.setJarByClass(CountDurationRunner.class); 2、开启hadoop zookeeper 和hbase集群 3、yarn运行jar : $ /opt/module/hadoop-2.7.2/bin/yarn jar ct_analysis.jar runner.CountDurationRunner 报错截图:![图片说明](https://img-ask.csdn.net/upload/201911/03/1572779908_781957.png) CountDurationRunner类代码: ``` package runner; import kv.key.ComDimension; //就是这里第一个就没找到 import kv.value.CountDurationValue; import mapper.CountDurationMapper; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import outputformat.MysqlOutputFormat; import reducer.CountDurationReducer; import java.io.IOException; public class CountDurationRunner implements Tool { private Configuration conf = null; @Override public void setConf(Configuration conf) { this.conf = HBaseConfiguration.create(conf); } @Override public Configuration getConf() { return this.conf; } @Override public int run(String[] args) throws Exception { //得到conf Configuration conf = this.getConf(); //实例化job Job job = Job.getInstance(conf); job.setJarByClass(CountDurationRunner.class); //组装Mapper InputFormat initHbaseInputConfig(job); //组装Reducer outputFormat initHbaseOutputConfig(job); return job.waitForCompletion(true) ? 0 : 1; } private void initHbaseOutputConfig(Job job) { Connection connection = null; Admin admin = null; String tableName = "ns_ct:calllog"; try { connection = ConnectionFactory.createConnection(job.getConfiguration()); admin = connection.getAdmin(); if(!admin.tableExists(TableName.valueOf(tableName))) throw new RuntimeException("没有找到目标表"); Scan scan = new Scan(); //初始化Mapper TableMapReduceUtil.initTableMapperJob( tableName, scan, CountDurationMapper.class, ComDimension.class, Text.class, job, true); }catch (IOException e){ e.printStackTrace(); }finally { try { if(admin!=null) admin.close(); if(connection!=null) connection.close(); } catch (IOException e) { e.printStackTrace(); } } } private void initHbaseInputConfig(Job job) { job.setReducerClass(CountDurationReducer.class); job.setOutputKeyClass(ComDimension.class); job.setOutputValueClass(CountDurationValue.class); job.setOutputFormatClass(MysqlOutputFormat.class); } public static void main(String[] args) { try { int status = ToolRunner.run(new CountDurationRunner(), args); System.exit(status); } catch (Exception e) { e.printStackTrace(); } } } 这问题困扰很久了,有人说classPath不对,不知道如何修改,求助! ```
企业中现在用的hadoop版本是多少?hadoop2.x还是3.x
企业中现在用的hadoop版本是多少?hadoop2.x还是3.x 还有大数据的其他版本,都是多少呢?有好心人告诉吗? 如果我想直接用新版,会有什么影响吗?
maven3.3.9编译hadoop2.6.5报错 帮忙解决问题
[INFO] Building Apache Hadoop Main 2.6.5 [INFO] ------------------------------------------------------------------------ Downloading: http://mirrors.ibiblio.org/pub/mirrors/maven2/org/apache/maven/plugins/maven-javadoc-plugin/maven-metadata.xml [WARNING] Checksum validation failed, expected <html> but is b113767b47336dcc165c5dd2222b5df4cb86b7ce for http://mirrors.ibiblio.org/pub/mirrors/maven2/org/apache/maven/plugins/maven-javadoc-plugin/maven-metadata.xml [WARNING] Could not validate integrity of download from http://mirrors.ibiblio.org/pub/mirrors/maven2/org/apache/maven/plugins/maven-javadoc-plugin/maven-metadata.xml: Checksum validation failed, expected <html> but is b113767b47336dcc165c5dd2222b5df4cb86b7ce [WARNING] Checksum validation failed, expected <html> but is b113767b47336dcc165c5dd2222b5df4cb86b7ce for http://mirrors.ibiblio.org/pub/mirrors/maven2/org/apache/maven/plugins/maven-javadoc-plugin/maven-metadata.xml Downloaded: http://mirrors.ibiblio.org/pub/mirrors/maven2/org/apache/maven/plugins/maven-javadoc-plugin/maven-metadata.xml (99 KB at 11.8 KB/sec) [WARNING] The metadata /root/.m2/repository/org/apache/maven/plugins/maven-javadoc-plugin/maven-metadata-ibiblio.org.xml is invalid: end tag name </body> must match start tag name <hr> from line 888 (position: START_TAG seen ... 08-Nov-2014 19:04 207\r\n</pre><hr></body>... @888:18) [INFO] ------------------------------------------------------------------------ [INFO] Reactor Summary: [INFO] [INFO] Apache Hadoop Main ................................. FAILURE [ 8.416 s] [INFO] Apache Hadoop Build Tools .......................... SKIPPED [INFO] Apache Hadoop Project POM .......................... SKIPPED [INFO] Apache Hadoop Annotations .......................... SKIPPED [INFO] Apache Hadoop Assemblies ........................... SKIPPED [INFO] Apache Hadoop Project Dist POM ..................... SKIPPED [INFO] Apache Hadoop Maven Plugins ........................ SKIPPED [INFO] Apache Hadoop MiniKDC .............................. SKIPPED [INFO] Apache Hadoop Auth ................................. SKIPPED [INFO] Apache Hadoop Auth Examples ........................ SKIPPED [INFO] Apache Hadoop Common ............................... SKIPPED [INFO] Apache Hadoop NFS .................................. SKIPPED [INFO] Apache Hadoop KMS .................................. SKIPPED [INFO] Apache Hadoop Common Project ....................... SKIPPED [INFO] Apache Hadoop HDFS ................................. SKIPPED [INFO] Apache Hadoop HttpFS ............................... SKIPPED [INFO] Apache Hadoop HDFS BookKeeper Journal .............. SKIPPED [INFO] Apache Hadoop HDFS-NFS ............................. SKIPPED [INFO] Apache Hadoop HDFS Project ......................... SKIPPED [INFO] hadoop-yarn ........................................ SKIPPED [INFO] hadoop-yarn-api .................................... SKIPPED [INFO] hadoop-yarn-common ................................. SKIPPED [INFO] hadoop-yarn-server ................................. SKIPPED [INFO] hadoop-yarn-server-common .......................... SKIPPED [INFO] hadoop-yarn-server-nodemanager ..................... SKIPPED [INFO] hadoop-yarn-server-web-proxy ....................... SKIPPED [INFO] hadoop-yarn-server-applicationhistoryservice ....... SKIPPED [INFO] hadoop-yarn-server-resourcemanager ................. SKIPPED [INFO] hadoop-yarn-server-tests ........................... SKIPPED [INFO] hadoop-yarn-client ................................. 
SKIPPED [INFO] hadoop-yarn-applications ........................... SKIPPED [INFO] hadoop-yarn-applications-distributedshell .......... SKIPPED [INFO] hadoop-yarn-applications-unmanaged-am-launcher ..... SKIPPED [INFO] hadoop-yarn-site ................................... SKIPPED [INFO] hadoop-yarn-registry ............................... SKIPPED [INFO] hadoop-yarn-project ................................ SKIPPED [INFO] hadoop-mapreduce-client ............................ SKIPPED [INFO] hadoop-mapreduce-client-core ....................... SKIPPED [INFO] hadoop-mapreduce-client-common ..................... SKIPPED [INFO] hadoop-mapreduce-client-shuffle .................... SKIPPED [INFO] hadoop-mapreduce-client-app ........................ SKIPPED [INFO] hadoop-mapreduce-client-hs ......................... SKIPPED [INFO] hadoop-mapreduce-client-jobclient .................. SKIPPED [INFO] hadoop-mapreduce-client-hs-plugins ................. SKIPPED [INFO] Apache Hadoop MapReduce Examples ................... SKIPPED [INFO] hadoop-mapreduce ................................... SKIPPED [INFO] Apache Hadoop MapReduce Streaming .................. SKIPPED [INFO] Apache Hadoop Distributed Copy ..................... SKIPPED [INFO] Apache Hadoop Archives ............................. SKIPPED [INFO] Apache Hadoop Rumen ................................ SKIPPED [INFO] Apache Hadoop Gridmix .............................. SKIPPED [INFO] Apache Hadoop Data Join ............................ SKIPPED [INFO] Apache Hadoop Ant Tasks ............................ SKIPPED [INFO] Apache Hadoop Extras ............................... SKIPPED [INFO] Apache Hadoop Pipes ................................ SKIPPED [INFO] Apache Hadoop OpenStack support .................... SKIPPED [INFO] Apache Hadoop Amazon Web Services support .......... SKIPPED [INFO] Apache Hadoop Client ............................... SKIPPED [INFO] Apache Hadoop Mini-Cluster ......................... SKIPPED [INFO] Apache Hadoop Scheduler Load Simulator ............. SKIPPED [INFO] Apache Hadoop Tools Dist ........................... SKIPPED [INFO] Apache Hadoop Tools ................................ SKIPPED [INFO] Apache Hadoop Distribution ......................... SKIPPED [INFO] ------------------------------------------------------------------------ [INFO] BUILD FAILURE [INFO] ------------------------------------------------------------------------ [INFO] Total time: 06:03 min [INFO] Finished at: 2018-06-23T11:25:17+08:00 [INFO] Final Memory: 27M/69M [INFO] ------------------------------------------------------------------------ [ERROR] Error resolving version for plugin 'org.apache.maven.plugins:maven-javadoc-plugin' from the repositories [local (/root/.m2/repository), ibiblio.org (http://mirrors.ibiblio.org/pub/mirrors/maven2)]: Plugin not found in any plugin repository -> [Help 1] [ERROR] [ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch. [ERROR] Re-run Maven using the -X switch to enable full debug logging. [ERROR] [ERROR] For more information about the errors and possible solutions, please read the following articles: [ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/PluginVersionResolutionException You have new mail in /var/spool/mail/root
hadoop wordcount报错192.168.79.172 to  :54895 拒绝连接
各位大神,我是hadoop新手,在自己电脑安装好hadoop后,运行wordcount报错,报错已困扰两天,希望有大神可以帮解决。 报错日志如下: [hadoop@hadoop0 mapreduce]$ hadoop jar hadoop-mapreduce-examples-2.7.1.jar wordcount /input /output 17/04/14 13:49:46 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 17/04/14 13:49:48 INFO client.RMProxy: Connecting to ResourceManager at hadoop0/192.168.79.172:8032 17/04/14 13:49:50 INFO input.FileInputFormat: Total input paths to process : 2 17/04/14 13:49:50 INFO mapreduce.JobSubmitter: number of splits:2 17/04/14 13:49:50 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1492077890345_0003 17/04/14 13:49:51 INFO impl.YarnClientImpl: Submitted application application_1492077890345_0003 17/04/14 13:49:51 INFO mapreduce.Job: The url to track the job: http://hadoop0:8088/proxy/application_1492077890345_0003/ 17/04/14 13:49:51 INFO mapreduce.Job: Running job: job_1492077890345_0003 17/04/14 13:50:12 INFO mapreduce.Job: Job job_1492077890345_0003 running in uber mode : false 17/04/14 13:50:12 INFO mapreduce.Job: map 0% reduce 0% 17/04/14 13:50:12 INFO mapreduce.Job: Job job_1492077890345_0003 failed with state FAILED due to: Application application_1492077890345_0003 failed 2 times due to Error launching appattempt_1492077890345_0003_000002. Got exception: java.net.ConnectException: Call From hadoop0/192.168.79.172 to  :54895 failed on connection exception: java.net.ConnectException: 拒绝连接; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.lang.reflect.Constructor.newInstance(Constructor.java:423) at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:792) at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732) at org.apache.hadoop.ipc.Client.call(Client.java:1480) at org.apache.hadoop.ipc.Client.call(Client.java:1407) at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229) at com.sun.proxy.$Proxy83.startContainers(Unknown Source) at org.apache.hadoop.yarn.api.impl.pb.client.ContainerManagementProtocolPBClientImpl.startContainers(ContainerManagementProtocolPBClientImpl.java:96) at org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher.launch(AMLauncher.java:119) at org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher.run(AMLauncher.java:254) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745) Caused by: java.net.ConnectException: 拒绝连接 at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717) at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206) at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531) at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495) at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:609) at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:707) at org.apache.hadoop.ipc.Client$Connection.access$2800(Client.java:370) at org.apache.hadoop.ipc.Client.getConnection(Client.java:1529) at org.apache.hadoop.ipc.Client.call(Client.java:1446) ... 
9 more . Failing the application. 17/04/14 13:50:12 INFO mapreduce.Job: Counters: 0 metrics文件内容如下: ![图片说明](https://img-ask.csdn.net/upload/201704/14/1492150334_860585.png) 另外,在报错日志中发下这么句话Call From hadoop0/192.168.79.172 to  :54895 failed ,为什么54895只有端口前面没有IP,是不是环境没有配置正确?
hadoop2.6使用snappy.报错
hadoop2.6使用snappy.报错native snappy library not available: this version of libhadoop was built without snappy support. ![图片说明](https://img-ask.csdn.net/upload/201505/05/1430759464_528696.png) hadoop里面明明已经显示启用了snappy
centos6.8搭建hadoop2.X伪分布式无法启动namenode
能够格式化节点信息,但是namenode无法启动。在日志中出现如下错误 ``` STARTUP_MSG: build = Unknown -r Unknown; compiled by 'root' on 2017-05-22T10:49Z STARTUP_MSG: java = 1.8.0_144 ************************************************************/ 2020-01-31 16:37:06,931 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT] 2020-01-31 16:37:06,935 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: createNameNode [] 2020-01-31 16:37:07,161 INFO org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from hadoop-metrics2.properties 2020-01-31 16:37:07,233 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot period at 10 second(s). 2020-01-31 16:37:07,233 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: NameNode metrics system started 2020-01-31 16:37:07,236 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: fs.defaultFS is hdfs://hadoop101:9000 2020-01-31 16:37:07,236 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Clients are to use hadoop101:9000 to access this namenode/service. 2020-01-31 16:37:07,409 INFO org.apache.hadoop.hdfs.DFSUtil: Starting Web-server for hdfs at: http://huawei_mate_10-53013e4c60:50070 2020-01-31 16:37:07,457 INFO org.mortbay.log: Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog 2020-01-31 16:37:07,464 INFO org.apache.hadoop.security.authentication.server.AuthenticationFilter: Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 2020-01-31 16:37:07,469 INFO org.apache.hadoop.http.HttpRequestLog: Http request log for http.requests.namenode is not defined 2020-01-31 16:37:07,473 INFO org.apache.hadoop.http.HttpServer2: Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter) 2020-01-31 16:37:07,475 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: Failed to start namenode. 
java.lang.IllegalArgumentException: The value of property bind.address must not be null at com.google.common.base.Preconditions.checkArgument(Preconditions.java:88) at org.apache.hadoop.conf.Configuration.set(Configuration.java:1134) at org.apache.hadoop.conf.Configuration.set(Configuration.java:1115) at org.apache.hadoop.http.HttpServer2.initializeWebServer(HttpServer2.java:398) at org.apache.hadoop.http.HttpServer2.<init>(HttpServer2.java:351) at org.apache.hadoop.http.HttpServer2.<init>(HttpServer2.java:114) at org.apache.hadoop.http.HttpServer2$Builder.build(HttpServer2.java:290) at org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.start(NameNodeHttpServer.java:126) at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:752) at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:638) at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:811) at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:795) at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1488) at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1554) 2020-01-31 16:37:07,477 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1 2020-01-31 16:37:07,479 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG: /************************************************************ SHUTDOWN_MSG: Shutting down NameNode at hadoop101/192.168.117.101 ************************************************************/ ``` 主要的报错信息是 java.lang.IllegalArgumentException: The value of property bind.address must not be null core-site.xml的配置信息 <configuration> <!-- 指定HDFS中NameNode的地址 --> <property> <name>fs.defaultFS</name> <value>hdfs://hadoop101:9000</value> </property> <!-- hadoop101已经在hosts文件中配置 --> <!-- 指定Hadoop运行时产生文件的存储目录 --> <property> <name>hadoop.tmp.dir</name> <value>/opt/module/hadoop-2.7.2/data/tmp</value> </property> </configuration> 希望大神能够帮忙解答一下。万分感谢感谢
(Hadoop)VMware下Centos6.5虚拟机安装Hadoop输入指令报错
输入 bin/hdfs dfsadmin -report 然后报错 19/05/09 11:21:41 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable report: No FileSystem for scheme: dfs
window系统下开发hadoop2.2出现报错
Exception in thread "main" java.io.IOException: Cannot run program "E:\hadoop-2.4.0\bin\winutils.exe": CreateProcess error=216, ӳÏñÎļþ %1 ÓÐЧ£¬µ«²»ÊÊÓÃÓڴ˼ÆË at java.lang.ProcessBuilder.start(Unknown Source) at org.apache.hadoop.util.Shell.runCommand(Shell.java:404) at org.apache.hadoop.util.Shell.run(Shell.java:379) at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:589) at org.apache.hadoop.util.Shell.execCommand(Shell.java:678) at org.apache.hadoop.util.Shell.execCommand(Shell.java:661) at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:639) at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:435) at org.apache.hadoop.fs.FilterFileSystem.mkdirs(FilterFileSystem.java:277) at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:125) at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:344) at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1268) at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1265) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Unknown Source) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1491) at org.apache.hadoop.mapreduce.Job.submit(Job.java:1265) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1286) at WordCount.main(WordCount.java:84) Caused by: java.io.IOException: CreateProcess error=216, ӳÏñÎļþ %1 ÓÐЧ£¬µ«²»ÊÊÓÃÓڴ˼ÆË at java.lang.ProcessImpl.create(Native Method) at java.lang.ProcessImpl.<init>(Unknown Source) at java.lang.ProcessImpl.start(Unknown Source) ... 19 more
org.apache.hadoop.mapred.LocalJobRunner这个类在那个包里?
我在用sqoop1的javaapi操作,但是一执行命令就会报这个错,hadoop集群并不在运行程序的机器上,我是缺少这个类么,我翻了一般依赖里面确实没有 ``` Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.mapred.LocalJobRunner.<init>(Lorg/apache/hadoop/conf/Configuration;)V at org.apache.hadoop.mapred.LocalClientProtocolProvider.create(LocalClientProtocolProvider.java:42) at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:95) at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:82) at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:75) at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1260) at org.apache.hadoop.mapreduce.Job$9.run(Job.java:1256) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:422) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1866) at org.apache.hadoop.mapreduce.Job.connect(Job.java:1255) at org.apache.hadoop.mapreduce.Job.submit(Job.java:1284) at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308) at org.apache.sqoop.mapreduce.ExportJobBase.doSubmitJob(ExportJobBase.java:322) at org.apache.sqoop.mapreduce.ExportJobBase.runJob(ExportJobBase.java:299) at org.apache.sqoop.mapreduce.ExportJobBase.runExport(ExportJobBase.java:440) at org.apache.sqoop.manager.SqlManager.exportTable(SqlManager.java:931) at org.apache.sqoop.tool.ExportTool.exportTable(ExportTool.java:80) at org.apache.sqoop.tool.ExportTool.run(ExportTool.java:99) at org.apache.sqoop.Sqoop.run(Sqoop.java:147) at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76) at org.apache.sqoop.Sqoop.runSqoop(Sqoop.java:183) at com.mshuoke.datagw.impl.sqoop.SqoopTest.main(SqoopTest.java:52) 09:55:47.069 [Thread-4] DEBUG org.apache.hadoop.util.ShutdownHookManager - ShutdownHookManger complete shutdown. ```
hadoop升级2.2.0后运行job报错Shell$ExitCodeException: id: dr.who: No such user
2013-12-03 11:34:56,590 WARN org.apache.hadoop.security.UserGroupInformation: No groups available for user dr.who 2013-12-03 11:34:56,589 WARN org.apache.hadoop.security.ShellBasedUnixGroupsMapping: got exception trying to get groups for user dr.who org.apache.hadoop.util.Shell$ExitCodeException: id: dr.who: No such user at org.apache.hadoop.util.Shell.runCommand(Shell.java:504) at org.apache.hadoop.util.Shell.run(Shell.java:417) at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:636) at org.apache.hadoop.util.Shell.execCommand(Shell.java:725) at org.apache.hadoop.util.Shell.execCommand(Shell.java:708) at org.apache.hadoop.security.ShellBasedUnixGroupsMapping.getUnixGroups(ShellBasedUnixGroupsMapping.java:83) at org.apache.hadoop.security.ShellBasedUnixGroupsMapping.getGroups(ShellBasedUnixGroupsMapping.java:52) at org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback.getGroups(JniBasedUnixGroupsMappingWithFallback.java:50) at org.apache.hadoop.security.Groups.getGroups(Groups.java:95) at org.apache.hadoop.security.UserGroupInformation.getGroupNames(UserGroupInformation.java:1376) at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.<init>(FSPermissionChecker.java:63) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getPermissionChecker(FSNamesystem.java:3228) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getListingInt(FSNamesystem.java:4063) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getListing(FSNamesystem.java:4052) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getListing(NameNodeRpcServer.java:748) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.getDirectoryListing(NamenodeWebHdfsMethods.java:715) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.getListingStream(NamenodeWebHdfsMethods.java:727) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.get(NamenodeWebHdfsMethods.java:675) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.access$400(NamenodeWebHdfsMethods.java:114) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods$3.run(NamenodeWebHdfsMethods.java:623) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods$3.run(NamenodeWebHdfsMethods.java:618) at java.security.AccessController.doPrivileged(Native Method) at javax.security.auth.Subject.doAs(Subject.java:396) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1515) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.get(NamenodeWebHdfsMethods.java:618) at org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.getRoot(NamenodeWebHdfsMethods.java:586) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25) at java.lang.reflect.Method.invoke(Method.java:597) at com.sun.jersey.spi.container.JavaMethodInvokerFactory$1.invoke(JavaMethodInvokerFactory.java:60) at com.sun.jersey.server.impl.model.method.dispatch.AbstractResourceMethodDispatchProvider$ResponseOutInvoker._dispatch(AbstractResourceMethodDispatchProvider.java:205) at com.sun.jersey.server.impl.model.method.dispatch.ResourceJavaMethodDispatcher.dispatch(ResourceJavaMethodDispatcher.java:75) at com.sun.jersey.server.impl.uri.rules.HttpMethodRule.accept(HttpMethodRule.java:288) 
at com.sun.jersey.server.impl.uri.rules.ResourceClassRule.accept(ResourceClassRule.java:108) at com.sun.jersey.server.impl.uri.rules.RightHandPathRule.accept(RightHandPathRule.java:147) at com.sun.jersey.server.impl.uri.rules.RootResourceClassesRule.accept(RootResourceClassesRule.java:84) at com.sun.jersey.server.impl.application.WebApplicationImpl._handleRequest(WebApplicationImpl.java:1469) at com.sun.jersey.server.impl.application.WebApplicationImpl._handleRequest(WebApplicationImpl.java:1400) at com.sun.jersey.server.impl.application.WebApplicationImpl.handleRequest(WebApplicationImpl.java:1349) at com.sun.jersey.server.impl.application.WebApplicationImpl.handleRequest(WebApplicationImpl.java:1339) at com.sun.jersey.spi.container.servlet.WebComponent.service(WebComponent.java:416) at com.sun.jersey.spi.container.servlet.ServletContainer.service(ServletContainer.java:537) at com.sun.jersey.spi.container.servlet.ServletContainer.service(ServletContainer.java:699) at javax.servlet.http.HttpServlet.service(HttpServlet.java:820) at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1221) at org.apache.hadoop.security.authentication.server.AuthenticationFilter.doFilter(AuthenticationFilter.java:384) at org.apache.hadoop.hdfs.web.AuthFilter.doFilter(AuthFilter.java:85) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212) at org.apache.hadoop.http.HttpServer$QuotingInputFilter.doFilter(HttpServer.java:1310) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212) at org.apache.hadoop.http.NoCacheFilter.doFilter(NoCacheFilter.java:45) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212) at org.apache.hadoop.http.NoCacheFilter.doFilter(NoCacheFilter.java:45) at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212) at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:399) at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216) at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182) at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:766) at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:450) at org.mortbay.jetty.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:230) at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152) at org.mortbay.jetty.Server.handle(Server.java:326) at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542) at org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928) at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549) at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:212) at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404) at org.mortbay.io.nio.SelectChannelEndPoint.run(SelectChannelEndPoint.java:410) at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)
Help! Hadoop 2.2.0 cluster setup: the NameNode reports a NullPointerException after HDFS starts
The log is as follows:

```
2015-02-07 01:01:46,610 FATAL org.apache.hadoop.hdfs.server.namenode.NameNode: Error encountered requiring NN shutdown. Shutting down immediately.
java.lang.NullPointerException
    at org.apache.hadoop.hdfs.DFSUtil.substituteForWildcardAddress(DFSUtil.java:942)
    at org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer.getHttpAddress(StandbyCheckpointer.java:108)
    at org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer.setNameNodeAddresses(StandbyCheckpointer.java:90)
    at org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer.<init>(StandbyCheckpointer.java:76)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startStandbyServices(FSNamesystem.java:994)
    at org.apache.hadoop.hdfs.server.namenode.NameNode$NameNodeHAContext.startStandbyServices(NameNode.java:1456)
    at org.apache.hadoop.hdfs.server.namenode.ha.StandbyState.enterState(StandbyState.java:58)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:686)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:669)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1254)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1320)
2015-02-07 01:01:46,614 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1
2015-02-07 01:01:46,620 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
```

I just don't understand why it keeps throwing a NullPointerException, and what's more, the error never shows up when I attach a remote debugger. I'm at a complete loss.
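One cause frequently reported for this exact stack (DFSUtil.substituteForWildcardAddress reached from StandbyCheckpointer.getHttpAddress) is an incomplete HA configuration: the standby checkpointer cannot resolve an HTTP address for one of the NameNode IDs and ends up dereferencing a null address. It is worth checking that dfs.namenode.http-address.* is defined for every NameNode ID listed under dfs.ha.namenodes.&lt;nameservice&gt;. A hedged hdfs-site.xml sketch, where `mycluster`, `nn1`, `nn2` and the hostnames are placeholders for your own setup:

```
<!-- hdfs-site.xml: sketch only; substitute your nameservice, NameNode IDs
     and hostnames. Both entries must be present in an HA pair. -->
<property>
  <name>dfs.namenode.http-address.mycluster.nn1</name>
  <value>master1:50070</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.nn2</name>
  <value>master2:50070</value>
</property>
```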
Hadoop 2.7.2 distributed setup: after formatting, the NameNode fails to start
Step 1: run `hadoop namenode -formate`. Output:

```
STARTUP_MSG: build = https://git-wip-us.apache.org/repos/asf/hadoop.git -r b165c4fe8a74265c792ce23f546c64604acf0e41; compiled by 'jenkins' on 2016-01-26T00:08Z
STARTUP_MSG: java = 1.7.0_76
************************************************************/
16/08/02 04:26:16 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
16/08/02 04:26:16 INFO namenode.NameNode: createNameNode [-formate]
Usage: java NameNode [-backup] |
    [-checkpoint] |
    [-format [-clusterid cid ] [-force] [-nonInteractive] ] |
    [-upgrade [-clusterid cid] [-renameReserved<k-v pairs>] ] |
    [-upgradeOnly [-clusterid cid] [-renameReserved<k-v pairs>] ] |
    [-rollback] |
    [-rollingUpgrade <rollback|downgrade|started> ] |
    [-finalize] |
    [-importCheckpoint] |
    [-initializeSharedEdits] |
    [-bootstrapStandby] |
    [-recover [ -force] ] |
    [-metadataVersion ] ]
16/08/02 04:26:16 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.234.100
```

Step 2: run start-all.sh. The result is as follows:

```
[root@master sbin]# sh start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
16/08/02 05:45:24 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [master]
master: starting namenode, logging to /usr/hadoop/hadoop-2.7.2/logs/hadoop-root-namenode-master.out
slave2: starting datanode, logging to /usr/hadoop/hadoop-2.7.2/logs/hadoop-root-datanode-slave2.out
slave3: starting datanode, logging to /usr/hadoop/hadoop-2.7.2/logs/hadoop-root-datanode-slave3.out
slave1: starting datanode, logging to /usr/hadoop/hadoop-2.7.2/logs/hadoop-root-datanode-slave1.out
Starting secondary namenodes [master]
master: starting secondarynamenode, logging to /usr/hadoop/hadoop-2.7.2/logs/hadoop-root-secondarynamenode-master.out
16/08/02 05:46:01 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
starting yarn daemons
starting resourcemanager, logging to /usr/hadoop/hadoop-2.7.2/logs/yarn-root-resourcemanager-master.out
slave2: starting nodemanager, logging to /usr/hadoop/hadoop-2.7.2/logs/yarn-root-nodemanager-slave2.out
slave3: starting nodemanager, logging to /usr/hadoop/hadoop-2.7.2/logs/yarn-root-nodemanager-slave3.out
slave1: starting nodemanager, logging to /usr/hadoop/hadoop-2.7.2/logs/yarn-root-nodemanager-slave1.out
[root@master sbin]# jps
2613 ResourceManager
2467 SecondaryNameNode
2684 Jps
```

NameNode log:

```
2016-08-02 05:49:49,910 WARN org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Encountered exception loading fsimage
java.io.IOException: NameNode is not formatted.
    at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:225)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:975)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:681)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:584)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:644)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:811)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:795)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1488)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1554)
2016-08-02 05:49:49,928 INFO org.mortbay.log: Stopped HttpServer2$SelectChannelConnectorWithSafeStartup@0.0.0.0:50070
2016-08-02 05:49:49,928 WARN org.apache.hadoop.http.HttpServer2: HttpServer Acceptor: isRunning is false. Rechecking.
2016-08-02 05:49:49,930 WARN org.apache.hadoop.http.HttpServer2: HttpServer Acceptor: isRunning is false
2016-08-02 05:49:49,934 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Stopping NameNode metrics system...
2016-08-02 05:49:49,935 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: NameNode metrics system stopped.
2016-08-02 05:49:49,935 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: NameNode metrics system shutdown complete.
2016-08-02 05:49:49,935 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: Failed to start namenode.
java.io.IOException: NameNode is not formatted.
    at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:225)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:975)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:681)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:584)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:644)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:811)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:795)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1488)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1554)
2016-08-02 05:49:49,949 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1
2016-08-02 05:49:49,961 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.234.100
```
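The Step 1 output already explains this failure: the line `createNameNode [-formate]` shows the option was mistyped (`-formate` instead of `-format`), so the NameNode only printed its usage text and was never actually formatted; the later `NameNode is not formatted.` IOException follows directly from that. The fix should simply be to rerun the command with the correct spelling and start the daemons again:

```
hdfs namenode -format   # the 'hadoop namenode -format' form also works but is deprecated
start-dfs.sh
start-yarn.sh
```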
Word count on HDFS fails
# Hadoop cluster: word count on HDFS fails

## Command executed:
```
hadoop jar hadoop-mapreduce-examples-2.7.4.jar wordcount \
> /wordcount/input /wordcount/output
```

## ResourceManager is already running
```
[root@hadoop01 mapreduce]# jps
2977 NodeManager
2597 DataNode
3557 Jps
2873 ResourceManager
2494 NameNode
```

## Firewall is off
```
[root@hadoop01 mapreduce]# service iptables stop
[root@hadoop01 mapreduce]#
```

Error message:
```
INFO client.RMProxy: Connecting to ResourceManager at hadoop01/192.168.131.131:8032
org.apache.hadoop.mapred.FileAlreadyExistsException: Output directory hdfs://hadoop01:9000/wordcount/output already exists
    at org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.checkOutputSpecs(FileOutputFormat.java:146)
    at org.apache.hadoop.mapreduce.JobSubmitter.checkSpecs(JobSubmitter.java:266)
    at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:139)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1746)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
    at org.apache.hadoop.examples.WordCount.main(WordCount.java:87)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.hadoop.util.ProgramDriver$ProgramDescription.invoke(ProgramDriver.java:71)
    at org.apache.hadoop.util.ProgramDriver.run(ProgramDriver.java:144)
    at org.apache.hadoop.examples.ExampleDriver.main(ExampleDriver.java:74)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
```
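The exception names the problem directly: FileOutputFormat.checkOutputSpecs refuses to submit a job whose output directory already exists, so this has nothing to do with the ResourceManager or the firewall. Remove hdfs://hadoop01:9000/wordcount/output (or point the job at a fresh path) and resubmit, e.g. with `hadoop fs -rm -r /wordcount/output`. If you control the driver code, the directory can also be cleared programmatically before submission; a minimal sketch (not from the original post, and the class name is made up for illustration):

```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CleanOutputDir {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Path taken from the error message above; adjust to your job.
        Path output = new Path("/wordcount/output");
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(output)) {
            fs.delete(output, true); // recursive delete, like `hadoop fs -rm -r`
        }
    }
}
```

Deleting inside the driver is convenient for repeated test runs, but be aware it silently discards the previous run's results; in production it is usually safer to fail fast, as Hadoop does by default.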