zhangbo0805 2015-11-17 01:21

MapReduce programming error: "Job running in uber mode : false"?

I wrote a MapReduce program in Eclipse that is supposed to count the total number of lines across all files under several directories.

The code is as follows:
package ZhangBo;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {
    // shared logger for the mapper and the driver
    static Log logger = LogFactory.getLog(WordCount.class);

    public static class TokenizerMapper
            extends Mapper<LongWritable, Text, Text, IntWritable> {
        // the input key is the byte offset at which the line starts;
        // the input value is the line itself

        private final static IntWritable one = new IntWritable(1);
        // a fixed key, so every line counts toward the same total
        private Text word = new Text("lines");

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            logger.debug("This is debug message.");
            logger.error("This is error message.");
            logger.info("ni hao ");
            // emit (fixed key, 1) once per input line; the reducer sums the ones
            context.write(word, one);
        }
    }
    // Turns each (key, list of counts) produced by the map phase into
    // (key, sum); the framework has already sorted and grouped by key.
    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            // iterate over all counts for this key
            for (IntWritable val : values) {
                sum += val.get(); // IntWritable to int
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    private static FileSystem hdfs;
    private static String args1; // comma-separated list of every directory found so far
    private static String args2;
    private static String args3;
    Configuration conf1 = new Configuration();

    private static void showDir(FileStatus fs) throws Exception {
        Path path = fs.getPath();
        if (fs.isDir()) {
            args2 = path.toString();
            if (!args2.equals(args3)) // compare string contents, not references
                args1 = args1 + "," + args2;
            System.out.println("Directory: " + path);
            FileStatus[] f = hdfs.listStatus(path);
            for (FileStatus file : f) {
                showDir(file); // recurse into subdirectories
                args3 = file.getPath().toString();
            }
        } else {
            System.out.println("File: " + path);
        }
        System.out.println("Directories so far: " + args1);
    }

    public void test() throws Exception {
        hdfs = FileSystem.get(conf1);
        FileStatus[] fs = hdfs.listStatus(new Path("hdfs://C15/zbo"));
        args1 = "hdfs://C15/zbo";
        if (fs.length > 0) {
            for (FileStatus f : fs) {
                showDir(f);
            }
        } else {
            System.out.println("Nothing to traverse....");
        }
    }

    public static void main(String[] args) throws Exception {
        new WordCount().test(); // collects all input directories into args1

        Configuration conf = new Configuration();
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        FileInputFormat.addInputPaths(job, args1); // args1 is a comma-separated path list
        FileOutputFormat.setOutputPath(job, new Path("outbo"));
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
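
A side note on the driver: in the submission log below, JobSubmitter warns "Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this." A minimal sketch of that pattern, assuming the same Hadoop 2.x mapreduce API, is shown here; WordCountDriver is an illustrative name of mine, not part of the original code.

package ZhangBo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver skeleton. ToolRunner parses the generic Hadoop
// options (-D, -files, -libjars, ...) out of args before calling run(),
// which is what the JobSubmitter warning asks for.
public class WordCountDriver extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        // build the Job from getConf() here, exactly as in main() above,
        // then: return job.waitForCompletion(true) ? 0 : 1;
        return 0;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new WordCountDriver(), args));
    }
}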

I created directories and files under hdfs://C15/zbo (a sketch of building the same tree through the FileSystem API follows).
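
The exact setup steps were not recorded; the following is a hypothetical helper that rebuilds an equivalent tree with the same FileSystem API the program itself uses. MakeTestTree and the file contents are my assumptions; only the paths come from the traversal output below.

package ZhangBo;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: creates the three nested directory levels and the
// two files per level that the traversal output below reports.
public class MakeTestTree {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        String[] dirs = {"hdfs://C15/zbo", "hdfs://C15/zbo/zbo", "hdfs://C15/zbo/zbo/zbo"};
        for (String dir : dirs) {
            fs.mkdirs(new Path(dir));
            for (String name : new String[]{"file1.txt", "file2.txt"}) {
                // file contents are an assumption; any few lines of text will do
                try (FSDataOutputStream out = fs.create(new Path(dir, name))) {
                    out.writeBytes("hello\nworld\n");
                }
            }
        }
        fs.close();
    }
}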
I exported the jar ho.jar from Eclipse.
I ran the program with: hadoop jar ho.jar ZhangBo.WordCount
The output was as follows:

[root@c15m1 /]# hadoop jar ho.jar ZhangBo.WordCount
File: hdfs://C15/zbo/file1.txt
Directories so far: hdfs://C15/zbo
File: hdfs://C15/zbo/file2.txt
Directories so far: hdfs://C15/zbo
Directory: hdfs://C15/zbo/zbo
File: hdfs://C15/zbo/zbo/file1.txt
Directories so far: hdfs://C15/zbo,hdfs://C15/zbo/zbo
File: hdfs://C15/zbo/zbo/file2.txt
Directories so far: hdfs://C15/zbo,hdfs://C15/zbo/zbo
Directory: hdfs://C15/zbo/zbo/zbo
File: hdfs://C15/zbo/zbo/zbo/file1.txt
Directories so far: hdfs://C15/zbo,hdfs://C15/zbo/zbo,hdfs://C15/zbo/zbo/zbo
File: hdfs://C15/zbo/zbo/zbo/file2.txt
Directories so far: hdfs://C15/zbo,hdfs://C15/zbo/zbo,hdfs://C15/zbo/zbo/zbo
Directories so far: hdfs://C15/zbo,hdfs://C15/zbo/zbo,hdfs://C15/zbo/zbo/zbo
Directories so far: hdfs://C15/zbo,hdfs://C15/zbo/zbo,hdfs://C15/zbo/zbo/zbo
15/11/17 08:55:37 INFO client.RMProxy: Connecting to ResourceManager at c15m1.ecld.com/132.121.94.213:8050
15/11/17 08:55:38 INFO hdfs.DFSClient: Created HDFS_DELEGATION_TOKEN token 72820725 for hdfs on ha-hdfs:C15
15/11/17 08:55:38 INFO security.TokenCache: Got dt for hdfs://C15; Kind: HDFS_DELEGATION_TOKEN, Service: ha-hdfs:C15, Ident: (HDFS_DELEGATION_TOKEN token 72820725 for hdfs)
15/11/17 08:55:38 WARN mapreduce.JobSubmitter: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
15/11/17 08:55:38 INFO input.FileInputFormat: Total input paths to process : 8
15/11/17 08:55:38 INFO lzo.GPLNativeCodeLoader: Loaded native gpl library
15/11/17 08:55:38 INFO lzo.LzoCodec: Successfully loaded & initialized native-lzo library [hadoop-lzo rev dbd51f0fb61f5347228a7a23fe0765ac1242fcdf]
15/11/17 08:55:47 INFO mapreduce.JobSubmitter: number of splits:8
15/11/17 08:55:48 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1440641224751_187144
15/11/17 08:55:48 INFO mapreduce.JobSubmitter: Kind: HDFS_DELEGATION_TOKEN, Service: ha-hdfs:C15, Ident: (HDFS_DELEGATION_TOKEN token 72820725 for hdfs)
15/11/17 08:55:48 INFO impl.YarnClientImpl: Submitted application application_1440641224751_187144
15/11/17 08:55:49 INFO mapreduce.Job: The url to track the job: http://c15m1.ecld.com:8088/proxy/application_1440641224751_187144/
15/11/17 08:55:49 INFO mapreduce.Job: Running job: job_1440641224751_187144
15/11/17 08:55:51 INFO mapreduce.Job: Job job_1440641224751_187144 running in uber mode : false
15/11/17 08:55:51 INFO mapreduce.Job: map 0% reduce 0%
15/11/17 08:55:51 INFO mapreduce.Job: Job job_1440641224751_187144 failed with state FAILED due to: Application application_1440641224751_187144 failed 2 times due to AM Container for appattempt_1440641224751_187144_000002 exited with exitCode: -1000 due to: Application application_1440641224751_187144 initialization failed (exitCode=255) with output: Requested user hdfs is banned

.Failing this attempt.. Failing the application.
15/11/17 08:55:51 INFO mapreduce.Job: Counters: 0

Question: what does "running in uber mode : false" mean, and is it the problem here?


1 reply

  • shushen123 2016-05-24 01:18

    Hello, I ran into the same problem. How did you solve it? Thanks!

