Result of running the job:
2022-01-27 17:02:12,548 INFO [org.apache.hadoop.mapreduce.Job] - Job job_local156721860_0001 running in uber mode : false
2022-01-27 17:02:12,548 INFO [org.apache.hadoop.mapreduce.Job] - map 0% reduce 0%
2022-01-27 17:02:12,548 INFO [org.apache.hadoop.mapreduce.Job] - Job job_local156721860_0001 failed with state FAILED due to: NA
2022-01-27 17:02:12,548 INFO [org.apache.hadoop.mapreduce.Job] - Counters: 0
Process finished with exit code 1
The job dies with no useful message ("failed with state FAILED due to: NA", Counters: 0). The actual cause is in the driver: setMapOutputKeyClass is called twice, so the map output key class ends up as FlowBean, which does not implement WritableComparable and therefore cannot be sorted on the map side. The corrected program code is as follows:
FlowBean code:
package com.atguigu.mapreduce.writable2;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class FlowBean implements Writable {
    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    public FlowBean() {}

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.upFlow + this.downFlow;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // fields must be read in the same order they are written
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow; // matches the required output format
    }
}
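Since Writable serialization is nothing more than the write()/readFields() pair, FlowBean can be sanity-checked outside Hadoop with a round trip through a byte buffer. A minimal sketch, assuming the FlowBean above; the class name FlowBeanRoundTrip and the sample values are invented for the demo:

package com.atguigu.mapreduce.writable2;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean before = new FlowBean();
        before.setUpFlow(2481);    // sample values, invented
        before.setDownFlow(24681);
        before.setSumFlow();

        // serialize with write()
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        before.write(new DataOutputStream(buf));

        // deserialize with readFields(); the field order mirrors write()
        FlowBean after = new FlowBean();
        after.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

        System.out.println(after); // prints 2481, 24681 and 27162, tab-separated
    }
}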
Mapper code:
package com.atguigu.mapreduce.writable2;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private Text outk = new Text();
    private FlowBean outv = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        String[] split = line.split("\t");
        String intkphone = split[1];               // input key: the phone number
        String intvup = split[split.length - 3];   // input value 1: upstream traffic (3rd-to-last field)
        String intvdown = split[split.length - 2]; // input value 2: downstream traffic (2nd-to-last field)

        outk.set(intkphone);                       // wrap the phone number as the output key
        outv.setUpFlow(Long.parseLong(intvup));    // parse the upstream traffic into outv
        outv.setDownFlow(Long.parseLong(intvdown));// parse the downstream traffic into outv
        outv.setSumFlow();                         // up + down, computed in FlowBean's no-arg setSumFlow()

        // write out
        context.write(outk, outv);
    }
}
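The index arithmetic in the mapper assumes the phone_data.txt layout: phone number in field 1, upstream and downstream traffic in the third- and second-to-last fields. A standalone sketch of just the parsing step; the sample line and its values are invented to match that assumed layout:

public class MapperParseDemo {
    public static void main(String[] args) {
        // invented sample line in the assumed layout:
        // id \t phone \t ip \t domain \t upFlow \t downFlow \t status
        String line = "1\t13736230513\t192.196.100.1\twww.atguigu.com\t2481\t24681\t200";
        String[] split = line.split("\t");
        System.out.println(split[1]);                // 13736230513 -> outk
        System.out.println(split[split.length - 3]); // 2481  -> upFlow
        System.out.println(split[split.length - 2]); // 24681 -> downFlow
    }
}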
Reducer code:
package com.atguigu.mapreduce.writable2;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    // one reusable output value
    private FlowBean outv = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // initialize the accumulators
        long totalUp = 0;
        long totalDown = 0;
        for (FlowBean value : values) {
            totalUp += value.getUpFlow();
            totalDown += value.getDownFlow();
        }

        // wrap the result: the incoming Text key is unchanged, so it can be written out as-is;
        // only outv needs to be refilled with the aggregated values
        outv.setUpFlow(totalUp);
        outv.setDownFlow(totalDown);
        outv.setSumFlow();

        // write out: the key is reused directly, outv carries the new totals
        context.write(key, outv);
    }
}
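For one phone number, the reducer just sums the per-record flows and recomputes the total. A standalone sketch of that step with invented records (ReducerSumDemo is not part of the original job):

package com.atguigu.mapreduce.writable2;

public class ReducerSumDemo {
    public static void main(String[] args) {
        long[][] records = { {2481, 24681}, {1116, 954} }; // {upFlow, downFlow} pairs, invented
        long totalUp = 0, totalDown = 0;
        for (long[] r : records) {
            totalUp += r[0];
            totalDown += r[1];
        }
        FlowBean outv = new FlowBean();
        outv.setUpFlow(totalUp);
        outv.setDownFlow(totalDown);
        outv.setSumFlow();
        System.out.println("13736230513\t" + outv); // 3597, 25635, 29232, tab-separated
    }
}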
Driver code:
package com.atguigu.mapreduce.writable2;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FlowDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(FlowDriver.class);
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        job.setMapOutputKeyClass(Text.class);
        // the original code mistakenly called setMapOutputKeyClass(FlowBean.class) here a
        // second time, overwriting the Text key with FlowBean; FlowBean is not a
        // WritableComparable, so the map-side sort fails and the job ends FAILED
        job.setMapOutputValueClass(FlowBean.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        FileInputFormat.setInputPaths(job, new Path("C:\\bigdata\\大数据之Hadoop 3.x\\资料\\11_input\\inflow\\phone_data.txt"));
        FileOutputFormat.setOutputPath(job, new Path("C:\\bigdata\\maven\\flow1344"));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
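Two driver details worth keeping in mind: FileOutputFormat throws an exception if the output directory (flow1344 here) already exists, so it must be deleted between runs, and the hard-coded Windows paths tie the job to this machine. A common variant, sketched here as an assumption rather than the original author's setup, reads both paths from the program arguments; the two path lines in main would become:

FileInputFormat.setInputPaths(job, new Path(args[0]));  // input path from the first program argument
FileOutputFormat.setOutputPath(job, new Path(args[1])); // output path (must not exist yet) from the second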