I. Requirement
Compute, for every phone number, the total upstream traffic, total downstream traffic, and combined total traffic it used.
II. Analysis
1. Input data

1 13736230513 192.196.100.1 www.atguigu.com 2481 24681 200
2 13846544121 192.196.100.2 264 0 200
3 13956435636 192.196.100.3 132 1512 200
4 13966251146 192.168.100.1 240 0 404
5 18271575951 192.168.100.2 www.atguigu.com 1527 2106 200
6 84188413 192.168.100.3 www.atguigu.com 4116 1432 200
7 13590439668 192.168.100.4 1116 954 200
8 15910133277 192.168.100.5 www.hao123.com 3156 2936 200
9 13729199489 192.168.100.6 240 0 200
10 13630577991 192.168.100.7 www.shouhu.com 6960 690 200
11 15043685818 192.168.100.8 www.baidu.com 3659 3538 200
12 15959002129 192.168.100.9 www.atguigu.com 1938 180 500
13 13560439638 192.168.100.10 918 4938 200
14 13470253144 192.168.100.11 180 180 200
15 13682846555 192.168.100.12 www.qq.com 1938 2910 200
16 13992314666 192.168.100.13 www.gaga.com 3008 3720 200
17 13509468723 192.168.100.14 www.qinghua.com 7335 110349 404
18 18390173782 192.168.100.15 www.sogou.com 9531 2412 200
19 13975057813 192.168.100.16 www.baidu.com 11058 48243 200
20 13768778790 192.168.100.17 120 120 200
21 13568436656 192.168.100.18 www.alibaba.com 2481 24681 200
22 13568436656 192.168.100.19 1116 954 200
2. Analysis process
Each record contains: an id, the phone number, an IP address, an optional visited URL, upstream traffic, downstream traffic, and a status code (200/404/500).
The phone number is the key.
Hadoop has no built-in serializable type that carries upstream traffic, downstream traffic, and total traffic together, so we need a custom bean (implementing Writable) as the value.
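Put together, the key/value design looks like this (a sketch derived from the code in section III; FlowBean is the custom Writable defined there):

Map input:     <LongWritable offset, Text line>
Map output:    <Text phone, FlowBean{upFlow, downFlow}>
Reduce input:  <Text phone, Iterable<FlowBean>>
Reduce output: <Text phone, FlowBean{upFlow, downFlow, sumFlow}>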
III. Implementation
1. Custom FlowBean
package com.flow;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {

    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // An empty constructor is required so the framework can instantiate the bean via reflection
    public FlowBean() {
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    @Override
    public String toString() {
        return upFlow + " " + downFlow + " " + sumFlow;
    }

    // Serialization
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Deserialization: fields must be read in exactly the same order they were written
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }
}
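As a quick sanity check (not part of the original post), here is a minimal round-trip sketch that serializes and deserializes the bean with plain Java streams; the class name FlowBeanRoundTrip is made up, and it only assumes the FlowBean above:

package com.flow;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean();
        original.set(2481, 24681); // upFlow, downFlow; set() fills sumFlow = 27162

        // Serialize with write()
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buf));

        // Deserialize with readFields()
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

        System.out.println(copy); // expected: 2481 24681 27162
    }
}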
2. Mapper
package com.flow;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/*
 * 1. Extend Mapper
 * 2. Override the map() method
 * 3. Write the business logic
 */
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    FlowBean v = new FlowBean();
    Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Read one line
        String line = value.toString();

        // 2. Split it into fields
        String[] infos = line.split(" ");

        // 3. Extract the fields we need (the URL column is optional, so count from the end)
        long upFlow = Long.parseLong(infos[infos.length - 3]);
        long downFlow = Long.parseLong(infos[infos.length - 2]);
        String phone = infos[1];

        k.set(phone);
        v.setUpFlow(upFlow);
        v.setDownFlow(downFlow); // sumFlow is recomputed later in the reducer via set()

        // 4. Emit <phone, FlowBean>
        context.write(k, v);
    }
}
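Traced by hand on the first sample record (assuming the space-separated layout shown in section II), the map() above emits one pair:

input line : 1 13736230513 192.196.100.1 www.atguigu.com 2481 24681 200
output pair: key = 13736230513, value = upFlow=2481, downFlow=24681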
3. Reducer
package com.flow;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/*
 * 1. Extend Reducer
 * 2. Override the reduce() method
 * 3. Write the business logic
 */
public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    FlowBean v = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // 1. Accumulate the upstream and downstream traffic for this phone number
        long sum_up = 0;
        long sum_down = 0;
        for (FlowBean value : values) {
            sum_up += value.getUpFlow();
            sum_down += value.getDownFlow();
        }

        // 2. Compute the total (set() also fills sumFlow = up + down)
        v.set(sum_up, sum_down);

        // 3. Emit <phone, FlowBean>
        context.write(key, v);
    }
}
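Traced by hand on the only phone number that appears twice in the sample input, 13568436656, whose map outputs are (2481, 24681) and (1116, 954), the reduce() above emits:

13568436656    3597 25635 29232    (2481+1116, 24681+954, 3597+25635)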
4. Driver
package com.flow;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/*
 * 1. Get the job
 * 2. Set the jar
 * 3. Set the Mapper and Reducer classes
 * 4. Set the map output key/value types
 * 5. Set the final output key/value types
 * 6. Set the input and output paths
 * 7. Submit
 */
public class FlowDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Get the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Set the jar path
        job.setJarByClass(FlowDriver.class);

        // 3. Set the Mapper and Reducer classes
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
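To try it out, package the three classes into a jar and submit it with the hadoop CLI; the jar name and HDFS paths below are placeholders, not from the original post:

hadoop jar flow.jar com.flow.FlowDriver /input/phone_data.txt /output/flow

The output file (part-r-00000) then holds one line per phone number in the format produced by FlowBean.toString(): phone, then upFlow downFlow sumFlow.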
A big pitfall I ran into
Imported Text from the wrong package — it must be org.apache.hadoop.io.Text.
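For reference, this is the import the Mapper, Reducer, and Driver all need; an IDE's auto-import can silently pick a different class named Text (for example org.w3c.dom.Text), which then fails with type mismatches:

import org.apache.hadoop.io.Text;   // Hadoop's Writable Text, not any other Text class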