  • Big-data WordCount code. To understand the code, read it together with a diagram of the MapReduce flow.

    package cn.itcast.hadoop.mr;

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class WordCountDriver {

        public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
            Configuration conf = new Configuration();
            //conf.set("mapreduce.app-submission.cross-platform", "true");  // cross-platform switch so MR jobs can be submitted from Windows
            Job job = Job.getInstance(conf, "word count");
            job.setJarByClass(WordCountDriver.class);            // locate the jar that contains this driver
            job.setMapperClass(WordCountMapper.class);
            job.setReducerClass(WordCountReducer.class);
            job.setMapOutputKeyClass(Text.class);                // map output: (word, 1)
            job.setMapOutputValueClass(IntWritable.class);
            job.setOutputKeyClass(Text.class);                   // reduce output: (word, count)
            job.setOutputValueClass(IntWritable.class);
            FileInputFormat.addInputPath(job, new Path("/home/node-1/zhouriyue/input/"));
            FileOutputFormat.setOutputPath(job, new Path("/home/node-1/zhouriyue/output/"));
            /*FileInputFormat.setInputPaths(job, "/wordcount/input");
            FileOutputFormat.setOutputPath(job, new Path("/wordcount/output"));*/
            boolean b = job.waitForCompletion(true);             // true: report progress to the console
            System.exit(b ? 0 : 1);
        }
    }

    package cn.itcast.hadoop.mr;

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // key is the byte offset of the line in the file; value is one line of text
            String line = value.toString();
            String[] words = line.split(" ");
            for (String word : words) {
                System.out.println(word + "," + 1);              // debug output: one (word, 1) pair per token
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }

    package cn.itcast.hadoop.mr;

    import java.io.IOException;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // values holds every 1 the mappers emitted for this word; sum them up
            int count = 0;
            for (IntWritable value : values) {
                count += value.get();
            }
            System.out.println(key + "," + count);               // debug output: final count for this word
            context.write(key, new IntWritable(count));
        }
    }
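
    To see the data flow these three classes implement without a cluster, here is a minimal plain-Java sketch (it is not from the original post, and the two sample input lines are made-up) that replays the map → shuffle → reduce stages in memory:

    import java.util.AbstractMap;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    public class WordCountFlowDemo {
        public static void main(String[] args) {
            // Hypothetical input split: two lines, as if read by TextInputFormat
            String[] lines = { "hello world", "hello hadoop" };

            // Map phase: emit a (word, 1) pair per token, like WordCountMapper
            List<Map.Entry<String, Integer>> mapOutput = new ArrayList<>();
            for (String line : lines) {
                for (String word : line.split(" ")) {
                    mapOutput.add(new AbstractMap.SimpleEntry<>(word, 1));
                }
            }

            // Shuffle phase: the framework groups values by key; a TreeMap stands in here
            Map<String, List<Integer>> grouped = new TreeMap<>();
            for (Map.Entry<String, Integer> kv : mapOutput) {
                grouped.computeIfAbsent(kv.getKey(), k -> new ArrayList<>()).add(kv.getValue());
            }

            // Reduce phase: sum each key's list of 1s, like WordCountReducer
            for (Map.Entry<String, List<Integer>> entry : grouped.entrySet()) {
                int count = 0;
                for (int v : entry.getValue()) {
                    count += v;
                }
                System.out.println(entry.getKey() + "\t" + count);
            }
            // prints: hadoop 1, hello 2, world 1
        }
    }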

    Extend the idea to new problems: learning from others' code and then writing your own is what truly counts.

    Problem: find the maximum of all the numbers in the files 4.txt, 5.txt, and 6.txt. The code is below.

    package com.gxuwz.MaxValue;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    import java.io.IOException;

    public class MaxValueDriver {
        public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);
            job.setJarByClass(MaxValueDriver.class);
            job.setMapperClass(MaxValueMapper.class);
            job.setReducerClass(MaxValueReducer.class);
            job.setMapOutputKeyClass(Text.class);       // map emits (Text, Text): ("maxValue", number)
            job.setMapOutputValueClass(Text.class);
            job.setOutputKeyClass(Text.class);          // reduce emits (Text, IntWritable): ("maxValue", max)
            job.setOutputValueClass(IntWritable.class);
            FileInputFormat.setInputPaths(job,"/home/node-1/zhouriyue/data/");
            FileOutputFormat.setOutputPath(job,new Path("/home/node-1/zhouriyue/maxvalue/"));
            boolean b = job.waitForCompletion(true);
            System.exit(b ? 0 : 1);
        }
    }

    package com.gxuwz.MaxValue;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    import java.io.IOException;

    public class MaxValueMapper extends Mapper<LongWritable, Text, Text, Text> {

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // split the line into numbers and tag every one with the shared key
            // "maxValue", so a single reduce call ends up seeing all of them
            String data = value.toString();
            String[] values = data.split(" ");
            for (String v : values) {
                context.write(new Text("maxValue"), new Text(v));
            }
        }
    }

    package com.gxuwz.MaxValue;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    import java.io.IOException;

    public class MaxValueReducer extends Reducer<Text, Text, Text, IntWritable> {

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // start from Integer.MIN_VALUE (not 0) so all-negative inputs still work
            int maxValue = Integer.MIN_VALUE;
            for (Text v : values) {
                System.out.println("v:" + v.toString());         // debug output: each incoming number
                int s = Integer.parseInt(v.toString());
                if (s > maxValue) {
                    maxValue = s;
                }
            }
            context.write(key, new IntWritable(maxValue));       // key is "maxValue"
        }
    }
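
    As a quick sanity check of the reduce logic, the sketch below (it is not from the original post, and the numbers are made-up stand-ins for the contents of 4.txt, 5.txt, and 6.txt) replays what the single reduce call sees once every mapper has tagged each number with the shared key "maxValue":

    import java.util.Arrays;
    import java.util.List;

    public class MaxValueFlowDemo {
        public static void main(String[] args) {
            // Hypothetical merged values arriving at the reducer under the key "maxValue"
            List<String> values = Arrays.asList("3", "17", "8", "42", "5", "29");

            // Same scan as MaxValueReducer.reduce; starting from Integer.MIN_VALUE
            // keeps the result correct even if every input number is negative
            int maxValue = Integer.MIN_VALUE;
            for (String v : values) {
                int s = Integer.parseInt(v);
                if (s > maxValue) {
                    maxValue = s;
                }
            }
            System.out.println("maxValue\t" + maxValue);   // prints: maxValue   42
        }
    }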

  • Original post: https://www.cnblogs.com/riyueqian/p/12254124.html