  • Using Partitions in MapReduce (Partitioner)

    By default, MapReduce uses hash partitioning: the partition is computed from the hash code of the map output key, taken modulo the number of reduce tasks, as shown below.

    int result = (key.hashCode() & Integer.MAX_VALUE) % numReduceTask;
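
    For reference, the HashPartitioner that ships with Hadoop (org.apache.hadoop.mapreduce.lib.partition.HashPartitioner) is essentially this one line wrapped in a Partitioner subclass; the mask with Integer.MAX_VALUE clears the sign bit so a negative hashCode cannot produce a negative partition index:

    public class HashPartitioner<K, V> extends Partitioner<K, V> {

        @Override
        public int getPartition(K key, V value, int numReduceTasks) {
            // Clear the sign bit, then take the hash modulo the reducer count
            return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
        }
    }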

    If business requirements call for routing the map output into different files based on specific conditions, we need to implement a custom Partitioner with our own rules.

    As a simple example, consider a MapReduce wordcount where words must be written to different files depending on their length: words longer than 4 characters go to one file, and words of length 4 or less go to another.

    The implementation consists of four classes: a mapper, a reducer, a custom partitioner, and a driver. The code is as follows.

    MapTest.java

    /**
     * 
     */
    package com.zhen.partition;
    
    import java.io.IOException;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    
    /**
     * @author FengZhen
     *
     */
    public class MapTest extends Mapper<LongWritable, Text, Text, IntWritable>{
    
        private IntWritable outputValue = new IntWritable(1);
        
        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
    
            // Each input line contains tab-separated words; emit (word, 1) for each
            String[] splits = value.toString().split("\t");
            for (int i = 0; i < splits.length; i++) {
                context.write(new Text(splits[i]), outputValue);
            }
        
        }
        
    }
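
    To make the mapper's behavior concrete: given a hypothetical input line containing the tab-separated words "hadoop", "map" and "reduce" (the post does not show the actual input data), this map method emits the pairs (hadoop, 1), (map, 1) and (reduce, 1).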

    ReduceTest.java

    /**
     * 
     */
    package com.zhen.partition;
    
    import java.io.IOException;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;
    
    /**
     * @author FengZhen
     *
     */
    public class ReduceTest extends Reducer<Text, IntWritable, Text, IntWritable>{
    
        @Override
        protected void reduce(Text key, Iterable<IntWritable> value,
                Reducer<Text, IntWritable, Text, IntWritable>.Context context) throws IOException, InterruptedException {
    
            // Sum all counts emitted for this word
            int sum = 0;
            for (IntWritable intWritable : value) {
                sum += intWritable.get();
            }
            context.write(key, new IntWritable(sum));
            
        }
        
    }
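
    Because this reduce logic is a plain sum (associative and commutative), the same class can safely double as the combiner, which is exactly what the driver below does with job.setCombinerClass(ReduceTest.class).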

    PartitionTest.java

    /**
     * 
     */
    package com.zhen.partition;
    
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Partitioner;
    
    /**
     * @author FengZhen
     * First type parameter: the map output key type
     * Second type parameter: the map output value type
     */
    public class PartitionTest extends Partitioner<Text, IntWritable>{
    
        /**
         * key: the map output key
         * value: the map output value
         * numReduceTask: the number of reduce tasks
         * Return value: the index of the target reducer, starting from 0
         */
        @Override
        public int getPartition(Text key, IntWritable value, int numReduceTask) {
            // Words longer than 4 characters go to reducer 0, all others to reducer 1
            if (key.toString().length() > 4) {
                return 0;
            } else {
                return 1;
            }
        }
        
    }
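
    One caveat, with a sketch of a more general pattern that is not in the original post: when numReduceTask is greater than 1, returning an index outside [0, numReduceTask) makes the map task fail with an "Illegal partition" IOException. Taking the computed bucket modulo numReduceTask is a common way to stay in range:

        @Override
        public int getPartition(Text key, IntWritable value, int numReduceTask) {
            // Compute the desired bucket, then wrap it into the valid range [0, numReduceTask)
            int bucket = key.toString().length() > 4 ? 0 : 1;
            return bucket % numReduceTask;
        }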

    PartitionTestMain.java

    /**
     * 
     */
    package com.zhen.partition;
    
    import java.io.IOException;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    
    /**
     * @author FengZhen
     *
     */
    public class PartitionTestMain {
    
        public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
            Configuration configuration = new Configuration();
            Job job = Job.getInstance(configuration, PartitionTestMain.class.getSimpleName());
            job.setJarByClass(PartitionTestMain.class);
            job.setMapperClass(MapTest.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            
            job.setReducerClass(ReduceTest.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            
            job.setCombinerClass(ReduceTest.class);
            // Set the custom partitioner class
            job.setPartitionerClass(PartitionTest.class);
            // Set the number of reduce tasks; must cover every partition index returned above
            job.setNumReduceTasks(2);
            
            FileInputFormat.addInputPath(job, new Path(args[0]));
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
            
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }
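
    Note that job.setNumReduceTasks(2) matches the two partition indices (0 and 1) that PartitionTest can return. If the reducer count were left at its default of 1, the framework would bypass the custom partitioner entirely and write everything to a single output file.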

    Package the jar and run a test

    hadoop jar /Users/FengZhen/Desktop/Hadoop/other/mapreduce_jar/PartitionTest.jar com.zhen.partition.PartitionTestMain /user/hadoop/mapreduce/partitionTest/input /user/hadoop/mapreduce/partitionTest/output/

    When the job finishes, the output path contains two result files (plus the _SUCCESS marker):

    EFdeMacBook-Pro:file FengZhen$ hadoop fs -ls /user/hadoop/mapreduce/partitionTest/output/
    Found 3 items
    -rw-r--r--   1 FengZhen supergroup          0 2018-02-11 12:12 /user/hadoop/mapreduce/partitionTest/output/_SUCCESS
    -rw-r--r--   1 FengZhen supergroup         69 2018-02-11 12:12 /user/hadoop/mapreduce/partitionTest/output/part-r-00000
    -rw-r--r--   1 FengZhen supergroup         19 2018-02-11 12:12 /user/hadoop/mapreduce/partitionTest/output/part-r-00001

    Inspecting the file contents confirms that the output was split according to the condition:

    part-r-00000 contains the words with length > 4

    part-r-00001 contains the words with length <= 4

  • Original post: https://www.cnblogs.com/EnzoDin/p/8441074.html