Implementing an Inverted Index with Hadoop

This post implements an inverted index with Hadoop MapReduce. The work is split into two basic jobs, without using a combiner.

Step One

Read the input documents and count each word's occurrences. This is much like a word count, except that the word and its source filename are combined into a single key (word-->filename), and the intermediate result is written to disk.

InverseIndexStepOne.java

    package postlisting;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    
    import java.io.IOException;
    
    /**
     * Inverted index, step one: a word count, except that the key is word-->filename.
     */
    public class InverseIndexStepOne {
        public static class StepOneMapper extends Mapper<LongWritable, Text, Text, LongWritable>{
            @Override
            protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
                String line = value.toString();
                // split the line into individual words
                String[] fields = line.split(" ");
                // get the input split this record came from
                FileSplit inputsplit = (FileSplit)context.getInputSplit();
                // get the name of the source file
                String filename = inputsplit.getPath().getName();
                // emit one count per word, e.g. hello-->a.txt  1
                for(String field: fields){
                    context.write(new Text(field+"-->"+filename), new LongWritable(1));
                }
            }
        }
    
        public static class  StepOneReducer extends Reducer<Text, LongWritable, Text, LongWritable>{
            @Override
            protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
                long counter = 0;
                for (LongWritable value: values){
                    counter += value.get();
                }
                context.write(key, new LongWritable(counter));
            }
        }
    
        public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);
    
            job.setJarByClass(InverseIndexStepOne.class);
    
            job.setMapperClass(StepOneMapper.class);
            job.setReducerClass(StepOneReducer.class);
    
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(LongWritable.class);
    
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(LongWritable.class);
    
        // delete the output directory if it already exists
        // paths are hard-coded for a local test run
            Path output = new Path("res/words/output/step1");
            FileSystem fs = FileSystem.get(conf);
            if(fs.exists(output)){
                fs.delete(output, true);
            }
            FileInputFormat.setInputPaths(job, new Path("res/words/input/"));
            FileOutputFormat.setOutputPath(job, output);
            System.out.println(job.waitForCompletion(true));
        }
    }
    

Output

    hello-->a.txt	2
    hello-->b.txt	2
    hello-->c.txt	2
    jerry-->a.txt	1
    jerry-->b.txt	3
    jerry-->c.txt	1
    tom-->a.txt	3
    tom-->b.txt	1
    tom-->c.txt	1
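
These counts correspond to input files along the following lines (hypothetical contents reconstructed from the counts above; word order within each file does not matter):

    a.txt: hello hello tom tom tom jerry
    b.txt: hello hello jerry jerry jerry tom
    c.txt: hello hello jerry tom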
    

Step Two

Read the intermediate results from the previous step, then parse and merge them into one posting list per word.

InverseIndexStepTwo.java

    package postlisting;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    
    import java.io.IOException;
    
    public class InverseIndexStepTwo {
        public static class StepTwoMapper extends Mapper<LongWritable, Text, Text, Text> {
            @Override
            protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
                String line = value.toString();
                // each input line looks like: hello-->a.txt<TAB>1
                String[] fields = line.split("\t");
                String[] wordAndFileName = fields[0].split("-->");
                String word = wordAndFileName[0];
                String fileName = wordAndFileName[1];
                long count = Long.parseLong(fields[1]);
                // emit <hello, a.txt-->count>
                context.write(new Text(word), new Text(fileName + "-->" + count));
            }
        }
    
        public static class StepTwoReducer extends Reducer<Text, Text, Text, Text>{
            @Override
            protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
                // incoming data: <hello, [a.txt-->2, b.txt-->2, ...]>
                StringBuilder result = new StringBuilder();
                for (Text value:values){
                    result.append(" ").append(value);
                }
                context.write(key, new Text(result.toString()));
            }
        }
    
        public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);
    
            job.setJarByClass(InverseIndexStepTwo.class);
    
            job.setMapperClass(StepTwoMapper.class);
            job.setReducerClass(StepTwoReducer.class);
    
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
    
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
    
        // delete the output directory if it already exists
            Path output = new Path("res/words/output/step2");
            FileSystem fs = FileSystem.get(conf);
            if(fs.exists(output)){
                fs.delete(output, true);
            }
            FileInputFormat.setInputPaths(job, new Path("res/words/output/step1/"));
            FileOutputFormat.setOutputPath(job, output);
            System.out.println(job.waitForCompletion(true));
        }
    }
    

Output

    hello	 c.txt-->2 b.txt-->2 a.txt-->2
    jerry	 c.txt-->1 b.txt-->3 a.txt-->1
    tom	 c.txt-->1 b.txt-->1 a.txt-->3
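
To show how the final index might be consumed, here is a minimal sketch that loads the step-two output into an in-memory map and looks up a word. The class name and file path are illustrative assumptions, not part of the original jobs:

    package postlisting;
    
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    
    /** Hypothetical helper: load the step-two output for in-memory lookups. */
    public class IndexReader {
        public static void main(String[] args) throws IOException {
            Map<String, String> index = new HashMap<>();
            // each line looks like: word<TAB> file1-->n1 file2-->n2 ...
            List<String> lines = Files.readAllLines(Paths.get("res/words/output/step2/part-r-00000"));
            for (String line : lines) {
                String[] fields = line.split("\t", 2);
                index.put(fields[0], fields[1].trim());
            }
            // prints e.g.: c.txt-->2 b.txt-->2 a.txt-->2
            System.out.println(index.get("hello"));
        }
    }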
    

Summary

Although a combiner could save some code, splitting the work into two jobs feels more flexible: a short shell script can chain them together, and much of MapReduce's power lies precisely in this free composition of jobs.
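
For comparison, here is a minimal sketch of the single-job combiner variant alluded to above (class names are illustrative, not from the original post). The mapper emits word-->filename : 1, the combiner sums the counts and rewrites the key to word : filename-->count, and the reducer concatenates the postings. One caveat: Hadoop treats the combiner as an optional optimization that may run zero or more times, so a combiner that changes key semantics like this is fragile, which is itself a practical argument for the two-job approach used above.

    package postlisting;
    
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    
    import java.io.IOException;
    
    /**
     * Hypothetical single-job variant: the combiner folds counts and
     * rewrites "word-->file" : 1 pairs into "word" : "file-->n".
     */
    public class InverseIndexOneStep {
        public static class IndexMapper extends Mapper<LongWritable, Text, Text, Text> {
            @Override
            protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
                String fileName = ((FileSplit) context.getInputSplit()).getPath().getName();
                for (String word : value.toString().split(" ")) {
                    context.write(new Text(word + "-->" + fileName), new Text("1"));
                }
            }
        }
    
        // combiner input/output types must match the map output types (Text/Text)
        public static class IndexCombiner extends Reducer<Text, Text, Text, Text> {
            @Override
            protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
                long sum = 0;
                for (Text value : values) {
                    sum += Long.parseLong(value.toString());
                }
                String[] wordAndFileName = key.toString().split("-->");
                context.write(new Text(wordAndFileName[0]), new Text(wordAndFileName[1] + "-->" + sum));
            }
        }
    
        public static class IndexReducer extends Reducer<Text, Text, Text, Text> {
            @Override
            protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
                StringBuilder postings = new StringBuilder();
                for (Text value : values) {
                    postings.append(" ").append(value);
                }
                context.write(key, new Text(postings.toString()));
            }
        }
    }

The driver would register the combiner with job.setCombinerClass(IndexCombiner.class) and otherwise mirror the drivers above; the two-job version can instead be chained with a short shell script or a workflow scheduler.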

Original post: https://www.cnblogs.com/fanghao/p/8528512.html