For the detailed steps of creating the project, see this blog post:
Writing a WordCount program in Scala in IDEA and submitting it to run on a cluster
https://blog.csdn.net/weixin_43866709/article/details/88599701
The code is as follows:
package cn.edu360.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;

public class javaWordCount {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("javaWordCount");
        // Create the JavaSparkContext
        JavaSparkContext jsc = new JavaSparkContext(conf);
        // Specify where to read the data from (args[0] is the input path)
        JavaRDD<String> lines = jsc.textFile(args[0]);
        // Split each line on spaces and flatten into individual words
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
        // Pair each word with a count of 1
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<>(word, 1);
            }
        });
        // Aggregate: sum the counts for each word
        JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // Swap key and value so the count becomes the key
        JavaPairRDD<Integer, String> swapped = reduced.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tp) throws Exception {
                //return new Tuple2<>(tp._2, tp._1);
                return tp.swap();
            }
        });
        // Sort by count in descending order
        JavaPairRDD<Integer, String> sorted = swapped.sortByKey(false);
        // Swap back to (word, count) order
        JavaPairRDD<String, Integer> result = sorted.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tp) throws Exception {
                return tp.swap();
            }
        });
        // Save the result to HDFS (args[1] is the output path)
        result.saveAsTextFile(args[1]);
        // Release resources
        jsc.stop();
    }
}
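
The anonymous inner classes above are verbose. Since this code targets the Spark 2.x Java API (flatMap returns an Iterator), the same pipeline can also be written with Java 8 lambdas. Below is a minimal sketch of an equivalent program; the class name javaWordCountLambda is illustrative:

package cn.edu360.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class javaWordCountLambda {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("javaWordCountLambda");
        JavaSparkContext jsc = new JavaSparkContext(conf);
        // read, split, and flatten into words, then count each word
        JavaPairRDD<String, Integer> reduced = jsc.textFile(args[0])
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey(Integer::sum);
        // swap to (count, word) to sort descending by count, then swap back
        reduced.mapToPair(tp -> tp.swap())
                .sortByKey(false)
                .mapToPair(tp -> tp.swap())
                .saveAsTextFile(args[1]);
        jsc.stop();
    }
}

If you submit this version, point --class at cn.edu360.spark.javaWordCountLambda instead.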
Then package the program as a jar, upload it, and submit it to the Spark cluster. The submit command is:

spark-submit \
--master spark://L1:7077 \
--class cn.edu360.spark.javaWordCount \
/home/hadoop/original-SparkTest1-1.0-SNAPSHOT-java.jar \
hdfs://L1:9000/aaa \
hdfs://L1:9000/aaajavaout

Here spark-submit is the command for submitting an application to the Spark cluster; --master specifies the host and port of the cluster's master; --class specifies the fully qualified name of the main class (the program's entry point), which must match the package declaration in the source file; the next argument is the path to the jar; and the last two are the program's own arguments: args[0], the input file on HDFS (namenode host and port plus the file path), and args[1], the location where the results are stored.
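
Note that the output directory (args[1]) must not already exist, or saveAsTextFile will fail. Once the job completes, the result files can be inspected directly on HDFS; assuming the output path used above:

hdfs dfs -cat hdfs://L1:9000/aaajavaout/part-*

For debugging without a cluster, the same jar can also be submitted with --master local[*], which runs the whole job in a single local JVM.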