join
Function signatures
def join[W](other: RDD[(K, W)]): RDD[(K, (V, W))]
def join[W](other: RDD[(K, W)], numPartitions: Int): RDD[(K, (V, W))]
def join[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (V, W))]
Connects the elements of two RDDs that share the same key, similar to an inner JOIN in SQL. If a key appears more than once in either RDD, every combination of matching values is produced, so compare the results below carefully.
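The second and third overloads only control how the joined result is partitioned. A minimal sketch of both, assuming rdd1 and rdd2 are pair RDDs like the ones built in the example below (the partition count 4 is an arbitrary illustrative value):
import org.apache.spark.HashPartitioner
// Same join semantics, but the result RDD is spread over 4 partitions
val joinedWithNum = rdd1.join(rdd2, 4)
// Or pass an explicit Partitioner instance
val joinedWithPartitioner = rdd1.join(rdd2, new HashPartitioner(4))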
Scala version
val conf = new SparkConf().setAppName("JoinScala").setMaster("local[*]")
val sc = new SparkContext(conf)
val rdd1 = sc.parallelize(List(("a",1),("c",2),("a",3),("b",4),("c",5),("d",6)))
val rdd2 = sc.parallelize(List(("b",5),("c",4),("a",6),("a",9),("c",3),("d",2)))
val join = rdd1.join(rdd2)
join.collect.foreach(println)
The output is as follows (element order may vary from run to run):
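(a,(1,6))
(a,(1,9))
(a,(3,6))
(a,(3,9))
(b,(4,5))
(c,(2,4))
(c,(2,3))
(c,(5,4))
(c,(5,3))
(d,(6,2))
Java version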
SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("JoinJava");
JavaSparkContext sc = new JavaSparkContext(conf);
JavaRDD<Tuple2<Integer, Character>> rdd1 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(1, 'a'),
new Tuple2<Integer, Character>(2, 'b'),
new Tuple2<Integer, Character>(3, 'c'),
new Tuple2<Integer, Character>(3, 'd')
));
JavaRDD<Tuple2<Integer, Character>> rdd2 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(2, 'x'),
new Tuple2<Integer, Character>(3, 'y'),
new Tuple2<Integer, Character>(4, 'z')
));
// Convert the JavaRDDs of Tuple2 into JavaPairRDDs
JavaPairRDD<Integer, Character> pairRdd1 = JavaPairRDD.fromJavaRDD(rdd1);
JavaPairRDD<Integer, Character> pairRdd2 = JavaPairRDD.fromJavaRDD(rdd2);
JavaPairRDD<Integer, Tuple2<Character, Character>> joinRdd = pairRdd1.join(pairRdd2);
java.util.Map<Integer, Tuple2<Character, Character>> joinMap = joinRdd.collectAsMap();
Set<Integer> keys = joinMap.keySet();
for (Integer key : keys) {
    System.out.println(key + " : " + joinMap.get(key));
}
The output is as follows; note that collectAsMap keeps only one value per key, so only one of the two key-3 pairs survives:
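2 : (b,x)
3 : (d,y)   (or (c,y), depending on which pair is collected last)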
fullOuterJoin
Full outer join: every key that appears in either RDD shows up in the result, with each side's value wrapped in Some when present and None when missing. Compare the results carefully.
Scala version
val conf = new SparkConf().setAppName("fullOuterJoin").setMaster("local[*]")
val sc = new SparkContext(conf)
val rdd1 = sc.parallelize(List(("a",1),("c",2),("b",4),("d",6)))
val rdd2 = sc.parallelize(List(("b",5),("c",4),("a",9),("d",2)))
val fullOut = rdd1.fullOuterJoin(rdd2)
fullOut.collect.foreach(println)
The output is as follows (element order may vary):
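(a,(Some(1),Some(9)))
(b,(Some(4),Some(5)))
(c,(Some(2),Some(4)))
(d,(Some(6),Some(2)))
Java version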
SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("fullOuterJoin");
JavaSparkContext sc = new JavaSparkContext(conf);
JavaRDD<Tuple2<Integer, Character>> rdd1 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(1, 'a'),
new Tuple2<Integer, Character>(2, 'b'),
new Tuple2<Integer, Character>(3, 'c'),
new Tuple2<Integer, Character>(3, 'd')
));
JavaRDD<Tuple2<Integer, Character>> rdd2 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(2, 'x'),
new Tuple2<Integer, Character>(3, 'y'),
new Tuple2<Integer, Character>(4, 'z')
));
// Convert the JavaRDDs of Tuple2 into JavaPairRDDs
JavaPairRDD<Integer, Character> pairRdd1 = JavaPairRDD.fromJavaRDD(rdd1);
JavaPairRDD<Integer, Character> pairRdd2 = JavaPairRDD.fromJavaRDD(rdd2);
JavaPairRDD<Integer, Tuple2<org.apache.spark.api.java.Optional<Character>, org.apache.spark.api.java.Optional<Character>>> fullOuterJoinRdd = pairRdd1.fullOuterJoin(pairRdd2);
java.util.Map<Integer, Tuple2<org.apache.spark.api.java.Optional<Character>, org.apache.spark.api.java.Optional<Character>>> fullOuterJoinMap = fullOuterJoinRdd.collectAsMap();
Set<Integer> keys1 = fullOuterJoinMap.keySet();
for (Integer key : keys1) {
    System.out.println(key + " : " + fullOuterJoinMap.get(key));
}
The output is as follows (collectAsMap keeps a single value per key, and the exact formatting of Optional depends on the Spark version):
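1 : (Optional[a],Optional.empty)
2 : (Optional[b],Optional[x])
3 : (Optional[d],Optional[y])   (or (Optional[c],Optional[y]))
4 : (Optional.empty,Optional[z])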
leftOuterJoin
Joins two RDDs like a LEFT OUTER JOIN in SQL: every key from the left RDD is kept, and the right-side value is wrapped in Some when present and None when absent. Compare the results carefully.
Scala version
val conf = new SparkConf().setAppName("leftOuterJoin").setMaster("local[*]")
val sc = new SparkContext(conf)
val rdd1 = sc.parallelize(List(("a",1),("c",2),("b",4),("d",6)))
val rdd2 = sc.parallelize(List(("b",5),("a",9),("d",2)))
val left = rdd1.leftOuterJoin(rdd2)
left.collect.foreach(println)
The output is as follows (element order may vary):
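(a,(1,Some(9)))
(b,(4,Some(5)))
(c,(2,None))
(d,(6,Some(2)))
Java version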
SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("leftOuterJoin");
JavaSparkContext sc = new JavaSparkContext(conf);
JavaRDD<Tuple2<Integer, Character>> rdd1 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(1, 'a'),
new Tuple2<Integer, Character>(2, 'b'),
new Tuple2<Integer, Character>(3, 'c'),
new Tuple2<Integer, Character>(3, 'd')
));
JavaRDD<Tuple2<Integer, Character>> rdd2 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(2, 'x'),
new Tuple2<Integer, Character>(3, 'y'),
new Tuple2<Integer, Character>(4, 'z')
));
// Convert the JavaRDDs of Tuple2 into JavaPairRDDs
JavaPairRDD<Integer, Character> pairRdd1 = JavaPairRDD.fromJavaRDD(rdd1);
JavaPairRDD<Integer, Character> pairRdd2 = JavaPairRDD.fromJavaRDD(rdd2);
JavaPairRDD<Integer, Tuple2<Character, org.apache.spark.api.java.Optional<Character>>> leftOuterJoinRdd = pairRdd1.leftOuterJoin(pairRdd2);
java.util.Map<Integer, Tuple2<Character, org.apache.spark.api.java.Optional<Character>>> leftOuterJoinMap = leftOuterJoinRdd.collectAsMap();
Set<Integer> keys2 = leftOuterJoinMap.keySet();
for (Integer key : keys2) {
    System.out.println(key + " : " + leftOuterJoinMap.get(key));
}
The output is as follows (collectAsMap keeps a single value per key, and the exact formatting of Optional depends on the Spark version):
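1 : (a,Optional.empty)
2 : (b,Optional[x])
3 : (d,Optional[y])   (or (c,Optional[y]))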
rightOuterJoin
Joins two RDDs like a RIGHT OUTER JOIN in SQL: every key from the right RDD is kept, and the left-side value is wrapped in Some when present and None when absent. Compare the results carefully.
Scala version
val conf = new SparkConf().setAppName("rightOuterJoin").setMaster("local[*]")
val sc = new SparkContext(conf)
val rdd1 = sc.parallelize(List(("c",2),("b",4),("d",6)))
val rdd2 = sc.parallelize(List(("b",5),("a",9),("d",2)))
val right = rdd1.rightOuterJoin(rdd2)
right.collect.foreach(println)
The output is as follows (element order may vary):
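(a,(None,9))
(b,(Some(4),5))
(d,(Some(6),2))
Java version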
SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("rightOuterJoin");
JavaSparkContext sc = new JavaSparkContext(conf);
JavaRDD<Tuple2<Integer, Character>> rdd1 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(1, 'a'),
new Tuple2<Integer, Character>(2, 'b'),
new Tuple2<Integer, Character>(3, 'c'),
new Tuple2<Integer, Character>(3, 'd')
));
JavaRDD<Tuple2<Integer, Character>> rdd2 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(2, 'x'),
new Tuple2<Integer, Character>(3, 'y'),
new Tuple2<Integer, Character>(4, 'z')
));
// Convert the JavaRDDs of Tuple2 into JavaPairRDDs
JavaPairRDD<Integer, Character> pairRdd1 = JavaPairRDD.fromJavaRDD(rdd1);
JavaPairRDD<Integer, Character> pairRdd2 = JavaPairRDD.fromJavaRDD(rdd2);
JavaPairRDD<Integer, Tuple2<org.apache.spark.api.java.Optional<Character>, Character>> rightOuterJoinRdd = pairRdd1.rightOuterJoin(pairRdd2);
Map<Integer, Tuple2<Optional<Character>, Character>> rightOuterJoinMap = rightOuterJoinRdd.collectAsMap();
Set<Integer> keys3 = rightOuterJoinMap.keySet();
for (Integer key : keys3) {
    System.out.println(key + " : " + rightOuterJoinMap.get(key));
}
The output is as follows (collectAsMap keeps a single value per key, and the exact formatting of Optional depends on the Spark version):
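2 : (Optional[b],x)
3 : (Optional[d],y)   (or (Optional[c],y))
4 : (Optional.empty,z)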
subtractByKey
Similar to subtract, but operates on keys: removes from RDD1 every element whose key also appears in RDD2. Compare the results carefully.
Scala version
val conf = new SparkConf().setAppName("subtractByKey").setMaster("local[*]")
val sc = new SparkContext(conf)
val rdd1 = sc.parallelize(List(("a",6),("c",2),("b",4),("d",6)))
val rdd2 = sc.parallelize(List(("b",5),("a",9),("d",2)))
val sub = rdd1.subtractByKey(rdd2)
sub.collect.foreach(println)
The output is as follows:
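(c,2)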
Java version
SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("Sub_Join_Java");
JavaSparkContext sc = new JavaSparkContext(conf);
JavaRDD<Tuple2<Integer, Character>> rdd1 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(1, 'a'),
new Tuple2<Integer, Character>(2, 'b'),
new Tuple2<Integer, Character>(3, 'c'),
new Tuple2<Integer, Character>(3, 'd')
));
JavaRDD<Tuple2<Integer, Character>> rdd2 = sc.parallelize(Arrays.asList(
new Tuple2<Integer, Character>(2, 'x'),
new Tuple2<Integer, Character>(3, 'y'),
new Tuple2<Integer, Character>(4, 'z')
));
// Convert the JavaRDDs of Tuple2 into JavaPairRDDs
JavaPairRDD<Integer, Character> pairRdd1 = JavaPairRDD.fromJavaRDD(rdd1);
JavaPairRDD<Integer, Character> pairRdd2 = JavaPairRDD.fromJavaRDD(rdd2);
JavaPairRDD<Integer, Character> subtractByKeyRdd = pairRdd1.subtractByKey(pairRdd2);
List<Tuple2<Integer, Character>> collect = subtractByKeyRdd.collect();
for (Tuple2<Integer, Character> tp2 : collect) {
    System.out.println(tp2._1 + " : " + tp2._2);
}
The output is as follows:
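1 : a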