import scala.collection.mutable

/**
 * Demo of Scala tuples, mutable Set / Map, and Iterator.
 * Each section prints its results, separated by dashed lines.
 */
object Tuple {
  def main(args: Array[String]): Unit = {
    // Tuple: fixed-size, heterogeneous; elements accessed via _1, _2, _3
    val tuple = ("wang", 2019, 3.3)
    println(tuple._1)
    println(tuple._2)
    println(tuple._3)
    println("-----------------------")

    // Mutable Set — `val` suffices: `+=` mutates the set in place,
    // the reference itself never changes (import is qualified so the
    // immutable default Set is not shadowed)
    val mySet = mutable.Set("hadoop", "spark")
    println(mySet)
    // add one element
    mySet += "scala"
    println(mySet)
    println("-----------------------")

    // Mutable Map
    val people = mutable.Map("xs" -> "student", "ls" -> "teacher", "jc" -> "police")
    println(people)
    // update an existing entry in place
    people("xs") = "xuesheng"
    // add two new entries
    people += ("ys" -> "doctor", "gr" -> "worker")
    println(people)
    println("-----------------------")

    // Iterate over key/value pairs (k = key, v = value)
    for ((k, v) <- people)
      printf("Code is : %s and name is %s ", k, v)
    println("-----------------------")
    // keys only
    for (k <- people.keys) println(k)
    println("-----------------------")
    // values only
    for (v <- people.values) println(v)
    println("-----------------------")

    // Iterator: single-pass; the `for` loop is sugar for hasNext/next
    val iter = Iterator("as", "sd", "df")
    for (elem <- iter) println(elem)
  }
}
结果:
wang 2019 3.3 ----------------------- Set(spark, hadoop) Set(spark, scala, hadoop) ----------------------- Map(jc -> police, xs -> student, ls -> teacher) Map(jc -> police, gr -> worker, ys -> doctor, xs -> xuesheng, ls -> teacher) ----------------------- Code is : jc and name is police Code is : gr and name is worker Code is : ys and name is doctor Code is : xs and name is xuesheng Code is : ls and name is teacher ----------------------- jc gr ys xs ls ----------------------- police worker doctor xuesheng teacher ----------------------- as sd df
这一部分较容易理解,所以没有做过多解释。