Mastering the Spark Machine Learning Library - 09.3 - Implementing classification with the k-means algorithm

    Dataset

    iris.data

    Dataset overview
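    The original post shows a screenshot of the file here. For reference, iris.data is the standard UCI iris CSV: four numeric measurements followed by the species name on each line, with no header row, for example:

        5.1,3.5,1.4,0.2,Iris-setosa
        7.0,3.2,4.7,1.4,Iris-versicolor
        6.3,3.3,6.0,2.5,Iris-virginica

    This is why the code below reads the columns as strings (_c0.._c4) and converts the first four to Double while mapping the fifth to an integer label.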

    Code

    package org.apache.spark.examples.hust.hml.examplesforml
    
    import org.apache.spark.SparkConf
    import org.apache.spark.ml.clustering.KMeans
    import org.apache.spark.ml.feature.VectorAssembler
    import org.apache.spark.sql.SparkSession
    
    import scala.util.Random
    
    object kmeans1 {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setMaster("local").setAppName("iris")
        val spark = SparkSession.builder().config(conf).getOrCreate()
    
        // Read the raw iris CSV; backslashes in the Windows path must be escaped.
        val file = spark.read.format("csv").load("D:\\9-1kmeans\\iris.data")
        file.show()
    
        import spark.implicits._
        val random = new Random()
        // Parse the four numeric features, map the species name to an integer label
        // (kept only for later comparison -- k-means itself never looks at it), and
        // attach a random key used to shuffle the rows.
        val data = file.map(row => {
          val label = row.getString(4) match {
            case "Iris-setosa" => 0
            case "Iris-versicolor" => 1
            case "Iris-virginica" => 2
          }
    
          (row.getString(0).toDouble,
            row.getString(1).toDouble,
            row.getString(2).toDouble,
            row.getString(3).toDouble,
            label,
            random.nextDouble())
        }).toDF("_c0", "_c1", "_c2", "_c3", "label", "rand").sort("rand")
    
        // Assemble the four feature columns into a single vector column "features".
        val assembler = new VectorAssembler()
          .setInputCols(Array("_c0", "_c1", "_c2", "_c3"))
          .setOutputCol("features")
    
        val dataset = assembler.transform(data)
        val Array(train, test) = dataset.randomSplit(Array(0.8, 0.2))
        train.show()
    
        // Fit k-means with k = 3 clusters (one per iris species) and at most 20
        // iterations, then append a "prediction" column with the cluster assignments.
        val kmeans = new KMeans().setFeaturesCol("features").setK(3).setMaxIter(20)
        val model = kmeans.fit(train)
        model.transform(train).show()
      }
    }

    Output
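    The output screenshots from the original post are not reproduced here. As a minimal sketch of how the fitted model could be inspected, assuming Spark 2.3+ (which provides ClusteringEvaluator), one might print the learned cluster centers and score the assignments on the held-out test split with the silhouette metric:

    // Hypothetical follow-up to the code above, using the same model/test values.
    import org.apache.spark.ml.evaluation.ClusteringEvaluator

    // The learned cluster centers: one 4-dimensional vector per cluster.
    model.clusterCenters.foreach(println)

    // Assign clusters to the held-out split and compute the silhouette score.
    val predictions = model.transform(test)
    val evaluator = new ClusteringEvaluator()
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
    println(s"Silhouette on the test split: ${evaluator.evaluate(predictions)}")

    Since the cluster indices produced by k-means are arbitrary, comparing the "prediction" column against the reference "label" column only tells you how well the three clusters line up with the three species, not which index maps to which species.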

Original article: https://www.cnblogs.com/moonlightml/p/9789772.html