看到「程序员的自我修养 – SelfUp.cn」上有一篇《Spark MLlib 之 K-Means 聚类算法》。
不过那篇示例是用 Java 写的,于是我按照例程用 Scala 重写了一个,分享在此。
我正在学习 Spark MLlib,但如此详细的资料很难找到,故在此分享,供大家参考。
测试数据
0.0 0.0 0.0
0.1 0.1 0.1
0.2 0.2 0.2
9.0 9.0 9.0
9.1 9.1 9.1
9.2 9.2 9.2
15.1 15.1 15.1
18.0 17.0 19.0
20.0 21.0 22.0
package com.spark.firstApp
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors
/**
 * Minimal Spark MLlib K-Means clustering example.
 *
 * Reads whitespace-separated numeric vectors from HDFS (one vector per line,
 * e.g. "0.1 0.1 0.1"), clusters them with K-Means, prints the Within Set Sum
 * of Squared Errors (WSSSE), and predicts the cluster of a sample point.
 */
object HelloSpark {

  /**
   * Entry point.
   *
   * @param args optional: args(0) overrides the input path; defaults to the
   *             original hard-coded HDFS location for backward compatibility.
   */
  def main(args: Array[String]): Unit = {
    // Fixed: app name said "SimpleSVM" but this job runs K-Means; the name
    // appears in the Spark UI and logs, so it should describe the job.
    val conf = new SparkConf().setAppName("KMeans Application")
    val sc = new SparkContext(conf)
    try {
      val inputPath =
        if (args.nonEmpty) args(0)
        else "hdfs://192.168.0.10:9000/user/root/home/data1.txt"
      val data = sc.textFile(inputPath)

      // Fixed: the original used typographic quotes ‘ ‘ in split(...), which
      // does not compile; plain ASCII ' ' is required.
      // cache() because the RDD is reused by train() and computeCost().
      val parsedData = data.map(s => Vectors.dense(s.split(' ').map(_.toDouble))).cache()

      // Cluster the data into two classes using KMeans.
      val numClusters = 2
      val numIterations = 20
      val clusters = KMeans.train(parsedData, numClusters, numIterations)

      // Evaluate clustering by computing Within Set Sum of Squared Errors.
      val WSSSE = clusters.computeCost(parsedData)
      println("Within Set Sum of Squared Errors = " + WSSSE)
      println("Prediction of (1.1, 2.1, 3.1): " + clusters.predict(Vectors.dense(1.1, 2.1, 3.1)))
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}