Learning Mahout (3)

Developing and running your first Mahout program.

The code:

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package chen.test.kmeans;

import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.canopy.CanopyDriver;
import org.apache.mahout.clustering.conversion.InputDriver;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.clustering.kmeans.RandomSeedGenerator;
import org.apache.mahout.common.AbstractJob;
import org.apache.mahout.common.ClassUtils;
import org.apache.mahout.common.HadoopUtil;
import org.apache.mahout.common.commandline.DefaultOptionCreator;
import org.apache.mahout.common.distance.DistanceMeasure;
import org.apache.mahout.common.distance.EuclideanDistanceMeasure;
import org.apache.mahout.common.distance.SquaredEuclideanDistanceMeasure;
import org.apache.mahout.utils.clustering.ClusterDumper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class TwoJob extends AbstractJob {

  private static final Logger log = LoggerFactory.getLogger(TwoJob.class);

  private static final String DIRECTORY_CONTAINING_CONVERTED_INPUT = "data";

  private TwoJob() {
  }

  public static void main(String[] args) throws Exception {
    if (args.length > 0) {
      log.info("Running with only user-supplied arguments");
      ToolRunner.run(new Configuration(), new TwoJob(), args);
    } else {
      log.info("Running with default arguments");
      Path output = new Path("output");
      Configuration conf = new Configuration();
      HadoopUtil.delete(conf, output);
      run(conf, new Path("testdata"), output, new EuclideanDistanceMeasure(), 2, 0.5, 10);
    }
  }

  @Override
  public int run(String[] args) throws Exception {
    addInputOption();
    addOutputOption();
    addOption(DefaultOptionCreator.distanceMeasureOption().create());
    addOption(DefaultOptionCreator.numClustersOption().create());
    addOption(DefaultOptionCreator.t1Option().create());
    addOption(DefaultOptionCreator.t2Option().create());
    addOption(DefaultOptionCreator.convergenceOption().create());
    addOption(DefaultOptionCreator.maxIterationsOption().create());
    addOption(DefaultOptionCreator.overwriteOption().create());

    Map<String,List<String>> argMap = parseArguments(args);
    if (argMap == null) {
      return -1;
    }

    Path input = getInputPath();
    Path output = getOutputPath();
    String measureClass = getOption(DefaultOptionCreator.DISTANCE_MEASURE_OPTION);
    if (measureClass == null) {
      measureClass = SquaredEuclideanDistanceMeasure.class.getName();
    }
    double convergenceDelta = Double.parseDouble(getOption(DefaultOptionCreator.CONVERGENCE_DELTA_OPTION));
    int maxIterations = Integer.parseInt(getOption(DefaultOptionCreator.MAX_ITERATIONS_OPTION));
    if (hasOption(DefaultOptionCreator.OVERWRITE_OPTION)) {
      HadoopUtil.delete(getConf(), output);
    }
    DistanceMeasure measure = ClassUtils.instantiateAs(measureClass, DistanceMeasure.class);
    if (hasOption(DefaultOptionCreator.NUM_CLUSTERS_OPTION)) {
      int k = Integer.parseInt(getOption(DefaultOptionCreator.NUM_CLUSTERS_OPTION));
      run(getConf(), input, output, measure, k, convergenceDelta, maxIterations);
    } else {
      double t1 = Double.parseDouble(getOption(DefaultOptionCreator.T1_OPTION));
      double t2 = Double.parseDouble(getOption(DefaultOptionCreator.T2_OPTION));
      run(getConf(), input, output, measure, t1, t2, convergenceDelta, maxIterations);
    }
    return 0;
  }

  /**
   * Run the kmeans clustering job on an input dataset using the given number of clusters k and iteration
   * parameters. All output data will be written to the output directory, which will be initially deleted if it exists.
   * The clustered points will reside in the path <output>/clusteredPoints. By default, the job expects a file
   * containing equal length space delimited data that resides in a directory named "testdata", and writes output to a
   * directory named "output".
   *
   * @param conf
   *          the Configuration to use
   * @param input
   *          the Path of the input directory
   * @param output
   *          the Path of the output directory
   * @param measure
   *          the DistanceMeasure to use
   * @param k
   *          the number of clusters in Kmeans
   * @param convergenceDelta
   *          the double convergence criteria for iterations
   * @param maxIterations
   *          the int maximum number of iterations
   */
  public static void run(Configuration conf, Path input, Path output, DistanceMeasure measure, int k,
      double convergenceDelta, int maxIterations) throws Exception {
    Path directoryContainingConvertedInput = new Path(output, DIRECTORY_CONTAINING_CONVERTED_INPUT);
    log.info("Preparing Input");
    InputDriver.runJob(input, directoryContainingConvertedInput, "org.apache.mahout.math.RandomAccessSparseVector");
    log.info("Running random seed to get initial clusters");
    Path clusters = new Path(output, "random-seeds");
    clusters = RandomSeedGenerator.buildRandom(conf, directoryContainingConvertedInput, clusters, k, measure);
    log.info("Running KMeans with k = {}", k);
    KMeansDriver.run(conf, directoryContainingConvertedInput, clusters, output, convergenceDelta,
        maxIterations, true, 0.0, false);
    // run ClusterDumper
    Path outGlob = new Path(output, "clusters-*-final");
    Path clusteredPoints = new Path(output,"clusteredPoints");
    log.info("Dumping out clusters from clusters: {} and clusteredPoints: {}", outGlob, clusteredPoints);
    ClusterDumper clusterDumper = new ClusterDumper(outGlob, clusteredPoints);

    //print the result
    clusterDumper.printClusters(null);

  }

  /**
   * Run the kmeans clustering job on an input dataset using the given distance measure, t1, t2 and iteration
   * parameters. All output data will be written to the output directory, which will be initially deleted if it exists.
   * The clustered points will reside in the path <output>/clusteredPoints. By default, the job expects a file
   * containing synthetic_control.data as obtained from
   * http://archive.ics.uci.edu/ml/datasets/Synthetic+Control+Chart+Time+Series resides in a directory named "testdata",
   * and writes output to a directory named "output".
   *
   * @param conf
   *          the Configuration to use
   * @param input
   *          the Path of the input directory
   * @param output
   *          the Path of the output directory
   * @param measure
   *          the DistanceMeasure to use
   * @param t1
   *          the canopy T1 threshold
   * @param t2
   *          the canopy T2 threshold
   * @param convergenceDelta
   *          the double convergence criteria for iterations
   * @param maxIterations
   *          the int maximum number of iterations
   */
  public static void run(Configuration conf, Path input, Path output, DistanceMeasure measure, double t1, double t2,
      double convergenceDelta, int maxIterations) throws Exception {
    Path directoryContainingConvertedInput = new Path(output, DIRECTORY_CONTAINING_CONVERTED_INPUT);
    log.info("Preparing Input");
    InputDriver.runJob(input, directoryContainingConvertedInput, "org.apache.mahout.math.RandomAccessSparseVector");
    log.info("Running Canopy to get initial clusters");
    Path canopyOutput = new Path(output, "canopies");
    CanopyDriver.run(new Configuration(), directoryContainingConvertedInput, canopyOutput, measure, t1, t2, false, 0.0,
        false);
    log.info("Running KMeans");
    KMeansDriver.run(conf, directoryContainingConvertedInput, new Path(canopyOutput, Cluster.INITIAL_CLUSTERS_DIR
        + "-final"), output, convergenceDelta, maxIterations, true, 0.0, false);
    // run ClusterDumper
    ClusterDumper clusterDumper = new ClusterDumper(new Path(output, "clusters-*-final"), new Path(output,
        "clusteredPoints"));
    clusterDumper.printClusters(null);
  }
}

The code above is the example from the previous post; it uses k-means to do the clustering. The flow is: InputDriver converts the raw input into vector files, RandomSeedGenerator (or CanopyDriver, in the t1/t2 overload) produces the initial cluster centers, KMeansDriver runs the iterations, and ClusterDumper prints the result.
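For orientation, after a successful run the output directory laid down by the code above looks roughly like this (the number of clusters-N directories depends on how many iterations it takes to converge):

output/
   data/              vectors produced by InputDriver
   random-seeds/      initial centers (k overload); canopies/ in the t1/t2 overload
   clusters-0/ ... clusters-N-final/   one directory per k-means iteration
   clusteredPoints/   final point-to-cluster assignments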

The build.xml:

<project name="mahout_test" default="jar">

   <property name="root.dir" value="." />
   <property name="src.dir" value="${root.dir}/src" />
   <property name="lib.dir" value="${root.dir}/lib" />
   <property name="build.dir" value="${root.dir}/build" />

   <target name="clean" depends="">
      <echo>root = ${root.dir}</echo>
      <delete dir="${build.dir}" />

      <mkdir dir="${build.dir}" />

   </target>

   <target name="build" depends="clean">
      <javac fork="true" debug="true" srcdir="${src.dir}" destdir="${build.dir}">
         <classpath>
            <fileset dir="${lib.dir}" includes="*.jar" />
         </classpath>
      </javac>

   </target>

   <target name="jar" depends="build">
         <mkdir dir="${build.dir}/lib" />
       <!--
         <copy file="${lib.dir}/mahout-core-0.9.jar" todir="${build.dir}/lib" />
         <copy file="${lib.dir}/mahout-integration-0.9.jar" todir="${build.dir}/lib" />
      <copy file="${lib.dir}/hadoop-core-1.2.1.jar" todir="${build.dir}/lib" />
       -->

       <copy file="${lib.dir}/mahout-examples-0.9-job.jar" todir="${build.dir}/lib" />
       <!--
       <copy file="${lib.dir}/mahout-integration-0.9.jar" todir="${build.dir}/lib" />
       -->
         <jar destfile="${root.dir}/mahout_test.jar" basedir="${build.dir}" >
         <manifest>
            <!--
             <attribute name="Main-Class" value="chen/test/Job" />
             -->
         </manifest>
         </jar>
   </target>

</project>

Build command:

ant -f build.xml

After the build completes, it generates a mahout_test.jar file under ${root.dir}.
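To double-check what got packaged (this matters below), you can list the jar's contents; with the build.xml above the listing should look roughly like:

jar tf mahout_test.jar
META-INF/MANIFEST.MF
chen/test/kmeans/TwoJob.class
lib/mahout-examples-0.9-job.jar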

Jars the program depends on at compile time: mahout-core-0.9-job.jar, mahout-examples-0.9-job.jar, and hadoop-core-1.2.1.jar.

Of these, mahout-core-0.9.jar is only used for the org.slf4j.Logger and org.slf4j.LoggerFactory classes, so you can substitute slf4j-api-1.4.3.jar from Hadoop's lib directory for mahout-core-0.9-job.jar.
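For reference, the build target boils down to roughly the following javac call (a sketch assuming the jars sit in lib/ as in the build.xml; on Windows, use ; instead of : as the classpath separator):

javac -cp lib/mahout-core-0.9-job.jar:lib/mahout-examples-0.9-job.jar:lib/hadoop-core-1.2.1.jar -d build src/chen/test/kmeans/TwoJob.java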

The key to packaging a Mahout program is that the generated jar must include mahout-examples-0.9-job.jar; otherwise running it with hadoop jar **.jar will fail.

<copy file="${lib.dir}/mahout-examples-0.9-job.jar" todir="${build.dir}/lib" />

Many of the classes in mahout-examples-0.9-job.jar overlap with the ones in mahout-core-0.9-job.jar, which is a real trap: if both jars are loaded at the same time, the job fails with errors complaining that certain classes cannot be found. (As far as I can tell, bundling only the examples job jar works because hadoop jar unpacks your jar into a temporary directory and adds everything under its lib/ to the classpath, so each class is then seen exactly once.)

This problem troubled me for a long time.

Also, don't specify a Main-Class when building the jar, or the run will fail as well. I haven't dug into the exact cause; my guess is that hadoop jar uses the manifest's Main-Class when one is present, so the class name given on the command line is then treated as an ordinary argument rather than the class to run. If you know the details, please leave a comment.

Run command:

bin/hadoop jar /mnt/hgfs/mnt/chenfool/mahout_test.jar  chen.test.kmeans.TwoJob

The runtime environment is similar to the previous post's: the input data file (which InputDriver will convert to vectors) must already exist in HDFS under /user/${user}/testdata.
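If you no longer have the dataset from the previous post, any file of equal-length, space-delimited numeric rows will do. The sample below is made up purely for illustration, and mydata.txt is just a placeholder name:

1.0 1.1 0.9 1.2
5.0 5.2 4.8 5.1
9.0 8.9 9.2 9.1

Upload it the usual way:

bin/hadoop fs -mkdir testdata
bin/hadoop fs -put mydata.txt testdata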
