Hadoop: Partitioner Programming

Please credit the original source when reposting: http://blog.csdn.net/l1028386804/article/details/46136685

1. The Mapper Implementation

	static class MyMapper extends Mapper<LongWritable, Text, Text, KpiWritable>{
		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			//each input line is tab-separated; field 1 holds the phone number (MSISDN)
			final String[] splited = value.toString().split("\t");
			final String msisdn = splited[1];
			final Text k2 = new Text(msisdn);
			//fields 6-9 hold the upstream/downstream packet counts and payload sizes
			final KpiWritable v2 = new KpiWritable(splited[6], splited[7], splited[8], splited[9]);
			context.write(k2, v2);
		}
	}
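
For context, the mapper assumes tab-separated log lines in which field 1 is the phone number and fields 6 through 9 are the four traffic counters. The standalone sketch below uses a made-up sample line (the exact log layout is not shown in this post, so treat the field order as an assumption) to illustrate what the split produces:

public class SplitDemo {
	public static void main(String[] args) {
		//a made-up line in the assumed format: timestamp, msisdn, MAC, IP, host, then the traffic fields
		String line = "1363157985066\t13726230503\t00-FD-07-A4-72-B8\t120.196.100.82"
				+ "\ti02.c.aliimg.com\t24\t27\t2481\t24681\t200";
		String[] splited = line.split("\t");
		System.out.println(splited[1]);	//13726230503 -> k2, the 11-digit phone number
		//27 2481 24681 200 -> the four values fed into KpiWritable
		System.out.println(splited[6] + " " + splited[7] + " " + splited[8] + " " + splited[9]);
	}
}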

2. The Reducer Implementation

	static class MyReducer extends Reducer<Text, KpiWritable, Text, KpiWritable>{
		/**
		 * @param	k2	a distinct phone number from the input
		 * @param	v2s	the traffic records collected for that phone number
		 */
		@Override
		protected void reduce(Text k2, Iterable<KpiWritable> v2s, Context context) throws IOException, InterruptedException {
			long upPackNum = 0L;
			long downPackNum = 0L;
			long upPayLoad = 0L;
			long downPayLoad = 0L;

			//sum the four counters across every record for this phone number
			for (KpiWritable kpiWritable : v2s) {
				upPackNum += kpiWritable.upPackNum;
				downPackNum += kpiWritable.downPackNum;
				upPayLoad += kpiWritable.upPayLoad;
				downPayLoad += kpiWritable.downPayLoad;
			}

			final KpiWritable v3 = new KpiWritable(upPackNum + "", downPackNum + "", upPayLoad + "", downPayLoad + "");
			context.write(k2, v3);
		}
	}

3. The Partitioner Implementation

	static class KpiPartitioner extends HashPartitioner<Text, KpiWritable>{
		@Override
		public int getPartition(Text key, KpiWritable value, int numReduceTasks) {
			//11-digit keys are mobile numbers and go to reducer 0; everything else goes to reducer 1
			return (key.toString().length() == 11) ? 0 : 1;
		}
	}
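
For comparison, the stock HashPartitioner that this class overrides spreads keys by hash code. The sketch below (an illustrative class, not part of the original program) reproduces its standard formula:

	static class DefaultStylePartitioner extends HashPartitioner<Text, KpiWritable>{
		@Override
		public int getPartition(Text key, KpiWritable value, int numReduceTasks) {
			//mask off the sign bit, then distribute keys evenly across all reduce tasks
			return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
		}
	}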

4. A Custom Hadoop Data Type

class KpiWritable implements Writable{
	long upPackNum;		//upstream packet count
	long downPackNum;	//downstream packet count
	long upPayLoad;		//upstream traffic in bytes
	long downPayLoad;	//downstream traffic in bytes

	public KpiWritable(){}

	public KpiWritable(String upPackNum, String downPackNum, String upPayLoad, String downPayLoad){
		this.upPackNum = Long.parseLong(upPackNum);
		this.downPackNum = Long.parseLong(downPackNum);
		this.upPayLoad = Long.parseLong(upPayLoad);
		this.downPayLoad = Long.parseLong(downPayLoad);
	}

	@Override
	public void readFields(DataInput in) throws IOException {
		//fields must be read back in exactly the order they were written
		this.upPackNum = in.readLong();
		this.downPackNum = in.readLong();
		this.upPayLoad = in.readLong();
		this.downPayLoad = in.readLong();
	}

	@Override
	public void write(DataOutput out) throws IOException {
		out.writeLong(upPackNum);
		out.writeLong(downPackNum);
		out.writeLong(upPayLoad);
		out.writeLong(downPayLoad);
	}

	@Override
	public String toString() {
		return upPackNum + "\t" + downPackNum + "\t" + upPayLoad + "\t" + downPayLoad;
	}
}
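
A quick way to sanity-check a custom Writable is to serialize it to a byte array and read it back through the same DataOutput/DataInput calls the framework uses. The standalone test below is a minimal sketch, not part of the original program:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class KpiWritableTest {
	public static void main(String[] args) throws Exception {
		final KpiWritable original = new KpiWritable("3", "5", "1024", "20480");

		//serialize exactly as the MapReduce framework would
		final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
		original.write(new DataOutputStream(buffer));

		//deserialize into a fresh instance and compare the string forms
		final KpiWritable copy = new KpiWritable();
		copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
		System.out.println(original);
		System.out.println(copy);	//must print the same line as above
	}
}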

5. The Main Entry Point

	public static void main(String[] args) throws Exception{
		Configuration conf = new Configuration();
		final FileSystem fileSystem = FileSystem.get(new URI(INPUT_PATH), conf);
		final Path outPath = new Path(OUT_PATH);
		//delete the output directory if it already exists, or the job will fail
		if(fileSystem.exists(outPath)){
			fileSystem.delete(outPath, true);
		}
		final Job job = new Job(conf, KpiApp.class.getSimpleName());
		//required so the job can be packaged and shipped as a jar
		job.setJarByClass(KpiApp.class);

		//1.1 specify the input path
		FileInputFormat.setInputPaths(job, INPUT_PATH);
		//specify the class that parses the input files
		job.setInputFormatClass(TextInputFormat.class);

		//1.2 specify the custom Mapper class
		job.setMapperClass(MyMapper.class);
		//specify the types of the intermediate <k2,v2> pairs
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(KpiWritable.class);

		//1.3 specify the partitioner class; two partitions require two reduce tasks
		job.setPartitionerClass(KpiPartitioner.class);
		job.setNumReduceTasks(2);

		//1.4 TODO sorting and grouping

		//1.5 TODO (optional) combiner

		//2.2 specify the custom Reducer class
		job.setReducerClass(MyReducer.class);
		//specify the types of the final <k3,v3> pairs
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(KpiWritable.class);

		//2.3 specify where the output goes
		FileOutputFormat.setOutputPath(job, new Path(OUT_PATH));
		//specify the class that formats the output files
		job.setOutputFormatClass(TextOutputFormat.class);

		//submit the job to the JobTracker and wait for it to finish
		job.waitForCompletion(true);
	}
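
One small refinement, not in the original listing: waitForCompletion returns whether the job succeeded, so propagating that result as the process exit code lets calling scripts detect failures:

		//exit non-zero on failure so shell scripts and schedulers can react
		System.exit(job.waitForCompletion(true) ? 0 : 1);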

6. The Complete Program

package com.lyz.hadoop.p;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
/**
 * This partitioning example must be packaged as a jar to run.
 * Why partition:
 * 1. produce multiple output files, as the business requires
 * 2. run multiple reduce tasks in parallel, improving overall job throughput
 */
public class KpiApp {
	static final String INPUT_PATH = "hdfs://liuyazhuang:9000/wlan";
	static final String OUT_PATH = "hdfs://liuyazhuang:9000/out";
	public static void main(String[] args) throws Exception{
		Configuration conf = new Configuration();
		final FileSystem fileSystem = FileSystem.get(new URI(INPUT_PATH), conf);
		final Path outPath = new Path(OUT_PATH);
		//delete the output directory if it already exists, or the job will fail
		if(fileSystem.exists(outPath)){
			fileSystem.delete(outPath, true);
		}
		final Job job = new Job(conf, KpiApp.class.getSimpleName());
		//required so the job can be packaged and shipped as a jar
		job.setJarByClass(KpiApp.class);

		//1.1 specify the input path
		FileInputFormat.setInputPaths(job, INPUT_PATH);
		//specify the class that parses the input files
		job.setInputFormatClass(TextInputFormat.class);

		//1.2 specify the custom Mapper class
		job.setMapperClass(MyMapper.class);
		//specify the types of the intermediate <k2,v2> pairs
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(KpiWritable.class);

		//1.3 specify the partitioner class; two partitions require two reduce tasks
		job.setPartitionerClass(KpiPartitioner.class);
		job.setNumReduceTasks(2);

		//1.4 TODO sorting and grouping

		//1.5 TODO (optional) combiner

		//2.2 specify the custom Reducer class
		job.setReducerClass(MyReducer.class);
		//specify the types of the final <k3,v3> pairs
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(KpiWritable.class);

		//2.3 specify where the output goes
		FileOutputFormat.setOutputPath(job, new Path(OUT_PATH));
		//specify the class that formats the output files
		job.setOutputFormatClass(TextOutputFormat.class);

		//submit the job to the JobTracker and wait for it to finish
		job.waitForCompletion(true);
	}

	static class MyMapper extends Mapper<LongWritable, Text, Text, KpiWritable>{
		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			//each input line is tab-separated; field 1 holds the phone number (MSISDN)
			final String[] splited = value.toString().split("\t");
			final String msisdn = splited[1];
			final Text k2 = new Text(msisdn);
			//fields 6-9 hold the upstream/downstream packet counts and payload sizes
			final KpiWritable v2 = new KpiWritable(splited[6], splited[7], splited[8], splited[9]);
			context.write(k2, v2);
		}
	}

	static class MyReducer extends Reducer<Text, KpiWritable, Text, KpiWritable>{
		/**
		 * @param	k2	a distinct phone number from the input
		 * @param	v2s	the traffic records collected for that phone number
		 */
		@Override
		protected void reduce(Text k2, Iterable<KpiWritable> v2s, Context context) throws IOException, InterruptedException {
			long upPackNum = 0L;
			long downPackNum = 0L;
			long upPayLoad = 0L;
			long downPayLoad = 0L;

			//sum the four counters across every record for this phone number
			for (KpiWritable kpiWritable : v2s) {
				upPackNum += kpiWritable.upPackNum;
				downPackNum += kpiWritable.downPackNum;
				upPayLoad += kpiWritable.upPayLoad;
				downPayLoad += kpiWritable.downPayLoad;
			}

			final KpiWritable v3 = new KpiWritable(upPackNum + "", downPackNum + "", upPayLoad + "", downPayLoad + "");
			context.write(k2, v3);
		}
	}

	static class KpiPartitioner extends HashPartitioner<Text, KpiWritable>{
		@Override
		public int getPartition(Text key, KpiWritable value, int numReduceTasks) {
			//11-digit keys are mobile numbers and go to reducer 0; everything else goes to reducer 1
			return (key.toString().length() == 11) ? 0 : 1;
		}
	}
}

class KpiWritable implements Writable{
	long upPackNum;		//upstream packet count
	long downPackNum;	//downstream packet count
	long upPayLoad;		//upstream traffic in bytes
	long downPayLoad;	//downstream traffic in bytes

	public KpiWritable(){}

	public KpiWritable(String upPackNum, String downPackNum, String upPayLoad, String downPayLoad){
		this.upPackNum = Long.parseLong(upPackNum);
		this.downPackNum = Long.parseLong(downPackNum);
		this.upPayLoad = Long.parseLong(upPayLoad);
		this.downPayLoad = Long.parseLong(downPayLoad);
	}

	@Override
	public void readFields(DataInput in) throws IOException {
		//fields must be read back in exactly the order they were written
		this.upPackNum = in.readLong();
		this.downPackNum = in.readLong();
		this.upPayLoad = in.readLong();
		this.downPayLoad = in.readLong();
	}

	@Override
	public void write(DataOutput out) throws IOException {
		out.writeLong(upPackNum);
		out.writeLong(downPackNum);
		out.writeLong(upPayLoad);
		out.writeLong(downPayLoad);
	}

	@Override
	public String toString() {
		return upPackNum + "\t" + downPackNum + "\t" + upPayLoad + "\t" + downPayLoad;
	}
}