不知道为啥不是很想学习MapReduce方面的知识,不过现在这么想可能过段时间还是免不了去学,这边先记录下一个MapReduce的实例wordcount代码。
1、pom.xml 依赖配置:
<dependencies> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>RELEASE</version> </dependency> <dependency> <groupId>org.apache.logging.log4j</groupId> <artifactId>log4j-core</artifactId> <version>2.8.2</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>2.8.3</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> <version>2.8.3</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>2.8.3</version> </dependency></dependencies> 2、WordCountMapper:
import org.apache.hadoop.io.IntWritable;import org.apache.hadoop.io.LongWritable;import org.apache.hadoop.io.Text;import org.apache.hadoop.mapreduce.Mapper; import java.io.IOException; public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> { Text k = new Text(); IntWritable v = new IntWritable(1); @Override protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException { //获取一行文本 String line = value.toString(); //根据分隔符切分 String[] words = line.split(" "); //输出 for (String word : words) { k.set(word); context.write(k, v); } }} 3、WordCountReducer:
import org.apache.hadoop.io.IntWritable;import org.apache.hadoop.io.Text;import org.apache.hadoop.mapreduce.Reducer; import java.io.IOException; public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> { int sum; IntWritable v = new IntWritable(); @Override protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException { //累加求和 sum = 0; for (IntWritable count : values) { sum += count.get(); } //输出 v.set(sum); context.write(key, v); }} 4、WordCountDriver:
import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.Path;import org.apache.hadoop.io.IntWritable;import org.apache.hadoop.io.Text;import org.apache.hadoop.mapreduce.Job;import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import java.io.IOException; public class WordCountDriver { public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException { //获取配置信息及封装任务 Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration); //设置jar加载路径 job.setJarByClass(WordCountDriver.class); //设置map和reduce类 job.setMapperClass(WordCountMapper.class); job.setReducerClass(WordCountReducer.class); //设置map输出 job.setMapOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); //设置reduce输出 job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); //设置输入和输出路径 FileInputFormat.setInputPaths(job, new Path(args[0])); FileOutputFormat.setOutputPath(job, new Path(args[1])); //提交 boolean result = job.waitForCompletion(true); System.exit(result ? 0 : 1); }} 5、参数设置:
6、结果查询:
原文地址:https://www.cnblogs.com/qugemingzihaonan13/p/9860673.html
时间: 2024-08-03 21:52:45