MapReduce示例-气象站

MaxTemperature.java

package cn.kissoft.hadoop.week05;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MaxTemperature {

    /**
     * Driver: configures and submits the "Max temperature" MapReduce job.
     *
     * @param args args[0] = input path, args[1] = output path (must not already exist,
     *             or FileOutputFormat will reject the job)
     * @throws Exception if job configuration, submission, or monitoring fails
     */
    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            System.err.println("Usage: MaxTemperature <input path> <output path>");
            System.exit(-1);
        }
        // Job.getInstance() replaces the constructor new Job(), which is
        // deprecated in Hadoop 0.21+/2.x.
        Job job = Job.getInstance();
        job.setJarByClass(MaxTemperature.class);
        job.setJobName("Max temperature");
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.setMapperClass(MaxTemperatureMapper.class);
        // Max is associative and commutative, so the reducer is safe to reuse
        // as a combiner: it shrinks the shuffle (the run log showed
        // "Combine input records=0" and ~4.8 MB of shuffle bytes) without
        // changing the final result.
        job.setCombinerClass(MaxTemperatureReducer.class);
        job.setReducerClass(MaxTemperatureReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}


MaxTemperatureMapper.java

package cn.kissoft.hadoop.week05;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Extracts (year, temperature) pairs from fixed-width weather records.
 *
 * Assumes the year occupies columns 0-3 and the temperature field columns
 * 13-18 of each line — matches the sample data used in this post; confirm
 * against the actual record layout before reusing on other datasets.
 */
public class MaxTemperatureMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    /** Sentinel value the data uses for a missing temperature reading. */
    private static final int MISSING = 9999;

    // Reused output objects: context.write() serializes the Writables
    // immediately, so reusing them avoids two allocations per input record.
    private final Text year = new Text();
    private final IntWritable temperature = new IntWritable();

    /**
     * Emits (year, temperature) for one input line, skipping records that are
     * too short, unparseable, or carry the MISSING sentinel. Previously a
     * single malformed line would throw and fail the entire map task.
     */
    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        // Too short to contain the temperature field — skip rather than
        // throw StringIndexOutOfBoundsException and kill the task.
        if (line.length() < 19) {
            return;
        }
        int airTemperature;
        try {
            airTemperature = Integer.parseInt(line.substring(13, 19).trim());
        } catch (NumberFormatException e) {
            // Malformed temperature field — skip this record.
            return;
        }
        if (airTemperature != MISSING) {
            year.set(line.substring(0, 4));
            temperature.set(airTemperature);
            context.write(year, temperature);
        }
    }

}

MaxTemperatureReducer.java

package cn.kissoft.hadoop.week05;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Reduces all temperatures observed for a year down to the single maximum.
 * Also usable as a combiner, since max is associative and commutative.
 */
public class MaxTemperatureReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Reused output value: context.write() serializes it immediately, so one
    // shared instance avoids an allocation per output group.
    private final IntWritable result = new IntWritable();

    /**
     * Emits (year, max temperature) for one key group.
     *
     * @param key     the year
     * @param values  all temperatures recorded for that year
     * @param context sink for the (year, max) output pair
     */
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {

        int maxValue = Integer.MIN_VALUE;
        for (IntWritable value : values) {
            maxValue = Math.max(maxValue, value.get());
        }
        result.set(maxValue);
        context.write(key, result);
    }
}

运行结果

[[email protected] guide]$ hadoop jar pc.jar cn.kissoft.hadoop.week05.MaxTemperature ./ch02/1959.txt ./ch02/out/
Warning: $HADOOP_HOME is deprecated.

14/08/15 11:27:15 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
14/08/15 11:27:15 INFO input.FileInputFormat: Total input paths to process : 1
14/08/15 11:27:15 INFO util.NativeCodeLoader: Loaded the native-hadoop library
14/08/15 11:27:15 WARN snappy.LoadSnappy: Snappy native library not loaded
14/08/15 11:27:17 INFO mapred.JobClient: Running job: job_201408150916_0003
14/08/15 11:27:18 INFO mapred.JobClient:  map 0% reduce 0%
14/08/15 11:27:31 INFO mapred.JobClient:  map 100% reduce 0%
14/08/15 11:27:43 INFO mapred.JobClient:  map 100% reduce 100%
14/08/15 11:27:46 INFO mapred.JobClient: Job complete: job_201408150916_0003
14/08/15 11:27:46 INFO mapred.JobClient: Counters: 29
14/08/15 11:27:46 INFO mapred.JobClient:   Job Counters
14/08/15 11:27:46 INFO mapred.JobClient:     Launched reduce tasks=1
14/08/15 11:27:46 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=14494
14/08/15 11:27:46 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0
14/08/15 11:27:46 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0
14/08/15 11:27:46 INFO mapred.JobClient:     Launched map tasks=1
14/08/15 11:27:46 INFO mapred.JobClient:     Data-local map tasks=1
14/08/15 11:27:46 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=12558
14/08/15 11:27:46 INFO mapred.JobClient:   File Output Format Counters
14/08/15 11:27:46 INFO mapred.JobClient:     Bytes Written=9
14/08/15 11:27:46 INFO mapred.JobClient:   FileSystemCounters
14/08/15 11:27:46 INFO mapred.JobClient:     FILE_BYTES_READ=9773826
14/08/15 11:27:46 INFO mapred.JobClient:     HDFS_BYTES_READ=27544475
14/08/15 11:27:46 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=14776914
14/08/15 11:27:46 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=9
14/08/15 11:27:46 INFO mapred.JobClient:   File Input Format Counters
14/08/15 11:27:46 INFO mapred.JobClient:     Bytes Read=27544368
14/08/15 11:27:46 INFO mapred.JobClient:   Map-Reduce Framework
14/08/15 11:27:46 INFO mapred.JobClient:     Map output materialized bytes=4886910
14/08/15 11:27:46 INFO mapred.JobClient:     Map input records=444264
14/08/15 11:27:46 INFO mapred.JobClient:     Reduce shuffle bytes=4886910
14/08/15 11:27:46 INFO mapred.JobClient:     Spilled Records=1332792
14/08/15 11:27:46 INFO mapred.JobClient:     Map output bytes=3998376
14/08/15 11:27:46 INFO mapred.JobClient:     Total committed heap usage (bytes)=219496448
14/08/15 11:27:46 INFO mapred.JobClient:     CPU time spent (ms)=6770
14/08/15 11:27:46 INFO mapred.JobClient:     Combine input records=0
14/08/15 11:27:46 INFO mapred.JobClient:     SPLIT_RAW_BYTES=107
14/08/15 11:27:46 INFO mapred.JobClient:     Reduce input records=444264
14/08/15 11:27:46 INFO mapred.JobClient:     Reduce input groups=1
14/08/15 11:27:46 INFO mapred.JobClient:     Combine output records=0
14/08/15 11:27:46 INFO mapred.JobClient:     Physical memory (bytes) snapshot=310345728
14/08/15 11:27:46 INFO mapred.JobClient:     Reduce output records=1
14/08/15 11:27:46 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=1455665152
14/08/15 11:27:46 INFO mapred.JobClient:     Map output records=444264
[[email protected] guide]$ hadoop fs -ls ./ch02/out/
Warning: $HADOOP_HOME is deprecated.

Found 3 items
-rw-r--r--   1 wukong supergroup          0 2014-08-15 11:27 /user/wukong/ch02/out/_SUCCESS
drwxr-xr-x   - wukong supergroup          0 2014-08-15 11:27 /user/wukong/ch02/out/_logs
-rw-r--r--   1 wukong supergroup          9 2014-08-15 11:27 /user/wukong/ch02/out/part-r-00000
[[email protected] guide]$ hadoop fs -cat ./ch02/out/part-r-00000
Warning: $HADOOP_HOME is deprecated.

1959    418

截图

MapReduce示例-气象站

时间: 2024-11-05 15:58:10

MapReduce示例-气象站的相关文章

运行 Hadoop 的 MapReduce 示例卡住了【已解决】

1. 说明 在以伪分布式模式运行 Hadoop 自带的 MapReduce 示例,卡在了 Running job ,如图所示 2. 解决过程 查看日志没得到有用的信息 再次确认配置信息没有错误信息 最终修改了 hosts 添加本机的 hosts ,如下 192.168.23.101 s101 重新将 SSH 进行配置,最终修改了 SSH 远程登陆配置笔记. 再次运行示例,程序顺利运行. 原文地址:https://www.cnblogs.com/share23/p/9696070.html

一个简单的MapReduce示例(多个MapReduce任务处理)

一.需求 有一个列表,只有两列:id.pro,记录了id与pro的对应关系,但是在同一个id下,pro有可能是重复的. 现在需要写一个程序,统计一下每个id下有多少个不重复的pro. 为了写一个完整的示例,我使用了多job! 二.文件目录 |- OutCount //单Job的,本次试验没有使用到,这里写出来供参考 |- OutCount2 |- OutCountMapper |- OutCountMapper2 |- OutCountReduce |- OutCountReduce2 三.样本

【大数据系列】MapReduce示例好友推荐

package org.slp; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; import java.io.IOException; import java.util.StringTokenizer; /** * Created by sanglp on 2017/7/17. */ public clas

Hadoop-2.6.0分布式单机环境搭建HDFS讲解Mapreduce示例

Hadoop安装使用 1.1 Hadoop简介 1.2 HDFS分布式存储系统 1.3 单机安装 1.4 Mapreduce 案例 1.5 伪分布式安装 1.6 课后作业 1.1 Hadoop简介 在文章的时候已经讲解了Hadoop的简介以及生态圈,有什么不懂的可以"出门右转" http://dwz.cn/4rdSdU 1.2 HDFS分布式存储系统(Hadoop Distributed File System) HDFS优点 高容错性 数据自动保存多个副本 副本都时候会自动恢复 适合

MapReduce示例式理解

从word count这个实例理解MapReduce. MapReduce大体上分为六个步骤:input, split, map, shuffle, reduce, output.细节描述如下: 输入(input):如给定一个文档,包含如下四行:Hello JavaHello CHello JavaHello C++2. 拆分(split):将上述文档中每一行的内容转换为key-value对,即: 0 - Hello Java1 - Hello C2 – Hello Java3 - Hello

【源】从零自学Hadoop(08):第一个MapReduce

阅读目录 序 数据准备 wordcount Yarn 新建MapReduce 示例下载 系列索引 本文版权归mephisto和博客园共有,欢迎转载,但须保留此段声明,并给出原文链接,谢谢合作. 文章是哥(mephisto)写的,SourceLink 序 上一篇,我们的Eclipse插件搞定,那开始我们的MapReduce之旅. 在这里,我们先调用官方的wordcount例子,然后再手动创建个例子,这样可以更好的理解Job. 数据准备 一:说明 wordcount这个类是对不同的word进行统计个

MapReduce(一) mapreduce基础入门

一.mapreduce入门 1.什么是mapreduce 首先让我们来重温一下 hadoop 的四大组件:HDFS:分布式存储系统MapReduce:分布式计算系统YARN: hadoop 的资源调度系统Common: 以上三大组件的底层支撑组件,主要提供基础工具包和 RPC 框架等 Mapreduce 是一个分布式运算程序的编程框架,是用户开发"基于 hadoop 的数据分析 应用"的核心框架Mapreduce 核心功能是将用户编写的业务逻辑代码和自带默认组件整合成一个完整的 分布式

Hadoop-2.6.0 + Zookeeper-3.4.6 + HBase-0.98.9-hadoop2环境搭建示例

1    基本信息 1.1     软件信息 hadoop-2.6.0 zookeeper-3.4.6 hbase-0.98.9-hadoop2 (以下示例中使用的操作系统是Centos 6.5,请将下载的3个tar包分别解压并放置在/usr/local/目录下) (Hbase包中lib里可以看到zookeeper的jar包,从文件名可以确定使用的zookeeper版本) 1.2     集群组成: Server Name Hadoop Cluster Zookeeper Ensemble HB

伪分布式hadoop环境搭建

官网上的教程版本不符,还过于简单(很多必要的步骤没提到), 所以自行网上找教程. 在这里整理一下: 假设java_home已经配置完成,ssh也可连通 1.修改配置文件 以下文件均在 %HADOOP_HOME%/conf/ 下 core-site.xml:  Hadoop Core的配置项,例如HDFS和MapReduce常用的I/O设置等. hdfs-site.xml:  Hadoop 守护进程的配置项,包括namenode,辅助namenode和datanode等. mapred-site.