[Development] Connecting Eclipse to Hadoop 2.2.0 on Windows 7

Prerequisites:

Make sure the Hadoop 2.2.0 cluster is up and running.

1. Create a Maven project in Eclipse and edit the pom.xml as follows:

 <dependencies>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>0.96.2-hadoop2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.2.0</version>
        </dependency>
        <dependency>
            <groupId>jdk.tools</groupId>
            <artifactId>jdk.tools</artifactId>
            <version>1.7</version>
            <scope>system</scope>
            <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
        </dependency>
    </dependencies>
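
The hbase-client artifact above already pulls hadoop-common 2.2.0 onto the classpath transitively (it shows up in the classpath dump in step 7), so the HDFS sample below compiles without further additions. If you want that dependency to be explicit, or you drop the HBase dependency, hadoop-common can be declared as well; a minimal sketch:

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.2.0</version>
        </dependency>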

2. Copy a log4j.properties file into the src/main/resources root so that detailed logs can be viewed through log4j:

log4j.rootLogger=debug, stdout, R
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%5p - %m%n
log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=firestorm.log
log4j.appender.R.MaxFileSize=100KB
log4j.appender.R.MaxBackupIndex=1
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%p %t %c - %m%n
log4j.logger.com.codefutures=DEBUG

3. Add a runnable Hadoop program. I use an HdfsDAO class, which first verifies that basic HDFS operations work:

package com.bigdata.hdfs;

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.JobConf;

public class HdfsDAO {
    private static final String HDFS = "hdfs://192.168.11.37:9000/";

    private String hdfsPath;
    private Configuration conf;

    public HdfsDAO(Configuration conf) {
        this(HDFS, conf);
    }

    public HdfsDAO(String hdfs, Configuration conf) {
        this.hdfsPath = hdfs;
        this.conf = conf;
    }

    public static void main(String[] args) throws IOException {
        JobConf conf = config();
        HdfsDAO hdfs = new HdfsDAO(conf);
//        hdfs.copyFile("datafile/item.csv", "/tmp/new");
//        hdfs.ls("/tmp/new");
        hdfs.ls("/");
    }        

    public static JobConf config(){
        JobConf conf = new JobConf(HdfsDAO.class);
        conf.setJobName("HdfsDAO");
        conf.addResource("classpath:/hadoop/core-site.xml");
        conf.addResource("classpath:/hadoop/hdfs-site.xml");
        conf.addResource("classpath:/hadoop/mapred-site.xml");
        return conf;
    }

    public void mkdirs(String folder) throws IOException {
        Path path = new Path(folder);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        if (!fs.exists(path)) {
            fs.mkdirs(path);
            System.out.println("Create: " + folder);
        }
        fs.close();
    }
    public void rmr(String folder) throws IOException {
        Path path = new Path(folder);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        // delete recursively and immediately; deleteOnExit would only remove the path when the FileSystem is closed
        fs.delete(path, true);
        System.out.println("Delete: " + folder);
        fs.close();
    }
    public void ls(String folder) throws IOException {
        Path path = new Path(folder);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        FileStatus[] list = fs.listStatus(path);
        System.out.println("ls: " + folder);
        System.out.println("==========================================================");
        for (FileStatus f : list) {
            System.out.printf("name: %s, folder: %s, size: %d\n", f.getPath(), f.isDir(), f.getLen());
        }
        System.out.println("==========================================================");
        fs.close();
    }
    public void createFile(String file, String content) throws IOException {
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        byte[] buff = content.getBytes();
        FSDataOutputStream os = null;
        try {
            os = fs.create(new Path(file));
            os.write(buff, 0, buff.length);
            System.out.println("Create: " + file);
        } finally {
            if (os != null)
                os.close();
        }
        fs.close();
    }
    public void copyFile(String local, String remote) throws IOException {
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        fs.copyFromLocalFile(new Path(local), new Path(remote));
        System.out.println("copy from: " + local + " to " + remote);
        fs.close();
    }
    public void download(String remote, String local) throws IOException {
        Path path = new Path(remote);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        fs.copyToLocalFile(path, new Path(local));
        System.out.println("download: from" + remote + " to " + local);
        fs.close();
    }

    public void cat(String remoteFile) throws IOException {
        Path path = new Path(remoteFile);
        FileSystem fs = FileSystem.get(URI.create(hdfsPath), conf);
        FSDataInputStream fsdis = null;
        System.out.println("cat: " + remoteFile);
        try {
            fsdis =fs.open(path);
            IOUtils.copyBytes(fsdis, System.out, 4096, false);
          } finally {
            IOUtils.closeStream(fsdis);
            fs.close();
          }
    }
    public void location() throws IOException {
        // String folder = hdfsPath + "create/";
        // String file = "t2.txt";
        // FileSystem fs = FileSystem.get(URI.create(hdfsPath), new
        // Configuration());
        // FileStatus f = fs.getFileStatus(new Path(folder + file));
        // BlockLocation[] list = fs.getFileBlockLocations(f, 0, f.getLen());
        //
        // System.out.println("File Location: " + folder + file);
        // for (BlockLocation bl : list) {
        // String[] hosts = bl.getHosts();
        // for (String host : hosts) {
        // System.out.println("host:" + host);
        // }
        // }
        // fs.close();
    }

}

4. Run HdfsDAO

It fails with the following errors:
java.io.IOException: HADOOP_HOME or hadoop.home.dir are not set.
    at org.apache.hadoop.util.Shell.checkHadoopHome(Shell.java:225)
    at org.apache.hadoop.util.Shell.<clinit>(Shell.java:250)
    at org.apache.hadoop.util.StringUtils.<clinit>(StringUtils.java:76)
    at org.apache.hadoop.conf.Configuration.getTrimmedStrings(Configuration.java:1546)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:519)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:453)
    at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:136)
    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2433)
    at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
    at HdfsDAO.copyFile(HdfsDAO.java:94)
    at HdfsDAO.main(HdfsDAO.java:34)
ERROR - Failed to locate the winutils binary in the hadoop binary path
java.io.IOException: Could not locate executable null\bin\winutils.exe in the Hadoop binaries.
    at org.apache.hadoop.util.Shell.getQualifiedBinPath(Shell.java:278)
    at org.apache.hadoop.util.Shell.getWinUtilsPath(Shell.java:300)
    at org.apache.hadoop.util.Shell.<clinit>(Shell.java:293)
    at org.apache.hadoop.util.StringUtils.<clinit>(StringUtils.java:76)
    at org.apache.hadoop.conf.Configuration.getTrimmedStrings(Configuration.java:1546)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:519)
    at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:453)
    at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:136)
    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2433)
    at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
    at HdfsDAO.copyFile(HdfsDAO.java:94)
    at HdfsDAO.main(HdfsDAO.java:34)

  

Solution:

First, set the HADOOP_HOME environment variable on Windows 7 to point to the local hadoop-2.2.0 root directory.

Then download the Hadoop 2.2.0 bin package from https://github.com/srccodes/hadoop-common-2.2.0-bin, which contains winutils.exe, and copy winutils.exe into %HADOOP_HOME%\bin.
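
Alternatively, hadoop.home.dir can be set from code before the first FileSystem call; as the stack trace above shows, Hadoop's Shell accepts either the HADOOP_HOME environment variable or this system property. A sketch (the path is an assumption and must point at a local hadoop-2.2.0 directory whose bin folder contains winutils.exe):

        // must run before anything touches org.apache.hadoop.util.Shell
        System.setProperty("hadoop.home.dir", "D:\\hadoop-2.2.0");   // hypothetical local path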

5. Rerun the program; it now executes successfully:

DEBUG - field org.apache.hadoop.metrics2.lib.MutableRate org.apache.hadoop.security.UserGroupInformation$UgiMetrics.loginSuccess with annotation @org.apache.hadoop.metrics2.annotation.Metric(valueName=Time, about=, value=[Rate of successful kerberos logins and latency (milliseconds)], always=false, type=DEFAULT, sampleName=Ops)
DEBUG - field org.apache.hadoop.metrics2.lib.MutableRate org.apache.hadoop.security.UserGroupInformation$UgiMetrics.loginFailure with annotation @org.apache.hadoop.metrics2.annotation.Metric(valueName=Time, about=, value=[Rate of failed kerberos logins and latency (milliseconds)], always=false, type=DEFAULT, sampleName=Ops)
DEBUG - UgiMetrics, User and group related metrics
DEBUG - Kerberos krb5 configuration not found, setting default realm to empty
DEBUG -  Creating new Groups object
DEBUG - Trying to load the custom-built native-hadoop library...
DEBUG - Failed to load native-hadoop with error: java.lang.UnsatisfiedLinkError: no hadoop in java.library.path
DEBUG - java.library.path=D:\Program Files\Java\jre7\bin;C:\Windows\Sun\Java\bin;C:\Windows\system32;C:\Windows;C:\Program Files (x86)\NVIDIA Corporation\PhysX\Common;C:\Program Files (x86)\Intel\iCLS Client\;C:\Program Files\Intel\iCLS Client\;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files (x86)\Intel\OpenCL SDK\3.0\bin\x86;C:\Program Files (x86)\Intel\OpenCL SDK\3.0\bin\x64;D:\Program Files\Java\jdk1.7.0_40\bin;D:\Program Files\Java\jdk1.7.0_40\jre\bin;D:\Program Files\TortoiseSVN\bin;D:\Program Files (x86)\ant\bin;D:\Program Files\maven3\bin;.
 WARN - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
DEBUG - Falling back to shell based
DEBUG - Group mapping impl=org.apache.hadoop.security.ShellBasedUnixGroupsMapping
DEBUG - Group mapping impl=org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback; cacheTimeout=300000
DEBUG - hadoop login
DEBUG - hadoop login commit
DEBUG - using local user:NTUserPrincipal: Administrator
DEBUG - UGI loginUser:Administrator (auth:SIMPLE)
DEBUG - dfs.client.use.legacy.blockreader.local = false
DEBUG - dfs.client.read.shortcircuit = false
DEBUG - dfs.client.domain.socket.data.traffic = false
DEBUG - dfs.domain.socket.path =
DEBUG - StartupProgress, NameNode startup progress
DEBUG - multipleLinearRandomRetry = null
DEBUG - rpcKind=RPC_PROTOCOL_BUFFER, rpcRequestWrapperClass=class org.apache.hadoop.ipc.ProtobufRpcEngine$RpcRequestWrapper, rpcInvoker=or[email protected]1afde4a3
DEBUG - Both short-circuit local reads and UNIX domain socket are disabled.
DEBUG - The ping interval is 60000 ms.
DEBUG - Connecting to /192.168.0.160:8020
DEBUG - IPC Client (60133785) connection to /192.168.0.160:8020 from Administrator: starting, having connections 1
DEBUG - IPC Client (60133785) connection to /192.168.0.160:8020 from Administrator sending #0
DEBUG - IPC Client (60133785) connection to /192.168.0.160:8020 from Administrator got value #0
DEBUG - Call: getListing took 136ms
ls: /
==========================================================
name: hdfs://192.168.0.160:8020/data, folder: true, size: 0
name: hdfs://192.168.0.160:8020/fulong, folder: true, size: 0
name: hdfs://192.168.0.160:8020/test, folder: true, size: 0
name: hdfs://192.168.0.160:8020/tmp, folder: true, size: 0
name: hdfs://192.168.0.160:8020/user, folder: true, size: 0
name: hdfs://192.168.0.160:8020/workspace, folder: true, size: 0
==========================================================
DEBUG - Stopping client
DEBUG - IPC Client (60133785) connection to /192.168.0.160:8020 from Administrator: closed
DEBUG - IPC Client (60133785) connection to /192.168.0.160:8020 from Administrator: stopped, remaining connections 0
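
Now that ls("/") works against the cluster, the other HdfsDAO operations can be exercised from main() in the same way; a small sketch (the paths are hypothetical):

        JobConf conf = config();
        HdfsDAO hdfs = new HdfsDAO(conf);
        hdfs.mkdirs("/tmp/new");                         // create a directory
        hdfs.createFile("/tmp/new/hello.txt", "hello");  // write a small file
        hdfs.cat("/tmp/new/hello.txt");                  // print its content back
        hdfs.copyFile("datafile/item.csv", "/tmp/new");  // upload a local file
        hdfs.ls("/tmp/new");                             // list the directory
        hdfs.rmr("/tmp/new");                            // remove it again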

6. Test the HBase code:

package com.rockontrol.tryhbase;
import static org.junit.Assert.*;

import java.io.IOException;
import java.io.InputStream;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.log4j.Logger;
import org.junit.Test;

public class TestUseHbase {

   private String table = "Tenant";
   private String cfs[] = {"i"};
   private final int availableProcessors =
         Runtime.getRuntime().availableProcessors();
   private ExecutorService exec =
         Executors.newFixedThreadPool(availableProcessors*2);
   private Random rnd = new Random();
   private final int ROW_KEY_LEN = Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
   private final String colId = "id";
   private final String colStat = "stat";
   private final String colCert = "cert";

   private Configuration conf;
   private HTablePool pool;

   private static final Logger logger =
         Logger.getLogger(TestUseHbase.class);

   public TestUseHbase() throws Exception {
      conf = new Configuration();
      conf.addResource(getHbaseConfStream());
      pool = new HTablePool(conf, 1000);
   }

   @Test
   public void testSetupTable() throws Exception {

      HBaseAdmin admin = new HBaseAdmin(conf);

      try {
         if (admin.tableExists(table)) {
            logger.info("table already exists!");
         } else {
            HTableDescriptor tableDesc =new HTableDescriptor(table);
            for(String cf : cfs) {
               tableDesc.addFamily(new HColumnDescriptor(cf));
            }
            admin.createTable(tableDesc);
            logger.info("table created!");
         }
      } finally {
         admin.close();
      }
   }

   @Test
   public void testPuts() throws Exception {

      final HTable htable = (HTable) pool.getTable(table);
      // put random id
      for (int i = 0; i < 10; i++) {
         exec.execute(new Runnable() {
            @Override
            public void run() {
               long authId = getAuthId();
               byte[] rowkey = createRowKey(authId, (byte) 0);
               htable.setAutoFlush(false);
               Put put = new Put(rowkey);
               put.add(cfs[0].getBytes(), colId.getBytes(), String.valueOf(authId)
                     .getBytes());
               put.add(cfs[0].getBytes(), colStat.getBytes(), String.valueOf(0)
                     .getBytes());
               try {
                  synchronized (htable) {
                     htable.put(put);
                     htable.flushCommits();
                  }
               } catch (IOException e) {
                  logger.error("ERROR: insert authId=" + authId, e);
               }
            }
         });
      }
      exec.shutdown();

      int count = 0;
      while (!exec.awaitTermination(10, TimeUnit.SECONDS)) {
         logger.warn("thread pool is still running");
         if (count++ > 3) {
            logger.warn("force to exit anyway...");
            break;
         }
      }

      htable.flushCommits();
      pool.putTable(htable);

   }

   @Test
   public void testFullScan() throws Exception {

      HTable htable = (HTable) pool.getTable(table);
      long last = Long.MIN_VALUE;

      ResultScanner rs = htable.getScanner(new Scan());
      long authId = 0;
      byte stat = 0;
      String strAuthId;
      String strStat;
      for (Result r : rs) {

         KeyValue kvId = r.getColumnLatest(cfs[0].getBytes(), colId.getBytes());
         KeyValue kvStat = r.getColumnLatest(cfs[0].getBytes(), colStat.getBytes());
         if (kvId != null && kvStat != null) {
            strAuthId = new String(kvId.getValue());
            strStat = new String(kvStat.getValue());
            authId = getIdByRowKey(kvId.getKey());
            stat = getStatByRowKey(kvId.getKey());
            assertTrue("last=" + last +
                  ", current=" + authId, authId >= last); // incremental sorted
            last = authId;
            logger.info("authId=" + authId + ", stat=" + stat + ", value=[" + strAuthId
                  + ", " + strStat + "]");
         } else {
            for (KeyValue kv : r.raw()) {
               authId = getIdByRowKey(kv.getKey());
               stat = getStatByRowKey(kv.getKey());
               assertTrue("last=" + last +
                     ", current=" + authId, authId >= last); // incremental sort
               last = authId;
               logger.info("authId=" + authId + ", stat=" + stat);
               logger.info(new String(kv.getValue()));
            }
         }
      }

   }

   @Test
   public void testSpecScan() throws Exception {
      HTable htable = (HTable) pool.getTable(table);
      long specId = getAuthId();
      byte[] rowkey = createRowKey(specId, (byte) 0);

      // PUT
      Put put = new Put(rowkey);
      put.add(cfs[0].getBytes(), colId.getBytes(), String.valueOf(specId)
            .getBytes());
      put.add(cfs[0].getBytes(), colStat.getBytes(), String.valueOf(0)
            .getBytes());
      htable.put(put);

      // Get with rowkey
      Get scan = new Get(rowkey);
      Result r = htable.get(scan);
      assertTrue(!r.isEmpty());
      long id = 0;
      for(KeyValue kv : r.raw()) {
         id = getIdByRowKey(kv.getKey());
         assertEquals(specId, id);
         logger.info("authId=" + id +
               ", cf=" + new String(kv.getFamily()) +
               ", key=" + new String(kv.getQualifier()) +
               ", value=" + new String(kv.getValue()));
      }

      // Put with specId but stat and different column
      rowkey = createRowKey(specId, (byte)1);
      put = new Put(rowkey);
      put.add(cfs[0].getBytes(), colCert.getBytes(), "xyz".getBytes());
      htable.put(put);

      // Get with rowkey prefix
      Scan s = new Scan();
      s.setFilter(new PrefixFilter(createRowKeyPrefix(specId)));
      ResultScanner rs = htable.getScanner(s);
      for(Result ret : rs) {
         String strk = new String(ret.getRow());
         logger.info("ret=" + strk);
         for(KeyValue kv : ret.raw()) {
            id = getIdByRowKey(kv.getKey());
            assertEquals(specId, id);
            logger.info("authId=" + id +
                  ", stat=" + getStatByRowKey(kv.getKey()) +
                  ", cf=" + new String(kv.getFamily()) +
                  ", key=" + new String(kv.getQualifier()) +
                  ", value=" + new String(kv.getValue()));
         }
      }

      // Get with start and end row
      s = new Scan();
      s.setStartRow(createRowKeyPrefix(specId));
      s.setStopRow(createRowKeyPrefix(specId+1));
      rs = htable.getScanner(s);
      for(Result ret : rs) {
         String strk = new String(ret.getRow());
         logger.info("ret=" + strk);
         for(KeyValue kv : ret.raw()) {
            id = getIdByRowKey(kv.getKey());
            assertEquals(specId, id);
            logger.info("authId=" + id +
                  ", stat=" + getStatByRowKey(kv.getKey()) +
                  ", cf=" + new String(kv.getFamily()) +
                  ", key=" + new String(kv.getQualifier()) +
                  ", value=" + new String(kv.getValue()));
         }
      }
   }

   @Test
   public void testBytesConv() throws Exception {
      long a = 120;
      byte s = 0;
      byte[] data = new byte[9];
      int off = Bytes.putLong(data, 0, a);
      Bytes.putByte(data, off, s);
      long b = Bytes.toLong(data);
      byte t = data[8];
      assertEquals(a, b);
      assertEquals(s, t);
   }

   private byte[] createRowKey(long authId, byte stat) {
      byte[] rowkey = new byte[ROW_KEY_LEN];
      int off = Bytes.putLong(rowkey, 0, authId);
      Bytes.putByte(rowkey, off, stat);
      return rowkey;
   }

   private byte[] createRowKeyPrefix(long authId) {
      byte[] prefix = new byte[Bytes.SIZEOF_LONG];
      Bytes.putLong(prefix, 0, authId);
      return prefix;
   }

   private long getIdByRowKey(byte[] rowkey) {
      // HACK: the argument is KeyValue.getKey(), which starts with a 2-byte row-length
      // prefix, so the 8-byte authId begins at offset SIZEOF_SHORT.
      return Bytes.toLong(rowkey, Bytes.SIZEOF_SHORT);
   }

   private byte getStatByRowKey(byte[] rowkey) {
      // HACK: same layout; the stat byte is the last byte of the 9-byte row key,
      // at offset SIZEOF_SHORT + ROW_KEY_LEN - 1 within KeyValue.getKey().
      return rowkey[Bytes.SIZEOF_SHORT + ROW_KEY_LEN - 1];
   }

   private long getAuthId() {
      long authId = rnd.nextLong();
      authId = authId > 0 ? authId : -authId;
      return authId;
   }

   private static InputStream getHbaseConfStream() throws Exception {
      return TestUseHbase.class.getClassLoader().getResourceAsStream("hbase-site.xml");
   }

}
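
A note on the row-key helpers above: getIdByRowKey() and getStatByRowKey() are handed KeyValue.getKey(), which begins with a 2-byte row-length prefix, hence the Bytes.SIZEOF_SHORT offsets. The same values can be read without any offset from Result.getRow(), which returns the bare row key; a sketch under that assumption:

      byte[] row = r.getRow();                 // 9-byte row key: 8-byte authId + 1-byte stat
      long authId = Bytes.toLong(row, 0);
      byte stat = row[Bytes.SIZEOF_LONG];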

7. The HBase test executes successfully:

2014-09-04 12:52:29  [ main:0 ] - [ DEBUG ]  field org.apache.hadoop.metrics2.lib.MutableRate org.apache.hadoop.security.UserGroupInformation$UgiMetrics.loginSuccess with annotation @org.apache.hadoop.metrics2.annotation.Metric(valueName=Time, about=, value=[Rate of successful kerberos logins and latency (milliseconds)], always=false, type=DEFAULT, sampleName=Ops)
2014-09-04 12:52:29  [ main:10 ] - [ DEBUG ]  field org.apache.hadoop.metrics2.lib.MutableRate org.apache.hadoop.security.UserGroupInformation$UgiMetrics.loginFailure with annotation @org.apache.hadoop.metrics2.annotation.Metric(valueName=Time, about=, value=[Rate of failed kerberos logins and latency (milliseconds)], always=false, type=DEFAULT, sampleName=Ops)
2014-09-04 12:52:29  [ main:11 ] - [ DEBUG ]  UgiMetrics, User and group related metrics
2014-09-04 12:52:29  [ main:253 ] - [ DEBUG ]  Kerberos krb5 configuration not found, setting default realm to empty
2014-09-04 12:52:29  [ main:257 ] - [ DEBUG ]   Creating new Groups object
2014-09-04 12:52:29  [ main:259 ] - [ DEBUG ]  Trying to load the custom-built native-hadoop library...
2014-09-04 12:52:29  [ main:261 ] - [ DEBUG ]  Failed to load native-hadoop with error: java.lang.UnsatisfiedLinkError: no hadoop in java.library.path
2014-09-04 12:52:29  [ main:261 ] - [ DEBUG ]  java.library.path=D:\Program Files\Java\jdk1.7.0_45\bin;C:\Windows\Sun\Java\bin;C:\Windows\system32;C:\Windows;D:\Perl64\bin;D:\Perl64\site\bin;C:\Program Files (x86)\Common Files\NetSarang;C:\Program Files (x86)\Intel\iCLS Client\;C:\Program Files\Intel\iCLS Client\;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files (x86)\Intel\OpenCL SDK\2.0\bin\x86;C:\Program Files (x86)\Intel\OpenCL SDK\2.0\bin\x64;C:\Program Files\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\IPT;D:\java\maven/bin;D:\Program Files\Java\jdk1.8.0/bin;d:\Program Files (x86)\YYXT\AudioEditorOCX;D:\Program Files\MySQL\MySQL Server 5.5\bin;D:\hadoop\apache-ant-1.9.3\bin;D:\Program Files\nodejs\;D:\Program Files\TortoiseSVN\bin;D:\Perl64\bin;D:\Perl64\site\bin;C:\Users\lenovo\AppData\Roaming\npm;.
2014-09-04 12:52:29  [ main:261 ] - [ WARN ]  Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2014-09-04 12:52:29  [ main:261 ] - [ DEBUG ]  Falling back to shell based
2014-09-04 12:52:29  [ main:262 ] - [ DEBUG ]  Group mapping impl=org.apache.hadoop.security.ShellBasedUnixGroupsMapping
2014-09-04 12:52:29  [ main:262 ] - [ DEBUG ]  Group mapping impl=org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback; cacheTimeout=300000
2014-09-04 12:52:29  [ main:268 ] - [ DEBUG ]  hadoop login
2014-09-04 12:52:29  [ main:268 ] - [ DEBUG ]  hadoop login commit
2014-09-04 12:52:29  [ main:274 ] - [ DEBUG ]  using local user:NTUserPrincipal: lenovo
2014-09-04 12:52:29  [ main:276 ] - [ DEBUG ]  UGI loginUser:lenovo (auth:SIMPLE)
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:zookeeper.version=3.4.5-1392090, built on 09/30/2012 17:52 GMT
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:host.name=qiaokai-PC
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:java.version=1.7.0_45
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:java.vendor=Oracle Corporation
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:java.home=D:\Program Files\Java\jdk1.7.0_45\jre
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:java.class.path=D:\Users\lenovo\koalaSP\dbhbase\target\classes;D:\java\mavenRepo\org\apache\hbase\hbase-client\0.96.2-hadoop2\hbase-client-0.96.2-hadoop2.jar;D:\java\mavenRepo\org\apache\hbase\hbase-common\0.96.2-hadoop2\hbase-common-0.96.2-hadoop2.jar;D:\java\mavenRepo\commons-collections\commons-collections\3.2.1\commons-collections-3.2.1.jar;D:\java\mavenRepo\org\apache\hbase\hbase-protocol\0.96.2-hadoop2\hbase-protocol-0.96.2-hadoop2.jar;D:\java\mavenRepo\commons-codec\commons-codec\1.7\commons-codec-1.7.jar;D:\java\mavenRepo\commons-io\commons-io\2.4\commons-io-2.4.jar;D:\java\mavenRepo\commons-lang\commons-lang\2.6\commons-lang-2.6.jar;D:\java\mavenRepo\commons-logging\commons-logging\1.1.1\commons-logging-1.1.1.jar;D:\java\mavenRepo\com\google\guava\guava\12.0.1\guava-12.0.1.jar;D:\java\mavenRepo\com\google\code\findbugs\jsr305\1.3.9\jsr305-1.3.9.jar;D:\java\mavenRepo\com\google\protobuf\protobuf-java\2.5.0\protobuf-java-2.5.0.jar;D:\java\mavenRepo\io\netty\netty\3.6.6.Final\netty-3.6.6.Final.jar;D:\java\mavenRepo\org\apache\zookeeper\zookeeper\3.4.5\zookeeper-3.4.5.jar;D:\java\mavenRepo\org\slf4j\slf4j-api\1.6.1\slf4j-api-1.6.1.jar;D:\java\mavenRepo\org\slf4j\slf4j-log4j12\1.6.1\slf4j-log4j12-1.6.1.jar;D:\java\mavenRepo\org\cloudera\htrace\htrace-core\2.04\htrace-core-2.04.jar;D:\java\mavenRepo\org\codehaus\jackson\jackson-mapper-asl\1.8.8\jackson-mapper-asl-1.8.8.jar;D:\java\mavenRepo\org\apache\hadoop\hadoop-common\2.2.0\hadoop-common-2.2.0.jar;D:\java\mavenRepo\org\apache\commons\commons-math\2.1\commons-math-2.1.jar;D:\java\mavenRepo\commons-httpclient\commons-httpclient\3.1\commons-httpclient-3.1.jar;D:\java\mavenRepo\commons-net\commons-net\3.1\commons-net-3.1.jar;D:\java\mavenRepo\com\sun\jersey\jersey-json\1.9\jersey-json-1.9.jar;D:\java\mavenRepo\org\codehaus\jettison\jettison\1.1\jettison-1.1.jar;D:\java\mavenRepo\stax\stax-api\1.0.1\stax-api-1.0.1.jar;D:\java\mavenRepo\com\sun\xml\bind\jaxb-impl\2.2.3-1\jaxb-impl-2.2.3-1.jar;D:\java\mavenRepo\javax\xml\bind\jaxb-api\2.2.2\jaxb-api-2.2.2.jar;D:\java\mavenRepo\javax\activation\activation\1.1\activation-1.1.jar;D:\java\mavenRepo\org\codehaus\jackson\jackson-jaxrs\1.8.3\jackson-jaxrs-1.8.3.jar;D:\java\mavenRepo\org\codehaus\jackson\jackson-xc\1.8.3\jackson-xc-1.8.3.jar;D:\java\mavenRepo\commons-el\commons-el\1.0\commons-el-1.0.jar;D:\java\mavenRepo\net\java\dev\jets3t\jets3t\0.6.1\jets3t-0.6.1.jar;D:\java\mavenRepo\commons-configuration\commons-configuration\1.6\commons-configuration-1.6.jar;D:\java\mavenRepo\commons-digester\commons-digester\1.8\commons-digester-1.8.jar;D:\java\mavenRepo\commons-beanutils\commons-beanutils\1.7.0\commons-beanutils-1.7.0.jar;D:\java\mavenRepo\commons-beanutils\commons-beanutils-core\1.8.0\commons-beanutils-core-1.8.0.jar;D:\java\mavenRepo\org\apache\avro\avro\1.7.4\avro-1.7.4.jar;D:\java\mavenRepo\com\thoughtworks\paranamer\paranamer\2.3\paranamer-2.3.jar;D:\java\mavenRepo\org\xerial\snappy\snappy-java\1.0.4.1\snappy-java-1.0.4.1.jar;D:\java\mavenRepo\com\jcraft\jsch\0.1.42\jsch-0.1.42.jar;D:\java\mavenRepo\org\apache\commons\commons-compress\1.4.1\commons-compress-1.4.1.jar;D:\java\mavenRepo\org\tukaani\xz\1.0\xz-1.0.jar;D:\java\mavenRepo\org\apache\hadoop\hadoop-auth\2.2.0\hadoop-auth-2.2.0.jar;D:\java\mavenRepo\org\apache\hadoop\hadoop-mapreduce-client-core\2.2.0\hadoop-mapreduce-client-core-2.2.0.jar;D:\java\mavenRepo\org\apache\hadoop\hadoop-yarn-common\2.2.0\hadoop-yarn-common-2.2.0.jar;D:\java\mavenRepo\org\apache\hadoop\h
adoop-yarn-api\2.2.0\hadoop-yarn-api-2.2.0.jar;D:\java\mavenRepo\com\google\inject\guice\3.0\guice-3.0.jar;D:\java\mavenRepo\javax\inject\javax.inject\1\javax.inject-1.jar;D:\java\mavenRepo\aopalliance\aopalliance\1.0\aopalliance-1.0.jar;D:\java\mavenRepo\com\sun\jersey\contribs\jersey-guice\1.9\jersey-guice-1.9.jar;D:\java\mavenRepo\com\google\inject\extensions\guice-servlet\3.0\guice-servlet-3.0.jar;D:\java\mavenRepo\org\apache\hadoop\hadoop-annotations\2.2.0\hadoop-annotations-2.2.0.jar;D:\java\mavenRepo\com\github\stephenc\findbugs\findbugs-annotations\1.3.9-1\findbugs-annotations-1.3.9-1.jar;D:\java\mavenRepo\junit\junit\4.11\junit-4.11.jar;D:\java\mavenRepo\org\hamcrest\hamcrest-core\1.3\hamcrest-core-1.3.jar;D:\java\mavenRepo\org\apache\hadoop\hadoop-hdfs\2.2.0\hadoop-hdfs-2.2.0.jar;D:\java\mavenRepo\org\mortbay\jetty\jetty\6.1.26\jetty-6.1.26.jar;D:\java\mavenRepo\org\mortbay\jetty\jetty-util\6.1.26\jetty-util-6.1.26.jar;D:\java\mavenRepo\com\sun\jersey\jersey-core\1.9\jersey-core-1.9.jar;D:\java\mavenRepo\com\sun\jersey\jersey-server\1.9\jersey-server-1.9.jar;D:\java\mavenRepo\asm\asm\3.1\asm-3.1.jar;D:\java\mavenRepo\commons-cli\commons-cli\1.2\commons-cli-1.2.jar;D:\java\mavenRepo\commons-daemon\commons-daemon\1.0.13\commons-daemon-1.0.13.jar;D:\java\mavenRepo\javax\servlet\jsp\jsp-api\2.1\jsp-api-2.1.jar;D:\java\mavenRepo\log4j\log4j\1.2.17\log4j-1.2.17.jar;D:\java\mavenRepo\javax\servlet\servlet-api\2.5\servlet-api-2.5.jar;D:\java\mavenRepo\org\codehaus\jackson\jackson-core-asl\1.8.8\jackson-core-asl-1.8.8.jar;D:\java\mavenRepo\tomcat\jasper-runtime\5.5.23\jasper-runtime-5.5.23.jar;D:\java\mavenRepo\xmlenc\xmlenc\0.52\xmlenc-0.52.jar;D:\Program Files\Java\jdk1.8.0\lib\tools.jar
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:java.library.path=D:\Program Files\Java\jdk1.7.0_45\bin;C:\Windows\Sun\Java\bin;C:\Windows\system32;C:\Windows;D:\Perl64\bin;D:\Perl64\site\bin;C:\Program Files (x86)\Common Files\NetSarang;C:\Program Files (x86)\Intel\iCLS Client\;C:\Program Files\Intel\iCLS Client\;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files (x86)\Intel\OpenCL SDK\2.0\bin\x86;C:\Program Files (x86)\Intel\OpenCL SDK\2.0\bin\x64;C:\Program Files\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\IPT;D:\java\maven/bin;D:\Program Files\Java\jdk1.8.0/bin;d:\Program Files (x86)\YYXT\AudioEditorOCX;D:\Program Files\MySQL\MySQL Server 5.5\bin;D:\hadoop\apache-ant-1.9.3\bin;D:\Program Files\nodejs\;D:\Program Files\TortoiseSVN\bin;D:\Perl64\bin;D:\Perl64\site\bin;C:\Users\lenovo\AppData\Roaming\npm;.
2014-09-04 12:52:29  [ main:418 ] - [ INFO ]  Client environment:java.io.tmpdir=C:\Users\lenovo\AppData\Local\Temp
2014-09-04 12:52:29  [ main:419 ] - [ INFO ]  Client environment:java.compiler=<NA>
2014-09-04 12:52:29  [ main:419 ] - [ INFO ]  Client environment:os.name=Windows 7
2014-09-04 12:52:29  [ main:419 ] - [ INFO ]  Client environment:os.arch=amd64
2014-09-04 12:52:29  [ main:419 ] - [ INFO ]  Client environment:os.version=6.1
2014-09-04 12:52:29  [ main:419 ] - [ INFO ]  Client environment:user.name=lenovo
2014-09-04 12:52:29  [ main:419 ] - [ INFO ]  Client environment:user.home=C:\Users\lenovo
2014-09-04 12:52:29  [ main:419 ] - [ INFO ]  Client environment:user.dir=D:\Users\lenovo\koalaSP\dbhbase
2014-09-04 12:52:29  [ main:420 ] - [ INFO ]  Initiating client connection, connectString=compute1:2181 sessionTimeout=90000 watcher=hconnection-0xda5a705, quorum=compute1:2181, baseZNode=/hbase
2014-09-04 12:52:29  [ main:425 ] - [ DEBUG ]  zookeeper.disableAutoWatchReset is false
2014-09-04 12:52:29  [ main:457 ] - [ INFO ]  Process identifier=hconnection-0xda5a705 connecting to ZooKeeper ensemble=compute1:2181
2014-09-04 12:52:29  [ main-SendThread(compute1:2181):458 ] - [ INFO ]  Opening socket connection to server compute1/192.168.11.39:2181. Will not attempt to authenticate using SASL (unknown error)
2014-09-04 12:52:29  [ main-SendThread(compute1:2181):459 ] - [ INFO ]  Socket connection established to compute1/192.168.11.39:2181, initiating session
2014-09-04 12:52:29  [ main-SendThread(compute1:2181):461 ] - [ DEBUG ]  Session establishment request sent on compute1/192.168.11.39:2181
2014-09-04 12:52:29  [ main-SendThread(compute1:2181):472 ] - [ INFO ]  Session establishment complete on server compute1/192.168.11.39:2181, sessionid = 0x2483a55a18c0013, negotiated timeout = 40000
2014-09-04 12:52:29  [ main-EventThread:474 ] - [ DEBUG ]  hconnection-0xda5a705, quorum=compute1:2181, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2014-09-04 12:52:29  [ main-EventThread:476 ] - [ DEBUG ]  hconnection-0xda5a705-0x2483a55a18c0013 connected
2014-09-04 12:52:29  [ main-SendThread(compute1:2181):477 ] - [ DEBUG ]  Reading reply sessionid:0x2483a55a18c0013, packet:: clientPath:null serverPath:null finished:false header:: 1,3  replyHeader:: 1,4294967438,0  request:: ‘/hbase/hbaseid,F  response:: s{4294967310,4294967310,1409728069737,1409728069737,0,0,0,0,67,0,4294967310}
2014-09-04 12:52:29  [ main-SendThread(compute1:2181):480 ] - [ DEBUG ]  Reading reply sessionid:0x2483a55a18c0013, packet:: clientPath:null serverPath:null finished:false header:: 2,4  replyHeader:: 2,4294967438,0  request:: ‘/hbase/hbaseid,F  response:: #ffffffff000146d61737465723a363030303033ffffff8036ffffff94ffffffabcfffffffd6750425546a2430643537303664662d653431622d343332382d383833342d356533643531363362393736,s{4294967310,4294967310,1409728069737,1409728069737,0,0,0,0,67,0,4294967310}
2014-09-04 12:52:30  [ main:755 ] - [ DEBUG ]  [email protected], compressor=null, tcpKeepAlive=true, tcpNoDelay=true, maxIdleTime=10000, maxRetries=0, fallbackAllowed=false, ping interval=60000ms, bind address=null
2014-09-04 12:52:30  [ main-SendThread(compute1:2181):776 ] - [ DEBUG ]  Reading reply sessionid:0x2483a55a18c0013, packet:: clientPath:null serverPath:null finished:false header:: 3,4  replyHeader:: 3,4294967438,0  request:: ‘/hbase/meta-region-server,F  response:: #ffffffff0001a726567696f6e7365727665723a3630303230ffffffff133cffffff88341c1effffffef50425546a15a8636f6d707574653110fffffff4ffffffd4318ffffffe4ffffffefffffffdcffffffd2ffffff8329100,s{4294967339,4294967339,1409728076023,1409728076023,0,0,0,0,60,0,4294967339}
2014-09-04 12:52:30  [ main-SendThread(compute1:2181):789 ] - [ DEBUG ]  Reading reply sessionid:0x2483a55a18c0013, packet:: clientPath:null serverPath:null finished:false header:: 4,4  replyHeader:: 4,4294967438,0  request:: ‘/hbase/meta-region-server,F  response:: #ffffffff0001a726567696f6e7365727665723a3630303230ffffffff133cffffff88341c1effffffef50425546a15a8636f6d707574653110fffffff4ffffffd4318ffffffe4ffffffefffffffdcffffffd2ffffff8329100,s{4294967339,4294967339,1409728076023,1409728076023,0,0,0,0,60,0,4294967339}
2014-09-04 12:52:30  [ main-SendThread(compute1:2181):798 ] - [ DEBUG ]  Reading reply sessionid:0x2483a55a18c0013, packet:: clientPath:null serverPath:null finished:false header:: 5,4  replyHeader:: 5,4294967438,0  request:: ‘/hbase/meta-region-server,F  response:: #ffffffff0001a726567696f6e7365727665723a3630303230ffffffff133cffffff88341c1effffffef50425546a15a8636f6d707574653110fffffff4ffffffd4318ffffffe4ffffffefffffffdcffffffd2ffffff8329100,s{4294967339,4294967339,1409728076023,1409728076023,0,0,0,0,60,0,4294967339}
2014-09-04 12:52:30  [ main:1209 ] - [ DEBUG ]  Use SIMPLE authentication for service ClientService, sasl=false
2014-09-04 12:52:30  [ main:1218 ] - [ DEBUG ]  Connecting to compute1/192.168.11.39:60020
2014-09-04 12:52:30  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1225 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: starting, connections 1
2014-09-04 12:52:30  [ main:1291 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: wrote request header call_id: 0 method_name: "Get" request_param: true
2014-09-04 12:52:30  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1291 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: got response header call_id: 0, totalSize: 435 bytes
2014-09-04 12:52:31  [ main-SendThread(compute1:2181):1613 ] - [ DEBUG ]  Reading reply sessionid:0x2483a55a18c0013, packet:: clientPath:null serverPath:null finished:false header:: 6,4  replyHeader:: 6,4294967438,0  request:: ‘/hbase/meta-region-server,F  response:: #ffffffff0001a726567696f6e7365727665723a3630303230ffffffff133cffffff88341c1effffffef50425546a15a8636f6d707574653110fffffff4ffffffd4318ffffffe4ffffffefffffffdcffffffd2ffffff8329100,s{4294967339,4294967339,1409728076023,1409728076023,0,0,0,0,60,0,4294967339}
2014-09-04 12:52:31  [ main:1751 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: wrote request header call_id: 1 method_name: "Scan" request_param: true
2014-09-04 12:52:31  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1752 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: got response header call_id: 1, totalSize: 13 bytes
2014-09-04 12:52:31  [ main:1762 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: wrote request header call_id: 2 method_name: "Scan" request_param: true priority: 100
2014-09-04 12:52:31  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1769 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: got response header call_id: 2 cell_block_meta { length: 1359 }, totalSize: 1383 bytes
2014-09-04 12:52:31  [ main:1772 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: wrote request header call_id: 3 method_name: "Scan" request_param: true
2014-09-04 12:52:31  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1773 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: got response header call_id: 3, totalSize: 9 bytes
2014-09-04 12:52:31  [ main:1792 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: wrote request header call_id: 4 method_name: "Scan" request_param: true
2014-09-04 12:52:31  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1793 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: got response header call_id: 4, totalSize: 13 bytes
2014-09-04 12:52:31  [ main:1794 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: wrote request header call_id: 5 method_name: "Scan" request_param: true
2014-09-04 12:52:31  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1795 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: got response header call_id: 5, totalSize: 13 bytes
2014-09-04 12:52:31  [ main:1795 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: wrote request header call_id: 6 method_name: "Scan" request_param: true
2014-09-04 12:52:31  [ IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo:1796 ] - [ DEBUG ]  IPC Client (1568147101) connection to compute1/192.168.11.39:60020 from lenovo: got response header call_id: 6, totalSize: 9 bytes

Summary:

1. Extract a copy of hadoop-2.2.0.tar.gz into a program directory on Windows 7 (the Hadoop version must exactly match the cluster's version), then copy the following configuration files from the cluster over the corresponding files in the local directory:

core-site.xml

hdfs-site.xml

mapred-site.xml

yarn-site.xml

2. After creating the Java project in Eclipse, the simplest approach is to import all Hadoop 2.2.0 jars directly, including the jars under the following directories:

share\hadoop\common

share\hadoop\hdfs

share\hadoop\mapreduce

share\hadoop\yarn

Note: this step is unnecessary if you use the Hadoop Eclipse plugin, but the 2.2.0 plugin has to be compiled yourself; the build process is covered in my other post:

http://blog.csdn.net/fulongfbi/article/details/23850575

3. Set the %HADOOP_HOME% environment variable on Windows 7 and add %HADOOP_HOME%\bin to the PATH environment variable.

4. Download https://github.com/srccodes/hadoop-common-2.2.0-bin and, after extracting it, overwrite %HADOOP_HOME%\bin with the downloaded bin directory.

5. Pay attention to the Hadoop cluster's configuration: the "Hadoop address:port" configured in the Eclipse program must match the cluster's configuration, for example:

<property>
  <name>fs.default.name</name>
  <value>hdfs://singlehadoop:8020</value>
</property>
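
If you prefer not to keep copies of the cluster XML files on the client classpath, the same address can be set on the client Configuration directly; a sketch (replace the host and port with your cluster's values):

    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://singlehadoop:8020");   // fs.defaultFS is the 2.x name; the old key is still honored
    FileSystem fs = FileSystem.get(conf);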

6. Add the following property to hdfs-site.xml on the Hadoop cluster to disable permission checking:

<property>
  <name>dfs.permissions</name>
  <value>false</value>
</property>
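
Disabling permission checking is convenient for development but loosens security cluster-wide. A lighter-weight alternative, worth verifying in your environment, is to have the Windows client identify itself as an HDFS user that owns the target directories; with SIMPLE authentication Hadoop honors the HADOOP_USER_NAME environment variable or system property. A sketch ("hadoop" is a hypothetical user name):

    // run before the first FileSystem access
    System.setProperty("HADOOP_USER_NAME", "hadoop");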

7. HBase client configuration (hbase-site.xml):

<property>
<name>hbase.zookeeper.quorum</name>
<value>compute1</value>
</property>

Be sure to set the quorum value to hostnames; the quorum members must be the cluster's slave nodes, and there must be an odd number of them.
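
The quorum can also be supplied in code when building the client Configuration, which avoids shipping an hbase-site.xml with the test. A sketch, assuming org.apache.hadoop.hbase.HBaseConfiguration is on the classpath (the hostname and port are this cluster's values and should be adapted):

   Configuration conf = HBaseConfiguration.create();
   conf.set("hbase.zookeeper.quorum", "compute1");
   conf.set("hbase.zookeeper.property.clientPort", "2181");
   HTablePool pool = new HTablePool(conf, 1000);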

On Windows, edit the hosts file under C:\Windows\System32\drivers\etc so that it matches the host mappings used by the cluster servers:

192.168.14.20 CS020
192.168.14.16 CS016
192.168.11.37 master
192.168.11.39 compute1
192.168.11.40 thinkit-4

 
