1. Start Hadoop
Start the HDFS and YARN daemons first (with a 2.6.0 tarball install, typically sbin/start-dfs.sh followed by sbin/start-yarn.sh).
2. Install the plugin into Eclipse
Copy hadoop-eclipse-plugin-2.6.0.jar into Eclipse's plugins directory, then restart Eclipse.
3. Create a connection in the Map/Reduce Locations view (single-machine setup)
When defining the Hadoop location, the DFS Master host and port should match fs.default.name in core-site.xml below (127.0.0.1:8020).
4. Create the project, import the jars and the configuration files
Tip: the project uses Hadoop's jar packages, so import all of the Hadoop jars into the new project! (In a 2.6.0 distribution they sit under share/hadoop/common, share/hadoop/hdfs, share/hadoop/mapreduce and share/hadoop/yarn, plus their lib subdirectories.) The three site files below also need to be on the project classpath, because the loader in step 5 reads them with getResource().
core-site.xml:
<?xml version="1.0" encoding="UTF-8"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> <!-- Put site-specific property overrides in this file. --> <configuration> <property> <name>fs.default.name</name> <value>hdfs://127.0.0.1:8020</value> </property> <property> <name>hadoop.tmp.dir</name> <value>/home/tian/Downloads/hadhoop/data/tmp</value> </property> <property> <name>fs.checkpoint.period</name> <value>300</value> </property> <property> <name>fs.checkpoint.dir</name> <value>${hadoop.tmp.dir}/dfs/namesecondary</value> </property> </configuration>
hdfs-site.xml:
<?xml version="1.0" encoding="UTF-8"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> <!-- Put site-specific property overrides in this file. --> <configuration> <property> <name>dfs.replication</name> <value>1</value> </property> <property> <name>dfs.namenode.name.dir</name> <value>file:/home/tian/Downloads/hadhoop/data/hdfs/namenode</value> </property> <property> <name>dfs.datanode.data.dir</name> <value>file:/home/tian/Downloads/hadhoop/data/hdfs/datanode</value> </property> <property> <name>dfs.http.address</name> <value>0.0.0.0:50070</value> </property> <property> <name>dfs.datanode.http.address</name> <value>0.0.0.0:50075</value> </property> <property> <name>dfs.permissions</name> <value>false</value> </property> </configuration>
yarn-site.xml:
<?xml version="1.0"?> <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> <configuration> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> <property> <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name> <value>org.apache.hadoop.mapred.ShuffleHandler</value> </property> </configuration>
5. File operations with Hadoop
Loading the configuration:
import org.apache.hadoop.conf.Configuration;

/**
 * Hadoop configuration loader.
 * @author tian
 */
public class HadoopConfig {

    private static Configuration configuration;

    private HadoopConfig() {
    }

    // Lazily build one shared Configuration from the three site files on the classpath.
    public static Configuration getConfiguration() {
        if (configuration == null) {
            configuration = new Configuration();
            configuration.addResource(HadoopConfig.class.getResource("core-site.xml"));
            configuration.addResource(HadoopConfig.class.getResource("hdfs-site.xml"));
            configuration.addResource(HadoopConfig.class.getResource("yarn-site.xml"));
        }
        return configuration;
    }
}
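To verify that the site files are actually being picked up, a quick check can print the file system URI (a minimal sketch; the class name ConfigCheck is just illustrative):

import org.apache.hadoop.fs.FileSystem;

public class ConfigCheck {
    public static void main(String[] args) throws Exception {
        // Should print hdfs://127.0.0.1:8020 if core-site.xml was found on the classpath.
        FileSystem fileSystem = FileSystem.get(HadoopConfig.getConfiguration());
        System.out.println(fileSystem.getUri());
        fileSystem.close();
    }
}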
Basic operations:
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Create a directory
public static void mkdir(String dirPath) throws IOException {
    Configuration configuration = HadoopConfig.getConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    fileSystem.mkdirs(new Path(dirPath));
    fileSystem.close();
}

// Create an empty file
public static void createFile(String filePath) throws IOException {
    Configuration configuration = HadoopConfig.getConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    fileSystem.create(new Path(filePath)).close(); // close the stream that create() opens
    fileSystem.close();
}

// Delete a directory or file
public static void deleteFile(String filePath) throws IOException {
    Configuration configuration = HadoopConfig.getConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    // deleteOnExit() only marks the path; it is removed when the FileSystem is closed.
    // Use fileSystem.delete(path, true) to delete immediately.
    fileSystem.deleteOnExit(new Path(filePath));
    fileSystem.close();
}

// List the entries under a path
public static void getListFile(String filePath) throws IOException {
    Configuration configuration = HadoopConfig.getConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    FileStatus[] fileStatus = fileSystem.listStatus(new Path(filePath));
    for (FileStatus file_Status : fileStatus) {
        System.out.println(file_Status.getPath().toString());
    }
    fileSystem.close();
}

// Upload a local file to HDFS
public static void upLoadFile(String src, String dest) throws IOException {
    Configuration configuration = HadoopConfig.getConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    fileSystem.copyFromLocalFile(new Path(src), new Path(dest));
    fileSystem.close();
}

// Download a file from HDFS to the local disk
public static void downloadFile(String src, String dest) throws IOException {
    Configuration configuration = HadoopConfig.getConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    fileSystem.copyToLocalFile(new Path(src), new Path(dest));
    fileSystem.close();
}

// Write text into a file
public static void writeFile(String filePath) throws IOException {
    Configuration configuration = HadoopConfig.getConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    Path path = new Path(filePath);
    FSDataOutputStream out = fileSystem.create(path);
    out.writeUTF("I'm no straight-A student, hahaha");
    out.close(); // flush and close the stream, or the content may never reach HDFS
    fileSystem.close();
}
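All of these helpers are static, so a main method dropped into the same class makes a quick smoke test (a minimal sketch; the /test paths are only examples):

public static void main(String[] args) throws IOException {
    mkdir("/test");                           // create a directory
    writeFile("/test/hello.txt");             // create a file and write one line into it
    getListFile("/test");                     // prints hdfs://127.0.0.1:8020/test/hello.txt
    downloadFile("/test/hello.txt", "/tmp");  // copy it back to the local file system
}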
Note: each of these can be run as a plain Java application (for instance the main method sketched above), and the results can be checked with a few simple HDFS shell commands, for example:
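hdfs dfs -ls /test             # list the directory created by mkdir()
hdfs dfs -cat /test/hello.txt  # show the text written by writeFile()

(On 2.6.0 the older "hadoop fs ..." form of these commands works the same way.)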
The basic commands work much like their Linux counterparts; the full reference is on the official site: http://hadoop.apache.org/ . The results of the file operations can of course also be browsed inside Eclipse, in the plugin's DFS Locations tree.