一、首先自然是导包
$HADOOP_HOME/share/hadoop/common/*.jar
$HADOOP_HOME/share/hadoop/common/lib/*.jar
$HADOOP_HOME/share/hadoop/hdfs/*.jar
$HADOOP_HOME/share/hadoop/hdfs/lib/*.jar
二、代码如下
package com.stu.hdfs;
/*** * @author ysw28* HDFS的API操作*/import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.Arrays;import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;public class TestDemo319 {@Test// 创建目录public void mkDir() {try {// 连接HDFS,设置副本数Configuration conf = new Configuration();conf.set("dfs.replication", "1");// 创建一个客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 创建目录Path path = new Path("/Demo319");client.mkdirs(path);// 关闭客户端client.close();System.out.println("创建目录成功");} catch (Exception e) {e.printStackTrace();System.out.println("创建目录失败");}}@Test// 删除目录,删除文件同理public void delDir() {try {// 连接HDFSConfiguration conf = new Configuration();// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 删除目录Path path = new Path("/Demo319");if (client.exists(path)) {client.delete(path, true);// 关闭客户端client.close();System.out.println("删除目录成功");} else {System.out.println("没有这个文件");}} catch (Exception e) {e.printStackTrace();System.out.println("删除目录失败");}}@Test// 上传文件public void copyFromLocal() {try {// 连接HDFS,指定副本数Configuration conf = new Configuration();conf.set("dfs.replication", "1");// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://Hp110:9000"), conf, "root");// 上传文件client.copyFromLocalFile(new Path("C:\\Users\\ysw28\\Desktop\\hadoop-2.8.5.tar.gz"), new Path("/Demo319"));// 关闭客户端client.close();System.out.println("上传文件成功");} catch (Exception e) {e.printStackTrace();System.out.println("上传文件失败");}}@Test// 下载文件public void copyToFrom() {try {// 连接HDFSConfiguration conf = new Configuration();// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 下载文件client.copyToLocalFile(true, new Path("/Demo319/hadoop-2.8.5.tar.gz"), new Path("E://"), true);// 关闭客户端client.close();System.out.println("上传文件成功");} catch (Exception e) {e.printStackTrace();System.out.println("上传文件失败");}}@Test// 使用HDFS的IO流上传文件public void putFileToHdfs() {try {// 连接HDFS,指定副本数Configuration conf = new Configuration();conf.set("dfs.replication", "1");// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 
上传文件,构造一个输入流InputStream input = new FileInputStream("E:\\hadoop-2.8.5.tar.gz");// 构造一个输出流OutputStream output = client.create(new Path("/Demo319/hadoop.tar.gz"));// 构造一个缓存区byte buffer[] = new byte[1024];// 指定长度int len = 0;// 读取数据while ((len = input.read(buffer)) > 0) {// 写入数据output.write(buffer, 0, len);}// 刷新output.flush();// 关闭流output.close();input.close();// 关闭客户端client.close();System.out.println("通过HDFS的IO流上传文件成功");} catch (Exception e) {e.printStackTrace();System.out.println("通过HDFS的IO流上传文件失败");}}@Test// 使用HDFS的IO流下载文件public void getFileFromHdfs() {try {// 连接HDFSConfiguration conf = new Configuration();// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 下载文件,构造一个输入流InputStream input = client.open(new Path("/Demo319/hadoop.tar.gz"));// 构造一个输出流OutputStream output = new FileOutputStream("E://1.tar.gz");// 使用工具类IOUtils.copyBytes(input, output, 1024);// 关闭客户端client.close();System.out.println("通过HDFS的IO流下载文件成功");} catch (Exception e) {e.printStackTrace();System.out.println("通过HDFS的IO流下载文件失败");}}@Test// 更改文件名,更改目录名同理public void reName() {try {// 连接HDFSConfiguration conf = new Configuration();// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 更改文件名Path oldpath = new Path("/Demo319/1.txt");Path newPath = new Path("/Demo319/2.txt");client.rename(oldpath, newPath);// 关闭客户端client.close();System.out.println("更改文件名成功");} catch (Exception e) {e.printStackTrace();System.out.println("更改文件名失败");}}@Test// 写入数据public void writeFile() {try {// 连接HDFS,配置副本数Configuration conf = new Configuration();conf.set("dfs.replication", "1");// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 写入数据FSDataOutputStream output = client.create(new Path("/Demo319/3.txt"));output.writeUTF("我喜欢大数据");// 关闭流output.close();// 关闭客户端client.close();System.out.println("写入文件成功");} catch (Exception e) {e.printStackTrace();System.out.println("写入文件失败");}}@Test// 读取文件public void readFile() {try {// 
连接HDFSConfiguration conf = new Configuration();// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 读取文件Path path = new Path("/Demo319/3.txt");if (client.exists(path)) {InputStream input = client.open(path);// 构造一个缓存区byte buffer[] = new byte[1024];int foot = 0;// 指定数组的下标int temp = 0;// 指定接受每次读取的字节数据while ((temp = input.read()) != -1) {buffer[foot++] = (byte) temp;}System.out.println(new String(buffer, 0, foot));// 关闭流input.close();// 关闭客户端client.close();System.out.println("读取文件成功");} else {System.out.println("没有这个文件");}} catch (Exception e) {e.printStackTrace();System.out.println("读取文件失败");}}@Test// 判断HDFS上是否存在某文件public void hdfsFileExists() {try {// 连接HDFS// 指定当前使用的用户是rootSystem.setProperty("HADOOP_USER_NAME", "root");// 配置参数,指定NameNode地址Configuration conf = new Configuration();conf.set("fs.defaultFS", "hdfs://HP110:9000");// 创建一个客户端FileSystem client = FileSystem.get(conf);// 判断是否存在某文件if (client.exists(new Path("/Demo/1.txt"))) {System.out.println("存在文件1.txt");} else {System.out.println("不存在文件1.txt");}System.out.println("没有出现错误");} catch (Exception e) {e.printStackTrace();System.out.println("出现错误");}}@Test// 查看文件信息public void getFileInfo() {try {// 连接HDFSConfiguration conf = new Configuration();// 创建客户端FileSystem client = FileSystem.get(new URI("hdfs://HP110:9000"), conf, "root");// 查看文件信息Path path = new Path("/");RemoteIterator<LocatedFileStatus> listFiles = client.listFiles(path, true);while (listFiles.hasNext()) {LocatedFileStatus status = listFiles.next();System.out.println(status.getPath().getName()); // 获取文件名System.out.println(status.getGroup()); // 获取所在组System.out.println(status.getLen());// 文件大小System.out.println(status.getPermission());// 获取权限// 获取数据块信息BlockLocation[] blockLocations = status.getBlockLocations();for (BlockLocation blockLocation : blockLocations) {// 数据块所在主机名String[] hosts = blockLocation.getHosts();for (String host : hosts) {System.out.println(host);}// 数据块所在主机IPString[] names = 
blockLocation.getNames();for (String name : names) {System.out.println(name);}}System.out.println("这是一个分割线-----------------------------");}System.out.println("查看文件信息成功");} catch (Exception e) {e.printStackTrace();System.out.println("查看文件信息失败");}}@Test// 获取数据块信息public void getBlockInfo() {try {// 连接HDFS// 指定使用的用户是rootSystem.setProperty("HADOOP_USER_NAME", "root");// 配置参数,指明NameNode的地址Configuration conf = new Configuration();conf.set("fs.defaultFS", "hdfs://HP110:9000");// 创建一个客户端FileSystem client = FileSystem.get(conf);// 获取文件status信息FileStatus fileStatus = client.getFileStatus(new Path("/Demo/100.txt"));// 获取文件的数据块信息BlockLocation locaktions[] = client.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());/** 将数组转换为字符串 语法:Arrays.toString(数组名);*/for (BlockLocation blockLocation : locaktions) {System.out.println(Arrays.toString(blockLocation.getHosts()) + "\t" + Arrays.toString(blockLocation.getNames()));System.out.println(fileStatus);}// 关闭客户端client.close();System.out.println("获取数据块信息成功");} catch (Exception e) {e.printStackTrace();System.out.println("获取数据块信息失败");}}@Test// 查看HDFS中所有的文件和内容,想看具体的话就在/后面累加文件的名字public void getListFileInfo() {try {// 连接HDFS// 指定当前用户是rootSystem.setProperty("HADOOP_USER_NAME", "root");// 配置参数,指定NameNode地址Configuration conf = new Configuration();conf.set("fs.defaultFS", "hdfs://HP110:9000");// 创建一个客户端FileSystem client = FileSystem.get(conf);// 指定查看地址Path path = new Path("/Demo/100.txt");FileStatus list[] = client.listStatus(path);for (FileStatus fileStatus : list) {System.out.println(fileStatus);}System.out.println("查看文件成功");} catch (Exception e) {e.printStackTrace();System.out.println("查看文件失败");}}}
任何程序错误,以及技术疑问或需要解答的,请联系作者。