【Spark-HDFS Small File Merging】Merging Small HDFS Files with Spark
- 1) Import dependencies
- 2) Code implementation
- 2.1. HDFSUtils
- 2.2. MergeFilesApplication
Requirements:
1. Use Spark to merge small files and compress the merged output.
2. In production, the related configuration, logs, and per-run details can be recorded in MySQL (a minimal JDBC sketch follows this list).
3. Place core-site.xml, hdfs-site.xml, hive-site.xml, yarn-site.xml, and similar cluster files in the project's resources directory for authentication.
4. The example below extracts only the core code; a real implementation should build on the HDFS utility class, use MySQL for configuration, logging, and detail records, and merge files according to your own business needs.
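For point 2, the sketch below shows one minimal way the merge job could record per-directory results in MySQL over JDBC (mysql-connector-java is already declared in the pom). The table name merge_file_log, its columns, and the connection URL/credentials are hypothetical placeholders rather than part of the original project; adapt them to your own schema.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class MergeJobLogDao {

    // Hypothetical connection settings; replace with your own MySQL instance and credentials.
    private static final String URL = "jdbc:mysql://localhost:3306/merge_meta?useSSL=false&serverTimezone=UTC";
    private static final String USER = "merge_user";
    private static final String PASSWORD = "merge_password";

    /**
     * Record one merge run for a directory. The merge_file_log table is assumed to have
     * columns (source_dir, file_count_before, file_count_after, total_size_bytes, status).
     */
    public static void logMergeResult(String sourceDir, long filesBefore, long filesAfter,
                                      long totalSizeBytes, String status) {
        String sql = "INSERT INTO merge_file_log "
                + "(source_dir, file_count_before, file_count_after, total_size_bytes, status) "
                + "VALUES (?, ?, ?, ?, ?)";
        try (Connection conn = DriverManager.getConnection(URL, USER, PASSWORD);
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, sourceDir);
            ps.setLong(2, filesBefore);
            ps.setLong(3, filesAfter);
            ps.setLong(4, totalSizeBytes);
            ps.setString(5, status);
            ps.executeUpdate();
        } catch (SQLException e) {
            throw new RuntimeException("Failed to write merge log to MySQL", e);
        }
    }
}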
1) Import dependencies
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>test.cn.suitcase</groupId>
    <artifactId>mergefiles</artifactId>
    <version>4.0.0</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
        <java.version>1.8</java.version>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <encoding>UTF-8</encoding>
        <!-- <spark.version>3.0.2</spark.version> -->
        <spark.version>2.4.8</spark.version>
        <scala.version>2.11.12</scala.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-core</artifactId>
            <version>2.20.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>3.3.2</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>3.3.2</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>3.3.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-api</artifactId>
            <version>2.20.0</version>
        </dependency>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-compiler</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-reflect</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-launcher_2.11</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>2.0.32</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>8.0.33</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
            <version>2.14.2</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- Java compiler -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.1</version>
                <configuration>
                    <source>${java.version}</source>
                    <target>${java.version}</target>
                </configuration>
            </plugin>
            <!-- We use the maven-shade plugin to create a fat jar that contains all necessary dependencies. -->
            <!-- Change the value of <mainClass>...</mainClass> if your program entry point changes. -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.0.0</version>
                <executions>
                    <!-- Run the shade goal on the package phase -->
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <artifactSet>
                                <excludes>
                                    <exclude>org.apache.flink:force-shading</exclude>
                                    <exclude>com.google.code.findbugs:jsr305</exclude>
                                    <exclude>org.slf4j:*</exclude>
                                    <exclude>org.apache.logging.log4j:*</exclude>
                                </excludes>
                            </artifactSet>
                            <filters>
                                <filter>
                                    <!-- Do not copy the signatures in the META-INF folder.
                                         Otherwise, this might cause SecurityExceptions when using the JAR. -->
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.22.1</version>
                <configuration>
                    <groups>IntegrationTest</groups>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
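Because the pom also declares spark-launcher_2.11, the merge job can be submitted programmatically instead of through spark-submit. The sketch below is only illustrative: the fat-jar path, the YARN master and deploy mode, the executor memory, the main-class package name, and the three program arguments are all assumptions.

import org.apache.spark.launcher.SparkAppHandle;
import org.apache.spark.launcher.SparkLauncher;

public class MergeFilesLauncher {

    public static void main(String[] args) throws Exception {
        // All paths, the master, and the class name below are assumptions for illustration.
        SparkAppHandle handle = new SparkLauncher()
                .setAppResource("/opt/jobs/mergefiles-4.0.0.jar")        // fat jar built by the shade plugin
                .setMainClass("test.cn.suitcase.MergeFilesApplication")  // assumed package + class
                .setMaster("yarn")
                .setDeployMode("cluster")
                .setConf(SparkLauncher.EXECUTOR_MEMORY, "4g")
                .addAppArgs("/data/source_dir", "/data/target_merged_dir", "8")
                .startApplication();

        // Block until the application reaches a terminal state.
        while (!handle.getState().isFinal()) {
            Thread.sleep(5000);
        }
        System.out.println("Final state: " + handle.getState());
    }
}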
2) Code implementation
2.1. HDFSUtils
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;

/**
 * Thin wrapper around the HDFS FileSystem API used by the merge job.
 */
public class HDFSUtils {

    private static final Logger logger = LoggerFactory.getLogger(HDFSUtils.class);
    private static final Configuration hdfsConfig = new Configuration();
    private static FileSystem fs;

    /** Load core-site.xml / hdfs-site.xml from the classpath and open the FileSystem. */
    public static void init() {
        System.out.println(Thread.currentThread().getContextClassLoader());
        try {
            hdfsConfig.addResource(Thread.currentThread().getContextClassLoader().getResource("./core-site.xml"));
            hdfsConfig.addResource(Thread.currentThread().getContextClassLoader().getResource("./hdfs-site.xml"));
            fs = FileSystem.get(hdfsConfig);
        } catch (FileNotFoundException fnfe) {
            fnfe.printStackTrace();
            logger.error("Load properties failed.");
        } catch (IOException ioe) {
            ioe.printStackTrace();
            logger.error("IOException: " + ioe.getMessage());
        }
    }

    /** Total size in bytes of all files under the given directory. */
    public static long getDirectorySize(String directoryPath) {
        final Path path = new Path(directoryPath);
        long size = 0;
        try {
            size = fs.getContentSummary(path).getLength();
        } catch (IOException ex) {
            logger.error("Get directory size failed: " + ex.getMessage());
        }
        return size;
    }

    /** Number of files under the given directory. */
    public static long getFileCount(String directoryPath) {
        final Path path = new Path(directoryPath);
        long count = 0;
        try {
            count = fs.getContentSummary(path).getFileCount();
        } catch (IOException ex) {
            logger.error("Get file count failed: " + ex.getMessage());
        }
        return count;
    }

    /** Default HDFS block size of the current file system. */
    public static long getBlockSize() {
        return fs.getDefaultBlockSize(fs.getHomeDirectory());
    }

    /** Read a file on HDFS into a UTF-8 string. */
    public static String getFile(String filePath) {
        final Path path = new Path(filePath);
        FSDataInputStream dis = null;
        String fileContent = null;
        try {
            if (fs.exists(path) && fs.isFile(path)) {
                dis = fs.open(path);
                StringWriter stringWriter = new StringWriter();
                IOUtils.copy(dis, stringWriter, "UTF-8");
                fileContent = stringWriter.toString();
                return fileContent;
            } else {
                throw new FileNotFoundException();
            }
        } catch (IOException ioException) {
            logger.error("Get file from hdfs failed: " + ioException.getMessage());
        } finally {
            if (dis != null) {
                try {
                    dis.close();
                } catch (IOException ex) {
                    logger.error("close FSDataInputStream failed: " + ex.getMessage());
                }
            }
        }
        return fileContent;
    }

    /** Whether the given path exists on HDFS. */
    public static Boolean exists(String filePath) {
        Path path = new Path(filePath);
        Boolean ifExists = false;
        try {
            ifExists = fs.exists(path);
            return ifExists;
        } catch (IOException ex) {
            logger.error(String.format("Check existence of hdfs path %s failed", filePath));
        }
        return ifExists;
    }

    /** Rename a directory; fails if the target name already exists. */
    public static boolean renameDir(String existingName, String newName) {
        final Path existingPath = new Path(existingName);
        final Path finalName = new Path(newName);
        try {
            if (exists(newName)) {
                logger.error(String.format("Path %s already exists when try to rename %s to %s.", newName, existingName, newName));
                return false;
            }
            return fs.rename(existingPath, finalName);
        } catch (IOException ex) {
            logger.error("Rename hdfs directory failed: " + ex.getMessage());
        }
        return false;
    }

    /** Delete a directory recursively, bypassing the trash. */
    public static boolean removeDirSkipTrash(String dir) {
        Path path = new Path(dir);
        boolean rv = false;
        try {
            if (exists(dir)) {
                if (fs.delete(path, true)) {
                    logger.info(String.format("Directory %s deleted successfully.", path));
                    rv = true;
                }
            } else {
                logger.error(String.format("Directory to delete %s does not exist.", dir));
                return false;
            }
        } catch (IOException ex) {
            logger.error(String.format("Directory %s exists but could not be deleted: %s", dir, ex.getMessage()));
        }
        return rv;
    }

    /** List the paths matching the given glob pattern. */
    public static List<String> listDirs(String baseDir) {
        Path path = new Path(baseDir);
        List<String> dirs = new ArrayList<>();
        try {
            FileStatus[] fileStatuses = fs.globStatus(path);
            if (fileStatuses != null) {
                for (int i = 0; i < fileStatuses.length; i++) {
                    dirs.add(fileStatuses[i].getPath().toUri().getRawPath());
                }
            }
        } catch (Exception ex) {
            logger.error(String.format("List directories under %s failed.", baseDir));
        }
        return dirs;
    }

    /** Close the underlying FileSystem. */
    public static void close() {
        try {
            fs.close();
        } catch (IOException ex) {
            logger.error("hdfs file system close failed: " + ex.getMessage());
        }
    }
}
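A short usage sketch of the utility class is shown below; the /data/small_files directory and the _merged/_bak suffixes are illustrative only. It initializes the FileSystem from the classpath configs, inspects the source directory, and then performs a typical post-merge directory swap.

public class HDFSUtilsExample {

    public static void main(String[] args) {
        // Directory path is an assumption for illustration.
        String dir = "/data/small_files";

        HDFSUtils.init(); // load core-site.xml / hdfs-site.xml and open the FileSystem

        long fileCount = HDFSUtils.getFileCount(dir);     // number of files under the directory
        long totalSize = HDFSUtils.getDirectorySize(dir); // total bytes under the directory
        long blockSize = HDFSUtils.getBlockSize();        // default HDFS block size

        System.out.println(String.format("%s: %d files, %d bytes, block size %d",
                dir, fileCount, totalSize, blockSize));

        // Typical post-merge housekeeping: swap the merged output in and keep the old data as a backup.
        if (HDFSUtils.exists(dir + "_merged")) {
            if (HDFSUtils.exists(dir + "_bak")) {
                HDFSUtils.removeDirSkipTrash(dir + "_bak"); // drop any stale backup
            }
            HDFSUtils.renameDir(dir, dir + "_bak");         // keep the original as a backup
            HDFSUtils.renameDir(dir + "_merged", dir);      // promote the merged output
        }

        HDFSUtils.close();
    }
}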
2.2. MergeFilesApplication
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.util.Arrays;

public class MergeFilesApplication {

    public static void main(String[] args) {
        System.out.println(Arrays.asList(args));
        // Assumption: source directory, target directory, and target partition count
        // are passed in as program arguments.
        String sourceDir = args[0];
        String targetMergedDir = args[1];
        int partitions = Integer.parseInt(args[2]);

        // Run as the hdfs user
        System.setProperty("HADOOP_USER_NAME", "hdfs");
        System.setProperty("user.name", "hdfs");

        // Build the SparkSession
        SparkSession sparkSession = SparkSession.builder()
                .config("spark.scheduler.mode", "FAIR")  // scheduler mode
                .config("spark.sql.warehouse.dir", "/warehouse/tablespace/external/hive")  // warehouse directory
                .appName("MergeFilesApplication")
                .getOrCreate();

        // Merge the files
        sparkSession.read()                            // read with Spark
                .parquet(sourceDir)                    // source directory
                .coalesce(partitions)                  // reduce to the target number of partitions
                .sortWithinPartitions("col1", "col2")  // sort each partition by the required columns
                .write()                               // write with Spark
                .mode(SaveMode.Append)                 // append mode
                .option("compression", "gzip")         // gzip compression
                .parquet(targetMergedDir);             // target directory

        sparkSession.stop();
    }
}
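One way to pick the partitions value passed to coalesce is to derive it from the source directory size and the default block size exposed by HDFSUtils. The sketch below is a minimal heuristic, assuming HDFSUtils.init() has already been called and that roughly one output file per HDFS block is the target; since the output is gzip-compressed, real deployments may scale the estimate down.

public class PartitionEstimator {

    /**
     * Estimate how many output files (Spark partitions) are needed so that each
     * merged file is roughly one HDFS block in size. Assumes HDFSUtils.init()
     * has already been called.
     */
    public static int estimatePartitions(String sourceDir) {
        long dirSize = HDFSUtils.getDirectorySize(sourceDir);  // total bytes under the source directory
        long blockSize = HDFSUtils.getBlockSize();             // default block size, e.g. 128 MB
        // Round up and always keep at least one partition.
        return (int) Math.max(1, (dirSize + blockSize - 1) / blockSize);
    }
}

The estimate can then be passed to MergeFilesApplication as its partition-count argument.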