Parallel Reading and Writing of HDF5 Files with MPI-IO
HDF5 supports parallel reading and writing through MPI-IO, which matters for large-scale scientific computing applications. Below are C++ and Fortran example programs showing how to read and write an HDF5 file in parallel with MPI-IO.
Prerequisites
Before using HDF5 with MPI-IO, make sure that:
- the HDF5 library was built with MPI (parallel) support
- the program is linked against that parallel HDF5 library
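A quick way to check the first point at compile time is the H5_HAVE_PARALLEL macro, which parallel builds of HDF5 define in their public headers. This is only a minimal sketch; verify the macro against your installation's H5pubconf.h.
#include <hdf5.h>
#include <iostream>

int main() {
// Report whether this HDF5 installation was configured with MPI support.
#ifdef H5_HAVE_PARALLEL
    std::cout << "HDF5 was built with MPI (parallel) support." << std::endl;
#else
    std::cout << "HDF5 was built WITHOUT MPI support - parallel I/O is unavailable." << std::endl;
#endif
    return 0;
}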
C++ example
#include <hdf5.h>
#include <mpi.h>
#include <iostream>
#include <vector>

#define FILE_NAME "parallel.h5"
#define DATASET_NAME "IntArray"
#define DIM0 100  // global dimensions
#define DIM1 100

int main(int argc, char** argv) {
    // Initialize MPI
    MPI_Init(&argc, &argv);
    int mpi_rank, mpi_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    // Set up a file access property list that uses MPI-IO
    hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);

    // Create the file (truncating any existing file of the same name)
    hid_t file_id = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
    H5Pclose(plist_id);

    // Define the dataset dimensions and create the file dataspace
    hsize_t dims[2] = {DIM0, DIM1};
    hid_t filespace = H5Screate_simple(2, dims, NULL);

    // Create the dataset (all processes participate)
    hid_t dset_id = H5Dcreate(file_id, DATASET_NAME, H5T_NATIVE_INT, filespace,
                              H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Sclose(filespace);

    // Define the region each process writes (assumes DIM0 is divisible by mpi_size)
    hsize_t count[2] = {DIM0 / mpi_size, DIM1};
    hsize_t offset[2] = {mpi_rank * count[0], 0};

    // Select this process's hyperslab in the file dataspace
    filespace = H5Dget_space(dset_id);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);

    // Create the memory dataspace
    hid_t memspace = H5Screate_simple(2, count, NULL);

    // Prepare the data - each process fills its own block
    std::vector<int> data(count[0] * count[1]);
    for (size_t i = 0; i < count[0]; ++i) {
        for (size_t j = 0; j < count[1]; ++j) {
            data[i * count[1] + j] = mpi_rank * 1000 + i * count[1] + j;
        }
    }

    // Set up a collective transfer property list
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

    // Write the data in parallel
    herr_t status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace,
                             plist_id, data.data());

    // Release resources
    H5Dclose(dset_id);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Pclose(plist_id);
    H5Fclose(file_id);

    // Read back the data - the procedure mirrors the write
    if (mpi_rank == 0) {
        std::cout << "Write complete, reading back for verification..." << std::endl;
    }

    // Reopen the file and dataset (a fresh MPI-IO file access property list is needed)
    plist_id = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
    file_id = H5Fopen(FILE_NAME, H5F_ACC_RDONLY, plist_id);
    H5Pclose(plist_id);
    dset_id = H5Dopen(file_id, DATASET_NAME, H5P_DEFAULT);

    // Allocate the read buffer
    std::vector<int> read_data(count[0] * count[1]);

    // Set up a collective transfer property list for the read
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

    // Select the same hyperslab and recreate the memory dataspace
    filespace = H5Dget_space(dset_id);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
    memspace = H5Screate_simple(2, count, NULL);

    // Read the data in parallel
    status = H5Dread(dset_id, H5T_NATIVE_INT, memspace, filespace,
                     plist_id, read_data.data());

    // Verify the data
    bool error = false;
    for (size_t i = 0; i < count[0] * count[1]; ++i) {
        if (read_data[i] != data[i]) {
            error = true;
            break;
        }
    }
    if (!error && mpi_rank == 0) {
        std::cout << "Data verified successfully!" << std::endl;
    }

    // Release resources
    H5Dclose(dset_id);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Pclose(plist_id);
    H5Fclose(file_id);

    // Finalize MPI
    MPI_Finalize();
    return 0;
}
Fortran example
program parallel_hdf5_mpi
    use hdf5
    use mpi
    implicit none

    integer :: ierr, mpi_rank, mpi_size
    integer(hid_t) :: file_id, dset_id, filespace, memspace, plist_id
    integer(hsize_t), dimension(2) :: dims = (/100, 100/)  ! global dimensions
    integer(hsize_t), dimension(2) :: count, offset
    integer, allocatable :: data(:, :)
    integer :: i, j
    character(len=*), parameter :: file_name = "parallel.h5"
    character(len=*), parameter :: dset_name = "IntArray"

    ! Initialize MPI
    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, mpi_rank, ierr)
    call MPI_Comm_size(MPI_COMM_WORLD, mpi_size, ierr)

    ! Initialize the HDF5 Fortran interface
    call h5open_f(ierr)

    ! Set the file access property list to use MPI-IO
    call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, ierr)
    call h5pset_fapl_mpio_f(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL, ierr)

    ! Create the file (truncating any existing file of the same name)
    call h5fcreate_f(file_name, H5F_ACC_TRUNC_F, file_id, ierr, access_prp=plist_id)
    call h5pclose_f(plist_id, ierr)

    ! Create the file dataspace
    call h5screate_simple_f(2, dims, filespace, ierr)

    ! Create the dataset (all processes participate)
    call h5dcreate_f(file_id, dset_name, H5T_NATIVE_INTEGER, filespace, &
                     dset_id, ierr)
    call h5sclose_f(filespace, ierr)

    ! Define the region each process writes (assumes dims(1) is divisible by mpi_size)
    count(1) = dims(1) / mpi_size
    count(2) = dims(2)
    offset(1) = mpi_rank * count(1)
    offset(2) = 0

    ! Select this process's hyperslab in the file dataspace
    call h5dget_space_f(dset_id, filespace, ierr)
    call h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, offset, count, ierr)

    ! Create the memory dataspace
    call h5screate_simple_f(2, count, memspace, ierr)

    ! Prepare the data - each process fills its own block
    allocate(data(count(1), count(2)))
    do i = 1, count(1)
        do j = 1, count(2)
            data(i, j) = mpi_rank * 1000 + (i - 1) * count(2) + j
        end do
    end do

    ! Set up a collective transfer property list
    call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, ierr)
    call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, ierr)

    ! Write the data in parallel
    call h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, count, ierr, &
                    file_space_id=filespace, mem_space_id=memspace, &
                    xfer_prp=plist_id)

    ! Release resources
    deallocate(data)
    call h5dclose_f(dset_id, ierr)
    call h5sclose_f(filespace, ierr)
    call h5sclose_f(memspace, ierr)
    call h5pclose_f(plist_id, ierr)
    call h5fclose_f(file_id, ierr)

    ! Read back the data - the procedure mirrors the write
    if (mpi_rank == 0) then
        print *, "Write complete, reading back for verification..."
    endif

    ! Reopen the file and dataset (a fresh MPI-IO file access property list is needed)
    call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, ierr)
    call h5pset_fapl_mpio_f(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL, ierr)
    call h5fopen_f(file_name, H5F_ACC_RDONLY_F, file_id, ierr, access_prp=plist_id)
    call h5pclose_f(plist_id, ierr)
    call h5dopen_f(file_id, dset_name, dset_id, ierr)

    ! Allocate the read buffer
    allocate(data(count(1), count(2)))

    ! Set up a collective transfer property list for the read
    call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, ierr)
    call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, ierr)

    ! Select the same hyperslab and recreate the memory dataspace
    call h5dget_space_f(dset_id, filespace, ierr)
    call h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, offset, count, ierr)
    call h5screate_simple_f(2, count, memspace, ierr)

    ! Read the data in parallel
    call h5dread_f(dset_id, H5T_NATIVE_INTEGER, data, count, ierr, &
                   file_space_id=filespace, mem_space_id=memspace, &
                   xfer_prp=plist_id)

    ! Verify the data (simplified here to checking the first element on rank 0)
    if (data(1, 1) == mpi_rank * 1000 + 1 .and. mpi_rank == 0) then
        print *, "Data verified successfully!"
    endif

    ! Release resources
    deallocate(data)
    call h5dclose_f(dset_id, ierr)
    call h5sclose_f(filespace, ierr)
    call h5sclose_f(memspace, ierr)
    call h5pclose_f(plist_id, ierr)
    call h5fclose_f(file_id, ierr)

    ! Close the HDF5 Fortran interface
    call h5close_f(ierr)

    ! Finalize MPI
    call MPI_Finalize(ierr)

end program parallel_hdf5_mpi
Compiling and running
For the C++ program:
mpicxx -o parallel_hdf5 parallel_hdf5.cpp -lhdf5 -lz
mpiexec -n 4 ./parallel_hdf5
For the Fortran program:
mpif90 -o parallel_hdf5 parallel_hdf5.f90 -lhdf5_fortran -lhdf5 -lz
mpiexec -n 4 ./parallel_hdf5
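Depending on the installation, extra -I/-L flags may be needed when calling mpicxx/mpif90 directly. If the parallel HDF5 compiler wrappers (commonly named h5pcc for C and h5pfc for Fortran) are installed, they can usually supply the HDF5 include and library flags automatically.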
Key points
- MPI initialization: the MPI environment must be initialized first
- HDF5 MPI property: use H5Pset_fapl_mpio to configure the file access property list for MPI-IO
- Data partitioning: each process is responsible for a different part of the dataset
- Hyperslab selection: use H5Sselect_hyperslab to select the region to read or write
- Collective operations: use H5Pset_dxpl_mpio to enable collective I/O mode
- Parallel consistency: all processes must take part in collective operations
These examples demonstrate basic parallel read and write operations; real applications may need more sophisticated data partitioning and access patterns, one common refinement of which is sketched below.
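For instance, the examples above assume DIM0 is evenly divisible by the number of processes. A minimal sketch of handling the general case follows; block_partition is a hypothetical helper (not an HDF5 function), written for the same row-block decomposition of a DIM0 x DIM1 dataset used above.
#include <algorithm>
#include <hdf5.h>
#include <iostream>

// Hypothetical helper: the first dim0 % nranks ranks each take one extra row.
void block_partition(hsize_t dim0, hsize_t dim1, int rank, int nranks,
                     hsize_t count[2], hsize_t offset[2]) {
    hsize_t base = dim0 / nranks;   // rows every rank gets
    hsize_t rem  = dim0 % nranks;   // leftover rows
    count[0]  = base + (static_cast<hsize_t>(rank) < rem ? 1 : 0);
    count[1]  = dim1;
    offset[0] = rank * base + std::min<hsize_t>(rank, rem);
    offset[1] = 0;
}

int main() {
    // Print the decomposition of 100 rows over 3 ranks: 34, 33, 33.
    hsize_t count[2], offset[2];
    for (int r = 0; r < 3; ++r) {
        block_partition(100, 100, r, 3, count, offset);
        std::cout << "rank " << r << ": offset=" << offset[0]
                  << " rows=" << count[0] << "\n";
    }
    return 0;
}
The resulting count and offset can be passed to H5Sselect_hyperslab exactly as in the examples above; with collective I/O, every rank must still make the H5Dwrite/H5Dread calls even if its selection is empty.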