This article uses two PCs: one as the NVMe over Fabrics target (server) and one as the NVMe over Fabrics initiator (client). We first use SoftRoCE to provide the underlying RDMA transport, then use SPDK to implement the NVMe over Fabrics target.
I. Setting up RDMA with SoftRoCE
Both the target and the initiator run CentOS-7-x86_64-DVD-1810.
Both machines must load the SoftRoCE kernel module rdma_rxe.
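A minimal way to load and verify the module on each machine (a sketch only; rxe_cfg start will normally load it for you):
# modprobe rdma_rxe
# lsmod | grep rdma_rxe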
Target
# rxe_cfg start
# rxe_cfg add enp5s0f0
# rxe_cfg status
Initiator
# rxe_cfg start
# rxe_cfg add enp1s0f0
# rxe_cfg status
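With the rxe device added on both sides, you can optionally confirm that an RDMA device is visible to user space (this assumes the libibverbs-utils package is installed, and the device name rxe0 depends on your setup):
# ibv_devices
# ibv_devinfo -d rxe0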
Test
Target
# iptables -F
# rping -s -a 192.168.80.100 -v -C 1000
Initiator
# rping -c -a 192.168.80.100 -v -C 1000
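If the RDMA path works, rping completes all 1000 iterations and exits cleanly; with -v it prints the ping/pong payload for each iteration. As an optional bandwidth check, the perftest tools can also be used (assuming the perftest package is installed and the rxe device is named rxe0). On the target:
# ib_send_bw -d rxe0
On the initiator:
# ib_send_bw -d rxe0 192.168.80.100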
II. Setting up the SPDK nvmf_tgt environment
Reference: https://spdk.io/doc/nvmf.html
Target
1. Identify the physical NVMe device
[root@localhost ~]# lspci
01:00.0 Non-Volatile memory controller: Intel Corporation PCIe Data Center SSD (rev 01)
2. Build SPDK
git clone https://github.com/spdk/spdk
cd spdk
git submodule update --init
scripts/pkgdep.sh
./configure --with-rdma
make
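If the build succeeds, the target binary used below is produced under build/bin (older SPDK releases may place it under app/nvmf_tgt/ instead):
# ls build/bin/nvmf_tgt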
3. Start the NVMe-oF target
# modprobe nvme_rdma
# scripts/setup.sh
setup.sh unbinds the NVMe drive from the kernel nvme driver and rebinds it to the userspace uio_pci_generic driver so that SPDK can claim it; it also reserves hugepages for SPDK.
# build/bin/nvmf_tgt &
# scripts/rpc.py nvmf_create_transport -t RDMA -u 8192 -p 4 -c 0
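To confirm that setup.sh reserved hugepages and that the RDMA transport was created, the following checks can be used (nvmf_get_transports is a standard SPDK query RPC):
# grep Huge /proc/meminfo
# scripts/rpc.py nvmf_get_transports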
4. Create and export bdevs via RPC
Create a malloc (RAM-backed) test bdev (512 MB with 512-byte blocks)
# scripts/rpc.py bdev_malloc_create -b Malloc0 512 512
# scripts/rpc.py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 -d SPDK_Controller1
# scripts/rpc.py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
# scripts/rpc.py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a 192.168.80.100 -s 4420
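At this point the malloc bdev and its subsystem can be verified with the corresponding query RPCs:
# scripts/rpc.py bdev_get_bdevs
# scripts/rpc.py nvmf_get_subsystems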
Create a bdev on the physical NVMe drive
# scripts/rpc.py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a 0000:01:00.0
# scripts/rpc.py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode2 -a -s SPDK00000000000002 -d SPDK_Controller1
# scripts/rpc.py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode2 Nvme0n1
# scripts/rpc.py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode2 -t rdma -a 192.168.80.100 -s 4420
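RPC-created objects do not survive a target restart, so the runtime configuration can optionally be dumped to JSON and replayed later (save_config and load_config are standard rpc.py commands; the file name here is arbitrary):
# scripts/rpc.py save_config > nvmf_config.json
# scripts/rpc.py load_config < nvmf_config.json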
Initiator
1. Load the kernel module
# modprobe nvme-rdma
2. Discovery
# nvme discover -t rdma -a 192.168.80.100 -s 4420
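The nvme commands come from the nvme-cli package; if the binary is missing, install it first:
# yum install -y nvme-cli
When the target is reachable, the discovery log should show two entries whose subnqn values are nqn.2016-06.io.spdk:cnode1 and nqn.2016-06.io.spdk:cnode2.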
3. Connect
Connect to cnode1
# nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a 192.168.80.100 -s 4420
Connect to cnode2
# nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode2" -a 192.168.80.100 -s 4420
# lsblk
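lsblk should now show one block device per connected namespace; the exact names (e.g. /dev/nvme1n1) depend on how many local NVMe controllers the initiator already has. As a quick sanity test, fio can be run against one of the new devices (the filename below is an assumption, adjust it to match the lsblk output):
# fio --name=randread --rw=randread --bs=4k --ioengine=libaio --direct=1 --runtime=30 --time_based --filename=/dev/nvme1n1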
4. Disconnect
# nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
# nvme disconnect -n "nqn.2016-06.io.spdk:cnode2"
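On the target side, teardown is the reverse of setup (a sketch; kill %1 assumes nvmf_tgt is still job 1 of the same shell, otherwise use its PID): stop nvmf_tgt, return the NVMe drive to the kernel nvme driver, and remove the rxe device.
# kill %1
# scripts/setup.sh reset
# rxe_cfg remove enp5s0f0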