一、前言
前面几篇都是在 kernel space 对 dma-buf 进行访问的,本篇我们将一起来学习,如何在 user space 访问 dma-buf。当然,user space 访问 dma-buf 也属于 CPU Access 的一种。
二、mmap
为了方便应用程序能直接在用户空间读写 dma-buf 的内存,dma_buf_ops 为我们提供了一个 mmap 回调接口,可以把 dma-buf 的物理内存直接映射到用户空间,这样应用程序就可以像访问普通文件那样访问 dma-buf 的物理内存了。
在linux 设备驱动中,大多数驱动的 mmap 操作接口都是通过调用 remap_pfn_range()
函数来实现的,dma-buf 也不例外。
除了 dma_buf_ops 提供的 mmap 回调接口外,dma-buf 还为我们提供了 dma_buf_mmap()
内核 API,使得我们可以在其他设备驱动中就地取材,直接引用 dma-buf 的 mmap 实现,以此来间接地实现设备驱动的 mmap 文件操作接口。
接下来,我们将通过两个示例来演示如何在 Userspace 访问 dma-buf 的物理内存。
- 示例一:直接使用 dma-buf 的 fd 做 mmap() 操作
- 示例二:使用 exporter 的 fd 做 mmap() 操作
三、直接使用 dma-buf 的 fd 做 mmap() 操作
本示例主要演示如何在驱动层实现 dma-buf 的 mmap 回调接口,以及如何在用户空间直接使用 dma-buf 的 fd 进行 mmap() 操作。
export_test.c
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>

/*
 * Minimal dma-buf exporter demo: allocates one zeroed page, exports it as a
 * dma-buf, and hands the dma-buf fd to userspace through a misc-device ioctl
 * so the application can mmap() the buffer directly.
 */

/* The exported dma-buf; EXPORT_SYMBOL so importer demo drivers can find it. */
struct dma_buf *dmabuf_export;
EXPORT_SYMBOL(dmabuf_export);

/* Importer attached: just log which device is attaching. */
static int exporter_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	pr_info("dmabuf attach device: %s\n", dev_name(attachment->dev));
	return 0;
}

/* Importer detached: log it; nothing to tear down for this demo. */
static void exporter_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	pr_info("dmabuf detach device: %s\n", dev_name(attachment->dev));
}

/*
 * Build a one-entry sg_table describing the single backing page.
 * Returns ERR_PTR on allocation failure so callers can distinguish errors.
 */
static struct sg_table *exporter_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct sg_table *table;
	int ret;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		pr_info("sg_alloc_table err\n");
		kfree(table);
		return ERR_PTR(ret);
	}

	sg_dma_len(table->sgl) = PAGE_SIZE;
	pr_info("sg_dma_len: %d\n", sg_dma_len(table->sgl));
	/*
	 * A real exporter would set up the DMA mapping here, e.g.:
	 *   sg_dma_address(table->sgl) =
	 *       dma_map_single(attachment->dev, vaddr, PAGE_SIZE, dir);
	 * It is left out of this CPU-access demo on purpose.
	 */
	return table;
}

/* Undo exporter_map_dma_buf(): release the sg_table. */
static void exporter_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction dir)
{
	/*
	 * NOTE: the original code called dma_unmap_single(NULL, ...) here,
	 * but the matching dma_map_single() in exporter_map_dma_buf() is
	 * commented out — unmapping an address that was never mapped is a
	 * bug, so the call is removed.
	 */
	sg_free_table(table);
	kfree(table);
}

/* Last reference dropped: free the backing page allocated at export time. */
static void exporter_release(struct dma_buf *dmabuf)
{
	kfree(dmabuf->priv);
}

/* vmap: the buffer already lives in the kernel linear map, so just return it. */
static void *exporter_vmap(struct dma_buf *dmabuf)
{
	return dmabuf->priv;
}

/*
 * mmap callback: map the single backing page into the caller's VMA.
 * virt_to_pfn() is not available on every architecture, so go through
 * virt_to_page()/page_to_pfn() instead.
 */
static int exporter_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	void *vaddr = dmabuf->priv;

	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(virt_to_page(vaddr)),
			       PAGE_SIZE, vma->vm_page_prot);
}

static const struct dma_buf_ops exp_dmabuf_ops = {
	.attach		= exporter_attach,
	.detach		= exporter_detach,
	.map_dma_buf	= exporter_map_dma_buf,
	.unmap_dma_buf	= exporter_unmap_dma_buf,
	.release	= exporter_release,
	.vmap		= exporter_vmap,
	.mmap		= exporter_mmap,
};

/*
 * Allocate one page, seed it with "hello world", and export it as a dma-buf.
 * Returns the dma-buf or an ERR_PTR on failure.
 */
static struct dma_buf *exporter_alloc_page(void)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	void *vaddr;

	vaddr = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	exp_info.ops   = &exp_dmabuf_ops;
	exp_info.size  = PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv  = vaddr;

	/* dma_buf_export() returns ERR_PTR on failure, never NULL. */
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		pr_err("dma_buf_export failed: %ld\n", PTR_ERR(dmabuf));
		kfree(vaddr);
		return dmabuf;
	}

	/* Seed the page so userspace can verify its mapping works. */
	sprintf(vaddr, "hello world");
	return dmabuf;
}

/* ioctl: install a new fd for the exported dma-buf and copy it to userspace. */
static long exporter_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int fd = dma_buf_fd(dmabuf_export, O_CLOEXEC);

	if (fd < 0)
		return fd;
	if (copy_to_user((void __user *)arg, &fd, sizeof(fd)))
		return -EFAULT;
	return 0;
}

static const struct file_operations exporter_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= exporter_ioctl,
};

static struct miscdevice mdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "exporter",
	.fops	= &exporter_fops,
};

static int __init exporter_init(void)
{
	dmabuf_export = exporter_alloc_page();
	if (IS_ERR(dmabuf_export))
		return PTR_ERR(dmabuf_export);
	return misc_register(&mdev);
}

static void __exit exporter_exit(void)
{
	misc_deregister(&mdev);
}

module_init(exporter_init);
module_exit(exporter_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("ZWQ");
MODULE_DESCRIPTION("zwq dma used buffer");
从上面的示例可以看到,除了要实现 dma-buf 的 mmap 回调接口外,我们还引入了 misc driver,目的是想通过 misc driver 的 ioctl 接口将 dma-buf 的 fd 传递给上层应用程序,这样才能实现应用程序直接使用这个 dma-buf fd 做 mmap() 操作。
补充:
/* mmap callback: map the single backing page into the caller's VMA. */
static int exporter_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
/* priv holds the kernel virtual address of the kzalloc'd page. */
void *vaddr = dmabuf->priv;
/* Convert to struct page first; virt_to_pfn() is not portable (see text below). */
struct page * page_ptr = virt_to_page(vaddr);
return remap_pfn_range(vma,vma->vm_start, page_to_pfn(page_ptr),
PAGE_SIZE, vma->vm_page_prot);
}
上面的虚拟地址 vaddr 如果使用:
remap_pfn_range(vma, vma->vm_start, virt_to_pfn(vaddr), PAGE_SIZE, vma->vm_page_prot);
这样会编译不过(virt_to_pfn() 并非在所有架构上都可用),只能先把 vaddr 转换成 page,再由 page 转换成页帧号。
为什么非要通过 ioctl 的方式来传递 fd ?这个问题我会在下一篇中详细讨论。
在 ioctl 接口中,我们使用到了 dma_buf_fd()
函数,该函数用于创建一个新的 fd,并与该 dma-buf 的文件相绑定。关于该函数,我也会在下一篇中做详细介绍。
userspace 程序
mmap_dmabuf.c
#include <stdio.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sys/mman.h>

/*
 * Demo: fetch a dma-buf fd from the exporter misc device via ioctl,
 * mmap() that fd directly, and print the string the kernel wrote there.
 */
int main(int argc, char *argv[])
{
	int fd;
	int dmabuf_fd = 0;
	char *str;

	fd = open("/dev/exporter", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/exporter");
		return 1;
	}

	/* cmd 0: the driver copies a freshly created dma-buf fd back to us. */
	if (ioctl(fd, 0, &dmabuf_fd) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	close(fd);

	str = mmap(NULL, 4096, PROT_READ, MAP_SHARED, dmabuf_fd, 0);
	if (str == MAP_FAILED) {
		perror("mmap");
		close(dmabuf_fd);
		return 1;
	}

	printf("read from dmabuf mmap: %s\n", str);

	munmap(str, 4096);
	close(dmabuf_fd);
	return 0;
}
编译运行后,结果如下:
可以看到,userspace 程序通过 mmap() 接口成功的访问到 dma-buf 的物理内存。
四、使用 exporter 的 fd 做 mmap() 操作
本示例主要演示如何使用 dma_buf_mmap()
内核 API,以此来简化设备驱动的 mmap 文件操作接口的实现。
export_test.c
新增 exporter_misc_mmap() 函数, 具体修改如下:
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>

/*
 * dma-buf exporter demo, variant 2: in addition to handing out the dma-buf fd
 * via ioctl, the misc device's own mmap fop delegates to dma_buf_mmap(), so
 * userspace can mmap() the misc device fd directly.
 */

/* The exported dma-buf; EXPORT_SYMBOL so importer demo drivers can find it. */
struct dma_buf *dmabuf_export;
EXPORT_SYMBOL(dmabuf_export);

/* Importer attached: just log which device is attaching. */
static int exporter_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	pr_info("dmabuf attach device: %s\n", dev_name(attachment->dev));
	return 0;
}

/* Importer detached: log it; nothing to tear down for this demo. */
static void exporter_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	pr_info("dmabuf detach device: %s\n", dev_name(attachment->dev));
}

/*
 * Build a one-entry sg_table describing the single backing page.
 * Returns ERR_PTR on allocation failure so callers can distinguish errors.
 */
static struct sg_table *exporter_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct sg_table *table;
	int ret;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		pr_info("sg_alloc_table err\n");
		kfree(table);
		return ERR_PTR(ret);
	}

	sg_dma_len(table->sgl) = PAGE_SIZE;
	pr_info("sg_dma_len: %d\n", sg_dma_len(table->sgl));
	/*
	 * A real exporter would set up the DMA mapping here, e.g.:
	 *   sg_dma_address(table->sgl) =
	 *       dma_map_single(attachment->dev, vaddr, PAGE_SIZE, dir);
	 * It is left out of this CPU-access demo on purpose.
	 */
	return table;
}

/* Undo exporter_map_dma_buf(): release the sg_table. */
static void exporter_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction dir)
{
	/*
	 * NOTE: the original code called dma_unmap_single(NULL, ...) here,
	 * but the matching dma_map_single() in exporter_map_dma_buf() is
	 * commented out — unmapping an address that was never mapped is a
	 * bug, so the call is removed.
	 */
	sg_free_table(table);
	kfree(table);
}

/* Last reference dropped: free the backing page allocated at export time. */
static void exporter_release(struct dma_buf *dmabuf)
{
	kfree(dmabuf->priv);
}

/* vmap: the buffer already lives in the kernel linear map, so just return it. */
static void *exporter_vmap(struct dma_buf *dmabuf)
{
	return dmabuf->priv;
}

/*
 * mmap callback: map the single backing page into the caller's VMA.
 * virt_to_pfn() is not available on every architecture, so go through
 * virt_to_page()/page_to_pfn() instead.
 */
static int exporter_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	void *vaddr = dmabuf->priv;

	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(virt_to_page(vaddr)),
			       PAGE_SIZE, vma->vm_page_prot);
}

static const struct dma_buf_ops exp_dmabuf_ops = {
	.attach		= exporter_attach,
	.detach		= exporter_detach,
	.map_dma_buf	= exporter_map_dma_buf,
	.unmap_dma_buf	= exporter_unmap_dma_buf,
	.release	= exporter_release,
	.vmap		= exporter_vmap,
	.mmap		= exporter_mmap,
};

/*
 * Allocate one page, seed it with "hello world", and export it as a dma-buf.
 * Returns the dma-buf or an ERR_PTR on failure.
 */
static struct dma_buf *exporter_alloc_page(void)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	void *vaddr;

	vaddr = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	exp_info.ops   = &exp_dmabuf_ops;
	exp_info.size  = PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv  = vaddr;

	/* dma_buf_export() returns ERR_PTR on failure, never NULL. */
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		pr_err("dma_buf_export failed: %ld\n", PTR_ERR(dmabuf));
		kfree(vaddr);
		return dmabuf;
	}

	/* Seed the page so userspace can verify its mapping works. */
	sprintf(vaddr, "hello world");
	return dmabuf;
}

/* ioctl: install a new fd for the exported dma-buf and copy it to userspace. */
static long exporter_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int fd = dma_buf_fd(dmabuf_export, O_CLOEXEC);

	if (fd < 0)
		return fd;
	if (copy_to_user((void __user *)arg, &fd, sizeof(fd)))
		return -EFAULT;
	return 0;
}

/*
 * mmap fop for the misc device itself: graft the dma-buf's mmap
 * implementation onto this device via dma_buf_mmap().
 */
static int exporter_misc_mmap(struct file *file, struct vm_area_struct *vma)
{
	return dma_buf_mmap(dmabuf_export, vma, 0);
}

static const struct file_operations exporter_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= exporter_ioctl,
	.mmap		= exporter_misc_mmap,
};

static struct miscdevice mdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "exporter",
	.fops	= &exporter_fops,
};

static int __init exporter_init(void)
{
	dmabuf_export = exporter_alloc_page();
	if (IS_ERR(dmabuf_export))
		return PTR_ERR(dmabuf_export);
	return misc_register(&mdev);
}

static void __exit exporter_exit(void)
{
	misc_deregister(&mdev);
}

module_init(exporter_init);
module_exit(exporter_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("ZWQ");
MODULE_DESCRIPTION("zwq dma used buffer");
与示例一的驱动相比,示例二的驱动可以不再需要把 dma-buf 的 fd 通过 ioctl 传给上层,而是直接将 dma-buf 的 mmap 回调接口嫁接到 misc driver 的 mmap 文件操作接口上。这样上层在对 misc device 进行 mmap() 操作时,实际映射的是 dma-buf 的物理内存。
userspace 程序
mmap_dmabuf.c
#include <stdio.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sys/mman.h>

/*
 * Demo variant 2: no ioctl needed — mmap() the exporter misc device fd
 * directly; the driver's mmap fop delegates to dma_buf_mmap().
 */
int main(int argc, char *argv[])
{
	int fd;
	char *str;

	fd = open("/dev/exporter", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/exporter");
		return 1;
	}

	str = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (str == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("read from dmabuf mmap: %s\n", str);

	munmap(str, 4096);
	close(fd);
	return 0;
}
与示例一的 userspace 程序相比,示例二不再通过 ioctl() 方式获取 dma-buf 的 fd,而是直接使用 exporter misc device 的 fd 进行 mmap() 操作,此时执行的则是 misc driver 的 mmap 文件操作接口。当然,最终输出的结果都是一样的。
运行结果: