一、rbd内核驱动写入流程
1)初始化
首先是rbd驱动的初始化工作:包括验证libceph的兼容性,分配内存,在sysfs中创建块设备控制文件、创建工作队列rbd_wq并调用INIT_WORK初始化它
module_init(rbd_init);
static int __init rbd_init(void)
{
if (!libceph_compatible(NULL)) { //兼容性
rbd_warn(NULL, "libceph incompatibility (quitting)");
return -EINVAL;
}
rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0); //创建工作队列
if (!rbd_wq)
{rc = -ENOMEM;goto err_out_slab;
}
rc = rbd_slab_init(); //初始化内存分配器
if (rc)
return rc;
.......
rc = rbd_sysfs_init(); //创建/sys/bus/rbd/
if (rc)
goto err_out_blkdev;
...}static int rbd_init_request(void *data, struct request *rq,unsigned int hctx_idx, unsigned int request_idx,unsigned int numa_node)
{struct work_struct *work = blk_mq_rq_to_pdu(rq);INIT_WORK(work, rbd_queue_workfn); //初始化一个work,work通过rbd_queue_workfn进行处理return 0;
}
2)块设备创建、工作队列中启动work
添加块设备,首先创建一个rbd client用来通信,然后选择一个pool存储池去创建rbd设备,创建完成后调用rbd_dev_device_setup初始化rbd设备,在初始化块设备的时候会启动工作队列rbd_wq,并将通用块设备层的请求转化为一个work添加到rbd_wq工作队列中,然后由cpu调度执行工作队列rbd_wq中的work,work对应的处理函数为rbd_queue_workfn,该work用于处理通用块设备层的IO请求。
启动work的调用关系: rbd_dev_device_setup → rbd_init_disk → rbd_mq_ops → rbd_init_request → rbd_queue_workfn处理函数
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,const struct blk_mq_queue_data *bd)
{struct request *rq = bd->rq;struct work_struct *work = blk_mq_rq_to_pdu(rq); //通用块设备层请求转为workqueue_work(rbd_wq, work); //将work加入到工作队列,工作队列中的work由cpu调度处理return BLK_MQ_RQ_QUEUE_OK;
}static ssize_t rbd_add(struct bus_type *bus,const char *buf,size_t count)
{if (single_major)return -EINVAL;return do_rbd_add(bus, buf, count);
}static ssize_t do_rbd_add(struct bus_type *bus,const char *buf,size_t count)
{.....rbdc = rbd_get_client(ceph_opts); //获取或创建rbd_clientif (IS_ERR(rbdc)) {rc = PTR_ERR(rbdc);goto err_out_args;}/* pick the pool */rc = rbd_add_get_pool_id(rbdc, spec->pool_name); //选择存储池if (rc < 0) {if (rc == -ENOENT)pr_info("pool %s does not exist\n", spec->pool_name);goto err_out_client;}spec->pool_id = (u64)rc;rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); //创建rbd设备down_write(&rbd_dev->header_rwsem);
......rc = rbd_dev_image_probe(rbd_dev, 0); //探针更多的是检查rbd image是否被mapif (rc < 0) {up_write(&rbd_dev->header_rwsem);goto err_out_rbd_dev;}
......rc = rbd_dev_device_setup(rbd_dev); //包括obj->pg映射等static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{int ret;
....../* Set up the blkdev mapping. */ret = rbd_init_disk(rbd_dev); ......
}static int rbd_init_disk(struct rbd_device *rbd_dev)
{struct gendisk *disk;struct request_queue *q;u64 segment_size;int err;
.....memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));rbd_dev->tag_set.ops = &rbd_mq_ops; //rbd_dev初始化rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
.....
}static struct blk_mq_ops rbd_mq_ops = {.queue_rq = rbd_queue_rq,.init_request = rbd_init_request, //调用rbd_init_request
};static int rbd_init_request(void *data, struct request *rq,unsigned int hctx_idx, unsigned int request_idx,unsigned int numa_node)
{struct work_struct *work = blk_mq_rq_to_pdu(rq);INIT_WORK(work, rbd_queue_workfn); //通过work_struct启动线程return 0;
}
3)work处理函数rbd_queue_workfn内流程分析
从上层取出通用块设备层请求后,转换为image对象,再从image对象批量转为object对象,再计算出object到pg,pg到osd的映射关系。
3.1 获取通用块设备层信息
在rbd_queue_workfn中,通过blk_mq_rq_from_pdu获取到通用块设备层IO请求rq、通过blk_rq_bytes(rq)获取到请求中需要写入的数据长度length(length表示的是客户端需要写到磁盘总的数据长度),通过blk_rq_pos(rq)获取块设备写入偏移量offset。
static void rbd_queue_workfn(struct work_struct *work)
{struct request *rq = blk_mq_rq_from_pdu(work); //通用块设备层请求struct rbd_device *rbd_dev = rq->q->queuedata;struct rbd_img_request *img_request;struct ceph_snap_context *snapc = NULL;u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; //块设备的偏移量u64 length = blk_rq_bytes(rq); //enum obj_operation_type op_type;
.....
}
3.2 通用块设备层信息转换image请求,image请求批量转换为object
在rbd_queue_workfn中从通用块设备层请求中获取到块设备偏移offset和长度length后,再使用这些指标来创建img_request并填充img_request->offset字段,然后调用rbd_img_request_fill函数,在该函数中,基于rados object的大小(4M)与rados对象在rbd中的segment排列,对请求进行拆分,最终将rbd_img_request拆分成多个rbd_obj_request对象,通过这样的过程实现从linux内核的通用块请求到ceph rados object的转换。
static void rbd_queue_workfn(struct work_struct *work)
{struct request *rq = blk_mq_rq_from_pdu(work);struct rbd_device *rbd_dev = rq->q->queuedata;struct rbd_img_request *img_request;u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; //块设备偏移u64 length = blk_rq_bytes(rq); //长度
......img_request = rbd_img_request_create(rbd_dev, offset, length, op_type, //创建img_requestsnapc); img_request->offset = offset; //填充img_request→offsetresult = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, //将rbd_img_request划分为一个个rbd_obj_requestrq->bio);
.....
}static int rbd_img_request_fill(struct rbd_img_request *img_request,enum obj_request_type type,void *data_desc)
{struct rbd_obj_request *obj_request = NULL;u64 img_offset;img_offset = img_request->offset; //块设备当前写入的偏移位置resid = img_request->length; //待写入的长度while (resid) {
......object_name = rbd_segment_name(rbd_dev, img_offset); //对象名length = rbd_segment_length(rbd_dev, img_offset, resid); //长度obj_request = rbd_obj_request_create(object_name, //创建obj_request对象offset, length, type);
......img_offset += length; //偏移增加lengthresid -= length;
......
}
3.3 rbd块设备offset到rados object的映射
rbd块设备到rados对象的映射是根据rados对象的大小以及当前块设备的偏移量来决定的,并且rados对象的名称由前缀rbd_data.$image_id.加上16位16进制的序号构成。
3.3.1 rados对象大小与命名方式
每个rbd块设备都定义了一个以2为底的指数来表示每个rbd对象的大小,这个指数称为rbd的obj order。obj order默认值为22,因此每个rbd对象大小为2^22 Bytes,即每个rados对象大小为4MB。