The sequence of system calls made by a camera monitoring application looks like this:
/* open
 * VIDIOC_QUERYCAP  determine whether it is a video capture device and which interfaces it supports (streaming / read,write)
 * VIDIOC_ENUM_FMT  enumerate the formats it supports
 * VIDIOC_S_FMT     set the format the camera will use
 * VIDIOC_REQBUFS   request buffers
 * For streaming:
 * VIDIOC_QUERYBUF  query the information of each buffer, then mmap it
 * VIDIOC_QBUF      put the buffer into the queue
 * VIDIOC_STREAMON  start the device
 * poll             wait for data
 * VIDIOC_DQBUF     take a buffer out of the queue
 * process the data ...
 * VIDIOC_QBUF      put the buffer back into the queue
 * ....
 * VIDIOC_STREAMOFF stop the device
 *
 */
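Before diving into each ioctl, here is a rough end-to-end sketch of the streaming path listed above. It is only a sketch: error handling, VIDIOC_QUERYCAP and VIDIOC_ENUM_FMT checks are skipped, and the names (fd, bufs, lens, NB_BUFFER) are placeholders rather than the project's actual code.

#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

#define NB_BUFFER 4   /* number of buffers to request (placeholder) */

int main(void)
{
    int fd = open("/dev/video0", O_RDWR);
    void *bufs[NB_BUFFER];
    unsigned int lens[NB_BUFFER];

    struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
    fmt.fmt.pix.width = 640;
    fmt.fmt.pix.height = 480;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    ioctl(fd, VIDIOC_S_FMT, &fmt);                       /* set format */

    struct v4l2_requestbuffers req = { .count = NB_BUFFER,
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .memory = V4L2_MEMORY_MMAP };
    ioctl(fd, VIDIOC_REQBUFS, &req);                     /* request buffers */

    for (unsigned int i = 0; i < req.count; i++) {
        struct v4l2_buffer b = { .index = i,
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .memory = V4L2_MEMORY_MMAP };
        ioctl(fd, VIDIOC_QUERYBUF, &b);                  /* query each buffer, then mmap it */
        lens[i] = b.length;
        bufs[i] = mmap(NULL, b.length, PROT_READ, MAP_SHARED, fd, b.m.offset);
        ioctl(fd, VIDIOC_QBUF, &b);                      /* queue it */
    }

    int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ioctl(fd, VIDIOC_STREAMON, &type);                   /* start streaming */

    for (int n = 0; n < 100; n++) {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        poll(&pfd, 1, -1);                               /* wait for a frame */

        struct v4l2_buffer b = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                                 .memory = V4L2_MEMORY_MMAP };
        ioctl(fd, VIDIOC_DQBUF, &b);                     /* dequeue: bufs[b.index] holds data */
        /* ... process bufs[b.index], b.bytesused bytes ... */
        ioctl(fd, VIDIOC_QBUF, &b);                      /* re-queue it for the next frame */
    }

    ioctl(fd, VIDIOC_STREAMOFF, &type);                  /* stop streaming */
    for (unsigned int i = 0; i < req.count; i++)
        munmap(bufs[i], lens[i]);
    close(fd);
    return 0;
}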
This article is a set of notes from a camera monitoring project I worked on earlier. Its main purpose is to record the learning process so that it is easy to review later. By walking through both the UVC camera application and the UVC driver, we analyse how data travels from the camera hardware all the way up to the user-space application.
1: VIDIOC_QUERYCAP
VIDIOC_QUERYCAP mainly fills in the v4l2_capability structure variable tV4l2Cap passed in by the application (it is passed in by pointer; almost everything in this article is passed by pointer, but for convenience I will simply say "passed in"). The application then uses the values that were filled in to decide whether the device is a video capture device, whether it is a streaming device, and so on.
Application code:
struct v4l2_capability tV4l2Cap;
memset(&tV4l2Cap, 0, sizeof(struct v4l2_capability));
iError = ioctl(iFd, VIDIOC_QUERYCAP, &tV4l2Cap);
if (iError) {
    DBG_PRINTF("Error opening device %s: unable to query device.\n", strDevName);
    goto err_exit;
}

if (!(tV4l2Cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
    DBG_PRINTF("%s is not a video capture device\n", strDevName);
    goto err_exit;
}

if (tV4l2Cap.capabilities & V4L2_CAP_STREAMING) {
    DBG_PRINTF("%s supports streaming i/o\n", strDevName);
}

if (tV4l2Cap.capabilities & V4L2_CAP_READWRITE) {
    DBG_PRINTF("%s supports read i/o\n", strDevName);
}
Driver code:
case VIDIOC_QUERYCAP:
{
    struct v4l2_capability *cap = arg;   /* pointer passed in from the application */

    memset(cap, 0, sizeof *cap);
    strlcpy(cap->driver, "uvcvideo", sizeof cap->driver);
    strlcpy(cap->card, vdev->name, sizeof cap->card);
    usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
    cap->version = DRIVER_VERSION_NUMBER;
    if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)   /* the device is a capture device (camera) */
        cap->capabilities = V4L2_CAP_VIDEO_CAPTURE      /* fill in the structure passed in by the app */
                          | V4L2_CAP_STREAMING;
    else
        cap->capabilities = V4L2_CAP_VIDEO_OUTPUT
                          | V4L2_CAP_STREAMING;
    break;
}
Q: What does the low-level driver use to fill in the structure we pass in?
A: The device descriptors.
Q: Where do the descriptors come from?
A: When the UVC camera is plugged into the board, the USB bus driver creates a usb_device structure and links it onto the USB bus driver's device list. That usb_device structure contains the hardware information, i.e. the descriptors.
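As a rough illustration only (this is not the actual uvcvideo code), a USB driver's probe function can reach that descriptor data through the usb_device it is handed; the fields shown (descriptor.idVendor, descriptor.idProduct) are the standard members of struct usb_device, and the class-specific UVC format/frame descriptors sit in the interface's extra descriptor data, which uvcvideo parses when the device is probed.

#include <linux/usb.h>

/* Sketch: where the descriptor data lives, assuming a generic USB driver probe. */
static int my_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
    struct usb_device *udev = interface_to_usbdev(intf);

    /* Device descriptor parsed by the USB core during enumeration. */
    printk(KERN_INFO "vendor 0x%04x, product 0x%04x\n",
           le16_to_cpu(udev->descriptor.idVendor),
           le16_to_cpu(udev->descriptor.idProduct));

    return 0;
}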
2: VIDIOC_ENUM_FMT
VIDIOC_ENUM_FMT mainly fills in the v4l2_fmtdesc structure variable tFmtDesc passed in by the application. The application then checks whether it supports that pixelformat; if it does, the format is stored in tVideoDevice->iPixelFormat (ptVideoDevice is a pointer to tVideoDevice); if no supported format is found, the program exits.
Application code:
struct v4l2_fmtdesc tFmtDesc;

memset(&tFmtDesc, 0, sizeof(tFmtDesc));
tFmtDesc.index = 0;
tFmtDesc.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;

/* Enumerate every format the camera hardware supports;
 * break out of the loop as soon as we find one our application can handle. */
while ((iError = ioctl(iFd, VIDIOC_ENUM_FMT, &tFmtDesc)) == 0) {
    if (isSupportThisFormat(tFmtDesc.pixelformat)) {   /* see the sketch below */
        ptVideoDevice->iPixelFormat = tFmtDesc.pixelformat;
        break;
    }
    tFmtDesc.index++;   /* increment index and call again to query the next supported format */
}

if (!ptVideoDevice->iPixelFormat) {   /* no supported format found */
    DBG_PRINTF("can not support the format of this device\n");
    goto err_exit;
}
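The original isSupportThisFormat helper is not reproduced in these notes. A minimal sketch of what it might look like, assuming the application can only render the YUYV, MJPEG and RGB565 formats handled later in the DQBUF step:

/* Hypothetical helper: returns 1 if the application can handle this pixel format. */
static int isSupportThisFormat(int iPixelFormat)
{
    switch (iPixelFormat) {
    case V4L2_PIX_FMT_YUYV:
    case V4L2_PIX_FMT_MJPEG:
    case V4L2_PIX_FMT_RGB565:
        return 1;
    default:
        return 0;
    }
}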
Driver code:
case VIDIOC_ENUM_FMT:
{
    struct v4l2_fmtdesc *fmt = arg;   /* pointer passed in from the application */
    struct uvc_format *format;
    enum v4l2_buf_type type = fmt->type;
    __u32 index = fmt->index;

    /* Validate the values passed in by the application:
     * wrong type, or index beyond the number of formats the hardware supports. */
    if (fmt->type != stream->type || fmt->index >= stream->nformats)
        return -EINVAL;

    memset(fmt, 0, sizeof(*fmt));
    fmt->index = index;
    fmt->type = type;

    /* Fill the application's structure from the hardware information. */
    format = &stream->format[fmt->index];
    fmt->flags = 0;
    if (format->flags & UVC_FMT_FLAG_COMPRESSED)
        fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
    strlcpy(fmt->description, format->name, sizeof fmt->description);
    fmt->description[sizeof fmt->description - 1] = 0;
    fmt->pixelformat = format->fcc;   /* the pixel format, e.g. YUYV, RGB, MJPEG, ... */
    break;
}
3: VIDIOC_S_FMT
In the application we first obtain the LCD resolution and tVideoDevice->iPixelFormat (e.g. YUYV or MJPEG), then pass them to the driver in the v4l2_format structure variable tV4l2Fmt. These values are not written to the hardware yet: the driver only stores them, and they are actually sent to the hardware at streamon time.
Q: Why do we first obtain the LCD resolution in the application and then pass it to the driver?
A: In this project the data captured by the camera is displayed on an LCD. To get the best display result, we ask the camera to capture images at the same resolution as the LCD (if the camera does not support that resolution, the driver picks the closest resolution it does support).
Application code:
int iLcdWidth;
int iLcdHeigt;
int iLcdBpp;
struct v4l2_format tV4l2Fmt;

/* set format in */
GetDispResolution(&iLcdWidth, &iLcdHeigt, &iLcdBpp);   /* get the LCD resolution */
memset(&tV4l2Fmt, 0, sizeof(struct v4l2_format));
tV4l2Fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
tV4l2Fmt.fmt.pix.pixelformat = ptVideoDevice->iPixelFormat;
tV4l2Fmt.fmt.pix.width  = iLcdWidth;
tV4l2Fmt.fmt.pix.height = iLcdHeigt;
tV4l2Fmt.fmt.pix.field  = V4L2_FIELD_ANY;

/* If the driver finds it cannot support some of these parameters
 * (e.g. the resolution), it adjusts them and returns the adjusted
 * values to the application. */
iError = ioctl(iFd, VIDIOC_S_FMT, &tV4l2Fmt);
if (iError) {
    DBG_PRINTF("Unable to set format\n");
    goto err_exit;
}

/* Read back the adjusted parameters. */
ptVideoDevice->iWidth  = tV4l2Fmt.fmt.pix.width;
ptVideoDevice->iHeight = tV4l2Fmt.fmt.pix.height;
Driver code:
/* Find the closest image size. The distance between image sizes is
 * the size in pixels of the non-overlapping regions between the
 * requested size and the frame-specified size. */
/* Find the resolution closest to the one passed in
 * (fmt is the variable passed in from the application). */
rw = fmt->fmt.pix.width;
rh = fmt->fmt.pix.height;
maxd = (unsigned int)-1;

for (i = 0; i < format->nframes; ++i) {
    __u16 w = format->frame[i].wWidth;
    __u16 h = format->frame[i].wHeight;

    d = min(w, rw) * min(h, rh);
    d = w*h + rw*rh - 2*d;
    if (d < maxd) {
        maxd = d;
        frame = &format->frame[i];
    }

    if (maxd == 0)
        break;
}

/* ...... code omitted here (too long to paste in full) ...... */

fmt->fmt.pix.width        = frame->wWidth;
fmt->fmt.pix.height       = frame->wHeight;
fmt->fmt.pix.field        = V4L2_FIELD_NONE;
fmt->fmt.pix.bytesperline = format->bpp * frame->wWidth / 8;
fmt->fmt.pix.sizeimage    = probe->dwMaxVideoFrameSize;
fmt->fmt.pix.colorspace   = format->colorspace;
fmt->fmt.pix.priv         = 0;
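The distance metric above is simply area(requested) + area(candidate) - 2 * area(overlap), i.e. the number of non-overlapping pixels. A small stand-alone illustration, mirroring the driver snippet above with hypothetical frame sizes (not part of the driver):

#include <stdio.h>

/* Non-overlapping pixel count between a requested size (rw x rh)
 * and a candidate frame size (w x h). */
static unsigned int frame_distance(unsigned int rw, unsigned int rh,
                                   unsigned int w, unsigned int h)
{
    unsigned int overlap = (w < rw ? w : rw) * (h < rh ? h : rh);
    return w * h + rw * rh - 2 * overlap;
}

int main(void)
{
    /* Request 480x272 (a common small LCD); compare two candidate camera modes. */
    printf("640x480 -> %u\n", frame_distance(480, 272, 640, 480)); /* 176640 */
    printf("320x240 -> %u\n", frame_distance(480, 272, 320, 240)); /* 53760: closer, so chosen */
    return 0;
}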
4: VIDIOC_REQBUFS
For VIDIOC_REQBUFS the application first fills in a few values, such as the number of buffers it wants to request, and passes them to the driver in the v4l2_requestbuffers structure variable tV4l2ReqBuffs. The driver allocates the buffers based on the requested count and the dwMaxVideoFrameSize (sizeimage) from step 3, then records the per-buffer information in the queue (e.g. each buffer's index and offset). Finally the number of buffers actually allocated is returned to the application, which saves it in ptVideoDevice->iVideoBufCnt (ptVideoDevice->iVideoBufCnt = tV4l2ReqBuffs.count;).
Application code:
struct v4l2_requestbuffers tV4l2ReqBuffs;

/* request buffers */
memset(&tV4l2ReqBuffs, 0, sizeof(struct v4l2_requestbuffers));
tV4l2ReqBuffs.count  = NB_BUFFER;
tV4l2ReqBuffs.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
tV4l2ReqBuffs.memory = V4L2_MEMORY_MMAP;

iError = ioctl(iFd, VIDIOC_REQBUFS, &tV4l2ReqBuffs);
if (iError) {
    DBG_PRINTF("Unable to allocate buffers.\n");
    goto err_exit;
}

/* The request may not be fully satisfied; the number of buffers actually
 * allocated is returned in tV4l2ReqBuffs.count. */
ptVideoDevice->iVideoBufCnt = tV4l2ReqBuffs.count;
Driver code:
struct v4l2_requestbuffers *rb = arg;

ret = uvc_alloc_buffers(&stream->queue, rb->count,
                        stream->ctrl.dwMaxVideoFrameSize);
Part of uvc_alloc_buffers is shown below:
/* Decrement the number of buffers until allocation succeeds. */
/* If the allocation fails, reduce the number of buffers and try again. */
for (; nbuffers > 0; --nbuffers) {
    mem = vmalloc_32(nbuffers * bufsize);
    if (mem != NULL)
        break;
}

/* Not even one buffer could be allocated: return an error. */
if (mem == NULL) {
    ret = -ENOMEM;
    goto done;
}

/* Record the per-buffer information in the queue: offset, index, and so on. */
for (i = 0; i < nbuffers; ++i) {
    memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
    queue->buffer[i].buf.index    = i;
    queue->buffer[i].buf.m.offset = i * bufsize;
    queue->buffer[i].buf.length   = buflength;
    queue->buffer[i].buf.type     = queue->type;
    queue->buffer[i].buf.field    = V4L2_FIELD_NONE;
    queue->buffer[i].buf.memory   = V4L2_MEMORY_MMAP;
    queue->buffer[i].buf.flags    = 0;
    init_waitqueue_head(&queue->buffer[i].wait);
}

queue->mem = mem;   /* the image data itself lives here; the buffer[] array only
                     * records bookkeeping such as each buffer's offset */
queue->count    = nbuffers;
queue->buf_size = bufsize;
ret = nbuffers;

done:
mutex_unlock(&queue->mutex);
return ret;
5: for (i = 0; i < ptVideoDevice->iVideoBufCnt; i++) {
    VIDIOC_QUERYBUF and mmap
}
For VIDIOC_QUERYBUF and mmap, the application first fills a few fields of the v4l2_buffer structure variable tV4l2Buf, e.g. tV4l2Buf.index (which buffer's information we want to query; what information? its offset and size (sizeimage), among other things), and passes tV4l2Buf to the driver. The driver uses tV4l2Buf.index to copy the corresponding buffer information from the queue into tV4l2Buf and returns to the application, which saves tV4l2Buf.length in tVideoDevice->iVideoBufMaxLen.
The application then calls mmap, passing tV4l2Buf.length and tV4l2Buf.m.offset to the driver. The driver compares the offset against each buffer to find which of the buffers allocated in step 4 is being mapped, and the start address of that buffer is returned and stored in ptVideoDevice->pucVideBuf[i]. Once the loop finishes, the pucVideBuf array points to the start address of every buffer.
Application code:
memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
tV4l2Buf.index  = i;
tV4l2Buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
tV4l2Buf.memory = V4L2_MEMORY_MMAP;

iError = ioctl(iFd, VIDIOC_QUERYBUF, &tV4l2Buf);
if (iError) {
    DBG_PRINTF("Unable to query buffer.\n");
    goto err_exit;
}
ptVideoDevice->iVideoBufMaxLen = tV4l2Buf.length;

ptVideoDevice->pucVideBuf[i] = mmap(0 /* start anywhere */,
                                    tV4l2Buf.length, PROT_READ, MAP_SHARED,
                                    iFd, tV4l2Buf.m.offset);
if (ptVideoDevice->pucVideBuf[i] == MAP_FAILED) {
    DBG_PRINTF("Unable to map buffer\n");
    goto err_exit;
}
Driver code:
int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
{
    int ret = 0;

    mutex_lock(&queue->mutex);
    if (v4l2_buf->index >= queue->count) {
        ret = -EINVAL;
        goto done;
    }

    /* v4l2_buf->index selects which buffer's information is queried. */
    __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);   /* defined below */

done:
    mutex_unlock(&queue->mutex);
    return ret;
}

static void __uvc_query_buffer(struct uvc_buffer *buf, struct v4l2_buffer *v4l2_buf)
{
    /* Copy the kernel-side buffer information into the structure passed in by the app. */
    memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);

    if (buf->vma_use_count)
        v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;

    /* Set the flags according to the buffer state. */
    switch (buf->state) {
    case UVC_BUF_STATE_ERROR:
    case UVC_BUF_STATE_DONE:
        v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
        break;
    case UVC_BUF_STATE_QUEUED:
    case UVC_BUF_STATE_ACTIVE:
    case UVC_BUF_STATE_READY:
        v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
        break;
    case UVC_BUF_STATE_IDLE:
    default:
        break;
    }
}
/*
 * Memory-map a video buffer.
 *
 * This function implements video buffers memory mapping and is intended to be
 * used by the device mmap handler.
 */
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
    struct uvc_buffer *uninitialized_var(buffer);
    struct page *page;
    unsigned long addr, start, size;
    unsigned int i;
    int ret = 0;

    start = vma->vm_start;
    size = vma->vm_end - vma->vm_start;

    mutex_lock(&queue->mutex);

    /* When the application calls mmap it passes an offset parameter;
     * that offset is used here to locate the corresponding buffer. */
    for (i = 0; i < queue->count; ++i) {
        buffer = &queue->buffer[i];
        /* Compare the kernel-side buffer offset with the offset passed in;
         * if they match, this is the buffer being mapped. */
        if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
            break;
    }

    if (i == queue->count || PAGE_ALIGN(size) != queue->buf_size) {
        ret = -EINVAL;
        goto done;
    }

    /*
     * VM_IO marks the area as being an mmaped region for I/O to a
     * device. It also prevents the region from being core dumped.
     */
    vma->vm_flags |= VM_IO;

    /* Work out the kernel virtual address of this buffer, so the
     * corresponding page structures can be found. */
    addr = (unsigned long)queue->mem + buffer->buf.m.offset;
#ifdef CONFIG_MMU
    while (size > 0) {
        page = vmalloc_to_page((void *)addr);

        /* Map this page at the virtual address the application passed in. */
        if ((ret = vm_insert_page(vma, start, page)) < 0)
            goto done;

        start += PAGE_SIZE;
        addr  += PAGE_SIZE;
        size  -= PAGE_SIZE;
    }
#endif

    vma->vm_ops = &uvc_vm_ops;
    vma->vm_private_data = buffer;
    uvc_vm_open(vma);

done:
    mutex_unlock(&queue->mutex);
    return ret;
}
6: for (i = 0; i < ptVideoDevice->iVideoBufCnt; i++) {
VIDIOC_QBUF
}
We first clear the variable tV4l2Buf from step 5, set tV4l2Buf.index and pass it to the driver. The driver uses tV4l2Buf.index to find the corresponding buffer (looking at the code, the buffer structure only holds bookkeeping information about each buffer; the data is actually stored in queue->mem), then links the buffer's stream and queue list nodes onto the queue's mainqueue and irqqueue lists respectively, forming two queues.
These are in fact doubly linked lists; the figure above is drawn that way only to make it easier to understand.
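For readers unfamiliar with the kernel's list_head, here is a minimal sketch (simplified, not the uvcvideo code) of how one buffer object can sit on both lists at the same time:

#include <linux/list.h>

/* Simplified sketch: one buffer carries two list nodes, so the same buffer
 * can be linked onto both the app-facing list and the irq-facing list. */
struct demo_buffer {
    struct list_head stream;   /* node on mainqueue (used by the app via DQBUF) */
    struct list_head queue;    /* node on irqqueue (used by the URB completion handler) */
    int index;
};

static LIST_HEAD(mainqueue);
static LIST_HEAD(irqqueue);

static void demo_queue_buffer(struct demo_buffer *buf)
{
    /* Same idea as uvc_queue_buffer(): add buf->stream and buf->queue to the two lists. */
    list_add_tail(&buf->stream, &mainqueue);
    list_add_tail(&buf->queue,  &irqqueue);
}

static struct demo_buffer *demo_next_for_irq(void)
{
    if (list_empty(&irqqueue))
        return NULL;
    /* list_first_entry() recovers the containing demo_buffer from its 'queue' node. */
    return list_first_entry(&irqqueue, struct demo_buffer, queue);
}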
Application code:
memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
tV4l2Buf.index  = i;
tV4l2Buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
tV4l2Buf.memory = V4L2_MEMORY_MMAP;

iError = ioctl(iFd, VIDIOC_QBUF, &tV4l2Buf);
if (iError) {
    DBG_PRINTF("Unable to queue buffer.\n");
    goto err_exit;
}
Driver code:
/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *v4l2_buf)
{
    struct uvc_buffer *buf;
    unsigned long flags;
    int ret = 0;

    uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

    /* Validate the parameters passed in by the application. */
    if (v4l2_buf->type != queue->type ||
        v4l2_buf->memory != V4L2_MEMORY_MMAP) {
        uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
            "and/or memory (%u).\n", v4l2_buf->type, v4l2_buf->memory);
        return -EINVAL;
    }

    mutex_lock(&queue->mutex);

    /* Validate the buffer index. */
    if (v4l2_buf->index >= queue->count) {
        uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
        ret = -EINVAL;
        goto done;
    }

    /* Find the corresponding buffer. */
    buf = &queue->buffer[v4l2_buf->index];
    if (buf->state != UVC_BUF_STATE_IDLE) {
        uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
            "(%u).\n", buf->state);
        ret = -EINVAL;
        goto done;
    }

    if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
        v4l2_buf->bytesused > buf->buf.length) {
        uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
        ret = -EINVAL;
        goto done;
    }

    spin_lock_irqsave(&queue->irqlock, flags);
    if (queue->flags & UVC_QUEUE_DISCONNECTED) {
        spin_unlock_irqrestore(&queue->irqlock, flags);
        ret = -ENODEV;
        goto done;
    }
    buf->state = UVC_BUF_STATE_QUEUED;
    if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
        buf->buf.bytesused = 0;
    else
        buf->buf.bytesused = v4l2_buf->bytesused;

    /* Link the buffer's stream and queue nodes onto the queue's
     * mainqueue and irqqueue lists respectively. */
    list_add_tail(&buf->stream, &queue->mainqueue);
    list_add_tail(&buf->queue, &queue->irqqueue);
    spin_unlock_irqrestore(&queue->irqlock, flags);

done:
    mutex_unlock(&queue->mutex);
    return ret;
}
7: streamon (VIDIOC_STREAMON)
This step mainly initialises the URBs and starts the camera. If we search the kernel source for urb->complete we find the uvc_video_complete function in uvc_video.c; once the hardware has filled a URB with a frame of data, uvc_video_complete is called.
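The application side of this step is just one more ioctl. A minimal sketch, reusing the variable names assumed from the surrounding application code (iFd, iError, DBG_PRINTF, err_exit):

/* Start streaming: after this the driver submits the URBs and the camera
 * begins filling the queued buffers. */
int iType = V4L2_BUF_TYPE_VIDEO_CAPTURE;

iError = ioctl(iFd, VIDIOC_STREAMON, &iType);
if (iError) {
    DBG_PRINTF("Unable to start capture.\n");
    goto err_exit;
}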
The uvc_video_complete function is shown below:
static void uvc_video_complete(struct urb *urb)
{
    struct uvc_streaming *stream = urb->context;
    struct uvc_video_queue *queue = &stream->queue;
    struct uvc_buffer *buf = NULL;
    unsigned long flags;
    int ret;

    switch (urb->status) {
    case 0:
        break;

    default:
        uvc_printk(KERN_WARNING, "Non-zero status (%d) in video "
            "completion handler.\n", urb->status);

    case -ENOENT:       /* usb_kill_urb() called. */
        if (stream->frozen)
            return;

    case -ECONNRESET:   /* usb_unlink_urb() called. */
    case -ESHUTDOWN:    /* The endpoint is being disabled. */
        uvc_queue_cancel(queue, urb->status == -ESHUTDOWN);
        return;
    }

    spin_lock_irqsave(&queue->irqlock, flags);
    /* If the irqqueue list is not empty, point buf at its first node:
     * the data in the URB will be copied into that buffer. */
    if (!list_empty(&queue->irqqueue))
        buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
    spin_unlock_irqrestore(&queue->irqlock, flags);

    /* "decode" here essentially copies the URB data into the first node
     * of the irqqueue list. */
    stream->decode(urb, stream, buf);

    /* Resubmit the URB. */
    if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
        uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n", ret);
    }
}
Searching the kernel source for stream->decode, we find stream->decode = uvc_video_decode_isoc. Inside uvc_video_decode_isoc we find the fragment below, whose job is to copy the data held in the URB (the data variable below) into the buffer that corresponds to the first node of the irqqueue list. The call path is fairly involved, so only the key part is shown here; see the source code for the full analysis.
/* Copy the video data to the buffer. */
maxlen = buf->buf.length - buf->buf.bytesused;
mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
nbytes = min((unsigned int)len, maxlen);
memcpy(mem, data, nbytes);
buf->buf.bytesused += nbytes;
The node is then removed from the irqqueue list and the application process (which went to sleep while waiting for data) is woken up:
list_del(&buf->queue);
wake_up(&buf->wait);
8: VIDIOC_DQBUF
Once the application has been woken up it calls VIDIOC_DQBUF: it passes the v4l2_buffer structure variable tV4l2Buf to the driver, the driver copies the information of the first node of the mainqueue list into tV4l2Buf (we mainly use tV4l2Buf.index), and then removes that node from the mainqueue list.
With tV4l2Buf.index the application knows that ptVideoDevice->pucVideBuf[tV4l2Buf.index] now contains data, and it stores that address in ptVideoBuf->tPixelDatas.aucPixelDatas (which always points to whichever buffer holds the latest image). At this point one frame of image data has made its way from the hardware to the application.
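The notes mention that the application sleeps until data is available; a minimal sketch of that wait using poll(), with the same assumed names (ptVideoDevice->iFd, DBG_PRINTF) as the surrounding code:

#include <poll.h>

/* Sketch: block until the driver wakes us up (wake_up(&buf->wait)) because a frame is ready. */
struct pollfd tFds[1];
int iRet;

tFds[0].fd     = ptVideoDevice->iFd;
tFds[0].events = POLLIN;

iRet = poll(tFds, 1, -1);   /* -1: wait forever; a timeout in ms could be used instead */
if (iRet <= 0) {
    DBG_PRINTF("poll error!\n");
    return -1;
}
/* POLLIN is set: ioctl(VIDIOC_DQBUF) can now be called without blocking. */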
Application code:
/* VIDIOC_DQBUF */
memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
tV4l2Buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
tV4l2Buf.memory = V4L2_MEMORY_MMAP;

iRet = ioctl(ptVideoDevice->iFd, VIDIOC_DQBUF, &tV4l2Buf);
if (iRet < 0) {
    DBG_PRINTF("Unable to dequeue buffer.\n");
    return -1;
}
ptVideoDevice->iVideoBufCurIndex = tV4l2Buf.index;

ptVideoBuf->iPixelFormat        = ptVideoDevice->iPixelFormat;
ptVideoBuf->tPixelDatas.iWidth  = ptVideoDevice->iWidth;
ptVideoBuf->tPixelDatas.iHeight = ptVideoDevice->iHeight;
ptVideoBuf->tPixelDatas.iBpp    = (ptVideoDevice->iPixelFormat == V4L2_PIX_FMT_YUYV)   ? 16 :
                                  (ptVideoDevice->iPixelFormat == V4L2_PIX_FMT_MJPEG)  ? 0  :
                                  (ptVideoDevice->iPixelFormat == V4L2_PIX_FMT_RGB565) ? 16 :
                                  0;
ptVideoBuf->tPixelDatas.iLineBytes  = ptVideoDevice->iWidth * ptVideoBuf->tPixelDatas.iBpp / 8;
ptVideoBuf->tPixelDatas.iTotalBytes = tV4l2Buf.bytesused;

/* Get the address of the buffer that holds the new image. */
ptVideoBuf->tPixelDatas.aucPixelDatas = ptVideoDevice->pucVideBuf[tV4l2Buf.index];
Part of the driver code:
/* Take the first node off the mainqueue list... */
buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
/* ...and remove it from the mainqueue list. */
list_del(&buf->stream);
/* Same helper as in step 5 (VIDIOC_QUERYBUF): copy the information of buf
 * (the first node of mainqueue) into v4l2_buf (we mainly use v4l2_buf.index). */
__uvc_query_buffer(buf, v4l2_buf);
9: VIDIOC_QBUF again. To keep receiving frames in a loop, we must call VIDIOC_QBUF to put the node that was removed from the two lists back onto them.
The application sets tV4l2Buf.index = ptVideoDevice->iVideoBufCurIndex (recorded from tV4l2Buf.index in step 8) to identify which buffer was removed from the lists last time, puts it back onto the lists, and then waits for the next frame to arrive.
struct v4l2_buffer tV4l2Buf;
int iError;

memset(&tV4l2Buf, 0, sizeof(struct v4l2_buffer));
tV4l2Buf.index  = ptVideoDevice->iVideoBufCurIndex;
tV4l2Buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
tV4l2Buf.memory = V4L2_MEMORY_MMAP;

iError = ioctl(ptVideoDevice->iFd, VIDIOC_QBUF, &tV4l2Buf);
if (iError) {
    DBG_PRINTF("Unable to queue buffer.\n");
    return -1;
}
return 0;
The driver side is the same uvc_queue_buffer function already shown in step 6.
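For completeness: the VIDIOC_STREAMOFF step from the overview at the top is not analysed in these notes. The application side of tearing everything down might look roughly like this (a sketch only, reusing the assumed ptVideoDevice fields from above; needs <sys/mman.h> and <unistd.h>):

/* Stop streaming, unmap the buffers and close the device. */
int iType = V4L2_BUF_TYPE_VIDEO_CAPTURE;
int i;

if (ioctl(ptVideoDevice->iFd, VIDIOC_STREAMOFF, &iType))
    DBG_PRINTF("Unable to stop capture.\n");

for (i = 0; i < ptVideoDevice->iVideoBufCnt; i++)
    munmap(ptVideoDevice->pucVideBuf[i], ptVideoDevice->iVideoBufMaxLen);

close(ptVideoDevice->iFd);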
Finally, here are a few of the structures used above, for easy reference.
1. T_VideoBuf
typedef struct VideoBuf {
    T_PixelDatas tPixelDatas;
    int iPixelFormat;
} T_VideoBuf, *PT_VideoBuf;

/* Pixel data of one image */
typedef struct PixelDatas {
    int iWidth;      /* width: number of pixels per row */
    int iHeight;     /* height: number of pixels per column */
    int iBpp;        /* bits per pixel */
    int iLineBytes;  /* bytes per row */
    int iTotalBytes; /* total number of bytes */
    unsigned char *aucPixelDatas; /* where the pixel data lives; always points to
                                   * the buffer that holds the newest frame */
} T_PixelDatas, *PT_PixelDatas;
2. VideoDevice
struct VideoDevice {
    int iFd;
    int iPixelFormat;
    int iWidth;
    int iHeight;
    int iVideoBufCnt;
    int iVideoBufMaxLen;
    int iVideoBufCurIndex;
    unsigned char *pucVideBuf[NB_BUFFER]; /* mapped via mmap; each element points to the start
                                           * of one buffer (e.g. mem + 1 * offset, mem + 2 * offset) */
    /* operations */
    PT_VideoOpr ptOPr;
};
3. uvc_video_queue
struct uvc_video_queue {
    enum v4l2_buf_type type;
    void *mem;              /* where the image data is actually stored */
    unsigned int flags;

    unsigned int count;
    unsigned int buf_size;
    unsigned int buf_used;
    struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS]; /* n buffers; each records bookkeeping
                                                      * such as the buffer's offset and index */
    struct mutex mutex;     /* protects buffers and mainqueue */
    spinlock_t irqlock;     /* protects irqqueue */

    struct list_head mainqueue;  /* list used by the application (list head) */
    struct list_head irqqueue;   /* list used by the driver (list head) */
};
4. uvc_buffer
struct uvc_buffer {
    unsigned long vma_use_count;
    struct list_head stream;    /* node on the list used by the application */

    /* Touched by interrupt handler. */
    struct v4l2_buffer buf;
    struct list_head queue;     /* node on the list used by the driver */
    wait_queue_head_t wait;
    enum uvc_buffer_state state;
    unsigned int error;
};
5. v4l2_buffer
struct v4l2_buffer {
    __u32 index;                /* the buffer's index number */
    enum v4l2_buf_type type;
    __u32 bytesused;
    __u32 flags;
    enum v4l2_field field;
    struct timeval timestamp;
    struct v4l2_timecode timecode;
    __u32 sequence;

    /* memory location */
    enum v4l2_memory memory;
    union {
        __u32 offset;           /* offset of this buffer; the image data actually
                                 * lives at mem + n * offset */
        unsigned long userptr;
        struct v4l2_plane *planes;
    } m;
    __u32 length;
    __u32 input;
    __u32 reserved;
};