
A First Look at TI DVSDK: Where Video Data Comes From and Where It Goes

November 21, 2013 · Application Development

This article is based on DVSDK 3; the hardware platform is the ZMV6467.

DVSDK is the complete video software development kit that TI provides for DaVinci platform developers. It is very powerful: it hides most of the details of video encoding and decoding, so users only need to care about their application. But that same power also makes it enormous, and newcomers can easily feel lost or have no idea how to start developing. The TI DVSDK study notes that follow will analyze DVSDK step by step, writing up the problems I hit and the experience I gained while learning to develop with it. If you find mistakes in these articles, please point them out.

ZMV6467 functional block diagram

The DM6467 accepts BT.656, BT.1120, or raw data input, up to 1080p at 30 fps; if you need 1080p at 60 fps, the 1 GHz DM6467T supports it. (For scale: raw 4:2:2 video at 1080p30 is roughly 1920 × 1080 × 2 bytes × 30 ≈ 124 MB/s, so doubling the frame rate roughly doubles the bandwidth at every stage downstream.)

1) Analog HD component input: a video decoder chip converts it to BT.1120 data, which then enters the CPU through the VPIF interface.

2) Analog SD input: a video decoder chip converts it to BT.656 data, which then enters the CPU through the VPIF interface.

3) A digital camera (e.g. a CMOS sensor) enters the CPU directly through the VPIF interface. Note that the DM6467 can only display and encode (H.264) data in the semi-planar YUV 4:2:0 format, so the YCbCr or RGB data a digital sensor produces has to be converted after it enters the CPU. The conversion can run on the ARM or on the DSP, as sketched below.
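As a rough illustration of what the ARM-side version of that conversion amounts to (my own sketch, not DVSDK code; it assumes tightly packed semi-planar frames with no line padding), a 4:2:2 NV16 frame can be reduced to a 4:2:0 NV12 frame by averaging each vertical pair of chroma rows:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper, not part of DVSDK: convert semi-planar 4:2:2 (NV16)
 * to semi-planar 4:2:0 (NV12). Both layouts store a full-size Y plane
 * followed by an interleaved CbCr plane; NV16 has one chroma row per luma
 * row, NV12 one per two luma rows. Assumes no line padding. */
static void nv16_to_nv12(const uint8_t *src, uint8_t *dst,
                         int width, int height)
{
    const uint8_t *srcC = src + (size_t)width * height; /* NV16 chroma plane */
    uint8_t *dstC = dst + (size_t)width * height;       /* NV12 chroma plane */
    int x, y;

    /* The luma plane is identical in both formats */
    memcpy(dst, src, (size_t)width * height);

    /* Average each vertical pair of chroma rows into one output row; the
     * CbCr interleaving is preserved because we stay column-aligned */
    for (y = 0; y < height / 2; y++) {
        const uint8_t *row0 = srcC + (size_t)(2 * y) * width;
        const uint8_t *row1 = row0 + width;
        for (x = 0; x < width; x++)
            dstC[(size_t)y * width + x] = (uint8_t)((row0[x] + row1[x] + 1) / 2);
    }
}

In the encode demo analyzed at the end of this article, the equivalent job is done with DMAI's Ccv color-conversion module (Ccv_execute) rather than hand-written C.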

Once the data is inside the CPU, the next step is to capture it; this part only involves the ARM side.

Data flow:

----> [vpif_capture driver] ----> [V4L2 driver] ----> [application layer]

For now we will not look at the video decoder driver or the vpif_capture driver, since neither affects our analysis of DVSDK.

The V4L2 driver here is no different from the V4L2 drivers we normally use: it provides the usual interfaces for accessing video devices. But to simplify V4L2 programming and help users write video applications faster, TI wrapped another layer on top of it called DMAI. So what is DMAI? Since more new names are about to appear, let's first look at a simple block diagram of DVSDK.
A simplified block diagram of DVSDK

The diagram shows that DMAI unifies access to the video codecs (Codec Engine, CE), contiguous memory management (CMEM), and V4L2 behind a single set of interfaces, so an application developer only needs to care about the data's business logic rather than the plumbing underneath.
But as low-level developers it is still worth looking at how exactly DMAI talks to CE, V4L2, and CMEM and wraps them up, and only then at how to use DMAI to build applications.
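To see what that buys the application developer, here is a minimal sketch of a capture loop on top of DMAI, based on the APIs walked through below. The default-attribute name Capture_Attrs_DM6467_DEFAULT and the error handling are simplified assumptions, not verbatim demo code:

#include <ti/sdo/dmai/Dmai.h>
#include <ti/sdo/dmai/Buffer.h>
#include <ti/sdo/dmai/Capture.h>

/* Sketch of a capture loop on top of DMAI (assumes DMAI ships DM6467
 * default capture attributes; adjust attrs for your input standard). */
Int captureFrames(Int numFrames)
{
    Capture_Attrs  attrs = Capture_Attrs_DM6467_DEFAULT;
    Capture_Handle hCapture;
    Buffer_Handle  hBuf;
    Int            i;

    Dmai_init();

    /* Passing NULL makes Capture_create allocate the driver buffers itself
     * (through _Dmai_v4l2DriverAlloc and ultimately CMEM, as shown below) */
    hCapture = Capture_create(NULL, &attrs);
    if (hCapture == NULL)
        return -1;

    for (i = 0; i < numFrames; i++) {
        if (Capture_get(hCapture, &hBuf) < 0)   /* dequeue a filled frame */
            break;

        /* ... process Buffer_getUserPtr(hBuf) here ... */

        if (Capture_put(hCapture, hBuf) < 0)    /* hand it back to the driver */
            break;
    }

    Capture_delete(hCapture);
    return (i == numFrames) ? 0 : -1;
}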
1. DMAI and V4L2 (DMAI version: 2_20_00_15).
In the dmai_2_20_00_15/packages/ti/sdo/dmai/linux/dm6467 directory there is a file called Capture.c, which contains all of DMAI's wrappers around V4L2. Let's look at one function from this file; it creates an operating handle for a video capture device.
/******************************************************************************
 * Capture_create
 ******************************************************************************/
Capture_Handle Capture_create(BufTab_Handle hBufTab, Capture_Attrs *attrs)
{
 struct v4l2_capability cap;
 struct v4l2_cropcap cropCap;
 struct v4l2_crop crop;
 struct v4l2_format fmt;
 enum v4l2_buf_type type;
 Capture_Handle hCapture;
 VideoStd_Type videoStd;
 Int32 width, height;
 Uint32 pixelFormat;

assert(attrs);
 Dmai_clear(fmt);

/* Allocate space for state object */
 hCapture = calloc(1, sizeof(Capture_Object));

if (hCapture == NULL) {
 Dmai_err0("Failed to allocate space for Capture Object\n");
 return NULL;
 }

/* User allocated buffers by default */
 hCapture->userAlloc = TRUE;

/* Open the V4L2 video capture device */
 hCapture->fd = open(attrs->captureDevice, O_RDWR, 0);

if (hCapture->fd == -1) {
 Dmai_err2("Cannot open %s (%s)\n", attrs->captureDevice,
 strerror(errno));
 cleanup(hCapture);
 return NULL;
 }

/* See if an input is connected, and if so which video standard it uses */
 if (Capture_detectVideoStd(hCapture, &videoStd, attrs) < 0) {
     cleanup(hCapture);
     return NULL;
 }

 hCapture->videoStd = videoStd;

if (VideoStd_getResolution(videoStd, &width, &height) < 0) {
     cleanup(hCapture);
     Dmai_err0("Failed to get resolution of capture video standard\n");
     return NULL;
 }

 /* Query for capture device capabilities */
 /* From here on we are calling ioctl on the V4L2 device directly */
 if (ioctl(hCapture->fd, VIDIOC_QUERYCAP, &cap) == -1) {
     if (errno == EINVAL) {
         Dmai_err1("%s is no V4L2 device\n", attrs->captureDevice);
         cleanup(hCapture);
         return NULL;
     }
     Dmai_err2("Failed VIDIOC_QUERYCAP on %s (%s)\n", attrs->captureDevice,
               strerror(errno));
     cleanup(hCapture);
     return NULL;
 }

if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
 Dmai_err1("%s is not a video capture device\n", attrs->captureDevice);
 cleanup(hCapture);
 return NULL;
 }

if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
 Dmai_err1("%s does not support streaming i/o\n", attrs->captureDevice);
 cleanup(hCapture);
 return NULL;
 }

fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 /* Get the current frame format from the V4L2 device */
 if (ioctl(hCapture->fd, VIDIOC_G_FMT, &fmt) == -1) {
 Dmai_err2("Failed VIDIOC_G_FMT on %s (%s)\n", attrs->captureDevice,
 strerror(errno));
 cleanup(hCapture);
 return NULL;
 }

fmt.fmt.pix.width = width;
 fmt.fmt.pix.height = height;
 fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

switch(attrs->colorSpace) {
 case ColorSpace_UYVY:
 fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
 break;
 case ColorSpace_YUV420PSEMI:
 fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
 break;
 case ColorSpace_YUV422PSEMI:
 fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV16;
 break;
 default:
     Dmai_err1("Unsupported color format %d\n", attrs->colorSpace);
 cleanup(hCapture);
 return NULL;
 }

if ((videoStd == VideoStd_BAYER_CIF) || (videoStd == VideoStd_BAYER_VGA) ||
 (videoStd == VideoStd_BAYER_1280)) {
 fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
 }

fmt.fmt.pix.bytesperline = BufferGfx_calcLineLength(fmt.fmt.pix.width,
 attrs->colorSpace);
 fmt.fmt.pix.sizeimage = BufferGfx_calcSize(attrs->videoStd, attrs->colorSpace);

//printf("DMAI: pix.bytesperline= %d, pix.sizeimage= %d\r\n",fmt.fmt.pix.bytesperline,fmt.fmt.pix.sizeimage);
 pixelFormat = fmt.fmt.pix.pixelformat;

 if ((videoStd == VideoStd_CIF) || (videoStd == VideoStd_SIF_PAL) ||
 (videoStd == VideoStd_SIF_NTSC) || (videoStd == VideoStd_D1_PAL) ||
 (videoStd == VideoStd_D1_NTSC) || (videoStd == VideoStd_1080I_30) ||
 (videoStd == VideoStd_1080I_25)) {
 fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
 } else {
 fmt.fmt.pix.field = V4L2_FIELD_NONE;
 }

/* Set the frame format on the V4L2 capture device */
 if (ioctl(hCapture->fd, VIDIOC_S_FMT, &fmt) == -1) {
 printf("Failed VIDIOC_S_FMT on %s (%s)\n", attrs->captureDevice,
 strerror(errno));
 cleanup(hCapture);
 return NULL;
 }

if ((fmt.fmt.pix.width != width) || (fmt.fmt.pix.height != height)) {
 Dmai_err4("Failed to set resolution %d x %d (%d x %d)\n", width,
 height, fmt.fmt.pix.width, fmt.fmt.pix.height);
 cleanup(hCapture);
 return NULL;
 }

if (pixelFormat != fmt.fmt.pix.pixelformat) {
 Dmai_err2("Pixel format 0x%x not supported. Received 0x%x\n",
 pixelFormat, fmt.fmt.pix.pixelformat);
 cleanup(hCapture);
 return NULL;
 }

Dmai_dbg3("Video input connected size %dx%d pitch %d\n",
 fmt.fmt.pix.width, fmt.fmt.pix.height, fmt.fmt.pix.bytesperline);

/* Query for video input cropping capability */

if (attrs->cropWidth > 0 && attrs->cropHeight > 0) {

cropCap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 if (ioctl(hCapture->fd, VIDIOC_CROPCAP, &cropCap) == -1) {
 Dmai_err2("VIDIOC_CROPCAP failed on %s (%s)\n", attrs->captureDevice,
 strerror(errno));
 cleanup(hCapture);
 return NULL;
 }

if (attrs->cropX & 0x1) {
     Dmai_err1("Crop X offset (%ld) needs to be even\n", attrs->cropX);
 cleanup(hCapture);
 return NULL;
 }

crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 crop.c.left = attrs->cropX;
 crop.c.top = attrs->cropY;
 crop.c.width = attrs->cropWidth;
 crop.c.height = hCapture->topOffset ? attrs->cropHeight + 4 + 2 :
 attrs->cropHeight;

Dmai_dbg4("Setting capture cropping at %dx%d size %dx%d\n",
 crop.c.left, crop.c.top, crop.c.width, crop.c.height);

/* Crop the image depending on requested image size */
 if (ioctl(hCapture->fd, VIDIOC_S_CROP, &crop) == -1) {
 Dmai_err2("VIDIOC_S_CROP failed on %s (%s)\n", attrs->captureDevice,
 strerror(errno));
 cleanup(hCapture);
 return NULL;
 }
 }

if (hBufTab == NULL) {
 hCapture->userAlloc = FALSE;

/* The driver allocates the buffers (ultimately through CMEM) */
 if (_Dmai_v4l2DriverAlloc(hCapture->fd,
 attrs->numBufs,
 V4L2_BUF_TYPE_VIDEO_CAPTURE,
 &hCapture->bufDescs,
 &hBufTab,
 hCapture->topOffset,
 attrs->colorSpace) < 0) {
     Dmai_err1("Failed to allocate capture driver buffers on %s\n",
               attrs->captureDevice);
 cleanup(hCapture);
 return NULL;
 }
 }
 else {
 /* Make the driver use the user-supplied buffers; they only need to be
  * registered with the driver's queue management */
 if (_Dmai_v4l2UserAlloc(hCapture->fd,
 attrs->numBufs,
 V4L2_BUF_TYPE_VIDEO_CAPTURE,
 &hCapture->bufDescs,
 hBufTab,
 0, attrs->colorSpace) < 0) {
     Dmai_err1("Failed to initialize capture driver buffers on %s\n",
               attrs->captureDevice);
 cleanup(hCapture);
 return NULL;
 }
 }

hCapture->hBufTab = hBufTab;

/* Start the video streaming */
 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

/* Configuration complete; start the V4L2 stream */
 if (ioctl(hCapture->fd, VIDIOC_STREAMON, &type) == -1) {
 Dmai_err2("VIDIOC_STREAMON failed on %s (%s)\n", attrs->captureDevice,
 strerror(errno));
 cleanup(hCapture);
 return NULL;
 }

hCapture->started = TRUE;

return hCapture;
}

From the code above you can see that this one DMAI function wraps up a large number of V4L2 control operations: the caller only needs to pass in a buffer list and a set of capture attributes to create a video capture device. Once the device captures data it fills the caller's buffers, and the caller simply dequeues a buffer to get the video data. When no buffer list is passed in, the function calls _Dmai_v4l2DriverAlloc(), which allocates the video buffers and ultimately does so through CMEM; its implementation is examined in the next section.
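For completeness, here is a rough sketch of the other branch: the application builds its own CMEM-backed BufTab and hands it to Capture_create(), which then takes the _Dmai_v4l2UserAlloc() path instead. Names follow the code above; error handling is trimmed and the sizing is simplified:

#include <ti/sdo/dmai/Dmai.h>
#include <ti/sdo/dmai/BufTab.h>
#include <ti/sdo/dmai/BufferGfx.h>
#include <ti/sdo/dmai/Capture.h>

/* Sketch: allocate our own CMEM-backed BufTab and let the driver use it */
Capture_Handle createWithUserBuffers(Capture_Attrs *attrs,
                                     Int numBufs, Int32 bufSize)
{
    BufferGfx_Attrs gfxAttrs = BufferGfx_Attrs_DEFAULT;
    BufTab_Handle hBufTab;

    gfxAttrs.colorSpace = attrs->colorSpace;

    /* BufTab_create goes through Buffer_create -> Memory_alloc -> CMEM */
    hBufTab = BufTab_create(numBufs, bufSize,
                            BufferGfx_getBufferAttrs(&gfxAttrs));
    if (hBufTab == NULL)
        return NULL;

    /* A non-NULL BufTab makes Capture_create register these user buffers
     * with the driver via _Dmai_v4l2UserAlloc instead of allocating */
    return Capture_create(hBufTab, attrs);
}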

2. DMAI and CMEM.

See dmai_2_20_00_15/packages/ti/sdo/dmai/linux/dm6467/_VideoBuf.c:

_Dmai_v4l2DriverAlloc()
/******************************************************************************
 * _Dmai_v4l2DriverAlloc
 ******************************************************************************/
Int _Dmai_v4l2DriverAlloc(Int fd, Int numBufs, enum v4l2_buf_type type,
 struct _VideoBufDesc **bufDescsPtr,
 BufTab_Handle *hBufTabPtr, Int topOffset,
 ColorSpace_Type colorSpace)
{
 BufferGfx_Attrs gfxAttrs = BufferGfx_Attrs_DEFAULT;
 struct v4l2_requestbuffers req;
 struct v4l2_format fmt;
 _VideoBufDesc *bufDesc;
 Buffer_Handle hBuf;
 Int bufIdx;
 Int8 *virtPtr;

Dmai_clear(fmt);
 fmt.type = type;

if (ioctl(fd, VIDIOC_G_FMT, &fmt) == -1) {
 Dmai_err1("VIDIOC_G_FMT failed (%s)\n", strerror(errno));
 return Dmai_EFAIL;
 }

Dmai_clear(req);
 req.count = numBufs;
 req.type = type;
 req.memory = V4L2_MEMORY_MMAP;

/* Allocate buffers in the capture device driver (this sets up the
  * V4L2 buffer management queue) */
 if (ioctl(fd, VIDIOC_REQBUFS, &req) == -1) {
 Dmai_err1("VIDIOC_REQBUFS failed (%s)\n", strerror(errno));
 return Dmai_ENOMEM;
 }

if (req.count < numBufs || !req.count) {
 Dmai_err0("Insufficient device driver buffer memory\n");
 return Dmai_ENOMEM;
 }

/* Allocate space for buffer descriptors */
 *bufDescsPtr = calloc(numBufs, sizeof(_VideoBufDesc));

if (*bufDescsPtr == NULL) {
 Dmai_err0("Failed to allocate space for buffer descriptors\n");
 return Dmai_ENOMEM;
 }

gfxAttrs.dim.width = fmt.fmt.pix.width;
 gfxAttrs.dim.height = fmt.fmt.pix.height;
 gfxAttrs.dim.lineLength = fmt.fmt.pix.bytesperline;
 gfxAttrs.colorSpace = colorSpace;
 gfxAttrs.bAttrs.reference = TRUE;

/* Create the buffer table; this ends up in CMEM via Memory_alloc */
 *hBufTabPtr = BufTab_create(numBufs, fmt.fmt.pix.sizeimage,
 BufferGfx_getBufferAttrs(&gfxAttrs));

if (*hBufTabPtr == NULL) {
 return Dmai_ENOMEM;
 }

/* Put the created buffers on the queue and set up their attributes */
 for (bufIdx = 0; bufIdx < numBufs; bufIdx++) {
     bufDesc = &(*bufDescsPtr)[bufIdx];

     /* Ask for information about the driver buffer */
     Dmai_clear(bufDesc->v4l2buf);
 bufDesc->v4l2buf.type = type;
 bufDesc->v4l2buf.memory = V4L2_MEMORY_MMAP;
 bufDesc->v4l2buf.index = bufIdx;

/* Look up this buffer in the driver's queue */
 if (ioctl(fd, VIDIOC_QUERYBUF, &bufDesc->v4l2buf) == -1) {
 Dmai_err1("Failed VIDIOC_QUERYBUF (%s)\n", strerror(errno));
 return Dmai_EFAIL;
 }

/* Map the buffer and fill in its attributes */

 /* Map the driver buffer to user space */
 virtPtr = mmap(NULL,
 bufDesc->v4l2buf.length,
 PROT_READ | PROT_WRITE,
 MAP_SHARED,
 fd,
 bufDesc->v4l2buf.m.offset) + topOffset;

if (virtPtr == MAP_FAILED) {
 Dmai_err1("Failed to mmap buffer (%s)\n", strerror(errno));
 return Dmai_EFAIL;
 }

/* Initialize the Buffer with driver buffer information */
 hBuf = BufTab_getBuf(*hBufTabPtr, bufIdx);

Buffer_setNumBytesUsed(hBuf, fmt.fmt.pix.bytesperline *
 fmt.fmt.pix.height);
 Buffer_setUseMask(hBuf, gfxAttrs.bAttrs.useMask);
 Buffer_setUserPtr(hBuf, virtPtr);

/* Initialize buffer to black */
 _Dmai_blackFill(hBuf);

Dmai_dbg3("Driver buffer %d mapped to %#x has physical address "
 "%#lx\n", bufIdx, (Int) virtPtr, Buffer_getPhysicalPtr(hBuf));

bufDesc->hBuf = hBuf;

/* Queue the buffer back in the device driver */
 if (ioctl(fd, VIDIOC_QBUF, &bufDesc->v4l2buf) == -1) {
 Dmai_err1("VIODIOC_QBUF failed (%s)\n", strerror(errno));
 return Dmai_EFAIL;
 }
 }

return Dmai_EOK;
}

Let's continue with the Buffer_create() function, which BufTab_create() uses to create each buffer in the table.

See dmai_2_20_00_15/packages/ti/sdo/dmai/Buffer.c:

Buffer_create()
/******************************************************************************
 * Buffer_create
 ******************************************************************************/
Buffer_Handle Buffer_create(Int32 size, Buffer_Attrs *attrs)
{
 Buffer_Handle hBuf;
 UInt32 objSize;

if (attrs == NULL) {
 Dmai_err0("Must provide attrs\n");
 return NULL;
 }

if (attrs->type != Buffer_Type_BASIC &&
 attrs->type != Buffer_Type_GRAPHICS) {

Dmai_err1("Unknown Buffer type (%d)\n", attrs->type);
 return NULL;
 }

objSize = attrs->type == Buffer_Type_GRAPHICS ? sizeof(_BufferGfx_Object) :
 sizeof(_Buffer_Object);

hBuf = (Buffer_Handle) calloc(1, objSize);

if (hBuf == NULL) {
 Dmai_err0("Failed to allocate space for Buffer Object\n");
 return NULL;
 }

_Buffer_init(hBuf, size, attrs);

if (!attrs->reference) {

 /* This is where the CMEM interface is called to allocate the buffer */
 hBuf->userPtr = (Int8*)Memory_alloc(size, &attrs->memParams);

if (hBuf->userPtr == NULL) {
 printf("Failed to allocate memory.\n");
 free(hBuf);
 return NULL;
 }

/* Look up the buffer's physical address */
 hBuf->physPtr = Memory_getBufferPhysicalAddress(hBuf->userPtr,
 size, NULL);

Dmai_dbg3("Alloc Buffer of size %u at 0x%x (0x%x phys)\n",
 (Uns) size, (Uns) hBuf->userPtr, (Uns) hBuf->physPtr);
 }

hBuf->reference = attrs->reference;

return hBuf;
}
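Memory_alloc() is where DMAI leaves ordinary user-space allocation behind: it goes through the CMEM module from TI's linuxutils package, which hands out physically contiguous memory that the VPIF DMA and the DSP can address. For comparison, the same kind of allocation done directly against the CMEM user library looks roughly like this (a sketch; whether you use a pool or the heap depends on how the cmemk kernel module was loaded):

#include <stdio.h>
#include <cmem.h>   /* CMEM user library from TI linuxutils */

/* Sketch: allocate one physically contiguous buffer straight from CMEM */
int cmemDemo(unsigned int size)
{
    CMEM_AllocParams params = CMEM_DEFAULTPARAMS;
    void *virt;
    unsigned long phys;

    if (CMEM_init() < 0)            /* opens /dev/cmem */
        return -1;

    params.type  = CMEM_HEAP;       /* or CMEM_POOL, matching cmemk's config */
    params.flags = CMEM_NONCACHED;  /* skip cache maintenance for device I/O */

    virt = CMEM_alloc(size, &params);
    if (virt == NULL) {
        CMEM_exit();
        return -1;
    }

    /* The physical address is what a DMA engine or the DSP actually uses */
    phys = CMEM_getPhys(virt);
    printf("virt %p -> phys 0x%lx\n", virt, phys);

    CMEM_free(virt, &params);
    CMEM_exit();
    return 0;
}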

Now we have the data, sitting in the buffers created through CMEM. The simplest effective way to verify or use it is to display it directly. Look at the DVSDK block diagram again:

A simplified block diagram of DVSDK

The two red lines in the diagram mark the data paths. Getting the data onto the screen takes just two steps: first, create the V4L2 display output device; second, create display buffers and put the captured data into them.
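Put together, the two steps look roughly like this sketch. Display_Attrs_DM6467_VID_DEFAULT is assumed to be the DM6467 video-layer default in this DMAI release, and the zero-copy buffer swap is the same trick the encode demo uses, analyzed below:

#include <ti/sdo/dmai/Dmai.h>
#include <ti/sdo/dmai/Buffer.h>
#include <ti/sdo/dmai/Capture.h>
#include <ti/sdo/dmai/Display.h>

/* Sketch: preview captured frames by swapping buffers between the capture
 * queue and the display queue (both are CMEM-backed and the same size,
 * so no copying is needed). */
Int previewLoop(Int numFrames)
{
    Capture_Attrs cAttrs = Capture_Attrs_DM6467_DEFAULT;
    Display_Attrs dAttrs = Display_Attrs_DM6467_VID_DEFAULT;
    Capture_Handle hCapture;
    Display_Handle hDisplay;
    Buffer_Handle hCapBuf, hDisBuf;
    Int i;

    Dmai_init();

    hCapture = Capture_create(NULL, &cAttrs);  /* step 1a: capture device */
    hDisplay = Display_create(NULL, &dAttrs);  /* step 1b: display device */
    if (hCapture == NULL || hDisplay == NULL)
        return -1;

    for (i = 0; i < numFrames; i++) {
        if (Capture_get(hCapture, &hCapBuf) < 0)  /* filled frame */
            break;
        if (Display_get(hDisplay, &hDisBuf) < 0)  /* free display frame */
            break;
        if (Display_put(hDisplay, hCapBuf) < 0)   /* step 2: show it */
            break;
        if (Capture_put(hCapture, hDisBuf) < 0)   /* recycle for capture */
            break;
    }

    Display_delete(hDisplay);
    Capture_delete(hCapture);
    return 0;
}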

Creating the V4L2 display device is very similar to creating the V4L2 capture device; see

dmai_2_20_00_15/packages/ti/sdo/dmai/linux/dm6467/Display_v4l2.c


Display_v4l2_create()
/******************************************************************************
 * Display_v4l2_create
 ******************************************************************************/
Display_Handle Display_v4l2_create(BufTab_Handle hBufTab, Display_Attrs *attrs)
{
 struct v4l2_format fmt;
 enum v4l2_buf_type type;
 Display_Handle hDisplay;

assert(attrs);

Dmai_clear(fmt);

 /* delayStreamon not supported for this platform */
 if (attrs->delayStreamon == TRUE) {
 Dmai_err0("Support for delayed VIDIOC_STREAMON not implemented\n");
 return NULL;
 }

/* Allocate space for state object */
 hDisplay = calloc(1, sizeof(Display_Object));

if (hDisplay == NULL) {
 Dmai_err0("Failed to allocate space for Display Object\n");
 return NULL;
 }

hDisplay->userAlloc = TRUE;

/* Open the video display device */
 hDisplay->fd = open(attrs->displayDevice, O_RDWR, 0);

if (hDisplay->fd == -1) {
 Dmai_err2("Cannot open %s (%s)\n",
 attrs->displayDevice, strerror(errno));
 cleanup(hDisplay);
 return NULL;
 }

if(Display_detectVideoStd(hDisplay, attrs) != Dmai_EOK) {
 Dmai_err0("Display_detectVideoStd Failed\n");
 cleanup(hDisplay);
 return NULL;
 }
 /* Determine the video image dimensions */
 fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;

if (ioctl(hDisplay->fd, VIDIOC_G_FMT, &fmt) == -1) {
 Dmai_err0("Failed to determine video display format\n");
 cleanup(hDisplay);
 return NULL;
 }

fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 switch(attrs->colorSpace) {
 case ColorSpace_UYVY:
 fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
 break;
 case ColorSpace_YUV420PSEMI:
 fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
 break;
 case ColorSpace_YUV422PSEMI:
 fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV16;
 break;
 default:
     Dmai_err1("Unsupported color format %d\n", attrs->colorSpace);
 cleanup(hDisplay);
 return NULL;
 };

if (hBufTab == NULL) {
 fmt.fmt.pix.bytesperline = Dmai_roundUp(BufferGfx_calcLineLength(fmt.fmt.pix.width,
 attrs->colorSpace), 32);
 fmt.fmt.pix.sizeimage = BufferGfx_calcSize(attrs->videoStd, attrs->colorSpace);
#if 1
 } else {
 /* This will help user to pass lineLength to display driver. */
 Buffer_Handle hBuf;
 BufferGfx_Dimensions dim;

hBuf = BufTab_getBuf(hBufTab, 0);
 BufferGfx_getDimensions(hBuf, &dim);
 if((dim.height > fmt.fmt.pix.height) ||
 (dim.width > fmt.fmt.pix.width)) {
 Dmai_err2("User buffer size check failed %dx%d\n",
 dim.height, dim.width);
 cleanup(hDisplay);
 return NULL;
 }
 fmt.fmt.pix.bytesperline = dim.lineLength;
 fmt.fmt.pix.sizeimage = Buffer_getSize(hBuf);
 }
#endif
 Dmai_dbg4("Video output set to size %dx%d pitch %d imageSize %d\n",
 fmt.fmt.pix.width, fmt.fmt.pix.height,
 fmt.fmt.pix.bytesperline, fmt.fmt.pix.sizeimage);

if ((attrs->videoStd == VideoStd_CIF) || (attrs->videoStd == VideoStd_SIF_PAL) ||
 (attrs->videoStd == VideoStd_SIF_NTSC) || (attrs->videoStd == VideoStd_D1_PAL) ||
 (attrs->videoStd == VideoStd_D1_NTSC) || (attrs->videoStd == VideoStd_1080I_30) ||
 (attrs->videoStd == VideoStd_1080I_25)) {
 fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
 } else {
 fmt.fmt.pix.field = V4L2_FIELD_NONE;
 }

if (ioctl(hDisplay->fd, VIDIOC_S_FMT, &fmt) == -1) {
 Dmai_err2("Failed VIDIOC_S_FMT on %s (%s)\n", attrs->displayDevice,
 strerror(errno));
 cleanup(hDisplay);
 return NULL;
 }

/* Should the device driver allocate the display buffers? */
 if (hBufTab == NULL) {
 hDisplay->userAlloc = FALSE;

if (_Dmai_v4l2DriverAlloc(hDisplay->fd,
 attrs->numBufs,
 V4L2_BUF_TYPE_VIDEO_OUTPUT,
 &hDisplay->bufDescs,
 &hBufTab,
 0, attrs->colorSpace) < 0) {
     Dmai_err1("Failed to allocate display driver buffers on %s\n",
               attrs->displayDevice);
 cleanup(hDisplay);
 return NULL;
 }
 }
 else {
 hDisplay->userAlloc = TRUE;

if (_Dmai_v4l2UserAlloc(hDisplay->fd,
 attrs->numBufs,
 V4L2_BUF_TYPE_VIDEO_OUTPUT,
 &hDisplay->bufDescs,
 hBufTab,
 0, attrs->colorSpace) < 0) {
     Dmai_err1("Failed to initialize display driver buffers on %s\n",
               attrs->displayDevice);
 cleanup(hDisplay);
 return NULL;
 }
 }

/* Start the video streaming */
 type = V4L2_BUF_TYPE_VIDEO_OUTPUT;

if (ioctl(hDisplay->fd, VIDIOC_STREAMON, &type) == -1) {
 Dmai_err2("VIDIOC_STREAMON failed on %s (%s)\n", attrs->displayDevice,
 strerror(errno));
 cleanup(hDisplay);
 return NULL;
 }

hDisplay->started = TRUE;
 hDisplay->hBufTab = hBufTab;
 hDisplay->displayStd = Display_Std_V4L2;

return hDisplay;
}

Creating the display device does not need to check for a video standard, because the output standard is decided by the caller. With the display device created, how do we get the captured data onto it? TI provides a demo called encode, which you can find in dvsdk_demos_3_10_00_16.

The demo manages the video capture buffers and the video display buffers in a clever way; let's analyze it next.

encode demo

Because a single display buffer and a single capture buffer are exactly the same size, the two can be exchanged directly.

The code fragment below, taken from the demo, shows how this buffer exchange is actually implemented.


Capture and display buffer exchange
 /* Loop of the capture-and-display thread */
 while (!gblGetQuit()) {

/* Dequeue a filled capture buffer */
 if (Capture_get(hCapture, &hCapBuf) < 0) {
 ERR("Failed to get capture buffer\n");
 cleanup(THREAD_FAILURE);
 }

/* Dequeue a free display buffer */
 if (Display_get(hDisplay, &hDisBuf) < 0) {
     ERR("Failed to get display buffer\n");
     cleanup(THREAD_FAILURE);
 }

 /* Overlay the OSD */
 if (envp->osd) {
 /* Get the current transparency */
 trans = UI_getTransparency(envp->hUI);

if (trans != oldTrans) {
 /* Change the transparency in the palette */
 for (i = 0; i < 4; i++) {
 bConfigParams.palette[i][3] = trans;
 }

/* Reconfigure the blending job if transparency has changed */
 if (Blend_config(hBlend, NULL, hBmpBuf, hCapBuf, hCapBuf,
                  &bConfigParams) < 0) {
     ERR("Failed to configure blending job\n");
     cleanup(THREAD_FAILURE);
 }
 }

 /*
  * Because the whole screen is shown even if -r is used,
  * reset the dimensions while blending to make sure the OSD
  * always ends up in the same place. After blending, restore
  * the real dimensions.
  */
 BufferGfx_getDimensions(hCapBuf, &srcDim);
 BufferGfx_resetDimensions(hCapBuf);

 /*
  * Lock the screen, making sure no changes are done to
  * the bitmap while we render it.
  */
 hBmpBuf = UI_lockScreen(envp->hUI);

/* Execute the blending job to draw the OSD */
 /* The OSD is blended directly onto the captured frame */
 if (Blend_execute(hBlend, hBmpBuf, hCapBuf, hCapBuf) < 0) {
     ERR("Failed to execute blending job\n");
     cleanup(THREAD_FAILURE);
 }

 UI_unlockScreen(envp->hUI);

BufferGfx_setDimensions(hCapBuf, &srcDim);
 }

/* Color convert the captured buffer from 422Psemi to 420Psemi */
 /* H.264 encoding needs 4:2:0 input; hDstBuf comes from the video
  * encode buffer queue */
 if (Ccv_execute(hCcv, hCapBuf, hDstBuf) < 0) {
     ERR("Failed to execute color conversion job\n");
     cleanup(THREAD_FAILURE);
 }

 /* Send the color-converted buffer to the video thread for encoding */
 if (Fifo_put(envp->hOutFifo, hDstBuf) < 0) {
 ERR("Failed to send buffer to display thread\n");
 cleanup(THREAD_FAILURE);
 }

BufferGfx_resetDimensions(hCapBuf);

/* Send the preview to the display device driver */
 /* i.e. queue the captured frame on the display queue */
 if (Display_put(hDisplay, hCapBuf) < 0) {
 ERR("Failed to put display buffer\n");
 cleanup(THREAD_FAILURE);
 }

BufferGfx_resetDimensions(hDisBuf);

/* Return a buffer to the capture driver */
 /* i.e. the old display buffer goes back onto the capture queue */
 if (Capture_put(hCapture, hDisBuf) < 0) {
     ERR("Failed to put capture buffer\n");
     cleanup(THREAD_FAILURE);
 }

 /* Increment statistics for the user interface */
 gblIncFrames();

 /* Get a buffer from the video thread for the next color conversion */
 fifoRet = Fifo_get(envp->hInFifo, &hDstBuf);

if (fifoRet < 0) {
 ERR("Failed to get buffer from video thread\n");
 cleanup(THREAD_FAILURE);
 }

/* Did the video thread flush the fifo? */
 if (fifoRet == Dmai_EFLUSH) {
 cleanup(THREAD_SUCCESS);
 }
 }

At this point we know where the video data comes from and where it goes. But now that the data is here, we are not letting it go that easily: next we will encode and save the captured frames, then send the encoded data out with live555 to build an RTSP video server.
