
Tutorial: C++ xioctl function code examples

51自学网 2021-06-03 10:21:17
  C++
This tutorial on C++ xioctl function code examples is quite practical; we hope it helps you.

This article collects typical usage examples of the xioctl function in C++. If you have been wondering exactly how xioctl is used in C++, how to call it, or what working example code looks like, the hand-picked function examples below may help.

A total of 30 xioctl code examples are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
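None of the examples below includes the definition of xioctl itself. In V4L2 capture code it is conventionally a small wrapper around ioctl() that simply retries the call when it is interrupted by a signal. The following is a minimal sketch of such a wrapper for reference only; the exact name, signature and error handling differ from project to project, so treat it as an assumption rather than the code used by the projects quoted below.

#include <errno.h>
#include <sys/ioctl.h>

/* Minimal sketch of a typical xioctl() helper (an assumption for
 * illustration, not taken from any of the projects below):
 * retry ioctl() as long as it fails because a signal interrupted it. */
static int xioctl(int fd, unsigned long request, void *arg)
{
        int r;

        do {
                r = ioctl(fd, request, arg);
        } while (-1 == r && EINTR == errno);

        return r;
}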

Example 1: initdevice

int initdevice(void) {
	struct v4l2_capability cap;
	struct v4l2_cropcap cropcap;
	struct v4l2_crop crop;
	struct v4l2_format fmt;
	unsigned int min;

	if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap)) {
		if (EINVAL == errno) {
			LOGE("%s is no V4L2 device", dev_name);
			return ERROR_LOCAL;
		} else {
			return errnoexit("VIDIOC_QUERYCAP");
		}
	}

	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
		LOGE("%s is no video capture device", dev_name);
		return ERROR_LOCAL;
	}

	if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
		LOGE("%s does not support streaming i/o", dev_name);
		return ERROR_LOCAL;
	}

	CLEAR(cropcap);
	cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap)) {
		crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		crop.c = cropcap.defrect;
		if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop)) {
			switch (errno) {
			case EINVAL:
				/* Cropping not supported. */
				break;
			default:
				/* Errors ignored. */
				break;
			}
		}
	}

	CLEAR(fmt);
	fmt.type                = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width       = IMG_WIDTH;
	fmt.fmt.pix.height      = IMG_HEIGHT;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;
	if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
		return errnoexit("VIDIOC_S_FMT");

	/* Buggy driver paranoia. */
	min = fmt.fmt.pix.width * 2;
	if (fmt.fmt.pix.bytesperline < min)
		fmt.fmt.pix.bytesperline = min;
	min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
	if (fmt.fmt.pix.sizeimage < min)
		fmt.fmt.pix.sizeimage = min;

	return initmmap();
}
Developer: james089, Project: test1, Lines: 69


Example 2: init_mmap

static void init_mmap(void)
{
        struct v4l2_requestbuffers req;

        CLEAR(req);

        req.count = 4;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_MMAP;

        if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
                if (EINVAL == errno) {
                        fprintf(stderr, "%s does not support "
                                "memory mapping\n", dev_name);
                        exit(EXIT_FAILURE);
                } else {
                        errno_exit("VIDIOC_REQBUFS");
                }
        }

        if (req.count < 2) {
                fprintf(stderr, "Insufficient buffer memory on %s\n",
                        dev_name);
                exit(EXIT_FAILURE);
        }

        printf("req.count: %d\n", req.count);

        buffers = calloc(req.count, sizeof(*buffers));

        if (!buffers) {
                fprintf(stderr, "Out of memory\n");
                exit(EXIT_FAILURE);
        }

        int count = 0;

        for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
                struct v4l2_buffer buf;

                CLEAR(buf);

                buf.type        = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory      = V4L2_MEMORY_MMAP;
                buf.index       = n_buffers;

                if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
                        errno_exit("VIDIOC_QUERYBUF");

                printf("mmap try count: %d\n", n_buffers);
                printf("buf.length: %d\n", buf.length);
                printf("buf.m.offset: %d\n", buf.m.offset);

                buffers[n_buffers].length = buf.length;
                buffers[n_buffers].start =
                        mmap(NULL /* start anywhere */,
                             buf.length,
                             PROT_READ | PROT_WRITE /* required */,
                             MAP_SHARED /* recommended */,
                             fd, buf.m.offset);

                if (MAP_FAILED == buffers[n_buffers].start)
                        errno_exit("mmap");
        }
}
Developer: crazysjf, Project: sjf-repo, Lines: 64


Example 3: read_frame

static int read_frame(void)
{
        struct v4l2_buffer buf;
        unsigned int i;

        switch (io) {
        case IO_METHOD_READ:
                if (-1 == read(fd, buffers[0].start, buffers[0].length)) {
                        switch (errno) {
                        case EAGAIN:
                                return 0;
                        case EIO:
                                /* Could ignore EIO, see spec. */
                                /* fall through */
                        default:
                                errno_exit("read");
                        }
                }

                process_image(buffers[0].start, buffers[0].length);
                break;

        case IO_METHOD_MMAP:
                CLEAR(buf);

                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_MMAP;

                if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
                        switch (errno) {
                        case EAGAIN:
                                return 0;
                        case EIO:
                                /* Could ignore EIO, see spec. */
                                /* fall through */
                        default:
                                errno_exit("VIDIOC_DQBUF");
                        }
                }

                assert(buf.index < n_buffers);

                process_image(buffers[buf.index].start, buf.bytesused);

                if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
                        errno_exit("VIDIOC_QBUF");
                break;

        case IO_METHOD_USERPTR:
                CLEAR(buf);

                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_USERPTR;

                if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
                        switch (errno) {
                        case EAGAIN:
                                return 0;
                        case EIO:
                                /* Could ignore EIO, see spec. */
                                /* fall through */
                        default:
                                errno_exit("VIDIOC_DQBUF");
                        }
                }

                for (i = 0; i < n_buffers; ++i)
                        if (buf.m.userptr == (unsigned long)buffers[i].start
                            && buf.length == buffers[i].length)
                                break;

                assert(i < n_buffers);

                process_image((void *)buf.m.userptr, buf.bytesused);

                if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
                        errno_exit("VIDIOC_QBUF");
                break;
        }

        return 1;
}
Developer: crazysjf, Project: sjf-repo, Lines: 91


Example 4: StartCapture

    void StartCapture()
    {
        if( mCapturing ) THROW("already capturing!");
        mCapturing = true;

        // grab current frame format
        v4l2_pix_format fmt = GetFormat();

        // from the v4l2 docs: "Buggy driver paranoia."
        unsigned int min = fmt.width * 2;
        if (fmt.bytesperline < min)
            fmt.bytesperline = min;
        min = fmt.bytesperline * fmt.height;
        if (fmt.sizeimage < min)
            fmt.sizeimage = min;

        const unsigned int bufCount = 4;

        if( mIO == READ )
        {
            // allocate buffer
            mBuffers.resize( 1 );
            mBuffers[ 0 ].length = fmt.sizeimage;
            mBuffers[ 0 ].start = new char[ fmt.sizeimage ];
        }
        else
        {
            // request buffers
            v4l2_requestbuffers req;
            memset( &req, 0, sizeof(req) );
            req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            req.memory = ( mIO == MMAP ? V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR );
            req.count = bufCount;
            xioctl( mFd, VIDIOC_REQBUFS, &req );

            if( mIO == USERPTR )
            {
                // allocate buffers
                mBuffers.resize( req.count );
                for( size_t i = 0; i < mBuffers.size(); ++i )
                {
                    mBuffers[ i ].length = fmt.sizeimage;
                    mBuffers[ i ].start = new char[ fmt.sizeimage ];
                }
            }
            else
            {
                // mmap buffers
                mBuffers.resize( req.count );
                for( size_t i = 0; i < mBuffers.size(); ++i )
                {
                    v4l2_buffer buf;
                    memset( &buf, 0, sizeof(buf) );
                    buf.type    = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                    buf.memory  = V4L2_MEMORY_MMAP;
                    buf.index   = i;
                    xioctl( mFd, VIDIOC_QUERYBUF, &buf );

                    mBuffers[i].length = buf.length;
                    mBuffers[i].start = (char*)v4l2_mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, mFd, buf.m.offset);
                    if( mBuffers[i].start == MAP_FAILED )
                        THROW("mmap() failed!");
                }
            }

            // queue buffers
            for( size_t i = 0; i < mBuffers.size(); ++i )
            {
                v4l2_buffer buf;
                memset( &buf, 0, sizeof(buf) );
                buf.type        = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.index       = i;
                buf.memory = ( mIO == MMAP ? V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR );
                if( mIO == USERPTR )
                {
                    buf.m.userptr   = (unsigned long)mBuffers[i].start;
                    buf.length      = mBuffers[i].length;
                }
                xioctl( mFd, VIDIOC_QBUF, &buf );
            }

            // start streaming
            v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            xioctl( mFd, VIDIOC_STREAMON, &type );
        }
    }
Developer: biddyweb, Project: LowLatencyVideoPrototype, Lines: 87


Example 5: while

// process video data
void CaptureThread::run() {
	while (devam) {
		mutex.lock();
		do {
			FD_ZERO(&fds);
			FD_SET(fd, &fds);
			/* Timeout. */
			tv.tv_sec = 2;
			tv.tv_usec = 0;
			r = select(fd + 1, &fds, NULL, NULL, &tv);
		} while (r == -1 && errno == EINTR);
		if (r == -1) {
			kDebug() << "select";
			quit();
			return;
		}
		CLEAR(buf);
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		xioctl(fd, VIDIOC_DQBUF, &buf);
		if (v4lconvert_convert(v4lconvert_data,
		                       &src_fmt,
		                       &fmt,
		                       (unsigned char*)buffers[buf.index].start, buf.bytesused,
		                       dst_buf, fmt.fmt.pix.sizeimage) < 0) {
			if (errno != EAGAIN)
				kDebug() << "v4l_convert";
		}
		unsigned char* asil = (unsigned char*)malloc(fmt.fmt.pix.sizeimage + qstrlen(header));
		memmove(asil, dst_buf, fmt.fmt.pix.sizeimage);
		memmove(asil + qstrlen(header), asil, fmt.fmt.pix.sizeimage);
		memcpy(asil, header, qstrlen(header));
		QImage *qq = new QImage();
		if (qq->loadFromData(asil, fmt.fmt.pix.sizeimage + qstrlen(header), "PPM")) {
			QTransform outTransform;
			if (Settings::mirror()) {
				// scaling x * -1 - making the output image mirror.
				outTransform.scale(-1, 1);
			}
			if (Settings::flip()) {
				// flipping y * -1
				outTransform.scale(1, -1);
			}
			emit renderedImage(qq->transformed(outTransform));
		}
		free(asil);
		delete qq;
		if (delay > 0) {
			this->msleep(delay);
		}
		xioctl(fd, VIDIOC_QBUF, &buf);
		di++;
		mutex.unlock();
	}
}
Developer: DHalens, Project: kamerka, Lines: 67


Example 6: JPEGENC_Scale_For_Thumbnail

LOCAL JPEG_RET_E JPEGENC_Scale_For_Thumbnail(SCALE_PARAM_T *scale_param){	static int fd = -1; 		SCALE_CONFIG_T scale_config;		SCALE_MODE_E scale_mode;	uint32_t enable = 0, endian_mode;	fd = open("/dev/sc8800g_scale", O_RDONLY);//O_RDWR /* required */, 0);  	if (-1 == fd) 	{   		SCI_TRACE_LOW("Fail to open scale device.");        	return JPEG_FAILED;      	 }    		//set mode	scale_config.id = SCALE_PATH_MODE;		scale_mode = SCALE_MODE_SCALE;	scale_config.param = &scale_mode;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}	//set input data format	scale_config.id = SCALE_PATH_INPUT_FORMAT;		scale_config.param = &scale_param->in_fmt;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}	//set output data format	scale_config.id = SCALE_PATH_OUTPUT_FORMAT;		scale_config.param = &scale_param->out_fmt;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}	//set input size	scale_config.id = SCALE_PATH_INPUT_SIZE;		scale_config.param = &scale_param->in_size;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}	//set output size	scale_config.id = SCALE_PATH_OUTPUT_SIZE;		scale_config.param = &scale_param->out_size;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}		//set input size	scale_config.id = SCALE_PATH_INPUT_RECT;	scale_config.param = &scale_param->in_rect;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}	//set input address	scale_config.id = SCALE_PATH_INPUT_ADDR;		scale_config.param = &scale_param->in_addr;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}	//set output address	scale_config.id = SCALE_PATH_OUTPUT_ADDR;		scale_config.param = &scale_param->out_addr;	 	if (-1 == xioctl(fd, SCALE_IOC_CONFIG, &scale_config))   	{		SCI_TRACE_LOW("Fail to SCALE_IOC_CONFIG: id=%d", scale_config.id);		return JPEG_FAILED;	}			if((scale_param->in_rect.w > scale_param->out_size.w * 4)  ||		(scale_param->in_rect.h > scale_param->out_size.h * 4))	{		//set sub sample mode		uint32_t mode = 0; //0: 1/2 1:1/4 2:1/8 3:1/16		uint32_t enable = 1;		if((scale_param->in_rect.w <= scale_param->out_size.w * 4 * 2) && 		(scale_param->in_rect.h <= scale_param->out_size.h * 4 * 2)){			mode = 0;		}		else if((scale_param->in_rect.w <= scale_param->out_size.w * 4 * 4) && 		(scale_param->in_rect.h <= scale_param->out_size.h * 4 * 4)){			mode = 1;		}		else if((scale_param->in_rect.w <= scale_param->out_size.w * 4 * 8) && 		(scale_param->in_rect.h <= scale_param->out_size.h * 4 * 8)){			mode = 2;		}		else if((scale_param->in_rect.w <= scale_param->out_size.w * 4 * 16) && 		(scale_param->in_rect.h <= scale_param->out_size.h * 4 * 16)){//.........这里部分代码省略.........
Developer: onyx-intl, Project: p400_hardware, Lines: 101


Example 7: v4l2_scan_devs

/* Scan V4L2 devices */static pj_status_t v4l2_scan_devs(vid4lin_factory *f){    vid4lin_dev_info vdi[V4L2_MAX_DEVS];    char dev_name[32];    unsigned i, old_count;    pj_status_t status;    if (f->dev_pool) {        pj_pool_release(f->dev_pool);        f->dev_pool = NULL;    }    pj_bzero(vdi, sizeof(vdi));    old_count = f->dev_count;    f->dev_count = 0;    f->dev_pool = pj_pool_create(f->pf, DRIVER_NAME, 500, 500, NULL);    for (i=0; i<V4L2_MAX_DEVS && f->dev_count < V4L2_MAX_DEVS; ++i) {	int fd;	vid4lin_dev_info *pdi;	pj_uint32_t fmt_cap[8];	int j, fmt_cnt=0;	pdi = &vdi[f->dev_count];	snprintf(dev_name, sizeof(dev_name), "/dev/video%d", i);	if (!pj_file_exists(dev_name))	    continue;	fd = v4l2_open(dev_name, O_RDWR, 0);	if (fd == -1)	    continue;	status = xioctl(fd, VIDIOC_QUERYCAP, &pdi->v4l2_cap);	if (status != PJ_SUCCESS) {	    PJ_PERROR(4,(THIS_FILE, status, "Error querying %s", dev_name));	    v4l2_close(fd);	    continue;	}	if ((pdi->v4l2_cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {	    v4l2_close(fd);	    continue;	}	PJ_LOG(5,(THIS_FILE, "Found capture device %s", pdi->v4l2_cap.card));	PJ_LOG(5,(THIS_FILE, "  Enumerating formats:"));	for (j=0; fmt_cnt<PJ_ARRAY_SIZE(fmt_cap); ++j) {	    struct v4l2_fmtdesc fdesc;	    unsigned k;	    pj_bzero(&fdesc, sizeof(fdesc));	    fdesc.index = j;	    fdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	    status = xioctl(fd, VIDIOC_ENUM_FMT, &fdesc);	    if (status != PJ_SUCCESS)		break;	    for (k=0; k<PJ_ARRAY_SIZE(v4l2_fmt_maps); ++k) {		if (v4l2_fmt_maps[k].v4l2_fmt_id == fdesc.pixelformat) {		    fmt_cap[fmt_cnt++] = v4l2_fmt_maps[k].pjmedia_fmt_id;		    PJ_LOG(5,(THIS_FILE, "   Supported: %s",			      fdesc.description));		    break;		}	    }	    if (k==PJ_ARRAY_SIZE(v4l2_fmt_maps)) {		PJ_LOG(5,(THIS_FILE, "   Unsupported: %s", fdesc.description));	    }	}	v4l2_close(fd);	if (fmt_cnt==0) {	    PJ_LOG(5,(THIS_FILE, "    Found no common format"));	    continue;	}	strncpy(pdi->dev_name, dev_name, sizeof(pdi->dev_name));	pdi->dev_name[sizeof(pdi->dev_name)-1] = '/0';	strncpy(pdi->info.name, (char*)pdi->v4l2_cap.card,		sizeof(pdi->info.name));	pdi->info.name[sizeof(pdi->info.name)-1] = '/0';	strncpy(pdi->info.driver, DRIVER_NAME, sizeof(pdi->info.driver));	pdi->info.driver[sizeof(pdi->info.driver)-1] = '/0';	pdi->info.dir = PJMEDIA_DIR_CAPTURE;	pdi->info.has_callback = PJ_FALSE;	pdi->info.caps = PJMEDIA_VID_DEV_CAP_FORMAT;	pdi->info.fmt_cnt = fmt_cnt;	for (j=0; j<fmt_cnt; ++j) {	    pjmedia_format_init_video(&pdi->info.fmt[j],				      fmt_cap[j],				      DEFAULT_WIDTH,				      DEFAULT_HEIGHT,				      DEFAULT_FPS, 1);	}	if (j < fmt_cnt)//.........这里部分代码省略.........
Developer: carlosdelfino, Project: WorkshopTelefoniaAutomacao, Lines: 101


Example 8: rtcwake_main

//.........这里部分代码省略.........#if ENABLE_LONG_OPTS	static const char rtcwake_longopts[] ALIGN1 =		"auto/0"    No_argument "a"		"local/0"   No_argument "l"		"utc/0"     No_argument "u"		"device/0"  Required_argument "d"		"mode/0"    Required_argument "m"		"seconds/0" Required_argument "s"		"time/0"    Required_argument "t"		;#endif	opt = getopt32long(argv,			/* Must have -s or -t, exclusive */			"^alud:m:s:t:" "/0" "s:t:s--t:t--s", rtcwake_longopts,			&rtcname, &suspend, &opt_seconds, &opt_time);	/* this is the default	if (opt & RTCWAKE_OPT_AUTO)		utc = -1;	*/	if (opt & (RTCWAKE_OPT_UTC | RTCWAKE_OPT_LOCAL))		utc = opt & RTCWAKE_OPT_UTC;	if (opt & RTCWAKE_OPT_SECONDS) {		/* alarm time, seconds-to-sleep (relative) */		seconds = xatou(opt_seconds);	} else {		/* RTCWAKE_OPT_TIME */		/* alarm time, time_t (absolute, seconds since 1/1 1970 UTC) */		if (sizeof(alarm_time) <= sizeof(long))			alarm_time = xatol(opt_time);		else			alarm_time = xatoll(opt_time);	}	if (utc == -1)		utc = rtc_adjtime_is_utc();	/* the rtcname is relative to /dev */	xchdir("/dev");	/* this RTC must exist and (if we'll sleep) be wakeup-enabled */	fd = rtc_xopen(&rtcname, O_RDONLY);	if (strcmp(suspend, "on") != 0)		if (!may_wakeup(rtcname))			bb_error_msg_and_die("%s not enabled for wakeup events", rtcname);	/* relative or absolute alarm time, normalized to time_t */	sys_time = time(NULL);	{		struct tm tm_time;		rtc_read_tm(&tm_time, fd);		rtc_time = rtc_tm2time(&tm_time, utc);	}	if (opt & RTCWAKE_OPT_TIME) {		/* Correct for RTC<->system clock difference */		alarm_time += rtc_time - sys_time;		if (alarm_time < rtc_time)			/*			 * Compat message text.			 * I'd say "RTC time is already ahead of ..." instead.			 */			bb_error_msg_and_die("time doesn't go backward to %s", ctime(&alarm_time));	} else		alarm_time = rtc_time + seconds + 1;	setup_alarm(fd, &alarm_time, rtc_time);	sync();#if 0 /*debug*/	printf("sys_time: %s", ctime(&sys_time));	printf("rtc_time: %s", ctime(&rtc_time));#endif	printf("wakeup from /"%s/" at %s", suspend, ctime(&alarm_time));	fflush_all();	usleep(10 * 1000);	if (strcmp(suspend, "on") != 0)		xopen_xwrite_close(SYS_POWER_PATH, suspend);	else {		/* "fake" suspend ... we'll do the delay ourselves */		unsigned long data;		do {			ssize_t ret = safe_read(fd, &data, sizeof(data));			if (ret < 0) {				bb_perror_msg("rtc read");				break;			}		} while (!(data & RTC_AF));	}	xioctl(fd, RTC_AIE_OFF, 0);	if (ENABLE_FEATURE_CLEAN_UP)		close(fd);	return EXIT_SUCCESS;}
Developer: nawawi, Project: busybox, Lines: 101


Example 9: fprintf

void Camera::Init() {  struct v4l2_capability cap;  struct v4l2_cropcap cropcap;  struct v4l2_crop crop;  struct v4l2_format fmt;  unsigned int min;  if(-1 == xioctl (fd, VIDIOC_QUERYCAP, &cap)) {    if (EINVAL == errno) {      fprintf(stderr, "%s is no V4L2 device/n",name);      exit(1);    } else {       errno_exit("VIDIOC_QUERYCAP");    }  }  if(!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {    fprintf(stderr, "%s is no video capture device/n", name);    exit(1);  }  switch(io) {    case IO_METHOD_READ:      if(!(cap.capabilities & V4L2_CAP_READWRITE)) {        fprintf(stderr, "%s does not support read i/o/n", name);        exit (1);      }      break;    case IO_METHOD_MMAP:    case IO_METHOD_USERPTR:    if(!(cap.capabilities & V4L2_CAP_STREAMING)) {      fprintf (stderr, "%s does not support streaming i/o/n", name);      exit(1);    }    break;  }  CLEAR (cropcap);  cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;  if(0 == xioctl (fd, VIDIOC_CROPCAP, &cropcap)) {    crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;    crop.c = cropcap.defrect; /* reset to default */    if(-1 == xioctl (fd, VIDIOC_S_CROP, &crop)) {      switch (errno) {        case EINVAL:          /* Cropping not supported. */          break;        default:          /* Errors ignored. */          break;        }      }    } else {      /* Errors ignored. */    }    CLEAR (fmt);    fmt.type                = V4L2_BUF_TYPE_VIDEO_CAPTURE;    fmt.fmt.pix.width       = width;    fmt.fmt.pix.height      = height;    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;    fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;  if(-1 == xioctl (fd, VIDIOC_S_FMT, &fmt))    errno_exit ("VIDIOC_S_FMT");/*struct v4l2_standard s;s.name[0]='A';s.frameperiod.numerator=1;s.frameperiod.denominator=fps;if(-1==xioctl(fd, VIDIOC_S_STD, &s))  errno_exit("VIDIOC_S_STD");*/struct v4l2_streamparm p;p.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;//p.parm.capture.capability=V4L2_CAP_TIMEPERFRAME;//p.parm.capture.capturemode=V4L2_MODE_HIGHQUALITY;p.parm.capture.timeperframe.numerator=1;p.parm.capture.timeperframe.denominator=fps;p.parm.output.timeperframe.numerator=1;p.parm.output.timeperframe.denominator=fps;//p.parm.output.outputmode=V4L2_MODE_HIGHQUALITY;//p.parm.capture.extendedmode=0;//p.parm.capture.readbuffers=n_buffers;//.........这里部分代码省略.........
Developer: sodeq, Project: BBxM-Robot, Lines: 101


Example 10: read_frame

static intread_frame (V4L2WHandler_t * handle, int (*process_frame)(V4L2WHandler_t *, const void *, int)){        struct v4l2_buffer buf;        unsigned int i;        switch (handle->io) {        case IO_METHOD_READ:                if (-1 == read (handle->fd, handle->buffers[0].start, handle->buffers[0].length)) {                        switch (errno) {                        case EAGAIN:                                return 0;                        case EIO:                                /* Could ignore EIO, see spec. */                                /* fall through */                        default:                                errno_exit ("read");                        }                }                process_frame (handle, handle->buffers[0].start, buf.bytesused);                break;        case IO_METHOD_MMAP:                CLEAR (buf);                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;                buf.memory = V4L2_MEMORY_MMAP;                if (-1 == xioctl (handle->fd, VIDIOC_DQBUF, &buf)) {                        switch (errno) {                        case EAGAIN:                                return 0;                        case EIO:                                /* Could ignore EIO, see spec. */                                /* fall through */                        default:                                errno_exit ("VIDIOC_DQBUF");                        }                }                assert (buf.index < handle->n_buffers);                process_frame (handle, handle->buffers[buf.index].start, buf.bytesused);                // fprintf(stderr, "%d/n", buf.bytesused);                if (-1 == xioctl (handle->fd, VIDIOC_QBUF, &buf))                        errno_exit ("VIDIOC_QBUF");                break;        case IO_METHOD_USERPTR:                CLEAR (buf);                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;                buf.memory = V4L2_MEMORY_USERPTR;                if (-1 == xioctl (handle->fd, VIDIOC_DQBUF, &buf)) {                        switch (errno) {                        case EAGAIN:                                return 0;                        case EIO:                                /* Could ignore EIO, see spec. */                                /* fall through */                        default:                                errno_exit ("VIDIOC_DQBUF");                        }                }                for (i = 0; i < handle->n_buffers; ++i)                        if (buf.m.userptr == (unsigned long) handle->buffers[i].start                            && buf.length == handle->buffers[i].length)                                break;                assert (i < handle->n_buffers);                process_frame (handle, (void *) buf.m.userptr, buf.bytesused);                if (-1 == xioctl (handle->fd, VIDIOC_QBUF, &buf))                        errno_exit ("VIDIOC_QBUF");                break;        }        return 1;}
Developer: nozaki87, Project: v4l2wrapper, Lines: 96


Example 11: fprintf

static voidinit_device                     (V4L2WHandler_t * handle){        struct v4l2_capability cap;        struct v4l2_cropcap cropcap;        struct v4l2_crop crop;        struct v4l2_format fmt;        struct v4l2_streamparm stream;        unsigned int min;        if (-1 == xioctl (handle->fd, VIDIOC_QUERYCAP, &cap)) {                if (EINVAL == errno) {                        fprintf (stderr, "%s is no V4L2 device/n",                                 handle->dev_name);                        exit (EXIT_FAILURE);                } else {                        errno_exit ("VIDIOC_QUERYCAP");                }        }        if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {                fprintf (stderr, "%s is no video capture device/n",                         handle->dev_name);                exit (EXIT_FAILURE);        }        switch (handle->io) {        case IO_METHOD_READ:                if (!(cap.capabilities & V4L2_CAP_READWRITE)) {                        fprintf (stderr, "%s does not support read i/o/n",                                 handle->dev_name);                        exit (EXIT_FAILURE);                }                break;        case IO_METHOD_MMAP:        case IO_METHOD_USERPTR:                if (!(cap.capabilities & V4L2_CAP_STREAMING)) {                        fprintf (stderr, "%s does not support streaming i/o/n",                                 handle->dev_name);                        exit (EXIT_FAILURE);                }                break;        }        /* Select video input, video standard and tune here. */        CLEAR (cropcap);        cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;        if (0 == xioctl (handle->fd, VIDIOC_CROPCAP, &cropcap)) {                crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;                crop.c = cropcap.defrect; /* reset to default */                if (-1 == xioctl (handle->fd, VIDIOC_S_CROP, &crop)) {                        switch (errno) {                        case EINVAL:                                /* Cropping not supported. */                                break;                        default:                                /* Errors ignored. */                                break;                        }                }        } else {                        /* Errors ignored. */        }        CLEAR (fmt);        fmt.type                = V4L2_BUF_TYPE_VIDEO_CAPTURE;        fmt.fmt.pix.width       = handle->imgparam.width;         fmt.fmt.pix.height      = handle->imgparam.height;        fmt.fmt.pix.pixelformat = handle->imgparam.pix_fmt;        fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;        if (-1 == xioctl (handle->fd, VIDIOC_S_FMT, &fmt))                errno_exit ("VIDIOC_S_FMT");        CLEAR (stream);        stream.type                = V4L2_BUF_TYPE_VIDEO_CAPTURE;                stream.parm.capture.timeperframe.numerator = 1;        stream.parm.capture.timeperframe.denominator = handle->imgparam.fps;        /* stream.parm.capture.timeperframe.numerator = 1; */        /* stream.parm.capture.timeperframe.denominator = 10; */        stream.parm.capture.readbuffers = 4;        if (-1 == xioctl (handle->fd, VIDIOC_S_PARM, &stream))                errno_exit ("VIDIOC_S_PARM");        //printf("%d %d/n", stream.parm.capture.timeperframe.numerator, stream.parm.capture.timeperframe.denominator);        stream.parm.capture.readbuffers = 4;        /* Note VIDIOC_S_FMT may change width and height. *///.........这里部分代码省略.........
Developer: nozaki87, Project: v4l2wrapper, Lines: 101


Example 12: zcip_main

int zcip_main(int argc UNUSED_PARAM, char **argv){	char *r_opt;	const char *l_opt = "169.254.0.0";	int state;	int nsent;	unsigned opts;	// Ugly trick, but I want these zeroed in one go	struct {		const struct ether_addr null_ethaddr;		struct ifreq ifr;		uint32_t chosen_nip;		int conflicts;		int timeout_ms; // must be signed		int verbose;	} L;#define null_ethaddr (L.null_ethaddr)#define ifr          (L.ifr         )#define chosen_nip   (L.chosen_nip  )#define conflicts    (L.conflicts   )#define timeout_ms   (L.timeout_ms  )#define verbose      (L.verbose     )	memset(&L, 0, sizeof(L));	INIT_G();#define FOREGROUND (opts & 1)#define QUIT       (opts & 2)	// Parse commandline: prog [options] ifname script	// exactly 2 args; -v accumulates and implies -f	opt_complementary = "=2:vv:vf";	opts = getopt32(argv, "fqr:l:v", &r_opt, &l_opt, &verbose);#if !BB_MMU	// on NOMMU reexec early (or else we will rerun things twice)	if (!FOREGROUND)		bb_daemonize_or_rexec(0 /*was: DAEMON_CHDIR_ROOT*/, argv);#endif	// Open an ARP socket	// (need to do it before openlog to prevent openlog from taking	// fd 3 (sock_fd==3))	xmove_fd(xsocket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ARP)), sock_fd);	if (!FOREGROUND) {		// do it before all bb_xx_msg calls		openlog(applet_name, 0, LOG_DAEMON);		logmode |= LOGMODE_SYSLOG;	}	bb_logenv_override();	{ // -l n.n.n.n		struct in_addr net;		if (inet_aton(l_opt, &net) == 0		 || (net.s_addr & htonl(IN_CLASSB_NET)) != net.s_addr		) {			bb_error_msg_and_die("invalid network address");		}		G.localnet_ip = ntohl(net.s_addr);	}	if (opts & 4) { // -r n.n.n.n		struct in_addr ip;		if (inet_aton(r_opt, &ip) == 0		 || (ntohl(ip.s_addr) & IN_CLASSB_NET) != G.localnet_ip		) {			bb_error_msg_and_die("invalid link address");		}		chosen_nip = ip.s_addr;	}	argv += optind - 1;	/* Now: argv[0]:junk argv[1]:intf argv[2]:script argv[3]:NULL */	/* We need to make space for script argument: */	argv[0] = argv[1];	argv[1] = argv[2];	/* Now: argv[0]:intf argv[1]:script argv[2]:junk argv[3]:NULL */#define argv_intf (argv[0])	xsetenv("interface", argv_intf);	// Initialize the interface (modprobe, ifup, etc)	if (run(argv, "init", 0))		return EXIT_FAILURE;	// Initialize G.iface_sockaddr	// G.iface_sockaddr is: { u16 sa_family; u8 sa_data[14]; }	//memset(&G.iface_sockaddr, 0, sizeof(G.iface_sockaddr));	//TODO: are we leaving sa_family == 0 (AF_UNSPEC)?!	safe_strncpy(G.iface_sockaddr.sa_data, argv_intf, sizeof(G.iface_sockaddr.sa_data));	// Bind to the interface's ARP socket	xbind(sock_fd, &G.iface_sockaddr, sizeof(G.iface_sockaddr));	// Get the interface's ethernet address	//memset(&ifr, 0, sizeof(ifr));	strncpy_IFNAMSIZ(ifr.ifr_name, argv_intf);	xioctl(sock_fd, SIOCGIFHWADDR, &ifr);	memcpy(&G.our_ethaddr, &ifr.ifr_hwaddr.sa_data, ETH_ALEN);	// Start with some stable ip address, either a function of	// the hardware address or else the last address we used.	// we are taking low-order four bytes, as top-order ones//.........这里部分代码省略.........
Developer: andy-padavan, Project: rt-n56u, Lines: 101


Example 13: device_read_thread

static void device_read_thread(v4l_device_t *dev){  int working;  int frame;  dprintf("Entering read thread/n");  /* Queue all available frame buffers */  if ( dev->streaming ) {    for (frame = 0; frame < dev->mbuf.frames; frame++)      device_qbuf_streaming(dev, frame);  }  frame = 0;  working = 1;  while ( working ) {    unsigned char index;    int ret = 0;    /* Get an available output buffer */    if ( dev->read_in[1] != -1 ) {      ret = read(dev->read_in[0], &index, 1);    }    if ( ret == 1 ) {      unsigned char *ibuf = NULL;      int isize = 0;      dprintf("Read thread input: buf%d/n", index);      dev->h.bufs[index].bytesused = 0;      /* Read input buffer from capture device */      if ( dev->streaming ) {	dprintf("ioctl(VIDIOCSYNC,%d)/n", frame);	if ( xioctl(dev->fd, VIDIOCSYNC, &frame) == -1 ) {	  eprintf("%s: ioctl(VIDIOCSYNC,%d): %s/n", dev->h.name, frame, strerror(errno));	  exit(EXIT_FAILURE);	}	ibuf = dev->buf + dev->mbuf.offsets[frame];	isize = dev->imagesize;      }      else {	ibuf = dev->buf;	isize = read(dev->fd, ibuf, dev->imagesize);	if ( isize < 0 ) {	  if ( (errno != EAGAIN) && (errno != EINTR) ) {	    eprintf("%s: read(%lu): %s/n", dev->h.name, dev->imagesize, strerror(errno));	    exit(EXIT_FAILURE);	  }	  else {	    isize = 0;	  }	}      }      /* Copy input buffer to output buffer */      if ( isize > 0 ) {	int length = 0;	dprintf("Got %d bytes from capture device/n", isize);	if ( dev->decoder != NULL ) {	  if ( dev->decoder->process != NULL ) {	    length = dev->decoder->process(dev->decoder,					   ibuf, isize,					   dev->h.bufs[index].base, dev->h.bufs[index].length);	  }	}	else {	  length = isize;	  if ( length > dev->h.bufs[index].length )	    length = dev->h.bufs[index].length;	  memcpy(dev->h.bufs[index].base, ibuf, length);	}	dev->h.bufs[index].bytesused = length;      }      /* Send filled output buffer back for processing */      if ( dev->streaming ) {	device_qbuf_streaming(dev, frame);	frame++;	if ( frame >= dev->mbuf.frames )	  frame = 0;      }      if ( dev->read_out[1] != -1 ) {	dprintf("Read thread output: buf%d/n", index);	if ( write(dev->read_out[1], &index, 1) != 1 ) {	  eprintf("%s: Read output pipe: write error: %s/n", dev->h.name, strerror(errno));	  exit(EXIT_FAILURE);	}      }    }    else if ( ret == 0 ) {      working = 0;//.........这里部分代码省略.........
Developer: testfarm, Project: testfarm, Lines: 101


Example 14: device_init_format

static int device_init_format(v4l_device_t *dev){  struct video_window window;  struct video_picture picture;  int palette;  unsigned long pixelformat2 = 0;  /* Get frame size */  if ( xioctl(dev->fd, VIDIOCGWIN, &window) == -1 ) {    eprintf("%s: Cannot get video device window: %s/n",	    dev->h.name, strerror(errno));    return -1;  }  dprintf("%s: VIDIOCGWIN -> width=%d height=%d/n", dev->h.name, window.width, window.height);  dev->h.width = window.width;  dev->h.height = window.height;  /* Choose an acceptable pixel format */  if ( xioctl(dev->fd, VIDIOCGPICT, &picture) == -1 ) {    eprintf("%s: Cannot get video device picture format: %s/n",	    dev->h.name, strerror(errno));    return -1;  }  dprintf("%s: VIDIOCGPICT -> palette=%d currently selected/n", dev->h.name, picture.palette);  palette = picture.palette;  picture.palette = VIDEO_PALETTE_RGB24;  if ( xioctl(dev->fd, VIDIOCSPICT, &picture) == 0 ) {    palette = picture.palette;  }  else {    dprintf("%s: VIDIOCSPICT -> palette=%d rejected/n", dev->h.name, picture.palette);  }  dev->pixelformat = palette;  dev->imagesize = dev->h.width * dev->h.height * picture.depth / 8;  dprintf("%s: Using palette=%d - imagesize=%lu/n", dev->h.name, palette, dev->imagesize);  switch ( palette ) {  case VIDEO_PALETTE_RGB24:    dev->h.pixfmt = CAPTURE_PIXFMT_BGR24;  /* V4L1 RGB is actually BGR */    pixelformat2 = V4L2_PIX_FMT_BGR24;    break;  case VIDEO_PALETTE_YUV420:    dev->h.pixfmt = CAPTURE_PIXFMT_RGB24;    pixelformat2 = V4L2_PIX_FMT_YUV420;    dev->decoder = yuv_create(pixelformat2, dev->h.width, dev->h.height);    break;  default:    eprintf("%s: Video device does not support pixel formats RGB24/YUV420/n",	    dev->h.name);    eprintf("%s: Video device only supports format #%d/n",	    dev->h.name, palette);    return -1;    break;  }  /* Set pixel format string */  dev->pixelformat_str[0] = (pixelformat2 >> 0)  & 0xFF;  dev->pixelformat_str[1] = (pixelformat2 >> 8)  & 0xFF;  dev->pixelformat_str[2] = (pixelformat2 >> 16) & 0xFF;  dev->pixelformat_str[3] = (pixelformat2 >> 24) & 0xFF;  dev->pixelformat_str[4] = '/0';  /* Check for streaming capabilitiy */  if ( xioctl(dev->fd, VIDIOCGMBUF, &(dev->mbuf)) == 0 ) {    dprintf("%s: VIDIOCGMBUF -> size=%d frames=%d/n",	    dev->h.name, dev->mbuf.size, dev->mbuf.frames);    if ( dev->mbuf.frames > 0 ) {      dev->streaming = 1;      dprintf("%s: Streaming mode enabled/n", dev->h.name);    }  }  return 0;}
Developer: testfarm, Project: testfarm, Lines: 78


Example 15: inotifyd_main

int inotifyd_main(int argc, char **argv){	int n;	unsigned mask;	struct pollfd pfd;	char **watches; // names of files being watched	const char *args[5];	// sanity check: agent and at least one watch must be given	if (!argv[1] || !argv[2])		bb_show_usage();	argv++;	// inotify_add_watch will number watched files	// starting from 1, thus watches[0] is unimportant,	// and 1st file name is watches[1].	watches = argv;	args[0] = *argv;	args[4] = NULL;	argc -= 2; // number of files we watch	// open inotify	pfd.fd = inotify_init();	if (pfd.fd < 0)		bb_perror_msg_and_die("no kernel support");	// setup watches	while (*++argv) {		char *path = *argv;		char *masks = strchr(path, ':');		mask = 0x0fff; // assuming we want all non-kernel events		// if mask is specified ->		if (masks) {			*masks = '/0'; // split path and mask			// convert mask names to mask bitset			mask = 0;			while (*++masks) {				const char *found;				found = memchr(mask_names, *masks, MASK_BITS);				if (found)					mask |= (1 << (found - mask_names));			}		}		// add watch		n = inotify_add_watch(pfd.fd, path, mask);		if (n < 0)			bb_perror_msg_and_die("add watch (%s) failed", path);		//bb_error_msg("added %d [%s]:%4X", n, path, mask);	}	// setup signals	bb_signals(BB_FATAL_SIGS, record_signo);	// do watch	pfd.events = POLLIN;	while (1) {		int len;		void *buf;		struct inotify_event *ie; again:		if (bb_got_signal)			break;		n = poll(&pfd, 1, -1);		// Signal interrupted us?		if (n < 0 && errno == EINTR)			goto again;		// Under Linux, above if() is not necessary.		// Non-fatal signals, e.g. SIGCHLD, when set to SIG_DFL,		// are not interrupting poll().		// Thus we can just break if n <= 0 (see below),		// because EINTR will happen only on SIGTERM et al.		// But this might be not true under other Unixes,		// and is generally way too subtle to depend on.		if (n <= 0) // strange error?			break;		// read out all pending events		// (NB: len must be int, not ssize_t or long!)#define eventbuf bb_common_bufsiz1		setup_common_bufsiz();		xioctl(pfd.fd, FIONREAD, &len);		ie = buf = (len <= COMMON_BUFSIZE) ? eventbuf : xmalloc(len);		len = full_read(pfd.fd, buf, len);		// process events. N.B. events may vary in length		while (len > 0) {			int i;			// cache relevant events mask			unsigned m = ie->mask & ((1 << MASK_BITS) - 1);			if (m) {				char events[MASK_BITS + 1];				char *s = events;				for (i = 0; i < MASK_BITS; ++i, m >>= 1) {					if ((m & 1) && (mask_names[i] != '/0'))						*s++ = mask_names[i];				}				*s = '/0';				if (LONE_CHAR(args[0], '-')) {					/* "inotifyd - FILE": built-in echo */					printf(ie->len ? "%s/t%s/t%s/n" : "%s/t%s/n", events,//.........这里部分代码省略.........
Developer: beyond2002, Project: GT813C, Lines: 101


Example 16: switch

unsigned char *Camera::Get() {  struct v4l2_buffer buf;  switch(io) {    case IO_METHOD_READ:/*    		if (-1 == read (fd, buffers[0].start, buffers[0].length)) {            		switch (errno) {            		case EAGAIN:                    		return 0;			case EIO:			default:				errno_exit ("read");			}		}    		process_image (buffers[0].start);*/      break;    case IO_METHOD_MMAP:      CLEAR(buf);      buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;      buf.memory = V4L2_MEMORY_MMAP;      if(-1 == xioctl (fd, VIDIOC_DQBUF, &buf)) {        switch (errno) {          case EAGAIN:            return 0;          case EIO:          default:            return 0; //errno_exit ("VIDIOC_DQBUF");        }      }      assert(buf.index < (unsigned int)n_buffers);      memcpy(data, (unsigned char *)buffers[buf.index].start, buffers[buf.index].length);      if(-1 == xioctl (fd, VIDIOC_QBUF, &buf))        return 0; //errno_exit ("VIDIOC_QBUF");    return data;      break;    case IO_METHOD_USERPTR:/*		CLEAR (buf);    		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;    		buf.memory = V4L2_MEMORY_USERPTR;		if (-1 == xioctl (fd, VIDIOC_DQBUF, &buf)) {			switch (errno) {			case EAGAIN:				return 0;			case EIO:			default:				errno_exit ("VIDIOC_DQBUF");			}		}		for (i = 0; i < n_buffers; ++i)			if (buf.m.userptr == (unsigned long) buffers[i].start			    && buf.length == buffers[i].length)				break;		assert (i < n_buffers);    		process_image ((void *) buf.m.userptr);		if (-1 == xioctl (fd, VIDIOC_QBUF, &buf))			errno_exit ("VIDIOC_QBUF");*/      break;  }  return 0;}
Developer: sodeq, Project: BBxM-Robot, Lines: 87


Example 17: init_camera

void init_camera(struct camera *cam) {
	struct v4l2_capability *cap = &(cam->v4l2_cap);
	struct v4l2_cropcap *cropcap = &(cam->v4l2_cropcap);
	struct v4l2_crop *crop = &(cam->crop);
	struct v4l2_format *fmt = &(cam->v4l2_fmt);
	unsigned int min;

	if (-1 == xioctl(cam->fd, VIDIOC_QUERYCAP, cap)) {
		if (EINVAL == errno) {
			fprintf(stderr, "%s is no V4L2 device\n", cam->device_name);
			exit(EXIT_FAILURE);
		} else {
			errno_exit("VIDIOC_QUERYCAP");
		}
	}

	if (!(cap->capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
		fprintf(stderr, "%s is no video capture device\n", cam->device_name);
		exit(EXIT_FAILURE);
	}

	if (!(cap->capabilities & V4L2_CAP_STREAMING)) {
		fprintf(stderr, "%s does not support streaming i/o\n",
				cam->device_name);
		exit(EXIT_FAILURE);
	}

	//#ifdef DEBUG_CAM
	printf("\nVIDIOC_QUERYCAP\n");
	printf("the camera driver is %s\n", cap->driver);
	printf("the camera card is %s\n", cap->card);
	printf("the camera bus info is %s\n", cap->bus_info);
	printf("the version is %d\n", cap->version);
	//#endif

	/* Select video input, video standard and tune here. */
	CLEAR(*cropcap);

	cropcap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	crop->c.width = cam->width;
	crop->c.height = cam->height;
	crop->c.left = 0;
	crop->c.top = 0;
	crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	CLEAR(*fmt);

	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt->fmt.pix.width = cam->width;
	fmt->fmt.pix.height = cam->height;
	fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; // yuv422
	//  fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;  // yuv420, but not supported by my machine
	fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; // interlaced scanning

	if (-1 == xioctl(cam->fd, VIDIOC_S_FMT, fmt))
		errno_exit("VIDIOC_S_FMT");

	/* Note VIDIOC_S_FMT may change width and height. */

	/* Buggy driver paranoia. */
	min = fmt->fmt.pix.width * 2;
	if (fmt->fmt.pix.bytesperline < min)
		fmt->fmt.pix.bytesperline = min;
	min = fmt->fmt.pix.bytesperline * fmt->fmt.pix.height;
	if (fmt->fmt.pix.sizeimage < min)
		fmt->fmt.pix.sizeimage = min;

	init_mmap(cam);
}
Developer: RuanJG, Project: cameratoh264, Lines: 71


Example 18: captureFrames

////////////////////////////////////////////////////////////////////////////////////// @fn int captureFrames(int fileDescriptor)//////  Creates memory buffers for receiving frames from the video capture device///  and then enters a frame-capture loop that runs until the terminate flag is///  toggled.////// @param fileDescriptor The open file descriptor to the camera device////// @return 0 on success, -1 otherwise.///////////////////////////////////////////////////////////////////////////////////int captureFrames(int fileDescriptor) {  struct v4l2_buffer deviceBuffer;  struct v4l2_buffer readBuffer;  struct v4l2_buffer queryBuffer;  fd_set fileDescriptorSet;  struct timeval time;  int ready = -1;  int i = 0;  CLEAR(time);  for(i = 0; i < bufferCount; i++) {    CLEAR(deviceBuffer);    CLEAR(queryBuffer);    deviceBuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;    deviceBuffer.memory = V4L2_MEMORY_MMAP;    deviceBuffer.index = i;    // Query the device memory buffer for settings    if (xioctl(fileDescriptor, VIDIOC_QUERYBUF, &deviceBuffer) == -1) {      perror("Error querying the device memory buffer");      return -1;    }    // Generate memory-mapped buffer to device memory    frameBuffers[i].length = deviceBuffer.length;    frameBuffers[i].data = mmap(        NULL, deviceBuffer.length, PROT_READ | PROT_WRITE, MAP_SHARED,        fileDescriptor, deviceBuffer.m.offset);    if (frameBuffers[i].data == MAP_FAILED) {      perror("Error establishing memory map");    }    queryBuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;    queryBuffer.memory = V4L2_MEMORY_MMAP;    queryBuffer.index = i;    if(xioctl(fileDescriptor, VIDIOC_QBUF, &queryBuffer) == -1) {      perror("Error establishing device query buffer");      return -1;    }  }  // Turn video streaming on  if (xioctl(fileDescriptor, VIDIOC_STREAMON, &queryBuffer.type) == -1) {    perror("Error starting the device video stream");    return -1;  }  // Begin reading from the device  while(!terminate) {    // Initiailize the file descriptor set    FD_ZERO(&fileDescriptorSet);    FD_SET(fileDescriptor, &fileDescriptorSet);    // Set select timeout to be one second    time.tv_sec = 2;    time.tv_usec = 0;    // Wait for signal indicating the device has delivered a frame    ready = select(fileDescriptor + 1, &fileDescriptorSet, NULL, NULL, &time);    if (ready == -1) {      perror("Error waiting on video frame");      continue;    }    // Capture the frame    CLEAR(readBuffer);    readBuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;    readBuffer.memory = V4L2_MEMORY_MMAP;    if (xioctl(fileDescriptor, VIDIOC_DQBUF, &readBuffer) == -1) {      perror("Failed to retrieve frame from device");    } else {      fprintf(stderr, ".");      fflush(stdout);    }    if (xioctl(fileDescriptor, VIDIOC_QBUF, &readBuffer) == -1) {      perror("Error queueing the video buffer");    }  }  return 0;}
Developer: bgoldber, Project: linux-webcam-stream, Lines: 99


Example 19: init_device

static void init_device(void){  struct v4l2_capability cap;  struct v4l2_cropcap cropcap;  struct v4l2_crop crop;  struct v4l2_format fmt;  unsigned int min;  if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap))  {    if (EINVAL == errno)    {      fprintf(stderr, "%s is no V4L2 device/n", dev_name);      exit(EXIT_FAILURE);    }    else    {      errno_exit("VIDIOC_QUERYCAP");    }  }  if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))  {    fprintf(stderr, "%s is no video capture device/n", dev_name);    exit(EXIT_FAILURE);  }  if (!(cap.capabilities & V4L2_CAP_STREAMING))  {    fprintf(stderr, "%s does not support streaming i/o/n", dev_name);    exit(EXIT_FAILURE);  }  /* Select video input, video standard and tune here. */  CLEAR(cropcap);  cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;  if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap))  {    crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;    crop.c = cropcap.defrect;   /* reset to default */    if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop))    {      switch (errno)      {      case EINVAL:        /* Cropping not supported. */        break;      default:        /* Errors ignored. */        break;      }    }  }  else  {      /* Errors ignored. */  }  CLEAR(fmt);  fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;  fmt.fmt.pix.width = WIDTH;  fmt.fmt.pix.height = HEIGHT;  fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;  fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;  if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))    errno_exit("VIDIOC_S_FMT");  /* Note VIDIOC_S_FMT may change width and height. */  /* Buggy driver paranoia. */  min = fmt.fmt.pix.width * 2;  if (fmt.fmt.pix.bytesperline < min)    fmt.fmt.pix.bytesperline = min;  min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;  if (fmt.fmt.pix.sizeimage < min)    fmt.fmt.pix.sizeimage = min;  if (WIDTH > fmt.fmt.pix.width)    errno_exit("Width parameter is too big./n");  if (fmt.fmt.pix.height != HEIGHT)    errno_exit("Height parameter is too big./n");  init_mmap();}
Developer: VovaFender, Project: face_recognizer, Lines: 96


Example 20: zcip_main

int zcip_main(int argc UNUSED_PARAM, char **argv){	int state;	char *r_opt;	const char *l_opt = "169.254.0.0";	unsigned opts;	// ugly trick, but I want these zeroed in one go	struct {		const struct in_addr null_ip;		const struct ether_addr null_addr;		struct in_addr ip;		struct ifreq ifr;		int timeout_ms; /* must be signed */		unsigned conflicts;		unsigned nprobes;		unsigned nclaims;		int ready;		int verbose;	} L;#define null_ip    (L.null_ip   )#define null_addr  (L.null_addr )#define ip         (L.ip        )#define ifr        (L.ifr       )#define timeout_ms (L.timeout_ms)#define conflicts  (L.conflicts )#define nprobes    (L.nprobes   )#define nclaims    (L.nclaims   )#define ready      (L.ready     )#define verbose    (L.verbose   )	memset(&L, 0, sizeof(L));	INIT_G();#define FOREGROUND (opts & 1)#define QUIT       (opts & 2)	// parse commandline: prog [options] ifname script	// exactly 2 args; -v accumulates and implies -f	opt_complementary = "=2:vv:vf";	opts = getopt32(argv, "fqr:l:v", &r_opt, &l_opt, &verbose);#if !BB_MMU	// on NOMMU reexec early (or else we will rerun things twice)	if (!FOREGROUND)		bb_daemonize_or_rexec(0 /*was: DAEMON_CHDIR_ROOT*/, argv);#endif	// open an ARP socket	// (need to do it before openlog to prevent openlog from taking	// fd 3 (sock_fd==3))	xmove_fd(xsocket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ARP)), sock_fd);	if (!FOREGROUND) {		// do it before all bb_xx_msg calls		openlog(applet_name, 0, LOG_DAEMON);		logmode |= LOGMODE_SYSLOG;	}	bb_logenv_override();	{ // -l n.n.n.n		struct in_addr net;		if (inet_aton(l_opt, &net) == 0		 || (net.s_addr & htonl(IN_CLASSB_NET)) != net.s_addr		) {			bb_error_msg_and_die("invalid network address");		}		G.localnet_ip = ntohl(net.s_addr);	}	if (opts & 4) { // -r n.n.n.n		if (inet_aton(r_opt, &ip) == 0		 || (ntohl(ip.s_addr) & IN_CLASSB_NET) != G.localnet_ip		) {			bb_error_msg_and_die("invalid link address");		}	}	argv += optind - 1;	/* Now: argv[0]:junk argv[1]:intf argv[2]:script argv[3]:NULL */	/* We need to make space for script argument: */	argv[0] = argv[1];	argv[1] = argv[2];	/* Now: argv[0]:intf argv[1]:script argv[2]:junk argv[3]:NULL */#define argv_intf (argv[0])	xsetenv("interface", argv_intf);	// initialize the interface (modprobe, ifup, etc)	if (run(argv, "init", NULL))		return EXIT_FAILURE;	// initialize saddr	// saddr is: { u16 sa_family; u8 sa_data[14]; }	//memset(&saddr, 0, sizeof(saddr));	//TODO: are we leaving sa_family == 0 (AF_UNSPEC)?!	safe_strncpy(saddr.sa_data, argv_intf, sizeof(saddr.sa_data));	// bind to the interface's ARP socket	xbind(sock_fd, &saddr, sizeof(saddr));	// get the interface's ethernet address	//memset(&ifr, 0, sizeof(ifr));	strncpy_IFNAMSIZ(ifr.ifr_name, argv_intf);	xioctl(sock_fd, SIOCGIFHWADDR, &ifr);//.........这里部分代码省略.........
Developer: jing-git, Project: rt-n56u, Lines: 101


Example 21: print_caps

static int print_caps(int fd){	struct v4l2_capability caps;	struct v4l2_fmtdesc fmtdesc;	struct v4l2_format fmt;	bool support_h264 = false;	char fourcc[5] = {0};	char c;	int err;	memset(&caps, 0, sizeof(caps));	memset(&fmtdesc, 0, sizeof(fmtdesc));	memset(&fmt, 0, sizeof(fmt));	if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &caps)) {		err = errno;		warning("v4l2_codec: error Querying Capabilities (%m)/n", err);		return err;	}	info("v4l2_codec: Driver Caps:/n"		"  Driver:        /"%s/"/n"		"  Card:          /"%s/"/n"		"  Bus:           /"%s/"/n"		"  Version:       %d.%d/n"		"  Capabilities:  0x%08x/n",		caps.driver,		caps.card,		caps.bus_info,		(caps.version>>16) & 0xff,		(caps.version>>24) & 0xff,		caps.capabilities);	fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	info("  FMT : CE Desc/n--------------------/n");	while (0 == xioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc)) {		bool selected = fmtdesc.pixelformat == V4L2_PIX_FMT_H264;		strncpy(fourcc, (char *)&fmtdesc.pixelformat, 4);		if (fmtdesc.pixelformat == V4L2_PIX_FMT_H264)		    support_h264 = true;		c = fmtdesc.flags & V4L2_FMT_FLAG_COMPRESSED ? 'C' : ' ';		info("  %c  %s: %c  '%s'/n",		       selected ? '>' : ' ',		       fourcc, c, fmtdesc.description);		fmtdesc.index++;	}	info("/n");	if (!support_h264) {		warning("v4l2_codec: Doesn't support H264./n");		return ENODEV;	}	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	fmt.fmt.pix.width = v4l2.width;	fmt.fmt.pix.height = v4l2.height;	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;	fmt.fmt.pix.field = V4L2_FIELD_NONE;	if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt)) {		err = errno;		warning("v4l2_codec: Setting Pixel Format (%m)/n", err);		return err;	}	strncpy(fourcc, (char *)&fmt.fmt.pix.pixelformat, 4);	info("v4l2_codec: Selected Camera Mode:/n"		"  Width:   %d/n"		"  Height:  %d/n"		"  PixFmt:  %s/n"		"  Field:   %d/n",		fmt.fmt.pix.width,		fmt.fmt.pix.height,		fourcc,		fmt.fmt.pix.field);	return 0;}
Developer: sealaunch, Project: baresip, Lines: 87


Example 22: rtcwake_main

int rtcwake_main(int argc UNUSED_PARAM, char **argv){	time_t rtc_time;	unsigned opt;	const char *rtcname = NULL;	const char *suspend;	const char *opt_seconds;	const char *opt_time;	time_t sys_time;	time_t alarm_time = 0;	unsigned seconds = 0;	int utc = -1;	int fd;#if ENABLE_LONG_OPTS	static const char rtcwake_longopts[] ALIGN1 =		"auto/0"    No_argument "a"		"local/0"   No_argument "l"		"utc/0"     No_argument "u"		"device/0"  Required_argument "d"		"mode/0"    Required_argument "m"		"seconds/0" Required_argument "s"		"time/0"    Required_argument "t"		;	applet_long_options = rtcwake_longopts;#endif	opt = getopt32(argv, "alud:m:s:t:", &rtcname, &suspend, &opt_seconds, &opt_time);	/* this is the default	if (opt & RTCWAKE_OPT_AUTO)		utc = -1;	*/	if (opt & (RTCWAKE_OPT_UTC | RTCWAKE_OPT_LOCAL))		utc = opt & RTCWAKE_OPT_UTC;	if (!(opt & RTCWAKE_OPT_SUSPEND_MODE))		suspend = DEFAULT_MODE;	if (opt & RTCWAKE_OPT_SECONDS)		/* alarm time, seconds-to-sleep (relative) */		seconds = xatoi(opt_seconds);	if (opt & RTCWAKE_OPT_TIME)		/* alarm time, time_t (absolute, seconds since 1/1 1970 UTC) */		alarm_time = xatol(opt_time);	if (!alarm_time && !seconds)		bb_error_msg_and_die("must provide wake time");	if (utc == -1)		utc = rtc_adjtime_is_utc();	/* the rtcname is relative to /dev */	xchdir("/dev");	/* this RTC must exist and (if we'll sleep) be wakeup-enabled */	fd = rtc_xopen(&rtcname, O_RDONLY);	if (strcmp(suspend, "on") && !may_wakeup(rtcname))		bb_error_msg_and_die("%s not enabled for wakeup events", rtcname);	/* relative or absolute alarm time, normalized to time_t */	sys_time = time(NULL);	{		struct tm tm_time;		rtc_read_tm(&tm_time, fd);		rtc_time = rtc_tm2time(&tm_time, utc);	}	if (alarm_time) {		if (alarm_time < sys_time)			bb_error_msg_and_die("time doesn't go backward to %s", ctime(&alarm_time));		alarm_time += sys_time - rtc_time;	} else		alarm_time = rtc_time + seconds + 1;	setup_alarm(fd, &alarm_time, rtc_time);	sync();	printf("wakeup from /"%s/" at %s", suspend, ctime(&alarm_time));	fflush_all();	usleep(10 * 1000);	if (strcmp(suspend, "on"))		xopen_xwrite_close(SYS_POWER_PATH, suspend);	else {		/* "fake" suspend ... we'll do the delay ourselves */		unsigned long data;		do {			ssize_t ret = safe_read(fd, &data, sizeof(data));			if (ret < 0) {				bb_perror_msg("rtc read");				break;			}		} while (!(data & RTC_AF));	}	xioctl(fd, RTC_AIE_OFF, 0);	if (ENABLE_FEATURE_CLEAN_UP)//.........这里部分代码省略.........
Developer: Ayyayay, Project: busybox, Lines: 101


Example 23: wait

int CaptureThread::start() {	wait();	devam=false;	fd = -1;	// read config	dev_name = Settings::node();	width    = Settings::width();	height   = Settings::height();	fps      = Settings::fps();	if (fps>0) {		delay = 1000/fps;	}	else { delay = 0; }	// open webcam device node	fd = v4l2_open(dev_name.toStdString().c_str(), O_RDWR | O_NONBLOCK, 0);	if (fd < 0) {		kError() << "Cannot open device";		quit();		return 1;	}	CLEAR(fmt);	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	fmt.fmt.pix.width       = width;	fmt.fmt.pix.height      = height;	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;	fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;	xioctl(fd, VIDIOC_S_FMT, &fmt);	if (fmt.fmt.pix.pixelformat != V4L2_PIX_FMT_RGB24) {		kError() << "Libv4l didn't accept RGB24 format. Can't proceed.";		quit();		return 1;	}	emit startedCapture(fmt.fmt.pix.width, fmt.fmt.pix.height);	v4lconvert_data = v4lconvert_create(fd);	if (v4lconvert_data == NULL)		kDebug() << "v4lconvert_create";	if (v4lconvert_try_format(v4lconvert_data, &fmt, &src_fmt) != 0)		kDebug() << "v4lconvert_try_format";	xioctl(fd, VIDIOC_S_FMT, &src_fmt);	dst_buf = (unsigned char*)malloc(fmt.fmt.pix.sizeimage);	CLEAR(req);	req.count = 2;	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	req.memory = V4L2_MEMORY_MMAP;	xioctl(fd, VIDIOC_REQBUFS, &req);	buffers = (buffer*)calloc(req.count, sizeof(*buffers));	for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {		CLEAR(buf);		buf.type        = V4L2_BUF_TYPE_VIDEO_CAPTURE;		buf.memory      = V4L2_MEMORY_MMAP;		buf.index       = n_buffers;		xioctl(fd, VIDIOC_QUERYBUF, &buf);		buffers[n_buffers].length = buf.length;		buffers[n_buffers].start = v4l2_mmap(NULL, buf.length,																				 PROT_READ | PROT_WRITE, MAP_SHARED,																				 fd, buf.m.offset);		if (MAP_FAILED == buffers[n_buffers].start) {			kDebug() << "mmap";			quit();			return 1;		}	}	for (unsigned int i = 0; i < n_buffers; ++i) {		CLEAR(buf);		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;		buf.memory = V4L2_MEMORY_MMAP;		buf.index = i;		xioctl(fd, VIDIOC_QBUF, &buf);	}	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	xioctl(fd, VIDIOC_STREAMON, &type);	di=0;	sprintf(header,"P6/n%d %d 255/n",fmt.fmt.pix.width,fmt.fmt.pix.height);	devam=true;	// start processing video data	running = true;	QThread::start();	return 0;}
Developer: DHalens, Project: kamerka, Lines of code: 93
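Example 23 sets up a libv4lconvert context (v4lconvert_create / v4lconvert_try_format), but the per-frame conversion happens later, in the capture loop that is not shown here. The sketch below illustrates that step; the function name convert_frame and the buffer arguments are assumptions made for illustration, and only the v4lconvert_convert() call itself is the libv4lconvert API.

#include <libv4lconvert.h>

/* Sketch: convert one dequeued frame (src, src_len bytes in src_fmt)
 * into the requested destination format (dst_fmt, e.g. RGB24).
 * The surrounding buffer handling is assumed, not taken from kamerka. */
static int convert_frame(struct v4lconvert_data *cd,
                         struct v4l2_format *src_fmt,
                         struct v4l2_format *dst_fmt,
                         unsigned char *src, int src_len,
                         unsigned char *dst, int dst_len)
{
        if (v4lconvert_convert(cd, src_fmt, dst_fmt,
                               src, src_len, dst, dst_len) < 0)
                return -1;  /* v4lconvert_get_error_message(cd) describes the failure */
        return 0;
}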


Example 24: grab_frame

int grab_frame()
{
        struct v4l2_format              fmt;
        struct v4l2_buffer              buf;
        struct v4l2_requestbuffers      req;
        enum v4l2_buf_type              type;
        fd_set                          fds;
        struct timeval                  tv;
        int                             r, fd = -1;
        unsigned int                    i, n_buffers;
        char                            *dev_name = "/dev/video1";
        struct buffer                   *buffers;

        fd = v4l2_open(dev_name, O_RDWR | O_NONBLOCK, 0);
        if (fd < 0) {
                perror("Cannot open device");
                exit(EXIT_FAILURE);
        }

        printf("grabbing frame...\n");

        CLEAR(fmt);
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        fmt.fmt.pix.width       = 640;
        fmt.fmt.pix.height      = 480;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
        fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;
        xioctl(fd, VIDIOC_S_FMT, &fmt);
        if (fmt.fmt.pix.pixelformat != V4L2_PIX_FMT_RGB24) {
                printf("Libv4l didn't accept RGB24 format. Can't proceed.\n");
                exit(EXIT_FAILURE);
        }
        if ((fmt.fmt.pix.width != 640) || (fmt.fmt.pix.height != 480))
                printf("Warning: driver is sending image at %dx%d\n",
                        fmt.fmt.pix.width, fmt.fmt.pix.height);

        CLEAR(req);
        req.count = 100;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_MMAP;
        xioctl(fd, VIDIOC_REQBUFS, &req);

        buffers = calloc(req.count, sizeof(*buffers));
        for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
                CLEAR(buf);
                buf.type        = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory      = V4L2_MEMORY_MMAP;
                buf.index       = n_buffers;
                xioctl(fd, VIDIOC_QUERYBUF, &buf);

                buffers[n_buffers].length = buf.length;
                buffers[n_buffers].start = v4l2_mmap(NULL, buf.length,
                              PROT_READ | PROT_WRITE, MAP_SHARED,
                              fd, buf.m.offset);

                if (MAP_FAILED == buffers[n_buffers].start) {
                        perror("mmap");
                        exit(EXIT_FAILURE);
                }
        }

        for (i = 0; i < n_buffers; ++i) {
                CLEAR(buf);
                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_MMAP;
                buf.index = i;
                xioctl(fd, VIDIOC_QBUF, &buf);
        }
        type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        xioctl(fd, VIDIOC_STREAMON, &type);

        for (i = 0; i < req.count; i++) {
                do {
                        FD_ZERO(&fds);
                        FD_SET(fd, &fds);

                        /* Timeout. */
                        tv.tv_sec = 2;
                        tv.tv_usec = 0;

                        r = select(fd + 1, &fds, NULL, NULL, &tv);
                } while (r == -1 && errno == EINTR);  /* fixed: original had the assignment "errno = EINTR" */
                if (r == -1) {
                        perror("select");
                        return errno;
                }

                CLEAR(buf);
                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_MMAP;
                xioctl(fd, VIDIOC_DQBUF, &buf);
                xioctl(fd, VIDIOC_QBUF, &buf);
        }

        binarize(buffers, buf);

        type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
//......... remainder of this function omitted .........
Developer: flavioribeiro, Project: formsdetector, Lines of code: 101


Example 25:

void V4LIn::stop() {
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	if (-1 == xioctl(fd, VIDIOC_STREAMOFF, &type))
		errno_exit("VIDIOC_STREAMOFF");

	bStarted = false;
}
Developer: jfellus, Project: paper-navigation-vision, Lines of code: 5
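VIDIOC_STREAMOFF only stops the capture engine; the memory-mapped buffers set up when streaming started are still mapped afterwards. The teardown sketch below assumes the same start/length bookkeeping used throughout these examples; it is an illustration, not code from the paper-navigation-vision project.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

struct buffer {
        void   *start;
        size_t  length;
};

/* Sketch: release the mmapped buffers and close the device once
 * streaming has been stopped with VIDIOC_STREAMOFF. */
static void uninit_device(struct buffer *buffers, unsigned int n_buffers, int fd)
{
        unsigned int i;

        for (i = 0; i < n_buffers; ++i)
                if (munmap(buffers[i].start, buffers[i].length) == -1)
                        perror("munmap");

        free(buffers);
        close(fd);
}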


Example 26: flash_eraseall_main

int flash_eraseall_main(int argc UNUSED_PARAM, char **argv){	struct jffs2_unknown_node cleanmarker;	mtd_info_t meminfo;	int fd, clmpos, clmlen;	erase_info_t erase;	struct stat st;	unsigned int flags;	char *mtd_name;	opt_complementary = "=1";	flags = BBTEST | getopt32(argv, "jq");	mtd_name = argv[optind];	fd = xopen(mtd_name, O_RDWR);	fstat(fd, &st);	if (!S_ISCHR(st.st_mode))		bb_error_msg_and_die("%s: not a char device", mtd_name);	xioctl(fd, MEMGETINFO, &meminfo);	erase.length = meminfo.erasesize;	if (meminfo.type == MTD_NANDFLASH)		flags |= IS_NAND;	clmpos = 0;	clmlen = 8;	if (flags & OPTION_J) {		uint32_t *crc32_table;		crc32_table = crc32_filltable(NULL, 0);		cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);		cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);		if (!(flags & IS_NAND))			cleanmarker.totlen = cpu_to_je32(sizeof(struct jffs2_unknown_node));		else {			struct nand_oobinfo oobinfo;			xioctl(fd, MEMGETOOBSEL, &oobinfo);			/* Check for autoplacement */			if (oobinfo.useecc == MTD_NANDECC_AUTOPLACE) {				/* Get the position of the free bytes */				clmpos = oobinfo.oobfree[0][0];				clmlen = oobinfo.oobfree[0][1];				if (clmlen > 8)					clmlen = 8;				if (clmlen == 0)					bb_error_msg_and_die("autoplacement selected and no empty space in oob");			} else {				/* Legacy mode */				switch (meminfo.oobsize) {				case 8:					clmpos = 6;					clmlen = 2;					break;				case 16:					clmpos = 8;					/*clmlen = 8;*/					break;				case 64:					clmpos = 16;					/*clmlen = 8;*/					break;				}			}			cleanmarker.totlen = cpu_to_je32(8);		}		cleanmarker.hdr_crc = cpu_to_je32(			crc32_block_endian0(0, &cleanmarker, sizeof(struct jffs2_unknown_node) - 4, crc32_table)		);	}	/* Don't want to destroy progress indicator by bb_error_msg's */	applet_name = xasprintf("/n%s: %s", applet_name, mtd_name);	for (erase.start = 0; erase.start < meminfo.size;	     erase.start += meminfo.erasesize) {		if (flags & BBTEST) {			int ret;			loff_t offset = erase.start;			ret = ioctl(fd, MEMGETBADBLOCK, &offset);			if (ret > 0) {				if (!(flags & OPTION_Q))					bb_info_msg("/nSkipping bad block at 0x%08x", erase.start);				continue;			}			if (ret < 0) {				/* Black block table is not available on certain flash				 * types e.g. NOR				 */				if (errno == EOPNOTSUPP) {					flags &= ~BBTEST;					if (flags & IS_NAND)						bb_error_msg_and_die("bad block check not available");				} else {					bb_perror_msg_and_die("MEMGETBADBLOCK error");				}//.........这里部分代码省略.........
Developer: 915546302, Project: busybox-osx, Lines of code: 101
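The part of flash_eraseall elided above is the loop that actually erases the chip: it walks the device in meminfo.erasesize steps, skips bad blocks on NAND, and issues the MEMERASE ioctl for each block (followed, with -j, by writing the JFFS2 cleanmarker). The reduced sketch below shows only that central step and deliberately leaves out the bad-block and cleanmarker handling; it assumes an xioctl() helper such as BusyBox's, which exits on failure.

#include <stdint.h>
#include <mtd/mtd-user.h>

/* Sketch: erase a single MTD block starting at 'start'. */
static void erase_one_block(int fd, const mtd_info_t *meminfo, uint32_t start)
{
        erase_info_t erase;

        erase.start  = start;
        erase.length = meminfo->erasesize;

        /* BusyBox's xioctl() prints an error and dies if this fails. */
        xioctl(fd, MEMERASE, &erase);
}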


Example 27: init_device

static void init_device(void){        struct v4l2_capability cap;        struct v4l2_cropcap cropcap;        struct v4l2_crop crop;        struct v4l2_format fmt;        unsigned int min;        if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap)) {                if (EINVAL == errno) {                        fprintf(stderr, "%s is no V4L2 device/n",                                 dev_name);                        exit(EXIT_FAILURE);                } else {                        errno_exit("VIDIOC_QUERYCAP");                }        }        if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {                fprintf(stderr, "%s is no video capture device/n",                         dev_name);                exit(EXIT_FAILURE);        }        switch (io) {        case IO_METHOD_READ:                if (!(cap.capabilities & V4L2_CAP_READWRITE)) {                        fprintf(stderr, "%s does not support read i/o/n",                                 dev_name);                        exit(EXIT_FAILURE);                }                break;        case IO_METHOD_MMAP:        case IO_METHOD_USERPTR:                if (!(cap.capabilities & V4L2_CAP_STREAMING)) {                        fprintf(stderr, "%s does not support streaming i/o/n",                                 dev_name);                        exit(EXIT_FAILURE);                }                break;        }        /* Select video input, video standard and tune here. */        CLEAR(cropcap);        cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;        if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap)) {                crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;                crop.c = cropcap.defrect; /* reset to default */                if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop)) {                        switch (errno) {                        case EINVAL:                                /* Cropping not supported. */                                break;                        default:                                /* Errors ignored. */                                break;                        }                }        } else {                /* Errors ignored. */        }        CLEAR(fmt);        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;        if (force_format) {                fmt.fmt.pix.width       = 640;                fmt.fmt.pix.height      = 480;                fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;                fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;                if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))                        errno_exit("VIDIOC_S_FMT");                /* Note VIDIOC_S_FMT may change width and height. */        } else {                /* Preserve original settings as set by v4l2-ctl for example */                if (-1 == xioctl(fd, VIDIOC_G_FMT, &fmt))                        errno_exit("VIDIOC_G_FMT");        }        /* Buggy driver paranoia. */        min = fmt.fmt.pix.width * 2;        if (fmt.fmt.pix.bytesperline < min)                fmt.fmt.pix.bytesperline = min;        min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;        if (fmt.fmt.pix.sizeimage < min)                fmt.fmt.pix.sizeimage = min;        switch (io) {        case IO_METHOD_READ:                init_read(fmt.fmt.pix.sizeimage);//.........这里部分代码省略.........
Developer: crazysjf, Project: sjf-repo, Lines of code: 101
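As the comment in init_device notes, VIDIOC_S_FMT is a negotiation: the driver may silently adjust the requested width, height or bytesperline to something it can deliver. The short sketch below is an illustrative helper (not part of the original program) showing the usual way to detect that adjustment by re-reading the fields the driver wrote back into the same v4l2_format structure.

#include <stdio.h>
#include <string.h>
#include <linux/videodev2.h>

/* Sketch: request a capture format and report what the driver actually
 * granted.  Relies on an EINTR-retrying xioctl() wrapper as used above. */
static int set_capture_format(int fd, unsigned int width, unsigned int height)
{
        struct v4l2_format fmt;

        memset(&fmt, 0, sizeof(fmt));
        fmt.type                = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        fmt.fmt.pix.width       = width;
        fmt.fmt.pix.height      = height;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
        fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;

        if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
                return -1;

        if (fmt.fmt.pix.width != width || fmt.fmt.pix.height != height)
                fprintf(stderr, "driver adjusted the frame size to %ux%u\n",
                        fmt.fmt.pix.width, fmt.fmt.pix.height);

        return 0;
}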


Example 28: showkey_main

int showkey_main(int argc UNUSED_PARAM, char **argv){	enum {		OPT_a = (1<<0), // display the decimal/octal/hex values of the keys		OPT_k = (1<<1), // display only the interpreted keycodes (default)		OPT_s = (1<<2), // display only the raw scan-codes	};	INIT_G();	// FIXME: aks are all mutually exclusive	getopt32(argv, "aks");	// prepare for raw mode	xget1(&tio, &tio0);	// put stdin in raw mode	xset1(&tio);#define press_keys "Press any keys, program terminates %s:/r/n/n"	if (option_mask32 & OPT_a) {		// just read stdin char by char		unsigned char c;		printf(press_keys, "on EOF (ctrl-D)");		// read and show byte values		while (1 == read(STDIN_FILENO, &c, 1)) {			printf("%3u 0%03o 0x%02x/r/n", c, c, c);			if (04 /*CTRL-D*/ == c)				break;		}	} else {		// we assume a PC keyboard		xioctl(STDIN_FILENO, KDGKBMODE, &kbmode);		printf("Keyboard mode was %s./r/n/n",			kbmode == K_RAW ? "RAW" :				(kbmode == K_XLATE ? "XLATE" :					(kbmode == K_MEDIUMRAW ? "MEDIUMRAW" :						(kbmode == K_UNICODE ? "UNICODE" : "UNKNOWN")))		);		// set raw keyboard mode		xioctl(STDIN_FILENO, KDSKBMODE, (void *)(ptrdiff_t)((option_mask32 & OPT_k) ? K_MEDIUMRAW : K_RAW));		// we should exit on any signal; signals should interrupt read		bb_signals_recursive_norestart(BB_FATAL_SIGS, record_signo);		// inform user that program ends after time of inactivity		printf(press_keys, "10s after last keypress");		// read and show scancodes		while (!bb_got_signal) {			char buf[18];			int i, n;			// setup 10s watchdog			alarm(10);			// read scancodes			n = read(STDIN_FILENO, buf, sizeof(buf));			i = 0;			while (i < n) {				if (option_mask32 & OPT_s) {					// show raw scancodes					printf("0x%02x ", buf[i++]);				} else {					// show interpreted scancodes (default)					char c = buf[i];					int kc;					if (i+2 < n					 && (c & 0x7f) == 0					 && (buf[i+1] & 0x80) != 0					 && (buf[i+2] & 0x80) != 0					) {						kc = ((buf[i+1] & 0x7f) << 7) | (buf[i+2] & 0x7f);						i += 3;					} else {						kc = (c & 0x7f);						i++;					}					printf("keycode %3u %s", kc, (c & 0x80) ? "release" : "press");				}			}			puts("/r");		}		// restore keyboard mode		xioctl(STDIN_FILENO, KDSKBMODE, (void *)(ptrdiff_t)kbmode);	}	// restore console settings	xset1(&tio0);	return EXIT_SUCCESS;}
Developer: 593141477, Project: Learning-Linux, Lines of code: 97


Example 29: OrangePi_VideoIn

int OrangePi_VideoIn(struct vdIn *vd,char *device,int width,int height,int fps, int format,	    int grabmethod,globals *pglobal,int id){    int currentWidth, currentHeight = 0;    struct v4l2_format currentFormat;    if(vd == NULL || device == NULL) 	return -1;    if(width == 0 || height == 0) 	return -1;    if(grabmethod < 0 || grabmethod > 1) 	grabmethod = 1;    vd->videodevice = NULL;    vd->status = NULL;    vd->pictName = NULL;    vd->videodevice = (char *)calloc(1 , 16 * sizeof(char));    vd->status = (char *)calloc(1 , 100 * sizeof(char));    vd->pictName = (char *)calloc(1 , 80 * sizeof(char));    snprintf(vd->videodevice , 12 , "%s" , device);    vd->toggleAvi = 0;    vd->getPict = 0;    vd->signalquit = 1;    vd->width = width;    vd->height = height;    vd->fps = fps;    vd->formatIn = format;    vd->grabmethod = grabmethod;    if(OrangePi_init(&OrangePi,vd) < 0) {	fprintf(stderr,"Init v4l2 failed !! exit fatal /n");	printf("ERROR %s %d/n",__func__,__LINE__);    }    // enumerating formats    currentFormat.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;    if(xioctl(vd->fd, VIDIOC_G_FMT, &currentFormat) == 0) {        currentWidth = currentFormat.fmt.pix.width;        currentHeight = currentFormat.fmt.pix.height;        DBG("Current size: %dx%d/n", currentWidth, currentHeight);    }    pglobal->in[id].in_formats = NULL;    for(pglobal->in[id].formatCount = 0; 1; pglobal->in[id].formatCount++) {        struct v4l2_fmtdesc fmtdesc;        fmtdesc.index = pglobal->in[id].formatCount;        fmtdesc.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;        if(xioctl(vd->fd, VIDIOC_ENUM_FMT, &fmtdesc) < 0) {            break;        }        if (pglobal->in[id].in_formats == NULL) {            pglobal->in[id].in_formats = (input_format*)calloc(1, sizeof(input_format));        } else {            pglobal->in[id].in_formats = (input_format*)realloc(pglobal->in[id].in_formats, (pglobal->in[id].formatCount + 1) * sizeof(input_format));        }        if (pglobal->in[id].in_formats == NULL) {            DBG("Calloc/realloc failed: %s/n", strerror(errno));            return -1;        }        memcpy(&pglobal->in[id].in_formats[pglobal->in[id].formatCount], &fmtdesc, sizeof(input_format));        if(fmtdesc.pixelformat == format)            pglobal->in[id].currentFormat = pglobal->in[id].formatCount;        DBG("Supported format: %s/n", fmtdesc.description);        struct v4l2_frmsizeenum fsenum;        fsenum.index = pglobal->in[id].formatCount;        fsenum.pixel_format = fmtdesc.pixelformat;        int j = 0;        pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions = NULL;        pglobal->in[id].in_formats[pglobal->in[id].formatCount].resolutionCount = 0;        pglobal->in[id].in_formats[pglobal->in[id].formatCount].currentResolution = -1;        while(1) {            fsenum.index = j;            j++;            if(xioctl(vd->fd, VIDIOC_ENUM_FRAMESIZES, &fsenum) == 0) {                pglobal->in[id].in_formats[pglobal->in[id].formatCount].resolutionCount++;                if (pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions == NULL) {                    pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions =                        (input_resolution*)calloc(1, sizeof(input_resolution));                } else {                    pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions =                        (input_resolution*)realloc(pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions, 
j * sizeof(input_resolution));                }                if (pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions == NULL) {                    DBG("Calloc/realloc failed/n");                    return -1;                }                pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions[j-1].width = fsenum.discrete.width;                pglobal->in[id].in_formats[pglobal->in[id].formatCount].supportedResolutions[j-1].height = fsenum.discrete.height;                if(format == fmtdesc.pixelformat) {                    pglobal->in[id].in_formats[pglobal->in[id].formatCount].currentResolution = (j - 1);                    DBG("/tSupported size with the current format: %dx%d/n", fsenum.discrete.width, fsenum.discrete.height);                } else {                    DBG("/tSupported size: %dx%d/n", fsenum.discrete.width, fsenum.discrete.height);                }//.........这里部分代码省略.........
Developer: zhaoyifan, Project: OrangePi2MicrosoftAzure, Lines of code: 101


Example 30: v4l2_stream_record

//.........这里部分代码省略.........	/* meteor_get_geometry(&_geo); */	SET_SHP_FLAGS(shpp, 0);	SET_SHP_ROWS(shpp, 480);	/* BUG don't hard code */	SET_SHP_COLS(shpp, 640); /* BUG don't hard code */	/* should get bytes per pixel from _geo... */	SET_SHP_COMPS(shpp,  DEFAULT_BYTES_PER_PIXEL);	SET_SHP_FRAMES(shpp, n_frames);	SET_SHP_SEQS(shpp, 1);	SET_SHP_PREC_PTR(shpp, PREC_FOR_CODE(PREC_UBY) );	set_shape_flags(shpp,NO_OBJ,AUTO_SHAPE);	rv_set_shape(QSP_ARG  ifp->if_name,shpp);	/* We write an entire frame to each disk in turn... */#ifdef RECORD_TIMESTAMPS	if( stamping ) init_stamps(n_frames);#endif /* RECORD_TIMESTAMPS */	record_state = RECORDING;	/* stuff from video_reader */	for(i=0;i<n_cameras;i++)		start_capturing(QSP_ARG  vd_tbl[i]);	n_so_far = 0;	which_disk=0;	bufp=next_frame(QSP_ARG  n_cameras,vd_tbl);	while( bufp != NULL ){		int n_written;		/* write the frame to disk */if( really_writing ){		if( (n_written = write(fd_arr[which_disk],vd_tbl[which_device]->vd_buf_tbl[ bufp->index ].mb_start,n_to_write))			!= n_to_write ){			sprintf(ERROR_STRING,"write (frm %ld, fd=%d)",n_so_far,ifp->if_fd);			perror(ERROR_STRING);			sprintf(ERROR_STRING,				"%ld requested, %d written",				n_to_write,n_written);			WARN(ERROR_STRING);			return;		}		which_disk = (which_disk+1) % ndisks;}		n_so_far++;		/* QBUG releases this buffer to be used again */		if( xioctl(vd_tbl[which_device]->vd_fd, VIDIOC_QBUF, bufp) < 0 )			ERRNO_WARN ("v4l2_stream_record:  error queueing frame");		if( n_so_far >= n_frames )			bufp = NULL;		else			bufp=next_frame(QSP_ARG  n_cameras,vd_tbl);	}	if( bufp != NULL ){		if( xioctl(vd_tbl[which_device]->vd_fd, VIDIOC_QBUF, bufp) < 0 )			ERRNO_WARN ("v4l2_stream_record:  error queueing frame");	}	for(i=0;i<n_cameras;i++)		stop_capturing(QSP_ARG  vd_tbl[i]);	rv_sync(SINGLE_QSP_ARG);	/* we used to disable real-time scheduling here, but	 * when video_reader executes as a separate thread there	 * is no point, because it won't affect the main process!	 */	recording_in_process = 0;	record_state=NOT_RECORDING;#ifdef RECORD_TIMESTAMPS	n_stored_times = n_so_far;#endif#ifdef CAUTIOUS	if( ifp == NO_IMAGE_FILE ){		WARN("CAUTIOUS:  v4l2_stream_record:  ifp is NULL!?");		return;	}#endif /* CAUTIOUS */	v4l2_finish_recording( QSP_ARG  ifp );	/* Because the disk writers don't use the fileio library,	 * the ifp doesn't know how many frames have been written.	 */	ifp->if_nfrms = n_frames;	/* BUG? is this really what happened? */#else // ! HAVE_RAWVOL	WARN("v4l2_stream_record:  Program not compiled with raw volume support, can't record!?");#endif // ! HAVE_RAWVOL} /* end v4l2_stream_record */
Developer: E-LLP, Project: QuIP, Lines of code: 101
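At the core of v4l2_stream_record is the same dequeue/consume/requeue cycle used by all memory-mapped V4L2 capture code: VIDIOC_DQBUF hands the application a filled buffer, the payload is written to disk, and VIDIOC_QBUF gives the buffer back to the driver. The reduced sketch below illustrates only that cycle; the buffer-table arguments and the single output descriptor are assumptions, not the QuIP data structures.

#include <string.h>
#include <unistd.h>
#include <linux/videodev2.h>

/* Sketch: dequeue one filled buffer, write its payload, requeue it.
 * Error handling and partial-write handling are trimmed for brevity. */
static int record_one_frame(int fd, int out_fd,
                            void *const *starts, const size_t *lengths)
{
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(buf));
        buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;

        if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf))   /* wait for a filled buffer */
                return -1;

        if (write(out_fd, starts[buf.index], lengths[buf.index]) < 0)
                return -1;

        return xioctl(fd, VIDIOC_QBUF, &buf);       /* hand it back to the driver */
}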



Note: The xioctl function examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code remains with the original authors, so please consult each project's license before distributing or using it. Do not reproduce without permission.

