您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ rq_data_dir函数代码示例

51自学网 2021-06-02 11:56:25
  C++
这篇教程C++ rq_data_dir函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中rq_data_dir函数的典型用法代码示例。如果您正苦于以下问题:C++ rq_data_dir函数的具体用法?C++ rq_data_dir怎么用?C++ rq_data_dir使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了rq_data_dir函数的23个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: htifblk_segment

static int htifblk_segment(struct htifblk_device *dev,	struct request *req){	static struct htifblk_request pkt __aligned(HTIF_ALIGN);	u64 offset, size, end;	unsigned long cmd;	offset = (blk_rq_pos(req) << SECTOR_SIZE_SHIFT);	size = (blk_rq_cur_sectors(req) << SECTOR_SIZE_SHIFT);	end = offset + size;	if (unlikely(end < offset || end > dev->size)) {		dev_err(&dev->dev->dev, "out-of-bounds access:"			" offset=%llu size=%llu/n", offset, size);		return -EINVAL;	}	rmb();	pkt.addr = __pa(req->buffer);	pkt.offset = offset;	pkt.size = size;	pkt.tag = dev->tag;	switch (rq_data_dir(req)) {	case READ:		cmd = HTIF_CMD_READ;		break;	case WRITE:		cmd = HTIF_CMD_WRITE;		break;	default:		return -EINVAL;	}	dev->req = req;	htif_tohost(dev->dev->index, cmd, __pa(&pkt));	return 0;}
开发者ID:arunthomas,项目名称:riscv-linux,代码行数:38,


示例2: do_ldm_req

//从请求队列上获取请求操作对象,从请求对象中获得操作参数:读写操作的起始sector和操作字节数,然后将所需的操作执行到硬件上去//本函数是由blk驱动框架来自动调用的,调用时机由电梯算法调度决定static void do_ldm_req(struct request_queue *q){	//从请求队列上获取一个请求对象	struct request *req = blk_fetch_request(q);	while (req) {		//从第几个扇区开始操作		u32 start = blk_rq_pos(req) * SECTOR_SIZE;		//获得当前请求操作的字节数		u32 len = blk_rq_cur_bytes(req);		//检查本次request操作是否越界		int err = 0;		if (start + len > DEV_SIZE) {			printk(KERN_ERR "request region is out of device capacity/n");			err = -EIO;			goto err_request;		}		//rq_data_dir获得当前请求的操作方向		//建议在memcpy前后加上打印语句,以便观察读写操作的调度时机		//数据从内核传输到应用		if (rq_data_dir(req) == READ) {			memcpy(req->buffer, (u8*)ldm.addr + start, len);			printk("read from %d, size %d/n", start, len);		} else { //数据从应用层传输到内核并写入			memcpy((u8*)ldm.addr + start, req->buffer, len);			printk("write from %d, size %d/n", start, len);		}		//__blk_end_request_cur:返回false表示当前req的所有操作都完成了,于是下面试图调用blk_fetch_request再从队列上获取新的请求,如果获取不到,则req得到NULL将退出循环;		//返回true的话说明当前req操作还没完成,继续循环执行		//err参数可以独立改变__blk_end_request_cur的返回值,err<0时,函数返回false。当发生其他错误时可以用err参数来结束当前req请求,从请求队列上获取新的请求err_request:		if (!__blk_end_request_cur(req, err)) {			req = blk_fetch_request(q);		}	}}
开发者ID:sktwj,项目名称:var,代码行数:40,


示例3: sbull_request

/** The simple form of the request function.*/static void sbull_request(struct request_queue *q){	struct request *req;	req = blk_fetch_request(q);	while (req != NULL) {		struct sbull_dev *dev = req->rq_disk->private_data;		if (! blk_fs_request(req)) {			printk (KERN_NOTICE "Skip non-fs request/n");			__blk_end_request_all(req, -EIO);			continue;		}	    //    printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx/n",	    //        dev - Devices, rq_data_dir(req),	    //        req->sector, req->current_nr_sectors,	    //        req->flags);		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));		/* end_request(req, 1); */		if(!__blk_end_request_cur(req, 0)) {			req = blk_fetch_request(q);		}	}}
开发者ID:zhu8920253,项目名称:linux_driver,代码行数:26,


示例4: htifbd_request

static void htifbd_request(struct request_queue *q){	struct request *req;	req = blk_fetch_request(q);	while (req != NULL) {		struct htifbd_dev *dev;		dev = req->rq_disk->private_data;		if (req->cmd_type != REQ_TYPE_FS) {			pr_notice(DRIVER_NAME ": ignoring non-fs request for %s/n",				req->rq_disk->disk_name);			__blk_end_request_all(req, -EIO);			continue;		}		htifbd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),			req->buffer, rq_data_dir(req));		if (!__blk_end_request_cur(req, 0)) {			req = blk_fetch_request(q);		}	}}
开发者ID:rishinaidu,项目名称:riscv-linux,代码行数:23,


示例5: sd_do_request

/*
 * Request dispatcher.
 *
 * Validates @req, then routes it to the read or write path.  Returns the
 * number of sectors transferred, or the (negative) error returned by
 * sd_check_request().
 */
static int sd_do_request(struct sd_host *host, struct request *req)
{
	int result = sd_check_request(host, req);

	if (result)
		return result;

	if (rq_data_dir(req) == WRITE)
		result = sd_write_request(host, req);
	else if (rq_data_dir(req) == READ)
		result = sd_read_request(host, req);
	else
		result = 0;	/* no other directions exist; keep 0 as before */

	return result;
}
开发者ID:Linux-Wii-Mod,项目名称:linux-wii-2.6.32,代码行数:26,


示例6: flash_merged_requests

/*    This function does 3 tasks:   1 check if next expires before req, is so set expire time of req to be the expire time of next   2 delete next from async fifo queue   3 check if merged req size >= bundle_size; if so, delete req from async fifo queue, reinit and insert it to bundle queue */static voidflash_merged_requests(struct request_queue *q, struct request *req,			 struct request *next){	struct flash_data *fd = q->elevator->elevator_data;	// const int data_type = !rq_is_sync(req);	// FIXME:	const int data_type = rq_data_dir(req);	/*	 * if next expires before rq, assign its expire time to rq	 * and move into next position (next will be deleted) in fifo	 */	// TODO: why need to check if async queue is empty here?	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {			list_move(&req->queuelist, &next->queuelist);			rq_set_fifo_time(req, rq_fifo_time(next));		}	}	/* delete next */	rq_fifo_clear(next);		/* task 3 only kick into bundle queue if req is async */	if(req->__data_len >= fd->bundle_size && data_type == 1)	{		/* did both delete and init */		rq_fifo_clear(req); 		list_add_tail(&req->queuelist, &fd->bundle_list);				#ifdef DEBUG_FLASH		printk("req of type %d of size %d is inserted to bundle queue/n", data_type, req->__data_len);		#endif	}}
开发者ID:luyao-jiang,项目名称:scheduler,代码行数:43,


示例7: osprd_process_request

/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Performs the read or write, as appropriate, by copying data between
 *   the ramdisk data array and the request's buffer.
 *
 * Fixes: the error branches (unknown direction, sector overflow) called
 * end_request(req, 0) and then fell through to end_request(req, 1),
 * completing the same request twice — once as failure and once as
 * success.  They now return immediately.  Also repaired broken "/n"
 * escapes in the eprintk format strings.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	/* Reject requests that run past the end of the device. */
	if (req->sector + req->current_nr_sectors <= nsectors) {
		switch (rq_data_dir(req)) {
		case READ:
			memcpy(req->buffer,
			       d->data + req->sector * SECTOR_SIZE,
			       req->current_nr_sectors * SECTOR_SIZE);
			break;
		case WRITE:
			memcpy(d->data + req->sector * SECTOR_SIZE,
			       req->buffer,
			       req->current_nr_sectors * SECTOR_SIZE);
			break;
		default:
			eprintk("Failed to process request...\n");
			end_request(req, 0);
			return;	/* don't also report success below */
		}
	} else {
		eprintk("Sector overflow...\n");
		end_request(req, 0);
		return;		/* don't also report success below */
	}

	end_request(req, 1);
}
开发者ID:jzhang121391,项目名称:COMSCI111_Lab2,代码行数:42,


示例8: osprd_process_request

 //first implement this, and test cases not involved lock can passstatic void osprd_process_request(osprd_info_t *d, struct request *req){	if (!blk_fs_request(req)) {		end_request(req, 0);		return;	}	// EXERCISE: Perform the read or write request by copying data between	// our data array and the request's buffer.	// Hint: The 'struct request' argument tells you what kind of request	// this is, and which sectors are being read or written.	// Read about 'struct request' in <linux/blkdev.h>.	// Consider the 'req->sector', 'req->current_nr_sectors', and	// 'req->buffer' members, and the rq_data_dir() function.	// Your code here.	//specify the request is read or write	unsigned int requestType = rq_data_dir(req);	//compute the offset, set pointer to corret region 	//the beginning address in osprd we are going to interact with 	uint8_t *data_ptr = d->data + (req->sector) * SECTOR_SIZE;		if (requestType == READ)	{		memcpy((void *)req->buffer, (void *)data_ptr, req->current_nr_sectors * SECTOR_SIZE);	}	else if (requestType == WRITE)	{		memcpy((void *)data_ptr, (void *)req->buffer, req->current_nr_sectors * SECTOR_SIZE);	}	else	{		eprintk("Error read/wirte./n");		end_request(req, 0);		}	end_request(req, 1);  //minimum read/write is one sector}
开发者ID:AmberYu,项目名称:CS111,代码行数:38,


示例9: blk_rq_merge_ok

bool blk_rq_merge_ok(struct request *rq, struct bio *bio){	struct request_queue *q = rq->q;	if (!rq_mergeable(rq) || !bio_mergeable(bio))		return false;	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))		return false;	/* different data direction or already started, don't merge */	if (bio_data_dir(bio) != rq_data_dir(rq))		return false;	/* must be same device and not a special request */	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))		return false;	/* only merge integrity protected bio into ditto rq */	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)		return false;	/* must be using the same buffer */	if (rq->cmd_flags & REQ_WRITE_SAME &&	    !blk_write_same_mergeable(rq->bio, bio))		return false;	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {		struct bio_vec *bprev;		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))			return false;	}	return true;}
开发者ID:robcore,项目名称:machinex,代码行数:37,


示例10: lkl_disk_request

static void lkl_disk_request(struct request_queue *q){	struct request *req;	while ((req = elv_next_request(q)) != NULL) {		struct lkl_disk_dev *dev = req->rq_disk->private_data;		struct lkl_disk_cs cs;		if (! blk_fs_request(req)) {			printk (KERN_NOTICE "lkl_disk_request: skip non-fs request/n");			__blk_end_request(req, -EIO, req->hard_cur_sectors << 9);			continue;		}		cs.linux_cookie=req;		lkl_disk_do_rw(dev->data, req->sector, req->current_nr_sectors,			       req->buffer, rq_data_dir(req), &cs);		/*		 * Async is broken.		 */		BUG_ON (cs.sync == 0);		blk_end_request(req, cs.error ? -EIO : 0, blk_rq_bytes(req));	}}
开发者ID:luciang,项目名称:lkl-linux-2.6,代码行数:24,


示例11: pd_next_buf

static void pd_next_buf( int unit ){	long	saved_flags;	spin_lock_irqsave(&pd_lock,saved_flags);	end_request(1);	if (!pd_run) {  spin_unlock_irqrestore(&pd_lock,saved_flags);			return; 	}	/* paranoia */	if (QUEUE_EMPTY ||	    (rq_data_dir(CURRENT) != pd_cmd) ||	    (minor(CURRENT->rq_dev) != pd_dev) ||	    (CURRENT->rq_status == RQ_INACTIVE) ||	    (CURRENT->sector != pd_block)) 		printk("%s: OUCH: request list changed unexpectedly/n",			PD.name);	pd_count = CURRENT->current_nr_sectors;	pd_buf = CURRENT->buffer;	spin_unlock_irqrestore(&pd_lock,saved_flags);}
开发者ID:fgeraci,项目名称:cs518-sched,代码行数:24,


示例12: sbull_request

/* * The simple form of the request function. */static void sbull_request(struct request_queue *q){	struct request *req;	while ((req = blk_fetch_request(q)) != NULL) {		do {			struct sbull_dev *dev = req->rq_disk->private_data;			if (req->cmd_type != REQ_TYPE_FS) {				printk (KERN_NOTICE "Skip non-fs request/n");				if (!__blk_end_request_cur(req, -1))					req = NULL;				continue;			}			//    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx/n",			//    			dev - Devices, rq_data_dir(req),			//    			req->sector, req->current_nr_sectors,			//    			req->flags);			sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),				       req->buffer, rq_data_dir(req));			if (!__blk_end_request_cur(req, 0))				req = NULL;		} while(req != NULL);	}}
开发者ID:largeplum,项目名称:lddsaul,代码行数:27,


示例13: do_z2_request

static void do_z2_request(struct request_queue *q){	struct request *req;	req = blk_fetch_request(q);	while (req) {		unsigned long start = blk_rq_pos(req) << 9;		unsigned long len  = blk_rq_cur_bytes(req);		int err = 0;		if (start + len > z2ram_size) {			pr_err(DEVICE_NAME ": bad access: block=%llu, "			       "count=%u/n",			       (unsigned long long)blk_rq_pos(req),			       blk_rq_cur_sectors(req));			err = -EIO;			goto done;		}		while (len) {			unsigned long addr = start & Z2RAM_CHUNKMASK;			unsigned long size = Z2RAM_CHUNKSIZE - addr;			if (len < size)				size = len;			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];			if (rq_data_dir(req) == READ)				memcpy(req->buffer, (char *)addr, size);			else				memcpy((char *)addr, req->buffer, size);			start += size;			len -= size;		}	done:		if (!__blk_end_request_cur(req, err))			req = blk_fetch_request(q);	}}
开发者ID:03199618,项目名称:linux,代码行数:36,


示例14: sg_io

/*
 * sg_io() - execute a user SG_IO request against @q.
 *
 * Validates the sg_io_hdr, allocates a BLOCK_PC request, maps the user
 * buffer (iovec or flat) into it, executes it synchronously and copies
 * the result/sense data back via blk_complete_sghdr_rq().
 *
 * Returns 0 on success or a negative errno.  Cleanup is ordered via the
 * out_free_cdb / out_put_request labels.
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	ssize_t ret = 0;
	int writing = 0;
	int at_head = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	/* 'S' is the only supported SG interface id */
	if (hdr->interface_id != 'S')
		return -EINVAL;

	/* transfer must fit in the queue's hardware limit */
	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	ret = -ENOMEM;
	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	/* oversized CDBs need a separately allocated command buffer */
	if (hdr->cmd_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
		if (!rq->cmd)
			goto out_put_request;
	}

	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
	if (ret < 0)
		goto out_free_cdb;

	ret = 0;
	if (hdr->iovec_count) {
		struct iov_iter i;
		struct iovec *iov = NULL;

		ret = import_iovec(rq_data_dir(rq),
				   hdr->dxferp, hdr->iovec_count,
				   0, &iov, &i);
		if (ret < 0)
			goto out_free_cdb;

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_truncate(&i, hdr->dxfer_len);

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out_free_cdb;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, at_head);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	ret = blk_complete_sghdr_rq(rq, hdr, bio);

out_free_cdb:
	/* free only if we kzalloc'ed a long CDB above */
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
out_put_request:
	blk_put_request(rq);
	return ret;
}
开发者ID:abhijit-mahajani-imgtec,项目名称:linux,代码行数:95,


示例15: virtio_queue_rq

/*
 * virtio_queue_rq() - blk-mq .queue_rq hook for virtio-blk (legacy
 * cmd_type-based version).
 *
 * Builds the virtio out-header from the request type (flush / fs /
 * SCSI passthrough / driver-private GET_ID), maps the request into the
 * per-request scatterlist and adds it to the per-hctx virtqueue under
 * that queue's lock.  Kicks the device when this is the last request of
 * the batch.  Returns a BLK_MQ_RQ_QUEUE_* status.
 */
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;

	/* sg table must leave room for the header and status entries */
	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	vbr->req = req;
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_DRV_PRIV:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		/* data requests carry an explicit direction bit */
		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		/* queue full: kick what we have and stop the hw queue */
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	/* notify outside the lock to keep the critical section short */
	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}
开发者ID:0-T-0,项目名称:ps4-linux,代码行数:73,


示例16: swim3_interrupt

static irqreturn_t swim3_interrupt(int irq, void *dev_id){	struct floppy_state *fs = (struct floppy_state *) dev_id;	struct swim3 __iomem *sw = fs->swim3;	int intr, err, n;	int stat, resid;	struct dbdma_regs __iomem *dr;	struct dbdma_cmd *cp;	intr = in_8(&sw->intr);	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;	if ((intr & ERROR_INTR) && fs->state != do_transfer)		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x/n",		       fs->state, rq_data_dir(fd_req), intr, err);	switch (fs->state) {	case locating:		if (intr & SEEN_SECTOR) {			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);			out_8(&sw->select, RELAX);			out_8(&sw->intr_enable, 0);			del_timer(&fs->timeout);			fs->timeout_pending = 0;			if (sw->ctrack == 0xff) {				printk(KERN_ERR "swim3: seen sector but cyl=ff?/n");				fs->cur_cyl = -1;				if (fs->retries > 5) {					swim3_end_request_cur(-EIO);					fs->state = idle;					start_request(fs);				} else {					fs->state = jogging;					act(fs);				}				break;			}			fs->cur_cyl = sw->ctrack;			fs->cur_sector = sw->csect;			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)				printk(KERN_ERR "swim3: expected cyl %d, got %d/n",				       fs->expect_cyl, fs->cur_cyl);			fs->state = do_transfer;			act(fs);		}		break;	case seeking:	case jogging:		if (sw->nseek == 0) {			out_8(&sw->control_bic, DO_SEEK);			out_8(&sw->select, RELAX);			out_8(&sw->intr_enable, 0);			del_timer(&fs->timeout);			fs->timeout_pending = 0;			if (fs->state == seeking)				++fs->retries;			fs->state = settling;			act(fs);		}		break;	case settling:		out_8(&sw->intr_enable, 0);		del_timer(&fs->timeout);		fs->timeout_pending = 0;		act(fs);		break;	case do_transfer:		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)			break;		out_8(&sw->intr_enable, 0);		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);		out_8(&sw->select, RELAX);		del_timer(&fs->timeout);		fs->timeout_pending = 0;		dr = fs->dma;		cp = fs->dma_cmd;		if (rq_data_dir(fd_req) == WRITE)			++cp;		/*		 * Check 
that the main data transfer has finished.		 * On writing, the swim3 sometimes doesn't use		 * up all the bytes of the postamble, so we can still		 * see DMA active here.  That doesn't matter as long		 * as all the sector data has been transferred.		 */		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {			/* wait a little while for DMA to complete */			for (n = 0; n < 100; ++n) {				if (cp->xfer_status != 0)					break;				udelay(1);				barrier();			}		}		/* turn off DMA */		out_le32(&dr->control, (RUN | PAUSE) << 16);		stat = ld_le16(&cp->xfer_status);		resid = ld_le16(&cp->res_count);		if (intr & ERROR_INTR) {			n = fs->scount - 1 - resid / 512;			if (n > 0) {				blk_update_request(fd_req, 0, n << 9);//.........这里部分代码省略.........
开发者ID:ANFS,项目名称:ANFS-kernel,代码行数:101,


示例17: udelay

	/* We must wait a bit for dbdma to stop */	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)		udelay(1);	out_8(&sw->intr_enable, 0);	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);	out_8(&sw->select, RELAX);	if (rq_data_dir(fd_req) == WRITE)		++cp;	if (ld_le16(&cp->xfer_status) != 0)		s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);	else		s = 0;	fd_req->sector += s;	fd_req->current_nr_sectors -= s;	printk(KERN_ERR "swim3: timeout %sing sector %ld/n",	       (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);	end_request(fd_req, 0);	fs->state = idle;	start_request(fs);}static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs){	struct floppy_state *fs = (struct floppy_state *) dev_id;	struct swim3 __iomem *sw = fs->swim3;	int intr, err, n;	int stat, resid;	struct dbdma_regs __iomem *dr;	struct dbdma_cmd *cp;	intr = in_8(&sw->intr);
开发者ID:BackupTheBerlios,项目名称:tew632-brp-svn,代码行数:31,


示例18: row_add_request

/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Appends @rq to its ROW queue's fifo, updates per-direction counters,
 * manages the idle-detection hrtimer for idling-enabled queues, and
 * marks the request urgent when its queue qualifies and no other urgent
 * request is pending or in flight.
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics*/

	/* incoming requests must not carry REQ_URGENT; clear and warn */
	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		/* cancel a pending idle timer aimed at this queue */
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
				    "Canceled delayed work on %d",
				    rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		/* idle only if inserts arrive faster than freq_ms apart */
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else  if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle Regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
开发者ID:duki994,项目名称:G900H_LP_Kernel,代码行数:78,


示例19: hd_request

/* * The driver enables interrupts as much as possible.  In order to do this, * (a) the device-interrupt is disabled before entering hd_request(), * and (b) the timeout-interrupt is disabled before the sti(). * * Interrupts are still masked (by default) whenever we are exchanging * data/cmds with a drive, because some drives seem to have very poor * tolerance for latency during I/O. The IDE driver has support to unmask * interrupts for non-broken hardware, so use that driver if required. */static void hd_request(void){	unsigned int block, nsect, sec, track, head, cyl;	struct hd_i_struct *disk;	struct request *req;	if (do_hd)		return;repeat:	del_timer(&device_timer);	local_irq_enable();	req = CURRENT;	if (!req) {		do_hd = NULL;		return;	}	if (reset) {		local_irq_disable();		reset_hd();		return;	}	disk = req->rq_disk->private_data;	block = req->sector;	nsect = req->nr_sectors;	if (block >= get_capacity(req->rq_disk) ||	    ((block+nsect) > get_capacity(req->rq_disk))) {		printk("%s: bad access: block=%d, count=%d/n",			req->rq_disk->disk_name, block, nsect);		end_request(req, 0);		goto repeat;	}	if (disk->special_op) {		if (do_special_op(disk, req))			goto repeat;		return;	}	sec   = block % disk->sect + 1;	track = block / disk->sect;	head  = track % disk->head;	cyl   = track / disk->head;#ifdef DEBUG	printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p/n",		req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ",		cyl, head, sec, nsect, req->buffer);#endif	if (blk_fs_request(req)) {		switch (rq_data_dir(req)) {		case READ:			hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);			if (reset)				goto repeat;			break;		case WRITE:			hd_out(disk,nsect,sec,head,cyl,WIN_WRITE,&write_intr);			if (reset)				goto repeat;			if (wait_DRQ()) {				bad_rw_intr();				goto repeat;			}			outsw(HD_DATA,req->buffer,256);			break;		default:			printk("unknown hd-command/n");			end_request(req, 0);			break;		}	}}
开发者ID:3sOx,项目名称:asuswrt-merlin,代码行数:82,


示例20: cyasblkdev_blk_issue_rq

/* issue astoria blkdev request (issue_fn) */static int cyasblkdev_blk_issue_rq(					struct cyasblkdev_queue *bq,					struct request *req					){	struct cyasblkdev_blk_data *bd = bq->data;	int index = 0;	int ret = CY_AS_ERROR_SUCCESS;	uint32_t req_sector = 0;	uint32_t req_nr_sectors = 0;	int bus_num = 0;	int lcl_unit_no = 0;	DBGPRN_FUNC_NAME;	/*	 * will construct a scatterlist for the given request;	 * the return value is the number of actually used	 * entries in the resulting list. Then, this scatterlist	 * can be used for the actual DMA prep operation.	 */	spin_lock_irq(&bd->lock);	index = blk_rq_map_sg(bq->queue, req, bd->sg);	if (req->rq_disk == bd->user_disk_0) {		bus_num = bd->user_disk_0_bus_num;		req_sector = blk_rq_pos(req) + gl_bd->user_disk_0_first_sector;		req_nr_sectors = blk_rq_sectors(req);		lcl_unit_no = gl_bd->user_disk_0_unit_no;		#ifndef WESTBRIDGE_NDEBUG		cy_as_hal_print_message("%s: request made to disk 0 "			"for sector=%d, num_sectors=%d, unit_no=%d/n",			__func__, req_sector, (int) blk_rq_sectors(req),			lcl_unit_no);		#endif	} else if (req->rq_disk == bd->user_disk_1) {		bus_num = bd->user_disk_1_bus_num;		req_sector = blk_rq_pos(req) + gl_bd->user_disk_1_first_sector;		/*SECT_NUM_TRANSLATE(blk_rq_sectors(req));*/		req_nr_sectors = blk_rq_sectors(req);		lcl_unit_no = gl_bd->user_disk_1_unit_no;		#ifndef WESTBRIDGE_NDEBUG		cy_as_hal_print_message("%s: request made to disk 1 for "			"sector=%d, num_sectors=%d, unit_no=%d/n", __func__,			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);		#endif	} else if (req->rq_disk == bd->system_disk) {		bus_num = bd->system_disk_bus_num;		req_sector = blk_rq_pos(req) + gl_bd->system_disk_first_sector;		req_nr_sectors = blk_rq_sectors(req);		lcl_unit_no = gl_bd->system_disk_unit_no;		#ifndef WESTBRIDGE_NDEBUG		cy_as_hal_print_message("%s: request made to system disk "			"for sector=%d, num_sectors=%d, unit_no=%d/n", __func__,			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);		#endif	}	#ifndef 
WESTBRIDGE_NDEBUG	else {		cy_as_hal_print_message(			"%s: invalid disk used for request/n", __func__);	}	#endif	spin_unlock_irq(&bd->lock);	if (rq_data_dir(req) == READ) {		#ifndef WESTBRIDGE_NDEBUG		cy_as_hal_print_message("%s: calling readasync() "			"req_sector=0x%x, req_nr_sectors=0x%x, bd->sg:%x/n/n",			__func__, req_sector, req_nr_sectors, (uint32_t)bd->sg);		#endif		ret = cy_as_storage_read_async(bd->dev_handle, bus_num, 0,			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,			(cy_as_storage_callback)cyasblkdev_issuecallback);		if (ret != CY_AS_ERROR_SUCCESS) {			#ifndef WESTBRIDGE_NDEBUG			cy_as_hal_print_message("%s:readasync() error %d at "				"address %ld, unit no %d/n", __func__, ret,				blk_rq_pos(req), lcl_unit_no);			cy_as_hal_print_message("%s:ending i/o request "				"on reg:%x/n", __func__, (uint32_t)req);			#endif			while (blk_end_request(req,				(ret == CY_AS_ERROR_SUCCESS),				req_nr_sectors*512))				;			bq->req = NULL;		}	} else {		ret = cy_as_storage_write_async(bd->dev_handle, bus_num, 0,			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,//.........这里部分代码省略.........
开发者ID:119-org,项目名称:hi3518-osdrv,代码行数:101,


示例21: mmc_blk_issue_rq

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req){	struct mmc_blk_data *md = mq->data;	struct mmc_card *card = md->queue.card;	struct mmc_blk_request brq;	int ret = 1;	if (mmc_card_claim_host(card))		goto flush_queue;	do {		struct mmc_command cmd;		u32 readcmd, writecmd;		memset(&brq, 0, sizeof(struct mmc_blk_request));		brq.mrq.cmd = &brq.cmd;		brq.mrq.data = &brq.data;		brq.cmd.arg = req->sector;		if (!mmc_card_blockaddr(card))			brq.cmd.arg <<= 9;		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;		brq.data.blksz = 1 << md->block_bits;		brq.stop.opcode = MMC_STOP_TRANSMISSION;		brq.stop.arg = 0;		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);		if (brq.data.blocks > card->host->max_blk_count)			brq.data.blocks = card->host->max_blk_count;		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);#ifdef CONFIG_MMC_SUPPORT_MOVINAND		if (mmc_card_movinand(card)) {			if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) {				cmd.opcode = MMC_SET_BLOCK_COUNT;				cmd.arg = req->nr_sectors;				cmd.flags = MMC_RSP_R1;				ret = mmc_wait_for_cmd(card->host, &cmd, 2);			}			if (rq_data_dir(req) == READ) {				if (brq.data.blocks > 1) {					brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK;					brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI);//					brq.mrq.stop = &brq.stop;				} else {					brq.cmd.opcode = MMC_READ_SINGLE_BLOCK;					brq.data.flags |= MMC_DATA_READ;					brq.mrq.stop = NULL;				}			} else {				brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;				brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI;//				brq.mrq.stop = &brq.stop;			}		} else {#endif		/*		 * If the host doesn't support multiple block writes, force		 * block writes to single block. SD cards are excepted from		 * this rule as they support querying the number of		 * successfully written sectors.		 
*/		if (rq_data_dir(req) != READ &&		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&		    !mmc_card_sd(card))			brq.data.blocks = 1;		if (brq.data.blocks > 1) {			brq.data.flags |= MMC_DATA_MULTI;			brq.mrq.stop = &brq.stop;			readcmd = MMC_READ_MULTIPLE_BLOCK;			writecmd = MMC_WRITE_MULTIPLE_BLOCK;		} else {			brq.mrq.stop = NULL;			readcmd = MMC_READ_SINGLE_BLOCK;			writecmd = MMC_WRITE_BLOCK;		}		if (rq_data_dir(req) == READ) {			brq.cmd.opcode = readcmd;			brq.data.flags |= MMC_DATA_READ;		} else {			brq.cmd.opcode = writecmd;			brq.data.flags |= MMC_DATA_WRITE;		}#ifdef CONFIG_MMC_SUPPORT_MOVINAND		}#endif		brq.data.sg = mq->sg;		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);		mmc_wait_for_req(card->host, &brq.mrq);		if (brq.cmd.error) {			printk(KERN_ERR "%s: error %d sending read/write command/n",			       req->rq_disk->disk_name, brq.cmd.error);			goto cmd_err;		}//.........这里部分代码省略.........
开发者ID:maliyu,项目名称:SOM2416,代码行数:101,


示例22: ace_fsm_dostate

//.........这里部分代码省略.........				ace->data_result);		} else {			ace->media_change = 0;			/* Record disk parameters */			set_capacity(ace->gd,				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));			dev_info(ace->dev, "capacity: %i sectors/n",				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));		}		/* We're done, drop to IDLE state and notify waiters */		ace->fsm_state = ACE_FSM_STATE_IDLE;		ace->id_result = ace->data_result;		while (ace->id_req_count) {			complete(&ace->id_completion);			ace->id_req_count--;		}		break;	case ACE_FSM_STATE_REQ_PREPARE:		req = ace_get_next_request(ace->queue);		if (!req) {			ace->fsm_state = ACE_FSM_STATE_IDLE;			break;		}		blk_start_request(req);		/* Okay, it's a data request, set it up for transfer */		dev_dbg(ace->dev,			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i/n",			(unsigned long long)blk_rq_pos(req),			blk_rq_sectors(req), blk_rq_cur_sectors(req),			rq_data_dir(req));		ace->req = req;		ace->data_ptr = req->buffer;		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);		count = blk_rq_sectors(req);		if (rq_data_dir(req)) {			/* Kick off write request */			dev_dbg(ace->dev, "write data/n");			ace->fsm_task = ACE_TASK_WRITE;			ace_out(ace, ACE_SECCNTCMD,				count | ACE_SECCNTCMD_WRITE_DATA);		} else {			/* Kick off read request */			dev_dbg(ace->dev, "read data/n");			ace->fsm_task = ACE_TASK_READ;			ace_out(ace, ACE_SECCNTCMD,				count | ACE_SECCNTCMD_READ_DATA);		}		/* As per datasheet, put config controller in reset */		val = ace_in(ace, ACE_CTRL);		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);		/* Move to the transfer state.  The systemace will raise		 * an interrupt once there is something to do		 */		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;		if (ace->fsm_task == ACE_TASK_READ)			ace_fsm_yieldirq(ace);	/* wait for data ready */		break;
开发者ID:Aaroneke,项目名称:galaxy-2636,代码行数:67,


示例23: virtio_queue_rq

/*
 * virtio_queue_rq() - blk-mq .queue_rq hook for virtio-blk (req_op-based
 * version).
 *
 * Maps the request operation to a virtio command type, fills the
 * out-header, maps the request into the per-request scatterlist, and
 * adds it to the per-hctx virtqueue under that queue's lock (using the
 * SCSI passthrough path for SCSI ops).  Kicks the device when this is
 * the last request of the batch.  Returns a BLK_MQ_RQ_QUEUE_* status.
 */
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	/* sg table must leave room for the header and status entries */
	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	/* only plain read/write (type 0) carries a sector number */
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		/* data requests carry an explicit direction bit */
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		/* queue full: kick what we have and stop the hw queue */
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	/* notify outside the lock to keep the critical section short */
	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}
开发者ID:BWhitten,项目名称:linux-stable,代码行数:74,



注:本文中的rq_data_dir函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ rq_entry_fifo函数代码示例
C++ rq函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。