
Tutorial: C++ svc_rdma_put_context function code examples


This article collects typical usage examples of the C++ svc_rdma_put_context function. If you have been wondering what svc_rdma_put_context does, how to call it, or where to find real uses of it, the code examples selected below should help.

A total of 23 code examples of svc_rdma_put_context are shown below, ordered roughly by popularity.
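Although the site files this under C++, the examples below are all C code from the Linux kernel's server-side RPC-over-RDMA transport (net/sunrpc/xprtrdma), collected from several different kernel trees, so the same function appears here in several generations. As background, here is a minimal sketch of the context get/put pairing that every example exercises. The real declarations live in include/linux/sunrpc/svc_rdma.h and differ slightly between kernel versions, so treat this as an illustration rather than the authoritative API:

/* Sketch only: prototypes approximate the historical kernel API. */
struct svcxprt_rdma;            /* one instance per RDMA transport */
struct svc_rdma_op_ctxt;        /* per-work-request context: SGEs, pages, flags */

/* Allocate (or recycle) a context for an upcoming work request. */
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt);

/* Return a context to the transport. When free_pages is non-zero, the
 * pages referenced by ctxt->pages[0..count-1] are released as well;
 * when it is zero, ownership of those pages has already been handed
 * elsewhere (typically to the svc_rqst of the RPC being processed).
 */
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages);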

Example 1: svc_rdma_post_recv

static int
svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
    struct ib_recv_wr recv_wr, *bad_recv_wr;
    struct svc_rdma_op_ctxt *ctxt;
    struct page *page;
    dma_addr_t pa;
    int sge_no;
    int buflen;
    int ret;

    ctxt = svc_rdma_get_context(xprt);
    buflen = 0;
    ctxt->direction = DMA_FROM_DEVICE;
    ctxt->cqe.done = svc_rdma_wc_receive;
    for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
        if (sge_no >= xprt->sc_max_sge) {
            pr_err("svcrdma: Too many sges (%d)\n", sge_no);
            goto err_put_ctxt;
        }
        page = alloc_page(GFP_KERNEL);
        if (!page)
            goto err_put_ctxt;
        ctxt->pages[sge_no] = page;
        pa = ib_dma_map_page(xprt->sc_cm_id->device,
                             page, 0, PAGE_SIZE,
                             DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
            goto err_put_ctxt;
        svc_rdma_count_mappings(xprt, ctxt);
        ctxt->sge[sge_no].addr = pa;
        ctxt->sge[sge_no].length = PAGE_SIZE;
        ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
        ctxt->count = sge_no + 1;
        buflen += PAGE_SIZE;
    }
    recv_wr.next = NULL;
    recv_wr.sg_list = &ctxt->sge[0];
    recv_wr.num_sge = ctxt->count;
    recv_wr.wr_cqe = &ctxt->cqe;

    svc_xprt_get(&xprt->sc_xprt);
    ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
    if (ret) {
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        svc_xprt_put(&xprt->sc_xprt);
    }
    return ret;

err_put_ctxt:
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_context(ctxt, 1);
    return -ENOMEM;
}

Developer: the-snowwhite, project: linux-socfpga, lines: 55
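Note the two error paths in this example: both the mapping-failure path (err_put_ctxt) and the ib_post_recv() failure path first call svc_rdma_unmap_dma() and then put the context with free_pages set to 1, so the freshly allocated receive pages are released along with the context rather than leaked.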


Example 2: svc_rdma_post_recv

int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
    struct ib_recv_wr recv_wr, *bad_recv_wr;
    struct svc_rdma_op_ctxt *ctxt;
    struct page *page;
    dma_addr_t pa;
    int sge_no;
    int buflen;
    int ret;

    ctxt = svc_rdma_get_context(xprt);
    buflen = 0;
    ctxt->direction = DMA_FROM_DEVICE;
    for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
        BUG_ON(sge_no >= xprt->sc_max_sge);
        page = svc_rdma_get_page();
        ctxt->pages[sge_no] = page;
        pa = ib_dma_map_page(xprt->sc_cm_id->device,
                             page, 0, PAGE_SIZE,
                             DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
            goto err_put_ctxt;
        atomic_inc(&xprt->sc_dma_used);
        ctxt->sge[sge_no].addr = pa;
        ctxt->sge[sge_no].length = PAGE_SIZE;
        ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
        ctxt->count = sge_no + 1;
        buflen += PAGE_SIZE;
    }
    recv_wr.next = NULL;
    recv_wr.sg_list = &ctxt->sge[0];
    recv_wr.num_sge = ctxt->count;
    recv_wr.wr_id = (u64)(unsigned long)ctxt;

    svc_xprt_get(&xprt->sc_xprt);
    ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
    if (ret) {
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        svc_xprt_put(&xprt->sc_xprt);
    }
    return ret;

err_put_ctxt:
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_context(ctxt, 1);
    return -ENOMEM;
}

Developer: 1yankeedt, project: D710BST_FL24_Kernel, lines: 48


Example 3: svc_rdma_wc_read

/**
 * svc_rdma_wc_read - Invoked by RDMA provider for each polled Read WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
{
    struct svcxprt_rdma *xprt = cq->cq_context;
    struct ib_cqe *cqe = wc->wr_cqe;
    struct svc_rdma_op_ctxt *ctxt;

    svc_rdma_send_wc_common(xprt, wc, "read");

    ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_frmr(xprt, ctxt->frmr);

    if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
        struct svc_rdma_op_ctxt *read_hdr;

        read_hdr = ctxt->read_hdr;
        spin_lock(&xprt->sc_rq_dto_lock);
        list_add_tail(&read_hdr->dto_q,
                      &xprt->sc_read_complete_q);
        spin_unlock(&xprt->sc_rq_dto_lock);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&xprt->sc_xprt);
    }

    svc_rdma_put_context(ctxt, 0);
    svc_xprt_put(&xprt->sc_xprt);
}

Developer: acton393, project: linux, lines: 34
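Examples 1 and 3 (and 8, 12 and 13 below) use the newer ib_cqe completion mechanism: the work request carries a pointer to an ib_cqe embedded in the context, the provider invokes its done callback, and the handler recovers the owning context with container_of(). Here is a minimal self-contained sketch of that pattern, with a hypothetical my_ctxt type standing in for svc_rdma_op_ctxt:

#include <linux/kernel.h>       /* container_of() */
#include <rdma/ib_verbs.h>      /* struct ib_cqe, struct ib_wc, struct ib_cq */

struct my_ctxt {
    struct ib_cqe cqe;          /* embedded completion entry */
    /* ... per-request state (SGEs, pages, flags) ... */
};

static void my_wc_handler(struct ib_cq *cq, struct ib_wc *wc)
{
    /* wc->wr_cqe points at the embedded member, so container_of()
     * recovers the surrounding context without any wr_id cast. */
    struct my_ctxt *ctxt = container_of(wc->wr_cqe, struct my_ctxt, cqe);

    /* ... process the completion, then release ctxt ... */
    (void)ctxt;
}

Before posting, the sender sets ctxt->cqe.done to the handler and points the work request's wr_cqe at &ctxt->cqe, exactly as example 1 does with svc_rdma_wc_receive.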


Example 4: rdma_read_complete

static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)
{
    int page_no;
    int ret;

    /* Copy RPC pages */
    for (page_no = 0; page_no < head->count; page_no++) {
        put_page(rqstp->rq_pages[page_no]);
        rqstp->rq_pages[page_no] = head->pages[page_no];
    }

    /* Adjustments made for RDMA_NOMSG type requests */
    if (head->position == 0) {
        if (head->arg.len <= head->sge[0].length) {
            head->arg.head[0].iov_len = head->arg.len -
                                        head->byte_len;
            head->arg.page_len = 0;
        } else {
            head->arg.head[0].iov_len = head->sge[0].length -
                                        head->byte_len;
            head->arg.page_len = head->arg.len -
                                 head->sge[0].length;
        }
    }

    /* Point rq_arg.pages past header */
    rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
    rqstp->rq_arg.page_len = head->arg.page_len;
    rqstp->rq_arg.page_base = head->arg.page_base;

    /* rq_respages starts after the last arg page */
    rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
    rqstp->rq_next_page = rqstp->rq_respages + 1;

    /* Rebuild rq_arg head and tail. */
    rqstp->rq_arg.head[0] = head->arg.head[0];
    rqstp->rq_arg.tail[0] = head->arg.tail[0];
    rqstp->rq_arg.len = head->arg.len;
    rqstp->rq_arg.buflen = head->arg.buflen;

    /* Free the context */
    svc_rdma_put_context(head, 0);

    /* XXX: What should this be? */
    rqstp->rq_prot = IPPROTO_MAX;
    svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

    ret = rqstp->rq_arg.head[0].iov_len
        + rqstp->rq_arg.page_len
        + rqstp->rq_arg.tail[0].iov_len;
    dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
            "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
            ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
            rqstp->rq_arg.head[0].iov_len);

    return ret;
}

Developer: hw-claudio, project: linux, lines: 58


Example 5: svc_rdma_bc_sendto

/* Send a backwards direction RPC call.
 *
 * Caller holds the connection's mutex and has already marshaled
 * the RPC/RDMA request.
 *
 * This is similar to svc_rdma_reply, but takes an rpc_rqst
 * instead, does not support chunks, and avoids blocking memory
 * allocation.
 *
 * XXX: There is still an opportunity to block in svc_rdma_send()
 * if there are no SQ entries to post the Send. This may occur if
 * the adapter has a small maximum SQ depth.
 */
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
                              struct rpc_rqst *rqst)
{
    struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
    struct svc_rdma_op_ctxt *ctxt;
    struct svc_rdma_req_map *vec;
    struct ib_send_wr send_wr;
    int ret;

    vec = svc_rdma_get_req_map(rdma);
    ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
    if (ret)
        goto out_err;

    ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
    if (ret)
        goto out_err;

    ctxt = svc_rdma_get_context(rdma);
    ctxt->pages[0] = virt_to_page(rqst->rq_buffer);
    ctxt->count = 1;

    ctxt->direction = DMA_TO_DEVICE;
    ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
    ctxt->sge[0].length = sndbuf->len;
    ctxt->sge[0].addr =
        ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
                        sndbuf->len, DMA_TO_DEVICE);
    if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) {
        ret = -EIO;
        goto out_unmap;
    }
    svc_rdma_count_mappings(rdma, ctxt);

    memset(&send_wr, 0, sizeof(send_wr));
    ctxt->cqe.done = svc_rdma_wc_send;
    send_wr.wr_cqe = &ctxt->cqe;
    send_wr.sg_list = ctxt->sge;
    send_wr.num_sge = 1;
    send_wr.opcode = IB_WR_SEND;
    send_wr.send_flags = IB_SEND_SIGNALED;

    ret = svc_rdma_send(rdma, &send_wr);
    if (ret) {
        ret = -EIO;
        goto out_unmap;
    }

out_err:
    svc_rdma_put_req_map(rdma, vec);
    dprintk("svcrdma: %s returns %d\n", __func__, ret);
    return ret;

out_unmap:
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_context(ctxt, 1);
    goto out_err;
}

Developer: BWhitten, project: linux-stable, lines: 71


Example 6: process_context

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
                            struct svc_rdma_op_ctxt *ctxt)
{
    svc_rdma_unmap_dma(ctxt);

    switch (ctxt->wr_op) {
    case IB_WR_SEND:
        if (ctxt->frmr)
            pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
        svc_rdma_put_context(ctxt, 1);
        break;

    case IB_WR_RDMA_WRITE:
        if (ctxt->frmr)
            pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
        svc_rdma_put_context(ctxt, 0);
        break;

    case IB_WR_RDMA_READ:
    case IB_WR_RDMA_READ_WITH_INV:
        svc_rdma_put_frmr(xprt, ctxt->frmr);
        if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
            struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

            if (read_hdr) {
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                list_add_tail(&read_hdr->dto_q,
                              &xprt->sc_read_complete_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
            } else {
                pr_err("svcrdma: ctxt->read_hdr == NULL\n");
            }
            svc_xprt_enqueue(&xprt->sc_xprt);
        }
        svc_rdma_put_context(ctxt, 0);
        break;

    default:
        printk(KERN_ERR "svcrdma: unexpected completion type, "
               "opcode=%d\n",
               ctxt->wr_op);
        break;
    }
}

Developer: ammubhave, project: bargud, lines: 47


Example 7: process_context

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
                            struct svc_rdma_op_ctxt *ctxt)
{
    struct svc_rdma_op_ctxt *read_hdr;
    int free_pages = 0;

    svc_rdma_unmap_dma(ctxt);

    switch (ctxt->wr_op) {
    case IB_WR_SEND:
        free_pages = 1;
        break;

    case IB_WR_RDMA_WRITE:
        break;

    case IB_WR_RDMA_READ:
    case IB_WR_RDMA_READ_WITH_INV:
        svc_rdma_put_frmr(xprt, ctxt->frmr);
        if (!test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags))
            break;
        read_hdr = ctxt->read_hdr;
        svc_rdma_put_context(ctxt, 0);

        spin_lock_bh(&xprt->sc_rq_dto_lock);
        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        list_add_tail(&read_hdr->dto_q,
                      &xprt->sc_read_complete_q);
        spin_unlock_bh(&xprt->sc_rq_dto_lock);
        svc_xprt_enqueue(&xprt->sc_xprt);
        return;

    default:
        dprintk("svcrdma: unexpected completion opcode=%d\n",
                ctxt->wr_op);
        break;
    }

    svc_rdma_put_context(ctxt, free_pages);
}

Developer: andy-shev, project: linux, lines: 45
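Examples 6 and 7 are two generations of the same handler; both are called from the send-CQ reaper with a context recovered by casting wc.wr_id (see example 22) and dispatch on ctxt->wr_op, whereas the newer code in examples 3, 8, 12 and 13 gives each context an embedded ib_cqe with its own done callback, so no switch is needed. Note also how the free_pages argument varies by operation: a completed Send puts its context with 1 (the reply pages belong to the context and can now be freed), while RDMA Write and Read completions put with 0 (the pages are owned elsewhere, for example by the deferred RPC).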


Example 8: svc_rdma_wc_write

/**
 * svc_rdma_wc_write - Invoked by RDMA provider for each polled Write WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_write(struct ib_cq *cq, struct ib_wc *wc)
{
    struct ib_cqe *cqe = wc->wr_cqe;
    struct svc_rdma_op_ctxt *ctxt;

    svc_rdma_send_wc_common_put(cq, wc, "write");

    ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_context(ctxt, 0);
}

Developer: acton393, project: linux, lines: 17


Example 9: rdma_read_complete

static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)
{
    int page_no;
    int ret;

    BUG_ON(!head);

    /* Copy RPC pages */
    for (page_no = 0; page_no < head->count; page_no++) {
        put_page(rqstp->rq_pages[page_no]);
        rqstp->rq_pages[page_no] = head->pages[page_no];
    }

    /* Point rq_arg.pages past header */
    rdma_fix_xdr_pad(&head->arg);
    rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
    rqstp->rq_arg.page_len = head->arg.page_len;
    rqstp->rq_arg.page_base = head->arg.page_base;

    /* rq_respages starts after the last arg page */
    rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
    rqstp->rq_next_page = rqstp->rq_respages + 1;
#else
    rqstp->rq_resused = 0;
#endif

    /* Rebuild rq_arg head and tail. */
    rqstp->rq_arg.head[0] = head->arg.head[0];
    rqstp->rq_arg.tail[0] = head->arg.tail[0];
    rqstp->rq_arg.len = head->arg.len;
    rqstp->rq_arg.buflen = head->arg.buflen;

    /* Free the context */
    svc_rdma_put_context(head, 0);

    /* XXX: What should this be? */
    rqstp->rq_prot = IPPROTO_MAX;
    svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

    ret = rqstp->rq_arg.head[0].iov_len
        + rqstp->rq_arg.page_len
        + rqstp->rq_arg.tail[0].iov_len;
    dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
            "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
            ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
            rqstp->rq_arg.head[0].iov_len);

    return ret;
}

Developer: antiguru, project: ofed-compat-rdma, lines: 50


Example 10: rdma_read_complete

(In this vendor tree the comments were stripped to whitespace; they are restored below from the identical upstream function shown in examples 4 and 9.)

static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)
{
    int page_no;
    int ret;

    BUG_ON(!head);

    /* Copy RPC pages */
    for (page_no = 0; page_no < head->count; page_no++) {
        put_page(rqstp->rq_pages[page_no]);
        rqstp->rq_pages[page_no] = head->pages[page_no];
    }

    /* Point rq_arg.pages past header */
    rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
    rqstp->rq_arg.page_len = head->arg.page_len;
    rqstp->rq_arg.page_base = head->arg.page_base;

    /* rq_respages starts after the last arg page */
    rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
    rqstp->rq_resused = 0;

    /* Rebuild rq_arg head and tail. */
    rqstp->rq_arg.head[0] = head->arg.head[0];
    rqstp->rq_arg.tail[0] = head->arg.tail[0];
    rqstp->rq_arg.len = head->arg.len;
    rqstp->rq_arg.buflen = head->arg.buflen;

    /* Free the context */
    svc_rdma_put_context(head, 0);

    /* XXX: What should this be? */
    rqstp->rq_prot = IPPROTO_MAX;
    svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

    ret = rqstp->rq_arg.head[0].iov_len
        + rqstp->rq_arg.page_len
        + rqstp->rq_arg.tail[0].iov_len;
    dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
            "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
            ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
            rqstp->rq_arg.head[0].iov_len);

    return ret;
}

Developer: romanbb, project: android_kernel_lge_d851, lines: 45


Example 11: rq_cq_reap

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
    int ret;
    struct ib_wc wc;
    struct svc_rdma_op_ctxt *ctxt = NULL;

    if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
        return;

    ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
    atomic_inc(&rdma_stat_rq_poll);

    while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
        ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
        ctxt->wc_status = wc.status;
        ctxt->byte_len = wc.byte_len;
        svc_rdma_unmap_dma(ctxt);
        if (wc.status != IB_WC_SUCCESS) {
            /* Close the transport */
            dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
            set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
            svc_rdma_put_context(ctxt, 1);
            svc_xprt_put(&xprt->sc_xprt);
            continue;
        }
        spin_lock_bh(&xprt->sc_rq_dto_lock);
        list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
        spin_unlock_bh(&xprt->sc_rq_dto_lock);
        svc_xprt_put(&xprt->sc_xprt);
    }

    if (ctxt)
        atomic_inc(&rdma_stat_rq_prod);

    set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
    /*
     * If data arrived before established event,
     * don't enqueue. This defers RPC I/O until the
     * RDMA connection is complete.
     */
    if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
        svc_xprt_enqueue(&xprt->sc_xprt);
}

Developer: LouZiffer, project: m900_kernel_cupcake-SDX, lines: 51


Example 12: svc_rdma_wc_receive

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
    struct svcxprt_rdma *xprt = cq->cq_context;
    struct ib_cqe *cqe = wc->wr_cqe;
    struct svc_rdma_op_ctxt *ctxt;

    /* WARNING: Only wc->wr_cqe and wc->status are reliable */
    ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
    svc_rdma_unmap_dma(ctxt);

    if (wc->status != IB_WC_SUCCESS)
        goto flushed;

    /* All wc fields are now known to be valid */
    ctxt->byte_len = wc->byte_len;
    spin_lock(&xprt->sc_rq_dto_lock);
    list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
    spin_unlock(&xprt->sc_rq_dto_lock);

    svc_rdma_post_recv(xprt);

    set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
    if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
        goto out;
    goto out_enqueue;

flushed:
    if (wc->status != IB_WC_WR_FLUSH_ERR)
        pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
               ib_wc_status_msg(wc->status),
               wc->status, wc->vendor_err);
    set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
    svc_rdma_put_context(ctxt, 1);

out_enqueue:
    svc_xprt_enqueue(&xprt->sc_xprt);
out:
    svc_xprt_put(&xprt->sc_xprt);
}

Developer: the-snowwhite, project: linux-socfpga, lines: 45


Example 13: svc_rdma_wc_send

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
    struct svcxprt_rdma *xprt = cq->cq_context;
    struct ib_cqe *cqe = wc->wr_cqe;
    struct svc_rdma_op_ctxt *ctxt;

    atomic_inc(&xprt->sc_sq_avail);
    wake_up(&xprt->sc_send_wait);

    ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_context(ctxt, 1);

    if (unlikely(wc->status != IB_WC_SUCCESS)) {
        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&xprt->sc_xprt);
        if (wc->status != IB_WC_WR_FLUSH_ERR)
            pr_err("svcrdma: Send: %s (%u/0x%x)\n",
                   ib_wc_status_msg(wc->status),
                   wc->status, wc->vendor_err);
    }

    svc_xprt_put(&xprt->sc_xprt);
}

Developer: the-snowwhite, project: linux-socfpga, lines: 30
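Notice that this send handler reclaims resources unconditionally, before checking the status: the SQ credit (sc_sq_avail) is returned, waiters are woken, and the context is unmapped and put with free_pages=1 whether the Send completed successfully or was flushed. Only the transport-close handling depends on wc->status.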


Example 14: rdma_read_chunk_lcl

/* Issue an RDMA_READ using the local lkey to map the data sink */
static int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
                               struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *head,
                               int *page_no,
                               u32 *page_offset,
                               u32 rs_handle,
                               u32 rs_length,
                               u64 rs_offset,
                               int last)
{
    struct ib_send_wr read_wr;
    int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
    struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
    int ret, read, pno;
    u32 pg_off = *page_offset;
    u32 pg_no = *page_no;

    ctxt->direction = DMA_FROM_DEVICE;
    ctxt->read_hdr = head;
    pages_needed =
        min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
    read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);

    for (pno = 0; pno < pages_needed; pno++) {
        int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

        head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
        head->arg.page_len += len;
        head->arg.len += len;
        if (!pg_off)
            head->count++;
        rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
        rqstp->rq_next_page = rqstp->rq_respages + 1;
#endif
        ctxt->sge[pno].addr =
            ib_dma_map_page(xprt->sc_cm_id->device,
                            head->arg.pages[pg_no], pg_off,
                            PAGE_SIZE - pg_off,
                            DMA_FROM_DEVICE);
        ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
                                   ctxt->sge[pno].addr);
        if (ret)
            goto err;
        atomic_inc(&xprt->sc_dma_used);

        /* The lkey here is either a local dma lkey or a dma_mr lkey */
        ctxt->sge[pno].lkey = xprt->sc_dma_lkey;
        ctxt->sge[pno].length = len;
        ctxt->count++;

        /* adjust offset and wrap to next page if needed */
        pg_off += len;
        if (pg_off == PAGE_SIZE) {
            pg_off = 0;
            pg_no++;
        }
        rs_length -= len;
    }

    if (last && rs_length == 0)
        set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
    else
        clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

    memset(&read_wr, 0, sizeof(read_wr));
    read_wr.wr_id = (unsigned long)ctxt;
    read_wr.opcode = IB_WR_RDMA_READ;
    ctxt->wr_op = read_wr.opcode;
    read_wr.send_flags = IB_SEND_SIGNALED;
    read_wr.wr.rdma.rkey = rs_handle;
    read_wr.wr.rdma.remote_addr = rs_offset;
    read_wr.sg_list = ctxt->sge;
    read_wr.num_sge = pages_needed;

    ret = svc_rdma_send(xprt, &read_wr);
    if (ret) {
        pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
        goto err;
    }

    /* return current location in page array */
    *page_no = pg_no;
    *page_offset = pg_off;
    ret = read;
    atomic_inc(&rdma_stat_read);
    return ret;
err:
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_context(ctxt, 0);
    return ret;
}

Developer: antiguru, project: ofed-compat-rdma, lines: 94
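This example shows the other half of the protocol seen in the completion handlers: the final RDMA_READ of a request is tagged with RDMACTXT_F_LAST_CTXT, and it is that tag which later tells the completion path (examples 3, 6, 7 and 22) to move the saved read_hdr context onto sc_read_complete_q and wake the transport so the deferred RPC can run.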


Example 15: send_reply

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND.
 */
static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_op_ctxt *ctxt,
                      struct svc_rdma_req_map *vec,
                      int byte_count)
{
    struct ib_send_wr send_wr;
    int sge_no;
    int sge_bytes;
    int page_no;
    int ret;

    /* Post a recv buffer to handle another request. */
    ret = svc_rdma_post_recv(rdma);
    if (ret) {
        printk(KERN_INFO
               "svcrdma: could not post a receive buffer, err=%d."
               "Closing transport %p.\n", ret, rdma);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        svc_rdma_put_context(ctxt, 0);
        return -ENOTCONN;
    }

    /* Prepare the context */
    ctxt->pages[0] = page;
    ctxt->count = 1;

    /* Prepare the SGE for the RPCRDMA Header */
    atomic_inc(&rdma->sc_dma_used);
    ctxt->sge[0].addr =
        ib_dma_map_page(rdma->sc_cm_id->device,
                        page, 0, PAGE_SIZE, DMA_TO_DEVICE);
    ctxt->direction = DMA_TO_DEVICE;
    ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
    ctxt->sge[0].lkey = rdma->sc_phys_mr->lkey;

    /* Determine how many of our SGE are to be transmitted */
    for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
        sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
        byte_count -= sge_bytes;
        atomic_inc(&rdma->sc_dma_used);
        ctxt->sge[sge_no].addr =
            ib_dma_map_single(rdma->sc_cm_id->device,
                              vec->sge[sge_no].iov_base,
                              sge_bytes, DMA_TO_DEVICE);
        ctxt->sge[sge_no].length = sge_bytes;
        ctxt->sge[sge_no].lkey = rdma->sc_phys_mr->lkey;
    }
    BUG_ON(byte_count != 0);

    /* Save all respages in the ctxt and remove them from the
     * respages array. They are our pages until the I/O
     * completes.
     */
    for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
        ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
        ctxt->count++;
        rqstp->rq_respages[page_no] = NULL;
        /* If there are more pages than SGE, terminate SGE list */
        if (page_no+1 >= sge_no)
            ctxt->sge[page_no+1].length = 0;
    }
    BUG_ON(sge_no > rdma->sc_max_sge);

    memset(&send_wr, 0, sizeof send_wr);
    ctxt->wr_op = IB_WR_SEND;
    send_wr.wr_id = (unsigned long)ctxt;
    send_wr.sg_list = ctxt->sge;
    send_wr.num_sge = sge_no;
    send_wr.opcode = IB_WR_SEND;
    send_wr.send_flags = IB_SEND_SIGNALED;

    ret = svc_rdma_send(rdma, &send_wr);
    if (ret)
        svc_rdma_put_context(ctxt, 1);

    return ret;
}

Developer: LouZiffer, project: m900_kernel_cupcake-SDX, lines: 94
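Note that send_reply() begins by posting a fresh receive buffer, so a receive is always outstanding for the client's next request before the reply goes out. If that post fails, the transport is marked for close and the context is put with free_pages=0, because at that point the reply pages still belong to the rqstp; only after they are saved into ctxt->pages[] does a failure path put the context with free_pages=1.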


Example 16: rdma_read_xdr

(The comments in this vendor tree were stripped to whitespace; they are restored below from the matching upstream function, a later copy of which appears in example 20.)

static int rdma_read_xdr(struct svcxprt_rdma *xprt,
                         struct rpcrdma_msg *rmsgp,
                         struct svc_rqst *rqstp,
                         struct svc_rdma_op_ctxt *hdr_ctxt)
{
    struct ib_send_wr read_wr;
    struct ib_send_wr inv_wr;
    int err = 0;
    int ch_no;
    int ch_count;
    int byte_count;
    int sge_count;
    u64 sgl_offset;
    struct rpcrdma_read_chunk *ch;
    struct svc_rdma_op_ctxt *ctxt = NULL;
    struct svc_rdma_req_map *rpl_map;
    struct svc_rdma_req_map *chl_map;

    /* If no read list is present, return 0 */
    ch = svc_rdma_get_read_chunk(rmsgp);
    if (!ch)
        return 0;

    svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
    if (ch_count > RPCSVC_MAXPAGES)
        return -EINVAL;

    /* Allocate temporary reply and chunk maps */
    rpl_map = svc_rdma_get_req_map();
    chl_map = svc_rdma_get_req_map();

    if (!xprt->sc_frmr_pg_list_len)
        sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
                                    rpl_map, chl_map, ch_count,
                                    byte_count);
    else
        sge_count = fast_reg_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
                                         rpl_map, chl_map, ch_count,
                                         byte_count);
    if (sge_count < 0) {
        err = -EIO;
        goto out;
    }

    sgl_offset = 0;
    ch_no = 0;

    for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
         ch->rc_discrim != 0; ch++, ch_no++) {
        u64 rs_offset;
next_sge:
        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->frmr = hdr_ctxt->frmr;
        ctxt->read_hdr = NULL;
        clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

        /* Prepare READ WR */
        memset(&read_wr, 0, sizeof read_wr);
        read_wr.wr_id = (unsigned long)ctxt;
        read_wr.opcode = IB_WR_RDMA_READ;
        ctxt->wr_op = read_wr.opcode;
        read_wr.send_flags = IB_SEND_SIGNALED;
        read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
        xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
                         &rs_offset);
        read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
        read_wr.sg_list = ctxt->sge;
        read_wr.num_sge =
            rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
        err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr,
                                &rpl_map->sge[chl_map->ch[ch_no].start],
                                &sgl_offset,
                                read_wr.num_sge);
        if (err) {
            svc_rdma_unmap_dma(ctxt);
            svc_rdma_put_context(ctxt, 0);
            goto out;
        }
        if (((ch+1)->rc_discrim == 0) &&
            (read_wr.num_sge == chl_map->ch[ch_no].count)) {
            /*
             * Mark the last RDMA_READ with a bit to
             * indicate all RPC data has been fetched from
             * the client and the RPC needs to be enqueued.
             */
            set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
            if (hdr_ctxt->frmr) {
                set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
                /*
                 * Invalidate the local MR used to map the data
                 * sink.
                 */
                if (xprt->sc_dev_caps &
                    SVCRDMA_DEVCAP_READ_W_INV) {
                    read_wr.opcode =
                        IB_WR_RDMA_READ_WITH_INV;
                    ctxt->wr_op = read_wr.opcode;
                    read_wr.ex.invalidate_rkey =
//......... part of the code omitted here .........

Developer: romanbb, project: android_kernel_lge_d851, lines: 101


Example 17: svc_rdma_sendto

int svc_rdma_sendto(struct svc_rqst *rqstp)
{
    struct svc_xprt *xprt = rqstp->rq_xprt;
    struct svcxprt_rdma *rdma =
        container_of(xprt, struct svcxprt_rdma, sc_xprt);
    struct rpcrdma_msg *rdma_argp;
    struct rpcrdma_msg *rdma_resp;
    struct rpcrdma_write_array *reply_ary;
    enum rpcrdma_proc reply_type;
    int ret;
    int inline_bytes;
    struct page *res_page;
    struct svc_rdma_op_ctxt *ctxt;
    struct svc_rdma_req_map *vec;

    dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

    /* Get the RDMA request header. */
    rdma_argp = xdr_start(&rqstp->rq_arg);

    /* Build a req vec for the XDR */
    ctxt = svc_rdma_get_context(rdma);
    ctxt->direction = DMA_TO_DEVICE;
    vec = svc_rdma_get_req_map();
    ret = map_xdr(rdma, &rqstp->rq_res, vec);
    if (ret)
        goto err0;
    inline_bytes = rqstp->rq_res.len;

    /* Create the RDMA response header */
    res_page = svc_rdma_get_page();
    rdma_resp = page_address(res_page);
    reply_ary = svc_rdma_get_reply_array(rdma_argp);
    if (reply_ary)
        reply_type = RDMA_NOMSG;
    else
        reply_type = RDMA_MSG;
    svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
                                     rdma_resp, reply_type);

    /* Send any write-chunk data and build resp write-list */
    ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
                            rqstp, vec);
    if (ret < 0) {
        printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
               ret);
        goto err1;
    }
    inline_bytes -= ret;

    /* Send any reply-list data and update resp reply-list */
    ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
                            rqstp, vec);
    if (ret < 0) {
        printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
               ret);
        goto err1;
    }
    inline_bytes -= ret;

    ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                     inline_bytes);
    svc_rdma_put_req_map(vec);
    dprintk("svcrdma: send_reply returns %d\n", ret);
    return ret;

err1:
    put_page(res_page);
err0:
    svc_rdma_put_req_map(vec);
    svc_rdma_put_context(ctxt, 0);
    return ret;
}

Developer: flwh, project: Alcatel_OT_985_kernel, lines: 73


Example 18: send_reply

static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_op_ctxt *ctxt,
                      struct svc_rdma_req_map *vec,
                      int byte_count)
{
    struct ib_send_wr send_wr;
    struct ib_send_wr inv_wr;
    int sge_no;
    int sge_bytes;
    int page_no;
    int ret;

    /* Post a recv buffer to handle another request. */
    ret = svc_rdma_post_recv(rdma);
    if (ret) {
        printk(KERN_INFO
               "svcrdma: could not post a receive buffer, err=%d."
               "Closing transport %p.\n", ret, rdma);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        svc_rdma_put_frmr(rdma, vec->frmr);
        svc_rdma_put_context(ctxt, 0);
        return -ENOTCONN;
    }

    /* Prepare the context */
    ctxt->pages[0] = page;
    ctxt->count = 1;
    ctxt->frmr = vec->frmr;
    if (vec->frmr)
        set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
    else
        clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

    /* Prepare the SGE for the RPCRDMA Header */
    ctxt->sge[0].lkey = rdma->sc_dma_lkey;
    ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
    ctxt->sge[0].addr =
        ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
                          ctxt->sge[0].length, DMA_TO_DEVICE);
    if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
        goto err;
    atomic_inc(&rdma->sc_dma_used);

    ctxt->direction = DMA_TO_DEVICE;

    /* Determine how many of our SGE are to be transmitted */
    for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
        sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
        byte_count -= sge_bytes;
        if (!vec->frmr) {
            ctxt->sge[sge_no].addr =
                ib_dma_map_single(rdma->sc_cm_id->device,
                                  vec->sge[sge_no].iov_base,
                                  sge_bytes, DMA_TO_DEVICE);
            if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                     ctxt->sge[sge_no].addr))
                goto err;
            atomic_inc(&rdma->sc_dma_used);
            ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
        } else {
            ctxt->sge[sge_no].addr = (unsigned long)
                vec->sge[sge_no].iov_base;
            ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
        }
        ctxt->sge[sge_no].length = sge_bytes;
    }
    BUG_ON(byte_count != 0);

    /* Save all respages in the ctxt and remove them from the
     * respages array. They are our pages until the I/O
     * completes.
     */
    for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
        ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
        ctxt->count++;
        rqstp->rq_respages[page_no] = NULL;
        /*
         * If there are more pages than SGE, terminate SGE
         * list so that svc_rdma_unmap_dma doesn't attempt to
         * unmap garbage.
         */
        if (page_no+1 >= sge_no)
            ctxt->sge[page_no+1].length = 0;
    }
    BUG_ON(sge_no > rdma->sc_max_sge);
    memset(&send_wr, 0, sizeof send_wr);
    ctxt->wr_op = IB_WR_SEND;
    send_wr.wr_id = (unsigned long)ctxt;
    send_wr.sg_list = ctxt->sge;
    send_wr.num_sge = sge_no;
    send_wr.opcode = IB_WR_SEND;
    send_wr.send_flags = IB_SEND_SIGNALED;
    if (vec->frmr) {
        /* Prepare INVALIDATE WR */
        memset(&inv_wr, 0, sizeof inv_wr);
        inv_wr.opcode = IB_WR_LOCAL_INV;
        inv_wr.send_flags = IB_SEND_SIGNALED;
//......... part of the code omitted here .........

Developer: flwh, project: Alcatel_OT_985_kernel, lines: 101


Example 19: send_write

static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
    struct ib_send_wr write_wr;
    struct ib_sge *sge;
    int xdr_sge_no;
    int sge_no;
    int sge_bytes;
    int sge_off;
    int bc;
    struct svc_rdma_op_ctxt *ctxt;

    BUG_ON(vec->count > RPCSVC_MAXPAGES);
    dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
            "write_len=%d, vec->sge=%p, vec->count=%lu\n",
            rmr, (unsigned long long)to, xdr_off,
            write_len, vec->sge, vec->count);

    ctxt = svc_rdma_get_context(xprt);
    ctxt->direction = DMA_TO_DEVICE;
    sge = ctxt->sge;

    /* Find the SGE associated with xdr_off */
    for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
         xdr_sge_no++) {
        if (vec->sge[xdr_sge_no].iov_len > bc)
            break;
        bc -= vec->sge[xdr_sge_no].iov_len;
    }

    sge_off = bc;
    bc = write_len;
    sge_no = 0;

    /* Copy the remaining SGE */
    while (bc != 0) {
        sge_bytes = min_t(size_t,
                          bc, vec->sge[xdr_sge_no].iov_len-sge_off);
        sge[sge_no].length = sge_bytes;
        if (!vec->frmr) {
            sge[sge_no].addr =
                ib_dma_map_single(xprt->sc_cm_id->device,
                                  (void *)
                                  vec->sge[xdr_sge_no].iov_base + sge_off,
                                  sge_bytes, DMA_TO_DEVICE);
            if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                     sge[sge_no].addr))
                goto err;
            atomic_inc(&xprt->sc_dma_used);
            sge[sge_no].lkey = xprt->sc_dma_lkey;
        } else {
            sge[sge_no].addr = (unsigned long)
                vec->sge[xdr_sge_no].iov_base + sge_off;
            sge[sge_no].lkey = vec->frmr->mr->lkey;
        }
        ctxt->count++;
        ctxt->frmr = vec->frmr;
        sge_off = 0;
        sge_no++;
        xdr_sge_no++;
        BUG_ON(xdr_sge_no > vec->count);
        bc -= sge_bytes;
    }

    /* Prepare WRITE WR */
    memset(&write_wr, 0, sizeof write_wr);
    ctxt->wr_op = IB_WR_RDMA_WRITE;
    write_wr.wr_id = (unsigned long)ctxt;
    write_wr.sg_list = &sge[0];
    write_wr.num_sge = sge_no;
    write_wr.opcode = IB_WR_RDMA_WRITE;
    write_wr.send_flags = IB_SEND_SIGNALED;
    write_wr.wr.rdma.rkey = rmr;
    write_wr.wr.rdma.remote_addr = to;

    /* Post It */
    atomic_inc(&rdma_stat_write);
    if (svc_rdma_send(xprt, &write_wr))
        goto err;
    return 0;
err:
    svc_rdma_put_context(ctxt, 0);
    /* Fatal error, close transport */
    return -EIO;
}

Developer: flwh, project: Alcatel_OT_985_kernel, lines: 87


Example 20: rdma_read_xdr

//......... part of the code omitted here .........
    sgl_offset = 0;
    ch_no = 0;

    for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
         ch->rc_discrim != 0; ch++, ch_no++) {
next_sge:
        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->frmr = hdr_ctxt->frmr;
        ctxt->read_hdr = NULL;
        clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

        /* Prepare READ WR */
        memset(&read_wr, 0, sizeof read_wr);
        read_wr.wr_id = (unsigned long)ctxt;
        read_wr.opcode = IB_WR_RDMA_READ;
        ctxt->wr_op = read_wr.opcode;
        read_wr.send_flags = IB_SEND_SIGNALED;
        read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
        read_wr.wr.rdma.remote_addr =
            get_unaligned(&(ch->rc_target.rs_offset)) +
            sgl_offset;
        read_wr.sg_list = ctxt->sge;
        read_wr.num_sge =
            rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
        err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr,
                                &rpl_map->sge[chl_map->ch[ch_no].start],
                                &sgl_offset,
                                read_wr.num_sge);
        if (err) {
            svc_rdma_unmap_dma(ctxt);
            svc_rdma_put_context(ctxt, 0);
            goto out;
        }
        if (((ch+1)->rc_discrim == 0) &&
            (read_wr.num_sge == chl_map->ch[ch_no].count)) {
            /*
             * Mark the last RDMA_READ with a bit to
             * indicate all RPC data has been fetched from
             * the client and the RPC needs to be enqueued.
             */
            set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
            if (hdr_ctxt->frmr) {
                set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
                /*
                 * Invalidate the local MR used to map the data
                 * sink.
                 */
                if (xprt->sc_dev_caps &
                    SVCRDMA_DEVCAP_READ_W_INV) {
                    read_wr.opcode =
                        IB_WR_RDMA_READ_WITH_INV;
                    ctxt->wr_op = read_wr.opcode;
                    read_wr.ex.invalidate_rkey =
                        ctxt->frmr->mr->lkey;
                } else {
                    /* Prepare INVALIDATE WR */
                    memset(&inv_wr, 0, sizeof inv_wr);
                    inv_wr.opcode = IB_WR_LOCAL_INV;
                    inv_wr.send_flags = IB_SEND_SIGNALED;
                    inv_wr.ex.invalidate_rkey =
                        hdr_ctxt->frmr->mr->lkey;
                    read_wr.next = &inv_wr;
                }

Developer: 3null, project: fastsocket, lines: 67


Example 21: rdma_read_chunk_frmr

//......... part of the code omitted here .........
                len, pg_off);
        rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* adjust offset and wrap to next page if needed */
        pg_off += len;
        if (pg_off == PAGE_SIZE) {
            pg_off = 0;
            pg_no++;
        }
        rs_length -= len;
    }

    if (last && rs_length == 0)
        set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
    else
        clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

    dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
                              frmr->sg, frmr->sg_nents,
                              frmr->direction);
    if (!dma_nents) {
        pr_err("svcrdma: failed to dma map sg %p\n",
               frmr->sg);
        return -ENOMEM;
    }
    atomic_inc(&xprt->sc_dma_used);

    n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
    if (unlikely(n != frmr->sg_nents)) {
        pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
               frmr->mr, n, frmr->sg_nents);
        return n < 0 ? n : -EINVAL;
    }

    /* Bump the key */
    key = (u8)(frmr->mr->lkey & 0x000000FF);
    ib_update_fast_reg_key(frmr->mr, ++key);

    ctxt->sge[0].addr = frmr->mr->iova;
    ctxt->sge[0].lkey = frmr->mr->lkey;
    ctxt->sge[0].length = frmr->mr->length;
    ctxt->count = 1;
    ctxt->read_hdr = head;

    /* Prepare REG WR */
    reg_wr.wr.opcode = IB_WR_REG_MR;
    reg_wr.wr.wr_id = 0;
    reg_wr.wr.send_flags = IB_SEND_SIGNALED;
    reg_wr.wr.num_sge = 0;
    reg_wr.mr = frmr->mr;
    reg_wr.key = frmr->mr->lkey;
    reg_wr.access = frmr->access_flags;
    reg_wr.wr.next = &read_wr.wr;

    /* Prepare RDMA_READ */
    memset(&read_wr, 0, sizeof(read_wr));
    read_wr.wr.send_flags = IB_SEND_SIGNALED;
    read_wr.rkey = rs_handle;
    read_wr.remote_addr = rs_offset;
    read_wr.wr.sg_list = ctxt->sge;
    read_wr.wr.num_sge = 1;
    if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
        read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
        read_wr.wr.wr_id = (unsigned long)ctxt;
        read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
    } else {
        read_wr.wr.opcode = IB_WR_RDMA_READ;
        read_wr.wr.next = &inv_wr;
        /* Prepare invalidate */
        memset(&inv_wr, 0, sizeof(inv_wr));
        inv_wr.wr_id = (unsigned long)ctxt;
        inv_wr.opcode = IB_WR_LOCAL_INV;
        inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
        inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
    }
    ctxt->wr_op = read_wr.wr.opcode;

    /* Post the chain */
    ret = svc_rdma_send(xprt, &reg_wr.wr);
    if (ret) {
        pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
        goto err;
    }

    /* return current location in page array */
    *page_no = pg_no;
    *page_offset = pg_off;
    ret = read;
    atomic_inc(&rdma_stat_read);
    return ret;
err:
    ib_dma_unmap_sg(xprt->sc_cm_id->device,
                    frmr->sg, frmr->sg_nents, frmr->direction);
    svc_rdma_put_context(ctxt, 0);
    svc_rdma_put_frmr(xprt, frmr);
    return ret;
}

Developer: Chong-Li, project: cse522, lines: 101


Example 22: sq_cq_reap

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
    struct svc_rdma_op_ctxt *ctxt = NULL;
    struct ib_wc wc;
    struct ib_cq *cq = xprt->sc_sq_cq;
    int ret;

    if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
        return;

    ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
    atomic_inc(&rdma_stat_sq_poll);

    while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
        ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
        xprt = ctxt->xprt;

        svc_rdma_unmap_dma(ctxt);
        if (wc.status != IB_WC_SUCCESS)
            /* Close the transport */
            set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

        /* Decrement used SQ WR count */
        atomic_dec(&xprt->sc_sq_count);
        wake_up(&xprt->sc_send_wait);

        switch (ctxt->wr_op) {
        case IB_WR_SEND:
            svc_rdma_put_context(ctxt, 1);
            break;

        case IB_WR_RDMA_WRITE:
            svc_rdma_put_context(ctxt, 0);
            break;

        case IB_WR_RDMA_READ:
            if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

                BUG_ON(!read_hdr);
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                list_add_tail(&read_hdr->dto_q,
                              &xprt->sc_read_complete_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                svc_xprt_enqueue(&xprt->sc_xprt);
            }
            svc_rdma_put_context(ctxt, 0);
            break;

        default:
            printk(KERN_ERR "svcrdma: unexpected completion type, "
                   "opcode=%d, status=%d\n",
                   wc.opcode, wc.status);
            break;
        }
        svc_xprt_put(&xprt->sc_xprt);
    }

    if (ctxt)
        atomic_inc(&rdma_stat_sq_prod);
}

Developer: LouZiffer, project: m900_kernel_cupcake-SDX, lines: 66


Example 23: rdma_read_chunk_frmr

//......... part of the code omitted here .........
        head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
        head->arg.page_len += len;
        head->arg.len += len;
        if (!pg_off)
            head->count++;
        rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
        rqstp->rq_next_page = rqstp->rq_respages + 1;
#endif
        frmr->page_list->page_list[pno] =
            ib_dma_map_page(xprt->sc_cm_id->device,
                            head->arg.pages[pg_no], 0,
                            PAGE_SIZE, DMA_FROM_DEVICE);
        ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
                                   frmr->page_list->page_list[pno]);
        if (ret)
            goto err;
        atomic_inc(&xprt->sc_dma_used);

        /* adjust offset and wrap to next page if needed */
        pg_off += len;
        if (pg_off == PAGE_SIZE) {
            pg_off = 0;
            pg_no++;
        }
        rs_length -= len;
    }

    if (last && rs_length == 0)
        set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
    else
        clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

    /* Bump the key */
    key = (u8)(frmr->mr->lkey & 0x000000FF);
    ib_update_fast_reg_key(frmr->mr, ++key);

    ctxt->sge[0].addr = (unsigned long)frmr->kva + *page_offset;
    ctxt->sge[0].lkey = frmr->mr->lkey;
    ctxt->sge[0].length = read;
    ctxt->count = 1;
    ctxt->read_hdr = head;

    /* Prepare FASTREG WR */
    memset(&fastreg_wr, 0, sizeof(fastreg_wr));
    fastreg_wr.opcode = IB_WR_FAST_REG_MR;
    fastreg_wr.send_flags = IB_SEND_SIGNALED;
    fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
    fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
    fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
    fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
    fastreg_wr.wr.fast_reg.length = frmr->map_len;
    fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
    fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
    fastreg_wr.next = &read_wr;

    /* Prepare RDMA_READ */
    memset(&read_wr, 0, sizeof(read_wr));
    read_wr.send_flags = IB_SEND_SIGNALED;
    read_wr.wr.rdma.rkey = rs_handle;
    read_wr.wr.rdma.remote_addr = rs_offset;
    read_wr.sg_list = ctxt->sge;
    read_wr.num_sge = 1;
    if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
        read_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
        read_wr.wr_id = (unsigned long)ctxt;
        read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
    } else {
        read_wr.opcode = IB_WR_RDMA_READ;
        read_wr.next = &inv_wr;
        /* Prepare invalidate */
        memset(&inv_wr, 0, sizeof(inv_wr));
        inv_wr.wr_id = (unsigned long)ctxt;
        inv_wr.opcode = IB_WR_LOCAL_INV;
        inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
        inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
    }
    ctxt->wr_op = read_wr.opcode;

    /* Post the chain */
    ret = svc_rdma_send(xprt, &fastreg_wr);
    if (ret) {
        pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
        goto err;
    }

    /* return current location in page array */
    *page_no = pg_no;
    *page_offset = pg_off;
    ret = read;
    atomic_inc(&rdma_stat_read);
    return ret;
err:
    svc_rdma_unmap_dma(ctxt);
    svc_rdma_put_context(ctxt, 0);
    svc_rdma_put_frmr(xprt, frmr);
    return ret;
}

Developer: antiguru, project: ofed-compat-rdma, lines: 101



Note: The svc_rdma_put_context examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets come from open-source projects; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not republish without permission.


C++ svc_register function code examples
C++ svc_plugin_send function code examples