
Self-study tutorial: C++ virt_to_mfn function code examples

51自学网 | 2021-06-03 09:42:52 | C++
This tutorial on C++ virt_to_mfn function code examples is quite practical; we hope it helps you.

This article collects and summarizes typical usage examples of the virt_to_mfn function in C++. If you have been struggling with questions such as: what exactly does virt_to_mfn do? How is virt_to_mfn called? Where can I find examples of virt_to_mfn in use? Then the hand-picked code examples below may be able to help.

Below, 29 code examples of the virt_to_mfn function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
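Before the individual examples, here is a minimal sketch of the pattern most of them share: allocate a page in the guest, translate its virtual address to a machine frame number with virt_to_mfn(), and hand that frame to another domain or to the hypervisor. The helper name share_page_with_backend is made up for illustration, and the sketch assumes the Mini-OS style alloc_page()/gnttab_grant_access() API that appears in the netfront, blkfront and consfront examples below; it is not taken from any one of them.

/* Illustrative sketch only: a hypothetical helper built from the Mini-OS calls
 * used in the examples below (alloc_page, virt_to_mfn, gnttab_grant_access). */
static grant_ref_t share_page_with_backend(domid_t backend_dom, void **page_out)
{
	void *page = (void *) alloc_page();	/* one fresh guest page */

	if (!page)
		return GRANT_INVALID_REF;
	memset(page, 0, PAGE_SIZE);		/* backend should see a clean page */

	/* virt_to_mfn() turns the guest-virtual address into the machine frame
	 * number (MFN) that the hypervisor and the backend domain work with. */
	*page_out = page;
	return gnttab_grant_access(backend_dom, virt_to_mfn(page), 0 /* writable */);
}

The grant reference returned here is what a frontend then advertises to its backend (for example via xenbus, as the ring setup examples below show).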

Example 1: do_suspend

static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

#ifdef CONFIG_PREEMPT
	/* If the kernel is preemptible, we need to freeze all the processes
	   to prevent them from being in the middle of a pagetable update
	   during suspend. */
	err = freeze_processes();
	if (err) {
		goto out;
	}
#endif

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		goto out_thaw;
	}

	xs_suspend();

	err = dpm_suspend_noirq(PMSG_FREEZE);
	if (err) {
		goto out_resume;
	}

	si.cancelled = 1;

	if (xen_hvm_domain()) {
		si.arg = 0UL;
		si.pre = NULL;
		si.post = &xen_hvm_post_suspend;
	} else {
		si.arg = virt_to_mfn(xen_start_info);
		si.pre = &xen_pre_suspend;
		si.post = &xen_post_suspend;
	}

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		si.cancelled = 1;
	}

out_resume:
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	/* Make sure timer events get retriggered on all CPUs */
	clock_was_set();

out_thaw:
#ifdef CONFIG_PREEMPT
	thaw_processes();
out:
#endif
	shutting_down = SHUTDOWN_INVALID;
}

Developer: rrowicki, project: Chrono_Kernel-1, lines of code: 73
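A brief note on the non-HVM branch above: si.arg is handed to the suspend hypercall inside xen_suspend() (not shown in this excerpt), and on the PV path that hypercall expects the machine frame number of the start_info page rather than a virtual address, which is why virt_to_mfn(xen_start_info) is used. Example 11 (__do_suspend) later in this article makes the equivalent call directly:

/* PV suspend: the hypervisor is given the MFN of xen_start_info, not a pointer. */
HYPERVISOR_suspend(virt_to_mfn(xen_start_info));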


Example 2: gnttab_copy_grant_page

/* * Must not be called with IRQs off.  This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep){	struct gnttab_unmap_and_replace unmap;	mmu_update_t mmu;	struct page *page;	struct page *new_page;	void *new_addr;	void *addr;	paddr_t pfn;	maddr_t mfn;	maddr_t new_mfn;	int err;	page = *pagep;	if (!get_page_unless_zero(page))		return -ENOENT;	err = -ENOMEM;	new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);	if (!new_page)		goto out;	new_addr = page_address(new_page);	addr = page_address(page);	memcpy(new_addr, addr, PAGE_SIZE);	pfn = page_to_pfn(page);	mfn = pfn_to_mfn(pfn);	new_mfn = virt_to_mfn(new_addr);	write_seqlock(&gnttab_dma_lock);	/* Make seq visible before checking page_mapped. */	smp_mb();	/* Has the page been DMA-mapped? */	if (unlikely(page_mapped(page))) {		write_sequnlock(&gnttab_dma_lock);		put_page(new_page);		err = -EBUSY;		goto out;	}	if (!xen_feature(XENFEAT_auto_translated_physmap))		set_phys_to_machine(pfn, new_mfn);	gnttab_set_replace_op(&unmap, (unsigned long)addr,			      (unsigned long)new_addr, ref);	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,					&unmap, 1);	BUG_ON(err);	BUG_ON(unmap.status);	write_sequnlock(&gnttab_dma_lock);	if (!xen_feature(XENFEAT_auto_translated_physmap)) {		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;		mmu.val = pfn;		err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);		BUG_ON(err);	}	new_page->mapping = page->mapping;	new_page->index = page->index;	set_bit(PG_foreign, &new_page->flags);	*pagep = new_page;	SetPageForeign(page, gnttab_page_free);	page->mapping = NULL;out:	put_page(page);	return err;}
Developer: Jinjian0609, project: UVP-Tools, lines of code: 83


Example 3: void

struct netfront_dev *init_netfront(char *_nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6], char **ip){    xenbus_transaction_t xbt;    char* err;    char* message=NULL;    struct netif_tx_sring *txs;    struct netif_rx_sring *rxs;    int retry=0;    int i;    char* msg = NULL;    char nodename[256];    char path[256];    struct netfront_dev *dev;    static int netfrontends = 0;    if (!_nodename)        snprintf(nodename, sizeof(nodename), "device/vif/%d", netfrontends);    else {        strncpy(nodename, _nodename, sizeof(nodename) - 1);        nodename[sizeof(nodename) - 1] = 0;    }    netfrontends++;    if (!thenetif_rx)	thenetif_rx = netif_rx;    printk("************************ NETFRONT for %s **********/n/n/n", nodename);    dev = malloc(sizeof(*dev));    memset(dev, 0, sizeof(*dev));    dev->nodename = strdup(nodename);#ifdef HAVE_LIBC    dev->fd = -1;#endif    printk("net TX ring size %d/n", NET_TX_RING_SIZE);    printk("net RX ring size %d/n", NET_RX_RING_SIZE);    init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE);    for(i=0;i<NET_TX_RING_SIZE;i++)    {	add_id_to_freelist(i,dev->tx_freelist);        dev->tx_buffers[i].page = NULL;    }    for(i=0;i<NET_RX_RING_SIZE;i++)    {	/* TODO: that's a lot of memory */        dev->rx_buffers[i].page = (char*)alloc_page();    }    snprintf(path, sizeof(path), "%s/backend-id", nodename);    dev->dom = xenbus_read_integer(path);#ifdef HAVE_LIBC    if (thenetif_rx == NETIF_SELECT_RX)        evtchn_alloc_unbound(dev->dom, netfront_select_handler, dev, &dev->evtchn);    else#endif        evtchn_alloc_unbound(dev->dom, netfront_handler, dev, &dev->evtchn);    txs = (struct netif_tx_sring *) alloc_page();    rxs = (struct netif_rx_sring *) alloc_page();    memset(txs,0,PAGE_SIZE);    memset(rxs,0,PAGE_SIZE);    SHARED_RING_INIT(txs);    SHARED_RING_INIT(rxs);    FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE);    FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE);    dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0);    dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0);    init_rx_buffers(dev);    dev->netif_rx = thenetif_rx;    dev->events = NULL;again:    err = xenbus_transaction_start(&xbt);    if (err) {        printk("starting transaction/n");        free(err);    }    err = xenbus_printf(xbt, nodename, "tx-ring-ref","%u",                dev->tx_ring_ref);    if (err) {        message = "writing tx ring-ref";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename, "rx-ring-ref","%u",                dev->rx_ring_ref);    if (err) {        message = "writing rx ring-ref";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename,                "event-channel", "%u", dev->evtchn);//.........这里部分代码省略.........
Developer: dornerworks, project: mini-os, lines of code: 101


Example 4: xen_update_mem_tables

/* * Helper function to update the p2m and m2p tables and kernel mapping. */static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn){	struct mmu_update update = {		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,		.val = pfn	};	/* Update p2m */	if (!set_phys_to_machine(pfn, mfn)) {		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld/n",		     pfn, mfn);		BUG();	}	/* Update m2p */	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld/n",		     mfn, pfn);		BUG();	}	/* Update kernel mapping, but not for highmem. */	if (pfn >= PFN_UP(__pa(high_memory - 1)))		return;	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),					 mfn_pte(mfn, PAGE_KERNEL), 0)) {		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld/n",		      mfn, pfn);		BUG();	}}/* * This function updates the p2m and m2p tables with an identity map from * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the * original allocation at remap_pfn. The information needed for remapping is * saved in the memory itself to avoid the need for allocating buffers. The * complete remap information is contained in a list of MFNs each containing * up to REMAP_SIZE MFNs and the start target PFN for doing the remap. * This enables us to preserve the original mfn sequence while doing the * remapping at a time when the memory management is capable of allocating * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and * its callers. */static void __init xen_do_set_identity_and_remap_chunk(        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn){	unsigned long buf = (unsigned long)&xen_remap_buf;	unsigned long mfn_save, mfn;	unsigned long ident_pfn_iter, remap_pfn_iter;	unsigned long ident_end_pfn = start_pfn + size;	unsigned long left = size;	unsigned int i, chunk;	WARN_ON(size == 0);	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));	mfn_save = virt_to_mfn(buf);	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;	     ident_pfn_iter < ident_end_pfn;	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;		/* Map first pfn to xen_remap_buf */		mfn = pfn_to_mfn(ident_pfn_iter);		set_pte_mfn(buf, mfn, PAGE_KERNEL);		/* Save mapping information in page */		xen_remap_buf.next_area_mfn = xen_remap_mfn;		xen_remap_buf.target_pfn = remap_pfn_iter;		xen_remap_buf.size = chunk;		for (i = 0; i < chunk; i++)			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);		/* Put remap buf into list. */		xen_remap_mfn = mfn;		/* Set identity map */		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);		left -= chunk;	}	/* Restore old xen_remap_buf mapping */	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);}
Developer: 0-T-0, project: ps4-linux, lines of code: 92


Example 5: xenvif_gop_frag_copy

/* * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. */static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,				 struct netrx_pending_operations *npo,				 struct page *page, unsigned long size,				 unsigned long offset, int *head,				 struct xenvif *foreign_vif,				 grant_ref_t foreign_gref){	struct gnttab_copy *copy_gop;	struct xenvif_rx_meta *meta;	unsigned long bytes;	int gso_type = XEN_NETIF_GSO_TYPE_NONE;	/* Data must not cross a page boundary. */	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));	meta = npo->meta + npo->meta_prod - 1;	/* Skip unused frames from start of page */	page += offset >> PAGE_SHIFT;	offset &= ~PAGE_MASK;	while (size > 0) {		BUG_ON(offset >= PAGE_SIZE);		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);		bytes = PAGE_SIZE - offset;		if (bytes > size)			bytes = size;		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {			/*			 * Netfront requires there to be some data in the head			 * buffer.			 */			BUG_ON(*head);			meta = get_next_rx_buffer(vif, npo);		}		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)			bytes = MAX_BUFFER_OFFSET - npo->copy_off;		copy_gop = npo->copy + npo->copy_prod++;		copy_gop->flags = GNTCOPY_dest_gref;		copy_gop->len = bytes;		if (foreign_vif) {			copy_gop->source.domid = foreign_vif->domid;			copy_gop->source.u.ref = foreign_gref;			copy_gop->flags |= GNTCOPY_source_gref;		} else {			copy_gop->source.domid = DOMID_SELF;			copy_gop->source.u.gmfn =				virt_to_mfn(page_address(page));		}		copy_gop->source.offset = offset;		copy_gop->dest.domid = vif->domid;		copy_gop->dest.offset = npo->copy_off;		copy_gop->dest.u.ref = npo->copy_gref;		npo->copy_off += bytes;		meta->size += bytes;		offset += bytes;		size -= bytes;		/* Next frame */		if (offset == PAGE_SIZE && size) {			BUG_ON(!PageCompound(page));			page++;			offset = 0;		}		/* Leave a gap for the GSO descriptor. */		if (skb_is_gso(skb)) {			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;		}		if (*head && ((1 << gso_type) & vif->gso_mask))			vif->rx.req_cons++;		*head = 0; /* There must be something in this buffer now. */	}}
Developer: 7799, project: linux, lines of code: 94


Example 6: netbk_gop_frag_copy

/* * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. */static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,				struct netrx_pending_operations *npo,				struct page *page, unsigned long size,				unsigned long offset, int *head){	struct gnttab_copy *copy_gop;	struct netbk_rx_meta *meta;	/*	 * These variables a used iff get_page_ext returns true,	 * in which case they are guaranteed to be initialized.	 */	unsigned int uninitialized_var(group), uninitialized_var(idx);	int foreign = get_page_ext(page, &group, &idx);	unsigned long bytes;	/* Data must not cross a page boundary. */	BUG_ON(size + offset > PAGE_SIZE);	meta = npo->meta + npo->meta_prod - 1;	while (size > 0) {		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);		if (start_new_rx_buffer(npo->copy_off, size, *head)) {			/*			 * Netfront requires there to be some data in the head			 * buffer.			 */			BUG_ON(*head);			meta = get_next_rx_buffer(vif, npo);		}		bytes = size;		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)			bytes = MAX_BUFFER_OFFSET - npo->copy_off;		copy_gop = npo->copy + npo->copy_prod++;		copy_gop->flags = GNTCOPY_dest_gref;		if (foreign) {			struct xen_netbk *netbk = &xen_netbk[group];			struct pending_tx_info *src_pend;			src_pend = &netbk->pending_tx_info[idx];			copy_gop->source.domid = src_pend->vif->domid;			copy_gop->source.u.ref = src_pend->req.gref;			copy_gop->flags |= GNTCOPY_source_gref;		} else {			void *vaddr = page_address(page);			copy_gop->source.domid = DOMID_SELF;			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);		}		copy_gop->source.offset = offset;		copy_gop->dest.domid = vif->domid;		copy_gop->dest.offset = npo->copy_off;		copy_gop->dest.u.ref = npo->copy_gref;		copy_gop->len = bytes;		npo->copy_off += bytes;		meta->size += bytes;		offset += bytes;		size -= bytes;		/* Leave a gap for the GSO descriptor. */		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)			vif->rx.req_cons++;		*head = 0; /* There must be something in this buffer now. */	}}
Developer: kenkit, project: AndromadusMod-New, lines of code: 78


Example 7: strlen

struct blkfront_dev *init_blkfront(char *_nodename, struct blkfront_info *info){    xenbus_transaction_t xbt;    char* err;    char* message=NULL;    struct blkif_sring *s;    int retry=0;    char* msg = NULL;    char* c;    char* nodename = _nodename ? _nodename : "device/vbd/768";    struct blkfront_dev *dev;    char path[strlen(nodename) + strlen("/backend-id") + 1];    printk("******************* BLKFRONT for %s **********/n/n/n", nodename);    dev = malloc(sizeof(*dev));    memset(dev, 0, sizeof(*dev));    dev->nodename = strdup(nodename);#ifdef HAVE_LIBC    dev->fd = -1;#endif    snprintf(path, sizeof(path), "%s/backend-id", nodename);    dev->dom = xenbus_read_integer(path);     evtchn_alloc_unbound(dev->dom, blkfront_handler, dev, &dev->evtchn);    s = (struct blkif_sring*) alloc_page();    memset(s,0,PAGE_SIZE);    SHARED_RING_INIT(s);    FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);    dev->ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(s),0);    dev->events = NULL;again:    err = xenbus_transaction_start(&xbt);    if (err) {        printk("starting transaction/n");        free(err);    }    err = xenbus_printf(xbt, nodename, "ring-ref","%u",                dev->ring_ref);    if (err) {        message = "writing ring-ref";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename,                "event-channel", "%u", dev->evtchn);    if (err) {        message = "writing event-channel";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename,                "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE);    if (err) {        message = "writing protocol";        goto abort_transaction;    }    snprintf(path, sizeof(path), "%s/state", nodename);    err = xenbus_switch_state(xbt, path, XenbusStateConnected);    if (err) {        message = "switching state";        goto abort_transaction;    }    err = xenbus_transaction_end(xbt, 0, &retry);    free(err);    if (retry) {            goto again;        printk("completing transaction/n");    }    goto done;abort_transaction:    free(err);    err = xenbus_transaction_end(xbt, 1, &retry);    printk("Abort transaction %s/n", message);    goto error;done:    snprintf(path, sizeof(path), "%s/backend", nodename);    msg = xenbus_read(XBT_NIL, path, &dev->backend);    if (msg) {        printk("Error %s when reading the backend path %s/n", msg, path);        goto error;    }    printk("backend at %s/n", dev->backend);    dev->handle = strtoul(strrchr(nodename, '/')+1, NULL, 0);//.........这里部分代码省略.........
Developer: jonludlam, project: mini-os, lines of code: 101


Example 8: snprintf

static struct netfront_dev *_init_netfront(struct netfront_dev *dev,					   unsigned char rawmac[6],					   char **ip){	xenbus_transaction_t xbt;	char* err = NULL;	const char* message=NULL;	struct netif_tx_sring *txs;	struct netif_rx_sring *rxs;	int feature_split_evtchn;	int retry=0;	int i;	char* msg = NULL;	char path[256];	snprintf(path, sizeof(path), "%s/backend-id", dev->nodename);	dev->dom = xenbus_read_integer(path);	snprintf(path, sizeof(path), "%s/backend", dev->nodename);	msg = xenbus_read(XBT_NIL, path, &dev->backend);	snprintf(path, sizeof(path), "%s/mac", dev->nodename);	msg = xenbus_read(XBT_NIL, path, &dev->mac);	if ((dev->backend == NULL) || (dev->mac == NULL)) {		printk("%s: backend/mac failed/n", __func__);		goto error;	}#ifdef CONFIG_NETMAP	snprintf(path, sizeof(path), "%s/feature-netmap", dev->backend);	dev->netmap = xenbus_read_integer(path) > 0 ? 1 : 0;	if (dev->netmap) {			dev->na = init_netfront_netmap(dev, dev->netif_rx);			goto skip;	}#endif	/* Check feature-split-event-channels */	snprintf(path, sizeof(path), "%s/feature-split-event-channels",		 dev->backend);	feature_split_evtchn = xenbus_read_integer(path) > 0 ? 1 : 0;#ifdef HAVE_LIBC	/* Force the use of a single event channel */	if (dev->netif_rx == NETIF_SELECT_RX)		feature_split_evtchn = 0;#endif	printk("************************ NETFRONT for %s **********/n/n/n",	       dev->nodename);	init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE);	for(i=0;i<NET_TX_RING_SIZE;i++)	{		add_id_to_freelist(i,dev->tx_freelist);#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY		dev->tx_buffers[i].page = (void*)alloc_page();		BUG_ON(dev->tx_buffers[i].page == NULL);#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS		dev->tx_buffers[i].gref = gnttab_grant_access(dev->dom,							      virt_to_mfn(dev->tx_buffers[i].page), 0);		BUG_ON(dev->tx_buffers[i].gref == GRANT_INVALID_REF);		dprintk("tx[%d]: page = %p, gref=0x%x/n", i, dev->tx_buffers[i].page, dev->tx_buffers[i].gref);#endif#endif	}#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY	printk("net TX ring size %d, %lu KB/n", NET_TX_RING_SIZE, (unsigned long)(NET_TX_RING_SIZE * PAGE_SIZE)/1024);#else	printk("net TX ring size %d/n", NET_TX_RING_SIZE);#endif#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS	for(i=0;i<NET_RX_RING_SIZE;i++)	{	/* TODO: that's a lot of memory */		dev->rx_buffers[i].page = (void*)alloc_page();		BUG_ON(dev->rx_buffers[i].page == NULL);		dprintk("rx[%d]: page = %p/n", i, dev->rx_buffers[i].page);	}	printk("net RX ring size %d, %lu KB/n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_RING_SIZE * PAGE_SIZE)/1024);#else	for(i=0;i<NET_RX_RING_SIZE;i++)		dev->rx_buffers[i] = NULL;	for(i=0;i<NET_RX_BUFFERS;i++)	{		/* allocate rx buffer pool */		dev->rx_buffer_pool[i].page = (void*)alloc_page();		BUG_ON(dev->rx_buffer_pool[i].page == NULL);		dprintk("rx[%d]: page = %p/n", i, dev->rx_buffer_pool[i].page);		add_id_to_freelist(i,dev->rx_freelist);	}	dev->rx_avail = NET_RX_BUFFERS;	printk("net RX ring size %d, %lu KB buffer space/n", NET_RX_RING_SIZE, (unsigned long)(NET_RX_BUFFERS * PAGE_SIZE)/1024);#endif	if (feature_split_evtchn) {		evtchn_alloc_unbound(dev->dom, netfront_tx_handler, dev,				     &dev->tx_evtchn);		evtchn_alloc_unbound(dev->dom, netfront_rx_handler, dev,				     &dev->rx_evtchn);		printk("split event channels enabled/n");//.........这里部分代码省略.........
Developer: cnplab, project: mini-os, lines of code: 101


Example 9: scsifront_free

static void scsifront_free(struct vscsifrnt_info *info){	struct Scsi_Host *host = info->host;#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)	if (host->shost_state != SHOST_DEL) {#else	if (!test_bit(SHOST_DEL, &host->shost_state)) {#endif		scsi_remove_host(info->host);	}	if (info->ring_ref != GRANT_INVALID_REF) {		gnttab_end_foreign_access(info->ring_ref,					(unsigned long)info->ring.sring);		info->ring_ref = GRANT_INVALID_REF;		info->ring.sring = NULL;	}	if (info->irq)		unbind_from_irqhandler(info->irq, info);	info->irq = 0;	scsi_host_put(info->host);}static int scsifront_alloc_ring(struct vscsifrnt_info *info){	struct xenbus_device *dev = info->dev;	struct vscsiif_sring *sring;	int err = -ENOMEM;	info->ring_ref = GRANT_INVALID_REF;	/***** Frontend to Backend ring start *****/	sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);	if (!sring) {		xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)");		return err;	}	SHARED_RING_INIT(sring);	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);	err = xenbus_grant_ring(dev, virt_to_mfn(sring));	if (err < 0) {		free_page((unsigned long) sring);		info->ring.sring = NULL;		xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)");		goto free_sring;	}	info->ring_ref = err;	err = bind_listening_port_to_irqhandler(			dev->otherend_id, scsifront_intr,			SA_SAMPLE_RANDOM, "scsifront", info);	if (err <= 0) {		xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");		goto free_sring;	}	info->irq = err;	return 0;/* free resource */free_sring:	scsifront_free(info);	return err;}static int scsifront_init_ring(struct vscsifrnt_info *info){	struct xenbus_device *dev = info->dev;	struct xenbus_transaction xbt;	int err;	DPRINTK("%s/n",__FUNCTION__);	err = scsifront_alloc_ring(info);	if (err)		return err;	DPRINTK("%u %u/n", info->ring_ref, info->evtchn);again:	err = xenbus_transaction_start(&xbt);	if (err) {		xenbus_dev_fatal(dev, err, "starting transaction");	}	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",				info->ring_ref);	if (err) {		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");		goto fail;	}//.........这里部分代码省略.........
Developer: zhoupeng, project: spice4xen, lines of code: 101


Example 10: setup_arch

//.........这里部分代码省略.........		     va += PAGE_SIZE) {			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);		}		if (!xen_feature(XENFEAT_auto_translated_physmap)) {			/* Make sure we have a large enough P->M table. */			phys_to_machine_mapping = alloc_bootmem(				end_pfn * sizeof(unsigned long));			memset(phys_to_machine_mapping, ~0,			       end_pfn * sizeof(unsigned long));			memcpy(phys_to_machine_mapping,			       (unsigned long *)xen_start_info->mfn_list,			       xen_start_info->nr_pages * sizeof(unsigned long));			free_bootmem(				__pa(xen_start_info->mfn_list),				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *						sizeof(unsigned long))));			/* Destroyed 'initial mapping' of old p2m table. */			for (va = xen_start_info->mfn_list;			     va < (xen_start_info->mfn_list +				   (xen_start_info->nr_pages*sizeof(unsigned long)));			     va += PAGE_SIZE) {				HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);			}			/*			 * Initialise the list of the frames that specify the			 * list of frames that make up the p2m table. Used by                         * save/restore.			 */			pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =				virt_to_mfn(pfn_to_mfn_frame_list_list);			fpp = PAGE_SIZE/sizeof(unsigned long);			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {				if ((j % fpp) == 0) {					k++;					BUG_ON(k>=fpp);					pfn_to_mfn_frame_list[k] =						alloc_bootmem(PAGE_SIZE);					pfn_to_mfn_frame_list_list[k] =						virt_to_mfn(pfn_to_mfn_frame_list[k]);					j=0;				}				pfn_to_mfn_frame_list[k][j] =					virt_to_mfn(&phys_to_machine_mapping[i]);			}			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;		}	}	if (xen_start_info->flags & SIF_INITDOMAIN)		dmi_scan_machine();	if ( ! (xen_start_info->flags & SIF_INITDOMAIN))	{		acpi_disabled = 1;#ifdef  CONFIG_ACPI		acpi_ht = 0;#endif	}#endif
Developer: BackupTheBerlios, project: arp2-svn, lines of code: 66


Example 11: __do_suspend

static int __do_suspend(void *ignore)
{
	int err;
	extern void time_resume(void);

	BUG_ON(smp_processor_id() != 0);
	BUG_ON(in_interrupt());

#if defined(__i386__) || defined(__x86_64__)
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		printk(KERN_WARNING "Cannot suspend in "
		       "auto_translated_physmap mode.\n");
		return -EOPNOTSUPP;
	}
#endif

	err = smp_suspend();
	if (err)
		return err;

	xenbus_suspend();

	preempt_disable();
	mm_pin_all();
	local_irq_disable();
	preempt_enable();

	gnttab_suspend();

	pre_suspend();

	/*
	 * We'll stop somewhere inside this hypercall. When it returns,
	 * we'll start resuming after the restore.
	 */
	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));

	shutting_down = SHUTDOWN_INVALID;

	post_suspend();

	gnttab_resume();
	irq_resume();
	time_resume();
	switch_idle_mm();
	local_irq_enable();

	xencons_resume();

	xenbus_resume();

	smp_resume();

	return err;
}

Developer: xf739645524, project: kernel-rhel5, lines of code: 61


Example 12: alloc_p2m

/* * Fully allocate the p2m structure for a given pfn.  We need to check * that both the top and mid levels are allocated, and make sure the * parallel mfn tree is kept in sync.  We may race with other cpus, so * the new pages are installed with cmpxchg; if we lose the race then * simply free the page we allocated and use the one that's there. */static bool alloc_p2m(unsigned long pfn){    unsigned topidx, mididx;    unsigned long *top_mfn_p, *mid_mfn;    pte_t *ptep, *pte_pg;    unsigned int level;    unsigned long flags;    unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);    unsigned long p2m_pfn;    topidx = p2m_top_index(pfn);    mididx = p2m_mid_index(pfn);    ptep = lookup_address(addr, &level);    BUG_ON(!ptep || level != PG_LEVEL_4K);    pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));    if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {        /* PMD level is missing, allocate a new one */        ptep = alloc_p2m_pmd(addr, pte_pg);        if (!ptep)            return false;    }    if (p2m_top_mfn) {        top_mfn_p = &p2m_top_mfn[topidx];        mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);        BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);        if (mid_mfn == p2m_mid_missing_mfn) {            /* Separately check the mid mfn level */            unsigned long missing_mfn;            unsigned long mid_mfn_mfn;            unsigned long old_mfn;            mid_mfn = alloc_p2m_page();            if (!mid_mfn)                return false;            p2m_mid_mfn_init(mid_mfn, p2m_missing);            missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);            mid_mfn_mfn = virt_to_mfn(mid_mfn);            old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);            if (old_mfn != missing_mfn) {                free_p2m_page(mid_mfn);                mid_mfn = mfn_to_virt(old_mfn);            } else {                p2m_top_mfn_p[topidx] = mid_mfn;            }        }    } else {        mid_mfn = NULL;    }    p2m_pfn = pte_pfn(READ_ONCE(*ptep));    if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||            p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {        /* p2m leaf page is missing */        unsigned long *p2m;        p2m = alloc_p2m_page();        if (!p2m)            return false;        if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))            p2m_init(p2m);        else            p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));        spin_lock_irqsave(&p2m_update_lock, flags);        if (pte_pfn(*ptep) == p2m_pfn) {            set_pte(ptep,                    pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));            if (mid_mfn)                mid_mfn[mididx] = virt_to_mfn(p2m);            p2m = NULL;        }        spin_unlock_irqrestore(&p2m_update_lock, flags);        if (p2m)            free_p2m_page(p2m);    }    return true;}
Developer: quadcores, project: cbs_4.2.4, lines of code: 96


Example 13: cpu_initialize_context

static int __cpuinitcpu_initialize_context(unsigned int cpu, struct task_struct *idle){	struct vcpu_guest_context *ctxt;	struct desc_struct *gdt;	unsigned long gdt_mfn;	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))		return 0;	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);	if (ctxt == NULL)		return -ENOMEM;	gdt = get_cpu_gdt_table(cpu);	ctxt->flags = VGCF_IN_KERNEL;	ctxt->user_regs.ss = __KERNEL_DS;#ifdef CONFIG_X86_32	ctxt->user_regs.fs = __KERNEL_PERCPU;	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;#else	ctxt->gs_base_kernel = per_cpu_offset(cpu);#endif	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));	if (xen_feature(XENFEAT_auto_translated_physmap) &&	    xen_feature(XENFEAT_supervisor_mode_kernel)) {		/* Note: PVH is not supported on x86_32. */#ifdef CONFIG_X86_64		ctxt->user_regs.ds = __KERNEL_DS;		ctxt->user_regs.es = 0;		ctxt->user_regs.gs = 0;		/* GUEST_GDTR_BASE and */		ctxt->u.pvh.gdtaddr = (unsigned long)gdt;		/* GUEST_GDTR_LIMIT in the VMCS. */		ctxt->u.pvh.gdtsz = (unsigned long)(GDT_SIZE - 1);		ctxt->gs_base_user = (unsigned long)					per_cpu(irq_stack_union.gs_base, cpu);#endif	} else {		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */		ctxt->user_regs.ds = __USER_DS;		ctxt->user_regs.es = __USER_DS;		xen_copy_trap_info(ctxt->trap_ctxt);		ctxt->ldt_ents = 0;		BUG_ON((unsigned long)gdt & ~PAGE_MASK);		gdt_mfn = arbitrary_virt_to_mfn(gdt);		make_lowmem_page_readonly(gdt);		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));		ctxt->u.pv.gdt_frames[0] = gdt_mfn;		ctxt->u.pv.gdt_ents      = GDT_ENTRIES;		ctxt->kernel_ss = __KERNEL_DS;		ctxt->kernel_sp = idle->thread.sp0;#ifdef CONFIG_X86_32		ctxt->event_callback_cs     = __KERNEL_CS;		ctxt->failsafe_callback_cs  = __KERNEL_CS;#endif		ctxt->event_callback_eip    =					(unsigned long)xen_hypervisor_callback;		ctxt->failsafe_callback_eip =					(unsigned long)xen_failsafe_callback;	}	ctxt->user_regs.cs = __KERNEL_CS;	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))		BUG();	kfree(ctxt);	return 0;}
Developer: mbgg, project: linux, lines of code: 86


Example 14: ixp_queue_request

/* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a *   virtual address in the guest os. */int ixp_queue_request(struct app_request *app_req, void *metadata){	struct ixpfront_info *info = (struct ixpfront_info *) metadata;	unsigned long buffer_mfn;	struct ixp_request *ring_req;  	char *req_page = 0, *curr_pos;	unsigned long id;	int ref, err;	grant_ref_t gref_head;	if (unlikely(info->connected != IXP_STATE_CONNECTED))		return 1;  	if (RING_FULL(&info->ring)) {		printk(KERN_ERR "%s:Ring full - returning backpressure/n", __FUNCTION__);		return 1;	}	if (gnttab_alloc_grant_references(		IXPIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {		/*gnttab_request_free_callback(			&info->callback,			ixp_restart_queue_callback,			info,			IXP_MAX_SEGMENTS_PER_REQUEST);*/		return 1; 	}	/* Fill out a communications ring structure. */	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);	id = get_id_from_freelist(info);	ring_req->id = id;	ring_req->handle = info->handle;	ring_req->operation = IXP_OP_3DES_ENCRYPT;	ring_req->nr_segments = 1;	BUG_ON(ring_req->nr_segments > IXPIF_MAX_SEGMENTS_PER_REQUEST);	req_page = (char *)__get_free_page(GFP_NOIO | __GFP_HIGH);	if(req_page == 0) {	  printk(KERN_ERR "ixp_queue_request:Error allocating memory");	  return 1;	}	((struct des_request *)req_page)->key_size = app_req->key_size;	((struct des_request *)req_page)->iv_size = app_req->iv_size;	((struct des_request *)req_page)->msg_size = app_req->msg_size;	curr_pos = req_page + sizeof(struct des_request);	memcpy(curr_pos, app_req->key, app_req->key_size);	curr_pos += app_req->key_size;	memcpy(curr_pos, app_req->iv, app_req->iv_size);	curr_pos += app_req->iv_size;	memcpy(curr_pos, app_req->msg, app_req->msg_size);	curr_pos += app_req->msg_size;	buffer_mfn = virt_to_mfn(req_page); 	/* install a grant reference. */	ref = gnttab_claim_grant_reference(&gref_head);  	BUG_ON(ref == -ENOSPC);	gnttab_grant_foreign_access_ref(	      ref,	      info->xbdev->otherend_id,	      buffer_mfn,	      0);		info->shadow[id].r_params.presp = app_req->presp;	info->shadow[id].r_params.callbk_tag = app_req->callbk_tag;	info->shadow[id].frame[0] = mfn_to_pfn(buffer_mfn);	info->shadow[id].req_page = req_page;	ring_req->seg[0] =	      (struct ixp_request_segment) {		.gref       = ref	      };	info->ring.req_prod_pvt++;	/* Keep a private copy so we can reissue requests when recovering. */	info->shadow[id].req = *ring_req;  	flush_requests(info);//.........这里部分代码省略.........
Developer: kong123456, project: tolapai, lines of code: 101


Example 15: xennet_make_frags

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);
		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= XEN_NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);
		mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = skb_frag_size(frag);
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}

Developer: vineetnayak, project: linux, lines of code: 65


Example 16: cpu_initialize_context

static void cpu_initialize_context(unsigned int cpu){    vcpu_guest_context_t ctxt;    struct thread *idle_thread;    init_cpu_pda(cpu);    idle_thread = per_cpu(cpu, idle_thread);    memset(&ctxt, 0, sizeof(ctxt));    ctxt.flags = VGCF_IN_KERNEL;    ctxt.user_regs.ds = __KERNEL_DS;    ctxt.user_regs.es = 0;    ctxt.user_regs.fs = 0;    ctxt.user_regs.gs = 0;    ctxt.user_regs.ss = __KERNEL_SS;    ctxt.user_regs.eip = idle_thread->ip;    ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000;	/* IOPL_RING1 */    memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));    ctxt.ldt_ents = 0;    ctxt.gdt_ents = 0;#ifdef __i386__    ctxt.user_regs.cs = __KERNEL_CS;    ctxt.user_regs.esp = idle_thread->sp;    ctxt.kernel_ss = __KERNEL_SS;    ctxt.kernel_sp = ctxt.user_regs.esp;    ctxt.event_callback_cs = __KERNEL_CS;    ctxt.event_callback_eip = (unsigned long)hypervisor_callback;    ctxt.failsafe_callback_cs = __KERNEL_CS;    ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;    ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(start_info.pt_base));#else /* __x86_64__ */    ctxt.user_regs.cs = __KERNEL_CS;    ctxt.user_regs.esp = idle_thread->sp;    ctxt.kernel_ss = __KERNEL_SS;    ctxt.kernel_sp = ctxt.user_regs.esp;    ctxt.event_callback_eip = (unsigned long)hypervisor_callback;    ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;    ctxt.syscall_callback_eip = 0;    ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(start_info.pt_base));    ctxt.gs_base_kernel = (unsigned long)&percpu[cpu];#endif    int err = HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);    if (err) {	char *str;	switch (err) {	    case -EINVAL:		/*		 * This interface squashes multiple error sources		 * to one error code.  In particular, an X_EINVAL		 * code can mean:		 *		 * -	the vcpu id is out of range		 * -	cs or ss are in ring 0		 * -	cr3 is wrong		 * -	an entry in the new gdt is above the		 *	reserved entry		 * -	a frame underneath the new gdt is bad		 */		str = "something is wrong :(";		break;	    case -ENOENT:		str = "no such cpu";		break;	    case -ENOMEM:		str = "no mem to copy ctxt";		break;	    case -EFAULT:		str = "bad address";		break;	    case -EEXIST:		/*		 * Hmm.  This error is returned if the vcpu has already		 * been initialized once before in the lifetime of this		 995 			 * domain.  This is a logic error in the kernel.		 996 			 */		str = "already initialized";		break;	    default:		str = "<unexpected>";		break;	}	xprintk("vcpu%d: failed to init: error %d: %s",		cpu, -err, str);    }}
Developer: SnakeDoc, project: GuestVM, lines of code: 87


Example 17: xennet_start_xmit

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev){	unsigned short id;	struct netfront_info *np = netdev_priv(dev);	struct netfront_stats *stats = this_cpu_ptr(np->stats);	struct xen_netif_tx_request *tx;	struct xen_netif_extra_info *extra;	char *data = skb->data;	RING_IDX i;	grant_ref_t ref;	unsigned long mfn;	int notify;	int frags = skb_shinfo(skb)->nr_frags;	unsigned int offset = offset_in_page(data);	unsigned int len = skb_headlen(skb);	unsigned long flags;	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags/n",		       frags);		dump_stack();		goto drop;	}	spin_lock_irqsave(&np->tx_lock, flags);	if (unlikely(!netif_carrier_ok(dev) ||		     (frags > 1 && !xennet_can_sg(dev)) ||		     netif_needs_gso(skb, netif_skb_features(skb)))) {		spin_unlock_irqrestore(&np->tx_lock, flags);		goto drop;	}	i = np->tx.req_prod_pvt;	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);	np->tx_skbs[id].skb = skb;	tx = RING_GET_REQUEST(&np->tx, i);	tx->id   = id;	ref = gnttab_claim_grant_reference(&np->gref_tx_head);	BUG_ON((signed short)ref < 0);	mfn = virt_to_mfn(data);	gnttab_grant_foreign_access_ref(		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);	tx->gref = np->grant_tx_ref[id] = ref;	tx->offset = offset;	tx->size = len;	extra = NULL;	tx->flags = 0;	if (skb->ip_summed == CHECKSUM_PARTIAL)		/* local packet? */		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)		/* remote but checksummed. */		tx->flags |= XEN_NETTXF_data_validated;	if (skb_shinfo(skb)->gso_size) {		struct xen_netif_extra_info *gso;		gso = (struct xen_netif_extra_info *)			RING_GET_REQUEST(&np->tx, ++i);		if (extra)			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;		else			tx->flags |= XEN_NETTXF_extra_info;		gso->u.gso.size = skb_shinfo(skb)->gso_size;		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;		gso->u.gso.pad = 0;		gso->u.gso.features = 0;		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;		gso->flags = 0;		extra = gso;	}	np->tx.req_prod_pvt = i + 1;	xennet_make_frags(skb, dev, tx);	tx->size = skb->len;	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);	if (notify)		notify_remote_via_irq(np->netdev->irq);	u64_stats_update_begin(&stats->syncp);	stats->tx_bytes += skb->len;	stats->tx_packets++;	u64_stats_update_end(&stats->syncp);	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */	xennet_tx_buf_gc(dev);	if (!netfront_tx_slot_available(np))		netif_stop_queue(dev);//.........这里部分代码省略.........
Developer: vineetnayak, project: linux, lines of code: 101


Example 18: xenmem_add_to_physmap_one

static int xenmem_add_to_physmap_one(    struct domain *d,    uint16_t space,    domid_t foreign_domid,    unsigned long idx,    xen_pfn_t gpfn){    unsigned long mfn = 0;    int rc;    switch ( space )    {    case XENMAPSPACE_grant_table:        spin_lock(&d->grant_table->lock);        if ( d->grant_table->gt_version == 0 )            d->grant_table->gt_version = 1;        if ( d->grant_table->gt_version == 2 &&                (idx & XENMAPIDX_grant_table_status) )        {            idx &= ~XENMAPIDX_grant_table_status;            if ( idx < nr_status_frames(d->grant_table) )                mfn = virt_to_mfn(d->grant_table->status[idx]);        }        else        {            if ( (idx >= nr_grant_frames(d->grant_table)) &&                    (idx < max_nr_grant_frames) )                gnttab_grow_table(d, idx + 1);            if ( idx < nr_grant_frames(d->grant_table) )                mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);        }                d->arch.grant_table_gpfn[idx] = gpfn;        spin_unlock(&d->grant_table->lock);        break;    case XENMAPSPACE_shared_info:        if ( idx == 0 )            mfn = virt_to_mfn(d->shared_info);        break;    case XENMAPSPACE_gmfn_foreign:    {        paddr_t maddr;        struct domain *od;        rc = rcu_lock_target_domain_by_id(foreign_domid, &od);        if ( rc < 0 )            return rc;        maddr = p2m_lookup(od, idx << PAGE_SHIFT);        if ( maddr == INVALID_PADDR )        {            dump_p2m_lookup(od, idx << PAGE_SHIFT);            rcu_unlock_domain(od);            return -EINVAL;        }        mfn = maddr >> PAGE_SHIFT;        rcu_unlock_domain(od);        break;    }    default:        return -ENOSYS;    }    domain_lock(d);    /* Map at new location. */    rc = guest_physmap_add_page(d, gpfn, mfn, 0);    domain_unlock(d);    return rc;}
Developer: abligh, project: xen, lines of code: 78


Example 19: netbk_gop_frag

/* Set up the grant operations for this fragment.  If it's a flipping   interface, we also set up the unmap request from here. */static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,			  int i, struct netrx_pending_operations *npo,			  struct page *page, unsigned long size,			  unsigned long offset){	mmu_update_t *mmu;	gnttab_transfer_t *gop;	gnttab_copy_t *copy_gop;	multicall_entry_t *mcl;	netif_rx_request_t *req;	unsigned long old_mfn, new_mfn;	old_mfn = virt_to_mfn(page_address(page));	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);	if (netif->copying_receiver) {		/* The fragment needs to be copied rather than		   flipped. */		meta->copy = 1;		copy_gop = npo->copy + npo->copy_prod++;		copy_gop->flags = GNTCOPY_dest_gref;		if (PageForeign(page)) {			struct pending_tx_info *src_pend =				&pending_tx_info[page->index];			copy_gop->source.domid = src_pend->netif->domid;			copy_gop->source.u.ref = src_pend->req.gref;			copy_gop->flags |= GNTCOPY_source_gref;		} else {			copy_gop->source.domid = DOMID_SELF;			copy_gop->source.u.gmfn = old_mfn;		}		copy_gop->source.offset = offset;		copy_gop->dest.domid = netif->domid;		copy_gop->dest.offset = 0;		copy_gop->dest.u.ref = req->gref;		copy_gop->len = size;	} else {		meta->copy = 0;		if (!xen_feature(XENFEAT_auto_translated_physmap)) {			new_mfn = alloc_mfn();			/*			 * Set the new P2M table entry before			 * reassigning the old data page. Heed the			 * comment in pgtable-2level.h:pte_page(). :-)			 */			set_phys_to_machine(page_to_pfn(page), new_mfn);			mcl = npo->mcl + npo->mcl_prod++;			MULTI_update_va_mapping(mcl,					     (unsigned long)page_address(page),					     pfn_pte_ma(new_mfn, PAGE_KERNEL),					     0);			mmu = npo->mmu + npo->mmu_prod++;			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |				MMU_MACHPHYS_UPDATE;			mmu->val = page_to_pfn(page);		}		gop = npo->trans + npo->trans_prod++;		gop->mfn = old_mfn;		gop->domid = netif->domid;		gop->ref = req->gref;	}	return req->id;}
Developer: xf739645524, project: kernel-rhel5, lines of code: 69


Example 20: alloc_p2m

/*  * Fully allocate the p2m structure for a given pfn.  We need to check * that both the top and mid levels are allocated, and make sure the * parallel mfn tree is kept in sync.  We may race with other cpus, so * the new pages are installed with cmpxchg; if we lose the race then * simply free the page we allocated and use the one that's there. */static bool alloc_p2m(unsigned long pfn){	unsigned topidx, mididx;	unsigned long ***top_p, **mid;	unsigned long *top_mfn_p, *mid_mfn;	topidx = p2m_top_index(pfn);	mididx = p2m_mid_index(pfn);	top_p = &p2m_top[topidx];	mid = *top_p;	if (mid == p2m_mid_missing) {		/* Mid level is missing, allocate a new one */		mid = alloc_p2m_page();		if (!mid)			return false;		p2m_mid_init(mid);		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)			free_p2m_page(mid);	}	top_mfn_p = &p2m_top_mfn[topidx];	mid_mfn = p2m_top_mfn_p[topidx];	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);	if (mid_mfn == p2m_mid_missing_mfn) {		/* Separately check the mid mfn level */		unsigned long missing_mfn;		unsigned long mid_mfn_mfn;		mid_mfn = alloc_p2m_page();		if (!mid_mfn)			return false;		p2m_mid_mfn_init(mid_mfn);		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);		mid_mfn_mfn = virt_to_mfn(mid_mfn);		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)			free_p2m_page(mid_mfn);		else			p2m_top_mfn_p[topidx] = mid_mfn;	}	if (p2m_top[topidx][mididx] == p2m_identity ||	    p2m_top[topidx][mididx] == p2m_missing) {		/* p2m leaf page is missing */		unsigned long *p2m;		unsigned long *p2m_orig = p2m_top[topidx][mididx];		p2m = alloc_p2m_page();		if (!p2m)			return false;		p2m_init(p2m);		if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)			free_p2m_page(p2m);		else			mid_mfn[mididx] = virt_to_mfn(p2m);	}	return true;}
Developer: 7hunderbug, project: kernel-adaptation-n950-n9, lines of code: 75


Example 21: setup_ring

/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;

	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
	if (rv < 0)
		return rv;

	priv->ring_ref = rv;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->chip->vendor.irq = rv;

 again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);
	return rv;
}

Developer: AeroGirl, project: VAR-SOM-AM33-SDK7-Kernel, lines of code: 77


Example 22: snprintf

struct consfront_dev *init_consfront(char *_nodename){    xenbus_transaction_t xbt;    char* err;    char* message=NULL;    int retry=0;    char* msg = NULL;    char nodename[256];    char path[256];    static int consfrontends = 3;    struct consfront_dev *dev;    int res;    if (!_nodename)        snprintf(nodename, sizeof(nodename), "device/console/%d", consfrontends);    else        strncpy(nodename, _nodename, sizeof(nodename));    printk("******************* CONSFRONT for %s **********/n/n/n", nodename);    consfrontends++;    dev = malloc(sizeof(*dev));    memset(dev, 0, sizeof(*dev));    dev->nodename = strdup(nodename);#ifdef HAVE_LIBC    dev->fd = -1;#endif    snprintf(path, sizeof(path), "%s/backend-id", nodename);    if ((res = xenbus_read_integer(path)) < 0)         return NULL;    else        dev->dom = res;    evtchn_alloc_unbound(dev->dom, console_handle_input, dev, &dev->evtchn);    dev->ring = (struct xencons_interface *) alloc_page();    memset(dev->ring, 0, PAGE_SIZE);    dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_mfn(dev->ring), 0);    dev->events = NULL;again:    err = xenbus_transaction_start(&xbt);    if (err) {        printk("starting transaction/n");        free(err);    }    err = xenbus_printf(xbt, nodename, "ring-ref","%u",                dev->ring_ref);    if (err) {        message = "writing ring-ref";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename,                "port", "%u", dev->evtchn);    if (err) {        message = "writing event-channel";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename,                "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE);    if (err) {        message = "writing protocol";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename, "type", "%s", "ioemu");    if (err) {        message = "writing type";        goto abort_transaction;    }    snprintf(path, sizeof(path), "%s/state", nodename);    err = xenbus_switch_state(xbt, path, XenbusStateConnected);    if (err) {        message = "switching state";        goto abort_transaction;    }    err = xenbus_transaction_end(xbt, 0, &retry);    if (err) free(err);    if (retry) {            goto again;        printk("completing transaction/n");    }    goto done;abort_transaction:    free(err);    err = xenbus_transaction_end(xbt, 1, &retry);    printk("Abort transaction %s/n", message);    goto error;done:    snprintf(path, sizeof(path), "%s/backend", nodename);    msg = xenbus_read(XBT_NIL, path, &dev->backend);//.........这里部分代码省略.........
Developer: CPFL, project: gxen, lines of code: 101


Example 23: xenvif_tx_build_gops

//.........这里部分代码省略.........		idx += ret;		if (unlikely(txreq.size < ETH_HLEN)) {			netdev_dbg(vif->dev,				   "Bad packet size: %d/n", txreq.size);			xenvif_tx_err(vif, &txreq, idx);			break;		}		/* No crossing a page as the payload mustn't fragment. */		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {			netdev_err(vif->dev,				   "txreq.offset: %x, size: %u, end: %lu/n",				   txreq.offset, txreq.size,				   (txreq.offset&~PAGE_MASK) + txreq.size);			xenvif_fatal_tx_err(vif);			break;		}		index = pending_index(vif->pending_cons);		pending_idx = vif->pending_ring[index];		data_len = (txreq.size > PKT_PROT_LEN &&			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?			PKT_PROT_LEN : txreq.size;		skb = xenvif_alloc_skb(data_len);		if (unlikely(skb == NULL)) {			netdev_dbg(vif->dev,				   "Can't allocate a skb in start_xmit./n");			xenvif_tx_err(vif, &txreq, idx);			break;		}		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {			struct xen_netif_extra_info *gso;			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];			if (xenvif_set_skb_gso(vif, skb, gso)) {				/* Failure in xenvif_set_skb_gso is fatal. */				kfree_skb(skb);				break;			}		}		XENVIF_TX_CB(skb)->pending_idx = pending_idx;		__skb_put(skb, data_len);		vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;		vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;		vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;		vif->tx_copy_ops[*copy_ops].dest.u.gmfn =			virt_to_mfn(skb->data);		vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;		vif->tx_copy_ops[*copy_ops].dest.offset =			offset_in_page(skb->data);		vif->tx_copy_ops[*copy_ops].len = data_len;		vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;		(*copy_ops)++;		skb_shinfo(skb)->nr_frags = ret;		if (data_len < txreq.size) {			skb_shinfo(skb)->nr_frags++;			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],					     pending_idx);			xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);			gop++;		} else {			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],					     INVALID_PENDING_IDX);			memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,			       sizeof(txreq));		}		vif->pending_cons++;		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);		if (request_gop == NULL) {			kfree_skb(skb);			xenvif_tx_err(vif, &txreq, idx);			break;		}		gop = request_gop;		__skb_queue_tail(&vif->tx_queue, skb);		vif->tx.req_cons = idx;		if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||		    (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))			break;	}	(*map_ops) = gop - vif->tx_map_ops;	return;}
Developer: 7799, project: linux, lines of code: 101


Example 24: printk

struct fbfront_dev *init_fbfront(char *_nodename, unsigned long *mfns, int width, int height, int depth, int stride, int n){    xenbus_transaction_t xbt;    char* err;    char* message=NULL;    struct xenfb_page *s;    int retry=0;    char* msg;    int i, j;    struct fbfront_dev *dev;    int max_pd;    unsigned long mapped;    char* nodename = _nodename ? _nodename : "device/vfb/0";    char path[strlen(nodename) + 1 + 10 + 1];    printk("******************* FBFRONT for %s **********/n/n/n", nodename);    dev = malloc(sizeof(*dev));    memset(dev, 0, sizeof(*dev));    dev->nodename = strdup(nodename);#ifdef HAVE_LIBC    dev->fd = -1;#endif    snprintf(path, sizeof(path), "%s/backend-id", nodename);    dev->dom = xenbus_read_integer(path);     evtchn_alloc_unbound(dev->dom, fbfront_handler, dev, &dev->evtchn);    dev->page = s = (struct xenfb_page*) alloc_page();    memset(s,0,PAGE_SIZE);    s->in_cons = s->in_prod = 0;    s->out_cons = s->out_prod = 0;    dev->width = s->width = width;    dev->height = s->height = height;    dev->depth = s->depth = depth;    dev->stride = s->line_length = stride;    dev->mem_length = s->mem_length = n * PAGE_SIZE;    dev->offset = 0;    dev->events = NULL;    max_pd = sizeof(s->pd) / sizeof(s->pd[0]);    mapped = 0;    for (i = 0; mapped < n && i < max_pd; i++) {        unsigned long *pd = (unsigned long *) alloc_page();        for (j = 0; mapped < n && j < PAGE_SIZE / sizeof(unsigned long); j++)            pd[j] = mfns[mapped++];        for ( ; j < PAGE_SIZE / sizeof(unsigned long); j++)            pd[j] = 0;        s->pd[i] = virt_to_mfn(pd);    }    for ( ; i < max_pd; i++)        s->pd[i] = 0;again:    err = xenbus_transaction_start(&xbt);    if (err) {        printk("starting transaction/n");        free(err);    }    err = xenbus_printf(xbt, nodename, "page-ref","%u", virt_to_mfn(s));    if (err) {        message = "writing page-ref";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);    if (err) {        message = "writing event-channel";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename, "protocol", "%s",                        XEN_IO_PROTO_ABI_NATIVE);    if (err) {        message = "writing event-channel";        goto abort_transaction;    }    err = xenbus_printf(xbt, nodename, "feature-update", "1");    if (err) {        message = "writing event-channel";        goto abort_transaction;    }    snprintf(path, sizeof(path), "%s/state", nodename);    err = xenbus_switch_state(xbt, path, XenbusStateInitialised);    if (err) {        message = "switching state";        goto abort_transaction;    }    err = xenbus_transaction_end(xbt, 0, &retry);    if (err) free(err);    if (retry) {            goto again;        printk("completing transaction/n");    }//.........这里部分代码省略.........
Developer: Kristoffer, project: Xen-4.1.2, lines of code: 101


Example 25: cpu_initialize_context

static __cpuinit intcpu_initialize_context(unsigned int cpu, struct task_struct *idle){	struct vcpu_guest_context *ctxt;	struct desc_struct *gdt;	unsigned long gdt_mfn;	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))		return 0;	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);	if (ctxt == NULL)		return -ENOMEM;	gdt = get_cpu_gdt_table(cpu);	ctxt->flags = VGCF_IN_KERNEL;	ctxt->user_regs.ds = __KERNEL_DS;	ctxt->user_regs.es = __KERNEL_DS;	ctxt->user_regs.ss = __KERNEL_DS;#ifdef CONFIG_X86_32	ctxt->user_regs.fs = __KERNEL_PERCPU;#else	ctxt->gs_base_kernel = per_cpu_offset(cpu);#endif	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));	xen_copy_trap_info(ctxt->trap_ctxt);	ctxt->ldt_ents = 0;	BUG_ON((unsigned long)gdt & ~PAGE_MASK);	gdt_mfn = arbitrary_virt_to_mfn(gdt);	make_lowmem_page_readonly(gdt);	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));	ctxt->gdt_frames[0] = gdt_mfn;	ctxt->gdt_ents      = GDT_ENTRIES;	ctxt->user_regs.cs = __KERNEL_CS;	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);	ctxt->kernel_ss = __KERNEL_DS;	ctxt->kernel_sp = idle->thread.sp0;#ifdef CONFIG_X86_32	ctxt->event_callback_cs     = __KERNEL_CS;	ctxt->failsafe_callback_cs  = __KERNEL_CS;#endif	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))		BUG();	kfree(ctxt);	return 0;}
Developer ID: mikeberkelaar, Project: grhardened, Lines of code: 65


Example 26: kexec

void kexec(void *kernel, long kernel_size, void *module, long module_size, char *cmdline, unsigned long flags)
{
    struct xc_dom_image *dom;
    int rc;
    domid_t domid = DOMID_SELF;
    xen_pfn_t pfn;
    xc_interface *xc_handle;
    unsigned long i;
    void *seg;
    xen_pfn_t boot_page_mfn = virt_to_mfn(&_boot_page);
    char features[] = "";
    struct mmu_update *m2p_updates;
    unsigned long nr_m2p_updates;

    DEBUG("booting with cmdline %s\n", cmdline);
    xc_handle = xc_interface_open(0, 0, 0);

    dom = xc_dom_allocate(xc_handle, cmdline, features);
    dom->allocate = kexec_allocate;

    /* We are using guest owned memory, therefore no limits. */
    xc_dom_kernel_max_size(dom, 0);
    xc_dom_ramdisk_max_size(dom, 0);

    dom->kernel_blob = kernel;
    dom->kernel_size = kernel_size;

    dom->ramdisk_blob = module;
    dom->ramdisk_size = module_size;

    dom->flags = flags;
    dom->console_evtchn = start_info.console.domU.evtchn;
    dom->xenstore_evtchn = start_info.store_evtchn;

    tpm_hash2pcr(dom, cmdline);

    if ( (rc = xc_dom_boot_xen_init(dom, xc_handle, domid)) != 0 ) {
        grub_printf("xc_dom_boot_xen_init returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }
    if ( (rc = xc_dom_parse_image(dom)) != 0 ) {
        grub_printf("xc_dom_parse_image returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

#ifdef __i386__
    if (strcmp(dom->guest_type, "xen-3.0-x86_32p")) {
        grub_printf("can only boot x86 32 PAE kernels, not %s\n", dom->guest_type);
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }
#endif
#ifdef __x86_64__
    if (strcmp(dom->guest_type, "xen-3.0-x86_64")) {
        grub_printf("can only boot x86 64 kernels, not %s\n", dom->guest_type);
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }
#endif

    /* equivalent of xc_dom_mem_init */
    dom->arch_hooks = xc_dom_find_arch_hooks(xc_handle, dom->guest_type);
    dom->total_pages = start_info.nr_pages;

    /* equivalent of arch_setup_meminit */

    /* setup initial p2m */
    dom->p2m_host = malloc(sizeof(*dom->p2m_host) * dom->total_pages);

    /* Start with our current P2M */
    for (i = 0; i < dom->total_pages; i++)
        dom->p2m_host[i] = pfn_to_mfn(i);

    if ( (rc = xc_dom_build_image(dom)) != 0 ) {
        grub_printf("xc_dom_build_image returned %d\n", rc);
        errnum = ERR_BOOT_FAILURE;
        goto out;
    }

    /* copy hypercall page */
    /* TODO: domctl instead, but requires privileges */
    if (dom->parms.virt_hypercall != -1) {
        pfn = PHYS_PFN(dom->parms.virt_hypercall - dom->parms.virt_base);
        memcpy((void *) pages[pfn], hypercall_page, PAGE_SIZE);
    }

    /* Equivalent of xc_dom_boot_image */
    dom->shared_info_mfn = PHYS_PFN(start_info.shared_info);

    if (!xc_dom_compat_check(dom)) {
        grub_printf("xc_dom_compat_check failed\n");
        errnum = ERR_EXEC_FORMAT;
        goto out;
    }

    /* Move current console, xenstore and boot MFNs to the allocated place */
    do_exchange(dom, dom->console_pfn, start_info.console.domU.mfn);
    do_exchange(dom, dom->xenstore_pfn, start_info.store_mfn);
// ... (the rest of this function is omitted here) ...
Developer ID: CrazyXen, Project: XEN_CODE, Lines of code: 101


Example 27: __do_suspend

static int __do_suspend(void *ignore)
{
    int i, j, k, fpp, err;

    extern unsigned long max_pfn;
    extern unsigned long *pfn_to_mfn_frame_list_list;
    extern unsigned long *pfn_to_mfn_frame_list[];

    extern void time_resume(void);

    BUG_ON(smp_processor_id() != 0);
    BUG_ON(in_interrupt());

    if (xen_feature(XENFEAT_auto_translated_physmap)) {
        printk(KERN_WARNING "Cannot suspend in "
               "auto_translated_physmap mode.\n");
        return -EOPNOTSUPP;
    }

    err = smp_suspend();
    if (err)
        return err;

    xenbus_suspend();

    preempt_disable();

#ifdef __i386__
    kmem_cache_shrink(pgd_cache);
#endif
    mm_pin_all();

    __cli();
    preempt_enable();

    gnttab_suspend();

    HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
    clear_fixmap(FIX_SHARED_INFO);

    xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
    xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);

    /*
     * We'll stop somewhere inside this hypercall. When it returns,
     * we'll start resuming after the restore.
     */
    HYPERVISOR_suspend(virt_to_mfn(xen_start_info));

    shutting_down = SHUTDOWN_INVALID;

    set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);

    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);

    memset(empty_zero_page, 0, PAGE_SIZE);

    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        virt_to_mfn(pfn_to_mfn_frame_list_list);

    /* Rebuild the pfn-to-mfn frame lists: each frame that holds part of the
     * phys-to-machine table is re-registered by its (post-restore) MFN so
     * the hypervisor and tools can locate the P2M again. */
    fpp = PAGE_SIZE/sizeof(unsigned long);
    for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
        if ((j % fpp) == 0) {
            k++;
            pfn_to_mfn_frame_list_list[k] =
                virt_to_mfn(pfn_to_mfn_frame_list[k]);
            j = 0;
        }
        pfn_to_mfn_frame_list[k][j] =
            virt_to_mfn(&phys_to_machine_mapping[i]);
    }
    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

    gnttab_resume();

    irq_resume();

    time_resume();

    switch_idle_mm();

    __sti();

    xencons_resume();

    xenbus_resume();

    smp_resume();

    return err;
}
Developer ID: BackupTheBerlios, Project: arp2-svn, Lines of code: 91


Example 28: network_rx

void network_rx(struct netfront_dev *dev)
{
    RING_IDX rp, cons, req_prod;
    int nr_consumed, more, i, notify;
#ifdef HAVE_LIBC
    int some;
#endif

    nr_consumed = 0;
moretodo:
    rp = dev->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */

#ifdef HAVE_LIBC
    some = 0;
#endif
    for (cons = dev->rx.rsp_cons; cons != rp; nr_consumed++, cons++)
    {
        struct net_buffer* buf;
        unsigned char* page;
        int id;

        struct netif_rx_response *rx = RING_GET_RESPONSE(&dev->rx, cons);

        id = rx->id;
        BUG_ON(id >= NET_RX_RING_SIZE);

        buf = &dev->rx_buffers[id];
        page = (unsigned char*)buf->page;
        gnttab_end_access(buf->gref);

        if (rx->status > NETIF_RSP_NULL)
        {
#ifdef HAVE_LIBC
            if (dev->netif_rx == NETIF_SELECT_RX) {
                int len = rx->status;
                ASSERT(current == main_thread);
                if (len > dev->len)
                    len = dev->len;
                memcpy(dev->data, page + rx->offset, len);
                dev->rlen = len;
                some = 1;
                break;
            } else
#endif
                dev->netif_rx(page + rx->offset, rx->status);
        }
    }
    dev->rx.rsp_cons = cons;

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx, more);
#ifdef HAVE_LIBC
    if (more && !some) goto moretodo;
#else
    if (more) goto moretodo;
#endif

    req_prod = dev->rx.req_prod_pvt;
    for (i = 0; i < nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
        struct net_buffer* buf = &dev->rx_buffers[id];
        void* page = buf->page;

        /* We are sure to have free gnttab entries since they got released above */
        buf->gref = req->gref =
            gnttab_grant_access(dev->dom, virt_to_mfn(page), 0);

        req->id = id;
    }

    wmb();

    dev->rx.req_prod_pvt = req_prod + i;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);
}
Developer ID: dornerworks, Project: mini-os, Lines of code: 82


Example 29: xenkbd_connect_backend

static int xenkbd_connect_backend(struct xenbus_device *dev,
                                  struct xenkbd_info *info)
{
    int ret, evtchn;
    struct xenbus_transaction xbt;

    ret = gnttab_grant_foreign_access(dev->otherend_id,
                                      virt_to_mfn(info->page), 0);
    if (ret < 0)
        return ret;
    info->gref = ret;

    ret = xenbus_alloc_evtchn(dev, &evtchn);
    if (ret)
        goto error_grant;
    ret = bind_evtchn_to_irqhandler(evtchn, input_handler,
                                    0, dev->devicetype, info);
    if (ret < 0) {
        xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
        goto error_evtchan;
    }
    info->irq = ret;

 again:
    ret = xenbus_transaction_start(&xbt);
    if (ret) {
        xenbus_dev_fatal(dev, ret, "starting transaction");
        goto error_irqh;
    }
    ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
                        virt_to_mfn(info->page));
    if (ret)
        goto error_xenbus;
    ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
    if (ret)
        goto error_xenbus;
    ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                        evtchn);
    if (ret)
        goto error_xenbus;
    ret = xenbus_transaction_end(xbt, 0);
    if (ret) {
        if (ret == -EAGAIN)
            goto again;
        xenbus_dev_fatal(dev, ret, "completing transaction");
        goto error_irqh;
    }

    xenbus_switch_state(dev, XenbusStateInitialised);
    return 0;

 error_xenbus:
    xenbus_transaction_end(xbt, 1);
    xenbus_dev_fatal(dev, ret, "writing xenstore");
 error_irqh:
    unbind_from_irqhandler(info->irq, info);
    info->irq = -1;
 error_evtchan:
    xenbus_free_evtchn(dev, evtchn);
 error_grant:
    gnttab_end_foreign_access_ref(info->gref, 0);
    info->gref = -1;

    return ret;
}
Developer ID: DirtyDroidX, Project: android_kernel_htc_m8ul, Lines of code: 64
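Taken together, the init_fbfront example and examples 25 to 29 show the two recurring uses of virt_to_mfn: the MFN of a guest-allocated page is either written to xenstore (for instance as page-ref) so the backend can map it directly, or handed to the grant-table API so the backend is given explicit, revocable access. A condensed sketch of the grant-based pattern, written in the Mini-OS style of the init_fbfront and network_rx examples and assuming alloc_page() and gnttab_grant_access() behave as they do there, might look like this; the function share_page_with_backend is illustrative only.

/*
 * Condensed, illustrative sketch (an assumption based on the Mini-OS style
 * frontends above, not code from those projects): share one freshly
 * allocated page with a backend domain.
 */
static grant_ref_t share_page_with_backend(domid_t backend_dom, void **out_page)
{
    void *page = (void *) alloc_page();   /* guest-owned page to be shared */

    memset(page, 0, PAGE_SIZE);
    *out_page = page;

    /* virt_to_mfn() yields the machine frame the backend has to map;
     * the last argument (0) requests read-write access. */
    return gnttab_grant_access(backend_dom, virt_to_mfn(page), 0);
}

The returned grant reference, or in the xenstore-based variant the raw MFN itself, is then published to the backend, typically with xenbus_printf() inside a transaction as init_fbfront and xenkbd_connect_backend do above.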



Note: the virt_to_mfn function examples in this article were compiled from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce this article without permission.

