您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ vma_pages函数代码示例

51自学网 2021-06-03 09:45:52
  C++
这篇教程C++ vma_pages函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中vma_pages函数的典型用法代码示例。如果您正苦于以下问题:C++ vma_pages函数的具体用法?C++ vma_pages怎么用?C++ vma_pages使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了vma_pages函数的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: drm_gem_mmap

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	/* Resolve the fake mmap offset (and page count) to a vma node. */
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	/* Only files the vma manager has granted access may map the object. */
	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	/* Drop the reference taken by kref_get_unless_zero() above. */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
开发者ID:jobnz,项目名称:adi-axi-hdmi,代码行数:62,


示例2: __swiotlb_mmap_pfn

/*
 * __swiotlb_mmap_pfn - remap a physically contiguous buffer into a VMA.
 *
 * The buffer starts at page frame @pfn and is @size bytes long.  The VMA's
 * vm_pgoff selects a page offset into the buffer; the request is rejected
 * with -ENXIO when the offset or the VMA length would run past the buffer.
 */
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	unsigned long avail = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long offset = vma->vm_pgoff;

	/* Both the start offset and the whole span must fit in the buffer. */
	if (offset >= avail || vma_pages(vma) > avail - offset)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start, pfn + offset,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
开发者ID:bristot,项目名称:linux,代码行数:17,


示例3: vma_pages_init

/*
 * vma_pages_init - demo module init: inspect the VMA containing an address.
 *
 * Picks an address just past the start of the current process's first
 * mapping, looks up the owning VMA with find_vma(), and logs its bounds
 * and page count (vma_pages()).  Always returns 0 so the module loads
 * even when the lookup fails.
 *
 * Fixes over the original: the printk format strings used "/n" instead of
 * the newline escape "\n", and vma_pages()'s unsigned long result was
 * truncated into an int (printed with a mismatched %d).
 */
int __init vma_pages_init(void)
{
	struct mm_struct *mm = current->mm;		/* address space of the current task */
	unsigned long addr = mm->mmap->vm_start + 1;	/* an address inside the first VMA */
	struct vm_area_struct *vma;

	printk("<0>addr=0x%lx\n", addr);

	vma = find_vma(mm, addr);	/* VMA whose range contains addr, if any */
	if (vma != NULL) {
		unsigned long page_number;

		printk("<0>vma->vm_start=0x%lx\n", vma->vm_start);
		printk("<0>vma->vm_end=0x%lx\n", vma->vm_end);

		page_number = vma_pages(vma);	/* number of pages spanned by the VMA */
		printk("<0>the page_number of vma is:%lu\n", page_number);
	} else
		printk("<0> You have failed!\n");

	return 0;
}
开发者ID:fjrti,项目名称:snippets,代码行数:20,


示例4: mspec_mmap

/* * mspec_mmap * * Called when mmapping the device.  Initializes the vma with a fault handler * and private data structure necessary to allocate, track, and free the * underlying pages. */static intmspec_mmap(struct file *file, struct vm_area_struct *vma,					enum mspec_page_type type){	struct vma_data *vdata;	int pages, vdata_size;	if (vma->vm_pgoff != 0)		return -EINVAL;	if ((vma->vm_flags & VM_SHARED) == 0)		return -EINVAL;	if ((vma->vm_flags & VM_WRITE) == 0)		return -EPERM;	pages = vma_pages(vma);	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);	if (vdata_size <= PAGE_SIZE)		vdata = kzalloc(vdata_size, GFP_KERNEL);	else		vdata = vzalloc(vdata_size);	if (!vdata)		return -ENOMEM;	vdata->vm_start = vma->vm_start;	vdata->vm_end = vma->vm_end;	vdata->type = type;	spin_lock_init(&vdata->lock);	atomic_set(&vdata->refcnt, 1);	vma->vm_private_data = vdata;	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;	if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);	vma->vm_ops = &mspec_vm_ops;	return 0;}
开发者ID:AshishNamdev,项目名称:linux,代码行数:46,


示例5: ncp_mmap

/* This is used for a general mmap of a ncp file */int ncp_mmap(struct file *file, struct vm_area_struct *vma){	struct inode *inode = file_inode(file);		DPRINTK("ncp_mmap: called/n");	if (!ncp_conn_valid(NCP_SERVER(inode)))		return -EIO;	/* only PAGE_COW or read-only supported now */	if (vma->vm_flags & VM_SHARED)		return -EINVAL;	/* we do not support files bigger than 4GB... We eventually 	   supports just 4GB... */	if (vma_pages(vma) + vma->vm_pgoff	   > (1U << (32 - PAGE_SHIFT)))		return -EFBIG;	vma->vm_ops = &ncp_file_mmap;	file_accessed(file);	return 0;}
开发者ID:AeroGirl,项目名称:VAR-SOM-AM33-SDK7-Kernel,代码行数:23,


示例6: blktap_ring_mmap_request

static intblktap_ring_mmap_request(struct blktap *tap,			 struct vm_area_struct *vma){	struct blktap_ring *ring = &tap->ring;	struct blktap_request *request;	int usr_idx, seg, err;	unsigned long addr, n_segs;	usr_idx  = vma->vm_pgoff - 1;	seg      = usr_idx % BLKTAP_SEGMENT_MAX;	usr_idx /= BLKTAP_SEGMENT_MAX;	request = ring->pending[usr_idx];	if (!request)		return -EINVAL;	n_segs = request->nr_pages - seg;	n_segs = min(n_segs, vma_pages(vma));	for (addr = vma->vm_start;	     seg < n_segs;	     seg++, addr += PAGE_SIZE) {		struct page *page = request->pages[seg];		dev_dbg(tap->ring.dev,			"mmap request %d seg %d addr %lx/n",			usr_idx, seg, addr);		err = vm_insert_page(vma, addr, page);		if (err)			return err;	}	vma->vm_flags |= VM_DONTCOPY;	vma->vm_flags |= VM_RESERVED;	return 0;}
开发者ID:relip,项目名称:blktap-dkms,代码行数:39,


示例7: uio_mmap

static int uio_mmap(struct file *filep, struct vm_area_struct *vma){	struct uio_listener *listener = filep->private_data;	struct uio_device *idev = listener->dev;	int mi;	unsigned long requested_pages, actual_pages;	int ret = 0;	if (vma->vm_end < vma->vm_start)		return -EINVAL;	vma->vm_private_data = idev;	mi = uio_find_mem_index(vma);	if (mi < 0)		return -EINVAL;	requested_pages = vma_pages(vma);	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)			+ idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;	if (requested_pages > actual_pages)		return -EINVAL;	if (idev->info->mmap) {		ret = idev->info->mmap(idev->info, vma);		return ret;	}	switch (idev->info->mem[mi].memtype) {		case UIO_MEM_PHYS:			return uio_mmap_physical(vma);		case UIO_MEM_LOGICAL:		case UIO_MEM_VIRTUAL:			return uio_mmap_logical(vma);		default:			return -EINVAL;	}}
开发者ID:3null,项目名称:linux,代码行数:38,


示例8: mtk_drm_gem_mmap

int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma){	struct drm_file *file_priv = filp->private_data;	struct drm_device *dev = file_priv->minor->dev;	struct drm_gem_object *obj;	struct drm_vma_offset_node *node;	int ret;	if (drm_device_is_unplugged(dev))		return -ENODEV;	mutex_lock(&dev->struct_mutex);	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,				   vma->vm_pgoff,				   vma_pages(vma));	if (!node) {		mutex_unlock(&dev->struct_mutex);		DRM_ERROR("failed to find vma node./n");		return -EINVAL;	} else if (!drm_vma_node_is_allowed(node, filp)) {		mutex_unlock(&dev->struct_mutex);		return -EACCES;	}	/*	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the	 * whole buffer from the start.	 */	vma->vm_pgoff = 0;	obj = container_of(node, struct drm_gem_object, vma_node);	ret = mtk_drm_gem_mmap_buf(obj, vma);	mutex_unlock(&dev->struct_mutex);	return ret;}
开发者ID:linzhangru,项目名称:Sony-xa-kernel-tuba,代码行数:38,


示例9: arch_dma_mmap

/*
 * arch_dma_mmap - map a DMA coherent buffer into user space.
 *
 * Marks the user mapping non-cached, lets a per-device coherent area claim
 * the request first, and otherwise remaps the buffer's page frames into the
 * VMA.  Returns 0 on success or -ENXIO when the requested window does not
 * fit inside the buffer.
 */
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_user = vma_pages(vma);
	unsigned long nr_total = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long base_pfn = __phys_to_pfn(dma_addr);
	unsigned long pg_off = vma->vm_pgoff;
	int rc = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* A device-private coherent area handles the mapping itself. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &rc))
		return rc;

	/* Both the page offset and the user span must fit in the buffer. */
	if (pg_off >= nr_total || nr_user > nr_total - pg_off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start, base_pfn + pg_off,
			       nr_user << PAGE_SHIFT, vma->vm_page_prot);
}
开发者ID:AskDrCatcher,项目名称:linux,代码行数:24,


示例10: dup_mmap

static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm){	struct vm_area_struct * mpnt, *tmp, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_mm(current->mm);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->map_count = 0;	mm->rss = 0;	mm->anon_rss = 0;	cpus_clear(mm->cpu_vm_mask);	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			__vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,							-vma_pages(mpnt));			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;			if (security_vm_enough_memory(len))				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		pol = mpol_copy(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_mm = mm;		tmp->vm_next = NULL;		anon_vma_link(tmp);		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_dentry->d_inode;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);      			/* insert tmp into the share list, just after mpnt */			spin_lock(&file->f_mapping->i_mmap_lock);			tmp->vm_truncate_count = mpnt->vm_truncate_count;			flush_dcache_mmap_lock(file->f_mapping);			vma_prio_tree_add(tmp, mpnt);			flush_dcache_mmap_unlock(file->f_mapping);			spin_unlock(&file->f_mapping->i_mmap_lock);		}		/*		 * Link in the new vma and copy the page table entries:		 * link in first so that swapoff can see swap entries,		 * and try_to_unmap_one's find_vma find the new vma.		 
*/		spin_lock(&mm->page_table_lock);		*pprev = tmp;		pprev = &tmp->vm_next;		__vma_link_rb(mm, tmp, rb_link, rb_parent);		rb_link = &tmp->vm_rb.rb_right;		rb_parent = &tmp->vm_rb;		mm->map_count++;		retval = copy_page_range(mm, current->mm, tmp);		spin_unlock(&mm->page_table_lock);		if (tmp->vm_ops && tmp->vm_ops->open)			tmp->vm_ops->open(tmp);		if (retval)			goto out;	}
开发者ID:Antonio-Zhou,项目名称:Linux-2.6.11,代码行数:90,


示例11: dup_mmap

static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm){	struct vm_area_struct *mpnt, *tmp, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_dup_mm(oldmm);	/*	 * Not linked in yet - no deadlock potential:	 */	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->cached_hole_size = ~0UL;	mm->map_count = 0;	cpumask_clear(mm_cpumask(mm));	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	retval = ksm_fork(mm, oldmm);	if (retval)		goto out;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			long pages = vma_pages(mpnt);			mm->total_vm -= pages;			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,								-pages);			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;			if (security_vm_enough_memory(len))				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		pol = mpol_dup(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_mm = mm;		tmp->vm_next = NULL;		anon_vma_link(tmp);		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_path.dentry->d_inode;			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			spin_lock(&mapping->i_mmap_lock);			if (tmp->vm_flags & VM_SHARED)				mapping->i_mmap_writable++;			tmp->vm_truncate_count = mpnt->vm_truncate_count;			flush_dcache_mmap_lock(mapping);			/* insert tmp into the share list, just after mpnt */			vma_prio_tree_add(tmp, mpnt);			
flush_dcache_mmap_unlock(mapping);			spin_unlock(&mapping->i_mmap_lock);		}		/*		 * Clear hugetlb-related page reserves for children. This only		 * affects MAP_PRIVATE mappings. Faults generated by the child		 * are not guaranteed to succeed, even if read-only		 */		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		/*		 * Link in the new vma and copy the page table entries.		 */		*pprev = tmp;		pprev = &tmp->vm_next;		__vma_link_rb(mm, tmp, rb_link, rb_parent);		rb_link = &tmp->vm_rb.rb_right;		rb_parent = &tmp->vm_rb;		mm->map_count++;		retval = copy_page_range(mm, oldmm, mpnt);//.........这里部分代码省略.........
开发者ID:genua,项目名称:anoubis_os,代码行数:101,


示例12: dup_mmap

static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm){	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	uprobe_start_dup_mmap();	down_write(&oldmm->mmap_sem);	flush_cache_dup_mm(oldmm);	uprobe_dup_mmap(oldmm, mm);	/*	 * Not linked in yet - no deadlock potential:	 */	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);	/* No ordering required: file already has been exposed. */	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));	mm->total_vm = oldmm->total_vm;	mm->shared_vm = oldmm->shared_vm;	mm->exec_vm = oldmm->exec_vm;	mm->stack_vm = oldmm->stack_vm;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	retval = ksm_fork(mm, oldmm);	if (retval)		goto out;	retval = khugepaged_fork(mm, oldmm);	if (retval)		goto out;	prev = NULL;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,							-vma_pages(mpnt));			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned long len = vma_pages(mpnt);			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		INIT_LIST_HEAD(&tmp->anon_vma_chain);		retval = vma_dup_policy(mpnt, tmp);		if (retval)			goto fail_nomem_policy;		tmp->vm_mm = mm;		if (anon_vma_fork(tmp, mpnt))			goto fail_nomem_anon_vma_fork;		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_next = tmp->vm_prev = NULL;		file = tmp->vm_file;		if (file) {			struct inode *inode = file_inode(file);			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			i_mmap_lock_write(mapping);			if (tmp->vm_flags & VM_SHARED)				atomic_inc(&mapping->i_mmap_writable);			flush_dcache_mmap_lock(mapping);			/* insert tmp into the share list, just 
after mpnt */			vma_interval_tree_insert_after(tmp, mpnt,					&mapping->i_mmap);			flush_dcache_mmap_unlock(mapping);			i_mmap_unlock_write(mapping);		}		/*		 * Clear hugetlb-related page reserves for children. This only		 * affects MAP_PRIVATE mappings. Faults generated by the child		 * are not guaranteed to succeed, even if read-only		 */		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		/*		 * Link in the new vma and copy the page table entries.		 */		*pprev = tmp;		pprev = &tmp->vm_next;		tmp->vm_prev = prev;		prev = tmp;		__vma_link_rb(mm, tmp, rb_link, rb_parent);//.........这里部分代码省略.........
开发者ID:19Dan01,项目名称:linux,代码行数:101,


示例13: dup_mmap

static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm){	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_dup_mm(oldmm);	uprobe_dup_mmap(oldmm, mm);	/*	 * Not linked in yet - no deadlock potential:	 */	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->cached_hole_size = ~0UL;	mm->map_count = 0;	cpumask_clear(mm_cpumask(mm));	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	retval = ksm_fork(mm, oldmm);	if (retval)		goto out;	retval = khugepaged_fork(mm, oldmm);	if (retval)		goto out;	prev = NULL;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,							-vma_pages(mpnt));			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned long len = vma_pages(mpnt);			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		INIT_LIST_HEAD(&tmp->anon_vma_chain);		pol = mpol_dup(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_mm = mm;		if (anon_vma_fork(tmp, mpnt))			goto fail_nomem_anon_vma_fork;		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_next = tmp->vm_prev = NULL;		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_path.dentry->d_inode;			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_prfile)				get_file(tmp->vm_prfile);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			mutex_lock(&mapping->i_mmap_mutex);			if (tmp->vm_flags & VM_SHARED)				mapping->i_mmap_writable++;			
flush_dcache_mmap_lock(mapping);			/* insert tmp into the share list, just after mpnt */			if (unlikely(tmp->vm_flags & VM_NONLINEAR))				vma_nonlinear_insert(tmp,						&mapping->i_mmap_nonlinear);			else				vma_interval_tree_insert_after(tmp, mpnt,							&mapping->i_mmap);			flush_dcache_mmap_unlock(mapping);			mutex_unlock(&mapping->i_mmap_mutex);		}		/*		 * Clear hugetlb-related page reserves for children. This only		 * affects MAP_PRIVATE mappings. Faults generated by the child		 * are not guaranteed to succeed, even if read-only		 */		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		/*//.........这里部分代码省略.........
开发者ID:hexianren,项目名称:linux-3.7-Read,代码行数:101,


示例14: dup_mmap

static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm){	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_dup_mm(oldmm);	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->cached_hole_size = ~0UL;	mm->map_count = 0;	cpumask_clear(mm_cpumask(mm));	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	retval = ksm_fork(mm, oldmm);	if (retval)		goto out;	retval = khugepaged_fork(mm, oldmm);	if (retval)		goto out;	prev = NULL;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			long pages = vma_pages(mpnt);			mm->total_vm -= pages;			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,								-pages);			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;			if (security_vm_enough_memory_mm(oldmm, len)) 				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		INIT_LIST_HEAD(&tmp->anon_vma_chain);		pol = mpol_dup(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_mm = mm;		if (anon_vma_fork(tmp, mpnt))			goto fail_nomem_anon_vma_fork;		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_next = tmp->vm_prev = NULL;		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_path.dentry->d_inode;			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			mutex_lock(&mapping->i_mmap_mutex);			if (tmp->vm_flags & VM_SHARED)				mapping->i_mmap_writable++;			flush_dcache_mmap_lock(mapping);						vma_prio_tree_add(tmp, mpnt);			
flush_dcache_mmap_unlock(mapping);			mutex_unlock(&mapping->i_mmap_mutex);		}		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		*pprev = tmp;		pprev = &tmp->vm_next;		tmp->vm_prev = prev;		prev = tmp;		__vma_link_rb(mm, tmp, rb_link, rb_parent);		rb_link = &tmp->vm_rb.rb_right;		rb_parent = &tmp->vm_rb;		mm->map_count++;		retval = copy_page_range(mm, oldmm, mpnt);		if (tmp->vm_ops && tmp->vm_ops->open)			tmp->vm_ops->open(tmp);		if (retval)//.........这里部分代码省略.........
开发者ID:MiniBlu,项目名称:cm11_kernel_htc_msm8974a3ul,代码行数:101,



注:本文中的vma_pages函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ vma_policy函数代码示例
C++ vma_create函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。