您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ vma_policy函数代码示例

51自学网 2021-06-03 09:45:54
  C++
这篇教程C++ vma_policy函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中vma_policy函数的典型用法代码示例。如果您正苦于以下问题:C++ vma_policy函数的具体用法?C++ vma_policy怎么用?C++ vma_policy使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了vma_policy函数的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: vma_dup_policy

/*
 * vma_dup_policy - copy the NUMA memory policy of @src onto @dst.
 *
 * Duplicates src's mempolicy with mpol_dup() and installs the duplicate
 * in dst->vm_policy.  Returns 0 on success, or the negative errno
 * encoded in the error pointer returned by mpol_dup().
 */
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *dup;

	dup = mpol_dup(vma_policy(src));
	if (IS_ERR(dup))
		return PTR_ERR(dup);

	dst->vm_policy = dup;
	return 0;
}
开发者ID:tkokamo,项目名称:cow_monitor,代码行数:9,


示例2: might_sleep

static struct vm_area_struct *remove_vma(struct vm_area_struct *vma){	struct vm_area_struct *next = vma->vm_next;	might_sleep();	if (vma->vm_ops && vma->vm_ops->close)		vma->vm_ops->close(vma);	if (vma->vm_file) {		fput(vma->vm_file);		if (vma->vm_flags & VM_EXECUTABLE)			removed_exe_file_vma(vma->vm_mm);	}	mpol_put(vma_policy(vma));	kmem_cache_free(vm_area_cachep, vma);	return next;}
开发者ID:mjduddin,项目名称:B14CKB1RD_kernel_m8,代码行数:16,


示例3: remove_vm_struct

/* * Remove one vm structure and free it. */static void remove_vm_struct(struct vm_area_struct *vma){	struct file *file = vma->vm_file;	if (file) {		struct address_space *mapping = file->f_mapping;		spin_lock(&mapping->i_mmap_lock);		__remove_shared_vm_struct(vma, file, mapping);		spin_unlock(&mapping->i_mmap_lock);	}	if (vma->vm_ops && vma->vm_ops->close)		vma->vm_ops->close(vma);	if (file)		fput(file);	anon_vma_unlink(vma);	mpol_free(vma_policy(vma));	kmem_cache_free(vm_area_cachep, vma);}
开发者ID:iPodLinux,项目名称:linux-2.6.7-ipod,代码行数:21,


示例4: dup_mmap

/*
 * dup_mmap - clone the parent's VMA list into the child mm at fork time
 * (2.6.11-era code: mpol_copy/vma_set_policy, prio-tree file mappings).
 *
 * Walks every VMA of current->mm, skipping VM_DONTCOPY regions,
 * charging VM_ACCOUNT pages, duplicating the NUMA mempolicy, linking
 * file-backed copies into the mapping's prio tree, then inserting each
 * copy into mm's list/rbtree and copying page tables under
 * mm->page_table_lock.
 *
 * NOTE(review): this excerpt is truncated by the source page — the
 * "out"/"fail_nomem"/"fail_nomem_policy" labels jumped to below and the
 * function tail are not shown, so the fragment does not compile as-is.
 */
static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm){	struct vm_area_struct * mpnt, *tmp, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_mm(current->mm);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->map_count = 0;	mm->rss = 0;	mm->anon_rss = 0;	cpus_clear(mm->cpu_vm_mask);	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			__vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,							-vma_pages(mpnt));			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;			if (security_vm_enough_memory(len))				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		pol = mpol_copy(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_mm = mm;		tmp->vm_next = NULL;		anon_vma_link(tmp);		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_dentry->d_inode;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);      			/* insert tmp into the share list, just after mpnt */			spin_lock(&file->f_mapping->i_mmap_lock);			tmp->vm_truncate_count = mpnt->vm_truncate_count;			flush_dcache_mmap_lock(file->f_mapping);			vma_prio_tree_add(tmp, mpnt);			flush_dcache_mmap_unlock(file->f_mapping);			spin_unlock(&file->f_mapping->i_mmap_lock);		}		/*		 * Link in the new vma and copy the page table entries:		 * link in first so that swapoff can see swap entries,		 * and try_to_unmap_one's find_vma find the new vma.		 
*/		spin_lock(&mm->page_table_lock);		*pprev = tmp;		pprev = &tmp->vm_next;		__vma_link_rb(mm, tmp, rb_link, rb_parent);		rb_link = &tmp->vm_rb.rb_right;		rb_parent = &tmp->vm_rb;		mm->map_count++;		retval = copy_page_range(mm, current->mm, tmp);		spin_unlock(&mm->page_table_lock);		if (tmp->vm_ops && tmp->vm_ops->open)			tmp->vm_ops->open(tmp);		if (retval)			goto out;	}
开发者ID:Antonio-Zhou,项目名称:Linux-2.6.11,代码行数:90,


示例5: dup_mmap

/*
 * dup_mmap - clone oldmm's address space into @mm at fork time
 * (later 2.6 era: mpol_dup, ksm_fork, prio-tree file mappings).
 *
 * Takes both mmap_sems (child nested to avoid lockdep complaints — see
 * the "Not linked in yet" comment), resets the child's VMA bookkeeping,
 * then copies each parent VMA: VM_DONTCOPY regions are skipped with
 * their statistics adjusted, VM_ACCOUNT pages are charged, the NUMA
 * mempolicy is duplicated, and file-backed copies are inserted into the
 * mapping's prio tree under i_mmap_lock.
 *
 * NOTE(review): the source page truncated this excerpt ("部分代码省略")
 * right after copy_page_range() — the error labels (out/fail_nomem*)
 * are not shown, so the fragment does not compile as-is.
 */
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm){	struct vm_area_struct *mpnt, *tmp, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_dup_mm(oldmm);	/*	 * Not linked in yet - no deadlock potential:	 */	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->cached_hole_size = ~0UL;	mm->map_count = 0;	cpumask_clear(mm_cpumask(mm));	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	retval = ksm_fork(mm, oldmm);	if (retval)		goto out;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			long pages = vma_pages(mpnt);			mm->total_vm -= pages;			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,								-pages);			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;			if (security_vm_enough_memory(len))				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		pol = mpol_dup(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_mm = mm;		tmp->vm_next = NULL;		anon_vma_link(tmp);		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_path.dentry->d_inode;			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			spin_lock(&mapping->i_mmap_lock);			if (tmp->vm_flags & VM_SHARED)				mapping->i_mmap_writable++;			tmp->vm_truncate_count = mpnt->vm_truncate_count;			flush_dcache_mmap_lock(mapping);			/* insert tmp into the share list, just after mpnt */			vma_prio_tree_add(tmp, mpnt);			
flush_dcache_mmap_unlock(mapping);			spin_unlock(&mapping->i_mmap_lock);		}		/*		 * Clear hugetlb-related page reserves for children. This only		 * affects MAP_PRIVATE mappings. Faults generated by the child		 * are not guaranteed to succeed, even if read-only		 */		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		/*		 * Link in the new vma and copy the page table entries.		 */		*pprev = tmp;		pprev = &tmp->vm_next;		__vma_link_rb(mm, tmp, rb_link, rb_parent);		rb_link = &tmp->vm_rb.rb_right;		rb_parent = &tmp->vm_rb;		mm->map_count++;		retval = copy_page_range(mm, oldmm, mpnt);//.........这里部分代码省略.........
开发者ID:genua,项目名称:anoubis_os,代码行数:101,


示例6: dup_mmap

/*
 * dup_mmap (fragment) - the VMA-copy loop and error paths of a modern
 * fork-time address-space clone (interval-tree era: vma_dup_policy,
 * anon_vma_fork, i_mmap_lock_write/vma_interval_tree_insert_after).
 *
 * Per VMA: VM_DONTCOPY regions are skipped with stats adjusted,
 * VM_ACCOUNT pages are charged against oldmm, the copy gets a duplicated
 * mempolicy (vma_dup_policy) and a forked anon_vma chain, file-backed
 * copies are inserted into the mapping's interval tree, then the copy is
 * linked into mm's list/rbtree and page tables are copied.  The unwind
 * labels release resources in reverse acquisition order and report
 * -ENOMEM via the shared "out" exit, which drops both mmap_sems.
 *
 * NOTE(review): the source page truncated the beginning of this excerpt
 * ("部分代码省略") — the function header and local declarations are not
 * shown, so the fragment does not compile as-is.
 */
//.........这里部分代码省略.........	retval = khugepaged_fork(mm, oldmm);	if (retval)		goto out;	prev = NULL;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,							-vma_pages(mpnt));			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned long len = vma_pages(mpnt);			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		INIT_LIST_HEAD(&tmp->anon_vma_chain);		retval = vma_dup_policy(mpnt, tmp);		if (retval)			goto fail_nomem_policy;		tmp->vm_mm = mm;		if (anon_vma_fork(tmp, mpnt))			goto fail_nomem_anon_vma_fork;		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_next = tmp->vm_prev = NULL;		file = tmp->vm_file;		if (file) {			struct inode *inode = file_inode(file);			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			i_mmap_lock_write(mapping);			if (tmp->vm_flags & VM_SHARED)				atomic_inc(&mapping->i_mmap_writable);			flush_dcache_mmap_lock(mapping);			/* insert tmp into the share list, just after mpnt */			vma_interval_tree_insert_after(tmp, mpnt,					&mapping->i_mmap);			flush_dcache_mmap_unlock(mapping);			i_mmap_unlock_write(mapping);		}		/*		 * Clear hugetlb-related page reserves for children. This only		 * affects MAP_PRIVATE mappings. Faults generated by the child		 * are not guaranteed to succeed, even if read-only		 */		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		/*		 * Link in the new vma and copy the page table entries.		 
*/		*pprev = tmp;		pprev = &tmp->vm_next;		tmp->vm_prev = prev;		prev = tmp;		__vma_link_rb(mm, tmp, rb_link, rb_parent);		rb_link = &tmp->vm_rb.rb_right;		rb_parent = &tmp->vm_rb;		mm->map_count++;		retval = copy_page_range(mm, oldmm, mpnt);		if (tmp->vm_ops && tmp->vm_ops->open)			tmp->vm_ops->open(tmp);		if (retval)			goto out;	}	/* a new mm has just been created */	arch_dup_mmap(oldmm, mm);	retval = 0;out:	up_write(&mm->mmap_sem);	flush_tlb_mm(oldmm);	up_write(&oldmm->mmap_sem);	uprobe_end_dup_mmap();	return retval;fail_nomem_anon_vma_fork:	mpol_put(vma_policy(tmp));fail_nomem_policy:	kmem_cache_free(vm_area_cachep, tmp);fail_nomem:	retval = -ENOMEM;	vm_unacct_memory(charge);	goto out;}
开发者ID:19Dan01,项目名称:linux,代码行数:101,


示例7: dup_mmap

/*
 * dup_mmap - clone oldmm's address space into @mm at fork time
 * (3.x era: i_mmap_mutex, interval tree + VM_NONLINEAR support, and an
 * out-of-tree vm_prfile field suggesting an aufs-patched kernel).
 *
 * Takes both mmap_sems (child nested), resets the child's VMA
 * bookkeeping, forks KSM/khugepaged state, then copies each parent VMA:
 * VM_DONTCOPY regions are skipped, VM_ACCOUNT pages are charged against
 * oldmm, the mempolicy is duplicated and the anon_vma chain forked, and
 * file-backed copies are inserted into the mapping's interval tree (or
 * the nonlinear list for VM_NONLINEAR) under i_mmap_mutex.
 *
 * NOTE(review): the source page truncated the tail of this excerpt
 * ("部分代码省略") — the rbtree linking, copy_page_range() call and the
 * out/fail_nomem* labels are not shown, so the fragment does not
 * compile as-is.
 */
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm){	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_dup_mm(oldmm);	uprobe_dup_mmap(oldmm, mm);	/*	 * Not linked in yet - no deadlock potential:	 */	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->cached_hole_size = ~0UL;	mm->map_count = 0;	cpumask_clear(mm_cpumask(mm));	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	retval = ksm_fork(mm, oldmm);	if (retval)		goto out;	retval = khugepaged_fork(mm, oldmm);	if (retval)		goto out;	prev = NULL;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,							-vma_pages(mpnt));			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned long len = vma_pages(mpnt);			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		INIT_LIST_HEAD(&tmp->anon_vma_chain);		pol = mpol_dup(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_mm = mm;		if (anon_vma_fork(tmp, mpnt))			goto fail_nomem_anon_vma_fork;		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_next = tmp->vm_prev = NULL;		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_path.dentry->d_inode;			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_prfile)				get_file(tmp->vm_prfile);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			mutex_lock(&mapping->i_mmap_mutex);			if (tmp->vm_flags & VM_SHARED)				mapping->i_mmap_writable++;			
flush_dcache_mmap_lock(mapping);			/* insert tmp into the share list, just after mpnt */			if (unlikely(tmp->vm_flags & VM_NONLINEAR))				vma_nonlinear_insert(tmp,						&mapping->i_mmap_nonlinear);			else				vma_interval_tree_insert_after(tmp, mpnt,							&mapping->i_mmap);			flush_dcache_mmap_unlock(mapping);			mutex_unlock(&mapping->i_mmap_mutex);		}		/*		 * Clear hugetlb-related page reserves for children. This only		 * affects MAP_PRIVATE mappings. Faults generated by the child		 * are not guaranteed to succeed, even if read-only		 */		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		/*//.........这里部分代码省略.........
开发者ID:hexianren,项目名称:linux-3.7-Read,代码行数:101,


示例8: device_ioctl

/*
 * device_ioctl (fragment) - part of an out-of-tree "cow_monitor" char
 * device ioctl handler that tries to duplicate another process's VMA
 * (identified by cow->pid / cow->addr / cow->len) into the calling
 * process, mimicking the dup_mmap() fork path: find_vma() lookup,
 * get_unmapped_area(), vma_dup_policy(), anon_vma_fork(), partial
 * rbtree-link bookkeeping (mostly commented out), then unlock and free.
 *
 * NOTE(review): the beginning of the function is truncated by the
 * source page, so this fragment does not compile as-is.
 * NOTE(review): every printk string ends in "/n" — almost certainly a
 * scraping artifact of "\n"; verify against the original repository.
 * NOTE(review): the success path kmem_cache_free()s the tmp vma it just
 * set up and never links it into current_mm (map_count++ and
 * __vma_link_rb are commented out), and the free_out path can free tmp
 * before it was allocated — this looks like experimental/incomplete
 * code; do not treat it as a reference implementation.
 */
//.........这里部分代码省略.........        if (target_mm == NULL) {      printk(KERN_ALERT "no process found with pid %d/n", (int) cow->pid);      return -EINVAL;    }            // we got target process    printk("we got target process/n");    // try to get semaphore     down_write(&target_mm->mmap_sem);     down_write(&current_mm->mmap_sem);        /*** check if the memory region specified by address is valid and has len length ***/    target_vm = find_vma(target_mm, (unsigned long) cow->addr);    if (target_vm == NULL || (unsigned long)cow->addr != target_vm->vm_start) {      printk(KERN_ALERT "no vm area found with addr == vm_start/n");      goto free_out;    }        printk("we got target vm area/n");    /*** check if current task can have another memory map ***/    if (current_mm->map_count > sysctl_max_map_count) {      printk(KERN_ALERT "no more region can be mapped to this process/n");      goto free_out;    }    flags = calc_mmap_flag_bits(target_vm->vm_flags); // calc MAP_XXX => VM_XXX flag for get_unmapped_area    cow->len = PAGE_ALIGN(cow->len);    /*** search for area which has enough size sequencially ***/    addr = get_unmapped_area(NULL, (unsigned long)cow->addr, cow->len, 0, flags);    if (addr & ~PAGE_MASK) { //if addr is not page size aligned      printk(KERN_ALERT "no unmapped are/n");      goto free_out;    }    tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);    if (!tmp) {      printk(KERN_ALERT "not enough memory to allocate vm_area_struct/n");      goto oom_out;    }    if (target_vm->vm_flags & VM_ACCOUNT)      printk("target region has VM_ACCOUNT flag/n");    /*** copy and set tmp ***/    *tmp = *target_vm; // copy target_vm    INIT_LIST_HEAD(&tmp->anon_vma_chain);    // here, vma_dup_policy    retval = vma_dup_policy(target_vm, tmp);    if (retval)      goto oom_policy_out;    if (anon_vma_fork(tmp, target_vm))      goto oom_anon_vma_fork;    tmp->vm_mm = current_mm;    // here, anon_vma_fork    tmp->vm_flags &= ~VM_LOCKED;    tmp->vm_next 
= tmp->vm_prev = NULL;    rb_link = &current_mm->mm_rb.rb_node;    rb_parent = NULL;    /*    pprev = &current_mm->mmap;    *pprev = tmp;    tmp->vm_prev = NULL;    prev = tmp;*/         //here __vma_link_rb    //  __vma_link_rb    rb_link = &tmp->vm_rb.rb_right;    rb_parent = &tmp->vm_rb;        //   current_mm->map_count++;    up_write(&current_mm->mmap_sem);    up_write(&target_mm->mmap_sem); //free semaphore        kmem_cache_free(vm_area_cachep, tmp);    /***  ***/    return 0;  oom_anon_vma_fork:    retval = -ENOMEM;    mpol_put(vma_policy(tmp));  oom_policy_out:      oom_out:    retval = -ENOMEM;  free_out:    kmem_cache_free(vm_area_cachep, tmp);    up_write(&current_mm->mmap_sem);    up_write(&target_mm->mmap_sem); //free semaphore        return retval;  }  return retval;}
开发者ID:tkokamo,项目名称:cow_monitor,代码行数:101,


示例9: dup_mmap

/*
 * dup_mmap - clone oldmm's address space into @mm at fork time
 * (3.0-era variant: i_mmap_mutex with prio-tree insertion,
 * ksm/khugepaged forking, anon_vma_fork).
 *
 * Takes both mmap_sems (child nested), resets the child's VMA
 * bookkeeping, then copies each parent VMA: VM_DONTCOPY regions are
 * skipped with total_vm adjusted, VM_ACCOUNT pages are charged against
 * oldmm, the mempolicy is duplicated and the anon_vma chain forked,
 * file-backed copies are added to the mapping's prio tree under
 * i_mmap_mutex, and each copy is linked into mm's list/rbtree before
 * copy_page_range() fills in the page tables.
 *
 * NOTE(review): the source page truncated the tail of this excerpt
 * ("部分代码省略") — the out/fail_nomem* labels are not shown, so the
 * fragment does not compile as-is.
 */
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm){	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;	struct rb_node **rb_link, *rb_parent;	int retval;	unsigned long charge;	struct mempolicy *pol;	down_write(&oldmm->mmap_sem);	flush_cache_dup_mm(oldmm);	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);	mm->locked_vm = 0;	mm->mmap = NULL;	mm->mmap_cache = NULL;	mm->free_area_cache = oldmm->mmap_base;	mm->cached_hole_size = ~0UL;	mm->map_count = 0;	cpumask_clear(mm_cpumask(mm));	mm->mm_rb = RB_ROOT;	rb_link = &mm->mm_rb.rb_node;	rb_parent = NULL;	pprev = &mm->mmap;	retval = ksm_fork(mm, oldmm);	if (retval)		goto out;	retval = khugepaged_fork(mm, oldmm);	if (retval)		goto out;	prev = NULL;	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {		struct file *file;		if (mpnt->vm_flags & VM_DONTCOPY) {			long pages = vma_pages(mpnt);			mm->total_vm -= pages;			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,								-pages);			continue;		}		charge = 0;		if (mpnt->vm_flags & VM_ACCOUNT) {			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;			if (security_vm_enough_memory_mm(oldmm, len)) 				goto fail_nomem;			charge = len;		}		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);		if (!tmp)			goto fail_nomem;		*tmp = *mpnt;		INIT_LIST_HEAD(&tmp->anon_vma_chain);		pol = mpol_dup(vma_policy(mpnt));		retval = PTR_ERR(pol);		if (IS_ERR(pol))			goto fail_nomem_policy;		vma_set_policy(tmp, pol);		tmp->vm_mm = mm;		if (anon_vma_fork(tmp, mpnt))			goto fail_nomem_anon_vma_fork;		tmp->vm_flags &= ~VM_LOCKED;		tmp->vm_next = tmp->vm_prev = NULL;		file = tmp->vm_file;		if (file) {			struct inode *inode = file->f_path.dentry->d_inode;			struct address_space *mapping = file->f_mapping;			get_file(file);			if (tmp->vm_flags & VM_DENYWRITE)				atomic_dec(&inode->i_writecount);			mutex_lock(&mapping->i_mmap_mutex);			if (tmp->vm_flags & VM_SHARED)				mapping->i_mmap_writable++;			flush_dcache_mmap_lock(mapping);						vma_prio_tree_add(tmp, mpnt);			
flush_dcache_mmap_unlock(mapping);			mutex_unlock(&mapping->i_mmap_mutex);		}		if (is_vm_hugetlb_page(tmp))			reset_vma_resv_huge_pages(tmp);		*pprev = tmp;		pprev = &tmp->vm_next;		tmp->vm_prev = prev;		prev = tmp;		__vma_link_rb(mm, tmp, rb_link, rb_parent);		rb_link = &tmp->vm_rb.rb_right;		rb_parent = &tmp->vm_rb;		mm->map_count++;		retval = copy_page_range(mm, oldmm, mpnt);		if (tmp->vm_ops && tmp->vm_ops->open)			tmp->vm_ops->open(tmp);		if (retval)//.........这里部分代码省略.........
开发者ID:MiniBlu,项目名称:cm11_kernel_htc_msm8974a3ul,代码行数:101,



注:本文中的vma_policy函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ vma_prio_tree_add函数代码示例
C++ vma_pages函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。