This collection of C++ vm_unmapped_area code examples is meant to be practical; we hope it helps you.
This article collects typical usage examples of the C++ vm_unmapped_area function (the examples themselves are Linux kernel C code). If you are wondering how exactly vm_unmapped_area is used, or you are looking for concrete calling examples, the hand-picked snippets below may help. A total of 26 vm_unmapped_area code examples are shown, sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the site recommend better code examples. A minimal sketch of the calling pattern that all of these examples share follows Example 1.

Example 1: hugetlb_get_unmapped_area_bottomup

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
        unsigned long addr, unsigned long len,
        unsigned long pgoff, unsigned long flags)
{
    unsigned long task_size = TASK_SIZE;
    struct vm_unmapped_area_info info;

    if (test_thread_flag(TIF_32BIT))
        task_size = STACK_TOP32;

    info.flags = 0;
    info.length = len;
    info.low_limit = TASK_UNMAPPED_BASE;
    info.high_limit = min(task_size, VA_EXCLUDE_START);
    info.align_mask = PAGE_MASK & ~HPAGE_MASK;
    info.align_offset = 0;
    addr = vm_unmapped_area(&info);

    if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
        VM_BUG_ON(addr != -ENOMEM);
        info.low_limit = VA_EXCLUDE_END;
        info.high_limit = task_size;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: 7799, project: linux, lines of code: 29
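Before the remaining examples, here is a minimal sketch of the calling pattern they all share, written against the in-kernel API introduced in Linux 3.8 (struct vm_unmapped_area_info plus vm_unmapped_area()). It is an illustration only: the wrapper name example_find_free_range, and the choice of a plain bottom-up, page-granular search with no extra alignment, are assumptions of this sketch and do not come from any of the projects credited in the examples. It is meant to compile inside kernel code, not in userspace.

#include <linux/mm.h>       /* struct vm_unmapped_area_info, vm_unmapped_area() */
#include <linux/sched.h>    /* current, TASK_SIZE, TASK_UNMAPPED_BASE */
#include <linux/errno.h>

/* Illustrative wrapper: find a free, page-aligned range of 'len' bytes
 * in the current process's address space. */
static unsigned long example_find_free_range(unsigned long len)
{
    struct vm_unmapped_area_info info;
    unsigned long addr;

    info.flags = 0;                      /* 0 = bottom-up; VM_UNMAPPED_AREA_TOPDOWN searches down from high_limit */
    info.length = len;                   /* size of the gap we need */
    info.low_limit = TASK_UNMAPPED_BASE; /* do not return anything below this address */
    info.high_limit = TASK_SIZE;         /* the gap must end at or below this address */
    info.align_mask = 0;                 /* no alignment beyond page granularity */
    info.align_offset = 0;

    addr = vm_unmapped_area(&info);

    /* On failure the return value is a negative errno stored in an
     * unsigned long, which is never page aligned - hence the
     * "addr & ~PAGE_MASK" test used throughout the examples. */
    if (addr & ~PAGE_MASK)
        return -ENOMEM;

    return addr;
}

The examples below fill in the same six fields, but with architecture-specific limits (STACK_TOP32, VA_EXCLUDE_START, mmap_base, and so on) and with cache-colouring alignment masks, and several of them retry with a second vm_unmapped_area() call or fall back from a top-down to a bottom-up search when the first attempt returns -ENOMEM.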
Example 2: hugetlb_get_unmapped_area_topdown

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
    struct hstate *h = hstate_file(file);
    struct vm_unmapped_area_info info;
    unsigned long addr;

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = current->mm->mmap_base;
    info.align_mask = PAGE_MASK & ~huge_page_mask(h);
    info.align_offset = 0;
    addr = vm_unmapped_area(&info);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    if (addr & ~PAGE_MASK) {
        VM_BUG_ON(addr != -ENOMEM);
        info.flags = 0;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: 0-T-0, project: ps4-linux, lines of code: 32
Example 3: arch_get_unmapped_area

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct * vma;
    unsigned long task_size = TASK_SIZE;
    int do_color_align;
    struct vm_unmapped_area_info info;

    if (flags & MAP_FIXED) {
        /* We do not accept a shared mapping if it would violate
         * cache aliasing constraints.
         */
        if ((flags & MAP_SHARED) &&
            ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
            return -EINVAL;
        return addr;
    }

    if (test_thread_flag(TIF_32BIT))
        task_size = STACK_TOP32;
    if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
        return -ENOMEM;

    do_color_align = 0;
    if (filp || (flags & MAP_SHARED))
        do_color_align = 1;

    if (addr) {
        if (do_color_align)
            addr = COLOR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);

        vma = find_vma(mm, addr);
        if (task_size - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = TASK_UNMAPPED_BASE;
    info.high_limit = min(task_size, VA_EXCLUDE_START);
    info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);

    if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
        VM_BUG_ON(addr != -ENOMEM);
        info.low_limit = VA_EXCLUDE_END;
        info.high_limit = task_size;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: 03199618, project: linux, lines of code: 56
Example 4: arch_get_unmapped_area

/*
 * place non-fixed mmaps firstly in the bottom part of memory, working up,
 * and then in the top part of memory, working down
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct vm_area_struct *vma;
    struct vm_unmapped_area_info info;

    if (len > TASK_SIZE)
        return -ENOMEM;

    /* handle MAP_FIXED */
    if (flags & MAP_FIXED)
        return addr;

    /* only honour a hint if we're not going to clobber something doing so */
    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(current->mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vm_start_gap(vma)))
            goto success;
    }

    /* search between the bottom of user VM and the stack grow area */
    info.flags = 0;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = (current->mm->start_stack - 0x00200000);
    info.align_mask = 0;
    info.align_offset = 0;
    addr = vm_unmapped_area(&info);
    if (!(addr & ~PAGE_MASK))
        goto success;
    VM_BUG_ON(addr != -ENOMEM);

    /* search from just above the WorkRAM area to the top of memory */
    info.low_limit = PAGE_ALIGN(0x80000000);
    info.high_limit = TASK_SIZE;
    addr = vm_unmapped_area(&info);
    if (!(addr & ~PAGE_MASK))
        goto success;
    VM_BUG_ON(addr != -ENOMEM);

#if 0
    printk("[area] l=%lx (ENOMEM) f='%s'\n",
           len, filp ? filp->f_path.dentry->d_name.name : "");
#endif
    return -ENOMEM;

 success:
#if 0
    printk("[area] l=%lx ad=%lx f='%s'\n",
           len, addr, filp ? filp->f_path.dentry->d_name.name : "");
#endif
    return addr;
} /* end arch_get_unmapped_area() */
Contributor ID: 12zz, project: linux, lines of code: 59
Example 5: arch_get_unmapped_area_topdown

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
            const unsigned long len, const unsigned long pgoff,
            const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0;
    struct vm_unmapped_area_info info;

    /* requested length too big for entire address space */
    if (len > TASK_SIZE - mmap_min_addr)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        return addr;

    /* requesting a specific address */
    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = max(PAGE_SIZE, mmap_min_addr);
    info.high_limit = mm->mmap_base;
    if (filp || (flags & MAP_SHARED))
        info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
    else
        info.align_mask = 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    if (addr & ~PAGE_MASK) {
        VM_BUG_ON(addr != -ENOMEM);
        info.flags = 0;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: 0-T-0, project: ps4-linux, lines of code: 53
Example 6: arch_get_unmapped_area

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    struct vm_unmapped_area_info info;

    if (len > TASK_SIZE - mmap_min_addr)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        return addr;

    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = mm->mmap_base;
    info.high_limit = TASK_SIZE;
    if (filp || (flags & MAP_SHARED))
        info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
    else
        info.align_mask = 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    return vm_unmapped_area(&info);
}
Contributor ID: 0-T-0, project: ps4-linux, lines of code: 33
Example 7: hugetlb_get_unmapped_area

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct vm_unmapped_area_info info;

    if (len > RGN_MAP_LIMIT)
        return -ENOMEM;
    if (len & ~HPAGE_MASK)
        return -EINVAL;

    /* Handle MAP_FIXED */
    if (flags & MAP_FIXED) {
        if (prepare_hugepage_range(file, addr, len))
            return -EINVAL;
        return addr;
    }

    /* This code assumes that RGN_HPAGE != 0. */
    if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
        addr = HPAGE_REGION_BASE;

    info.flags = 0;
    info.length = len;
    info.low_limit = addr;
    info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
    info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
    info.align_offset = 0;
    return vm_unmapped_area(&info);
}
Contributor ID: 0x000000FF, project: Linux4Edison, lines of code: 29
Example 8: arch_get_unmapped_area

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct vm_unmapped_area_info info;

    if (flags & MAP_FIXED) {
        /* We do not accept a shared mapping if it would violate
         * cache aliasing constraints.
         */
        if ((flags & MAP_SHARED) &&
            ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
            return -EINVAL;
        return addr;
    }

    /* See asm-sparc/uaccess.h */
    if (len > TASK_SIZE - PAGE_SIZE)
        return -ENOMEM;
    if (!addr)
        addr = TASK_UNMAPPED_BASE;

    info.flags = 0;
    info.length = len;
    info.low_limit = addr;
    info.high_limit = TASK_SIZE;
    info.align_mask = (flags & MAP_SHARED) ? (PAGE_MASK & (SHMLBA - 1)) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    return vm_unmapped_area(&info);
}
Contributor ID: AdrianHuang, project: linux-3.8.13, lines of code: 29
Example 9: arch_get_unmapped_area

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    struct vm_unmapped_area_info info;
    unsigned long begin, end;

    if (flags & MAP_FIXED)
        return addr;

    find_start_end(flags, &begin, &end);

    if (len > end)
        return -ENOMEM;

    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (end - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = begin;
    info.high_limit = end;
    info.align_mask = filp ? get_align_mask() : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    return vm_unmapped_area(&info);
}
Contributor ID: cHunter791, project: pmfs, lines of code: 33
Example 10: hugetlb_get_unmapped_area_topdown

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        const unsigned long len, const unsigned long pgoff,
        const unsigned long flags, const unsigned long offset)
{
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0;
    struct vm_unmapped_area_info info;

    /* This should only ever run for 32-bit processes. */
    BUG_ON(!test_thread_flag(TIF_32BIT));

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = mm->mmap_base;
    info.align_mask = PAGE_MASK & ~HPAGE_MASK;
    info.align_offset = 0;
    info.threadstack_offset = offset;
    addr = vm_unmapped_area(&info);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    if (addr & ~PAGE_MASK) {
        VM_BUG_ON(addr != -ENOMEM);
        info.flags = 0;
        info.low_limit = TASK_UNMAPPED_BASE;

#ifdef CONFIG_PAX_RANDMMAP
        if (mm->pax_flags & MF_PAX_RANDMMAP)
            info.low_limit += mm->delta_mmap;
#endif

        info.high_limit = STACK_TOP32;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: magarto, project: linux-rpi-grsecurity, lines of code: 45
Example 11: arch_get_unmapped_area

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma, *prev;
    unsigned long task_size = TASK_SIZE;
    int do_color_align, last_mmap;
    struct vm_unmapped_area_info info;

    if (len > task_size)
        return -ENOMEM;

    do_color_align = 0;
    if (filp || (flags & MAP_SHARED))
        do_color_align = 1;
    last_mmap = GET_LAST_MMAP(filp);

    if (flags & MAP_FIXED) {
        if ((flags & MAP_SHARED) && last_mmap &&
            (addr - shared_align_offset(last_mmap, pgoff))
                & (SHM_COLOUR - 1))
            return -EINVAL;
        goto found_addr;
    }

    if (addr) {
        if (do_color_align && last_mmap)
            addr = COLOR_ALIGN(addr, last_mmap, pgoff);
        else
            addr = PAGE_ALIGN(addr);

        vma = find_vma_prev(mm, addr, &prev);
        if (task_size - len >= addr &&
            (!vma || addr + len <= vm_start_gap(vma)) &&
            (!prev || addr >= vm_end_gap(prev)))
            goto found_addr;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = mm->mmap_legacy_base;
    info.high_limit = mmap_upper_limit();
    info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
    info.align_offset = shared_align_offset(last_mmap, pgoff);
    addr = vm_unmapped_area(&info);

found_addr:
    if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
        SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

    return addr;
}
Contributor ID: CaptainThrowback, project: android_kernel_htc_pme, lines of code: 52
Example 12: arch_get_unmapped_area

unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
            unsigned long pgoff, unsigned long flags)
{
    long map_shared = (flags & MAP_SHARED);
    unsigned long align_mask = 0;
    struct mm_struct *mm = current->mm;
    struct vm_unmapped_area_info info;
    unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);

    if (len > RGN_MAP_LIMIT)
        return -ENOMEM;

    /* handle fixed mapping: prevent overlap with huge pages */
    if (flags & MAP_FIXED) {
        if (is_hugepage_only_range(mm, addr, len))
            return -EINVAL;
        return addr;
    }

#ifdef CONFIG_HUGETLB_PAGE
    if (REGION_NUMBER(addr) == RGN_HPAGE)
        addr = 0;
#endif

#ifdef CONFIG_PAX_RANDMMAP
    if (mm->pax_flags & MF_PAX_RANDMMAP)
        addr = mm->free_area_cache;
    else
#endif

    if (!addr)
        addr = TASK_UNMAPPED_BASE;

    if (map_shared && (TASK_SIZE > 0xfffffffful))
        /*
         * For 64-bit tasks, align shared segments to 1MB to avoid potential
         * performance penalty due to virtual aliasing (see ASDM). For 32-bit
         * tasks, we prefer to avoid exhausting the address space too quickly by
         * limiting alignment to a single page.
         */
        align_mask = PAGE_MASK & (SHMLBA - 1);

    info.flags = 0;
    info.length = len;
    info.low_limit = addr;
    info.high_limit = TASK_SIZE;
    info.align_mask = align_mask;
    info.align_offset = 0;
    info.threadstack_offset = offset;
    return vm_unmapped_area(&info);
}
Contributor ID: AdaLovelance, project: lxcGrsecKernels, lines of code: 52
Example 13: hugetlb_get_unmapped_area_new_pmd

/* Do a full search to find an area without any nearby normal pages. */
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
    struct vm_unmapped_area_info info;

    info.flags = 0;
    info.length = len;
    info.low_limit = TASK_UNMAPPED_BASE;
    info.high_limit = TASK_SIZE;
    info.align_mask = PAGE_MASK & HUGEPT_MASK;
    info.align_offset = 0;
    return vm_unmapped_area(&info);
}
Contributor ID: 01org, project: thunderbolt-software-kernel-tree, lines of code: 14
Example 14: pmfs_get_unmapped_area

static unsigned long
pmfs_get_unmapped_area(struct file *file, unsigned long addr,
            unsigned long len, unsigned long pgoff,
            unsigned long flags)
{
    unsigned long align_size;
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    struct inode *inode = file->f_mapping->host;
    struct pmfs_inode *pi = pmfs_get_inode(inode->i_sb, inode->i_ino);
    struct vm_unmapped_area_info info;

    if (len > TASK_SIZE)
        return -ENOMEM;

    if (pi->i_blk_type == PMFS_BLOCK_TYPE_1G)
        align_size = PUD_SIZE;
    else if (pi->i_blk_type == PMFS_BLOCK_TYPE_2M)
        align_size = PMD_SIZE;
    else
        align_size = PAGE_SIZE;

    if (flags & MAP_FIXED) {
        /* FIXME: We could use 4K mappings as fallback. */
        if (len & (align_size - 1))
            return -EINVAL;
        if (addr & (align_size - 1))
            return -EINVAL;
        return addr;
    }

    if (addr) {
        addr = ALIGN(addr, align_size);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    /*
     * FIXME: Using the following values for low_limit and high_limit
     * implicitly disables ASLR. Awaiting a better way to have this fixed.
     */
    info.flags = 0;
    info.length = len;
    info.low_limit = TASK_UNMAPPED_BASE;
    info.high_limit = TASK_SIZE;
    info.align_mask = align_size - 1;
    info.align_offset = 0;
    return vm_unmapped_area(&info);
}
Contributor ID: Andiry, project: PMFS-test, lines of code: 51
Example 15: hugetlb_get_unmapped_area_bottomup

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags, unsigned long offset)
{
    struct mm_struct *mm = current->mm;
    unsigned long task_size = TASK_SIZE;
    struct vm_unmapped_area_info info;

    if (test_thread_flag(TIF_32BIT))
        task_size = STACK_TOP32;

    info.flags = 0;
    info.length = len;
    info.low_limit = mm->mmap_base;
    info.high_limit = min(task_size, VA_EXCLUDE_START);
    info.align_mask = PAGE_MASK & ~HPAGE_MASK;
    info.align_offset = 0;
    info.threadstack_offset = offset;
    addr = vm_unmapped_area(&info);

    if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
        VM_BUG_ON(addr != -ENOMEM);
        info.low_limit = VA_EXCLUDE_END;

#ifdef CONFIG_PAX_RANDMMAP
        if (mm->pax_flags & MF_PAX_RANDMMAP)
            info.low_limit += mm->delta_mmap;
#endif

        info.high_limit = task_size;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: magarto, project: linux-rpi-grsecurity, lines of code: 38
Example 16: arch_get_unmapped_area

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    int do_align = 0;
    int aliasing = cache_is_vipt_aliasing();
    struct vm_unmapped_area_info info;

    /*
     * We only need to do colour alignment if either the I or D
     * caches alias.
     */
    if (aliasing)
        do_align = filp || (flags & MAP_SHARED);

    /*
     * We enforce the MAP_FIXED case.
     */
    if (flags & MAP_FIXED) {
        if (aliasing && flags & MAP_SHARED &&
            (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
            return -EINVAL;
        return addr;
    }

    if (len > TASK_SIZE)
        return -ENOMEM;

    if (addr) {
        if (do_align)
            addr = COLOUR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);

        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = mm->mmap_base;
    info.high_limit = TASK_SIZE;
    info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    return vm_unmapped_area(&info);
}
Contributor ID: duki994, project: G900H_LP_Kernel, lines of code: 59
Example 17: arch_get_unmapped_area_topdown

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
            const unsigned long len, const unsigned long pgoff,
            const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0;
    struct vm_unmapped_area_info info;

    /* requested length too big for entire address space */
    if (len > TASK_SIZE)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        return addr;

    /* for MAP_32BIT mappings we force the legacy mmap base */
    if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
        goto bottomup;

    /* requesting a specific address */
    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = mm->mmap_base;
    info.align_mask = filp ? get_align_mask() : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);
    if (!(addr & ~PAGE_MASK))
        return addr;
    VM_BUG_ON(addr != -ENOMEM);

bottomup:
    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
Contributor ID: cHunter791, project: pmfs, lines of code: 50
Example 18: radix__hugetlb_get_unmapped_area

/*
 * A variant of hugetlb_get_unmapped_area doing topdown search
 * FIXME!! should we do as x86 does or non hugetlb area does ?
 * ie, use topdown or not based on mmap_is_legacy check ?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    struct hstate *h = hstate_file(file);
    int fixed = (flags & MAP_FIXED);
    unsigned long high_limit;
    struct vm_unmapped_area_info info;

    high_limit = DEFAULT_MAP_WINDOW;
    if (addr >= high_limit || (fixed && (addr + len > high_limit)))
        high_limit = TASK_SIZE;

    if (len & ~huge_page_mask(h))
        return -EINVAL;
    if (len > high_limit)
        return -ENOMEM;

    if (fixed) {
        if (addr > high_limit - len)
            return -ENOMEM;
        if (prepare_hugepage_range(file, addr, len))
            return -EINVAL;
        return addr;
    }

    if (addr) {
        addr = ALIGN(addr, huge_page_size(h));
        vma = find_vma(mm, addr);
        if (high_limit - len >= addr && addr >= mmap_min_addr &&
            (!vma || addr + len <= vm_start_gap(vma)))
            return addr;
    }
    /*
     * We are always doing a topdown search here. Slice code
     * does that too.
     */
    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = max(PAGE_SIZE, mmap_min_addr);
    info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
    info.align_mask = PAGE_MASK & ~huge_page_mask(h);
    info.align_offset = 0;

    return vm_unmapped_area(&info);
}
Contributor ID: 150balbes, project: Amlogic_s905-kernel, lines of code: 54
Example 19: hugetlb_get_unmapped_area_bottomup

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
        unsigned long addr, unsigned long len,
        unsigned long pgoff, unsigned long flags)
{
    struct hstate *h = hstate_file(file);
    struct vm_unmapped_area_info info;

    info.flags = 0;
    info.length = len;
    info.low_limit = TASK_UNMAPPED_BASE;
    info.high_limit = TASK_SIZE;
    info.align_mask = PAGE_MASK & ~huge_page_mask(h);
    info.align_offset = 0;
    return vm_unmapped_area(&info);
}
Contributor ID: 0-T-0, project: ps4-linux, lines of code: 15
Example 20: arch_get_unmapped_area

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    struct vm_unmapped_area_info info;
    int rc;

    if (len > TASK_SIZE - mmap_min_addr)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        goto check_asce_limit;

    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
            (!vma || addr + len <= vm_start_gap(vma)))
            goto check_asce_limit;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = mm->mmap_base;
    info.high_limit = TASK_SIZE;
    if (filp || (flags & MAP_SHARED))
        info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
    else
        info.align_mask = 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);
    if (addr & ~PAGE_MASK)
        return addr;

check_asce_limit:
    if (addr + len > current->mm->context.asce_limit) {
        rc = crst_table_upgrade(mm, addr + len);
        if (rc)
            return (unsigned long) rc;
    }

    return addr;
}
Contributor ID: Endika, project: linux, lines of code: 45
Example 21: arch_get_unmapped_area_topdown

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
            const unsigned long len, const unsigned long pgoff,
            const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long task_size = STACK_TOP32;
    unsigned long addr = addr0;
    int do_color_align;
    struct vm_unmapped_area_info info;

    /* This should only ever run for 32-bit processes. */
    BUG_ON(!test_thread_flag(TIF_32BIT));

    if (flags & MAP_FIXED) {
        /* We do not accept a shared mapping if it would violate
         * cache aliasing constraints.
         */
        if ((flags & MAP_SHARED) &&
            ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
            return -EINVAL;
        return addr;
    }

    if (unlikely(len > task_size))
        return -ENOMEM;

    do_color_align = 0;
    if (filp || (flags & MAP_SHARED))
        do_color_align = 1;

    /* requesting a specific address */
    if (addr) {
        if (do_color_align)
            addr = COLOR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);

        vma = find_vma(mm, addr);
        if (task_size - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = mm->mmap_base;
    info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    if (addr & ~PAGE_MASK) {
        VM_BUG_ON(addr != -ENOMEM);
        info.flags = 0;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = STACK_TOP32;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: 03199618, project: linux, lines of code: 69
Example 22: arch_get_unmapped_area_topdown

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
            const unsigned long len, const unsigned long pgoff,
            const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0, pax_task_size = TASK_SIZE;
    struct vm_unmapped_area_info info;
    unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);

#ifdef CONFIG_PAX_SEGMEXEC
    if (mm->pax_flags & MF_PAX_SEGMEXEC)
        pax_task_size = SEGMEXEC_TASK_SIZE;
#endif

    pax_task_size -= PAGE_SIZE;

    /* requested length too big for entire address space */
    if (len > pax_task_size)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        return addr;

#ifdef CONFIG_PAX_PAGEEXEC
    if (!(__supported_pte_mask & _PAGE_NX) &&
        (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
        goto bottomup;
#endif

#ifdef CONFIG_PAX_RANDMMAP
    if (!(mm->pax_flags & MF_PAX_RANDMMAP))
#endif

    /* requesting a specific address */
    if (addr) {
        addr = PAGE_ALIGN(addr);
        if (pax_task_size - len >= addr) {
            vma = find_vma(mm, addr);
            if (check_heap_stack_gap(vma, addr, len, offset))
                return addr;
        }
    }

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = mm->mmap_base;
    info.align_mask = filp ? get_align_mask() : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    info.threadstack_offset = offset;
    addr = vm_unmapped_area(&info);
    if (!(addr & ~PAGE_MASK))
        return addr;
    VM_BUG_ON(addr != -ENOMEM);

bottomup:
    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
Contributor ID: AdaLovelance, project: lxcGrsecKernels, lines of code: 66
Example 23: arch_get_unmapped_area

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long pax_task_size = TASK_SIZE;
    struct vm_unmapped_area_info info;
    unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);

#ifdef CONFIG_PAX_SEGMEXEC
    if (mm->pax_flags & MF_PAX_SEGMEXEC)
        pax_task_size = SEGMEXEC_TASK_SIZE;
#endif

    pax_task_size -= PAGE_SIZE;

    if (len > pax_task_size)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        return addr;

#ifdef CONFIG_PAX_RANDMMAP
    if (!(mm->pax_flags & MF_PAX_RANDMMAP))
#endif

    if (addr) {
        addr = PAGE_ALIGN(addr);
        if (pax_task_size - len >= addr) {
            vma = find_vma(mm, addr);
            if (check_heap_stack_gap(vma, addr, len, offset))
                return addr;
        }
    }

    info.flags = 0;
    info.length = len;
    info.align_mask = filp ? get_align_mask() : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    info.threadstack_offset = offset;

#ifdef CONFIG_PAX_PAGEEXEC
    if (!(__supported_pte_mask & _PAGE_NX) &&
        (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
        info.low_limit = 0x00110000UL;
        info.high_limit = mm->start_code;

#ifdef CONFIG_PAX_RANDMMAP
        if (mm->pax_flags & MF_PAX_RANDMMAP)
            info.low_limit += mm->delta_mmap & 0x03FFF000UL;
#endif

        if (info.low_limit < info.high_limit) {
            addr = vm_unmapped_area(&info);
            if (!IS_ERR_VALUE(addr))
                return addr;
        }
    } else
#endif

    info.low_limit = mm->mmap_base;
    info.high_limit = pax_task_size;

    return vm_unmapped_area(&info);
}
Contributor ID: AdaLovelance, project: lxcGrsecKernels, lines of code: 65
Example 24: arch_get_unmapped_area_common

static unsigned long arch_get_unmapped_area_common(struct file *filp,
    unsigned long addr0, unsigned long len, unsigned long pgoff,
    unsigned long flags, enum mmap_allocation_direction dir)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long addr = addr0;
    int do_color_align;
    struct vm_unmapped_area_info info;

    if (unlikely(len > TASK_SIZE))
        return -ENOMEM;

    if (flags & MAP_FIXED) {
        /* Even MAP_FIXED mappings must reside within TASK_SIZE */
        if (TASK_SIZE - len < addr)
            return -EINVAL;

        /*
         * We do not accept a shared mapping if it would violate
         * cache aliasing constraints.
         */
        if ((flags & MAP_SHARED) &&
            ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
            return -EINVAL;
        return addr;
    }

    do_color_align = 0;
    if (filp || (flags & MAP_SHARED))
        do_color_align = 1;

    /* requesting a specific address */
#ifdef CONFIG_PAX_RANDMMAP
    if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
#endif

    if (addr) {
        if (do_color_align)
            addr = COLOUR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);

        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
            return addr;
    }

    info.length = len;
    info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;

    if (dir == DOWN) {
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        addr = vm_unmapped_area(&info);

        if (!(addr & ~PAGE_MASK))
            return addr;

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
    }

    info.flags = 0;
    info.low_limit = mm->mmap_base;
    info.high_limit = TASK_SIZE;
    return vm_unmapped_area(&info);
}
Contributor ID: garyvan, project: openwrt-1.6, lines of code: 75
Example 25: arch_get_unmapped_area_topdown

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
            const unsigned long len, const unsigned long pgoff,
            const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0;
    int do_color_align, last_mmap;
    struct vm_unmapped_area_info info;

#ifdef CONFIG_64BIT
    /* This should only ever run for 32-bit processes. */
    BUG_ON(!test_thread_flag(TIF_32BIT));
#endif

    /* requested length too big for entire address space */
    if (len > TASK_SIZE)
        return -ENOMEM;

    do_color_align = 0;
    if (filp || (flags & MAP_SHARED))
        do_color_align = 1;
    last_mmap = GET_LAST_MMAP(filp);

    if (flags & MAP_FIXED) {
        if ((flags & MAP_SHARED) && last_mmap &&
            (addr - shared_align_offset(last_mmap, pgoff))
            & (SHM_COLOUR - 1))
            return -EINVAL;
        goto found_addr;
    }

    /* requesting a specific address */
    if (addr) {
        if (do_color_align && last_mmap)
            addr = COLOR_ALIGN(addr, last_mmap, pgoff);
        else
            addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            goto found_addr;
    }

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = mm->mmap_base;
    info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
    info.align_offset = shared_align_offset(last_mmap, pgoff);
    addr = vm_unmapped_area(&info);
    if (!(addr & ~PAGE_MASK))
        goto found_addr;
    VM_BUG_ON(addr != -ENOMEM);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
    if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
        SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

    return addr;
}
Contributor ID: pashkouski, project: linux, lines of code: 70
Example 26: arch_get_unmapped_area_topdown

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
            const unsigned long len, const unsigned long pgoff,
            const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0;
    int do_align = 0;
    int aliasing = cache_is_vipt_aliasing();
    struct vm_unmapped_area_info info;

    /*
     * We only need to do colour alignment if either the I or D
     * caches alias.
     */
    if (aliasing)
        do_align = filp || (flags & MAP_SHARED);

    /* requested length too big for entire address space */
    if (len > TASK_SIZE)
        return -ENOMEM;

    if (flags & MAP_FIXED) {
        if (aliasing && flags & MAP_SHARED &&
            (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
            return -EINVAL;
        return addr;
    }

    /* requesting a specific address */
    if (addr) {
        if (do_align)
            addr = COLOUR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = FIRST_USER_ADDRESS;
    info.high_limit = mm->mmap_base;
    info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    if (addr & ~PAGE_MASK) {
        VM_BUG_ON(addr != -ENOMEM);
        info.flags = 0;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}
Contributor ID: duki994, project: G900H_LP_Kernel, lines of code: 66
Note: The vm_unmapped_area examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution and use must follow each project's license. Do not repost without permission.