
Self-study tutorial: C++ trylock_page function code examples

51自学网 2021-06-03 08:59:20

This article collects typical usage examples of the trylock_page function in C/C++ (in practice, Linux kernel C source). If you have been wondering what trylock_page does, how it is called, and what real uses of it look like, the hand-picked code examples below should help.

A total of 15 code examples of the trylock_page function are shown below, sorted by popularity by default.
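
Before the examples, here is a minimal sketch of the pattern they all share: trylock_page() attempts to take the page lock without sleeping, returns false if the page is already locked, and the caller only does its work (and later calls unlock_page()) when the lock was actually obtained. This sketch is not taken from any of the projects below; the function name example_try_touch_page and the placeholder work are hypothetical, for illustration only.

/*
 * Minimal sketch of the common trylock_page() pattern (illustration only,
 * not from the examples below). The helper name is hypothetical.
 */
static void example_try_touch_page(struct page *page)
{
	if (!trylock_page(page))	/* non-blocking: fails if someone else holds the lock */
		return;			/* skip the page instead of sleeping on it */

	/* ... operate on the locked page here ... */

	unlock_page(page);		/* the page lock must always be released explicitly */
}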

Example 1: free_swap_cache

/**
 * free_swap_cache: remove the page from the swap cache
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}
Developer: yl849646685, project: linux-2.6.32, lines of code: 10


Example 2: free_swap_cache

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * Its ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && trylock_page(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}
Developer: kizukukoto, project: WDN900_GPL, lines of code: 15


Example 3: release_buffer_page

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
Developer: 33d, project: linux-2.6.21-hh20, lines of code: 42


Example 4: free_swap_and_cache

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	if (is_migration_entry(entry))
		return;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, swp_offset(entry)) == 1) {
			page = find_get_page(&swapper_space, entry.val);
			if (page && unlikely(!trylock_page(page))) {
				page_cache_release(page);
				page = NULL;
			}
		}
		spin_unlock(&swap_lock);
	}
	if (page) {
		int one_user;

		BUG_ON(PagePrivate(page));
		one_user = (page_count(page) == 2);
		/* Only cache user (+us), or swap space full? Free it! */
		/* Also recheck PageSwapCache after page is locked (above) */
		if (PageSwapCache(page) && !PageWriteback(page) &&
					(one_user || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}
Developer: deepikateriar, project: Onlive-Source-Backup, lines of code: 39


Example 5: read_cache_pages_invalidate_page

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}
Developer: Core2idiot, project: Kernel-Samsung-3.0...-, lines of code: 20


Example 6: pagevec_swap_free

/**
 * pagevec_swap_free - try to free swap space from the pages in a pagevec
 * @pvec: pagevec with swapcache pages to free the swap space of
 *
 * The caller needs to hold an extra reference to each page and
 * not hold the page lock on the pages.  This function uses a
 * trylock on the page lock so it may not always free the swap
 * space associated with a page.
 */
void pagevec_swap_free(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PageSwapCache(page) && trylock_page(page)) {
			if (PageSwapCache(page))
				remove_exclusive_swap_page_ref(page);
			unlock_page(page);
		}
	}
}
Developer: kizukukoto, project: WDN900_GPL, lines of code: 23
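
As a rough caller-side sketch of the function above (an assumption of ours, not code from the WDN900_GPL project): the pagevec would typically be filled with pagevec_add() before being handed to pagevec_swap_free(). The helper name example_free_swap_space and the pages/nr parameters are made up, and each page is assumed to be referenced by the caller and unlocked, as the kernel-doc comment above requires.

/*
 * Hedged usage sketch for pagevec_swap_free() (not from the project above).
 * Assumes each page in 'pages' is referenced by the caller and unlocked.
 */
static void example_free_swap_space(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);				/* 0: pages are not "cold" */
	for (i = 0; i < nr; i++)
		if (!pagevec_add(&pvec, pages[i]))	/* stop once the pagevec is full */
			break;

	pagevec_swap_free(&pvec);	/* trylock each page, free swap space where possible */
	/* the pagevec holds no references of its own; the caller keeps its page refs */
}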


Example 7: invalidate_mapping_pages

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && __pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
Developer: spacex, project: kernel-centos7, lines of code: 62
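
For context, a hedged caller-side sketch (not taken from the kernel-centos7 tree above; the helper name and the inode variable are hypothetical): invalidating an entire mapping is commonly done by passing the full page-offset range, which, as documented above, only drops clean, unlocked pages that are not under writeback or mapped.

/*
 * Hedged usage sketch (illustration only): ask the kernel to drop all clean,
 * unlocked page-cache pages of one inode's mapping.
 */
static void example_drop_clean_pagecache(struct inode *inode)
{
	unsigned long nr;

	/* 0 .. (pgoff_t)-1 covers every possible page offset in the mapping */
	nr = invalidate_mapping_pages(inode->i_mapping, 0, (pgoff_t)-1);
	pr_debug("invalidated %lu pages\n", nr);
}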


Example 8: invalidate_mapping_pages

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
Developer: GuojianZhou, project: linux-yocto-3.14, lines of code: 62


Example 9: no_page_table

//......... part of the code omitted here .........
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
		if (pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		get_page(page);

		/* drop the pgmap reference now that we hold the page */
		if (pgmap) {
			put_dev_pagemap(pgmap);
			pgmap = NULL;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}
Developer: BWhitten, project: linux-stable, lines of code: 101


Example 10: journal_commit_transaction

//......... part of the code omitted here .........
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	blk_start_plug(&plug);
	err = journal_submit_data_buffers(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

	/*
	 * Wait for all previously submitted IO to complete.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			spin_lock(&journal->j_list_lock);
		}
		if (unlikely(!buffer_uptodate(bh))) {
			if (!trylock_page(bh->b_page)) {
				spin_unlock(&journal->j_list_lock);
				lock_page(bh->b_page);
				spin_lock(&journal->j_list_lock);
			}
			if (bh->b_page->mapping)
				set_bit(AS_EIO, &bh->b_page->mapping->flags);

			unlock_page(bh->b_page);
			SetPageError(bh->b_page);
			err = -EIO;
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
		    jh->b_transaction == commit_transaction &&
		    jh->b_jlist == BJ_Locked)
			__journal_unfile_buffer(jh);
		jbd_unlock_bh_state(bh);
		release_data_buffer(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (err) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: Detected IO errors while flushing file data "
			"on %s\n", bdevname(journal->j_fs_dev, b));
Developer: 33d, project: linux-2.6.21-hh20, lines of code: 67


Example 11: truncate_inode_pages_range

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
    pgoff_t		start;		/* inclusive */
    pgoff_t		end;		/* exclusive */
    unsigned int	partial_start;	/* inclusive */
    unsigned int	partial_end;	/* exclusive */
    struct pagevec	pvec;
    pgoff_t		indices[PAGEVEC_SIZE];
    pgoff_t		index;
    int		i;

    cleancache_invalidate_inode(mapping);
    if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
        return;

    /* Offsets within partial pages */
    partial_start = lstart & (PAGE_CACHE_SIZE - 1);
    partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

    /*
     * 'start' and 'end' always covers the range of pages to be fully
     * truncated. Partial pages are covered with 'partial_start' at the
     * start of the range and 'partial_end' at the end of the range.
     * Note that 'end' is exclusive while 'lend' is inclusive.
     */
    start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    if (lend == -1)
        /*
         * lend == -1 indicates end-of-file so we have to set 'end'
         * to the highest possible pgoff_t and since the type is
         * unsigned we're using -1.
         */
        end = -1;
    else
        end = (lend + 1) >> PAGE_CACHE_SHIFT;

    pagevec_init(&pvec, 0);
    index = start;
    while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
            min(end - index, (pgoff_t)PAGEVEC_SIZE),
            indices)) {
        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];

            /* We rely upon deletion not changing page->index */
            index = indices[i];
            if (index >= end)
                break;

            if (radix_tree_exceptional_entry(page)) {
                clear_exceptional_entry(mapping, index, page);
                continue;
            }

            if (!trylock_page(page))
                continue;
            WARN_ON(page->index != index);
            if (PageWriteback(page)) {
                unlock_page(page);
                continue;
            }
            truncate_inode_page(mapping, page);
            unlock_page(page);
        }
        pagevec_remove_exceptionals(&pvec);
        pagevec_release(&pvec);
        cond_resched();
        index++;
    }

    if (partial_start) {
        struct page *page = find_lock_page(mapping, start - 1);
        if (page) {
            unsigned int top = PAGE_CACHE_SIZE;
            if (start > end) {
//......... part of the code omitted here .........
Developer: stefanberger, project: linux-tpmdd, lines of code: 101


Example 12: invalidate_mapping_pages

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/* 'end' is in the middle of THP */
				if (index ==  round_down(end, HPAGE_PMD_NR))
					continue;
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
Developer: HarryWei, project: linux, lines of code: 74


Example 13: afs_write_back_from_locked_page

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}
//......... part of the code omitted here .........
Developer: krzk, project: linux, lines of code: 101


Example 14: __m4u_get_user_pages

//......... part of the code omitted here .........
					if (ret &
					    (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) {
						M4UMSG("handle_mm_fault() error: invalide memory address, vaddr:0x%lx (%d pages are allocated), module=%d\n",
						       start, i, eModuleID);
						//m4u_dump_maps(start);
						return i ? i : -EFAULT;
					}
					BUG();
				}
				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagStart, eModuleID, start&(~0xFFF));
				page = follow_page(vma, start, foll_flags);
				MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagEnd, eModuleID, 0x1000);
			}
			if (IS_ERR(page)) {
				M4UMSG("handle_mm_fault() error: faulty page is returned, vaddr:0x%lx (%d pages are allocated), module=%d \n",
				       start, i, eModuleID);
				//m4u_dump_maps(start);
				return i ? i : PTR_ERR(page);
			}
			if (pages) {
				pages[i] = page;
				MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagStart, eModuleID, start&(~0xFFF));
				/* Use retry version to guarantee it will succeed in getting the lock */
				trycnt = 3000;
				do {
					if (trylock_page(page)) {
						mlock_vma_page(page);
						unlock_page(page);

						//make sure hw pte is not 0
						{
							int i;
							for(i=0; i<3000; i++)
							{
								if(!m4u_user_v2p(start))
								{
									handle_mm_fault(mm, vma, start, (foll_flags & FOLL_WRITE)? FAULT_FLAG_WRITE : 0);
									cond_resched();
								}
								else
									break;
							}
							if(i==3000)
								M4UMSG("error: cannot handle_mm_fault to get hw pte: va=0x%x\n", start);
						}

						break;
					}
				} while (trycnt-- > 0);

				if(PageMlocked(page)==0)
				{
					M4UMSG("Can't mlock page\n");
					dump_page(page);
				}
				else
				{
					unsigned int pfn = page_to_pfn(page);
					if(pfn < mlock_cnt_size)
					{
						pMlock_cnt[page_to_pfn(page)]++;
					}
					else
					{
						M4UERR("mlock_cnt_size is too small: pfn=%d, size=%d\n", pfn, mlock_cnt_size);
					}

					//M4UMSG("lock page:\n");
					//dump_page(page);
				}
				MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagEnd, eModuleID, 0x1000);
			}
			if (vmas)
				vmas[i] = vma;

			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
Developer: CobraJet93, project: kernel-3.10.54, lines of code: 101


Example 15: no_page_table

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}
Developer: LarryShang, project: linux, lines of code: 94



Note: the trylock_page function examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation hosting platforms; the snippets were selected from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; for redistribution and use, please refer to the License of the corresponding project. Do not repost without permission.

