您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ unlock_page函数代码示例

51自学网 2021-06-03 09:11:03
  C++
这篇教程C++ unlock_page函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C++中unlock_page函数的典型用法代码示例。如果您正苦于以下问题:C++ unlock_page函数的具体用法?C++ unlock_page怎么用?C++ unlock_page使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了unlock_page函数的25个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: ext4_try_to_write_inline_data

/*
 * Try to write data in the inode.
 * If the inode has inline data, check whether the new write can be
 * in the inode also. If not, create the page the handle, move the data
 * to the page make it update and let the later codes create extent for it.
 *
 * Returns 1 when the write can proceed inline (*pagep is set, the page is
 * locked and the journal handle stays open), 0 when inline data vanished
 * under us, or a negative errno on failure.
 */
int ext4_try_to_write_inline_data(struct address_space *mapping,
				  struct inode *inode,
				  loff_t pos, unsigned len,
				  unsigned flags,
				  struct page **pagep)
{
	int ret;
	handle_t *handle;
	struct page *page;
	struct ext4_iloc iloc;

	/* Write extends beyond inline capacity: fall back to extents. */
	if (pos + len > ext4_get_max_inline_size(inode))
		goto convert;

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		return ret;

	/*
	 * The possible write could happen in the inode,
	 * so try to reserve the space in inode first.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		handle = NULL;
		goto out;
	}

	ret = ext4_prepare_inline_data(handle, inode, pos + len);
	if (ret && ret != -ENOSPC)
		goto out;

	/* We don't have space in inline inode, so convert it to extent. */
	if (ret == -ENOSPC) {
		ext4_journal_stop(handle);
		brelse(iloc.bh);
		goto convert;
	}

	/* Avoid fs recursion while holding the journal handle. */
	flags |= AOP_FLAG_NOFS;

	/* Inline data always lives in page 0. */
	page = grab_cache_page_write_begin(mapping, 0, flags);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	*pagep = page;
	down_read(&EXT4_I(inode)->xattr_sem);
	/* Someone converted the inode to extents while we were working. */
	if (!ext4_has_inline_data(inode)) {
		ret = 0;
		unlock_page(page);
		page_cache_release(page);
		goto out_up_read;
	}

	if (!PageUptodate(page)) {
		ret = ext4_read_inline_page(inode, page);
		if (ret < 0)
			goto out_up_read;
	}

	/* Success: caller owns the handle from here on. */
	ret = 1;
	handle = NULL;
out_up_read:
	up_read(&EXT4_I(inode)->xattr_sem);
out:
	if (handle)
		ext4_journal_stop(handle);
	brelse(iloc.bh);
	return ret;
convert:
	return ext4_convert_inline_data_to_extent(mapping,
						  inode, flags);
}
开发者ID:Runner85sx,项目名称:android_kernel_huawei_msm8909,代码行数:82,


示例2: jffs2_do_readpage_unlock

/*
 * Read one page of a JFFS2 file via the nolock helper and then drop the
 * page lock, as the readpage protocol requires. Returns the helper's
 * result (0 on success, negative errno on failure).
 */
int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
{
	int err;

	err = jffs2_do_readpage_nolock(inode, pg);
	unlock_page(pg);

	return err;
}
开发者ID:BackupTheBerlios,项目名称:wl530g-svn,代码行数:6,


示例3: gfs2_write_begin

static int gfs2_write_begin(struct file *file, struct address_space *mapping,			    loff_t pos, unsigned len, unsigned flags,			    struct page **pagep, void **fsdata){	struct gfs2_inode *ip = GFS2_I(mapping->host);	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;	unsigned requested = 0;	int alloc_required;	int error = 0;	pgoff_t index = pos >> PAGE_CACHE_SHIFT;	unsigned from = pos & (PAGE_CACHE_SIZE - 1);	struct page *page;	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);	error = gfs2_glock_nq(&ip->i_gh);	if (unlikely(error))		goto out_uninit;	if (&ip->i_inode == sdp->sd_rindex) {		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,					   GL_NOCACHE, &m_ip->i_gh);		if (unlikely(error)) {			gfs2_glock_dq(&ip->i_gh);			goto out_uninit;		}	}	alloc_required = gfs2_write_alloc_required(ip, pos, len);	if (alloc_required || gfs2_is_jdata(ip))		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);	if (alloc_required) {		struct gfs2_alloc_parms ap = { .aflags = 0, };		requested = data_blocks + ind_blocks;		ap.target = requested;		error = gfs2_quota_lock_check(ip, &ap);		if (error)			goto out_unlock;		error = gfs2_inplace_reserve(ip, &ap);		if (error)			goto out_qunlock;	}	rblocks = RES_DINODE + ind_blocks;	if (gfs2_is_jdata(ip))		rblocks += data_blocks ? 
data_blocks : 1;	if (ind_blocks || data_blocks)		rblocks += RES_STATFS + RES_QUOTA;	if (&ip->i_inode == sdp->sd_rindex)		rblocks += 2 * RES_STATFS;	if (alloc_required)		rblocks += gfs2_rg_blocks(ip, requested);	error = gfs2_trans_begin(sdp, rblocks,				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);	if (error)		goto out_trans_fail;	error = -ENOMEM;	flags |= AOP_FLAG_NOFS;	page = grab_cache_page_write_begin(mapping, index, flags);	*pagep = page;	if (unlikely(!page))		goto out_endtrans;	if (gfs2_is_stuffed(ip)) {		error = 0;		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {			error = gfs2_unstuff_dinode(ip, page);			if (error == 0)				goto prepare_write;		} else if (!PageUptodate(page)) {			error = stuffed_readpage(ip, page);		}		goto out;	}prepare_write:	error = __block_write_begin(page, from, len, gfs2_block_map);out:	if (error == 0)		return 0;	unlock_page(page);	page_cache_release(page);	gfs2_trans_end(sdp);	if (pos + len > ip->i_inode.i_size)		gfs2_trim_blocks(&ip->i_inode);	goto out_trans_fail;out_endtrans:	gfs2_trans_end(sdp);out_trans_fail:	if (alloc_required) {		gfs2_inplace_release(ip);out_qunlock://.........这里部分代码省略.........
开发者ID:a2hojsjsjs,项目名称:linux,代码行数:101,


示例4: reiserfs_xattr_set_handle

/* * inode->i_mutex: down */intreiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,			  struct inode *inode, const char *name,			  const void *buffer, size_t buffer_size, int flags){	int err = 0;	struct dentry *dentry;	struct page *page;	char *data;	size_t file_pos = 0;	size_t buffer_pos = 0;	size_t new_size;	__u32 xahash = 0;	if (get_inode_sd_version(inode) == STAT_DATA_V1)		return -EOPNOTSUPP;	reiserfs_write_unlock(inode->i_sb);	if (!buffer) {		err = lookup_and_delete_xattr(inode, name);		reiserfs_write_lock(inode->i_sb);		return err;	}	dentry = xattr_lookup(inode, name, flags);	if (IS_ERR(dentry)) {		reiserfs_write_lock(inode->i_sb);		return PTR_ERR(dentry);	}	down_write(&REISERFS_I(inode)->i_xattr_sem);	reiserfs_write_lock(inode->i_sb);	xahash = xattr_hash(buffer, buffer_size);	while (buffer_pos < buffer_size || buffer_pos == 0) {		size_t chunk;		size_t skip = 0;		size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1));		if (buffer_size - buffer_pos > PAGE_CACHE_SIZE)			chunk = PAGE_CACHE_SIZE;		else			chunk = buffer_size - buffer_pos;		page = reiserfs_get_page(dentry->d_inode, file_pos);		if (IS_ERR(page)) {			err = PTR_ERR(page);			goto out_unlock;		}		lock_page(page);		data = page_address(page);		if (file_pos == 0) {			struct reiserfs_xattr_header *rxh;			skip = file_pos = sizeof(struct reiserfs_xattr_header);			if (chunk + skip > PAGE_CACHE_SIZE)				chunk = PAGE_CACHE_SIZE - skip;			rxh = (struct reiserfs_xattr_header *)data;			rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);			rxh->h_hash = cpu_to_le32(xahash);		}		err = __reiserfs_write_begin(page, page_offset, chunk + skip);		if (!err) {			if (buffer)				memcpy(data + skip, buffer + buffer_pos, chunk);			err = reiserfs_commit_write(NULL, page, page_offset,						    page_offset + chunk +						    skip);		}		unlock_page(page);		reiserfs_put_page(page);		buffer_pos += chunk;		file_pos += chunk;		skip = 0;		if (err || buffer_size == 0 || !buffer)			break;	}	new_size = buffer_size + 
sizeof(struct reiserfs_xattr_header);	if (!err && new_size < i_size_read(dentry->d_inode)) {		struct iattr newattrs = {			.ia_ctime = current_fs_time(inode->i_sb),			.ia_size = new_size,			.ia_valid = ATTR_SIZE | ATTR_CTIME,		};		reiserfs_write_unlock(inode->i_sb);		mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);		inode_dio_wait(dentry->d_inode);		reiserfs_write_lock(inode->i_sb);		err = reiserfs_setattr(dentry, &newattrs);		mutex_unlock(&dentry->d_inode->i_mutex);	} else//.........这里部分代码省略.........
开发者ID:303750856,项目名称:linux-3.1,代码行数:101,


示例5: journal_commit_transaction

//.........这里部分代码省略.........	jbd_debug (3, "JBD: commit phase 2/n");	/*	 * Now start flushing things to disk, in the order they appear	 * on the transaction lists.  Data blocks go first.	 */	err = journal_submit_data_buffers(journal, commit_transaction);	/*	 * Wait for all previously submitted IO to complete.	 */	spin_lock(&journal->j_list_lock);	while (commit_transaction->t_locked_list) {		struct buffer_head *bh;		jh = commit_transaction->t_locked_list->b_tprev;		bh = jh2bh(jh);		get_bh(bh);		if (buffer_locked(bh)) {			spin_unlock(&journal->j_list_lock);			wait_on_buffer(bh);			spin_lock(&journal->j_list_lock);		}		if (unlikely(!buffer_uptodate(bh))) {			if (!trylock_page(bh->b_page)) {				spin_unlock(&journal->j_list_lock);				lock_page(bh->b_page);				spin_lock(&journal->j_list_lock);			}			if (bh->b_page->mapping)				set_bit(AS_EIO, &bh->b_page->mapping->flags);			unlock_page(bh->b_page);			SetPageError(bh->b_page);			err = -EIO;		}		if (!inverted_lock(journal, bh)) {			put_bh(bh);			spin_lock(&journal->j_list_lock);			continue;		}		if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {			__journal_unfile_buffer(jh);			jbd_unlock_bh_state(bh);			journal_remove_journal_head(bh);			put_bh(bh);		} else {			jbd_unlock_bh_state(bh);		}		release_data_buffer(bh);		cond_resched_lock(&journal->j_list_lock);	}	spin_unlock(&journal->j_list_lock);	if (err) {		char b[BDEVNAME_SIZE];		printk(KERN_WARNING			"JBD: Detected IO errors while flushing file data "			"on %s/n", bdevname(journal->j_fs_dev, b));		err = 0;	}	journal_write_revoke_records(journal, commit_transaction);
开发者ID:LouZiffer,项目名称:m900_kernel_cupcake-SDX,代码行数:66,


示例6: write_one_page

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		/* Hold an extra ref across ->writepage, which may unlock. */
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		/* Page was clean: nothing submitted, just unlock. */
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				/* Account the newly-dirtied page. */
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				__inc_bdi_stat(mapping->backing_dev_info,
						BDI_RECLAIMABLE);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
//.........这里部分代码省略.........
开发者ID:masbog,项目名称:iphonelinux-kernel,代码行数:101,


示例7: try_to_unuse

//.........这里部分代码省略.........			while (*swap_map > 1 && !retval &&					(p = p->next) != &start_mm->mmlist) {				mm = list_entry(p, struct mm_struct, mmlist);				if (!atomic_inc_not_zero(&mm->mm_users))					continue;				spin_unlock(&mmlist_lock);				mmput(prev_mm);				prev_mm = mm;				cond_resched();				swcount = *swap_map;				if (swcount <= 1)					;				else if (mm == &init_mm) {					set_start_mm = 1;					shmem = shmem_unuse(entry, page);				} else					retval = unuse_mm(mm, entry, page);				if (set_start_mm && *swap_map < swcount) {					mmput(new_start_mm);					atomic_inc(&mm->mm_users);					new_start_mm = mm;					set_start_mm = 0;				}				spin_lock(&mmlist_lock);			}			spin_unlock(&mmlist_lock);			mmput(prev_mm);			mmput(start_mm);			start_mm = new_start_mm;		}		if (retval) {			unlock_page(page);			page_cache_release(page);			break;		}		/*		 * How could swap count reach 0x7fff when the maximum		 * pid is 0x7fff, and there's no way to repeat a swap		 * page within an mm (except in shmem, where it's the		 * shared object which takes the reference count)?		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.		 *		 * If that's wrong, then we should worry more about		 * exit_mmap() and do_munmap() cases described above:		 * we might be resetting SWAP_MAP_MAX too early here.		 * We know "Undead"s can happen, they're okay, so don't		 * report them; but do report if we reset SWAP_MAP_MAX.		 */		if (*swap_map == SWAP_MAP_MAX) {			spin_lock(&swap_lock);			*swap_map = 1;			spin_unlock(&swap_lock);			reset_overflow = 1;		}		/*		 * If a reference remains (rare), we would like to leave		 * the page in the swap cache; but try_to_unmap could		 * then re-duplicate the entry once we drop page lock,		 * so we might loop indefinitely; also, that page could		 * not be swapped out to other storage meanwhile.  So:		 * delete from cache even if there's another reference,		 * after ensuring that the data has been saved to disk -
开发者ID:WiseMan787,项目名称:ralink_sdk,代码行数:67,


示例8: ext4_bio_write_page

/*
 * Submit all dirty buffers of a locked page for writeback.  Takes writeback
 * ownership of the page (sets PG_writeback) and unlocks it before returning.
 * Returns 0 on success or -ENOMEM if the io_page bookkeeping structure or a
 * bio could not be allocated; in that case the page is re-dirtied so it will
 * be retried later.
 */
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		/* Re-dirty so writeback will try this page again. */
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page_endio:
	 *
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	/* Walk every buffer head attached to the page. */
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_start >= len) {
			/* Buffer lies entirely beyond EOF: nothing to write. */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
开发者ID:mb3dot,项目名称:community-b3-kernel,代码行数:74,


示例9: gfs2_page_mkwrite

/*
 * ->page_mkwrite handler: make a mmapped page writable.  Takes the inode
 * glock exclusively, reserves quota/blocks if new allocation is needed,
 * unstuffs the dinode if the file is still inline, and allocates backing
 * blocks for the page.  Returns 0, VM_FAULT_OOM or VM_FAULT_SIGBUS.
 */
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required = 0;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/* No new allocation needed: page already has backing, done. */
	ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required);
	if (ret || !alloc_required)
		goto out_unlock;
	ret = -ENOMEM;
	al = gfs2_alloc_get(ip);
	if (al == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;

	/* Size the transaction for the blocks we may touch. */
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	/* Page beyond EOF: a truncate raced with us. */
	if (page->index > last_index)
		goto out_unlock_page;
	ret = 0;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		/* Data still inline in the dinode: move it to a real page. */
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	/* Translate errno into the VM fault codes the caller expects. */
	if (ret == -ENOMEM)
		ret = VM_FAULT_OOM;
	else if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}
开发者ID:AdrianHuang,项目名称:uclinux-robutest,代码行数:82,


示例10: nilfs_btnode_prepare_change_key

/**
 * nilfs_btnode_prepare_change_key
 *  prepare to move contents of the block for old key to one of new key.
 *  the old buffer will not be removed, but might be reused for new buffer.
 *  it might return -ENOMEM because of memory allocation errors,
 *  and might return -EIO because of disk read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	/*
	 * Fast path: one block per page, so the existing page can simply be
	 * re-inserted into the radix tree under the new key.
	 */
	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
#if HAVE_EXPORTED_RADIX_TREE_PRELOAD
		/*
		 * We cannot call radix_tree_preload for the kernels older
		 * than 2.6.23, because it is not exported for modules.
		 */
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
#endif
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

retry:
		WRITE_LOCK_IRQ(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		WRITE_UNLOCK_IRQ(&btnc->tree_lock);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() will be called.
		 * To protect the page in intermediate state, the page lock
		 * is held.
		 */
#if HAVE_EXPORTED_RADIX_TREE_PRELOAD
		radix_tree_preload_end();
#endif
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		/* A stale page occupies the new key: drop it and retry. */
		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;

		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	/* Slow path: allocate a fresh buffer under the new key to copy into. */
	err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1);
	if (likely(!err)) {
		BUG_ON(nbh == obh);
		ctxt->newbh = nbh;
	}
	return err;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
开发者ID:traveller42,项目名称:linux-2.6.28.mx233-falconwing,代码行数:75,


示例11: nilfs_btnode_submit_block

/*
 * Look up or create the b-tree node buffer for @blocknr in the btnode cache
 * and, if its contents are not yet valid, submit a read for it from
 * @pblocknr (translated through the DAT when @blocknr is virtual).
 *
 * Returns 0 when a read was submitted, -EEXIST (internal code) when the
 * buffer was already usable, or a negative errno on failure.  On success
 * *pbh holds a referenced buffer head; the page is unlocked and released
 * before returning.
 *
 * Fix: the scraped copy of this function had its newline escapes in the
 * btnode_debug() format strings corrupted to "/n"; restored to "\n".
 */
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, struct buffer_head **pbh,
			      int newblk)
{
	struct buffer_head *bh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	int err;

	btnode_debug(3, "called: blocknr=%llu pblocknr=%llu new=%d ino=%lu\n",
		     (unsigned long long)blocknr, (unsigned long long)pblocknr,
		     newblk, inode->i_ino);

	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */
	if (newblk) {
		/* A brand-new node must not already be mapped or dirty. */
		if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
			     buffer_dirty(bh))) {
			BH_DEBUG(bh, "invalid new bh");
			BUG();
		}
		bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
		bh->b_blocknr = blocknr;
		set_buffer_mapped(bh);
		set_buffer_uptodate(bh);
		goto found;
	}

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct inode *dat =
				nilfs_dat_inode(NILFS_I_NILFS(inode));

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(dat, blocknr, &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				btnode_debug(1, "return %d (xlate).\n", err);
				goto out_locked;
			}
		}
	}
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		/* Someone else read it in while we were translating. */
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	err = 0;
found:
	*pbh = bh;
out_locked:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	btnode_debug(3, "done (err=%d)\n", err);
	return err;
}
开发者ID:traveller42,项目名称:linux-2.6.28.mx233-falconwing,代码行数:70,


示例12: nilfs_find_uncommitted_extent

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	/* Convert the block offset into a page index and blocks-per-page. */
	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	/* A gap in the page run ends any extent found so far. */
	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					/* Extent ended before this buffer. */
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			/* Page without buffers cannot carry delayed blocks. */
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}
开发者ID:513855417,项目名称:linux,代码行数:83,


示例13: nilfs_copy_back_pages

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must no be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec, 0);
repeat:
	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
	if (!n)
		return;
	index = pvec.pages[n - 1]->index + 1;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				/* -EEXIST cannot happen: caller guarantees no
				 * concurrent insertions into the caches. */
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				put_page(page); /* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				/* Preserve the dirty tag across the move. */
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}
开发者ID:513855417,项目名称:linux,代码行数:69,


示例14: ext4_da_write_inline_data_begin

/*
 * Prepare the write for the inline data.
 * If the data can be written into the inode, we just read
 * the page and make it uptodate, and start the journal.
 * Otherwise read the page, makes it dirty so that it can be
 * handle in writepages(the i_disksize update is left to the
 * normal ext4_da_write_end).
 *
 * Returns 1 when the delayed write can proceed inline (*pagep set, page
 * locked, journal handle left open for the caller), or a negative errno.
 */
int ext4_da_write_inline_data_begin(struct address_space *mapping,
				    struct inode *inode,
				    loff_t pos, unsigned len,
				    unsigned flags,
				    struct page **pagep,
				    void **fsdata)
{
	int ret, inline_size;
	handle_t *handle;
	struct page *page;
	struct ext4_iloc iloc;

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		return ret;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		handle = NULL;
		goto out;
	}

	inline_size = ext4_get_max_inline_size(inode);

	ret = -ENOSPC;
	if (inline_size >= pos + len) {
		ret = ext4_prepare_inline_data(handle, inode, pos + len);
		if (ret && ret != -ENOSPC)
			goto out;
	}

	/* Write does not fit inline: convert to an extent-backed file. */
	if (ret == -ENOSPC) {
		ret = ext4_da_convert_inline_data_to_extent(mapping,
							    inode,
							    flags,
							    fsdata);
		goto out;
	}

	/*
	 * We cannot recurse into the filesystem as the transaction
	 * is already started.
	 */
	flags |= AOP_FLAG_NOFS;

	/* Inline data always lives in page 0. */
	page = grab_cache_page_write_begin(mapping, 0, flags);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	down_read(&EXT4_I(inode)->xattr_sem);
	/* Inline data disappeared while we weren't holding xattr_sem. */
	if (!ext4_has_inline_data(inode)) {
		ret = 0;
		goto out_release_page;
	}

	if (!PageUptodate(page)) {
		ret = ext4_read_inline_page(inode, page);
		if (ret < 0)
			goto out_release_page;
	}

	up_read(&EXT4_I(inode)->xattr_sem);
	*pagep = page;
	/* Success: the caller now owns the journal handle. */
	handle = NULL;
	brelse(iloc.bh);
	return 1;
out_release_page:
	up_read(&EXT4_I(inode)->xattr_sem);
	unlock_page(page);
	page_cache_release(page);
out:
	if (handle)
		ext4_journal_stop(handle);
	brelse(iloc.bh);
	return ret;
}
开发者ID:Runner85sx,项目名称:android_kernel_huawei_msm8909,代码行数:87,


示例15: invalidate_inode_pages2_range

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			/* Shadow/DAX entries get their own invalidation path. */
			if (radix_tree_exceptional_entry(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				/* Page was truncated out from under us. */
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_SHIFT,
							 0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   PAGE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			/* Remember the first failure but keep invalidating. */
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
开发者ID:AshishNamdev,项目名称:linux,代码行数:88,


示例16: ll_page_mkwrite0

/*
 * Sharing code of page_mkwrite method for rhel5 and rhel6.
 *
 * Runs a cl_io fault loop with ft_mkwrite set to make @vmpage writable.
 * On success the page is returned locked; returns -EAGAIN (with *retry
 * set) when the page raced with writeout and the fault must be retried,
 * -ENODATA when the page was truncated, or another negative errno.
 *
 * Fix: the scraped copy of this function had the newline escapes in both
 * CDEBUG() format strings corrupted to "/n"; restored to "\n".
 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env	   *env;
	struct cl_io	    *io;
	struct vvp_io	   *vio;
	int		      result;
	u16 refcheck;
	sigset_t	     set;
	struct inode	     *inode;
	struct ll_inode_info     *lli;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ll_fault_io_init(env, vma, vmpage->index, NULL);
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma    = vma;
	vio->u.fault.ft_vmpage = vmpage;

	/* Block all fatal signals except SIGKILL/SIGTERM during the IO. */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	inode = vvp_object_inode(io->ci_obj);
	lli = ll_i2info(inode);

	result = cl_io_loop(env, io);

	cfs_restore_sigs(set);

	if (result == 0) {
		struct inode *inode = file_inode(vma->vm_file);
		struct ll_inode_info *lli = ll_i2info(inode);

		lock_page(vmpage);
		if (!vmpage->mapping) {
			unlock_page(vmpage);

			/* page was truncated and lock was cancelled, return
			 * ENODATA so that VM_FAULT_NOPAGE will be returned
			 * to handle_mm_fault().
			 */
			if (result == 0)
				result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* race, the page has been cleaned by ptlrpcd after
			 * it was unlocked, it has to be added into dirty
			 * cache again otherwise this soon-to-dirty page won't
			 * consume any grants, even worse if this page is being
			 * transferred because it will break RPC checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP,
			       "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (!result)
			set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
	}

out_io:
	cl_io_fini(env, io);
out:
	cl_env_put(env, &refcheck);
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	LASSERT(ergo(result == 0, PageLocked(vmpage)));

	return result;
}
开发者ID:ReneNyffenegger,项目名称:linux,代码行数:87,


示例17: gfs2_page_mkwrite

/*
 * GFS2 ->page_mkwrite handler (PAGE_CACHE_* era kernel): make a page
 * writable by taking the inode glock exclusively, reserving quota/blocks,
 * starting a transaction, and allocating backing blocks if needed.
 * On the success path the page is left locked and dirtied; unwinding is
 * done through the chain of labels at the bottom.
 *
 * NOTE: the snippet is truncated by the source webpage
 * ("这里部分代码省略" = "code partially omitted") — the tail after
 * set_page_dirty() is not visible here.
 */
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = gfs2_rs_alloc(ip);
	if (ret)
		/* NOTE(review): this early return skips sb_end_pagefault()
		 * after sb_start_pagefault() above — looks unbalanced;
		 * confirm against the full (non-truncated) function.
		 */
		return ret;

	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/* Fast path: no allocation needed, just re-validate the page. */
	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;
	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;

	/* Work out how many blocks the transaction must reserve. */
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
//.........这里部分代码省略.........
开发者ID:AllenDou,项目名称:linux,代码行数:101,


示例18: write_cache_pages

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * NOTE: truncated by the source webpage ("这里部分代码省略") — the retry
 * logic after the main loop is not visible in this snippet.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int range_whole = 0;
	long nr_to_write = wbc->nr_to_write;

	/* Nonblocking caller and the device is congested: bail out early. */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	/* Gather up to PAGEVEC_SIZE dirty pages at a time and write each. */
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			lock_page(page);
			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}
			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}
			/* Data-integrity sync must wait for in-flight IO. */
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				/* ->writepage declined; page left locked by
				 * convention, so unlock and don't treat it
				 * as an error.
				 */
				unlock_page(page);
				ret = 0;
			}
			if (ret || (--nr_to_write <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
//.........这里部分代码省略.........
开发者ID:masbog,项目名称:iphonelinux-kernel,代码行数:101,


示例19: nilfs_page_mkwrite

static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf){	struct page *page = vmf->page;	struct inode *inode = vma->vm_file->f_dentry->d_inode;	struct nilfs_transaction_info ti;	int ret;	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))		return VM_FAULT_SIGBUS; /* -ENOSPC */	lock_page(page);	if (page->mapping != inode->i_mapping ||	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {		unlock_page(page);		return VM_FAULT_NOPAGE; /* make the VM retry the fault */	}	/*	 * check to see if the page is mapped already (no holes)	 */	if (PageMappedToDisk(page))		goto mapped;	if (page_has_buffers(page)) {		struct buffer_head *bh, *head;		int fully_mapped = 1;		bh = head = page_buffers(page);		do {			if (!buffer_mapped(bh)) {				fully_mapped = 0;				break;			}		} while (bh = bh->b_this_page, bh != head);		if (fully_mapped) {			SetPageMappedToDisk(page);			goto mapped;		}	}	unlock_page(page);	/*	 * fill hole blocks	 */	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);	/* never returns -ENOMEM, but may return -ENOSPC */	if (unlikely(ret))		return VM_FAULT_SIGBUS;	file_update_time(vma->vm_file);	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);	if (ret != VM_FAULT_LOCKED) {		nilfs_transaction_abort(inode->i_sb);		return ret;	}	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));	nilfs_transaction_commit(inode->i_sb); mapped:	wait_for_stable_page(page); out:	sb_end_pagefault(inode->i_sb);	return block_page_mkwrite_return(ret);}
开发者ID:robcore,项目名称:Alucard-Kernel-jfltexx,代码行数:65,


示例20: reiserfs_xattr_get

/*
 * inode->i_mutex: down
 */
/*
 * Read the value of extended attribute @name on @inode into @buffer.
 *
 * With @buffer == NULL, returns only the value size needed (excluding the
 * on-disk reiserfs_xattr_header).  Otherwise copies the value page by page,
 * validating the xattr magic in the first page and verifying the stored
 * hash over the whole value at the end.  Returns the value length, or a
 * negative errno (-EINVAL, -EOPNOTSUPP, -ERANGE, -EIO, ...).
 *
 * NOTE: truncated by the source webpage ("这里部分代码省略") — the final
 * 'out:' path after dput() is not visible here.
 */
int
reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
		   size_t buffer_size)
{
	ssize_t err = 0;
	struct dentry *dentry;
	size_t isize;
	size_t file_pos = 0;
	size_t buffer_pos = 0;
	struct page *page;
	__u32 hash = 0;

	if (name == NULL)
		return -EINVAL;

	/* We can't have xattrs attached to v1 items since they don't have
	 * generation numbers */
	if (get_inode_sd_version(inode) == STAT_DATA_V1)
		return -EOPNOTSUPP;

	dentry = xattr_lookup(inode, name, XATTR_REPLACE);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out;
	}

	down_read(&REISERFS_I(inode)->i_xattr_sem);

	isize = i_size_read(dentry->d_inode);

	/* Just return the size needed */
	if (buffer == NULL) {
		err = isize - sizeof(struct reiserfs_xattr_header);
		goto out_unlock;
	}

	if (buffer_size < isize - sizeof(struct reiserfs_xattr_header)) {
		err = -ERANGE;
		goto out_unlock;
	}

	/* Copy the value one page-cache page at a time. */
	while (file_pos < isize) {
		size_t chunk;
		char *data;
		size_t skip = 0;
		if (isize - file_pos > PAGE_CACHE_SIZE)
			chunk = PAGE_CACHE_SIZE;
		else
			chunk = isize - file_pos;

		page = reiserfs_get_page(dentry->d_inode, file_pos);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto out_unlock;
		}

		lock_page(page);
		data = page_address(page);
		if (file_pos == 0) {
			/* First page starts with the xattr header; skip it
			 * and remember the stored hash for the final check.
			 */
			struct reiserfs_xattr_header *rxh =
			    (struct reiserfs_xattr_header *)data;
			skip = file_pos = sizeof(struct reiserfs_xattr_header);
			chunk -= skip;
			/* Magic doesn't match up.. */
			if (rxh->h_magic != cpu_to_le32(REISERFS_XATTR_MAGIC)) {
				unlock_page(page);
				reiserfs_put_page(page);
				reiserfs_warning(inode->i_sb, "jdm-20001",
						 "Invalid magic for xattr (%s) "
						 "associated with %k", name,
						 INODE_PKEY(inode));
				err = -EIO;
				goto out_unlock;
			}
			hash = le32_to_cpu(rxh->h_hash);
		}
		memcpy(buffer + buffer_pos, data + skip, chunk);
		unlock_page(page);
		reiserfs_put_page(page);
		file_pos += chunk;
		buffer_pos += chunk;
		skip = 0;
	}
	err = isize - sizeof(struct reiserfs_xattr_header);

	/* Verify integrity: recompute the hash over the copied value. */
	if (xattr_hash(buffer, isize - sizeof(struct reiserfs_xattr_header)) !=
	    hash) {
		reiserfs_warning(inode->i_sb, "jdm-20002",
				 "Invalid hash for xattr (%s) associated "
				 "with %k", name, INODE_PKEY(inode));
		err = -EIO;
	}

out_unlock:
	up_read(&REISERFS_I(inode)->i_xattr_sem);
	dput(dentry);
//.........这里部分代码省略.........
开发者ID:303750856,项目名称:linux-3.1,代码行数:101,


示例21: gfs2_page_mkwrite

/*
 * GFS2 ->page_mkwrite handler (vm_fault-only signature era): make the
 * faulting page writable under an exclusive inode glock, reserving quota
 * and blocks and running a transaction when backing allocation is needed.
 * Unwinding goes through the label chain at the bottom.
 *
 * NOTE: truncated by the source webpage ("这里部分代码省略") — the tail
 * after gfs2_holder_uninit() is not visible here.
 */
static int gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/* Fast path: no allocation needed, just re-validate the page. */
	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	/* Reserve quota and in-place blocks for the allocation. */
	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
//.........这里部分代码省略.........
开发者ID:SantoshShilimkar,项目名称:linux,代码行数:101,


示例22: truncate_inode_pages_range

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 *
 * NOTE: truncated by the source webpage ("这里部分代码省略") — the partial
 * first/last-page handling and second pass are not fully visible here.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	/* Nothing cached at all: done. */
	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	/* First (nonblocking) pass over the fully-truncated range. */
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				truncate_exceptional_entry(mapping, index,
							   page);
				continue;
			}

			/* Nonblocking: skip pages we can't lock right away. */
			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	/* Zero the tail of a partially-truncated first page, if any. */
	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
//.........这里部分代码省略.........
开发者ID:AshishNamdev,项目名称:linux,代码行数:101,


示例23: gfs2_write_jdata_pagevec

/*
 * Write back one pagevec worth of journaled-data pages for GFS2 inside a
 * single transaction sized for nr_pages worth of blocks.
 *
 * Returns < 0 on transaction failure, 0 to continue the outer writeback
 * loop, or 1 to tell the caller to stop (range exhausted, nr_to_write
 * consumed, or a write error — with *done_index advanced past the page).
 */
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	/* One transaction covers all pages in the vec. */
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		/*
		 * At this point, the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or
		 * even swizzled back from swapper_space to tmpfs file
		 * mapping. However, page->index will not change
		 * because we have a reference on the page.
		 */
		if (page->index > end) {
			/*
			 * can't be range_cyclic (1st pass) because
			 * end == -1 in that case.
			 */
			ret = 1;
			break;
		}

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			/* Integrity sync waits; background writeback skips. */
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				/* writepage declined; page is still locked
				 * by convention, so unlock and carry on.
				 */
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
开发者ID:a2hojsjsjs,项目名称:linux,代码行数:100,


示例24: invalidate_mapping_pages

/** * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode * @mapping: the address_space which holds the pages to invalidate * @start: the offset 'from' which to invalidate * @end: the offset 'to' which to invalidate (inclusive) * * This function only removes the unlocked pages, if you want to * remove all the pages of one inode, you must call truncate_inode_pages. * * invalidate_mapping_pages() will not block on IO activity. It will not * invalidate pages which are dirty, locked, under writeback or mapped into * pagetables. */unsigned long invalidate_mapping_pages(struct address_space *mapping,		pgoff_t start, pgoff_t end){	pgoff_t indices[PAGEVEC_SIZE];	struct pagevec pvec;	pgoff_t index = start;	unsigned long ret;	unsigned long count = 0;	int i;	pagevec_init(&pvec, 0);	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,			indices)) {		for (i = 0; i < pagevec_count(&pvec); i++) {			struct page *page = pvec.pages[i];			/* We rely upon deletion not changing page->index */			index = indices[i];			if (index > end)				break;			if (radix_tree_exceptional_entry(page)) {				invalidate_exceptional_entry(mapping, index,							     page);				continue;			}			if (!trylock_page(page))				continue;			WARN_ON(page_to_index(page) != index);			/* Middle of THP: skip */			if (PageTransTail(page)) {				unlock_page(page);				continue;			} else if (PageTransHuge(page)) {				index += HPAGE_PMD_NR - 1;				i += HPAGE_PMD_NR - 1;				/* 'end' is in the middle of THP */				if (index ==  round_down(end, HPAGE_PMD_NR))					continue;			}			ret = invalidate_inode_page(page);			unlock_page(page);			/*			 * Invalidation is a hint that the page is no longer			 * of interest and try to speed up its reclaim.			 */			if (!ret)				deactivate_file_page(page);			count += ret;		}		pagevec_remove_exceptionals(&pvec);		pagevec_release(&pvec);		cond_resched();		index++;	}	return count;}
开发者ID:AshishNamdev,项目名称:linux,代码行数:75,


示例25: ext4_convert_inline_data_to_extent

/*
 * Convert an inode's inline (in-inode) data into a normal extent-backed
 * page: read the inline data into page 0, destroy the inline state under
 * xattr_sem, and run the block write-begin path inside a journal handle.
 * On allocation failure (-ENOSPC) the whole sequence is retried.
 *
 * NOTE: truncated by the source webpage ("这里部分代码省略") — the cleanup
 * after 'out:' is not fully visible here.
 */
static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
					      struct inode *inode,
					      unsigned flags)
{
	int ret, needed_blocks;
	handle_t *handle = NULL;
	int retries = 0, sem_held = 0;
	struct page *page = NULL;
	unsigned from, to;
	struct ext4_iloc iloc;

	if (!ext4_has_inline_data(inode)) {
		/*
		 * clear the flag so that no new write
		 * will trap here again.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
		return 0;
	}

	needed_blocks = ext4_writepage_trans_blocks(inode);

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		return ret;

retry:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		handle = NULL;
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	/* The inline data always lives in page index 0. */
	page = grab_cache_page_write_begin(mapping, 0, flags);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	down_write(&EXT4_I(inode)->xattr_sem);
	sem_held = 1;
	/* If some one has already done this for us, just exit. */
	if (!ext4_has_inline_data(inode)) {
		ret = 0;
		goto out;
	}

	from = 0;
	to = ext4_get_inline_size(inode);

	if (!PageUptodate(page)) {
		ret = ext4_read_inline_page(inode, page);
		if (ret < 0)
			goto out;
	}

	ret = ext4_destroy_inline_data_nolock(handle, inode);
	if (ret)
		goto out;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, from, to, ext4_get_block_write);
	else
		ret = __block_write_begin(page, from, to, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		/* Failure after destroying inline data: put the inode on the
		 * orphan list and truncate the partial write away.
		 */
		unlock_page(page);
		page_cache_release(page);
		ext4_orphan_add(handle, inode);
		up_write(&EXT4_I(inode)->xattr_sem);
		sem_held = 0;
		ext4_journal_stop(handle);
		handle = NULL;
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might
		 * still be on the orphan list; we need to
		 * make sure the inode is removed from the
		 * orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	block_commit_write(page, from, to);
out:
	if (page) {
//.........这里部分代码省略.........
开发者ID:Runner85sx,项目名称:android_kernel_huawei_msm8909,代码行数:101,



注:本文中的unlock_page函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ unlock_rename函数代码示例
C++ unlock_new_inode函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。