The C++ wait_on_page_writeback code examples in this tutorial are quite practical; we hope they help you.
This article collects typical usage examples of the C++ wait_on_page_writeback function. If you have been struggling with questions such as: what exactly does wait_on_page_writeback do, how is it used, and where can I find examples of it, then the hand-picked code samples below may help. A total of 26 wait_on_page_writeback code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.

Example 1: new_inode_page

static struct page *init_inode_metadata(struct inode *inode,
        struct inode *dir, const struct qstr *name)
{
    struct page *page;
    int err;

    if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
        page = new_inode_page(inode, name);
        if (IS_ERR(page))
            return page;

        if (S_ISDIR(inode->i_mode)) {
            err = make_empty_dir(inode, dir, page);
            if (err)
                goto error;
        }

        err = f2fs_init_acl(inode, dir, page);
        if (err)
            goto put_error;

        err = f2fs_init_security(inode, dir, name, page);
        if (err)
            goto put_error;

        wait_on_page_writeback(page);
    } else {
        page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
        if (IS_ERR(page))
            return page;

        wait_on_page_writeback(page);
        set_cold_node(inode, page);
    }

    init_dent_inode(name, page);

    /*
     * This file should be checkpointed during fsync.
     * We lost i_pino from now on.
     */
    if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
        file_lost_pino(inode);
        inc_nlink(inode);
    }
    return page;

put_error:
    f2fs_put_page(page, 1);
error:
    remove_inode_page(inode);
    return ERR_PTR(err);
}

Developer ID: Astralix, Project: mainline-dss11, Lines of code: 53
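Before the remaining examples, here is a minimal sketch of the pattern most of these call sites share: lock the page, wait out any writeback already in flight, modify the page, then re-dirty it. This sketch is not taken from any single project above; the update_page_contents() helper name and the assumption that the caller already holds a reference on a page-cache page are hypothetical.

/* Minimal usage sketch (hypothetical helper; assumes the caller already
 * holds a reference on a page that belongs to the page cache). */
static void update_page_contents(struct page *page)
{
    lock_page(page);                /* serialize against other writers */
    wait_on_page_writeback(page);   /* block until in-flight writeback on this page completes */

    /* ... modify the page contents here ... */

    set_page_dirty(page);           /* mark it for a later writeback pass */
    unlock_page(page);
}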
Example 2: f2fs_delete_entry

/*
 * It only removes the dentry from the dentry page, corresponding name
 * entry in name page does not need to be touched during deletion.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
                        struct inode *inode)
{
    struct f2fs_dentry_block *dentry_blk;
    unsigned int bit_pos;
    struct address_space *mapping = page->mapping;
    struct inode *dir = mapping->host;
    struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
    int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
    void *kaddr = page_address(page);
    int i;

    lock_page(page);
    wait_on_page_writeback(page);

    dentry_blk = (struct f2fs_dentry_block *)kaddr;
    bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry;
    for (i = 0; i < slots; i++)
        test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

    /* Let's check and deallocate this dentry page */
    bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
            NR_DENTRY_IN_BLOCK,
            0);
    kunmap(page); /* kunmap - pair of f2fs_find_entry */

    set_page_dirty(page);

    dir->i_ctime = dir->i_mtime = CURRENT_TIME;

    if (inode && S_ISDIR(inode->i_mode)) {
        drop_nlink(dir);
        update_inode_page(dir);
    } else {
        mark_inode_dirty(dir);
    }

    if (inode) {
        inode->i_ctime = CURRENT_TIME;
        drop_nlink(inode);
        if (S_ISDIR(inode->i_mode)) {
            drop_nlink(inode);
            i_size_write(inode, 0);
        }
        update_inode_page(inode);

        if (inode->i_nlink == 0)
            add_orphan_inode(sbi, inode->i_ino);
        else
            release_orphan_inode(sbi);
    }

    if (bit_pos == NR_DENTRY_IN_BLOCK) {
        truncate_hole(dir, page->index, page->index + 1);
        clear_page_dirty_for_io(page);
        ClearPageUptodate(page);
        dec_page_count(sbi, F2FS_DIRTY_DENTS);
        inode_dec_dirty_dents(dir);
    }
    f2fs_put_page(page, 1);
}

Developer ID: rbheromax, Project: f2fs-3.4.y, Lines of code: 64
Example 3: invalidate_inode_pages2

/**
 * invalidate_inode_pages2 - remove all unmapped pages from an address_space
 * @mapping - the address_space
 *
 * invalidate_inode_pages2() is like truncate_inode_pages(), except for the case
 * where the page is seen to be mapped into process pagetables.  In that case,
 * the page is marked clean but is left attached to its address_space.
 *
 * The page is also marked not uptodate so that a subsequent pagefault will
 * perform I/O to bring the page's contents back into sync with its backing
 * store.
 *
 * FIXME: invalidate_inode_pages2() is probably trivially livelockable.
 */
void invalidate_inode_pages2(struct address_space *mapping)
{
    struct pagevec pvec;
    pgoff_t next = 0;
    int i;

    pagevec_init(&pvec, 0);
    while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];

            lock_page(page);
            if (page->mapping == mapping) {    /* truncate race? */
                wait_on_page_writeback(page);
                next = page->index + 1;
                if (page_mapped(page)) {
                    clear_page_dirty(page);
                    ClearPageUptodate(page);
                } else {
                    if (!invalidate_complete_page(mapping, page)) {
                        clear_page_dirty(page);
                        ClearPageUptodate(page);
                    }
                }
            }
            unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();
    }
}

Developer ID: QiuLihua83, Project: linux-2.6.10, Lines of code: 46
Example 4: page_cache_pipe_buf_steal

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
                                     struct pipe_buffer *buf)
{
    struct page *page = buf->page;
    struct address_space *mapping = page_mapping(page);

    lock_page(page);

    WARN_ON(!PageUptodate(page));

    /*
     * At least for ext2 with nobh option, we need to wait on writeback
     * completing on this page, since we'll remove it from the pagecache.
     * Otherwise truncate wont wait on the page, allowing the disk
     * blocks to be reused by someone else before we actually wrote our
     * data to them. fs corruption ensues.
     */
    wait_on_page_writeback(page);

    if (PagePrivate(page))
        try_to_release_page(page, mapping_gfp_mask(mapping));

    if (!remove_mapping(mapping, page)) {
        unlock_page(page);
        return 1;
    }

    buf->flags |= PIPE_BUF_FLAG_LRU;
    return 0;
}

Developer ID: liucx6312, Project: supermicro_ipmi_firmware, Lines of code: 36
Example 5: gfs2_write_jdata_pagevec

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                    struct writeback_control *wbc,
                    struct pagevec *pvec,
                    int nr_pages, pgoff_t end)
{
    struct inode *inode = mapping->host;
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    loff_t i_size = i_size_read(inode);
    pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
    unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
    unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
    int i;
    int ret;

    ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
    if (ret < 0)
        return ret;

    for(i = 0; i < nr_pages; i++) {
        struct page *page = pvec->pages[i];

        lock_page(page);

        if (unlikely(page->mapping != mapping)) {
            unlock_page(page);
            continue;
        }

        if (!wbc->range_cyclic && page->index > end) {
            ret = 1;
            unlock_page(page);
            continue;
        }

        if (wbc->sync_mode != WB_SYNC_NONE)
            wait_on_page_writeback(page);

        if (PageWriteback(page) ||
            !clear_page_dirty_for_io(page)) {
            unlock_page(page);
            continue;
        }

        /* Is the page fully outside i_size? (truncate in progress) */
        if (page->index > end_index || (page->index == end_index && !offset)) {
            page->mapping->a_ops->invalidatepage(page, 0,
                                 PAGE_CACHE_SIZE);
            unlock_page(page);
            continue;
        }

        ret = __gfs2_jdata_writepage(page, wbc);

        if (ret || (--(wbc->nr_to_write) <= 0))
            ret = 1;
    }
    gfs2_trans_end(sdp);
    return ret;
}

Developer ID: Menpiko, Project: SnaPKernel-N6P, Lines of code: 59
Example 6: nilfs_page_mkwrite

static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct page *page = vmf->page;
    struct inode *inode = vma->vm_file->f_dentry->d_inode;
    struct nilfs_transaction_info ti;
    int ret;

    if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
        return VM_FAULT_SIGBUS;

    lock_page(page);
    if (page->mapping != inode->i_mapping ||
        page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
        unlock_page(page);
        return VM_FAULT_NOPAGE;
    }

    if (PageMappedToDisk(page))
        goto mapped;

    if (page_has_buffers(page)) {
        struct buffer_head *bh, *head;
        int fully_mapped = 1;

        bh = head = page_buffers(page);
        do {
            if (!buffer_mapped(bh)) {
                fully_mapped = 0;
                break;
            }
        } while (bh = bh->b_this_page, bh != head);

        if (fully_mapped) {
            SetPageMappedToDisk(page);
            goto mapped;
        }
    }
    unlock_page(page);

    ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
    if (unlikely(ret))
        return VM_FAULT_SIGBUS;

    ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
    if (ret != VM_FAULT_LOCKED) {
        nilfs_transaction_abort(inode->i_sb);
        return ret;
    }
    nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
    nilfs_transaction_commit(inode->i_sb);

 mapped:
    wait_on_page_writeback(page);
    return VM_FAULT_LOCKED;
}

Developer ID: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines of code: 56
Example 7: wrapfs_writepage

/* added: begin address space operations definitions */
static int wrapfs_writepage(struct page *page, struct writeback_control *wbc)
{
    int err = -EIO;
    struct inode *inode;
    struct inode *lower_inode;
    struct page *lower_page;
    struct address_space *lower_mapping; /* lower inode mapping */
    gfp_t mask;

    BUG_ON(!PageUptodate(page));
    inode = page->mapping->host;

    if (!inode || !WRAPFS_I(inode)) {
        err = 0;
        goto out;
    }

    lower_inode = wrapfs_lower_inode(inode);
    lower_mapping = lower_inode->i_mapping;

    mask = mapping_gfp_mask(lower_mapping) & ~(__GFP_FS);
    lower_page = find_or_create_page(lower_mapping, page->index, mask);
    if (!lower_page) {
        err = 0;
        set_page_dirty(page);
        goto out;
    }

    copy_highpage(lower_page, page);
    flush_dcache_page(lower_page);
    SetPageUptodate(lower_page);
    set_page_dirty(lower_page);

    if (wbc->for_reclaim) {
        unlock_page(lower_page);
        goto out_release;
    }

    BUG_ON(!lower_mapping->a_ops->writepage);
    wait_on_page_writeback(lower_page); /* prevent multiple writers */
    clear_page_dirty_for_io(lower_page); /* emulate VFS behavior */
    err = lower_mapping->a_ops->writepage(lower_page, wbc);
    if (err < 0)
        goto out_release;

    if (err == AOP_WRITEPAGE_ACTIVATE) {
        err = 0;
        unlock_page(lower_page);
    }

    fsstack_copy_attr_times(inode, lower_inode);

out_release:
    page_cache_release(lower_page);
out:
    unlock_page(page);
    return err;
}

Developer ID: abhishekShukla, Project: Linux-Stackable-File-System-, Lines of code: 57
Example 8: f2fs_set_link

void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
        struct page *page, struct inode *inode)
{
    lock_page(page);
    wait_on_page_writeback(page);

    de->ino = cpu_to_le32(inode->i_ino);
    set_de_type(de, inode);
    kunmap(page);
    set_page_dirty(page);
    dir->i_mtime = dir->i_ctime = CURRENT_TIME;
    mark_inode_dirty(dir);

    f2fs_put_page(page, 1);
}

Developer ID: Astralix, Project: mainline-dss11, Lines of code: 14
Example 9: init_dent_inode

void init_dent_inode(const struct qstr *name, struct page *ipage)
{
    struct f2fs_node *rn;

    if (IS_ERR(ipage))
        return;

    wait_on_page_writeback(ipage);

    /* copy name info. to this inode page */
    rn = (struct f2fs_node *)page_address(ipage);
    rn->i.i_namelen = cpu_to_le32(name->len);
    memcpy(rn->i.i_name, name->name, name->len);
    set_page_dirty(ipage);
}

Developer ID: mbgg, Project: linux, Lines of code: 15
Example 10: f2fs_set_link

void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
        struct page *page, struct inode *inode)
{
    lock_page(page);
    wait_on_page_writeback(page);

    de->ino = cpu_to_le32(inode->i_ino);
    set_de_type(de, inode);
    kunmap(page);
    set_page_dirty(page);
    dir->i_mtime = dir->i_ctime = CURRENT_TIME;
    mark_inode_dirty(dir);

    /* update parent inode number before releasing dentry page */
    F2FS_I(inode)->i_pino = dir->i_ino;

    f2fs_put_page(page, 1);
}

Developer ID: rbheromax, Project: f2fs-3.4.y, Lines of code: 17
Example 11: page_cache_pipe_buf_steal

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
                                     struct pipe_buffer *buf)
{
    struct page *page = buf->page;
    struct address_space *mapping;

    lock_page(page);

    mapping = page_mapping(page);
    if (mapping) {
        WARN_ON(!PageUptodate(page));

        /*
         * At least for ext2 with nobh option, we need to wait on
         * writeback completing on this page, since we'll remove it
         * from the pagecache.  Otherwise truncate wont wait on the
         * page, allowing the disk blocks to be reused by someone else
         * before we actually wrote our data to them. fs corruption
         * ensues.
         */
        wait_on_page_writeback(page);

        if (PagePrivate(page) && try_to_release_page(page, GFP_KERNEL))
            goto out_unlock;

        /*
         * If we succeeded in removing the mapping, set LRU flag
         * and return good.
         */
        if (remove_mapping(mapping, page)) {
            buf->flags |= PIPE_BUF_FLAG_LRU;
            return 0;
        }
    }

    /*
     * Raced with truncate or failed to remove page from current
     * address space, unlock and return failure.
     */
out_unlock:
    unlock_page(page);
    return 1;
}

Developer ID: xf739645524, Project: kernel-rhel5, Lines of code: 50
Example 12: nilfs_btnode_delete

void nilfs_btnode_delete(struct buffer_head *bh)
{
    struct address_space *mapping;
    struct page *page = bh->b_page;
    pgoff_t index = page_index(page);
    int still_dirty;

    page_cache_get(page);
    lock_page(page);
    wait_on_page_writeback(page);

    nilfs_forget_buffer(bh);
    still_dirty = PageDirty(page);
    mapping = page->mapping;
    unlock_page(page);
    page_cache_release(page);

    if (!still_dirty && mapping)
        invalidate_inode_pages2_range(mapping, index, index);
}

Developer ID: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines of code: 20
Example 13: nfs_vm_page_mkwrite

/*
 * Notification that a PTE pointing to an NFS page is about to be made
 * writable, implying that someone is about to modify the page through a
 * shared-writable mapping
 */
static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct page *page = vmf->page;
    struct file *filp = vma->vm_file;
    struct dentry *dentry = filp->f_path.dentry;
    unsigned pagelen;
    int ret = VM_FAULT_NOPAGE;
    struct address_space *mapping;

    dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
        dentry->d_parent->d_name.name, dentry->d_name.name,
        filp->f_mapping->host->i_ino,
        (long long)page_offset(page));

    /* make sure the cache has finished storing the page */
    nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);

    lock_page(page);
    mapping = page_file_mapping(page);
    if (mapping != dentry->d_inode->i_mapping)
        goto out_unlock;

    wait_on_page_writeback(page);

    pagelen = nfs_page_length(page);
    if (pagelen == 0)
        goto out_unlock;

    ret = VM_FAULT_LOCKED;
    if (nfs_flush_incompatible(filp, page) == 0 &&
        nfs_updatepage(filp, page, 0, pagelen) == 0)
        goto out;

    ret = VM_FAULT_SIGBUS;
out_unlock:
    unlock_page(page);
out:
    return ret;
}

Developer ID: ARMWorks, Project: FA_2451_Linux_Kernel, Lines of code: 44
Example 14: write_cache_pages

//......... part of the code omitted here .........
                /*
                 * can't be range_cyclic (1st pass) because
                 * end == -1 in that case.
                 */
                done = 1;
                break;
            }

            done_index = page->index + 1;

            lock_page(page);

            /*
             * Page truncated or invalidated. We can freely skip it
             * then, even for data integrity operations: the page
             * has disappeared concurrently, so there could be no
             * real expectation of this data interity operation
             * even if there is now a new, dirty page at the same
             * pagecache address.
             */
            if (unlikely(page->mapping != mapping)) {
continue_unlock:
                unlock_page(page);
                continue;
            }

            if (!PageDirty(page)) {
                /* someone wrote it for us */
                goto continue_unlock;
            }

            if (PageWriteback(page)) {
                if (wbc->sync_mode != WB_SYNC_NONE)
                    wait_on_page_writeback(page);
                else
                    goto continue_unlock;
            }

            BUG_ON(PageWriteback(page));
            if (!clear_page_dirty_for_io(page))
                goto continue_unlock;

            ret = (*writepage)(page, wbc, data);
            if (unlikely(ret)) {
                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                    unlock_page(page);
                    ret = 0;
                } else {
                    /*
                     * done_index is set past this page,
                     * so media errors will not choke
                     * background writeout for the entire
                     * file. This has consequences for
                     * range_cyclic semantics (ie. it may
                     * not be suitable for data integrity
                     * writeout).
                     */
                    done = 1;
                    break;
                }
            }

            if (wbc->nr_to_write > 0) {
                wbc->nr_to_write--;
                if (wbc->nr_to_write == 0 &&

Developer ID: khenam, Project: ardrone-kernel, Lines of code: 67
Example 15: write_one_page

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
    struct address_space *mapping = page->mapping;
    int ret = 0;
    struct writeback_control wbc = {
        .sync_mode = WB_SYNC_ALL,
        .nr_to_write = 1,
    };

    BUG_ON(!PageLocked(page));

    if (wait)
        wait_on_page_writeback(page);

    if (clear_page_dirty_for_io(page)) {
        page_cache_get(page);
        ret = mapping->a_ops->writepage(page, &wbc);
        if (ret == 0 && wait) {
            wait_on_page_writeback(page);
            if (PageError(page))
                ret = -EIO;
        }
        page_cache_release(page);
    } else {
        unlock_page(page);
    }
    return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
    if (!PageDirty(page))
        SetPageDirty(page);
    return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
    if (!TestSetPageDirty(page)) {
        struct address_space *mapping = page_mapping(page);
        struct address_space *mapping2;

        if (!mapping)
            return 1;

        spin_lock_irq(&mapping->tree_lock);
        mapping2 = page_mapping(page);
        if (mapping2) { /* Race with truncate? */
            BUG_ON(mapping2 != mapping);
            WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
            if (mapping_cap_account_dirty(mapping)) {
                __inc_zone_page_state(page, NR_FILE_DIRTY);
                __inc_bdi_stat(mapping->backing_dev_info,
                        BDI_RECLAIMABLE);
                task_io_account_write(PAGE_CACHE_SIZE);
            }
            radix_tree_tag_set(&mapping->page_tree,
                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irq(&mapping->tree_lock);
        if (mapping->host) {
            /* !PageAnon && !swapper_space */
            __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }
        return 1;
    }
    return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
//......... part of the code omitted here .........

Developer ID: printusrzero, Project: hwp6s-kernel, Lines of code: 75
Example 16: gfs2_write_jdata_pagevec

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                    struct writeback_control *wbc,
                    struct pagevec *pvec,
                    int nr_pages, pgoff_t end,
                    pgoff_t *done_index)
{
    struct inode *inode = mapping->host;
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
    int i;
    int ret;

    ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
    if (ret < 0)
        return ret;

    for(i = 0; i < nr_pages; i++) {
        struct page *page = pvec->pages[i];

        /*
         * At this point, the page may be truncated or
         * invalidated (changing page->mapping to NULL), or
         * even swizzled back from swapper_space to tmpfs file
         * mapping. However, page->index will not change
         * because we have a reference on the page.
         */
        if (page->index > end) {
            /*
             * can't be range_cyclic (1st pass) because
             * end == -1 in that case.
             */
            ret = 1;
            break;
        }

        *done_index = page->index;

        lock_page(page);

        if (unlikely(page->mapping != mapping)) {
continue_unlock:
            unlock_page(page);
            continue;
        }

        if (!PageDirty(page)) {
            /* someone wrote it for us */
            goto continue_unlock;
        }

        if (PageWriteback(page)) {
            if (wbc->sync_mode != WB_SYNC_NONE)
                wait_on_page_writeback(page);
            else
                goto continue_unlock;
        }

        BUG_ON(PageWriteback(page));
        if (!clear_page_dirty_for_io(page))
            goto continue_unlock;

        trace_wbc_writepage(wbc, mapping->backing_dev_info);

        ret = __gfs2_jdata_writepage(page, wbc);
        if (unlikely(ret)) {
            if (ret == AOP_WRITEPAGE_ACTIVATE) {
                unlock_page(page);
                ret = 0;
            } else {
                /*
                 * done_index is set past this page,
                 * so media errors will not choke
                 * background writeout for the entire
                 * file. This has consequences for
                 * range_cyclic semantics (ie. it may
                 * not be suitable for data integrity
                 * writeout).
                 */
                *done_index = page->index + 1;
                ret = 1;
                break;
            }
        }

        /*
         * We stop writing back only if we are not doing
         * integrity sync. In case of integrity sync we have to
         * keep going until we have written all the pages
         * we tagged for writeback prior to entering this loop.
         */
        if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
            ret = 1;
            break;
        }

    }
    gfs2_trans_end(sdp);
    return ret;
}

Developer ID: hejin, Project: kernel-3.10.0-327.13.1.el7.x86_64-fs, Lines of code: 100
Example 17: write_one_page

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
    struct address_space *mapping = page->mapping;
    int ret = 0;
    struct writeback_control wbc = {
        .sync_mode = WB_SYNC_ALL,
        .nr_to_write = 1,
    };

    BUG_ON(!PageLocked(page));

    if (wait)
        wait_on_page_writeback(page);

    if (clear_page_dirty_for_io(page)) {
        page_cache_get(page);
        ret = mapping->a_ops->writepage(page, &wbc);
        if (ret == 0 && wait) {
            wait_on_page_writeback(page);
            if (PageError(page))
                ret = -EIO;
        }
        page_cache_release(page);
    } else {
        unlock_page(page);
    }
    return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
    if (!PageDirty(page))
        return !TestSetPageDirty(page);
    return 0;
}

/*
 * Helper function for set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
    if (mapping_cap_account_dirty(mapping)) {
        __inc_zone_page_state(page, NR_FILE_DIRTY);
        __inc_zone_page_state(page, NR_DIRTIED);
        __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
        task_dirty_inc(current);
        task_io_account_write(PAGE_CACHE_SIZE);
    }
}
EXPORT_SYMBOL(account_page_dirtied);

/*
 * Helper function for set_page_writeback family.
 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
 * wrt interrupts.
 */
void account_page_writeback(struct page *page)
{
    inc_zone_page_state(page, NR_WRITEBACK);
    inc_zone_page_state(page, NR_WRITTEN);
}

Developer ID: printusrzero, Project: hwp6s-kernel, Lines of code: 75
Example 18: write_cache_pages

//......... part of the code omitted here .........
             * because we have a reference on the page.
             */
            if (page->index > end) {
                /*
                 * can't be range_cyclic (1st pass) because
                 * end == -1 in that case.
                 */
                done = 1;
                break;
            }

            done_index = page->index + 1;

            /* lock the page */
            lock_page(page);

            /*
             * Page truncated or invalidated. We can freely skip it
             * then, even for data integrity operations: the page
             * has disappeared concurrently, so there could be no
             * real expectation of this data interity operation
             * even if there is now a new, dirty page at the same
             * pagecache address.
             */
            /* other tasks may have modified the page while we were taking
             * the lock, so the checks below are required */
            if (unlikely(page->mapping != mapping)) { /* page no longer valid */
continue_unlock:
                unlock_page(page);
                continue;
            }

            if (!PageDirty(page)) { /* writeback done, the dirty flag has been cleared */
                /* someone wrote it for us */
                goto continue_unlock;
            }

            if (PageWriteback(page)) { /* page is under writeback; act according to sync_mode */
                if (wbc->sync_mode != WB_SYNC_NONE)
                    wait_on_page_writeback(page); /* wait for the in-flight writeback to finish before continuing */
                else
                    goto continue_unlock;
            }

            BUG_ON(PageWriteback(page));
            if (!clear_page_dirty_for_io(page))
                goto continue_unlock;

            trace_wbc_writepage(wbc, mapping->backing_dev_info);
            /* start writing back the "dirty" page */
            ret = (*writepage)(page, wbc, data);
            if (unlikely(ret)) {
                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                    unlock_page(page);
                    ret = 0;
                } else {
                    /*
                     * done_index is set past this page,
                     * so media errors will not choke
                     * background writeout for the entire
                     * file. This has consequences for
                     * range_cyclic semantics (ie. it may
                     * not be suitable for data integrity
                     * writeout).
                     */
                    done = 1;
                    break;
                }
            }

            /*
             * We stop writing back only if we are not doing
             * integrity sync. In case of integrity sync we have to
             * keep going until we have written all the pages
             * we tagged for writeback prior to entering this loop.
             */
            /* page written successfully, decrement the counter */
            if (--wbc->nr_to_write <= 0 &&
                wbc->sync_mode == WB_SYNC_NONE) {
                done = 1;
                break;
            }
        }
        pagevec_release(&pvec);
        cond_resched();
    }
    if (!cycled && !done) {
        /*
         * range_cyclic:
         * We hit the last page and there is more work to be done: wrap
         * back to the start of the file
         */
        cycled = 1;
        index = 0;
        end = writeback_index - 1;
        goto retry;
    }
    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
        mapping->writeback_index = done_index;

    return ret;
}

Developer ID: laitianli, Project: loongson-linux-2.6.36-3_mips, Lines of code: 101
Example 19: __f2fs_add_link

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
                        struct inode *inode)
{
    unsigned int bit_pos;
    unsigned int level;
    unsigned int current_depth;
    unsigned long bidx, block;
    f2fs_hash_t dentry_hash;
    struct f2fs_dir_entry *de;
    unsigned int nbucket, nblock;
    size_t namelen = name->len;
    struct page *dentry_page = NULL;
    struct f2fs_dentry_block *dentry_blk = NULL;
    int slots = GET_DENTRY_SLOTS(namelen);
    struct page *page;
    int err = 0;
    int i;

    dentry_hash = f2fs_dentry_hash(name->name, name->len);
    level = 0;
    current_depth = F2FS_I(dir)->i_current_depth;
    if (F2FS_I(dir)->chash == dentry_hash) {
        level = F2FS_I(dir)->clevel;
        F2FS_I(dir)->chash = 0;
    }

start:
    if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
        return -ENOSPC;

    /* Increase the depth, if required */
    if (level == current_depth)
        ++current_depth;

    nbucket = dir_buckets(level);
    nblock = bucket_blocks(level);

    bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));

    for (block = bidx; block <= (bidx + nblock - 1); block++) {
        dentry_page = get_new_data_page(dir, NULL, block, true);
        if (IS_ERR(dentry_page))
            return PTR_ERR(dentry_page);

        dentry_blk = kmap(dentry_page);
        bit_pos = room_for_filename(dentry_blk, slots);
        if (bit_pos < NR_DENTRY_IN_BLOCK)
            goto add_dentry;

        kunmap(dentry_page);
        f2fs_put_page(dentry_page, 1);
    }

    /* Move to next level to find the empty slot for new dentry */
    ++level;
    goto start;

add_dentry:
    wait_on_page_writeback(dentry_page);

    page = init_inode_metadata(inode, dir, name);
    if (IS_ERR(page)) {
        err = PTR_ERR(page);
        goto fail;
    }
    de = &dentry_blk->dentry[bit_pos];
    de->hash_code = dentry_hash;
    de->name_len = cpu_to_le16(namelen);
    memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
    de->ino = cpu_to_le32(inode->i_ino);
    set_de_type(de, inode);
    for (i = 0; i < slots; i++)
        test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
    set_page_dirty(dentry_page);

    /* we don't need to mark_inode_dirty now */
    F2FS_I(inode)->i_pino = dir->i_ino;
    update_inode(inode, page);
    f2fs_put_page(page, 1);

    update_parent_metadata(dir, inode, current_depth);
fail:
    clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
    kunmap(dentry_page);
    f2fs_put_page(dentry_page, 1);
    return err;
}

Developer ID: Astralix, Project: mainline-dss11, Lines of code: 90
Example 20: wrapfs_writepage

static int wrapfs_writepage(struct page *page, struct writeback_control *wbc)
{
    int err = -EIO;
    struct inode *inode;
    struct inode *lower_inode;
    struct page *lower_page;
    struct address_space *lower_mapping; /* lower inode mapping */
    gfp_t mask;

    /*printk(KERN_ALERT "in writepage() \n");*/
    BUG_ON(!PageUptodate(page));
    inode = page->mapping->host;

    /* if no lower inode, nothing to do */
    if (!inode || !WRAPFS_I(inode) || !WRAPFS_I(inode)->lower_inode) {
        err = 0;
        goto out;
    }
    lower_inode = wrapfs_lower_inode(inode);
    lower_mapping = lower_inode->i_mapping;

    /*
     * find lower page (returns a locked page)
     *
     * We turn off __GFP_FS while we look for or create a new lower
     * page.  This prevents a recursion into the file system code, which
     * under memory pressure conditions could lead to a deadlock.  This
     * is similar to how the loop driver behaves (see loop_set_fd in
     * drivers/block/loop.c).  If we can't find the lower page, we
     * redirty our page and return "success" so that the VM will call us
     * again in the (hopefully near) future.
     */
    mask = mapping_gfp_mask(lower_mapping) & ~(__GFP_FS);
    lower_page = find_or_create_page(lower_mapping, page->index, mask);
    if (!lower_page) {
        err = 0;
        set_page_dirty(page);
        goto out;
    }

    /* copy page data from our upper page to the lower page */
    copy_highpage(lower_page, page);
    flush_dcache_page(lower_page);
    SetPageUptodate(lower_page);
    set_page_dirty(lower_page);

    /*
     * Call lower writepage (expects locked page).  However, if we are
     * called with wbc->for_reclaim, then the VFS/VM just wants to
     * reclaim our page.  Therefore, we don't need to call the lower
     * ->writepage: just copy our data to the lower page (already done
     * above), then mark the lower page dirty and unlock it, and return
     * success.
     */
    if (wbc->for_reclaim) {
        unlock_page(lower_page);
        goto out_release;
    }

    BUG_ON(!lower_mapping->a_ops->writepage);
    wait_on_page_writeback(lower_page); /* prevent multiple writers */
    clear_page_dirty_for_io(lower_page); /* emulate VFS behavior */
    err = lower_mapping->a_ops->writepage(lower_page, wbc);
    if (err < 0)
        goto out_release;

    /*
     * Lower file systems such as ramfs and tmpfs, may return
     * AOP_WRITEPAGE_ACTIVATE so that the VM won't try to (pointlessly)
     * write the page again for a while.  But those lower file systems
     * also set the page dirty bit back again.  Since we successfully
     * copied our page data to the lower page, then the VM will come
     * back to the lower page (directly) and try to flush it.  So we can
     * save the VM the hassle of coming back to our page and trying to
     * flush too.  Therefore, we don't re-dirty our own page, and we
     * never return AOP_WRITEPAGE_ACTIVATE back to the VM (we consider
     * this a success).
     *
     * We also unlock the lower page if the lower ->writepage returned
     * AOP_WRITEPAGE_ACTIVATE.  (This "anomalous" behaviour may be
     * addressed in future shmem/VM code.)
     */
    if (err == AOP_WRITEPAGE_ACTIVATE) {
        err = 0;
        unlock_page(lower_page);
    }

    /* all is well */

    /* lower mtimes have changed: update ours */
    /*
    fsstack_copy_inode_size(dentry->d_inode,
            lower_file->f_path.dentry->d_inode);
    fsstack_copy_attr_times(dentry->d_inode,
            lower_file->f_path.dentry->d_inode);
    */

out_release:
    /* b/c find_or_create_page increased refcnt */
    page_cache_release(lower_page);
out:
//......... part of the code omitted here .........

Developer ID: disdi, Project: address-space-ops-in-wrapfs, Lines of code: 101
Example 21: ilookup5

//......... part of the code omitted here .........
        inode->i_fop = &f2fs_file_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
    } else if (S_ISDIR(inode->i_mode)) {
        inode->i_op = &f2fs_dir_inode_operations;
        inode->i_fop = &f2fs_dir_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
        mapping_set_gfp_mask(inode->i_mapping,
                GFP_HIGHUSER_MOVABLE | __GFP_ZERO);
    } else if (S_ISLNK(inode->i_mode)) {
        inode->i_op = &f2fs_symlink_inode_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
    } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
            S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
        inode->i_op = &f2fs_special_inode_operations;
        init_special_inode(inode, inode->i_mode, inode->i_rdev);
    } else {
        ret = -EIO;
        goto bad_inode;
    }
    unlock_new_inode(inode);

    return inode;

bad_inode:
    iget_failed(inode);
    return ERR_PTR(ret);
}

void update_inode(struct inode *inode, struct page *node_page)
{
    struct f2fs_node *rn;
    struct f2fs_inode *ri;

    wait_on_page_writeback(node_page);

    rn = page_address(node_page);
    ri = &(rn->i);

    ri->i_mode = cpu_to_le16(inode->i_mode);
    ri->i_advise = F2FS_I(inode)->i_advise;
    ri->i_uid = cpu_to_le32(i_uid_read(inode));
    ri->i_gid = cpu_to_le32(i_gid_read(inode));
    ri->i_links = cpu_to_le32(inode->i_nlink);
    ri->i_size = cpu_to_le64(i_size_read(inode));
    ri->i_blocks = cpu_to_le64(inode->i_blocks);
    set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);

    ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
    ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
    ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
    ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
    ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
    ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
    ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
    ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
    ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
    ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
    ri->i_generation = cpu_to_le32(inode->i_generation);

    set_cold_node(inode, node_page);
    set_page_dirty(node_page);
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
    struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
    struct page *node_page

Developer ID: AdrianHuang, Project: linux-3.8.13, Lines of code: 67
Example 22: truncate_inode_pages_range

//......... part of the code omitted here .........
            if (index >= end)
                break;

            if (radix_tree_exceptional_entry(page)) {
                clear_exceptional_entry(mapping, index, page);
                continue;
            }

            if (!trylock_page(page))
                continue;
            WARN_ON(page->index != index);
            if (PageWriteback(page)) {
                unlock_page(page);
                continue;
            }
            truncate_inode_page(mapping, page);
            unlock_page(page);
        }
        pagevec_remove_exceptionals(&pvec);
        pagevec_release(&pvec);
        cond_resched();
        index++;
    }

    if (partial_start) {
        struct page *page = find_lock_page(mapping, start - 1);
        if (page) {
            unsigned int top = PAGE_CACHE_SIZE;
            if (start > end) {
                /* Truncation within a single page */
                top = partial_end;
                partial_end = 0;
            }
            wait_on_page_writeback(page);
            zero_user_segment(page, partial_start, top);
            cleancache_invalidate_page(mapping, page);
            if (page_has_private(page))
                do_invalidatepage(page, partial_start,
                          top - partial_start);
            unlock_page(page);
            page_cache_release(page);
        }
    }
    if (partial_end) {
        struct page *page = find_lock_page(mapping, end);
        if (page) {
            wait_on_page_writeback(page);
            zero_user_segment(page, 0, partial_end);
            cleancache_invalidate_page(mapping, page);
            if (page_has_private(page))
                do_invalidatepage(page, 0,
                          partial_end);
            unlock_page(page);
            page_cache_release(page);
        }
    }
    /*
     * If the truncation happened within a single page no pages
     * will be released, just zeroed, so we can bail out now.
     */
    if (start >= end)
        return;

    index = start;
    for ( ; ; ) {
        cond_resched();

Developer ID: stefanberger, Project: linux-tpmdd, Lines of code: 67
Example 23: invalidate_inode_pages2_range

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                  pgoff_t start, pgoff_t end)
{
    pgoff_t indices[PAGEVEC_SIZE];
    struct pagevec pvec;
    pgoff_t index;
    int i;
    int ret = 0;
    int ret2 = 0;
    int did_range_unmap = 0;

    cleancache_invalidate_inode(mapping);
    pagevec_init(&pvec, 0);
    index = start;
    while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
            min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
            indices)) {
        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];

            /* We rely upon deletion not changing page->index */
            index = indices[i];
            if (index > end)
                break;

            if (radix_tree_exceptional_entry(page)) {
                clear_exceptional_entry(mapping, index, page);
                continue;
            }

            lock_page(page);
            WARN_ON(page->index != index);
            if (page->mapping != mapping) {
                unlock_page(page);
                continue;
            }
            wait_on_page_writeback(page);
            if (page_mapped(page)) {
                if (!did_range_unmap) {
                    /*
                     * Zap the rest of the file in one hit.
                     */
                    unmap_mapping_range(mapping,
                       (loff_t)index << PAGE_CACHE_SHIFT,
                       (loff_t)(1 + end - index)
                             << PAGE_CACHE_SHIFT,
                        0);
                    did_range_unmap = 1;
                } else {
                    /*
                     * Just zap this page
                     */
                    unmap_mapping_range(mapping,
                       (loff_t)index << PAGE_CACHE_SHIFT,
                       PAGE_CACHE_SIZE, 0);
                }
            }
            BUG_ON(page_mapped(page));
            ret2 = do_launder_page(mapping, page);
            if (ret2 == 0) {
                if (!invalidate_complete_page2(mapping, page))
                    ret2 = -EBUSY;
            }
            if (ret2 < 0)
                ret = ret2;
            unlock_page(page);
        }
        pagevec_remove_exceptionals(&pvec);
        pagevec_release(&pvec);
        cond_resched();
        index++;
    }
    cleancache_invalidate_inode(mapping);
    return ret;
}

Developer ID: stefanberger, Project: linux-tpmdd, Lines of code: 86
Example 24: gfs2_page_mkwrite

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct page *page = vmf->page;
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    struct gfs2_inode *ip = GFS2_I(inode);
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    unsigned long last_index;
    u64 pos = page->index << PAGE_CACHE_SHIFT;
    unsigned int data_blocks, ind_blocks, rblocks;
    struct gfs2_holder gh;
    struct gfs2_qadata *qa;
    loff_t size;
    int ret;

    vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

    gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
    ret = gfs2_glock_nq(&gh);
    if (ret)
        goto out;

    set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
    set_bit(GIF_SW_PAGED, &ip->i_flags);

    if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
        lock_page(page);
        if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
            ret = -EAGAIN;
            unlock_page(page);
        }
        goto out_unlock;
    }

    ret = -ENOMEM;
    qa = gfs2_qadata_get(ip);
    if (qa == NULL)
        goto out_unlock;

    ret = gfs2_quota_lock_check(ip);
    if (ret)
        goto out_alloc_put;
    gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
    ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
    if (ret)
        goto out_quota_unlock;

    rblocks = RES_DINODE + ind_blocks;
    if (gfs2_is_jdata(ip))
        rblocks += data_blocks ? data_blocks : 1;
    if (ind_blocks || data_blocks) {
        rblocks += RES_STATFS + RES_QUOTA;
        rblocks += gfs2_rg_blocks(ip);
    }
    ret = gfs2_trans_begin(sdp, rblocks, 0);
    if (ret)
        goto out_trans_fail;

    lock_page(page);
    ret = -EINVAL;
    size = i_size_read(inode);
    last_index = (size - 1) >> PAGE_CACHE_SHIFT;
    if (size == 0 || (page->index > last_index))
        goto out_trans_end;

    ret = -EAGAIN;
    if (!PageUptodate(page) || page->mapping != inode->i_mapping)
        goto out_trans_end;

    ret = 0;
    if (gfs2_is_stuffed(ip))
        ret = gfs2_unstuff_dinode(ip, page);
    if (ret == 0)
        ret = gfs2_allocate_page_backing(page);

out_trans_end:
    if (ret)
        unlock_page(page);
    gfs2_trans_end(sdp);
out_trans_fail:
    gfs2_inplace_release(ip);
out_quota_unlock:
    gfs2_quota_unlock(ip);
out_alloc_put:
    gfs2_qadata_put(ip);
out_unlock:
    gfs2_glock_dq(&gh);
out:
    gfs2_holder_uninit(&gh);
    if (ret == 0) {
        set_page_dirty(page);
        if (inode->i_sb->s_frozen == SB_UNFROZEN) {
            wait_on_page_writeback(page);
        } else {
            ret = -EAGAIN;
            unlock_page(page);
        }
    }
//......... part of the code omitted here .........

Developer ID: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines of code: 101
Example 25: try_to_unuse

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
    struct swap_info_struct * si = &swap_info[type];
    struct mm_struct *start_mm;
    unsigned short *swap_map;
    unsigned short swcount;
    struct page *page;
    swp_entry_t entry;
    unsigned int i = 0;
    int retval = 0;
    int reset_overflow = 0;
    int shmem;

    /*
     * When searching mms for an entry, a good strategy is to
     * start at the first mm we freed the previous entry from
     * (though actually we don't notice whether we or coincidence
     * freed the entry).  Initialize this start_mm with a hold.
     *
     * A simpler strategy would be to start at the last mm we
     * freed the previous entry from; but that would take less
     * advantage of mmlist ordering, which clusters forked mms
     * together, child after parent.  If we race with dup_mmap(), we
     * prefer to resolve parent before child, lest we miss entries
     * duplicated after we scanned child: using last mm would invert
     * that.  Though it's only a serious concern when an overflowed
     * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
     */
    start_mm = &init_mm;
    atomic_inc(&init_mm.mm_users);

    /*
     * Keep on scanning until all entries have gone.  Usually,
     * one pass through swap_map is enough, but not necessarily:
     * there are races when an instance of an entry might be missed.
     */
    while ((i = find_next_to_unuse(si, i)) != 0) {
        if (signal_pending(current)) {
            retval = -EINTR;
            break;
        }

        /*
         * Get a page for the entry, using the existing swap
         * cache page if there is one.  Otherwise, get a clean
         * page and read the swap into it.
         */
        swap_map = &si->swap_map[i];
        entry = swp_entry(type, i);
        page = read_swap_cache_async(entry, NULL, 0);
        if (!page) {
            /*
             * Either swap_duplicate() failed because entry
             * has been freed independently, and will not be
             * reused since sys_swapoff() already disabled
             * allocation from here, or alloc_page() failed.
             */
            if (!*swap_map)
                continue;
            retval = -ENOMEM;
            break;
        }

        /*
         * Don't hold on to start_mm if it looks like exiting.
         */
        if (atomic_read(&start_mm->mm_users) == 1) {
            mmput(start_mm);
            start_mm = &init_mm;
            atomic_inc(&init_mm.mm_users);
        }

        /*
         * Wait for and lock page.  When do_swap_page races with
         * try_to_unuse, do_swap_page can handle the fault much
         * faster than try_to_unuse can locate the entry.  This
         * apparently redundant "wait_on_page_locked" lets try_to_unuse
         * defer to do_swap_page in such a case - in some tests,
         * do_swap_page and try_to_unuse repeatedly compete.
         */
        wait_on_page_locked(page);
        wait_on_page_writeback(page);
        lock_page(page);
        wait_on_page_writeback(page);

        /*
         * Remove all references to entry.
         * Whenever we reach init_mm, there's no address space
         * to search, but use it as a reminder to search shmem.
         */
        shmem = 0;
        swcount = *swap_map;
        if (swcount > 1) {
            if (start_mm == &init_mm)
                shmem = shmem_unuse(entry, page);
//......... part of the code omitted here .........

Developer ID: acassis, Project: emlinux-ssd1935, Lines of code: 101
Example 26: truncate_inode_pages

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Truncate the page cache at a set offset, removing the pages that are beyond
 * that offset (and zeroing out partial pages).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Called under (and serialised by) inode->i_sem.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
    const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
    const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
    struct pagevec pvec;
    pgoff_t next;
    int i;

    if (mapping->nrpages == 0)
        return;

    pagevec_init(&pvec, 0);
    next = start;
    while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];
            pgoff_t page_index = page->index;

            if (page_index > next)
                next = page_index;
            next++;
            if (TestSetPageLocked(page))
                continue;
            if (PageWriteback(page)) {
                unlock_page(page);
                continue;
            }
            truncate_complete_page(mapping, page);
            unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();
    }

    if (partial) {
        struct page *page = find_lock_page(mapping, start - 1);
        if (page) {
            wait_on_page_writeback(page);
            truncate_partial_page(page, partial);
            unlock_page(page);
            page_cache_release(page);
        }
    }

    next = start;
    for ( ; ; ) {
        cond_resched();
        if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
            if (next == start)
                break;
            next = start;
            continue;
        }
        for (i = 0; i < pagevec_count(&pvec); i++) {
            struct page *page = pvec.pages[i];

            lock_page(page);
            wait_on_page_writeback(page);
            if (page->index > next)
                next = page->index;
            next++;
            truncate_complete_page(mapping, page);
            unlock_page(page);
        }
        pagevec_release(&pvec);
    }
}

Developer ID: QiuLihua83, Project: linux-2.6.10, Lines of code: 90
Note: the wait_on_page_writeback examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the corresponding project's license. Do not republish without permission.