
Tutorial: C++ up_read function code examples

51自学网 2021-06-03 09:13:02
This tutorial on C++ up_read function code examples is meant to be practical; we hope you find it helpful.

This article collects typical usage examples of the up_read function from C/C++ code (the samples below come from the Linux kernel and kernel-derived projects). up_read() releases a reader-writer semaphore (struct rw_semaphore) that was previously taken for shared, read-only access with down_read(). If you have been wondering what exactly up_read does, how to call it, or what real-world uses look like, the hand-picked code examples here may help.

Below are 28 code examples of the up_read function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
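
Before the examples, here is a minimal sketch of the basic pattern behind them: down_read() acquires a struct rw_semaphore for shared (read-only) access, and up_read() must release it on every exit path. The structure, field, and function names below are illustrative only and do not come from any of the examples; they assume a kernel context in which the semaphore has already been set up with init_rwsem().

#include <linux/rwsem.h>

/* Illustrative structure: some state protected by a reader-writer semaphore. */
struct demo_cache {
	struct rw_semaphore lock;	/* assumed to be initialized with init_rwsem() */
	int cached_value;
};

/* A reader takes the semaphore shared and pairs the down_read()
 * with exactly one up_read() before returning. */
static int demo_read_value(struct demo_cache *c)
{
	int v;

	down_read(&c->lock);	/* acquire for shared, read-only access */
	v = c->cached_value;	/* read the protected state */
	up_read(&c->lock);	/* release; a writer may now take the lock exclusively */

	return v;
}

Multiple readers may hold the semaphore at the same time; only writers (down_write()) are excluded, which is why the examples below use this pairing around short read-only critical sections.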

Example 1: ext4_da_write_inline_data_begin

/* * Prepare the write for the inline data. * If the the data can be written into the inode, we just read * the page and make it uptodate, and start the journal. * Otherwise read the page, makes it dirty so that it can be * handle in writepages(the i_disksize update is left to the * normal ext4_da_write_end). */int ext4_da_write_inline_data_begin(struct address_space *mapping,				    struct inode *inode,				    loff_t pos, unsigned len,				    unsigned flags,				    struct page **pagep,				    void **fsdata){	int ret, inline_size;	handle_t *handle;	struct page *page;	struct ext4_iloc iloc;	int retries;	ret = ext4_get_inode_loc(inode, &iloc);	if (ret)		return ret;retry_journal:	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);	if (IS_ERR(handle)) {		ret = PTR_ERR(handle);		goto out;	}	inline_size = ext4_get_max_inline_size(inode);	ret = -ENOSPC;	if (inline_size >= pos + len) {		ret = ext4_prepare_inline_data(handle, inode, pos + len);		if (ret && ret != -ENOSPC)			goto out_journal;	}	/*	 * We cannot recurse into the filesystem as the transaction	 * is already started.	 */	flags |= AOP_FLAG_NOFS;	if (ret == -ENOSPC) {		ret = ext4_da_convert_inline_data_to_extent(mapping,							    inode,							    flags,							    fsdata);		ext4_journal_stop(handle);		if (ret == -ENOSPC &&		    ext4_should_retry_alloc(inode->i_sb, &retries))			goto retry_journal;		goto out;	}	page = grab_cache_page_write_begin(mapping, 0, flags);	if (!page) {		ret = -ENOMEM;		goto out_journal;	}	down_read(&EXT4_I(inode)->xattr_sem);	if (!ext4_has_inline_data(inode)) {		ret = 0;		goto out_release_page;	}	if (!PageUptodate(page)) {		ret = ext4_read_inline_page(inode, page);		if (ret < 0)			goto out_release_page;	}	up_read(&EXT4_I(inode)->xattr_sem);	*pagep = page;	brelse(iloc.bh);	return 1;out_release_page:	up_read(&EXT4_I(inode)->xattr_sem);	unlock_page(page);	page_cache_release(page);out_journal:	ext4_journal_stop(handle);out:	brelse(iloc.bh);	return ret;}
Developer ID: hejin, project: kernel-3.10.0-327.13.1.el7.x86_64-fs, lines of code: 92


Example 2: cfs_get_environ

/* Read the environment variable of current process specified by @key. */int cfs_get_environ(const char *key, char *value, int *val_len){	struct mm_struct *mm;	char *buffer;	int buf_len = PAGE_CACHE_SIZE;	int key_len = strlen(key);	unsigned long addr;	int rc;	ENTRY;	buffer = kmalloc(buf_len, GFP_USER);	if (!buffer)		RETURN(-ENOMEM);	mm = get_task_mm(current);	if (!mm) {		kfree(buffer);		RETURN(-EINVAL);	}	/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),	 * which is already holding mmap_sem for writes.  If some other	 * thread gets the write lock in the meantime, this thread will	 * block, but at least it won't deadlock on itself.  LU-1735 */	if (down_read_trylock(&mm->mmap_sem) == 0) {		kfree(buffer);		return -EDEADLK;	}	up_read(&mm->mmap_sem);	addr = mm->env_start;	while (addr < mm->env_end) {		int this_len, retval, scan_len;		char *env_start, *env_end;		memset(buffer, 0, buf_len);		this_len = min_t(int, mm->env_end - addr, buf_len);		retval = cfs_access_process_vm(current, addr, buffer,					       this_len, 0);		if (retval != this_len)			break;		addr += retval;		/* Parse the buffer to find out the specified key/value pair.		 * The "key=value" entries are separated by '/0'. */		env_start = buffer;		scan_len = this_len;		while (scan_len) {			char *entry;			int entry_len;			env_end = memscan(env_start, '/0', scan_len);			LASSERT(env_end >= env_start &&				env_end <= env_start + scan_len);			/* The last entry of this buffer cross the buffer			 * boundary, reread it in next cycle. */			if (unlikely(env_end - env_start == scan_len)) {				/* This entry is too large to fit in buffer */				if (unlikely(scan_len == this_len)) {					CERROR("Too long env variable./n");					GOTO(out, rc = -EINVAL);				}				addr -= scan_len;				break;			}			entry = env_start;			entry_len = env_end - env_start;			/* Key length + length of '=' */			if (entry_len > key_len + 1 &&			    !memcmp(entry, key, key_len)) {				entry += key_len + 1;				entry_len -= key_len + 1;				/* The 'value' buffer passed in is too small.*/				if (entry_len >= *val_len)					GOTO(out, rc = -EOVERFLOW);				memcpy(value, entry, entry_len);				*val_len = entry_len;				GOTO(out, rc = 0);			}			scan_len -= (env_end - env_start + 1);			env_start = env_end + 1;		}	}	GOTO(out, rc = -ENOENT);out:	mmput(mm);	kfree((void *)buffer);	return rc;}
Developer ID: Lezval, project: lustre, lines of code: 98
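
Example 2 above deliberately uses down_read_trylock() instead of down_read() on mmap_sem: as its comment explains, the caller may already hold mmap_sem for writing, and a blocking acquire would deadlock on itself. A minimal sketch of that pattern follows, with illustrative names that are not part of the Lustre code.

#include <linux/rwsem.h>
#include <linux/errno.h>

/* Try to take a reader-writer semaphore for reading without blocking.
 * down_read_trylock() returns 1 on success and 0 if the lock cannot be taken. */
static int demo_try_read(struct rw_semaphore *sem)
{
	if (down_read_trylock(sem) == 0)
		return -EDEADLK;	/* lock unavailable; report rather than block */

	/* ... read the data protected by 'sem' here ... */

	up_read(sem);			/* a successful trylock must still be paired with up_read() */
	return 0;
}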


Example 3: xfs_ialloc_ag_select

/* * Select an allocation group to look for a free inode in, based on the parent * inode and then mode.  Return the allocation group buffer. */STATIC xfs_buf_t *			/* allocation group buffer */xfs_ialloc_ag_select(	xfs_trans_t	*tp,		/* transaction pointer */	xfs_ino_t	parent,		/* parent directory inode number */	mode_t		mode,		/* bits set to indicate file type */	int		okalloc)	/* ok to allocate more space */{	xfs_buf_t	*agbp;		/* allocation group header buffer */	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */	xfs_agnumber_t	agno;		/* current ag number */	int		flags;		/* alloc buffer locking flags */	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */	xfs_extlen_t	longest = 0;	/* longest extent available */	xfs_mount_t	*mp;		/* mount point structure */	int		needspace;	/* file mode implies space allocated */	xfs_perag_t	*pag;		/* per allocation group data */	xfs_agnumber_t	pagno;		/* parent (starting) ag number */	/*	 * Files of these types need at least one block if length > 0	 * (and they won't fit in the inode, but that's hard to figure out).	 */	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);	mp = tp->t_mountp;	agcount = mp->m_maxagi;	if (S_ISDIR(mode))		pagno = xfs_ialloc_next_ag(mp);	else {		pagno = XFS_INO_TO_AGNO(mp, parent);		if (pagno >= agcount)			pagno = 0;	}	ASSERT(pagno < agcount);	/*	 * Loop through allocation groups, looking for one with a little	 * free space in it.  Note we don't look for free inodes, exactly.	 * Instead, we include whether there is a need to allocate inodes	 * to mean that blocks must be allocated for them,	 * if none are currently free.	 */	agno = pagno;	flags = XFS_ALLOC_FLAG_TRYLOCK;	down_read(&mp->m_peraglock);	for (;;) {		pag = &mp->m_perag[agno];		if (!pag->pagi_init) {			if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {				agbp = NULL;				goto nextag;			}		} else			agbp = NULL;		if (!pag->pagi_inodeok) {			xfs_ialloc_next_ag(mp);			goto unlock_nextag;		}		/*		 * Is there enough free space for the file plus a block		 * of inodes (if we need to allocate some)?		 */		ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);		if (ineed && !pag->pagf_init) {			if (agbp == NULL &&			    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {				agbp = NULL;				goto nextag;			}			(void)xfs_alloc_pagf_init(mp, tp, agno, flags);		}		if (!ineed || pag->pagf_init) {			if (ineed && !(longest = pag->pagf_longest))				longest = pag->pagf_flcount > 0;			if (!ineed ||			    (pag->pagf_freeblks >= needspace + ineed &&			     longest >= ineed &&			     okalloc)) {				if (agbp == NULL &&				    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {					agbp = NULL;					goto nextag;				}				up_read(&mp->m_peraglock);				return agbp;			}		}unlock_nextag:		if (agbp)			xfs_trans_brelse(tp, agbp);nextag:		/*		 * No point in iterating over the rest, if we're shutting		 * down.		 */		if (XFS_FORCED_SHUTDOWN(mp)) {//.........这里部分代码省略.........
Developer ID: xiandaicxsj, project: copyKvm, lines of code: 101


Example 4: xwcMemTranslateUserVirtual

RMstatus xwcMemTranslateUserVirtual(uint32_t    entity,                                    void       *desc,                                    void       *auxmap){    unsigned       i;    linkEntry     *hwsgl;    size_t         count;    int            pageEst;    T2DPD         *pd = desc;    T2DESC_AUXMAP *pdmap = auxmap;    /* Estimate the number of needed page pointers */    pageEst = (((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK) +               pd->pair[entity].size + ~PAGE_MASK) >> PAGE_SHIFT;    /* Allocate list of pointers to pages for this user buffer reference */    pdmap->pair[entity].pages = vmalloc(pageEst * sizeof(pdmap->pair[entity].pages));    if (pdmap->pair[entity].pages == NULL)        return RM_NO_MEMORY;    /* Lock this process' pages and map them. The descriptor pair pointer */    /* still references the user's buffer at this point                   */    down_read(&current->mm->mmap_sem);    pdmap->pair[entity].pageCt =        get_user_pages(current,                       current->mm,                       (unsigned long)pd->pair[entity].ptr,                       pageEst,                       WRITE, 1,                       pdmap->pair[entity].pages,                       NULL);    up_read(&current->mm->mmap_sem);    /* here for development, remove once stabilized */    if (pageEst != pdmap->pair[entity].pageCt)        printk("t23xwc: user page estimate = %d, actual = %d/n", pageEst, pdmap->pair[entity].pageCt);    if (pdmap->pair[entity].pageCt > pageEst)        panic("t23xwc - user pages mapped exceeds estimate/n");    /* Needed user pages are now mapped. If data element fits in 1 page, then */    /* we can just do a physical pointer, no scatterlist is needed. If it     */    /* exceeds one page, we must have a scatterlist                           */    if (pdmap->pair[entity].pageCt > 1) /* Does entry span pages? */    {        /* Allocate "hardware" scatterlist */        hwsgl = kmalloc(pageEst * sizeof(linkEntry), GFP_KERNEL | GFP_DMA);        if (hwsgl == NULL)        {            /* Out of kmalloc() space, gotta bail. Release mapped pages */            for (i = 0; i < pdmap->pair[entity].pageCt; i++)                page_cache_release(pdmap->pair[entity].pages[i]);            /* Free allocated page list */            vfree(pdmap->pair[entity].pages);            return RM_NO_MEMORY;        }        count = pd->pair[entity].size;        hwsgl[0].segAddr =            (unsigned char *)virt_to_phys(lowmem_page_address(pdmap->pair[entity].pages[0]) +                                          ((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK));        hwsgl[0].chainCtrl = 0;        hwsgl[0].extAddr   = 0;        if (pdmap->pair[entity].pageCt > 1)        {            hwsgl[0].segLen = PAGE_SIZE - ((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK);            count -= hwsgl[0].segLen;            for (i = 1; i < pdmap->pair[entity].pageCt; i++)            {                hwsgl[i].segLen    = count < PAGE_SIZE ? 
count : PAGE_SIZE;                hwsgl[i].segAddr   = (unsigned char *)                                     virt_to_phys(lowmem_page_address(pdmap->pair[entity].pages[i]));                hwsgl[i].extAddr   = 0;                hwsgl[i].chainCtrl = 0;                count -= PAGE_SIZE;            }        }        else            hwsgl[0].segLen = pd->pair[entity].size;        /* mark the last entry in the Talitos scatterlist */        hwsgl[pdmap->pair[entity].pageCt - 1].chainCtrl = LAST_ENTRY;        /* Point to descriptor pair to the Talitos scatterlist */        pd->pair[entity].ptr     = (unsigned char *)virt_to_phys(hwsgl);        pd->pair[entity].extent |= JUMPTABLE;    }    else        pd->pair[entity].ptr =            (unsigned char *)virt_to_phys(lowmem_page_address(pdmap->pair[entity].pages[0]) +                                          ((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK));    return RM_OK;}
Developer ID: DavionKnight, project: H18CE-1604C, lines of code: 100


Example 5: c_stop

static void c_stop(struct seq_file *m, void *p)
{
	up_read(&crypto_alg_sem);
}
Developer ID: Voskrese, project: mipsonqemu, lines of code: 4


Example 6: nilfs_fiemap

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,		 __u64 start, __u64 len){	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;	__u64 logical = 0, phys = 0, size = 0;	__u32 flags = 0;	loff_t isize;	sector_t blkoff, end_blkoff;	sector_t delalloc_blkoff;	unsigned long delalloc_blklen;	unsigned int blkbits = inode->i_blkbits;	int ret, n;	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);	if (ret)		return ret;	mutex_lock(&inode->i_mutex);	isize = i_size_read(inode);	blkoff = start >> blkbits;	end_blkoff = (start + len - 1) >> blkbits;	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,							&delalloc_blkoff);	do {		__u64 blkphy;		unsigned int maxblocks;		if (delalloc_blklen && blkoff == delalloc_blkoff) {			if (size) {								ret = fiemap_fill_next_extent(					fieinfo, logical, phys, size, flags);				if (ret)					break;			}			if (blkoff > end_blkoff)				break;			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;			logical = blkoff << blkbits;			phys = 0;			size = delalloc_blklen << blkbits;			blkoff = delalloc_blkoff + delalloc_blklen;			delalloc_blklen = nilfs_find_uncommitted_extent(				inode, blkoff, &delalloc_blkoff);			continue;		}		maxblocks = INT_MAX;		if (delalloc_blklen)			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,					  maxblocks);		blkphy = 0;		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);		n = nilfs_bmap_lookup_contig(			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);		if (n < 0) {			int past_eof;			if (unlikely(n != -ENOENT))				break; 						blkoff++;			past_eof = ((blkoff << blkbits) >= isize);			if (size) {								if (past_eof)					flags |= FIEMAP_EXTENT_LAST;				ret = fiemap_fill_next_extent(					fieinfo, logical, phys, size, flags);				if (ret)					break;				size = 0;			}			if (blkoff > end_blkoff || past_eof)				break;		} else {			if (size) {				if (phys && blkphy << blkbits == phys + size) {										size += n << blkbits;				} else {										ret = fiemap_fill_next_extent(						fieinfo, logical, phys, size,						flags);					if (ret || blkoff > end_blkoff)						break;//.........这里部分代码省略.........
Developer ID: MiniBlu, project: cm11_kernel_htc_msm8974a3ul, lines of code: 101


Example 7: do_exception

//.........这里部分代码省略.........	tsk = current;	mm = tsk->mm;	/*	 * Verify that the fault happened in user space, that	 * we are not in an interrupt and that there is a 	 * user context.	 */	fault = VM_FAULT_BADCONTEXT;	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))		goto out;	address = trans_exc_code & __FAIL_ADDR_MASK;	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);	flags = FAULT_FLAG_ALLOW_RETRY;	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)		flags |= FAULT_FLAG_WRITE;	down_read(&mm->mmap_sem);#ifdef CONFIG_PGSTE	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {		address = __gmap_fault(address,				     (struct gmap *) S390_lowcore.gmap);		if (address == -EFAULT) {			fault = VM_FAULT_BADMAP;			goto out_up;		}		if (address == -ENOMEM) {			fault = VM_FAULT_OOM;			goto out_up;		}	}#endifretry:	fault = VM_FAULT_BADMAP;	vma = find_vma(mm, address);	if (!vma)		goto out_up;	if (unlikely(vma->vm_start > address)) {		if (!(vma->vm_flags & VM_GROWSDOWN))			goto out_up;		if (expand_stack(vma, address))			goto out_up;	}	/*	 * Ok, we have a good vm_area for this memory access, so	 * we can handle it..	 */	fault = VM_FAULT_BADACCESS;	if (unlikely(!(vma->vm_flags & access)))		goto out_up;	if (is_vm_hugetlb_page(vma))		address &= HPAGE_MASK;	/*	 * If for any reason at all we couldn't handle the fault,	 * make sure we exit gracefully rather than endlessly redo	 * the fault.	 */	fault = handle_mm_fault(mm, vma, address, flags);	if (unlikely(fault & VM_FAULT_ERROR))		goto out_up;	/*	 * Major/minor page fault accounting is only done on the	 * initial attempt. If we go through a retry, it is extremely	 * likely that the page will be found in page cache at that point.	 */	if (flags & FAULT_FLAG_ALLOW_RETRY) {		if (fault & VM_FAULT_MAJOR) {			tsk->maj_flt++;			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,				      regs, address);		} else {			tsk->min_flt++;			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,				      regs, address);		}		if (fault & VM_FAULT_RETRY) {			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk			 * of starvation. */			flags &= ~FAULT_FLAG_ALLOW_RETRY;			down_read(&mm->mmap_sem);			goto retry;		}	}	/*	 * The instruction that caused the program check will	 * be repeated. Don't signal single step via SIGTRAP.	 */	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);	fault = 0;out_up:	up_read(&mm->mmap_sem);out:	return fault;}
Developer ID: 08opt, project: linux, lines of code: 101


Example 8: lov_conf_thaw

static inline void lov_conf_thaw(struct lov_object *lov)
{
	if (lov->lo_owner != current)
		up_read(&lov->lo_type_guard);
}
Developer ID: IDM350, project: linux, lines of code: 5


Example 9: do_page_fault

int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault;

	tsk = current;
	mm  = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	fault = __do_page_fault(mm, addr, fsr, tsk);
	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first
	 */
	if (fault > 0)
		return 0;

	/*
	 * We had some memory, but were unable to
	 * successfully fix up this page fault.
	 */
	if (fault == 0)
		goto do_sigbus;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault == -3) {
		/*
		 * We ran out of memory, or some other thing happened to
		 * us that made us unable to handle the page fault gracefully.
		 */
		printk("VM: killing process %s\n", tsk->comm);
		do_exit(SIGKILL);
	} else
		__do_user_fault(tsk, addr, fsr, fault == -1 ?
				SEGV_ACCERR : SEGV_MAPERR, regs);
	return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
do_sigbus:
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);
#ifdef CONFIG_DEBUG_USER
	printk("%s: sigbus at 0x%08lx, pc=0x%08lx\n",
		current->comm, addr, instruction_pointer(regs));
#endif

	/* Kernel mode? Handle exceptions or die */
	if (user_mode(regs))
		return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
Developer ID: SimonKagstrom, project: mci500h-linux-2.4.27, lines of code: 79


Example 10: do_page_fault

void do_page_fault(struct pt_regs *regs){	struct vm_area_struct * vma;	struct mm_struct *mm = current->mm;	unsigned int exccause = regs->exccause;	unsigned int address = regs->excvaddr;	siginfo_t info;	int is_write, is_exec;	int fault;	info.si_code = SEGV_MAPERR;	/* We fault-in kernel-space virtual memory on-demand. The	 * 'reference' page table is init_mm.pgd.	 */	if (address >= TASK_SIZE && !user_mode(regs))		goto vmalloc_fault;	/* If we're in an interrupt or have no user	 * context, we must not take the fault..	 */	if (in_atomic() || !mm) {		bad_page_fault(regs, address, SIGSEGV);		return;	}	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||		    exccause == EXCCAUSE_ITLB_MISS ||		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;#ifdef DEBUG_PAGE_FAULT	printk("[%s:%d:%08x:%d:%08x:%s%s]/n", current->comm, current->pid,	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");#endif	down_read(&mm->mmap_sem);	vma = find_vma(mm, address);	if (!vma)		goto bad_area;	if (vma->vm_start <= address)		goto good_area;	if (!(vma->vm_flags & VM_GROWSDOWN))		goto bad_area;	if (expand_stack(vma, address))		goto bad_area;	/* Ok, we have a good vm_area for this memory access, so	 * we can handle it..	 */good_area:	info.si_code = SEGV_ACCERR;	if (is_write) {		if (!(vma->vm_flags & VM_WRITE))			goto bad_area;	} else if (is_exec) {		if (!(vma->vm_flags & VM_EXEC))			goto bad_area;	} else	/* Allow read even from write-only pages. */		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))			goto bad_area;	/* If for any reason at all we couldn't handle the fault,	 * make sure we exit gracefully rather than endlessly redo	 * the fault.	 */survive:	fault = handle_mm_fault(mm, vma, address, is_write);	if (unlikely(fault & VM_FAULT_ERROR)) {		if (fault & VM_FAULT_OOM)			goto out_of_memory;		else if (fault & VM_FAULT_SIGBUS)			goto do_sigbus;		BUG();	}	if (fault & VM_FAULT_MAJOR)		current->maj_flt++;	else		current->min_flt++;	up_read(&mm->mmap_sem);	return;	/* Something tried to access memory that isn't in our memory map..	 * Fix it, but check if it's kernel or user first..	 */bad_area:	up_read(&mm->mmap_sem);	if (user_mode(regs)) {		current->thread.bad_vaddr = address;		current->thread.error_code = is_write;		info.si_signo = SIGSEGV;		info.si_errno = 0;		/* info.si_code has been set above */		info.si_addr = (void *) address;		force_sig_info(SIGSEGV, &info, current);//.........这里部分代码省略.........
Developer ID: E-LLP, project: n900, lines of code: 101


Example 11: do_page_fault

/* * This routine handles page faults.  It determines the problem, and * then passes it off to one of the appropriate routines. * * error_code: *	bit 0 == 0 means no page found, 1 means protection fault *	bit 1 == 0 means read, 1 means write * * If this routine detects a bad access, it returns 1, otherwise it * returns 0. */int do_page_fault(struct pt_regs *regs, unsigned long address,			      unsigned long error_code){	struct mm_struct *mm = current->mm;	struct vm_area_struct * vma;	int write, fault;#ifdef DEBUG	printk ("do page fault:/nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p/n",		regs->sr, regs->pc, address, error_code,		current->mm->pgd);#endif	/*	 * If we're in an interrupt or have no user	 * context, we must not take the fault..	 */	if (!mm || pagefault_disabled())		goto no_context;	down_read(&mm->mmap_sem);	vma = find_vma(mm, address);	if (!vma)		goto map_err;	if (vma->vm_flags & VM_IO)		goto acc_err;	if (vma->vm_start <= address)		goto good_area;	if (!(vma->vm_flags & VM_GROWSDOWN))		goto map_err;	if (user_mode(regs)) {		/* Accessing the stack below usp is always a bug.  The		   "+ 256" is there due to some instructions doing		   pre-decrement on the stack and that doesn't show up		   until later.  */		if (address + 256 < rdusp())			goto map_err;	}	if (expand_stack(vma, address))		goto map_err;/* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */good_area:#ifdef DEBUG	printk("do_page_fault: good_area/n");#endif	write = 0;	switch (error_code & 3) {		default:	/* 3: write, present */			/* fall through */		case 2:		/* write, not present */			if (!(vma->vm_flags & VM_WRITE))				goto acc_err;			write++;			break;		case 1:		/* read, present */			goto acc_err;		case 0:		/* read, not present */			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))				goto acc_err;	}	/*	 * If for any reason at all we couldn't handle the fault,	 * make sure we exit gracefully rather than endlessly redo	 * the fault.	 */	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);#ifdef DEBUG	printk("handle_mm_fault returns %d/n",fault);#endif	if (unlikely(fault & VM_FAULT_ERROR)) {		if (fault & VM_FAULT_OOM)			goto out_of_memory;		else if (fault & VM_FAULT_SIGBUS)			goto bus_err;		BUG();	}	if (fault & VM_FAULT_MAJOR)		current->maj_flt++;	else		current->min_flt++;	up_read(&mm->mmap_sem);//.........这里部分代码省略.........
Developer ID: dh-electronics, project: linux-am35x, lines of code: 101


Example 12: do_page_fault

static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,				   struct pt_regs *regs){	struct task_struct *tsk;	struct mm_struct *mm;	int fault, sig, code;	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;	tsk = current;	mm  = tsk->mm;		if (interrupts_enabled(regs))		local_irq_enable();	if (in_atomic() || !mm)		goto no_context;	if (user_mode(regs))		mm_flags |= FAULT_FLAG_USER;	if (esr & ESR_LNX_EXEC) {		vm_flags = VM_EXEC;	} else if (esr & ESR_EL1_WRITE) {		vm_flags = VM_WRITE;		mm_flags |= FAULT_FLAG_WRITE;	}	if (!down_read_trylock(&mm->mmap_sem)) {		if (!user_mode(regs) && !search_exception_tables(regs->pc))			goto no_context;retry:		down_read(&mm->mmap_sem);	} else {		might_sleep();#ifdef CONFIG_DEBUG_VM		if (!user_mode(regs) && !search_exception_tables(regs->pc))			goto no_context;#endif	}	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))		return 0;	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {		if (fault & VM_FAULT_MAJOR) {			tsk->maj_flt++;			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,				      addr);		} else {			tsk->min_flt++;			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,				      addr);		}		if (fault & VM_FAULT_RETRY) {			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;			mm_flags |= FAULT_FLAG_TRIED;			goto retry;		}	}	up_read(&mm->mmap_sem);	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |			      VM_FAULT_BADACCESS))))		return 0;	if (!user_mode(regs))		goto no_context;	if (fault & VM_FAULT_OOM) {		pagefault_out_of_memory();		return 0;	}	if (fault & VM_FAULT_SIGBUS) {		sig = SIGBUS;		code = BUS_ADRERR;	} else {		sig = SIGSEGV;		code = fault == VM_FAULT_BADACCESS ?			SEGV_ACCERR : SEGV_MAPERR;	}	__do_user_fault(tsk, addr, esr, sig, code, regs);	return 0;no_context:	__do_kernel_fault(mm, addr, esr, regs);	return 0;}
Developer ID: Clumsy-Kernel-Development, project: HTC_10_Kernel, lines of code: 96


Example 13: do_page_fault

//.........这里部分代码省略.........		else if (fault & VM_FAULT_SIGSEGV)			goto bad_area;		else if (fault & VM_FAULT_SIGBUS)			goto do_sigbus;		BUG();	}	/*	 * Major/minor page fault accounting is only done on the	 * initial attempt. If we go through a retry, it is extremely	 * likely that the page will be found in page cache at that point.	 */	if (flags & FAULT_FLAG_ALLOW_RETRY) {		if (fault & VM_FAULT_MAJOR)			current->maj_flt++;		else			current->min_flt++;		if (fault & VM_FAULT_RETRY) {			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk			 * of starvation. */			flags &= ~FAULT_FLAG_ALLOW_RETRY;			flags |= FAULT_FLAG_TRIED;			/*			 * No need to up_read(&mm->mmap_sem) as we would			 * have already released it in __lock_page_or_retry			 * in mm/filemap.c.			 */			goto retry;		}	}	up_read(&mm->mmap_sem);	return;/* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */bad_area:	up_read(&mm->mmap_sem);bad_area_nosemaphore:	/* User mode accesses just cause a SIGSEGV */	if (user_mode(regs)) {		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {			pr_info("%s: unhandled page fault (%d) at 0x%08lx, "				"cause %ld/n", current->comm, SIGSEGV, address, cause);			show_regs(regs);		}		_exception(SIGSEGV, regs, code, address);		return;	}no_context:	/* Are we prepared to handle this kernel fault? */	if (fixup_exception(regs))		return;	/*	 * Oops. The kernel tried to access some bad page. We'll have to	 * terminate things with extreme prejudice.	 */	bust_spinlocks(1);
Developer ID: AlexShiLucky, project: linux, lines of code: 66


Example 14: __i915_gem_userptr_get_pages_worker

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct vm_page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}
Developer ID: bluhm, project: sys, lines of code: 66


Example 15: do_page_fault

void do_page_fault(struct pt_regs *regs, int write, unsigned long address,		   unsigned long cause_code){	struct vm_area_struct *vma = NULL;	struct task_struct *tsk = current;	struct mm_struct *mm = tsk->mm;	siginfo_t info;	int fault, ret;	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;	/*	 * We fault-in kernel-space virtual memory on-demand. The	 * 'reference' page table is init_mm.pgd.	 *	 * NOTE! We MUST NOT take any locks for this case. We may	 * be in an interrupt or a critical region, and should	 * only copy the information from the master page table,	 * nothing more.	 */	if (address >= VMALLOC_START && address <= VMALLOC_END) {		ret = handle_vmalloc_fault(address);		if (unlikely(ret))			goto bad_area_nosemaphore;		else			return;	}	info.si_code = SEGV_MAPERR;	/*	 * If we're in an interrupt or have no user	 * context, we must not take the fault..	 */	if (in_atomic() || !mm)		goto no_context;	if (user_mode(regs))		flags |= FAULT_FLAG_USER;retry:	down_read(&mm->mmap_sem);	vma = find_vma(mm, address);	if (!vma)		goto bad_area;	if (vma->vm_start <= address)		goto good_area;	if (!(vma->vm_flags & VM_GROWSDOWN))		goto bad_area;	if (expand_stack(vma, address))		goto bad_area;	/*	 * Ok, we have a good vm_area for this memory access, so	 * we can handle it..	 */good_area:	info.si_code = SEGV_ACCERR;	/* Handle protection violation, execute on heap or stack */	if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))		goto bad_area;	if (write) {		if (!(vma->vm_flags & VM_WRITE))			goto bad_area;		flags |= FAULT_FLAG_WRITE;	} else {		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))			goto bad_area;	}survive:	/*	 * If for any reason at all we couldn't handle the fault,	 * make sure we exit gracefully rather than endlessly redo	 * the fault.	 */	fault = handle_mm_fault(mm, vma, address, flags);	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */	if (unlikely(fatal_signal_pending(current))) {		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))			up_read(&mm->mmap_sem);		if (user_mode(regs))			return;	}	if (likely(!(fault & VM_FAULT_ERROR))) {		if (flags & FAULT_FLAG_ALLOW_RETRY) {			/* To avoid updating stats twice for retry case */			if (fault & VM_FAULT_MAJOR)				tsk->maj_flt++;			else				tsk->min_flt++;			if (fault & VM_FAULT_RETRY) {				flags &= ~FAULT_FLAG_ALLOW_RETRY;				flags |= FAULT_FLAG_TRIED;				goto retry;			}//.........这里部分代码省略.........
Developer ID: Mr-AW, project: Kernel_TeLo_LP_LenovoA6000, lines of code: 101


Example 16: nilfs_cpfile_do_get_ssinfo

static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,					  void *buf, unsigned cisz, size_t nci){	struct buffer_head *bh;	struct nilfs_cpfile_header *header;	struct nilfs_checkpoint *cp;	struct nilfs_cpinfo *ci = buf;	__u64 curr = *cnop, next;	unsigned long curr_blkoff, next_blkoff;	void *kaddr;	int n = 0, ret;	down_read(&NILFS_MDT(cpfile)->mi_sem);	if (curr == 0) {		ret = nilfs_cpfile_get_header_block(cpfile, &bh);		if (ret < 0)			goto out;		kaddr = kmap_atomic(bh->b_page);		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);		kunmap_atomic(kaddr);		brelse(bh);		if (curr == 0) {			ret = 0;			goto out;		}	} else if (unlikely(curr == ~(__u64)0)) {		ret = 0;		goto out;	}	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);	if (unlikely(ret < 0)) {		if (ret == -ENOENT)			ret = 0; 		goto out;	}	kaddr = kmap_atomic(bh->b_page);	while (n < nci) {		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);		curr = ~(__u64)0; 		if (unlikely(nilfs_checkpoint_invalid(cp) ||			     !nilfs_checkpoint_snapshot(cp)))			break;		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);		ci = (void *)ci + cisz;		n++;		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);		if (next == 0)			break; 		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);		if (curr_blkoff != next_blkoff) {			kunmap_atomic(kaddr);			brelse(bh);			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,								0, &bh);			if (unlikely(ret < 0)) {				WARN_ON(ret == -ENOENT);				goto out;			}			kaddr = kmap_atomic(bh->b_page);		}		curr = next;		curr_blkoff = next_blkoff;	}	kunmap_atomic(kaddr);	brelse(bh);	*cnop = curr;	ret = n; out:	up_read(&NILFS_MDT(cpfile)->mi_sem);	return ret;}
Developer ID: MiniBlu, project: cm11_kernel_htc_msm8974a3ul, lines of code: 77


Example 17: f2fs_sync_file

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync){	struct inode *inode = file->f_mapping->host;	struct f2fs_inode_info *fi = F2FS_I(inode);	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);	nid_t ino = inode->i_ino;	int ret = 0;	bool need_cp = false;	struct writeback_control wbc = {		.sync_mode = WB_SYNC_ALL,		.nr_to_write = LONG_MAX,		.for_reclaim = 0,	};	if (unlikely(f2fs_readonly(inode->i_sb)))		return 0;	trace_f2fs_sync_file_enter(inode);	/* if fdatasync is triggered, let's do in-place-update */	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)		set_inode_flag(fi, FI_NEED_IPU);	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);	clear_inode_flag(fi, FI_NEED_IPU);	if (ret) {		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);		return ret;	}	/* if the inode is dirty, let's recover all the time */	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {		update_inode_page(inode);		goto go_write;	}	/*	 * if there is no written data, don't waste time to write recovery info.	 */	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&			!exist_written_data(sbi, ino, APPEND_INO)) {		/* it may call write_inode just prior to fsync */		if (need_inode_page_update(sbi, ino))			goto go_write;		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||				exist_written_data(sbi, ino, UPDATE_INO))			goto flush_out;		goto out;	}go_write:	/* guarantee free sections for fsync */	f2fs_balance_fs(sbi);	/*	 * Both of fdatasync() and fsync() are able to be recovered from	 * sudden-power-off.	 */	down_read(&fi->i_sem);	need_cp = need_do_checkpoint(inode);	up_read(&fi->i_sem);	if (need_cp) {		/* all the dirty node pages should be flushed for POR */		ret = f2fs_sync_fs(inode->i_sb, 1);		/*		 * We've secured consistency through sync_fs. Following pino		 * will be used only for fsynced inodes after checkpoint.		 */		try_to_fix_pino(inode);		clear_inode_flag(fi, FI_APPEND_WRITE);		clear_inode_flag(fi, FI_UPDATE_WRITE);		goto out;	}sync_nodes:	sync_node_pages(sbi, ino, &wbc);	/* if cp_error was enabled, we should avoid infinite loop */	if (unlikely(f2fs_cp_error(sbi)))		goto out;	if (need_inode_block_update(sbi, ino)) {		mark_inode_dirty_sync(inode);		f2fs_write_inode(inode, NULL);		goto sync_nodes;	}	ret = wait_on_node_pages_writeback(sbi, ino);	if (ret)		goto out;	/* once recovery info is written, don't need to tack this */	remove_dirty_inode(sbi, ino, APPEND_INO);	clear_inode_flag(fi, FI_APPEND_WRITE);flush_out:	remove_dirty_inode(sbi, ino, UPDATE_INO);	clear_inode_flag(fi, FI_UPDATE_WRITE);	ret = f2fs_issue_flush(sbi);//.........这里部分代码省略.........
Developer ID: ench0, project: android_kernel_samsung_hltet, lines of code: 101


Example 18: siw_post_receive

/* * siw_post_receive() * * Post a list of R-WR's to a RQ. * * @ofa_qp:	OFA QP contained in siw QP * @wr:		Null terminated list of user WR's * @bad_wr:	Points to failing WR in case of synchronous failure. */int siw_post_receive(struct ib_qp *ofa_qp, struct ib_recv_wr *wr,		     struct ib_recv_wr **bad_wr){	struct siw_wqe	*wqe = NULL;	struct siw_qp	*qp = siw_qp_ofa2siw(ofa_qp);	unsigned long	flags;	int rv = 0;	dprint(DBG_WR|DBG_TX, "(QP%d): state=%d/n", QP_ID(qp),		qp->attrs.state);	if (unlikely(qp->srq)) {		*bad_wr = wr;		return -EOPNOTSUPP; /* what else from errno.h? */	}	/*	 * Try to acquire QP state lock. Must be non-blocking	 * to accommodate kernel clients needs.	 */	if (!down_read_trylock(&qp->state_lock)) {		*bad_wr = wr;		return -ENOTCONN;	}	if (qp->attrs.state > SIW_QP_STATE_RTS) {		up_read(&qp->state_lock);		dprint(DBG_ON, " (QP%d): state=%d/n", QP_ID(qp),			qp->attrs.state);		*bad_wr = wr;		return -EINVAL;	}	while (wr) {		wqe = siw_wqe_alloc(qp, SIW_WR_RECEIVE);		if (!wqe) {			rv = -ENOMEM;			break;		}		if (wr->num_sge > qp->attrs.rq_max_sges) {			dprint(DBG_WR|DBG_ON, "(QP%d): Num SGE: %d/n",				QP_ID(qp), wr->num_sge);			rv = -EINVAL;			break;		}		wr_type(wqe) = SIW_WR_RECEIVE;		wr_id(wqe) = wr->wr_id;		rv = siw_copy_sgl(wr->sg_list, wqe->wr.recv.sge, wr->num_sge);		if (rv < 0) {			/*			 * XXX tentatively allow zero length receive			 */			rv = -EINVAL;			break;		}		wqe->wr.recv.num_sge = wr->num_sge;		wqe->bytes = rv;		wqe->wr_status = SR_WR_QUEUED;		lock_rq_rxsave(qp, flags);		list_add_tail(&wqe->list, &qp->rq);		unlock_rq_rxsave(qp, flags);		wr = wr->next;	}	if (rv <= 0) {		dprint(DBG_WR|DBG_ON, "(QP%d): error=%d/n", QP_ID(qp), rv);		if (wqe != NULL)			siw_wqe_put(wqe);		*bad_wr = wr;	}	dprint(DBG_WR|DBG_RX, "(QP%d): rq_space=%d/n", QP_ID(qp),		atomic_read(&qp->rq_space));	up_read(&qp->state_lock);	return rv > 0 ? 0 : rv;}
Developer ID: asaf-levy, project: softiwarp, lines of code: 86


Example 19: xfs_filestream_associate

/* * xfs_filestream_associate() should only be called to associate a regular file * with its parent directory.  Calling it with a child directory isn't * appropriate because filestreams don't apply to entire directory hierarchies. * Creating a file in a child directory of an existing filestream directory * starts a new filestream with its own allocation group association. * * Returns < 0 on error, 0 if successful association occurred, > 0 if * we failed to get an association because of locking issues. */intxfs_filestream_associate(	xfs_inode_t	*pip,	xfs_inode_t	*ip){	xfs_mount_t	*mp;	xfs_mru_cache_t	*cache;	fstrm_item_t	*item;	xfs_agnumber_t	ag, rotorstep, startag;	int		err = 0;	ASSERT(pip->i_d.di_mode & S_IFDIR);	ASSERT(ip->i_d.di_mode & S_IFREG);	if (!(pip->i_d.di_mode & S_IFDIR) || !(ip->i_d.di_mode & S_IFREG))		return -EINVAL;	mp = pip->i_mount;	cache = mp->m_filestream;	down_read(&mp->m_peraglock);	/*	 * We have a problem, Houston.	 *	 * Taking the iolock here violates inode locking order - we already	 * hold the ilock. Hence if we block getting this lock we may never	 * wake. Unfortunately, that means if we can't get the lock, we're	 * screwed in terms of getting a stream association - we can't spin	 * waiting for the lock because someone else is waiting on the lock we	 * hold and we cannot drop that as we are in a transaction here.	 *	 * Lucky for us, this inversion is not a problem because it's a	 * directory inode that we are trying to lock here.	 *	 * So, if we can't get the iolock without sleeping then just give up	 */	if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) {		up_read(&mp->m_peraglock);		return 1;	}	/* If the parent directory is already in the cache, use its AG. */	item = xfs_mru_cache_lookup(cache, pip->i_ino);	if (item) {		ASSERT(item->ip == pip);		ag = item->ag;		xfs_mru_cache_done(cache);		TRACE_LOOKUP(mp, pip, pip, ag, xfs_filestream_peek_ag(mp, ag));		err = _xfs_filestream_update_ag(ip, pip, ag);		goto exit;	}	/*	 * Set the starting AG using the rotor for inode32, otherwise	 * use the directory inode's AG.	 */	if (mp->m_flags & XFS_MOUNT_32BITINODES) {		rotorstep = xfs_rotorstep;		startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount;		mp->m_agfrotor = (mp->m_agfrotor + 1) %		                 (mp->m_sb.sb_agcount * rotorstep);	} else		startag = XFS_INO_TO_AGNO(mp, pip->i_ino);	/* Pick a new AG for the parent inode starting at startag. */	err = _xfs_filestream_pick_ag(mp, startag, &ag, 0, 0);	if (err || ag == NULLAGNUMBER)		goto exit_did_pick;	/* Associate the parent inode with the AG. */	err = _xfs_filestream_update_ag(pip, NULL, ag);	if (err)		goto exit_did_pick;	/* Associate the file inode with the AG. */	err = _xfs_filestream_update_ag(ip, pip, ag);	if (err)		goto exit_did_pick;	TRACE_ASSOCIATE(mp, ip, pip, ag, xfs_filestream_peek_ag(mp, ag));exit_did_pick:	/*	 * If _xfs_filestream_pick_ag() returned a valid AG, remove the	 * reference it took on it, since the file and directory will have taken	 * their own now if they were successfully cached.	 */	if (ag != NULLAGNUMBER)		xfs_filestream_put_ag(mp, ag);//.........这里部分代码省略.........
Developer ID: mikuhatsune001, project: linux2.6.32, lines of code: 101


Example 20: siw_post_send

/* * siw_post_send() * * Post a list of S-WR's to a SQ. * * @ofa_qp:	OFA QP contained in siw QP * @wr:		Null terminated list of user WR's * @bad_wr:	Points to failing WR in case of synchronous failure. */int siw_post_send(struct ib_qp *ofa_qp, struct ib_send_wr *wr,		  struct ib_send_wr **bad_wr){	struct siw_wqe	*wqe = NULL;	struct siw_qp	*qp = siw_qp_ofa2siw(ofa_qp);	unsigned long flags;	int rv = 0;	dprint(DBG_WR|DBG_TX, "(QP%d): state=%d/n",		QP_ID(qp), qp->attrs.state);	/*	 * Try to acquire QP state lock. Must be non-blocking	 * to accommodate kernel clients needs.	 */	if (!down_read_trylock(&qp->state_lock)) {		*bad_wr = wr;		return -ENOTCONN;	}	if (qp->attrs.state != SIW_QP_STATE_RTS) {		dprint(DBG_WR|DBG_ON, "(QP%d): state=%d/n",			QP_ID(qp), qp->attrs.state);		up_read(&qp->state_lock);		*bad_wr = wr;		return -ENOTCONN;	}	dprint(DBG_WR|DBG_TX, "(QP%d): sq_space(#1)=%d/n",		QP_ID(qp), atomic_read(&qp->sq_space));	while (wr) {		wqe = siw_wqe_alloc(qp, opcode_ofa2siw(wr->opcode));		if (!wqe) {			dprint(DBG_ON, " siw_wqe_alloc/n");			rv = -ENOMEM;			break;		}		wr_type(wqe) = opcode_ofa2siw(wr->opcode);		wr_id(wqe) = wr->wr_id;		wr_flags(wqe) = wr->send_flags;		if (qp->attrs.flags & SIW_SIGNAL_ALL_WR)			wr_flags(wqe) |= IB_SEND_SIGNALED;		if (wr->num_sge > qp->attrs.sq_max_sges) {			/*			 * NOTE: we allow for zero length wr's here.			 */			dprint(DBG_WR, "(QP%d): Num SGE: %d/n",				QP_ID(qp), wr->num_sge);			rv = -EINVAL;			break;		}		switch (wr->opcode) {		case IB_WR_SEND:			if (!SIW_INLINED_DATA(wqe)) {				rv = siw_copy_sgl(wr->sg_list,						  wqe->wr.send.sge,						  wr->num_sge);				wqe->wr.send.num_sge = wr->num_sge;			} else				rv = siw_copy_inline_sgl(wr, wqe);			if (rv < 0) {				rv = -EINVAL;				break;			}			wqe->bytes = rv;			break;		case IB_WR_RDMA_READ:			/*			 * OFED WR restricts RREAD sink to SGL containing			 * 1 SGE only. we could relax to SGL with multiple			 * elements referring the SAME ltag or even sending			 * a private per-rreq tag referring to a checked			 * local sgl with MULTIPLE ltag's. would be easy			 * to do...			 */			if (wr->num_sge != 1) {				rv = -EINVAL;				break;			}			rv = siw_copy_sgl(wr->sg_list, wqe->wr.rread.sge, 1);			/*			 * NOTE: zero length RREAD is allowed!			 */			wqe->wr.rread.raddr = wr->wr.rdma.remote_addr;//.........这里部分代码省略.........
Developer ID: asaf-levy, project: softiwarp, lines of code: 101


Example 21: sys_msgctl

asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf){	struct msg_queue *msq;	int err, version;	struct ipc_namespace *ns;	if (msqid < 0 || cmd < 0)		return -EINVAL;	version = ipc_parse_version(&cmd);	ns = current->nsproxy->ipc_ns;	switch (cmd) {	case IPC_INFO:	case MSG_INFO:	{		struct msginfo msginfo;		int max_id;		if (!buf)			return -EFAULT;		/*		 * We must not return kernel stack data.		 * due to padding, it's not enough		 * to set all member fields.		 */		err = security_msg_queue_msgctl(NULL, cmd);		if (err)			return err;		memset(&msginfo, 0, sizeof(msginfo));		msginfo.msgmni = ns->msg_ctlmni;		msginfo.msgmax = ns->msg_ctlmax;		msginfo.msgmnb = ns->msg_ctlmnb;		msginfo.msgssz = MSGSSZ;		msginfo.msgseg = MSGSEG;		down_read(&msg_ids(ns).rw_mutex);		if (cmd == MSG_INFO) {			msginfo.msgpool = msg_ids(ns).in_use;			msginfo.msgmap = atomic_read(&ns->msg_hdrs);			msginfo.msgtql = atomic_read(&ns->msg_bytes);		} else {			msginfo.msgmap = MSGMAP;			msginfo.msgpool = MSGPOOL;			msginfo.msgtql = MSGTQL;		}		max_id = ipc_get_maxid(&msg_ids(ns));		up_read(&msg_ids(ns).rw_mutex);		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))			return -EFAULT;		return (max_id < 0) ? 0 : max_id;	}	case MSG_STAT:	/* msqid is an index rather than a msg queue id */	case IPC_STAT:	{		struct msqid64_ds tbuf;		int success_return;		if (!buf)			return -EFAULT;		if (cmd == MSG_STAT) {			msq = msg_lock(ns, msqid);			if (IS_ERR(msq))				return PTR_ERR(msq);			success_return = msq->q_perm.id;		} else {			msq = msg_lock_check(ns, msqid);			if (IS_ERR(msq))				return PTR_ERR(msq);			success_return = 0;		}		err = -EACCES;		if (ipcperms(&msq->q_perm, S_IRUGO))			goto out_unlock;		err = security_msg_queue_msgctl(msq, cmd);		if (err)			goto out_unlock;		memset(&tbuf, 0, sizeof(tbuf));		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);		tbuf.msg_stime  = msq->q_stime;		tbuf.msg_rtime  = msq->q_rtime;		tbuf.msg_ctime  = msq->q_ctime;		tbuf.msg_cbytes = msq->q_cbytes;		tbuf.msg_qnum   = msq->q_qnum;		tbuf.msg_qbytes = msq->q_qbytes;		tbuf.msg_lspid  = msq->q_lspid;		tbuf.msg_lrpid  = msq->q_lrpid;		msg_unlock(msq);		if (copy_msqid_to_user(buf, &tbuf, version))			return -EFAULT;		return success_return;	}	case IPC_SET:	case IPC_RMID:		err = msgctl_down(ns, msqid, cmd, buf, version);		return err;//.........这里部分代码省略.........
Developer ID: LouZiffer, project: m900_kernel_cupcake-SDX, lines of code: 101


Example 22: do_page_fault

static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,                                   struct pt_regs *regs){    struct task_struct *tsk;    struct mm_struct *mm;    int fault, sig, code;    unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;    unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;    if (esr & ESR_LNX_EXEC) {        vm_flags = VM_EXEC;    } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {        vm_flags = VM_WRITE;        mm_flags |= FAULT_FLAG_WRITE;    }    tsk = current;    mm  = tsk->mm;    /* Enable interrupts if they were enabled in the parent context. */    if (interrupts_enabled(regs))        local_irq_enable();    /*     * If we're in an interrupt or have no user context, we must not take     * the fault.     */    if (in_atomic() || !mm)        goto no_context;    /*     * As per x86, we may deadlock here. However, since the kernel only     * validly references user space from well defined areas of the code,     * we can bug out early if this is from code which shouldn't.     */    if (!down_read_trylock(&mm->mmap_sem)) {        if (!user_mode(regs) && !search_exception_tables(regs->pc))            goto no_context;retry:        down_read(&mm->mmap_sem);    } else {        /*         * The above down_read_trylock() might have succeeded in which         * case, we'll have missed the might_sleep() from down_read().         */        might_sleep();#ifdef CONFIG_DEBUG_VM        if (!user_mode(regs) && !search_exception_tables(regs->pc))            goto no_context;#endif    }    fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);    /*     * If we need to retry but a fatal signal is pending, handle the     * signal first. We do not need to release the mmap_sem because it     * would already be released in __lock_page_or_retry in mm/filemap.c.     */    if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))        return 0;    /*     * Major/minor page fault accounting is only done on the initial     * attempt. If we go through a retry, it is extremely likely that the     * page will be found in page cache at that point.     */    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);    if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {        if (fault & VM_FAULT_MAJOR) {            tsk->maj_flt++;            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,                          addr);        } else {            tsk->min_flt++;            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,                          addr);        }        if (fault & VM_FAULT_RETRY) {            /*             * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of             * starvation.             */            mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;            goto retry;        }    }    up_read(&mm->mmap_sem);    /*     * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR     */    if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |                          VM_FAULT_BADACCESS))))        return 0;    if (fault & VM_FAULT_OOM) {        /*//.........这里部分代码省略.........
Developer ID: kodavanty, project: Buffalo-WZR600DHP, lines of code: 101


Example 23: videobuf_qbuf

//.........这里部分代码省略.........	unsigned long flags = 0;	int retval;	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);	if (b->memory == V4L2_MEMORY_MMAP)		down_read(&current->mm->mmap_sem);	mutex_lock(&q->vb_lock);	retval = -EBUSY;	if (q->reading) {		dprintk(1, "qbuf: Reading running.../n");		goto done;	}	retval = -EINVAL;	if (b->type != q->type) {		dprintk(1, "qbuf: Wrong type./n");		goto done;	}	if (b->index >= VIDEO_MAX_FRAME) {		dprintk(1, "qbuf: index out of range./n");		goto done;	}	buf = q->bufs[b->index];	if (NULL == buf) {		dprintk(1, "qbuf: buffer is null./n");		goto done;	}	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);	if (buf->memory != b->memory) {		dprintk(1, "qbuf: memory type is wrong./n");		goto done;	}	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {		dprintk(1, "qbuf: buffer is already queued or active./n");		goto done;	}	if (b->flags & V4L2_BUF_FLAG_INPUT) {		if (b->input >= q->inputs) {			dprintk(1, "qbuf: wrong input./n");			goto done;		}		buf->input = b->input;	} else {		buf->input = UNSET;	}	switch (b->memory) {	case V4L2_MEMORY_MMAP:		if (0 == buf->baddr) {			dprintk(1, "qbuf: mmap requested "				   "but buffer addr is zero!/n");			goto done;		}		break;	case V4L2_MEMORY_USERPTR:		if (b->length < buf->bsize) {			dprintk(1, "qbuf: buffer length is not enough/n");			goto done;		}		if (VIDEOBUF_NEEDS_INIT != buf->state &&		    buf->baddr != b->m.userptr)			q->ops->buf_release(q, buf);		buf->baddr = b->m.userptr;		break;	case V4L2_MEMORY_OVERLAY:		buf->boff = b->m.offset;		break;	default:		dprintk(1, "qbuf: wrong memory type/n");		goto done;	}	dprintk(1, "qbuf: requesting next field/n");	field = videobuf_next_field(q);	retval = q->ops->buf_prepare(q, buf, field);	if (0 != retval) {		dprintk(1, "qbuf: buffer_prepare returned %d/n", retval);		goto done;	}	list_add_tail(&buf->stream, &q->stream);	if (q->streaming) {		spin_lock_irqsave(q->irqlock, flags);		q->ops->buf_queue(q, buf);		spin_unlock_irqrestore(q->irqlock, flags);	}	dprintk(1, "qbuf: succeded/n");	retval = 0;	wake_up_interruptible_sync(&q->wait); done:	mutex_unlock(&q->vb_lock);	if (b->memory == V4L2_MEMORY_MMAP)		up_read(&current->mm->mmap_sem);	return retval;}
Developer ID: KaZoom, project: buildroot-linux-kernel, lines of code: 101


Example 24: task_mem

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}
Developer ID: Core2idiot, project: Kernel-Samsung-3.0...-, lines of code: 67


Example 25: xfs_ialloc_ag_alloc

//.........这里部分代码省略.........		return 0;	}	ASSERT(args.len == args.minlen);	/*	 * Convert the results.	 */	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);	/*	 * Loop over the new block(s), filling in the inodes.	 * For small block sizes, manipulate the inodes in buffers	 * which are multiples of the blocks size.	 */	if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {		blks_per_cluster = 1;		nbufs = (int)args.len;		ninodes = args.mp->m_sb.sb_inopblock;	} else {		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /				   args.mp->m_sb.sb_blocksize;		nbufs = (int)args.len / blks_per_cluster;		ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;	}	/*	 * Figure out what version number to use in the inodes we create.	 * If the superblock version has caught up to the one that supports	 * the new inode format, then use the new inode version.  Otherwise	 * use the old version so that old kernels will continue to be	 * able to use the file system.	 */	if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb))		version = XFS_DINODE_VERSION_2;	else		version = XFS_DINODE_VERSION_1;	for (j = 0; j < nbufs; j++) {		/*		 * Get the block.		 */		d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),				     args.agbno + (j * blks_per_cluster));		fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,					 args.mp->m_bsize * blks_per_cluster,					 XFS_BUF_LOCK);		ASSERT(fbuf);		ASSERT(!XFS_BUF_GETERROR(fbuf));		/*		 * Set initial values for the inodes in this buffer.		 */		xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);		for (i = 0; i < ninodes; i++) {			free = XFS_MAKE_IPTR(args.mp, fbuf, i);			INT_SET(free->di_core.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);			INT_SET(free->di_core.di_version, ARCH_CONVERT, version);			INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO);			xfs_ialloc_log_di(tp, fbuf, i,				XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);		}		xfs_trans_inode_alloc_buf(tp, fbuf);	}	be32_add(&agi->agi_count, newlen);	be32_add(&agi->agi_freecount, newlen);	down_read(&args.mp->m_peraglock);	args.mp->m_perag[be32_to_cpu(agi->agi_seqno)].pagi_freecount += newlen;	up_read(&args.mp->m_peraglock);	agi->agi_newino = cpu_to_be32(newino);	/*	 * Insert records describing the new inode chunk into the btree.	 */	cur = xfs_btree_init_cursor(args.mp, tp, agbp,			be32_to_cpu(agi->agi_seqno),			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);	for (thisino = newino;	     thisino < newino + newlen;	     thisino += XFS_INODES_PER_CHUNK) {		if ((error = xfs_inobt_lookup_eq(cur, thisino,				XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) {			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);			return error;		}		ASSERT(i == 0);		if ((error = xfs_inobt_insert(cur, &i))) {			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);			return error;		}		ASSERT(i == 1);	}	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);	/*	 * Log allocation group header fields	 */	xfs_ialloc_log_agi(tp, agbp,		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);	/*	 * Modify/log superblock values for inode count and inode free count.	 */	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);	*alloc = 1;	return 0;}
Developer ID: xiandaicxsj, project: copyKvm, lines of code: 101


Example 26: reiserfs_xattr_get

/* * inode->i_mutex: down */intreiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,		   size_t buffer_size){	ssize_t err = 0;	struct dentry *dentry;	size_t isize;	size_t file_pos = 0;	size_t buffer_pos = 0;	struct page *page;	__u32 hash = 0;	if (name == NULL)		return -EINVAL;	/* We can't have xattrs attached to v1 items since they don't have	 * generation numbers */	if (get_inode_sd_version(inode) == STAT_DATA_V1)		return -EOPNOTSUPP;	dentry = xattr_lookup(inode, name, XATTR_REPLACE);	if (IS_ERR(dentry)) {		err = PTR_ERR(dentry);		goto out;	}	down_read(&REISERFS_I(inode)->i_xattr_sem);	isize = i_size_read(dentry->d_inode);	/* Just return the size needed */	if (buffer == NULL) {		err = isize - sizeof(struct reiserfs_xattr_header);		goto out_unlock;	}	if (buffer_size < isize - sizeof(struct reiserfs_xattr_header)) {		err = -ERANGE;		goto out_unlock;	}	while (file_pos < isize) {		size_t chunk;		char *data;		size_t skip = 0;		if (isize - file_pos > PAGE_CACHE_SIZE)			chunk = PAGE_CACHE_SIZE;		else			chunk = isize - file_pos;		page = reiserfs_get_page(dentry->d_inode, file_pos);		if (IS_ERR(page)) {			err = PTR_ERR(page);			goto out_unlock;		}		lock_page(page);		data = page_address(page);		if (file_pos == 0) {			struct reiserfs_xattr_header *rxh =			    (struct reiserfs_xattr_header *)data;			skip = file_pos = sizeof(struct reiserfs_xattr_header);			chunk -= skip;			/* Magic doesn't match up.. */			if (rxh->h_magic != cpu_to_le32(REISERFS_XATTR_MAGIC)) {				unlock_page(page);				reiserfs_put_page(page);				reiserfs_warning(inode->i_sb, "jdm-20001",						 "Invalid magic for xattr (%s) "						 "associated with %k", name,						 INODE_PKEY(inode));				err = -EIO;				goto out_unlock;			}			hash = le32_to_cpu(rxh->h_hash);		}		memcpy(buffer + buffer_pos, data + skip, chunk);		unlock_page(page);		reiserfs_put_page(page);		file_pos += chunk;		buffer_pos += chunk;		skip = 0;	}	err = isize - sizeof(struct reiserfs_xattr_header);	if (xattr_hash(buffer, isize - sizeof(struct reiserfs_xattr_header)) !=	    hash) {		reiserfs_warning(inode->i_sb, "jdm-20002",				 "Invalid hash for xattr (%s) associated "				 "with %k", name, INODE_PKEY(inode));		err = -EIO;	}out_unlock:	up_read(&REISERFS_I(inode)->i_xattr_sem);	dput(dentry);//.........这里部分代码省略.........
Developer ID: kenkit, project: AndromadusMod-New, lines of code: 101


Example 27: xfs_dialloc

//.........这里部分代码省略.........				} else					return error;			}			if (ialloced) {				/*				 * We successfully allocated some inodes, return				 * the current context to the caller so that it				 * can commit the current transaction and call				 * us again where we left off.				 */				ASSERT(be32_to_cpu(agi->agi_freecount) > 0);				*alloc_done = B_TRUE;				*IO_agbp = agbp;				*inop = NULLFSINO;				return 0;			}		}		/*		 * If it failed, give up on this ag.		 */		xfs_trans_brelse(tp, agbp);		/*		 * Go on to the next ag: get its ag header.		 */nextag:		if (++tagno == agcount)			tagno = 0;		if (tagno == agno) {			*inop = NULLFSINO;			return noroom ? ENOSPC : 0;		}		down_read(&mp->m_peraglock);		if (mp->m_perag[tagno].pagi_inodeok == 0) {			up_read(&mp->m_peraglock);			goto nextag;		}		error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);		up_read(&mp->m_peraglock);		if (error)			goto nextag;		agi = XFS_BUF_TO_AGI(agbp);		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);	}	/*	 * Here with an allocation group that has a free inode.	 * Reset agno since we may have chosen a new ag in the	 * loop above.	 */	agno = tagno;	*IO_agbp = NULL;	cur = xfs_btree_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno),				    XFS_BTNUM_INO, (xfs_inode_t *)0, 0);	/*	 * If pagino is 0 (this is the root inode allocation) use newino.	 * This must work because we've just allocated some.	 */	if (!pagino)		pagino = be32_to_cpu(agi->agi_newino);#ifdef DEBUG	if (cur->bc_nlevels == 1) {		int	freecount = 0;		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))			goto error0;		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);		do {
Developer ID: xiandaicxsj, project: copyKvm, lines of code: 67


Example 28: ext4_try_to_write_inline_data

/* * Try to write data in the inode. * If the inode has inline data, check whether the new write can be * in the inode also. If not, create the page the handle, move the data * to the page make it update and let the later codes create extent for it. */int ext4_try_to_write_inline_data(struct address_space *mapping,				  struct inode *inode,				  loff_t pos, unsigned len,				  unsigned flags,				  struct page **pagep){	int ret;	handle_t *handle;	struct page *page;	struct ext4_iloc iloc;	if (pos + len > ext4_get_max_inline_size(inode))		goto convert;	ret = ext4_get_inode_loc(inode, &iloc);	if (ret)		return ret;	/*	 * The possible write could happen in the inode,	 * so try to reserve the space in inode first.	 */	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);	if (IS_ERR(handle)) {		ret = PTR_ERR(handle);		handle = NULL;		goto out;	}	ret = ext4_prepare_inline_data(handle, inode, pos + len);	if (ret && ret != -ENOSPC)		goto out;	/* We don't have space in inline inode, so convert it to extent. */	if (ret == -ENOSPC) {		ext4_journal_stop(handle);		brelse(iloc.bh);		goto convert;	}	flags |= AOP_FLAG_NOFS;	page = grab_cache_page_write_begin(mapping, 0, flags);	if (!page) {		ret = -ENOMEM;		goto out;	}	*pagep = page;	down_read(&EXT4_I(inode)->xattr_sem);	if (!ext4_has_inline_data(inode)) {		ret = 0;		unlock_page(page);		page_cache_release(page);		goto out_up_read;	}	if (!PageUptodate(page)) {		ret = ext4_read_inline_page(inode, page);		if (ret < 0)			goto out_up_read;	}	ret = 1;	handle = NULL;out_up_read:	up_read(&EXT4_I(inode)->xattr_sem);out:	if (handle)		ext4_journal_stop(handle);	brelse(iloc.bh);	return ret;convert:	return ext4_convert_inline_data_to_extent(mapping,						  inode, flags);}
Developer ID: hejin, project: kernel-3.10.0-327.13.1.el7.x86_64-fs, lines of code: 82



Note: the up_read function examples in this article were collected from GitHub/MSDocs and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use must follow the corresponding project's License. Do not reproduce without permission.

