
Self-study tutorial: C++ spin_lock function code examples

51自学网 2021-06-03 08:10:06
This tutorial, "C++ spin_lock function code examples", is quite practical; we hope it helps you.

This article collects and summarizes typical usage examples of the spin_lock function in C++. If you are struggling with questions like: What exactly does the C++ spin_lock function do? How do you call spin_lock? Where can you find working examples of spin_lock? Then congratulations: the hand-picked code examples below may be just the help you need.

The following presents 26 code examples of the spin_lock function, drawn from open-source projects (such as the Linux kernel, Lustre, and various Android kernel trees) and sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
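
As a quick orientation before the project examples, here is a minimal sketch of the basic pattern every example below follows: take the lock, touch the shared data, release the lock. The lock and counter names are illustrative only, not taken from any project cited here.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);  /* statically initialized spinlock */
static unsigned long counter;

/* Keep the critical section short: the lock is held only while the
 * shared data is touched, and never across anything that can sleep. */
void counter_inc(void)
{
    spin_lock(&counter_lock);
    counter++;
    spin_unlock(&counter_lock);
}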

Example 1: lfsck_namespace_double_scan_main

//......... part of the code omitted .........
        lfsck_object_put(env, target);

checkpoint:
        com->lc_new_checked++;
        com->lc_new_scanned++;
        ns->ln_fid_latest_scanned_phase2 = fid;
        if (rc > 0)
            ns->ln_objs_repaired_phase2++;
        else if (rc < 0)
            ns->ln_objs_failed_phase2++;
        up_write(&com->lc_sem);

        if ((rc == 0) || ((rc > 0) && !(bk->lb_param & LPF_DRYRUN))) {
            lfsck_namespace_delete(env, com, &fid);
        } else if (rc < 0) {
            flags |= LLF_REPAIR_FAILED;
            lfsck_namespace_update(env, com, &fid, flags, true);
        }

        if (rc < 0 && bk->lb_param & LPF_FAILOUT)
            GOTO(put, rc);

        if (unlikely(cfs_time_beforeq(com->lc_time_next_checkpoint,
                          cfs_time_current())) &&
            com->lc_new_checked != 0) {
            down_write(&com->lc_sem);
            ns->ln_run_time_phase2 +=
                cfs_duration_sec(cfs_time_current() +
                HALF_SEC - com->lc_time_last_checkpoint);
            ns->ln_time_last_checkpoint = cfs_time_current_sec();
            ns->ln_objs_checked_phase2 += com->lc_new_checked;
            com->lc_new_checked = 0;
            rc = lfsck_namespace_store(env, com, false);
            up_write(&com->lc_sem);
            if (rc != 0)
                GOTO(put, rc);

            com->lc_time_last_checkpoint = cfs_time_current();
            com->lc_time_next_checkpoint =
                com->lc_time_last_checkpoint +
                cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
        }

        lfsck_control_speed_by_self(com);
        if (unlikely(!thread_is_running(thread)))
            GOTO(put, rc = 0);

        rc = iops->next(env, di);
    } while (rc == 0);

    GOTO(put, rc);

put:
    iops->put(env, di);

fini:
    iops->fini(env, di);

out:
    down_write(&com->lc_sem);
    ns->ln_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
                HALF_SEC - lfsck->li_time_last_checkpoint);
    ns->ln_time_last_checkpoint = cfs_time_current_sec();
    ns->ln_objs_checked_phase2 += com->lc_new_checked;
    com->lc_new_checked = 0;

    if (rc > 0) {
        com->lc_journal = 0;
        ns->ln_status = LS_COMPLETED;
        if (!(bk->lb_param & LPF_DRYRUN))
            ns->ln_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
        ns->ln_time_last_complete = ns->ln_time_last_checkpoint;
        ns->ln_success_count++;
    } else if (rc == 0) {
        ns->ln_status = lfsck->li_status;
        if (ns->ln_status == 0)
            ns->ln_status = LS_STOPPED;
    } else {
        ns->ln_status = LS_FAILED;
    }

    if (ns->ln_status != LS_PAUSED) {
        spin_lock(&lfsck->li_lock);
        cfs_list_del_init(&com->lc_link);
        cfs_list_add_tail(&com->lc_link, &lfsck->li_list_idle);
        spin_unlock(&lfsck->li_lock);
    }

    rc = lfsck_namespace_store(env, com, false);
    up_write(&com->lc_sem);

    if (atomic_dec_and_test(&lfsck->li_double_scan_count))
        wake_up_all(&thread->t_ctl_waitq);

    lfsck_thread_args_fini(lta);

    return rc;
}
Developer ID: hejin, project name: lustre-stable, lines of code: 101


Example 2: lfsck_namespace_prep

static int lfsck_namespace_prep(const struct lu_env *env,
                struct lfsck_component *com,
                struct lfsck_start_param *lsp)
{
    struct lfsck_instance  *lfsck = com->lc_lfsck;
    struct lfsck_namespace *ns    = com->lc_file_ram;
    struct lfsck_position  *pos   = &com->lc_pos_start;

    if (ns->ln_status == LS_COMPLETED) {
        int rc;

        rc = lfsck_namespace_reset(env, com, false);
        if (rc != 0)
            return rc;
    }

    down_write(&com->lc_sem);
    ns->ln_time_latest_start = cfs_time_current_sec();

    spin_lock(&lfsck->li_lock);
    if (ns->ln_flags & LF_SCANNED_ONCE) {
        if (!lfsck->li_drop_dryrun ||
            lfsck_pos_is_zero(&ns->ln_pos_first_inconsistent)) {
            ns->ln_status = LS_SCANNING_PHASE2;
            cfs_list_del_init(&com->lc_link);
            cfs_list_add_tail(&com->lc_link,
                      &lfsck->li_list_double_scan);
            if (!cfs_list_empty(&com->lc_link_dir))
                cfs_list_del_init(&com->lc_link_dir);
            lfsck_pos_set_zero(pos);
        } else {
            ns->ln_status = LS_SCANNING_PHASE1;
            ns->ln_run_time_phase1 = 0;
            ns->ln_run_time_phase2 = 0;
            ns->ln_items_checked = 0;
            ns->ln_items_repaired = 0;
            ns->ln_items_failed = 0;
            ns->ln_dirs_checked = 0;
            ns->ln_mlinked_checked = 0;
            ns->ln_objs_checked_phase2 = 0;
            ns->ln_objs_repaired_phase2 = 0;
            ns->ln_objs_failed_phase2 = 0;
            ns->ln_objs_nlink_repaired = 0;
            ns->ln_objs_lost_found = 0;
            fid_zero(&ns->ln_fid_latest_scanned_phase2);
            if (cfs_list_empty(&com->lc_link_dir))
                cfs_list_add_tail(&com->lc_link_dir,
                          &lfsck->li_list_dir);
            *pos = ns->ln_pos_first_inconsistent;
        }
    } else {
        ns->ln_status = LS_SCANNING_PHASE1;
        if (cfs_list_empty(&com->lc_link_dir))
            cfs_list_add_tail(&com->lc_link_dir,
                      &lfsck->li_list_dir);
        if (!lfsck->li_drop_dryrun ||
            lfsck_pos_is_zero(&ns->ln_pos_first_inconsistent)) {
            *pos = ns->ln_pos_last_checkpoint;
            pos->lp_oit_cookie++;
        } else {
            *pos = ns->ln_pos_first_inconsistent;
        }
    }
    spin_unlock(&lfsck->li_lock);
    up_write(&com->lc_sem);

    return 0;
}
Developer ID: hejin, project name: lustre-stable, lines of code: 69


Example 3: scfs_readpages

/**
 * scfs_readpages
 *
 * Parameters:
 * @file: upper file
 * @*mapping: address_space struct for the file
 * @*pages: list of pages to read in
 * @nr_pages: number of pages to read in
 *
 * Return:
 * SCFS_SUCCESS if success, otherwise if error
 *
 * Description:
 * - Asynchronously read pages for readahead. A scaling number of background threads
 *   will read & decompress them in a slightly deferred but parallelized manner.
 */
static int
scfs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *pages, unsigned nr_pages)
{
    struct scfs_inode_info *sii = SCFS_I(file->f_mapping->host);
    struct scfs_sb_info *sbi = SCFS_S(file->f_mapping->host->i_sb);
    struct file *lower_file = NULL;
    struct page *page;
    struct scfs_cinfo cinfo;
    loff_t i_size;
    pgoff_t start, end;
    int page_idx, page_idx_readahead = 1024, ret = 0;
    int readahead_page = 0;
    int prev_cbi = 0;
    int prev_cluster = -1, cur_cluster = -1;
    int cluster_idx = 0;

    i_size = i_size_read(&sii->vfs_inode);
    if (!i_size) {
        SCFS_PRINT("file %s: i_size is zero, "
            "flags 0x%x sii->clust_info_size %d\n",
            file->f_path.dentry->d_name.name, sii->flags,
            sii->cinfo_array_size);
        return 0;
    }

#ifdef SCFS_ASYNC_READ_PROFILE
    atomic_add(nr_pages, &sbi->scfs_standby_readpage_count);
#endif

#ifdef SCFS_NOTIFY_RANDOM_READ
    lower_file = scfs_lower_file(file);
    if (!lower_file) {
        SCFS_PRINT_ERROR("file %s: lower file is null!\n",
                file->f_path.dentry->d_name.name);
        return -EINVAL;
    }

    /* if the read request was random (enough), hint it to the lower file.
     * scfs_sequential_page_number is the tunable threshold.
     * filemap.c will later on refer to this FMODE_RANDOM flag.
     */
    spin_lock(&lower_file->f_lock);
    if (nr_pages > sbi->scfs_sequential_page_number)
        lower_file->f_mode &= ~FMODE_RANDOM;
    else
        lower_file->f_mode |= FMODE_RANDOM;
    spin_unlock(&lower_file->f_lock);
#endif
    lower_file = scfs_lower_file(file);
    page = list_entry(pages->prev, struct page, lru);
    cluster_idx = page->index / (sii->cluster_size / PAGE_SIZE);

    if (sii->compressed) {
        mutex_lock(&sii->cinfo_mutex);
        ret = get_cluster_info(file, cluster_idx, &cinfo);
        mutex_unlock(&sii->cinfo_mutex);
        if (ret) {
            SCFS_PRINT_ERROR("err in get_cluster_info, ret : %d,"
                "i_size %lld\n", ret, i_size);
            return ret;
        }

        if (!cinfo.size || cinfo.size > sii->cluster_size) {
            SCFS_PRINT_ERROR("file %s: cinfo is invalid, "
                "clust %u cinfo.size %u\n",
                file->f_path.dentry->d_name.name,
                cluster_idx, cinfo.size);
            return -EINVAL;
        }
        start = (pgoff_t)(cinfo.offset / PAGE_SIZE);
    } else {
        start = (pgoff_t)(cluster_idx * sii->cluster_size / PAGE_SIZE);
    }

    cluster_idx = (page->index + nr_pages - 1) / (sii->cluster_size / PAGE_SIZE);
    if (sii->compressed) {
        mutex_lock(&sii->cinfo_mutex);
        ret = get_cluster_info(file, cluster_idx, &cinfo);
        mutex_unlock(&sii->cinfo_mutex);
        if (ret) {
            SCFS_PRINT_ERROR("err in get_cluster_info, ret : %d,"
                "i_size %lld\n", ret, i_size);
            return ret;
//......... part of the code omitted .........
Developer ID: civato, project name: sm-n9005-Note5port-kernel, lines of code: 101


Example 4: afs_d_revalidate

/*
 * check that a dentry lookup hit has found a valid entry
 * - NOTE! the hit can be a negative hit too, so we can't assume we have an inode
 *   (derived from nfs_lookup_revalidate)
 */
static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
    struct afs_dir_lookup_cookie cookie;
    struct dentry *parent;
    struct inode *inode, *dir;
    unsigned fpos;
    int ret;

    _enter("{sb=%p n=%s},", dentry->d_sb, dentry->d_name.name);

    /* lock down the parent dentry so we can peer at it */
    parent = dget_parent(dentry->d_parent);

    dir = parent->d_inode;
    inode = dentry->d_inode;

    /* handle a negative inode */
    if (!inode)
        goto out_bad;

    /* handle a bad inode */
    if (is_bad_inode(inode)) {
        printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
               dentry->d_parent->d_name.name, dentry->d_name.name);
        goto out_bad;
    }

    /* force a full look up if the parent directory changed since last the server was consulted
     * - otherwise this inode must still exist, even if the inode details themselves have
     *   changed
     */
    if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
        afs_vnode_fetch_status(AFS_FS_I(dir));

    if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) {
        _debug("%s: parent dir deleted", dentry->d_name.name);
        goto out_bad;
    }

    if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) {
        _debug("%s: file already deleted", dentry->d_name.name);
        goto out_bad;
    }

    if ((unsigned long)dentry->d_fsdata != (unsigned long)AFS_FS_I(dir)->status.version) {
        _debug("%s: parent changed %lu -> %u",
               dentry->d_name.name,
               (unsigned long)dentry->d_fsdata,
               (unsigned)AFS_FS_I(dir)->status.version);

        /* search the directory for this vnode */
        cookie.name    = dentry->d_name.name;
        cookie.nlen    = dentry->d_name.len;
        cookie.fid.vid = AFS_FS_I(inode)->volume->vid;
        cookie.found   = 0;

        fpos = 0;
        ret = afs_dir_iterate(dir, &fpos, &cookie, afs_dir_lookup_filldir);
        if (ret < 0) {
            _debug("failed to iterate dir %s: %d", parent->d_name.name, ret);
            goto out_bad;
        }

        if (!cookie.found) {
            _debug("%s: dirent not found", dentry->d_name.name);
            goto not_found;
        }

        /* if the vnode ID has changed, then the dirent points to a different file */
        if (cookie.fid.vnode != AFS_FS_I(inode)->fid.vnode) {
            _debug("%s: dirent changed", dentry->d_name.name);
            goto not_found;
        }

        /* if the vnode ID uniqifier has changed, then the file has been deleted */
        if (cookie.fid.unique != AFS_FS_I(inode)->fid.unique) {
            _debug("%s: file deleted (uq %u -> %u I:%lu)",
                   dentry->d_name.name,
                   cookie.fid.unique,
                   AFS_FS_I(inode)->fid.unique,
                   inode->i_version);
            spin_lock(&AFS_FS_I(inode)->lock);
            AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED;
            spin_unlock(&AFS_FS_I(inode)->lock);
            invalidate_remote_inode(inode);
            goto out_bad;
        }

        dentry->d_fsdata = (void *)(unsigned long)AFS_FS_I(dir)->status.version;
    }

out_valid:
    dput(parent);
    _leave(" = 1 [valid]");
    return 1;
//......... part of the code omitted .........
Developer ID: xricson, project name: knoppix, lines of code: 101


Example 5: ipath_error_qp

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
    struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
    struct ib_wc wc;
    int ret = 0;

    if (qp->state == IB_QPS_ERR)
        goto bail;

    qp->state = IB_QPS_ERR;

    spin_lock(&dev->pending_lock);
    if (!list_empty(&qp->timerwait))
        list_del_init(&qp->timerwait);
    if (!list_empty(&qp->piowait))
        list_del_init(&qp->piowait);
    spin_unlock(&dev->pending_lock);

    /* Schedule the sending tasklet to drain the send work queue. */
    if (qp->s_last != qp->s_head)
        ipath_schedule_send(qp);

    memset(&wc, 0, sizeof(wc));
    wc.qp = &qp->ibqp;
    wc.opcode = IB_WC_RECV;

    if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
        wc.wr_id = qp->r_wr_id;
        wc.status = err;
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
    }
    wc.status = IB_WC_WR_FLUSH_ERR;

    if (qp->r_rq.wq) {
        struct ipath_rwq *wq;
        u32 head;
        u32 tail;

        spin_lock(&qp->r_rq.lock);

        /* sanity check pointers before trusting them */
        wq = qp->r_rq.wq;
        head = wq->head;
        if (head >= qp->r_rq.size)
            head = 0;
        tail = wq->tail;
        if (tail >= qp->r_rq.size)
            tail = 0;
        while (tail != head) {
            wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
            if (++tail >= qp->r_rq.size)
                tail = 0;
            ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wq->tail = tail;

        spin_unlock(&qp->r_rq.lock);
    } else if (qp->ibqp.event_handler)
        ret = 1;

bail:
    return ret;
}
Developer ID: 458941968, project name: mini2440-kernel-2.6.29, lines of code: 63
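
A note on what examples 5 and 18 show together: example 18 takes dev->pending_lock while already holding qp->s_lock, so this driver nests its spinlocks in a fixed order. Whenever two spinlocks can be held at once, every path must take them in the same order; the sketch below illustrates the rule with hypothetical locks, not the driver's own:

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

/* All paths nest a -> b. If another path took b first, two CPUs could
 * each hold one lock and spin forever waiting for the other. */
void update_both(void)
{
    spin_lock(&lock_a);
    spin_lock(&lock_b);
    /* ... touch data guarded by both locks ... */
    spin_unlock(&lock_b);
    spin_unlock(&lock_a);
}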


Example 6: ERR_PTR

//......... part of the code omitted .........
            init_attr->qp_type == IB_QPT_SMI ||
            init_attr->qp_type == IB_QPT_GSI)) {
            qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
            if (!qp->r_ud_sg_list) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_qp;
            }
        } else
            qp->r_ud_sg_list = NULL;
        if (init_attr->srq) {
            sz = 0;
            qp->r_rq.size = 0;
            qp->r_rq.max_sge = 0;
            qp->r_rq.wq = NULL;
            init_attr->cap.max_recv_wr = 0;
            init_attr->cap.max_recv_sge = 0;
        } else {
            qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
            qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
            sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                sizeof(struct ipath_rwqe);
            qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
                          qp->r_rq.size * sz);
            if (!qp->r_rq.wq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_sg_list;
            }
        }

        /*
         * ib_create_qp() will initialize qp->ibqp
         * except for qp->ibqp.qp_num.
         */
        spin_lock_init(&qp->s_lock);
        spin_lock_init(&qp->r_rq.lock);
        atomic_set(&qp->refcount, 0);
        init_waitqueue_head(&qp->wait);
        init_waitqueue_head(&qp->wait_dma);
        tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
        INIT_LIST_HEAD(&qp->piowait);
        INIT_LIST_HEAD(&qp->timerwait);
        qp->state = IB_QPS_RESET;
        qp->s_wq = swq;
        qp->s_size = init_attr->cap.max_send_wr + 1;
        qp->s_max_sge = init_attr->cap.max_send_sge;
        if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
            qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
        else
            qp->s_flags = 0;
        dev = to_idev(ibpd->device);
        err = ipath_alloc_qpn(&dev->qp_table, qp,
                      init_attr->qp_type);
        if (err) {
            ret = ERR_PTR(err);
            vfree(qp->r_rq.wq);
            goto bail_sg_list;
        }
        qp->ip = NULL;
        qp->s_tx = NULL;
        ipath_reset_qp(qp, init_attr->qp_type);
        break;

    default:
        /* Don't support raw QPs */
        ret = ERR_PTR(-ENOSYS);
        goto bail;
Developer ID: 458941968, project name: mini2440-kernel-2.6.29, lines of code: 67
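
Example 6 initializes its locks with spin_lock_init() because they live inside a dynamically allocated object; the static DEFINE_SPINLOCK() initializer cannot cover that case. A minimal sketch of the dynamic pattern, using an illustrative structure (not from the ipath driver):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
    spinlock_t lock;
    int state;
};

struct my_obj *my_obj_create(void)
{
    struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    if (!obj)
        return NULL;
    spin_lock_init(&obj->lock);  /* must happen before the first spin_lock() */
    return obj;
}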


Example 7: ERR_PTR

//......... part of the code omitted .........
            /* we lost it */
            journal_release_buffer(handle, bitmap_bh);
            if (++ino < EXT3_INODES_PER_GROUP(sb))
                goto repeat_in_this_group;
        }

        /*
         * This case is possible in concurrent environment.  It is very
         * rare.  We cannot repeat the find_group_xxx() call because
         * that will simply return the same blockgroup, because the
         * group descriptor metadata has not yet been updated.
         * So we just go onto the next blockgroup.
         */
        if (++group == sbi->s_groups_count)
            group = 0;
    }
    err = -ENOSPC;
    goto out;

got:
    ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
    if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
        ext3_error (sb, "ext3_new_inode",
                "reserved inode or inode > inodes count - "
                "block_group = %d, inode=%lu", group, ino);
        err = -EIO;
        goto fail;
    }

    BUFFER_TRACE(bh2, "get_write_access");
    err = ext3_journal_get_write_access(handle, bh2);
    if (err) goto fail;
    spin_lock(sb_bgl_lock(sbi, group));
    le16_add_cpu(&gdp->bg_free_inodes_count, -1);
    if (S_ISDIR(mode)) {
        le16_add_cpu(&gdp->bg_used_dirs_count, 1);
    }
    spin_unlock(sb_bgl_lock(sbi, group));
    BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
    err = ext3_journal_dirty_metadata(handle, bh2);
    if (err) goto fail;

    percpu_counter_dec(&sbi->s_freeinodes_counter);
    if (S_ISDIR(mode))
        percpu_counter_inc(&sbi->s_dirs_counter);

    if (test_opt(sb, GRPID)) {
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = dir->i_gid;
    } else
        inode_init_owner(inode, dir, mode);

    inode->i_ino = ino;
    /* This is the optimal IO size (for stat), not the fs block size */
    inode->i_blocks = 0;
    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

    memset(ei->i_data, 0, sizeof(ei->i_data));
    ei->i_dir_start_lookup = 0;
    ei->i_disksize = 0;

    ei->i_flags =
        ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
Developer ID: rrowicki, project name: Chrono_Kernel-1, lines of code: 67


Example 8: ext3_free_inode

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext3_free_inode (handle_t *handle, struct inode * inode)
{
    struct super_block * sb = inode->i_sb;
    int is_directory;
    unsigned long ino;
    struct buffer_head *bitmap_bh = NULL;
    struct buffer_head *bh2;
    unsigned long block_group;
    unsigned long bit;
    struct ext3_group_desc * gdp;
    struct ext3_super_block * es;
    struct ext3_sb_info *sbi;
    int fatal = 0, err;

    if (atomic_read(&inode->i_count) > 1) {
        printk ("ext3_free_inode: inode has count=%d\n",
                    atomic_read(&inode->i_count));
        return;
    }
    if (inode->i_nlink) {
        printk ("ext3_free_inode: inode has nlink=%d\n",
            inode->i_nlink);
        return;
    }
    if (!sb) {
        ;
        return;
    }
    sbi = EXT3_SB(sb);

    ino = inode->i_ino;
    ext3_debug ("freeing inode %lu\n", ino);

    is_directory = S_ISDIR(inode->i_mode);

    es = EXT3_SB(sb)->s_es;
    if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
        ext3_error (sb, "ext3_free_inode",
                "reserved or nonexistent inode %lu", ino);
        goto error_return;
    }
    block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
    bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
    bitmap_bh = read_inode_bitmap(sb, block_group);
    if (!bitmap_bh)
        goto error_return;

    BUFFER_TRACE(bitmap_bh, "get_write_access");
    fatal = ext3_journal_get_write_access(handle, bitmap_bh);
    if (fatal)
        goto error_return;

    /* Ok, now we can actually update the inode bitmaps.. */
    if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                    bit, bitmap_bh->b_data))
        ext3_error (sb, "ext3_free_inode",
                  "bit already cleared for inode %lu", ino);
    else {
        gdp = ext3_get_group_desc (sb, block_group, &bh2);

        BUFFER_TRACE(bh2, "get_write_access");
        fatal = ext3_journal_get_write_access(handle, bh2);
        if (fatal) goto error_return;

        if (gdp) {
            spin_lock(sb_bgl_lock(sbi, block_group));
            le16_add_cpu(&gdp->bg_free_inodes_count, 1);
            if (is_directory)
                le16_add_cpu(&gdp->bg_used_dirs_count, -1);
            spin_unlock(sb_bgl_lock(sbi, block_group));
            percpu_counter_inc(&sbi->s_freeinodes_counter);
            if (is_directory)
                percpu_counter_dec(&sbi->s_dirs_counter);
        }
        BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
        err = ext3_journal_dirty_metadata(handle, bh2);
        if (!fatal) fatal = err;
    }
    BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
    err = ext3_journal_dirty_metadata(handle, bitmap_bh);
    if (!fatal)
        fatal = err;
//......... part of the code omitted .........
Developer ID: rrowicki, project name: Chrono_Kernel-1, lines of code: 101


Example 9: interrupt_handler

static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
    struct nozomi *dc = dev_id;
    unsigned int a;
    u16 read_iir;

    if (!dc)
        return IRQ_NONE;

    spin_lock(&dc->spin_mutex);
    read_iir = readw(dc->reg_iir);

    /* Card removed */
    if (read_iir == (u16)-1)
        goto none;
    /*
     * Just handle interrupt enabled in IER
     * (by masking with dc->last_ier)
     */
    read_iir &= dc->last_ier;

    if (read_iir == 0)
        goto none;

    DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
        dc->last_ier);

    if (read_iir & RESET) {
        if (unlikely(!nozomi_read_config_table(dc))) {
            dc->last_ier = 0x0;
            writew(dc->last_ier, dc->reg_ier);
            dev_err(&dc->pdev->dev, "Could not read status from "
                "card, we should disable interface\n");
        } else {
            writew(RESET, dc->reg_fcr);
        }
        /* No more useful info if this was the reset interrupt. */
        goto exit_handler;
    }
    if (read_iir & CTRL_UL) {
        DBG1("CTRL_UL");
        dc->last_ier &= ~CTRL_UL;
        writew(dc->last_ier, dc->reg_ier);
        if (send_flow_control(dc)) {
            writew(CTRL_UL, dc->reg_fcr);
            dc->last_ier = dc->last_ier | CTRL_UL;
            writew(dc->last_ier, dc->reg_ier);
        }
    }
    if (read_iir & CTRL_DL) {
        receive_flow_control(dc);
        writew(CTRL_DL, dc->reg_fcr);
    }
    if (read_iir & MDM_DL) {
        if (!handle_data_dl(dc, PORT_MDM,
                &(dc->port[PORT_MDM].toggle_dl), read_iir,
                MDM_DL1, MDM_DL2)) {
            dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
            goto exit_handler;
        }
    }
    if (read_iir & MDM_UL) {
        if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
            dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
            goto exit_handler;
        }
    }
    if (read_iir & DIAG_DL) {
        if (!handle_data_dl(dc, PORT_DIAG,
                &(dc->port[PORT_DIAG].toggle_dl), read_iir,
                DIAG_DL1, DIAG_DL2)) {
            dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
            goto exit_handler;
        }
    }
    if (read_iir & DIAG_UL) {
        dc->last_ier &= ~DIAG_UL;
        writew(dc->last_ier, dc->reg_ier);
        if (send_data(PORT_DIAG, dc)) {
            writew(DIAG_UL, dc->reg_fcr);
            dc->last_ier = dc->last_ier | DIAG_UL;
            writew(dc->last_ier, dc->reg_ier);
        }
    }
    if (read_iir & APP1_DL) {
        if (receive_data(PORT_APP1, dc))
            writew(APP1_DL, dc->reg_fcr);
    }
    if (read_iir & APP1_UL) {
        dc->last_ier &= ~APP1_UL;
        writew(dc->last_ier, dc->reg_ier);
        if (send_data(PORT_APP1, dc)) {
            writew(APP1_UL, dc->reg_fcr);
            dc->last_ier = dc->last_ier | APP1_UL;
            writew(dc->last_ier, dc->reg_ier);
        }
    }
    if (read_iir & APP2_DL) {
        if (receive_data(PORT_APP2, dc))
//......... part of the code omitted .........
Developer ID: 513855417, project name: linux, lines of code: 101


Example 10: fd_link_ioctl

static long
fd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
{
    void __user *argp = (void __user *) arg;
    struct task_struct *task_target = NULL;
    struct file *file;
    struct files_struct *files;
    struct fdtable *fdt;
    struct fd_copy fd_copy;

    switch (ioctl)
    {
        case FD_COPY:
            if (copy_from_user (&fd_copy, argp, sizeof (struct fd_copy)))
                return -EFAULT;

            /*
             * Find the task struct for the target pid
             */
            task_target =
                pid_task (find_vpid (fd_copy.target_pid), PIDTYPE_PID);
            if (task_target == NULL)
            {
                printk (KERN_DEBUG "Failed to get mem ctx for target pid\n");
                return -EFAULT;
            }

            files = get_files_struct (current);
            if (files == NULL)
            {
                printk (KERN_DEBUG "Failed to get files struct\n");
                return -EFAULT;
            }

            rcu_read_lock ();
            file = fcheck_files (files, fd_copy.source_fd);
            if (file)
            {
                if (file->f_mode & FMODE_PATH
                        || !atomic_long_inc_not_zero (&file->f_count))
                    file = NULL;
            }
            rcu_read_unlock ();
            put_files_struct (files);

            if (file == NULL)
            {
                printk (KERN_DEBUG "Failed to get file from source pid\n");
                return 0;
            }

            /*
             * Release the existing fd in the source process
             */
            spin_lock (&files->file_lock);
            filp_close (file, files);
            fdt = files_fdtable (files);
            fdt->fd[fd_copy.source_fd] = NULL;
            spin_unlock (&files->file_lock);

            /*
             * Find the file struct associated with the target fd.
             */
            files = get_files_struct (task_target);
            if (files == NULL)
            {
                printk (KERN_DEBUG "Failed to get files struct\n");
                return -EFAULT;
            }

            rcu_read_lock ();
            file = fcheck_files (files, fd_copy.target_fd);
            if (file)
            {
                if (file->f_mode & FMODE_PATH
                        || !atomic_long_inc_not_zero (&file->f_count))
                    file = NULL;
            }
            rcu_read_unlock ();
            put_files_struct (files);

            if (file == NULL)
            {
                printk (KERN_DEBUG "Failed to get file from target pid\n");
                return 0;
            }

            /*
             * Install the file struct from the target process into the
             * file desciptor of the source process,
             */
            fd_install (fd_copy.source_fd, file);

            return 0;

        default:
            return -ENOIOCTLCMD;
//......... part of the code omitted .........
Developer ID: Grace-Liu, project name: dpdk-ovs, lines of code: 101


Example 11: mdss_dsi_isr

irqreturn_t mdss_dsi_isr(int irq, void *ptr)
{
    u32 isr;
    struct mdss_dsi_ctrl_pdata *ctrl =
            (struct mdss_dsi_ctrl_pdata *)ptr;

    if (!ctrl->ctrl_base)
        pr_err("%s:%d DSI base adr no Initialized",
                       __func__, __LINE__);

    isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
    MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);

    if (ctrl->shared_pdata.broadcast_enable)
        if ((ctrl->panel_data.panel_info.pdest == DISPLAY_2)
            && (left_ctrl_pdata != NULL)) {
            u32 isr0;
            isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
                        + 0x0110);/* DSI_INTR_CTRL */
            MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0);
        }

    pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);

    if (isr & DSI_INTR_ERROR) {
#ifdef F_WA_WATCHDOG_DURING_BOOTUP
        if(ctrl->octa_blck_set)
#endif
            pr_err("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
        mdss_dsi_error(ctrl);
    }

    if (isr & DSI_INTR_VIDEO_DONE) {
        spin_lock(&ctrl->mdp_lock);
        mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
        complete(&ctrl->video_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    if (isr & DSI_INTR_CMD_DMA_DONE) {
        spin_lock(&ctrl->mdp_lock);
        mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
        complete(&ctrl->dma_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    if (isr & DSI_INTR_CMD_MDP_DONE) {
        spin_lock(&ctrl->mdp_lock);
        ctrl->mdp_busy = false;
        mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
        complete(&ctrl->mdp_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    if (isr & DSI_INTR_BTA_DONE) {
        spin_lock(&ctrl->mdp_lock);
        mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
        complete(&ctrl->bta_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    return IRQ_HANDLED;
}
Developer ID: thanhphat11, project name: android_kernel_pantech_910, lines of code: 64


Example 12: sid_to_id

static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
        struct cifs_fattr *fattr, uint sidtype)
{
    int rc;
    unsigned long cid;
    struct key *idkey;
    const struct cred *saved_cred;
    struct cifs_sid_id *psidid, *npsidid;
    struct rb_root *cidtree;
    spinlock_t *cidlock;

    if (sidtype == SIDOWNER) {
        cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
        cidlock = &siduidlock;
        cidtree = &uidtree;
    } else if (sidtype == SIDGROUP) {
        cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
        cidlock = &sidgidlock;
        cidtree = &gidtree;
    } else
        return -ENOENT;

    spin_lock(cidlock);
    psidid = id_rb_search(cidtree, psid);

    if (!psidid) { /* node does not exist, allocate one & attempt adding */
        spin_unlock(cidlock);
        npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
        if (!npsidid)
            return -ENOMEM;

        npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
        if (!npsidid->sidstr) {
            kfree(npsidid);
            return -ENOMEM;
        }

        spin_lock(cidlock);
        psidid = id_rb_search(cidtree, psid);
        if (psidid) { /* node happened to get inserted meanwhile */
            ++psidid->refcount;
            spin_unlock(cidlock);
            kfree(npsidid->sidstr);
            kfree(npsidid);
        } else {
            psidid = npsidid;
            id_rb_insert(cidtree, psid, &psidid,
                    sidtype == SIDOWNER ? "os:" : "gs:");
            ++psidid->refcount;
            spin_unlock(cidlock);
        }
    } else {
        ++psidid->refcount;
        spin_unlock(cidlock);
    }

    /*
     * If we are here, it is safe to access psidid and its fields
     * since a reference was taken earlier while holding the spinlock.
     * A reference on the node is put without holding the spinlock
     * and it is OK to do so in this case, shrinker will not erase
     * this node until all references are put and we do not access
     * any fields of the node after a reference is put .
     */
    if (test_bit(SID_ID_MAPPED, &psidid->state)) {
        cid = psidid->id;
        psidid->time = jiffies; /* update ts for accessing */
        goto sid_to_id_out;
    }

    if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
        goto sid_to_id_out;

    if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
        saved_cred = override_creds(root_cred);
        idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
        if (IS_ERR(idkey))
            cFYI(1, "%s: Can't map SID to an id", __func__);
        else {
            cid = *(unsigned long *)idkey->payload.value;
            psidid->id = cid;
            set_bit(SID_ID_MAPPED, &psidid->state);
            key_put(idkey);
            kfree(psidid->sidstr);
        }
        revert_creds(saved_cred);
        psidid->time = jiffies; /* update ts for accessing */
        clear_bit(SID_ID_PENDING, &psidid->state);
        wake_up_bit(&psidid->state, SID_ID_PENDING);
    } else {
        rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
                sidid_pending_wait, TASK_INTERRUPTIBLE);
        if (rc) {
            cFYI(1, "%s: sidid_pending_wait interrupted %d",
                    __func__, rc);
            --psidid->refcount; /* decremented without spinlock */
            return rc;
        }
        if (test_bit(SID_ID_MAPPED, &psidid->state))
//......... part of the code omitted .........
Developer ID: rrowicki, project name: Chrono_Kernel-1, lines of code: 101


Example 13: id_to_sid

static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
    int rc = 0;
    struct key *sidkey;
    const struct cred *saved_cred;
    struct cifs_sid *lsid;
    struct cifs_sid_id *psidid, *npsidid;
    struct rb_root *cidtree;
    spinlock_t *cidlock;

    if (sidtype == SIDOWNER) {
        cidlock = &siduidlock;
        cidtree = &uidtree;
    } else if (sidtype == SIDGROUP) {
        cidlock = &sidgidlock;
        cidtree = &gidtree;
    } else
        return -EINVAL;

    spin_lock(cidlock);
    psidid = sid_rb_search(cidtree, cid);

    if (!psidid) { /* node does not exist, allocate one & attempt adding */
        spin_unlock(cidlock);
        npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
        if (!npsidid)
            return -ENOMEM;

        npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
        if (!npsidid->sidstr) {
            kfree(npsidid);
            return -ENOMEM;
        }

        spin_lock(cidlock);
        psidid = sid_rb_search(cidtree, cid);
        if (psidid) { /* node happened to get inserted meanwhile */
            ++psidid->refcount;
            spin_unlock(cidlock);
            kfree(npsidid->sidstr);
            kfree(npsidid);
        } else {
            psidid = npsidid;
            sid_rb_insert(cidtree, cid, &psidid,
                    sidtype == SIDOWNER ? "oi:" : "gi:");
            ++psidid->refcount;
            spin_unlock(cidlock);
        }
    } else {
        ++psidid->refcount;
        spin_unlock(cidlock);
    }

    /*
     * If we are here, it is safe to access psidid and its fields
     * since a reference was taken earlier while holding the spinlock.
     * A reference on the node is put without holding the spinlock
     * and it is OK to do so in this case, shrinker will not erase
     * this node until all references are put and we do not access
     * any fields of the node after a reference is put .
     */
    if (test_bit(SID_ID_MAPPED, &psidid->state)) {
        memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
        psidid->time = jiffies; /* update ts for accessing */
        goto id_sid_out;
    }

    if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
        rc = -EINVAL;
        goto id_sid_out;
    }

    if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
        saved_cred = override_creds(root_cred);
        sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
        if (IS_ERR(sidkey)) {
            rc = -EINVAL;
            cFYI(1, "%s: Can't map and id to a SID", __func__);
        } else {
            lsid = (struct cifs_sid *)sidkey->payload.data;
            memcpy(&psidid->sid, lsid,
                sidkey->datalen < sizeof(struct cifs_sid) ?
                sidkey->datalen : sizeof(struct cifs_sid));
            memcpy(ssid, &psidid->sid,
                sidkey->datalen < sizeof(struct cifs_sid) ?
                sidkey->datalen : sizeof(struct cifs_sid));
            set_bit(SID_ID_MAPPED, &psidid->state);
            key_put(sidkey);
            kfree(psidid->sidstr);
        }
        psidid->time = jiffies; /* update ts for accessing */
        revert_creds(saved_cred);
        clear_bit(SID_ID_PENDING, &psidid->state);
        wake_up_bit(&psidid->state, SID_ID_PENDING);
    } else {
        rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
                sidid_pending_wait, TASK_INTERRUPTIBLE);
        if (rc) {
            cFYI(1, "%s: sidid_pending_wait interrupted %d",
//......... part of the code omitted .........
Developer ID: rrowicki, project name: Chrono_Kernel-1, lines of code: 101


Example 14: journal_submit_data_buffers

/*
 *  Submit all the data buffers to disk
 */
static void journal_submit_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
    struct journal_head *jh;
    struct buffer_head *bh;
    int locked;
    int bufs = 0;
    struct buffer_head **wbuf = journal->j_wbuf;

    /*
     * Whenever we unlock the journal and sleep, things can get added
     * onto ->t_sync_datalist, so we have to keep looping back to
     * write_out_data until we *know* that the list is empty.
     *
     * Cleanup any flushed data buffers from the data list.  Even in
     * abort mode, we want to flush this out as soon as possible.
     */
write_out_data:
    cond_resched();
    spin_lock(&journal->j_list_lock);

    while (commit_transaction->t_sync_datalist) {
        jh = commit_transaction->t_sync_datalist;
        bh = jh2bh(jh);
        locked = 0;

        /* Get reference just to make sure buffer does not disappear
         * when we are forced to drop various locks */
        get_bh(bh);
        /* If the buffer is dirty, we need to submit IO and hence
         * we need the buffer lock. We try to lock the buffer without
         * blocking. If we fail, we need to drop j_list_lock and do
         * blocking lock_buffer().
         */
        if (buffer_dirty(bh)) {
            if (test_set_buffer_locked(bh)) {
                BUFFER_TRACE(bh, "needs blocking lock");
                spin_unlock(&journal->j_list_lock);
                /* Write out all data to prevent deadlocks */
                journal_do_submit_data(wbuf, bufs);
                bufs = 0;
                lock_buffer(bh);
                spin_lock(&journal->j_list_lock);
            }
            locked = 1;
        }
        /* We have to get bh_state lock. Again out of order, sigh. */
        if (!inverted_lock(journal, bh)) {
            jbd_lock_bh_state(bh);
            spin_lock(&journal->j_list_lock);
        }
        /* Someone already cleaned up the buffer? */
        if (!buffer_jbd(bh)
            || jh->b_transaction != commit_transaction
            || jh->b_jlist != BJ_SyncData) {
            jbd_unlock_bh_state(bh);
            if (locked)
                unlock_buffer(bh);
            BUFFER_TRACE(bh, "already cleaned up");
            put_bh(bh);
            continue;
        }
        if (locked && test_clear_buffer_dirty(bh)) {
            BUFFER_TRACE(bh, "needs writeout, adding to array");
            wbuf[bufs++] = bh;
            __jbd2_journal_file_buffer(jh, commit_transaction,
                        BJ_Locked);
            jbd_unlock_bh_state(bh);
            if (bufs == journal->j_wbufsize) {
                spin_unlock(&journal->j_list_lock);
                journal_do_submit_data(wbuf, bufs);
                bufs = 0;
                goto write_out_data;
            }
        } else if (!locked && buffer_locked(bh)) {
            __jbd2_journal_file_buffer(jh, commit_transaction,
                        BJ_Locked);
            jbd_unlock_bh_state(bh);
            put_bh(bh);
        } else {
            BUFFER_TRACE(bh, "writeout complete: unfile");
            __jbd2_journal_unfile_buffer(jh);
            jbd_unlock_bh_state(bh);
            if (locked)
                unlock_buffer(bh);
            jbd2_journal_remove_journal_head(bh);
            /* Once for our safety reference, once for
             * jbd2_journal_remove_journal_head() */
            put_bh(bh);
            put_bh(bh);
        }

        if (lock_need_resched(&journal->j_list_lock)) {
            spin_unlock(&journal->j_list_lock);
            goto write_out_data;
        }
    }
//......... part of the code omitted .........
Developer ID: 3sOx, project name: asuswrt-merlin, lines of code: 101
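
Example 14 demonstrates a discipline that recurs throughout the kernel: a spinlock must never be held across an operation that can block, so the commit code drops j_list_lock before the blocking lock_buffer(), re-acquires it afterwards, and re-checks the buffer state since the world may have changed in between. The related trylock idiom (used via inverted_lock() above when the usual lock order cannot be honored) looks roughly like the sketch below, with hypothetical names:

static DEFINE_SPINLOCK(busy_lock);

/* Attempt the critical section without spinning; on contention the
 * caller backs out and tries a slower path instead of deadlocking. */
int try_fast_path(void)
{
    if (!spin_trylock(&busy_lock))
        return 0;    /* contended: take the slow path */
    /* ... short critical section ... */
    spin_unlock(&busy_lock);
    return 1;
}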


Example 15: xuartps_isr

/**
 * xuartps_isr - Interrupt handler
 * @irq: Irq number
 * @dev_id: Id of the port
 *
 * Returns IRQHANDLED
 **/
static irqreturn_t xuartps_isr(int irq, void *dev_id)
{
    struct uart_port *port = (struct uart_port *)dev_id;
    unsigned long flags;
    unsigned int isrstatus, numbytes;
    unsigned int data;
    char status = TTY_NORMAL;

    spin_lock_irqsave(&port->lock, flags);

    /* Read the interrupt status register to determine which
     * interrupt(s) is/are active.
     */
    isrstatus = xuartps_readl(XUARTPS_ISR_OFFSET);

    /* drop byte with parity error if IGNPAR specified */
    if (isrstatus & port->ignore_status_mask & XUARTPS_IXR_PARITY)
        isrstatus &= ~(XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT);

    isrstatus &= port->read_status_mask;
    isrstatus &= ~port->ignore_status_mask;

    if ((isrstatus & XUARTPS_IXR_TOUT) ||
        (isrstatus & XUARTPS_IXR_RXTRIG)) {
        /* Receive Timeout Interrupt */
        while ((xuartps_readl(XUARTPS_SR_OFFSET) &
            XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
            data = xuartps_readl(XUARTPS_FIFO_OFFSET);
            port->icount.rx++;

            if (isrstatus & XUARTPS_IXR_PARITY) {
                port->icount.parity++;
                status = TTY_PARITY;
            } else if (isrstatus & XUARTPS_IXR_FRAMING) {
                port->icount.frame++;
                status = TTY_FRAME;
            } else if (isrstatus & XUARTPS_IXR_OVERRUN)
                port->icount.overrun++;

            uart_insert_char(port, isrstatus, XUARTPS_IXR_OVERRUN,
                    data, status);
        }
        spin_unlock(&port->lock);
        tty_flip_buffer_push(&port->state->port);
        spin_lock(&port->lock);
    }

    /* Dispatch an appropriate handler */
    if ((isrstatus & XUARTPS_IXR_TXEMPTY) == XUARTPS_IXR_TXEMPTY) {
        if (uart_circ_empty(&port->state->xmit)) {
            xuartps_writel(XUARTPS_IXR_TXEMPTY,
                        XUARTPS_IDR_OFFSET);
        } else {
            numbytes = port->fifosize;
            /* Break if no more data available in the UART buffer */
            while (numbytes--) {
                if (uart_circ_empty(&port->state->xmit))
                    break;
                /* Get the data from the UART circular buffer
                 * and write it to the xuartps's TX_FIFO
                 * register.
                 */
                xuartps_writel(
                    port->state->xmit.buf[port->state->xmit.
                    tail], XUARTPS_FIFO_OFFSET);

                port->icount.tx++;

                /* Adjust the tail of the UART buffer and wrap
                 * the buffer if it reaches limit.
                 */
                port->state->xmit.tail =
                    (port->state->xmit.tail + 1) &
                        (UART_XMIT_SIZE - 1);
            }

            if (uart_circ_chars_pending(
                    &port->state->xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        }
    }

    xuartps_writel(isrstatus, XUARTPS_ISR_OFFSET);

    /* be sure to release the lock and tty before leaving */
    spin_unlock_irqrestore(&port->lock, flags);

    return IRQ_HANDLED;
}
Developer ID: CoerWatt, project name: linux, lines of code: 96
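
Examples 11, 15, and 19 all run in interrupt context, but only xuartps_isr uses spin_lock_irqsave(). The rule of thumb: when a lock taken in an interrupt handler is also taken in process context, the process-context side must disable local interrupts while holding it, or the handler could interrupt the lock holder on the same CPU and spin forever. A minimal sketch of the two sides, with an illustrative device structure (not from any driver above):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_dev {
    spinlock_t lock;
    unsigned int pending;
};

/* Process context: disable local interrupts while holding the lock. */
void my_dev_kick(struct my_dev *dev)
{
    unsigned long flags;

    spin_lock_irqsave(&dev->lock, flags);
    dev->pending++;
    spin_unlock_irqrestore(&dev->lock, flags);
}

/* Hard-IRQ context: interrupts are already off here, so the plain
 * variants suffice (as in the mdss_dsi_isr examples). */
static irqreturn_t my_dev_isr(int irq, void *ptr)
{
    struct my_dev *dev = ptr;

    spin_lock(&dev->lock);
    dev->pending = 0;
    spin_unlock(&dev->lock);
    return IRQ_HANDLED;
}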


Example 16: jbd2_journal_commit_transaction

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
    transaction_t *commit_transaction;
    struct journal_head *jh, *new_jh, *descriptor;
    struct buffer_head **wbuf = journal->j_wbuf;
    int bufs;
    int flags;
    int err;
    unsigned long long blocknr;
    char *tagp = NULL;
    journal_header_t *header;
    journal_block_tag_t *tag = NULL;
    int space_left = 0;
    int first_tag = 0;
    int tag_flag;
    int i;
    int tag_bytes = journal_tag_bytes(journal);

    /*
     * First job: lock down the current transaction and wait for
     * all outstanding updates to complete.
     */

#ifdef COMMIT_STATS
    spin_lock(&journal->j_list_lock);
    summarise_journal_usage(journal);
    spin_unlock(&journal->j_list_lock);
#endif

    /* Do we need to erase the effects of a prior jbd2_journal_flush? */
    if (journal->j_flags & JBD2_FLUSHED) {
        jbd_debug(3, "super block updated\n");
        jbd2_journal_update_superblock(journal, 1);
    } else {
        jbd_debug(3, "superblock not updated\n");
    }

    J_ASSERT(journal->j_running_transaction != NULL);
    J_ASSERT(journal->j_committing_transaction == NULL);

    commit_transaction = journal->j_running_transaction;
    J_ASSERT(commit_transaction->t_state == T_RUNNING);

    jbd_debug(1, "JBD: starting commit of transaction %d\n",
            commit_transaction->t_tid);

    spin_lock(&journal->j_state_lock);
    commit_transaction->t_state = T_LOCKED;

    spin_lock(&commit_transaction->t_handle_lock);
    while (commit_transaction->t_updates) {
        DEFINE_WAIT(wait);

        prepare_to_wait(&journal->j_wait_updates, &wait,
                    TASK_UNINTERRUPTIBLE);
        if (commit_transaction->t_updates) {
            spin_unlock(&commit_transaction->t_handle_lock);
            spin_unlock(&journal->j_state_lock);
            schedule();
            spin_lock(&journal->j_state_lock);
            spin_lock(&commit_transaction->t_handle_lock);
        }
        finish_wait(&journal->j_wait_updates, &wait);
    }
    spin_unlock(&commit_transaction->t_handle_lock);

    J_ASSERT (commit_transaction->t_outstanding_credits <=
            journal->j_max_transaction_buffers);

    /*
     * First thing we are allowed to do is to discard any remaining
     * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
     * that there are no such buffers: if a large filesystem
     * operation like a truncate needs to split itself over multiple
     * transactions, then it may try to do a jbd2_journal_restart() while
     * there are still BJ_Reserved buffers outstanding.  These must
     * be released cleanly from the current transaction.
     *
     * In this case, the filesystem must still reserve write access
     * again before modifying the buffer in the new transaction, but
     * we do not require it to remember exactly which old buffers it
     * has reserved.  This is consistent with the existing behaviour
     * that multiple jbd2_journal_get_write_access() calls to the same
     * buffer are perfectly permissable.
     */
    while (commit_transaction->t_reserved_list) {
        jh = commit_transaction->t_reserved_list;
        JBUFFER_TRACE(jh, "reserved, unused: refile");
        /*
         * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
         * leave undo-committed data.
         */
        if (jh->b_committed_data) {
            struct buffer_head *bh = jh2bh(jh);
//......... part of the code omitted .........
Developer ID: 3sOx, project name: asuswrt-merlin, lines of code: 101


Example 17: xpmem_open

/*
 * User open of the XPMEM driver. Called whenever /dev/xpmem is opened.
 * Create a struct xpmem_thread_group structure for the specified thread group.
 * And add the structure to the tg hash table.
 */
static int
xpmem_open(struct inode *inode, struct file *file)
{
    struct xpmem_thread_group *tg;
    int index;
    struct proc_dir_entry *unpin_entry;
    char tgid_string[XPMEM_TGID_STRING_LEN];

    /* if this has already been done, just return silently */
    tg = xpmem_tg_ref_by_tgid(current->tgid);
    if (!IS_ERR(tg)) {
        xpmem_tg_deref(tg);
        return 0;
    }

    /* create tg */
    tg = kzalloc(sizeof(struct xpmem_thread_group), GFP_KERNEL);
    if (tg == NULL) {
        return -ENOMEM;
    }

    tg->lock = SPIN_LOCK_UNLOCKED;
    tg->tgid = current->tgid;
    tg->uid = current->cred->uid;
    tg->gid = current->cred->gid;
    atomic_set(&tg->uniq_segid, 0);
    atomic_set(&tg->uniq_apid, 0);
    atomic_set(&tg->n_pinned, 0);
    tg->addr_limit = TASK_SIZE;
    tg->seg_list_lock = RW_LOCK_UNLOCKED;
    INIT_LIST_HEAD(&tg->seg_list);
    INIT_LIST_HEAD(&tg->tg_hashlist);
    atomic_set(&tg->n_recall_PFNs, 0);
    mutex_init(&tg->recall_PFNs_mutex);
    init_waitqueue_head(&tg->block_recall_PFNs_wq);
    init_waitqueue_head(&tg->allow_recall_PFNs_wq);
    tg->mmu_initialized = 0;
    tg->mmu_unregister_called = 0;
    tg->mm = current->mm;

    /* Register MMU notifier callbacks */
    if (xpmem_mmu_notifier_init(tg) != 0) {
        kfree(tg);
        return -EFAULT;
    }

    /* create and initialize struct xpmem_access_permit hashtable */
    tg->ap_hashtable = kzalloc(sizeof(struct xpmem_hashlist) *
                     XPMEM_AP_HASHTABLE_SIZE, GFP_KERNEL);
    if (tg->ap_hashtable == NULL) {
        xpmem_mmu_notifier_unlink(tg);
        kfree(tg);
        return -ENOMEM;
    }
    for (index = 0; index < XPMEM_AP_HASHTABLE_SIZE; index++) {
        tg->ap_hashtable[index].lock = RW_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&tg->ap_hashtable[index].list);
    }

    snprintf(tgid_string, XPMEM_TGID_STRING_LEN, "%d", current->tgid);
    spin_lock(&xpmem_unpin_procfs_lock);
    unpin_entry = create_proc_entry(tgid_string, 0644,
                    xpmem_unpin_procfs_dir);
    spin_unlock(&xpmem_unpin_procfs_lock);
    if (unpin_entry != NULL) {
        unpin_entry->data = (void *)(unsigned long)current->tgid;
        unpin_entry->write_proc = xpmem_unpin_procfs_write;
        unpin_entry->read_proc = xpmem_unpin_procfs_read;
        //unpin_entry->owner = THIS_MODULE;
        unpin_entry->uid = current->cred->uid;
        unpin_entry->gid = current->cred->gid;
    }

    xpmem_tg_not_destroyable(tg);

    /* add tg to its hash list */
    index = xpmem_tg_hashtable_index(tg->tgid);
    write_lock(&xpmem_my_part->tg_hashtable[index].lock);
    list_add_tail(&tg->tg_hashlist,
              &xpmem_my_part->tg_hashtable[index].list);
    write_unlock(&xpmem_my_part->tg_hashtable[index].lock);

    /*
     * Increment 'usage' and 'mm->mm_users' for the current task's thread
     * group leader. This ensures that both its task_struct and mm_struct
     * will still be around when our thread group exits. (The Linux kernel
     * normally tears down the mm_struct prior to calling a module's
     * 'flush' function.) Since all XPMEM thread groups must go through
     * this path, this extra reference to mm_users also allows us to
     * directly inc/dec mm_users in xpmem_ensure_valid_PFNs() and avoid
     * mmput() which has a scaling issue with the mmlist_lock.
     */
    get_task_struct(current->group_leader);
    tg->group_leader = current->group_leader;
    BUG_ON(current->mm != current->group_leader->mm);
//......... part of the code omitted .........
Developer ID: hyoklee, project name: xpmem, lines of code: 101


Example 18: ipath_modify_qp

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair who's attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
            int attr_mask, struct ib_udata *udata)
{
    struct ipath_ibdev *dev = to_idev(ibqp->device);
    struct ipath_qp *qp = to_iqp(ibqp);
    enum ib_qp_state cur_state, new_state;
    int lastwqe = 0;
    int ret;

    spin_lock_irq(&qp->s_lock);

    cur_state = attr_mask & IB_QP_CUR_STATE ?
        attr->cur_qp_state : qp->state;
    new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

    if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                attr_mask))
        goto inval;

    if (attr_mask & IB_QP_AV) {
        if (attr->ah_attr.dlid == 0 ||
            attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
            goto inval;

        if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
            (attr->ah_attr.grh.sgid_index > 1))
            goto inval;
    }

    if (attr_mask & IB_QP_PKEY_INDEX)
        if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
            goto inval;

    if (attr_mask & IB_QP_MIN_RNR_TIMER)
        if (attr->min_rnr_timer > 31)
            goto inval;

    if (attr_mask & IB_QP_PORT)
        if (attr->port_num == 0 ||
            attr->port_num > ibqp->device->phys_port_cnt)
            goto inval;

    /*
     * don't allow invalid Path MTU values or greater than 2048
     * unless we are configured for a 4KB MTU
     */
    if ((attr_mask & IB_QP_PATH_MTU) &&
        (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
        (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
        goto inval;

    if (attr_mask & IB_QP_PATH_MIG_STATE)
        if (attr->path_mig_state != IB_MIG_MIGRATED &&
            attr->path_mig_state != IB_MIG_REARM)
            goto inval;

    if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
        if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
            goto inval;

    switch (new_state) {
    case IB_QPS_RESET:
        if (qp->state != IB_QPS_RESET) {
            qp->state = IB_QPS_RESET;
            spin_lock(&dev->pending_lock);
            if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
            if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
            spin_unlock(&dev->pending_lock);
            qp->s_flags &= ~IPATH_S_ANY_WAIT;
            spin_unlock_irq(&qp->s_lock);
            /* Stop the sending tasklet */
            tasklet_kill(&qp->s_task);
            wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
            spin_lock_irq(&qp->s_lock);
        }
        ipath_reset_qp(qp, ibqp->qp_type);
        break;

    case IB_QPS_SQD:
        qp->s_draining = qp->s_last != qp->s_cur;
        qp->state = new_state;
        break;

    case IB_QPS_SQE:
        if (qp->ibqp.qp_type == IB_QPT_RC)
            goto inval;
        qp->state = new_state;
        break;
//......... part of the code omitted .........
Developer ID: 458941968, project name: mini2440-kernel-2.6.29, lines of code: 101


Example 19: mdss_dsi_isr

irqreturn_t mdss_dsi_isr(int irq, void *ptr)
{
    u32 isr;
    u32 isr0 = 0;
    struct mdss_dsi_ctrl_pdata *ctrl =
            (struct mdss_dsi_ctrl_pdata *)ptr;

    if (!ctrl) {
        pr_err("%s unable to access ctrl\n", __func__);
        return IRQ_HANDLED;
    }

    if (!ctrl->ctrl_base) {
        pr_err("%s:%d DSI base adr no Initialized",
                       __func__, __LINE__);
        return IRQ_HANDLED;
    }

    isr = MIPI_INP(ctrl->ctrl_base + 0x0110);/* DSI_INTR_CTRL */
    MIPI_OUTP(ctrl->ctrl_base + 0x0110, isr);

    if (ctrl->shared_pdata.broadcast_enable)
        if ((ctrl->panel_data.panel_info.pdest == DISPLAY_2)
            && (left_ctrl_pdata != NULL)) {
            isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
                        + 0x0110);/* DSI_INTR_CTRL */
            MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0 & (~DSI_INTR_CMD_MDP_DONE));
        }

    pr_debug("%s: isr=%x, isr0=%x", __func__, isr, isr0);

    if (isr & DSI_INTR_ERROR) {
#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
        xlog(__func__, ctrl->ndx, ctrl->mdp_busy, isr, 0, 0, 0x97);
#endif
        pr_err("%s: isr[%d]=%x %x", __func__, ctrl->ndx, isr, (int)DSI_INTR_ERROR);
        mdss_dsi_error(ctrl);
    }

    if (isr & DSI_INTR_VIDEO_DONE) {
        spin_lock(&ctrl->mdp_lock);
        mdss_dsi_disable_irq_nosync(ctrl, DSI_VIDEO_TERM);
        complete(&ctrl->video_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    if (isr & DSI_INTR_CMD_DMA_DONE) {
        spin_lock(&ctrl->mdp_lock);
#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
        xlog(__func__, ctrl->ndx, ctrl->mdp_busy, isr, 0, 0, 0x98);
#endif
        mdss_dsi_disable_irq_nosync(ctrl, DSI_CMD_TERM);
        complete(&ctrl->dma_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    if (isr & DSI_INTR_CMD_MDP_DONE) {
        spin_lock(&ctrl->mdp_lock);
#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
        xlog(__func__, ctrl->ndx, ctrl->mdp_busy, isr, 0, 0, 0x99);
#endif
        ctrl->mdp_busy = false;
        mdss_dsi_disable_irq_nosync(ctrl, DSI_MDP_TERM);
        complete(&ctrl->mdp_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    if (isr & DSI_INTR_BTA_DONE) {
        spin_lock(&ctrl->mdp_lock);
        mdss_dsi_disable_irq_nosync(ctrl, DSI_BTA_TERM);
        complete(&ctrl->bta_comp);
        spin_unlock(&ctrl->mdp_lock);
    }

    return IRQ_HANDLED;
}
Developer ID: caelin99, project name: klte_tree, lines of code: 76


Example 20: smb_thread

int smb_thread(void *data)
{
    u32 length = 0, io_index, filling_index;
    struct scfs_sb_info *sbi = (struct scfs_sb_info *)data;
    struct page *page;
    struct page *temp_page;
    struct page *page_buffer[3] = {NULL, NULL, NULL};
    struct file *file;
    struct file *temp_file = NULL;
    struct scfs_inode_info *sii;
    int cluster_number = -1;
    int page_buffer_count = 0;
    int i;
    int prev_cbi = 0;

    set_freezable();

    /* handle any queued-up read requests, or else go back to sleep */
    while (!kthread_should_stop()) {
        set_current_state(TASK_INTERRUPTIBLE);
        spin_lock(&sbi->spinlock_smb);

        /* calculate number of pages of page buffer */
        io_index = sbi->page_buffer_next_io_index_smb;
        filling_index = sbi->page_buffer_next_filling_index_smb;

        if (filling_index == MAX_PAGE_BUFFER_SIZE_SMB) {
            length = MAX_PAGE_BUFFER_SIZE_SMB;
            sbi->page_buffer_next_filling_index_smb =
                sbi->page_buffer_next_io_index_smb;
        } else if (filling_index > io_index)
            length = filling_index - io_index;
        else if (filling_index < io_index)
            length = (MAX_PAGE_BUFFER_SIZE_SMB - io_index) + filling_index;
        else if (filling_index == io_index)
            length = 0;

        page_buffer_count = 0;

        /* the requested page, as well as subsequent pages in the same cluster,
         * will be serviced, in two separate readpage calls
         */
        if (length > 0) {
            __set_current_state(TASK_RUNNING);
            page = sbi->page_buffer_smb[sbi->page_buffer_next_io_index_smb];
            file = sbi->file_buffer_smb[sbi->page_buffer_next_io_index_smb];
            sbi->page_buffer_next_io_index_smb++;

            if (sbi->page_buffer_next_io_index_smb >= MAX_PAGE_BUFFER_SIZE_SMB)
                sbi->page_buffer_next_io_index_smb = 0;

            length--;
            sii = SCFS_I(page->mapping->host);
            cluster_number = PAGE_TO_CLUSTER_INDEX(page, sii);

            while (length-- > 0) {
                temp_page = sbi->page_buffer_smb[sbi->page_buffer_next_io_index_smb];
                temp_file = sbi->file_buffer_smb[sbi->page_buffer_next_io_index_smb];

                if ((temp_file == file) &&
                    (cluster_number == PAGE_TO_CLUSTER_INDEX(temp_page, sii))) {
                    page_buffer[page_buffer_count++] = temp_page;
                    sbi->page_buffer_next_io_index_smb++;

                    if (sbi->page_buffer_next_io_index_smb >=
                        MAX_PAGE_BUFFER_SIZE_SMB)
                        sbi->page_buffer_next_io_index_smb = 0;
                } else
                    break;
            }
            spin_unlock(&sbi->spinlock_smb);

            /* read first page */
            prev_cbi = _scfs_readpage(file, page, -1);
            fput(SCFS_F(file)->lower_file);
            fput(file);
            page_cache_release(page);

            /* read related pages with cluster of first page */
            for (i = 0; i < page_buffer_count; i++) {
                prev_cbi = _scfs_readpage(file, page_buffer[i], prev_cbi - 1);
                fput(SCFS_F(file)->lower_file);
                fput(file);
                page_cache_release(page_buffer[i]);
            }
        } else {
            //sbi->smb_task_status[xx] = 0;
            spin_unlock(&sbi->spinlock_smb);
            schedule();
            //sbi->smb_task_status[xx] = 1;
        }
    }

    return 0;
}
Developer ID: civato, project name: sm-n9005-Note5port-kernel, lines of code: 95


Example 21: _nfs42_proc_fallocate

//......... part of the code omitted .........
    nfs_put_lock_context(lock);
    return err;
}

static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
    struct nfs42_layoutstat_data *data = calldata;
    struct nfs_server *server = NFS_SERVER(data->args.inode);

    nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
                 &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
    struct nfs42_layoutstat_data *data = calldata;
    struct inode *inode = data->inode;
    struct pnfs_layout_hdr *lo;

    if (!nfs4_sequence_done(task, &data->res.seq_res))
        return;

    switch (task->tk_status) {
    case 0:
        break;
    case -NFS4ERR_EXPIRED:
    case -NFS4ERR_STALE_STATEID:
    case -NFS4ERR_OLD_STATEID:
    case -NFS4ERR_BAD_STATEID:
        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo && nfs4_stateid_match(&data->args.stateid,
                         &lo->plh_stateid)) {
            LIST_HEAD(head);

            /*
             * Mark the bad layout state as invalid, then retry
             * with the current stateid.
             */
            set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
            pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
            spin_unlock(&inode->i_lock);
            pnfs_free_lseg_list(&head);
        } else
            spin_unlock(&inode->i_lock);
        break;
    case -ENOTSUPP:
    case -EOPNOTSUPP:
        NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
    default:
        break;
    }

    dprintk("%s server returns %d\n", __func__, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
    struct nfs42_layoutstat_data *data = calldata;
    struct nfs_server *nfss = NFS_SERVER(data->args.inode);
Developer ID: 513855417, project name: linux, lines of code: 66


Example 22: _scfs_readpage

/**
 * scfs_readpage
 *
 * Parameters:
 * @file: upper file
 * @page: upper page from SCFS inode mapping, data will be copied in here
 *
 * Return:
 * SCFS_SUCCESS if success, otherwise if error
 *
 * Description:
 * - Read in a page by reading a cluster from the file's lower file.
 *   (Reading in a cluster for just a single page read is inevitable, but this
 *    "amplified read" and decompressing overhead should be amortized when
 *    other pages in that same cluster is accessed later, and only incurs
 *    memcpy from the cached cluster buffer.)
 * - Recently accessed clusters ("buffer_cache") are cached for later reads.
 */
static inline int _scfs_readpage(struct file *file, struct page *page, int pref_index)
{
    struct scfs_inode_info *sii = SCFS_I(page->mapping->host);
    struct scfs_sb_info *sbi = SCFS_S(page->mapping->host->i_sb);
    struct scfs_cluster_buffer buffer = {NULL, NULL, NULL, NULL, 0};
    int ret = 0, compressed = 0;
    int alloc_membuffer = 1;
    int allocated_index = -1;
    int i;
    char *virt;

    SCFS_PRINT("f:%s i:%d c:0x%x u:0x%x\n",
        file->f_path.dentry->d_name.name,
        page->index, buffer.c_buffer, buffer.u_buffer);
    ASSERT(sii->cluster_size <= SCFS_CLUSTER_SIZE_MAX);

#ifdef SCFS_ASYNC_READ_PROFILE
    sbi->scfs_readpage_total_count++;
#endif

#if MAX_BUFFER_CACHE
    /* search buffer_cache first in case the cluster is left cached */
    if (pref_index >= 0 &&
        sbi->buffer_cache[pref_index].inode_number == sii->vfs_inode.i_ino &&
        sbi->buffer_cache[pref_index].cluster_number ==
            PAGE_TO_CLUSTER_INDEX(page, sii) &&
        atomic_read(&sbi->buffer_cache[pref_index].is_used) != 1) {
        spin_lock(&sbi->buffer_cache_lock);

        /* this pref_index is used for another page */
        if (atomic_read(&sbi->buffer_cache[pref_index].is_used) == 1) {
            spin_unlock(&sbi->buffer_cache_lock);
            sbi->buffer_cache_reclaimed_before_used_count++;
            goto pick_slot;
        }
        atomic_set(&sbi->buffer_cache[pref_index].is_used, 1);
        spin_unlock(&sbi->buffer_cache_lock);
        virt = kmap_atomic(page);

        if (sbi->buffer_cache[pref_index].is_compressed)
            memcpy(virt, page_address(sbi->buffer_cache[pref_index].u_page) +
                PGOFF_IN_CLUSTER(page, sii) * PAGE_SIZE, PAGE_SIZE);
        else
            memcpy(virt, page_address(sbi->buffer_cache[pref_index].c_page) +
                PGOFF_IN_CLUSTER(page, sii) * PAGE_SIZE, PAGE_SIZE);

        atomic_set(&sbi->buffer_cache[pref_index].is_used, 0);
        kunmap_atomic(virt);
        SetPageUptodate(page);
        unlock_page(page);
        SCFS_PRINT("%s<h> %d\n", file->f_path.dentry->d_name.name, page->index);

        return pref_index + 1;
    } else if (pref_index >= 0) {
        sbi->buffer_cache_reclaimed_before_used_count++;
        goto pick_slot;
    }

    /* search buffer_cache first in case the cluster is left cached */
    for (i = 0; i < MAX_BUFFER_CACHE; i++) {
        if (sbi->buffer_cache[i].inode_number == sii->vfs_inode.i_ino &&
            sbi->buffer_cache[i].cluster_number ==
                PAGE_TO_CLUSTER_INDEX(page, sii) &&
            atomic_read(&sbi->buffer_cache[i].is_used) != 1) {
            spin_lock(&sbi->buffer_cache_lock);
            if (atomic_read(&sbi->buffer_cache[i].is_used) == 1) {
                spin_unlock(&sbi->buffer_cache_lock);
                goto pick_slot;
            }
            atomic_set(&sbi->buffer_cache[i].is_used, 1);
            spin_unlock(&sbi->buffer_cache_lock);
            virt = kmap_atomic(page);

            if (sbi->buffer_cache[i].is_compressed)
                memcpy(virt, page_address(sbi->buffer_cache[i].u_page) +
                    PGOFF_IN_CLUSTER(page, sii) * PAGE_SIZE, PAGE_SIZE);
            else
                memcpy(virt, page_address(sbi->buffer_cache[i].c_page) +
                    PGOFF_IN_CLUSTER(page, sii) * PAGE_SIZE, PAGE_SIZE);

            atomic_set(&sbi->buffer_cache[i].is_used, 0);
            kunmap_atomic(virt);
            SetPageUptodate(page);
//......... part of the code omitted .........
Developer ID: civato, project name: sm-n9005-Note5port-kernel, lines of code: 101


Example 23: lfsck_namespace_dump

//......... part of the code omitted .........
                  checked,
                  ns->ln_objs_checked_phase2,
                  ns->ln_items_repaired,
                  ns->ln_objs_repaired_phase2,
                  ns->ln_items_failed,
                  ns->ln_objs_failed_phase2,
                  ns->ln_dirs_checked,
                  ns->ln_mlinked_checked,
                  ns->ln_objs_nlink_repaired,
                  ns->ln_objs_lost_found,
                  ns->ln_success_count,
                  rtime,
                  ns->ln_run_time_phase2,
                  speed,
                  new_checked);
        if (rc <= 0)
            goto out;

        buf += rc;
        len -= rc;

        LASSERT(lfsck->li_di_oit != NULL);

        iops = &lfsck->li_obj_oit->do_index_ops->dio_it;

        /* The low layer otable-based iteration position may NOT
         * exactly match the namespace-based directory traversal
         * cookie. Generally, it is not a serious issue. But the
         * caller should NOT make assumption on that. */
        pos.lp_oit_cookie = iops->store(env, lfsck->li_di_oit);
        if (!lfsck->li_current_oit_processed)
            pos.lp_oit_cookie--;

        spin_lock(&lfsck->li_lock);
        if (lfsck->li_di_dir != NULL) {
            pos.lp_dir_cookie = lfsck->li_cookie_dir;
            if (pos.lp_dir_cookie >= MDS_DIR_END_OFF) {
                fid_zero(&pos.lp_dir_parent);
                pos.lp_dir_cookie = 0;
            } else {
                pos.lp_dir_parent =
                    *lfsck_dto2fid(lfsck->li_obj_dir);
            }
        } else {
            fid_zero(&pos.lp_dir_parent);
            pos.lp_dir_cookie = 0;
        }
        spin_unlock(&lfsck->li_lock);

        rc = lfsck_pos_dump(&buf, &len, &pos, "current_position");
        if (rc <= 0)
            goto out;
    } else if (ns->ln_status == LS_SCANNING_PHASE2) {
        cfs_duration_t duration = cfs_time_current() -
                      lfsck->li_time_last_checkpoint;
        __u64 checked = ns->ln_objs_checked_phase2 +
                com->lc_new_checked;
        __u64 speed1 = ns->ln_items_checked;
        __u64 speed2 = checked;
        __u64 new_checked = com->lc_new_checked * HZ;
        __u32 rtime = ns->ln_run_time_phase2 +
                  cfs_duration_sec(duration + HALF_SEC);

        if (duration != 0)
            do_div(new_checked, duration);
        if (ns->ln_run_time_phase1 != 0)
            do_div(speed1, ns->ln_run_time_phase1);
Developer ID: hejin, Project: lustre-stable, Lines of code: 67
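In the dump path, spin_lock(&lfsck->li_lock) is held only long enough to copy the `li_di_dir`-related fields into a private `pos`, so the directory cookie and the parent FID are read as one mutually consistent pair while the scanner thread may be updating them; the formatting work then happens with the lock dropped. A minimal sketch of this snapshot-under-spinlock idiom follows; `struct scan_state`, `scan_snapshot`, and `snapshot_scan_state` are invented names for illustration only.

#include <linux/spinlock.h>

/* Hypothetical scanner state, standing in for the lfsck_instance fields. */
struct scan_state {
	spinlock_t lock;
	unsigned long long cookie;	/* advanced by the scanning thread */
	int dir_active;			/* directory scan in progress? */
};

struct scan_snapshot {
	unsigned long long cookie;
	int dir_active;
};

/*
 * Copy the fields out under the lock so the pair is mutually consistent,
 * then let the caller format or print from the private snapshot with the
 * lock already dropped, keeping the critical section minimal.
 */
static void snapshot_scan_state(struct scan_state *s, struct scan_snapshot *out)
{
	spin_lock(&s->lock);
	out->cookie = s->cookie;
	out->dir_active = s->dir_active;
	spin_unlock(&s->lock);
}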


Example 24: lfsck_namespace_setup

int lfsck_namespace_setup(const struct lu_env *env,
			  struct lfsck_instance *lfsck)
{
	struct lfsck_component	*com;
	struct lfsck_namespace	*ns;
	struct dt_object	*root = NULL;
	struct dt_object	*obj;
	int			 rc;
	ENTRY;

	LASSERT(lfsck->li_master);

	OBD_ALLOC_PTR(com);
	if (com == NULL)
		RETURN(-ENOMEM);

	CFS_INIT_LIST_HEAD(&com->lc_link);
	CFS_INIT_LIST_HEAD(&com->lc_link_dir);
	init_rwsem(&com->lc_sem);
	atomic_set(&com->lc_ref, 1);
	com->lc_lfsck = lfsck;
	com->lc_type = LT_NAMESPACE;
	com->lc_ops = &lfsck_namespace_ops;
	com->lc_file_size = sizeof(struct lfsck_namespace);
	OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
	if (com->lc_file_ram == NULL)
		GOTO(out, rc = -ENOMEM);

	OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
	if (com->lc_file_disk == NULL)
		GOTO(out, rc = -ENOMEM);

	root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
	if (IS_ERR(root))
		GOTO(out, rc = PTR_ERR(root));

	if (unlikely(!dt_try_as_dir(env, root)))
		GOTO(out, rc = -ENOTDIR);

	obj = local_index_find_or_create(env, lfsck->li_los, root,
					 lfsck_namespace_name,
					 S_IFREG | S_IRUGO | S_IWUSR,
					 &dt_lfsck_features);
	if (IS_ERR(obj))
		GOTO(out, rc = PTR_ERR(obj));

	com->lc_obj = obj;
	rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_features);
	if (rc != 0)
		GOTO(out, rc);

	rc = lfsck_namespace_load(env, com);
	if (rc > 0)
		rc = lfsck_namespace_reset(env, com, true);
	else if (rc == -ENODATA)
		rc = lfsck_namespace_init(env, com);
	if (rc != 0)
		GOTO(out, rc);

	ns = com->lc_file_ram;
	switch (ns->ln_status) {
	case LS_INIT:
	case LS_COMPLETED:
	case LS_FAILED:
	case LS_STOPPED:
		spin_lock(&lfsck->li_lock);
		cfs_list_add_tail(&com->lc_link, &lfsck->li_list_idle);
		spin_unlock(&lfsck->li_lock);
		break;
	default:
		CERROR("%s: unknown lfsck_namespace status: rc = %u\n",
		       lfsck_lfsck2name(lfsck), ns->ln_status);
		/* fall through */
	case LS_SCANNING_PHASE1:
	case LS_SCANNING_PHASE2:
		/* No need to store the status to disk right now.
		 * If the system crashed before the status stored,
		 * it will be loaded back when next time. */
		ns->ln_status = LS_CRASHED;
		/* fall through */
	case LS_PAUSED:
	case LS_CRASHED:
		spin_lock(&lfsck->li_lock);
		cfs_list_add_tail(&com->lc_link, &lfsck->li_list_scan);
		cfs_list_add_tail(&com->lc_link_dir, &lfsck->li_list_dir);
		spin_unlock(&lfsck->li_lock);
		break;
	}

	GOTO(out, rc = 0);

out:
	if (root != NULL && !IS_ERR(root))
		lu_object_put(env, &root->do_lu);
	if (rc != 0)
		lfsck_component_cleanup(env, com);
	return rc;
}
Developer ID: hejin, Project: lustre-stable, Lines of code: 98
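Here the spinlock's job is to serialize list membership: every move of `com->lc_link` onto `li_list_idle`, `li_list_scan`, or `li_list_dir` happens inside `li_lock`, so other CPUs always see each component on exactly one list. The following sketch shows the same idiom with plain kernel lists; `registry_lock`, `file_component`, and the list names are hypothetical.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical registry, mirroring the li_lock/li_list_* pairing above. */
static DEFINE_SPINLOCK(registry_lock);
static LIST_HEAD(idle_list);
static LIST_HEAD(scan_list);

struct component {
	struct list_head link;	/* must be INIT_LIST_HEAD()-ed at creation */
	int status;
};

/*
 * Move @c onto the list matching its state. Doing the del/add pair inside
 * one critical section means no other CPU can ever find the component on
 * zero lists or on two lists at once.
 */
static void file_component(struct component *c, int scanning)
{
	spin_lock(&registry_lock);
	list_del_init(&c->link);
	list_add_tail(&c->link, scanning ? &scan_list : &idle_list);
	spin_unlock(&registry_lock);
}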


Example 25: nilfs_inode_by_name

static struct dentry *nilfs_get_parent(struct dentry *child)
{
	unsigned long ino;
	struct inode *inode;
	struct qstr dotdot = {.name = "..", .len = 2};
	struct nilfs_root *root;

	ino = nilfs_inode_by_name(child->d_inode, &dotdot);
	if (!ino)
		return ERR_PTR(-ENOENT);

	root = NILFS_I(child->d_inode)->i_root;

	inode = nilfs_iget(child->d_inode->i_sb, root, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return d_obtain_alias(inode);
}

static struct dentry *nilfs_get_dentry(struct super_block *sb, u64 cno,
				       u64 ino, u32 gen)
{
	struct nilfs_root *root;
	struct inode *inode;

	if (ino < NILFS_FIRST_INO(sb) && ino != NILFS_ROOT_INO)
		return ERR_PTR(-ESTALE);

	root = nilfs_lookup_root(sb->s_fs_info, cno);
	if (!root)
		return ERR_PTR(-ESTALE);

	inode = nilfs_iget(sb, root, ino);
	nilfs_put_root(root);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (gen && inode->i_generation != gen) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return d_obtain_alias(inode);
}

static struct dentry *nilfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
					 int fh_len, int fh_type)
{
	struct nilfs_fid *fid = (struct nilfs_fid *)fh;

	if ((fh_len != NILFS_FID_SIZE_NON_CONNECTABLE &&
	     fh_len != NILFS_FID_SIZE_CONNECTABLE) ||
	    (fh_type != FILEID_NILFS_WITH_PARENT &&
	     fh_type != FILEID_NILFS_WITHOUT_PARENT))
		return NULL;

	return nilfs_get_dentry(sb, fid->cno, fid->ino, fid->gen);
}

static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh,
					 int fh_len, int fh_type)
{
	struct nilfs_fid *fid = (struct nilfs_fid *)fh;

	if (fh_len != NILFS_FID_SIZE_CONNECTABLE ||
	    fh_type != FILEID_NILFS_WITH_PARENT)
		return NULL;

	return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen);
}

static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp,
			   int connectable)
{
	struct nilfs_fid *fid = (struct nilfs_fid *)fh;
	struct inode *inode = dentry->d_inode;
	struct nilfs_root *root = NILFS_I(inode)->i_root;
	int type;

	if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE ||
	    (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE))
		return 255;

	fid->cno = root->cno;
	fid->ino = inode->i_ino;
	fid->gen = inode->i_generation;

	if (connectable && !S_ISDIR(inode->i_mode)) {
		struct inode *parent;

		spin_lock(&dentry->d_lock);
		parent = dentry->d_parent->d_inode;
		fid->parent_ino = parent->i_ino;
		fid->parent_gen = parent->i_generation;
		spin_unlock(&dentry->d_lock);

		type = FILEID_NILFS_WITH_PARENT;
		*lenp = NILFS_FID_SIZE_CONNECTABLE;
	} else {
		type = FILEID_NILFS_WITHOUT_PARENT;
//......... portions of code omitted .........
Developer ID: romanbb, Project: android_kernel_lge_d851, Lines of code: 101
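nilfs_encode_fh() takes `dentry->d_lock` around the reads of `d_parent` because a concurrent rename may retarget the parent pointer; holding the dentry's own spinlock guarantees that `parent_ino` and `parent_gen` come from the same parent. Below is a stripped-down sketch of reading a pointer plus its dependent fields under the owner's lock; `struct node` and `read_parent_ids` are invented names, and real dcache locking has additional rules this sketch does not cover.

#include <linux/spinlock.h>

/*
 * Hypothetical node whose parent pointer may be retargeted by a
 * rename-like operation; the node's own lock stabilizes it, just as
 * d_lock stabilizes dentry->d_parent above.
 */
struct node {
	spinlock_t lock;
	struct node *parent;
	unsigned long id;
	unsigned int gen;
};

/* Read the parent's id/gen as one consistent pair. */
static void read_parent_ids(struct node *n, unsigned long *id, unsigned int *gen)
{
	spin_lock(&n->lock);
	*id = n->parent->id;
	*gen = n->parent->gen;
	spin_unlock(&n->lock);
}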


Example 26: list_for_each_entry

static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
{
    struct pci_dev *pdev;

    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
        if ( pdev->bus == bus && pdev->devfn == devfn )
            return pdev;

    pdev = xzalloc(struct pci_dev);
    if ( !pdev )
        return NULL;

    *(u16*) &pdev->seg = pseg->nr;
    *((u8*) &pdev->bus) = bus;
    *((u8*) &pdev->devfn) = devfn;
    pdev->domain = NULL;
    INIT_LIST_HEAD(&pdev->msi_list);

    if ( pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                             PCI_CAP_ID_MSIX) )
    {
        struct arch_msix *msix = xzalloc(struct arch_msix);

        if ( !msix )
        {
            xfree(pdev);
            return NULL;
        }
        spin_lock_init(&msix->table_lock);
        pdev->msix = msix;
    }

    list_add(&pdev->alldevs_list, &pseg->alldevs_list);

    /* update bus2bridge */
    switch ( pdev->type = pdev_type(pseg->nr, bus, devfn) )
    {
        int pos;
        u16 cap;
        u8 sec_bus, sub_bus;

        case DEV_TYPE_PCIe2PCI_BRIDGE:
        case DEV_TYPE_LEGACY_PCI_BRIDGE:
            sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
                                     PCI_FUNC(devfn), PCI_SECONDARY_BUS);
            sub_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
                                     PCI_FUNC(devfn), PCI_SUBORDINATE_BUS);

            spin_lock(&pseg->bus2bridge_lock);
            for ( ; sec_bus <= sub_bus; sec_bus++ )
            {
                pseg->bus2bridge[sec_bus].map = 1;
                pseg->bus2bridge[sec_bus].bus = bus;
                pseg->bus2bridge[sec_bus].devfn = devfn;
            }
            spin_unlock(&pseg->bus2bridge_lock);
            break;

        case DEV_TYPE_PCIe_ENDPOINT:
            pos = pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn),
                                      PCI_FUNC(devfn), PCI_CAP_ID_EXP);
            BUG_ON(!pos);
            cap = pci_conf_read16(pseg->nr, bus, PCI_SLOT(devfn),
                                  PCI_FUNC(devfn), pos + PCI_EXP_DEVCAP);
            if ( cap & PCI_EXP_DEVCAP_PHANTOM )
            {
                pdev->phantom_stride = 8 >> MASK_EXTR(cap,
                                                      PCI_EXP_DEVCAP_PHANTOM);
                if ( PCI_FUNC(devfn) >= pdev->phantom_stride )
                    pdev->phantom_stride = 0;
            }
            else
            {
Developer ID: hwanju, Project: xen, Lines of code: 73
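In alloc_pdev(), `bus2bridge_lock` turns the multi-entry update of the secondary..subordinate bus range into one atomic step, so a reader of `pseg->bus2bridge` can never observe a half-filled bridge mapping. The sketch below reproduces that range-publish pattern; note the original is Xen code, while this sketch uses Linux-style spinlock declarations, and `map_bridge_range` plus the array bound are assumptions for illustration.

#include <linux/spinlock.h>

#define MAX_BUS 256

/* Hypothetical bridge map, mirroring pseg->bus2bridge in the Xen code. */
static struct {
	unsigned char map, bus, devfn;
} bus2bridge[MAX_BUS];

static DEFINE_SPINLOCK(bus2bridge_lock);

/*
 * Publish a whole secondary..subordinate bus range in one critical
 * section, so a reader never observes a half-updated bridge mapping.
 */
static void map_bridge_range(unsigned int sec, unsigned int sub,
			     unsigned char bus, unsigned char devfn)
{
	unsigned int b;

	spin_lock(&bus2bridge_lock);
	for (b = sec; b <= sub && b < MAX_BUS; b++) {
		bus2bridge[b].map = 1;
		bus2bridge[b].bus = bus;
		bus2bridge[b].devfn = devfn;
	}
	spin_unlock(&bus2bridge_lock);
}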



Note: the spin_lock examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use is subject to each project's License. Do not reproduce without permission.

