
Tutorial: C++ task_lock Function Code Examples

51自学网 2021-06-03 08:40:43

This article collects typical usage examples of the task_lock function, gathered from C/C++ kernel sources (Linux, XNU, and various Android vendor trees). If you have been asking yourself what task_lock actually does, how it is called, or what real-world uses look like, the hand-picked code examples below should help.

The following presents 20 code examples of the task_lock function, sorted by popularity. A minimal usage sketch follows this introduction, and the collected examples begin at Example 1 below.
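Before the examples, a quick orientation may help. task_lock() takes the per-task alloc_lock spinlock of a task_struct; in the examples below it protects reads and swaps of fields such as ->mm, ->fs, ->files and ->nsproxy, and every task_lock() is paired with a task_unlock() on each exit path. Here is a minimal sketch of that recurring pattern, not code from any of the cited projects; the helper name task_has_mm is made up for illustration.

#include <linux/sched.h>	/* struct task_struct, task_lock(), task_unlock() */

/* Hypothetical helper illustrating the lock/read/unlock shape used
 * throughout the examples below: take the per-task spinlock, read a
 * protected field, and release the lock before anything that can sleep.
 */
static int task_has_mm(struct task_struct *p)
{
	int has_mm;

	task_lock(p);			/* spinlock: no sleeping while held */
	has_mm = (p->mm != NULL);	/* kernel threads have no mm */
	task_unlock(p);

	return has_mm;
}

The same shape appears in the lowmem_shrink examples: task_lock(p), read p->mm and p->signal, task_unlock(p), and only afterwards call anything that might reschedule.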

Example 1: ptrace_attach

int ptrace_attach(struct task_struct *task)
{
	int retval;
	unsigned long flags;

	audit_ptrace(task);

	retval = -EPERM;
	if (same_thread_group(task, current))
		goto out;

	/* Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently under ptrace.
	 */
	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
	if (retval < 0)
		goto out;

	retval = -EPERM;
repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	if (!write_trylock_irqsave(&tasklist_lock, flags)) {
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	if (!task->mm)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
	write_unlock_irqrestore(&tasklist_lock, flags);
	task_unlock(task);
	mutex_unlock(&task->cred_exec_mutex);
out:
	return retval;
}
Developer: achristensen3, project: cm-kernel, lines of code: 62


Example 2: SYSCALL_DEFINE1

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
	if (err)
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
Developer: Ntemis, project: LG_X3_P880_v20a, lines of code: 91


Example 3: fill_procregioninfo

int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t	address = (vm_map_offset_t)arg;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	vm_map_offset_t	start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return(0);
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	start = address;
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(0);
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = entry->offset;
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = entry->alias;

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared)
			pinfo->pri_flags |= PROC_REGION_SHARED;
	}

	extended.protection = entry->protection;
	extended.user_tag = entry->alias;
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, &extended);

	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
		extended.share_mode = SM_PRIVATE;

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);

	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;
		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(1);
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
//......... part of the code omitted .........
Developer: Prajna, project: xnu, lines of code: 101


Example 4: show_vfsmnt

//......... part of the code omitted .........
	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, sb);

	/* optional statistics */
	if (sb->s_op->show_stats) {
		seq_putc(m, ' ');
		if (!err)
			err = sb->s_op->show_stats(m, mnt_path.dentry);
	}

	seq_putc(m, '\n');
	return err;
}

static int mounts_open_common(struct inode *inode, struct file *file,
			      int (*show)(struct seq_file *, struct vfsmount *))
{
	struct task_struct *task = get_proc_task(inode);
	struct nsproxy *nsp;
	struct mnt_namespace *ns = NULL;
	struct path root;
	struct proc_mounts *p;
	int ret = -EINVAL;

	if (!task)
		goto err;

	task_lock(task);
	nsp = task->nsproxy;
	if (!nsp || !nsp->mnt_ns) {
		task_unlock(task);
		put_task_struct(task);
		goto err;
	}
	ns = nsp->mnt_ns;
	get_mnt_ns(ns);
	if (!task->fs) {
		task_unlock(task);
		put_task_struct(task);
		ret = -ENOENT;
		goto err_put_ns;
	}
	get_fs_root(task->fs, &root);
	task_unlock(task);
	put_task_struct(task);

	ret = -ENOMEM;
	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
	if (!p)
		goto err_put_path;

	file->private_data = &p->m;
	ret = seq_open(file, &mounts_op);
	if (ret)
		goto err_free;

	p->ns = ns;
	p->root = root;
	p->m.poll_event = ns->event;
	p->show = show;
Developer: 383530895, project: linux, lines of code: 67


Example 5: lowmem_shrink

static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
		global_page_state(NR_SHMEM) - global_page_state(NR_MLOCK);
	int fork_boost = 0;
	int *adj_array;
	size_t *min_array;
	struct zone *zone;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
						NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					"%lu pages in movable zone\n",
					zone_page_state(zone, NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then
	 * bail out right away; indicating to vmscan
	 * that we have nothing further to offer on
	 * this pass.
	 *
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;

	if (lowmem_fork_boost &&
	    time_before_eq(jiffies, lowmem_fork_boost_timeout)) {
		for (i = 0; i < lowmem_minfree_size; i++)
			minfree_tmp[i] = lowmem_minfree[i] + lowmem_fork_boost_minfree[i];
		adj_array = fork_boost_adj;
		min_array = minfree_tmp;
	} else {
		adj_array = lowmem_adj;
		min_array = lowmem_minfree;
	}

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < min_array[i] &&
		    (other_file < min_array[i] || !shrink_cache_possible(sc->gfp_mask))) {
			min_adj = adj_array[i];
			fork_boost = lowmem_fork_boost_minfree[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free, other_file,
			     min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
//......... part of the code omitted .........
Developer: ms79723, project: HTC-Jewel-Kernel-OC, lines of code: 101


Example 6: lowmem_shrink

static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	int lru_file = global_page_state(NR_ACTIVE_FILE) +
			global_page_state(NR_INACTIVE_FILE);

	/*
	 * If we already have a death outstanding, then
	 * bail out right away; indicating to vmscan
	 * that we have nothing further to offer on
	 * this pass.
	 *
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
		dump_deathpending(lowmem_deathpending);
		return 0;
	}

#ifdef CONFIG_SWAP
	if (fudgeswap != 0) {
		struct sysinfo si;

		si_swapinfo(&si);
		if (si.freeswap > 0) {
			if (fudgeswap > si.freeswap)
				other_file += si.freeswap;
			else
				other_file += fudgeswap;
		}
	}
#endif

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i]) {
			if (other_file < lowmem_minfree[i] ||
				(lowmem_check_filepages &&
				(lru_file < lowmem_minfile[i]))) {
				min_adj = lowmem_adj[i];
				break;
			}
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n",
			     nr_to_scan, gfp_mask, other_free, other_file,
			     min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %d, %x, return %d\n",
			     nr_to_scan, gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
//......... part of the code omitted .........
Developer: LeeDroid-, project: Flyer-2.6.35-HC-MR, lines of code: 101


Example 7: copy_signal

static inline int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->sum_sched_runtime = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	return 0;
}
Developer: maliyu, project: SOM2416, lines of code: 70


Example 8: sys_unshare

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct mnt_namespace *ns, *new_ns = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;
	struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;
	struct uts_namespace *uts, *new_uts = NULL;
	struct ipc_namespace *ipc, *new_ipc = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_mnt_namespace(unshare_flags, &new_ns, new_fs)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_ns;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;
	if ((err = unshare_utsname(unshare_flags, &new_uts)))
		goto bad_unshare_cleanup_semundo;
	if ((err = unshare_ipcs(unshare_flags, &new_ipc)))
		goto bad_unshare_cleanup_uts;

	if (new_ns || new_uts || new_ipc) {
		old_nsproxy = current->nsproxy;
		new_nsproxy = dup_namespaces(old_nsproxy);
		if (!new_nsproxy) {
			err = -ENOMEM;
			goto bad_unshare_cleanup_ipc;
		}
	}

	if (new_fs || new_ns || new_mm || new_fd || new_ulist ||
				new_uts || new_ipc) {

		task_lock(current);

		if (new_nsproxy) {
			current->nsproxy = new_nsproxy;
			new_nsproxy = old_nsproxy;
		}

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_ns) {
			ns = current->nsproxy->mnt_ns;
			current->nsproxy->mnt_ns = new_ns;
			new_ns = ns;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		if (new_uts) {
			uts = current->nsproxy->uts_ns;
			current->nsproxy->uts_ns = new_uts;
			new_uts = uts;
		}
//......... part of the code omitted .........
Developer: maliyu, project: SOM2416, lines of code: 101


Example 9: try_to_freeze_tasks

static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	unsigned int wakeup = 0;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!sig_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!sig_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			printk(KERN_ERR "Freezing aborted by %s\n", p->comm);
			wakeup = 1;
			break;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
					sig_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}
		thaw_workqueues();

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
				elapsed_csecs > 100)
				sched_show_task(p);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Developer: ahadjaman, project: lge-kernel-startablet-ICS, lines of code: 96


Example 10: force_contiguous_lowmem_shrink

static int force_contiguous_lowmem_shrink(IN gckKERNEL Kernel)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int tasksize;
	int ret = -1;
	int min_adj = 0;
	int selected_tasksize = 0;
	int selected_oom_adj;
	/*
	 * If we already have a death outstanding, then
	 * bail out right away; indicating to vmscan
	 * that we have nothing further to offer on
	 * this pass.
	 *
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;

	selected_oom_adj = min_adj;

	rcu_read_lock();
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		gcuDATABASE_INFO info;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_score_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}

		tasksize = 0;
		task_unlock(p);
		rcu_read_unlock();

		if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK) {
			tasksize += info.counters.bytes / PAGE_SIZE;
		}
		if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK) {
			tasksize += info.counters.bytes / PAGE_SIZE;
		}

		rcu_read_lock();

		if (tasksize <= 0)
			continue;

		gckOS_Print("<gpu> pid %d (%s), adj %d, size %d \n", p->pid, p->comm, oom_adj, tasksize);

		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
	}

	if (selected) {
		gckOS_Print("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize);
		lowmem_deathpending = selected;
		lowmem_deathpending_timeout = jiffies + HZ;
		force_sig(SIGKILL, selected);
		ret = 0;
	}

	rcu_read_unlock();
	return ret;
}
Developer: engicam-stable, project: engicam-kernel-geam6ul-3.14.38, lines of code: 82


Example 11: SYSCALL_DEFINE1

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing a thread from a thread group, must also unshare vm.
	 */
	if (unshare_flags & CLONE_THREAD)
		unshare_flags |= CLONE_VM;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
//......... part of the code omitted .........
Developer: lovejavaee, project: linux-2, lines of code: 101


Example 12: ptrace_start

//......... part of the code omitted .........
	if (IS_ERR(engine) || engine == NULL)
		goto out_tsk_rcu;

	state = rcu_dereference(engine->data);
	if (state == NULL || state->parent != current)
		goto out_tsk_rcu;

	/*
	 * Traditional ptrace behavior demands that the target already be
	 * quiescent, but not dead.
	 */
	if (request != PTRACE_KILL
	    && !(engine->flags & UTRACE_ACTION_QUIESCE)) {
		/*
		 * If it's in job control stop, turn it into proper quiescence.
		 */
		struct sighand_struct *sighand;
		unsigned long flags;

		sighand = lock_task_sighand(child, &flags);
		if (likely(sighand != NULL)) {
			if (child->state == TASK_STOPPED)
				ret = 0;
			unlock_task_sighand(child, &flags);
		}
		if (ret == 0) {
			ret = ptrace_update(child, state,
					    UTRACE_ACTION_QUIESCE, 0);
			if (unlikely(ret == -EALREADY))
				ret = -ESRCH;
			if (unlikely(ret))
				BUG_ON(ret != -ESRCH);
		}

		if (ret) {
			pr_debug("%d not stopped (%lu)\n",
				 child->pid, child->state);
			goto out_tsk_rcu;
		}

		ret = -ESRCH;  /* Return value for exit_state bail-out.  */
	}

	atomic_inc(&state->refcnt);

	rcu_read_unlock();

	NO_LOCKS;

	/*
	 * We do this for all requests to match traditional ptrace behavior.
	 * If the machine state synchronization done at context switch time
	 * includes e.g. writing back to user memory, we want to make sure
	 * that has finished before a PTRACE_PEEKDATA can fetch the results.
	 * On most machines, only regset data is affected by context switch
	 * and calling utrace_regset later on will take care of that, so
	 * this is superfluous.
	 *
	 * To do this purely in utrace terms, we could do:
	 *  (void) utrace_regset(child, engine, utrace_native_view(child), 0);
	 */
	if (request != PTRACE_KILL) {
		wait_task_inactive(child);
		while (child->state != TASK_TRACED && child->state != TASK_STOPPED) {
			if (child->exit_state) {
				__ptrace_state_free(state);
				goto out_tsk;
			}
			task_lock(child);
			if (child->mm && child->mm->core_waiters) {
				task_unlock(child);
				__ptrace_state_free(state);
				goto out_tsk;
			}
			task_unlock(child);

			/*
			 * This is a dismal kludge, but it only comes up on ia64.
			 * It might be blocked inside regset->writeback() called
			 * from ptrace_report(), when it's on its way to quiescing
			 * in TASK_TRACED real soon now.  We actually need that
			 * writeback call to have finished, before a PTRACE_PEEKDATA
			 * here, for example.  So keep waiting until it's really there.
			 */
			yield();
			wait_task_inactive(child);
		}
	}
	wait_task_inactive(child);

	*childp = child;
	*enginep = engine;
	*statep = state;
	return -EIO;

out_tsk_rcu:
	rcu_read_unlock();
out_tsk:
	NO_LOCKS;
	put_task_struct(child);
out:
	return ret;
}
Developer: FeifeiWang7, project: Exploit, lines of code: 101


Example 13: strnlen

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
Developer: DarkSense, project: PyramidION, lines of code: 74


Example 14: sys_setrlimit

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
Developer: acassis, project: emlinux-ssd1935, lines of code: 64


Example 15: lowmem_scan

static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	unsigned long rem = 0;
	int tasksize;
	int i;
	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	int selected_tasksize = 0;
	short selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_node_page_state(NR_FILE_PAGES) -
						global_node_page_state(NR_SHMEM) -
						total_swapcache_pages();

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}

	lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
		     sc->nr_to_scan, sc->gfp_mask, other_free,
		     other_file, min_score_adj);

	if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
			     sc->nr_to_scan, sc->gfp_mask);
		return 0;
	}

	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		short oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (task_lmk_waiting(p) &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			task_unlock(p);
			rcu_read_unlock();
			return 0;
		}
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
			     p->comm, p->pid, oom_score_adj, tasksize);
	}
	if (selected) {
		task_lock(selected);
		send_sig(SIGKILL, selected, 0);
		if (selected->mm)
			task_set_lmk_waiting(selected);
		task_unlock(selected);
		lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
				 "   to free %ldkB on behalf of '%s' (%d) because\n"
				 "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
				 "   Free memory is %ldkB above reserved\n",
			     selected->comm, selected->pid,
			     selected_oom_score_adj,
			     selected_tasksize * (long)(PAGE_SIZE / 1024),
			     current->comm, current->pid,
			     other_file * (long)(PAGE_SIZE / 1024),
			     minfree * (long)(PAGE_SIZE / 1024),
			     min_score_adj,
			     other_free * (long)(PAGE_SIZE / 1024));
		lowmem_deathpending_timeout = jiffies + HZ;
//......... part of the code omitted .........
Developer: AK101111, project: linux, lines of code: 101


Example 16: lowmem_shrink

static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n",
			     nr_to_scan, gfp_mask, other_free, other_file,
			     min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %d, %x, return %d\n",
			     nr_to_scan, gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			task_unlock(p);
			continue;
		}
		oom_adj = mm->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize);
		force_sig(SIGKILL, selected);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %d, %x, return %d\n",
		     nr_to_scan, gfp_mask, rem);
	read_unlock(&tasklist_lock);
	return rem;
}
Developer: 3null, project: fastsocket, lines of code: 85


Example 17: lowmem_shrink

static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_adj = OOM_ADJUST_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	struct zone *zone;
	unsigned long flags;

	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				other_free -= zone_page_state(zone,
						NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					"%lu pages in movable zone\n",
					zone_page_state(zone, NR_FREE_PAGES));
			}
		}
	}
	/*
	 * If we already have a death outstanding, then
	 * bail out right away; indicating to vmscan
	 * that we have nothing further to offer on
	 * this pass.
	 *
	 */
	//if (lowmem_deathpending &&
	//    time_before_eq(jiffies, lowmem_deathpending_timeout))
	if (lowmem_deathpending)
		return 0;

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
			     sc->nr_to_scan, sc->gfp_mask, other_free, other_file,
			     min_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
		if (oom_adj < min_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			if (oom_adj == selected_oom_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_adj = oom_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
//......... part of the code omitted .........
Developer: mblaster, project: android_kernel_huawei_hws7300u, lines of code: 101


Example 18: lowmem_vm_shrinker

static void lowmem_vm_shrinker(int largest, int rss_threshold)
{
	struct task_struct *p;
	struct task_struct *selected = NULL;
	int vmsize, rssize;
	int min_adj, min_large_adj;
	int selected_vmsize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	unsigned long flags;

	/*
	 * If we already have a death outstanding, then
	 * bail out right away; indicating to vmscan
	 * that we have nothing further to offer on
	 * this pass.
	 *
	 */
	if (lowmem_deathpending)
		return;

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;

	min_adj = lowmem_adj[array_size - 2];  /* lock onto cached processes only */
	min_large_adj = lowmem_adj[array_size - 3];  /* Minimum priority for large processes */

	lowmem_print(3, "lowmem_vm_shrink ma %d, large ma %d, largest %d, rss_threshold=%d\n",
		     min_adj, min_large_adj, largest, rss_threshold);

	selected_oom_adj = min_adj;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}
		oom_adj = sig->oom_adj;
		vmsize = get_mm_hiwater_vm(mm);
		rssize = get_mm_rss(mm) * PAGE_SIZE;
		task_unlock(p);
		if (vmsize <= 0)
			continue;

		/* Only look at cached processes */
		if (oom_adj < min_adj) {
			/* Is this a very large home process in the background? */
			if ((oom_adj > min_large_adj) && (rssize >= rss_threshold)) {
				selected = p;
				selected_vmsize = vmsize;
				selected_oom_adj = oom_adj;
				lowmem_print(2, "lowmem_shrink override %d (%s), adj %d, vm size %d, rs size %d to kill\n",
					     p->pid, p->comm, oom_adj, vmsize, rssize);
				break;
			}
			continue;
		}

		/* Is this process a better fit than last selected? */
		if (selected) {
			if (oom_adj < selected_oom_adj)
				continue;
			/* If looking for largest, ignore priority */
			if ((largest || (oom_adj == selected_oom_adj)) &&
			    (vmsize <= selected_vmsize))
				continue;
		}
		selected = p;
		selected_vmsize = vmsize;
		if (largest == 0)  /* Do not filter by priority if searching for largest */
			selected_oom_adj = oom_adj;
		lowmem_print(2, "lowmem_shrink select %d (%s), adj %d, vm size %d, rs size %d to kill\n",
			     p->pid, p->comm, oom_adj, vmsize, rssize);
	}
	if (selected) {
		spin_lock_irqsave(&lowmem_deathpending_lock, flags);
		if (!lowmem_deathpending) {
			lowmem_print(1,
				"lowmem_shrink send sigkill to %d (%s), adj %d, vm size %d\n",
				selected->pid, selected->comm,
				selected_oom_adj, selected_vmsize);
			lowmem_deathpending = selected;
			task_free_register(&task_nb);
			force_sig(SIGKILL, selected);
		}
		spin_unlock_irqrestore(&lowmem_deathpending_lock, flags);
//......... part of the code omitted .........
Developer: mblaster, project: android_kernel_huawei_hws7300u, lines of code: 101


Example 19: ERR_PTR

//......... part of the code omitted .........
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	*/
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		task_lock(p);
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			atomic_dec(&p->mm->oom_disable_count);
		task_unlock(p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
Developer: Ntemis, project: LG_X3_P880_v20a, lines of code: 101


Example 20: try_to_freeze_tasks

static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (time_after(jiffies, end_time))
			break;
	} while (todo);

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
				"(%d tasks refusing to freeze):\n",
				elapsed_csecs / 100, elapsed_csecs % 100, todo);
		show_state();
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p))
				printk(KERN_ERR " %s\n", p->comm);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Developer: DeltaResero, project: GC-Wii-Linux-Kernel-2.6.32.y, lines of code: 70



Note: The task_lock function examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright in the code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce without permission.

