
Self-study tutorial: C++ task_cpu function code examples

51自学网 2021-06-03 08:40:33
This tutorial on C++ task_cpu function code examples is quite practical; we hope it helps you.

This article collects typical usage examples of the task_cpu function in C++ (as used in the Linux kernel). If you are wondering what task_cpu does, how it is called, or where to find working examples of it, the hand-picked code samples below may help.

A total of 26 code examples of the task_cpu function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
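Before the examples, it helps to know what task_cpu itself returns: the CPU on which the given task is currently queued or was last scheduled. Below is a minimal sketch of the accessor, roughly as it appears in include/linux/sched.h; the exact definition varies across kernel versions and configs, so treat it as an illustration rather than the definitive source.

/* Sketch of task_cpu(): returns the CPU the task is queued on / last ran on.
 * Depending on kernel version, the cpu field lives either in task_struct
 * itself (CONFIG_THREAD_INFO_IN_TASK) or in the task's thread_info. */
static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return READ_ONCE(p->cpu);
#else
	return READ_ONCE(task_thread_info(p)->cpu);
#endif
}

A pattern you will see repeatedly in the examples is comparing task_cpu(p) against smp_processor_id() to decide whether the task is local or whether a cross-CPU IPI (e.g. smp_send_reschedule) is needed.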

Example 1: resched_task

static inline void resched_task(task_t *p)
{
#ifdef CONFIG_SMP
	preempt_disable();
	if (/* balabala */ && (task_cpu(p) != smp_processor_id()))
		smp_send_reschedule(task_cpu(p));
	preempt_enable();
#else
	set_tsk_need_resched(p);
#endif
}
Developer: shuitian, Project: Code, Lines: 11


Example 2: probe_wakeup

static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
    struct trace_array_cpu *data;
    int cpu = smp_processor_id();
    unsigned long flags;
    long disabled;
    int pc;

    if (likely(!tracer_enabled))
        return;

    tracing_record_cmdline(p);
    tracing_record_cmdline(current);

    if ((wakeup_rt && !rt_task(p)) ||
            p->prio >= wakeup_prio ||
            p->prio >= current->prio)
        return;

    pc = preempt_count();
    disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
    if (unlikely(disabled != 1))
        goto out;

    /* interrupts should be off from try_to_wake_up */
    __raw_spin_lock(&wakeup_lock);

    /* check for races. */
    if (!tracer_enabled || p->prio >= wakeup_prio)
        goto out_locked;

    /* reset the trace */
    __wakeup_reset(wakeup_trace);

    wakeup_cpu = task_cpu(p);
    wakeup_current_cpu = wakeup_cpu;
    wakeup_prio = p->prio;

    wakeup_task = p;
    get_task_struct(wakeup_task);

    local_save_flags(flags);

    data = wakeup_trace->data[wakeup_cpu];
    data->preempt_timestamp = ftrace_now(cpu);
    tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

    /*
     * We must be careful in using CALLER_ADDR2. But since wake_up
     * is not called by an assembly function (whereas schedule is)
     * it should be safe to use it here.
     */
    trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
    __raw_spin_unlock(&wakeup_lock);
out:
    atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Developer: chunyenho, Project: RTS-hw2, Lines: 60


Example 3: tracing_sched_switch_trace

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
Developer: Ca1ne, Project: Enoch316, Lines: 27


Example 4: print_task

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", cpu_to_node(task_cpu(p)));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}
Developer: RobinSystems, Project: linux-3.13, Lines: 31


Example 5: print_rq_at_KE

void print_rq_at_KE(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int locked;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	//read_lock_irqsave(&tasklist_lock, flags);
	locked = read_trylock_n_irqsave(&tasklist_lock, &flags, m, "print_rq_at_KE");
	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	if (locked)
		read_unlock_irqrestore(&tasklist_lock, flags);
}
Developer: openube, Project: android_kernel_sony_c2305, Lines: 26


Example 6: tracing_sched_wakeup_trace

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr->buffer, flags, 6, pc);
	ftrace_trace_userstack(tr->buffer, flags, pc);
}
Developer: Ca1ne, Project: Enoch316, Lines: 29


Example 7: dump_tasks_info

static void dump_tasks_info(void)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid	tgid total_vm	   rss cpu oom_adj oom_score_adj name\n");
	for_each_process(p) {
		/* check unkillable tasks */
		if (is_global_init(p))
			continue;
		if (p->flags & PF_KTHREAD)
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			* This is a kthread or all of p's threads have already
			* detached their mm's.	There's no need to report
			* them; they can't be oom killed anyway.
			*/
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %3u	 %3d	     %5d %s\n",
		task->pid, task_uid(task), task->tgid,
		task->mm->total_vm, get_mm_rss(task->mm),
		task_cpu(task), task->signal->oom_adj,
		task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
}
Developer: iTechnoguy, Project: kernel-nk1-negalite-lt02ltespr, Lines: 31


Example 8: sched_state_will_schedule

void sched_state_will_schedule(struct task_struct* tsk)
{
	/* Litmus hack: we only care about processor-local invocations of
	 * set_tsk_need_resched(). We can't reliably set the flag remotely
	 * since it might race with other updates to the scheduling state.  We
	 * can't rely on the runqueue lock protecting updates to the sched
	 * state since processors do not acquire the runqueue locks for all
	 * updates to the sched state (to avoid acquiring two runqueue locks at
	 * the same time). Further, if tsk is residing on a remote processor,
	 * then that processor doesn't actually know yet that it is going to
	 * reschedule; it still must receive an IPI (unless a local invocation
	 * races).
	 */
	if (likely(task_cpu(tsk) == smp_processor_id())) {
		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE);
		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
			set_sched_state(PICKED_WRONG_TASK);
		else
			set_sched_state(WILL_SCHEDULE);
	} else
		/* Litmus tasks should never be subject to a remote
		 * set_tsk_need_resched(). */
		BUG_ON(is_realtime(tsk));
	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
		   __builtin_return_address(0));
}
Developer: AshishPrasad, Project: BTP, Lines: 26


Example 9: rtcc_idle_handler

/*
 * RTCC idle handler, called when CPU is idle
 */
static int rtcc_idle_handler(struct notifier_block *nb, unsigned long val, void *data)
{
	if (likely(!atomic_read(&krtccd_enabled)))
		return 0;

	if (likely(atomic_read(&need_to_reclaim) == 0))
		return 0;

	// To prevent RTCC from running too frequently
	if (likely(time_before(jiffies, prev_jiffy + rtcc_reclaim_jiffies)))
		return 0;

	if (unlikely(atomic_read(&kswapd_running) == 1))
		return 0;

	if (unlikely(idle_cpu(task_cpu(krtccd)) && this_cpu_loadx(3) == 0) || rtcc_boost_mode) {
		if (likely(atomic_read(&krtccd_running) == 0)) {
			atomic_set(&krtccd_running, 1);
			wake_up_process(krtccd);
			prev_jiffy = jiffies;
		}
	}

	return 0;
}
Developer: acorn-marvell, Project: brillo_pxa_kernel, Lines: 29


Example 10: select_task_rq_other_rr

static int
select_task_rq_other_rr(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	return task_cpu(p);
}
Developer: zmotsing, Project: KVM, Lines: 8


Example 11: linsched_disable_migrations

void linsched_disable_migrations(void)
{
	int i;

	for (i = 0; i < curr_task_id; i++)
		set_cpus_allowed(__linsched_tasks[i],
				 cpumask_of_cpu(
					 task_cpu(__linsched_tasks[i])));
}
Developer: varrunr, Project: ml-cfs, Lines: 9


Example 12: probe_wakeup

static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;
	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Developer: percy-g2, Project: rowboat-kernel, Lines: 51


Example 13: dump_one_task_info

static void dump_one_task_info(struct task_struct *tsk)
{
	char stat_array[3] = { 'R', 'S', 'D'};
	char stat_ch;

	stat_ch = tsk->state <= TASK_UNINTERRUPTIBLE ?
		stat_array[tsk->state] : '?';
	pr_info("%8d %8d %8d %16lld %c(%d) %4d    %p %s\n",
			tsk->pid, (int)(tsk->utime), (int)(tsk->stime),
			tsk->se.exec_start, stat_ch, (int)(tsk->state),
			task_cpu(tsk), tsk, tsk->comm);
	show_stack(tsk, NULL);
}
Developer: acorn-marvell, Project: brillo_pxa_kernel, Lines: 14


Example 14: check_for_tasks

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
Developer: LiquidSmooth-Devices, Project: Deathly_Kernel_D2, Lines: 15


Example 15: linsched_force_migration

/* Force a migration of task to the dest_cpu.
 * If migr is set, allow migrations after the forced migration... otherwise,
 * do not allow them. (We need to disable migrations so that the forced
 * migration takes place correctly.)
 * Returns old cpu of task.
 */
int linsched_force_migration(struct task_struct *task, int dest_cpu, int migr)
{
	int old_cpu = task_cpu(task);

	linsched_disable_migrations();
	set_cpus_allowed(task, cpumask_of_cpu(dest_cpu));
	linsched_change_cpu(old_cpu);
	schedule();
	linsched_change_cpu(dest_cpu);
	schedule();
	if (migr)
		linsched_enable_migrations();

	return old_cpu;
}
Developer: varrunr, Project: ml-cfs, Lines: 21


Example 16: acct_update_power

void acct_update_power(struct task_struct *task, cputime_t cputime) {
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_stats *stats;
	unsigned int cpu_num, curr;

	if (!task)
		return;

	cpu_num = task_cpu(task);
	powerstats = per_cpu(cpufreq_power_stats, cpu_num);
	stats = per_cpu(cpufreq_stats_table, cpu_num);
	if (!powerstats || !stats)
		return;

	curr = powerstats->curr[stats->last_index];
	task->cpu_power += curr * cputime_to_usecs(cputime);
}
Developer: skeevy420, Project: NebulaKernel, Lines: 16


Example 17: check_for_tasks

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
Developer: borkmann, Project: kasan, Lines: 16


Example 18: wakeup_tracer_call

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, ip, parent_ip, flags, pc);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
Developer: AppEngine, Project: linux-2.6, Lines: 50


Example 19: check_for_tasks

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
#ifdef CONFIG_DEBUG_PRINTK
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
#else
			;
#endif
	}
	write_unlock_irq(&tasklist_lock);
}
Developer: nos1609, Project: Chrono_Kernel-1, Lines: 20


Example 20: resched_task

void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}
Developer: tobsan, Project: parparse, Lines: 20


Example 21: take_cpu_down

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	unsigned int cpu = (unsigned long)param->hcpu;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	if (task_cpu(param->caller) == cpu)
		move_task_off_dead_cpu(cpu, param->caller);
	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();

	return 0;
}
Developer: myfluxi, Project: xxKernel, Lines: 21


Example 22: print_rq

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
Developer: 3null, Project: linux, Lines: 20


Example 23: print_rq

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);
	for_each_process_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	read_unlock_irqrestore(&tasklist_lock, flags);
}
Developer: AOSPERIA, Project: android_kernel_sony, Lines: 21


Example 24: could_cswap

void could_cswap(void)
{
	if (atomic_read(&s_reclaim.need_to_reclaim) == 0)
		return;

	if (time_before(jiffies, prev_jiffy + minimum_interval_time))
		return;

	if (atomic_read(&s_reclaim.lmk_running) == 1 || atomic_read(&kswapd_thread_on) == 1)
		return;

	if (get_nr_swap_pages() < minimum_freeswap_pages)
		return;

	if (idle_cpu(task_cpu(s_reclaim.kcompcached)) && this_cpu_loadx(4) == 0) {
		if (atomic_read(&s_reclaim.kcompcached_running) == 0) {
			wake_up_process(s_reclaim.kcompcached);
			atomic_set(&s_reclaim.kcompcached_running, 1);
			prev_jiffy = jiffies;
		}
	}
}
Developer: iTechnoguy, Project: kernel-nk1-negalite-lt02ltespr, Lines: 22


Example 25: psi_task_change

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	bool wake_clock = true;
	void *iter = NULL;

	if (!task->pid)
		return;

	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, cpu,
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;

	/*
	 * Periodic aggregation shuts off if there is a period of no
	 * task changes, so we wake it back up if necessary. However,
	 * don't do this if the task change is the aggregation worker
	 * itself going to sleep, or we'll ping-pong forever.
	 */
	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
		     wq_worker_last_func(task) == psi_update_work))
		wake_clock = false;

	while ((group = iterate_groups(task, &iter))) {
		psi_group_change(group, cpu, clear, set);
		if (wake_clock && !delayed_work_pending(&group->clock_work))
			schedule_delayed_work(&group->clock_work, PSI_FREQ);
	}
}
Developer: AlexShiLucky, Project: linux, Lines: 39


Example 26: check_for_tasks

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading to do not warn about a task,
		 * which was running on this cpu in the past, and
		 * it's just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;
		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}
Developer: tsj123, Project: androidx86_remix, Lines: 23



Note: The task_cpu function examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before using or redistributing the code. Do not reproduce this article without permission.


C++ task_create function code examples
C++ task_alloc function code examples