您当前的位置:首页 > IT编程 > C++
| C语言 | Java | VB | VC | python | Android | TensorFlow | C++ | oracle | 学术与代码 | cnn卷积神经网络 | gnn | 图像修复 | Keras | 数据集 | Neo4j | 自然语言处理 | 深度学习 | 医学CAD | 医学影像 | 超参数 | pointnet | pytorch | 异常检测 | Transformers | 情感分类 | 知识图谱 |

自学教程:C++ zalloc_cpumask_var函数代码示例

51自学网 2021-06-03 11:55:30
  C++
这篇教程C++ zalloc_cpumask_var函数代码示例写得很实用,希望能帮到您。

本文整理汇总了C/C++项目中zalloc_cpumask_var函数的典型用法代码示例(该函数来自Linux内核及Xen等C语言代码库)。如果您正苦于以下问题:zalloc_cpumask_var函数的具体用法?zalloc_cpumask_var怎么用?zalloc_cpumask_var使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。

在下文中一共展示了zalloc_cpumask_var函数的25个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: setup_cpu_sibling_map

/*
 * Allocate the sibling/core topology masks for @cpu and seed them with
 * the CPU itself.  There is no error return: this runs during CPU
 * bring-up, so allocation failure is treated as fatal via panic().
 */
static void setup_cpu_sibling_map(int cpu)
{
    int ok = zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
             zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu));

    if ( !ok )
        panic("No memory for CPU sibling/core maps");

    /* A CPU is a sibling with itself and is always on its own core. */
    cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
    cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
}
开发者ID:lwhibernate,项目名称:xen,代码行数:10,


示例2: pseries_add_processor

/* * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle * here is that a cpu device node may represent up to two logical cpus * in the SMT case.  We must honor the assumption in other code that * the logical ids for sibling SMT threads x and y are adjacent, such * that x^1 == y and y^1 == x. */static int pseries_add_processor(struct device_node *np){	unsigned int cpu;	cpumask_var_t candidate_mask, tmp;	int err = -ENOSPC, len, nthreads, i;	const __be32 *intserv;	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);	if (!intserv)		return 0;	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);	zalloc_cpumask_var(&tmp, GFP_KERNEL);	nthreads = len / sizeof(u32);	for (i = 0; i < nthreads; i++)		cpumask_set_cpu(i, tmp);	cpu_maps_update_begin();	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));	/* Get a bitmap of unoccupied slots. */	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);	if (cpumask_empty(candidate_mask)) {		/* If we get here, it most likely means that NR_CPUS is		 * less than the partition's max processors setting.		 */		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"		       " supports %d logical cpus./n", np,		       num_possible_cpus());		goto out_unlock;	}	while (!cpumask_empty(tmp))		if (cpumask_subset(tmp, candidate_mask))			/* Found a range where we can insert the new cpu(s) */			break;		else			cpumask_shift_left(tmp, tmp, nthreads);	if (cpumask_empty(tmp)) {		printk(KERN_ERR "Unable to find space in cpu_present_mask for"		       " processor %s with %d thread(s)/n", np->name,		       nthreads);		goto out_unlock;	}	for_each_cpu(cpu, tmp) {		BUG_ON(cpu_present(cpu));		set_cpu_present(cpu, true);		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));	}
开发者ID:Daniel-Abrecht,项目名称:linux,代码行数:60,


示例3: init_irq_default_affinity

/*
 * Lazily set up the global default IRQ affinity mask: allocate a zeroed
 * mask if none exists yet, then default to "all CPUs" if it is empty.
 */
static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);

	if (!cpumask_empty(irq_default_affinity))
		return;

	cpumask_setall(irq_default_affinity);
}
开发者ID:avagin,项目名称:linux,代码行数:7,


示例4: cpu_rmap_update

/** * cpu_rmap_update - update CPU rmap following a change of object affinity * @rmap: CPU rmap to update * @index: Index of object whose affinity changed * @affinity: New CPU affinity of object */int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,		    const struct cpumask *affinity){	cpumask_var_t update_mask;	unsigned int cpu;	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))		return -ENOMEM;	/* Invalidate distance for all CPUs for which this used to be	 * the nearest object.  Mark those CPUs for update.	 */	for_each_online_cpu(cpu) {		if (rmap->near[cpu].index == index) {			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;			cpumask_set_cpu(cpu, update_mask);		}	}	debug_print_rmap(rmap, "after invalidating old distances");	/* Set distance to 0 for all CPUs in the new affinity mask.	 * Mark all CPUs within their NUMA nodes for update.	 */	for_each_cpu(cpu, affinity) {		rmap->near[cpu].index = index;		rmap->near[cpu].dist = 0;		cpumask_or(update_mask, update_mask,			   cpumask_of_node(cpu_to_node(cpu)));	}
开发者ID:020gzh,项目名称:linux,代码行数:36,


示例5: cpupri_init

/** * cpupri_init - initialize the cpupri structure * @cp: The cpupri context * * Return: -ENOMEM on memory allocation failure. */int cpupri_init(struct cpupri *cp){	int i;	memset(cp, 0, sizeof(*cp));	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {		struct cpupri_vec *vec = &cp->pri_to_cpu[i];		atomic_set(&vec->count, 0);		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))			goto cleanup;	}	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);	if (!cp->cpu_to_pri)		goto cleanup;	for_each_possible_cpu(i)		cp->cpu_to_pri[i] = CPUPRI_INVALID;	return 0;cleanup:	for (i--; i >= 0; i--)		free_cpumask_var(cp->pri_to_cpu[i].mask);	return -ENOMEM;}
开发者ID:020gzh,项目名称:linux,代码行数:34,


示例6: domain_update_node_affinity

void domain_update_node_affinity(struct domain *d){    cpumask_var_t cpumask;    cpumask_var_t online_affinity;    const cpumask_t *online;    nodemask_t nodemask = NODE_MASK_NONE;    struct vcpu *v;    unsigned int node;    if ( !zalloc_cpumask_var(&cpumask) )        return;    if ( !alloc_cpumask_var(&online_affinity) )    {        free_cpumask_var(cpumask);        return;    }    online = cpupool_online_cpumask(d->cpupool);    spin_lock(&d->node_affinity_lock);    for_each_vcpu ( d, v )    {        cpumask_and(online_affinity, v->cpu_affinity, online);        cpumask_or(cpumask, cpumask, online_affinity);    }
开发者ID:YongMan,项目名称:Xen-4.3.1,代码行数:26,


示例7: xen_smp_prepare_cpus

/*
 * Prepare CPUs for SMP bring-up under Xen: record boot-CPU info,
 * allocate per-cpu topology masks, trim the possible-CPU map down to
 * max_cpus, and fork an idle task for every remaining secondary CPU.
 */
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	/* NOTE(review): these allocations are unchecked; with
	 * CONFIG_CPUMASK_OFFSTACK zalloc_cpumask_var can fail — confirm
	 * against upstream whether failure is tolerable here. */
	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	/* NOTE(review): "/n" in this literal looks like a transcription
	 * artifact of "\n" — verify against the original source tree. */
	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map/n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		/* Find the highest-numbered still-possible CPU and drop it. */
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	/* Fork an idle task for every secondary CPU and mark it present. */
	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
开发者ID:kaustubh-kabra,项目名称:xenified-jeremy-kernel-,代码行数:45,


示例8: init_irq_default_affinity

/*
 * Make sure the default IRQ affinity mask exists (allocation is only
 * needed when cpumasks live off-stack) and is non-empty, falling back
 * to "all CPUs" otherwise.
 */
static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
	if (!cpumask_empty(irq_default_affinity))
		return;

	cpumask_setall(irq_default_affinity);
}
开发者ID:AshishNamdev,项目名称:linux,代码行数:9,


示例9: xzalloc

/*
 * Allocate a zeroed struct cpupool together with its cpu_valid and
 * cpu_suspended cpumasks.  Returns NULL (with everything already
 * allocated freed again) on any allocation failure.
 */
static struct cpupool *alloc_cpupool_struct(void)
{
    struct cpupool *c = xzalloc(struct cpupool);

    if ( c == NULL )
        return NULL;

    if ( !zalloc_cpumask_var(&c->cpu_valid) )
    {
        xfree(c);
        return NULL;
    }

    if ( !zalloc_cpumask_var(&c->cpu_suspended) )
    {
        free_cpumask_var(c->cpu_valid);
        xfree(c);
        return NULL;
    }

    return c;
}
开发者ID:HackLinux,项目名称:xen-1,代码行数:18,


示例10: irq_affinity_setup

/*
 * Early boot-parameter parser: seed the default IRQ affinity mask from
 * the CPU list given on the kernel command line.
 */
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);

	/*
	 * Always include the boot CPU so that a bogus command-line mask
	 * can never leave us with an empty affinity set.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);

	return 1;
}
开发者ID:AshishNamdev,项目名称:linux,代码行数:11,


示例11: mem_rx_setup

static int mem_rx_setup(struct link_device *ld){	struct mem_link_device *mld = to_mem_link_device(ld);	if (!zalloc_cpumask_var(&mld->dmask, GFP_KERNEL))		return -ENOMEM;	if (!zalloc_cpumask_var(&mld->imask, GFP_KERNEL))		return -ENOMEM;	if (!zalloc_cpumask_var(&mld->tmask, GFP_KERNEL))		return -ENOMEM;#ifdef CONFIG_ARGOS	/* Below hard-coded mask values should be removed later on.	 * Like net-sysfs, argos module also should support sysfs knob,	 * so that user layer must be able to control these cpu mask. */#ifdef CONFIG_SCHED_HMP	cpumask_copy(mld->dmask, &hmp_slow_cpu_mask);#endif	cpumask_or(mld->imask, mld->imask, cpumask_of(3));	argos_irq_affinity_setup_label(217, "IPC", mld->imask, mld->dmask);#endif	ld->tx_wq = create_singlethread_workqueue("mem_tx_work");	if (!ld->tx_wq) {		mif_err("%s: ERR! fail to create tx_wq/n", ld->name);		return -ENOMEM;	}	ld->rx_wq = alloc_workqueue(			"mem_rx_work", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);	if (!ld->rx_wq) {		mif_err("%s: ERR! fail to create rx_wq/n", ld->name);		return -ENOMEM;	}	INIT_DELAYED_WORK(&ld->rx_delayed_work, link_to_demux_work);	return 0;}
开发者ID:ShedrockN4,项目名称:wiliteneo,代码行数:41,


示例12: init_rootdomain

/*
 * Allocate and initialize one root_domain: its four cpumasks, the
 * deadline-bandwidth accounting, and the cpudl/cpupri substructures.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure; on failure
 * everything allocated so far is unwound in reverse order via the
 * goto-cleanup chain below.
 */
static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	/* State for the IPI-based RT-task push mechanism. */
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

/* Cleanup labels: each frees what the step above it allocated. */
free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}
开发者ID:the-snowwhite,项目名称:linux-socfpga,代码行数:38,


示例13: show_rps_map

static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf){	struct rps_map *map;	cpumask_var_t mask;	int i, len;	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))		return -ENOMEM;	rcu_read_lock();	map = rcu_dereference(queue->rps_map);	if (map)		for (i = 0; i < map->len; i++)			cpumask_set_cpu(map->cpus[i], mask);	len = snprintf(buf, PAGE_SIZE, "%*pb/n", cpumask_pr_args(mask));	rcu_read_unlock();	free_cpumask_var(mask);	return len < PAGE_SIZE ? len : -EINVAL;}
开发者ID:lfd,项目名称:PreemptRT,代码行数:21,


示例14: cpudl_init

/* * cpudl_init - initialize the cpudl structure * @cp: the cpudl max-heap context */int cpudl_init(struct cpudl *cp){	int i;	memset(cp, 0, sizeof(*cp));	raw_spin_lock_init(&cp->lock);	cp->size = 0;	cp->elements = kcalloc(nr_cpu_ids,			       sizeof(struct cpudl_item),			       GFP_KERNEL);	if (!cp->elements)		return -ENOMEM;	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {		kfree(cp->elements);		return -ENOMEM;	}	for_each_possible_cpu(i)		cp->elements[i].idx = IDX_INVALID;	return 0;}
开发者ID:AshishNamdev,项目名称:linux,代码行数:28,


示例15: acpi_processor_add

static int acpi_processor_add(struct acpi_device *device){	struct acpi_processor *pr = NULL;	if (!device)		return -EINVAL;	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);	if (!pr)		return -ENOMEM;	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {		kfree(pr);		return -ENOMEM;	}	pr->handle = device->handle;	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);	device->driver_data = pr;	return 0;}
开发者ID:tch-opensrc,项目名称:TC72XX_LxG1.7.1mp1_OpenSrc,代码行数:24,


示例16: init_amd_e400_c1e_mask

/*
 * Allocate amd_e400_c1e_mask, but only when the AMD E400-aware idle
 * routine is actually selected — it is the sole consumer of the mask.
 */
void __init init_amd_e400_c1e_mask(void)
{
	if (x86_idle != amd_e400_idle)
		return;

	zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
开发者ID:Seagate,项目名称:SMR_FS-EXT4,代码行数:6,


示例17: acpi_cpufreq_cpu_init

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy){	unsigned int i;	unsigned int valid_states = 0;	unsigned int cpu = policy->cpu;	struct acpi_cpufreq_data *data;	unsigned int result = 0;	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);	struct acpi_processor_performance *perf;#ifdef CONFIG_SMP	static int blacklisted;#endif	pr_debug("acpi_cpufreq_cpu_init/n");#ifdef CONFIG_SMP	if (blacklisted)		return blacklisted;	blacklisted = acpi_cpufreq_blacklist(c);	if (blacklisted)		return blacklisted;#endif	data = kzalloc(sizeof(*data), GFP_KERNEL);	if (!data)		return -ENOMEM;	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {		result = -ENOMEM;		goto err_free;	}	perf = per_cpu_ptr(acpi_perf_data, cpu);	data->acpi_perf_cpu = cpu;	policy->driver_data = data;	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;	result = acpi_processor_register_performance(perf, cpu);	if (result)		goto err_free_mask;	policy->shared_type = perf->shared_type;	/*	 * Will let policy->cpus know about dependency only when software	 * coordination is required.	 
*/	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {		cpumask_copy(policy->cpus, perf->shared_cpu_map);	}	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);#ifdef CONFIG_SMP	dmi_check_system(sw_any_bug_dmi_table);	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));	}	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {		cpumask_clear(policy->cpus);		cpumask_set_cpu(cpu, policy->cpus);		cpumask_copy(data->freqdomain_cpus,			     topology_sibling_cpumask(cpu));		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;		pr_info_once(PFX "overriding BIOS provided _PSD data/n");	}#endif	/* capability check */	if (perf->state_count <= 1) {		pr_debug("No P-States/n");		result = -ENODEV;		goto err_unreg;	}	if (perf->control_register.space_id != perf->status_register.space_id) {		result = -ENODEV;		goto err_unreg;	}	switch (perf->control_register.space_id) {	case ACPI_ADR_SPACE_SYSTEM_IO:		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&		    boot_cpu_data.x86 == 0xf) {			pr_debug("AMD K8 systems must use native drivers./n");			result = -ENODEV;			goto err_unreg;		}		pr_debug("SYSTEM IO addr space/n");		data->cpu_feature = SYSTEM_IO_CAPABLE;		break;	case ACPI_ADR_SPACE_FIXED_HARDWARE:		pr_debug("HARDWARE addr space/n");		if (check_est_cpu(cpu)) {			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;			break;//.........这里部分代码省略.........
开发者ID:343829084,项目名称:linux-study,代码行数:101,


示例18: ERR_PTR

struct domain *domain_create(    domid_t domid, unsigned int domcr_flags, uint32_t ssidref){    struct domain *d, **pd;    enum { INIT_xsm = 1u<<0, INIT_watchdog = 1u<<1, INIT_rangeset = 1u<<2,           INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5 };    int err, init_status = 0;    int poolid = CPUPOOLID_NONE;    if ( (d = alloc_domain_struct()) == NULL )        return ERR_PTR(-ENOMEM);    d->domain_id = domid;    lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain");    if ( (err = xsm_alloc_security_domain(d)) != 0 )        goto fail;    init_status |= INIT_xsm;    watchdog_domain_init(d);    init_status |= INIT_watchdog;    atomic_set(&d->refcnt, 1);    spin_lock_init_prof(d, domain_lock);    spin_lock_init_prof(d, page_alloc_lock);    spin_lock_init(&d->hypercall_deadlock_mutex);    INIT_PAGE_LIST_HEAD(&d->page_list);    INIT_PAGE_LIST_HEAD(&d->xenpage_list);    spin_lock_init(&d->node_affinity_lock);    d->node_affinity = NODE_MASK_ALL;    d->auto_node_affinity = 1;    spin_lock_init(&d->shutdown_lock);    d->shutdown_code = -1;    err = -ENOMEM;    if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )        goto fail;    if ( domcr_flags & DOMCRF_hvm )        d->is_hvm = 1;    if ( domid == 0 )    {        d->is_pinned = opt_dom0_vcpus_pin;        d->disable_migrate = 1;    }    rangeset_domain_initialise(d);    init_status |= INIT_rangeset;    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);    d->irq_caps   = rangeset_new(d, "Interrupts", 0);    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )        goto fail;    if ( domcr_flags & DOMCRF_dummy )        return d;    if ( !is_idle_domain(d) )    {        if ( (err = xsm_domain_create(XSM_HOOK, d, ssidref)) != 0 )            goto fail;        d->is_paused_by_controller = 1;        atomic_inc(&d->pause_count);        if ( domid )            d->nr_pirqs = nr_static_irqs + extra_domU_irqs;        else            d->nr_pirqs = nr_static_irqs + 
extra_dom0_irqs;        if ( d->nr_pirqs > nr_irqs )            d->nr_pirqs = nr_irqs;        radix_tree_init(&d->pirq_tree);        if ( (err = evtchn_init(d)) != 0 )            goto fail;        init_status |= INIT_evtchn;        if ( (err = grant_table_create(d)) != 0 )            goto fail;        init_status |= INIT_gnttab;        poolid = 0;        err = -ENOMEM;        d->mem_event = xzalloc(struct mem_event_per_domain);        if ( !d->mem_event )            goto fail;    }    if ( (err = arch_domain_create(d, domcr_flags)) != 0 )        goto fail;    init_status |= INIT_arch;    if ( (err = cpupool_add_domain(d, poolid)) != 0 )        goto fail;//.........这里部分代码省略.........
开发者ID:YongMan,项目名称:Xen-4.3.1,代码行数:101,


示例19: BUG_ON

/*
 * Allocate and initialize a vcpu for domain @d with id @vcpu_id, to run
 * on @cpu_id.  Returns the new vcpu on success, NULL on failure (all
 * partially-initialized state is torn down again).
 *
 * Note the unusual error handling: the fail_wq/fail_free labels live
 * inside the vcpu_initialise() failure branch, so earlier failures jump
 * *into* that block to share its teardown tail.
 */
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    /* The slot must be free (idle domain vcpu 0 is the one exception). */
    BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    spin_lock_init(&v->virq_lock);

    tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);

    if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
         !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
         !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
        goto fail_free;

    if ( is_idle_domain(d) )
    {
        v->runstate.state = RUNSTATE_running;
    }
    else
    {
        /* Non-idle vcpus start offline and paused (_VPF_down). */
        v->runstate.state = RUNSTATE_offline;
        v->runstate.state_entry_time = NOW();
        set_bit(_VPF_down, &v->pause_flags);
        /* Legacy-range vcpus get their info block in shared_info;
         * higher ids use the dummy placeholder until mapped. */
        v->vcpu_info = ((vcpu_id < XEN_LEGACY_MAX_VCPUS)
                        ? (vcpu_info_t *)&shared_info(d, vcpu_info[vcpu_id])
                        : &dummy_vcpu_info);
        v->vcpu_info_mfn = INVALID_MFN;
        init_waitqueue_vcpu(v);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
        goto fail_wq;

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
 fail_wq:
        destroy_waitqueue_vcpu(v);
 fail_free:
        free_cpumask_var(v->cpu_affinity);
        free_cpumask_var(v->cpu_affinity_tmp);
        free_cpumask_var(v->cpu_affinity_saved);
        free_cpumask_var(v->vcpu_dirty_cpumask);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    /* Link into the domain's vcpu list after the nearest lower-id vcpu. */
    if ( vcpu_id != 0 )
    {
        int prev_id = v->vcpu_id - 1;
        while ( (prev_id >= 0) && (d->vcpu[prev_id] == NULL) )
            prev_id--;
        BUG_ON(prev_id < 0);
        v->next_in_list = d->vcpu[prev_id]->next_in_list;
        d->vcpu[prev_id]->next_in_list = v;
    }

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    domain_update_node_affinity(d);

    return v;
}
开发者ID:YongMan,项目名称:Xen-4.3.1,代码行数:74,


示例20: channel_backend_init

/** * channel_backend_init - initialize a channel backend * @chanb: channel backend * @name: channel name * @config: client ring buffer configuration * @priv: client private data * @parent: dentry of parent directory, %NULL for root directory * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2) * @num_subbuf: number of sub-buffers (power of 2) * * Returns channel pointer if successful, %NULL otherwise. * * Creates per-cpu channel buffers using the sizes and attributes * specified.  The created channel buffer files will be named * name_0...name_N-1.  File permissions will be %S_IRUSR. * * Called with CPU hotplug disabled. */int channel_backend_init(struct channel_backend *chanb,			 const char *name,			 const struct lib_ring_buffer_config *config,			 void *priv, size_t subbuf_size, size_t num_subbuf){	struct channel *chan = container_of(chanb, struct channel, backend);	unsigned int i;	int ret;	if (!name)		return -EPERM;	/* Check that the subbuffer size is larger than a page. */	if (subbuf_size < PAGE_SIZE)		return -EINVAL;	/*	 * Make sure the number of subbuffers and subbuffer size are	 * power of 2 and nonzero.	 */	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))		return -EINVAL;	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))		return -EINVAL;	/*	 * Overwrite mode buffers require at least 2 subbuffers per	 * buffer.	 */	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)		return -EINVAL;	ret = subbuffer_id_check_index(config, num_subbuf);	if (ret)		return ret;	chanb->priv = priv;	chanb->buf_size = num_subbuf * subbuf_size;	chanb->subbuf_size = subbuf_size;	chanb->buf_size_order = get_count_order(chanb->buf_size);	chanb->subbuf_size_order = get_count_order(subbuf_size);	chanb->num_subbuf_order = get_count_order(num_subbuf);	chanb->extra_reader_sb =			(config->mode == RING_BUFFER_OVERWRITE) ? 
1 : 0;	chanb->num_subbuf = num_subbuf;	strlcpy(chanb->name, name, NAME_MAX);	memcpy(&chanb->config, config, sizeof(chanb->config));	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))			return -ENOMEM;	}	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {		/* Allocating the buffer per-cpu structures */		chanb->buf = alloc_percpu(struct lib_ring_buffer);		if (!chanb->buf)			goto free_cpumask;#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,			&chanb->cpuhp_prepare.node);		if (ret)			goto free_bufs;#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */		{			/*			 * In case of non-hotplug cpu, if the ring-buffer is allocated			 * in early initcall, it will not be notified of secondary cpus.			 * In that off case, we need to allocate for all possible cpus.			 */#ifdef CONFIG_HOTPLUG_CPU			/*			 * buf->backend.allocated test takes care of concurrent CPU			 * hotplug.			 * Priority higher than frontend, so we create the ring buffer			 * before we start the timer.			 */			chanb->cpu_hp_notifier.notifier_call =					lib_ring_buffer_cpu_hp_callback;			chanb->cpu_hp_notifier.priority = 5;//.........这里部分代码省略.........
开发者ID:tahini,项目名称:lttng-modules,代码行数:101,


示例21: acpi_processor_add

/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up above in acpi_processor_start()
 *
 * Allocates the acpi_processor object and its throttling cpumask,
 * registers it in the per-cpu arrays, creates the sysfs "sysdev" link,
 * and starts the processor unless it is a hotplugged CPU (those are
 * started on first online).  Returns 0 on success or when the processor
 * is merely absent; a negative errno otherwise.
 */
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;
	int result = 0;
	struct device *dev;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0)
		return 0;
#endif

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 *
	 * NOTE(review): "/n" in the message literals below looks like a
	 * transcription artifact of "\n" — verify against upstream.
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor/n");
		result = -ENODEV;
		goto err_free_cpumask;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
		result = -EFAULT;
		goto err_clear_processor;
	}

	/*
	 * Do not start hotplugged CPUs now, but when they
	 * are onlined the first time
	 */
	if (pr->flags.need_hotplug_init)
		return 0;

	result = acpi_processor_start(pr);
	if (result)
		goto err_remove_sysfs;

	return 0;

err_remove_sysfs:
	sysfs_remove_link(&device->dev.kobj, "sysdev");
err_clear_processor:
	/*
	 * processor_device_array is not cleared to allow checks for buggy BIOS
	 */
	per_cpu(processors, pr->id) = NULL;
err_free_cpumask:
	free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
	kfree(pr);
	return result;
}
开发者ID:kozmikkick,项目名称:tripndroid-endeavoru-3.6,代码行数:87,


示例22: acpi_processor_add

static int __cpuinit acpi_processor_add(struct acpi_device *device){	struct acpi_processor *pr = NULL;	int result = 0;	struct sys_device *sysdev;	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);	if (!pr)		return -ENOMEM;	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {		kfree(pr);		return -ENOMEM;	}	pr->handle = device->handle;	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);	device->driver_data = pr; 	processor_extcntl_init();	result = acpi_processor_get_info(device);	if (result ||	    ((pr->id == -1) && !processor_cntl_external())) {		/* Processor is physically not present */		return 0;	}	BUG_ON(!processor_cntl_external() &&	       ((pr->id >= nr_cpu_ids) || (pr->id < 0)));	/*	 * Buggy BIOS check	 * ACPI id of processors can be reported wrongly by the BIOS.	 * Don't trust it blindly	 */#ifndef CONFIG_XEN	if (per_cpu(processor_device_array, pr->id) != NULL &&	    per_cpu(processor_device_array, pr->id) != device) {#else	BUG_ON(pr->acpi_id >= NR_ACPI_CPUS);	if (processor_device_array[pr->acpi_id] != NULL &&	    processor_device_array[pr->acpi_id] != device) {#endif		printk(KERN_WARNING "BIOS reported wrong ACPI id "			"for the processor/n");		result = -ENODEV;		goto err_free_cpumask;	}#ifndef CONFIG_XEN	per_cpu(processor_device_array, pr->id) = device;	per_cpu(processors, pr->id) = pr;#else	processor_device_array[pr->acpi_id] = device;	if (pr->id != -1)		per_cpu(processors, pr->id) = pr;#endif	result = acpi_processor_add_fs(device);	if (result)		goto err_free_cpumask;	if (pr->id != -1) {		sysdev = get_cpu_sysdev(pr->id);		if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {			result = -EFAULT;			goto err_remove_fs;		}	}	/* _PDC call should be done before doing anything else (if reqd.). 
*/	arch_acpi_processor_init_pdc(pr);	acpi_processor_set_pdc(pr);	arch_acpi_processor_cleanup_pdc(pr);#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)	acpi_processor_ppc_has_changed(pr);#endif	/*	 * pr->id may equal to -1 while processor_cntl_external enabled.	 * throttle and thermal module don't support this case.	 * Tx only works when dom0 vcpu == pcpu num by far, as we give	 * control to dom0.	 */	if (pr->id != -1) {		acpi_processor_get_throttling_info(pr);		acpi_processor_get_limit_info(pr);	}	acpi_processor_power_init(pr, device);	result = processor_extcntl_prepare(pr);	if (result)		goto err_power_exit;	pr->cdev = thermal_cooling_device_register("Processor", device,						&processor_cooling_ops);//.........这里部分代码省略.........
开发者ID:AsadRaza,项目名称:OCTEON-Linux,代码行数:101,


示例23: acpi_processor_add

static int __cpuinit acpi_processor_add(struct acpi_device *device){	struct acpi_processor *pr = NULL;	int result = 0;	struct sys_device *sysdev;	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);	if (!pr)		return -ENOMEM;	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {		kfree(pr);		return -ENOMEM;	}	pr->handle = device->handle;	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);	device->driver_data = pr;	result = acpi_processor_get_info(device);	if (result) {		/* Processor is physically not present */		return 0;	}	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));	/*	 * Buggy BIOS check	 * ACPI id of processors can be reported wrongly by the BIOS.	 * Don't trust it blindly	 */	if (per_cpu(processor_device_array, pr->id) != NULL &&	    per_cpu(processor_device_array, pr->id) != device) {		printk(KERN_WARNING "BIOS reported wrong ACPI id "			"for the processor/n");		result = -ENODEV;		goto err_free_cpumask;	}	per_cpu(processor_device_array, pr->id) = device;	per_cpu(processors, pr->id) = pr;	result = acpi_processor_add_fs(device);	if (result)		goto err_free_cpumask;	sysdev = get_cpu_sysdev(pr->id);	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {		result = -EFAULT;		goto err_remove_fs;	}	/* _PDC call should be done before doing anything else (if reqd.). 
*/	arch_acpi_processor_init_pdc(pr);	acpi_processor_set_pdc(pr);	arch_acpi_processor_cleanup_pdc(pr);#ifdef CONFIG_CPU_FREQ	acpi_processor_ppc_has_changed(pr);#endif	acpi_processor_get_throttling_info(pr);	acpi_processor_get_limit_info(pr);	acpi_processor_power_init(pr, device);	pr->cdev = thermal_cooling_device_register("Processor", device,						&processor_cooling_ops);	if (IS_ERR(pr->cdev)) {		result = PTR_ERR(pr->cdev);		goto err_power_exit;	}	dev_info(&device->dev, "registered as cooling_device%d/n",		 pr->cdev->id);	result = sysfs_create_link(&device->dev.kobj,				   &pr->cdev->device.kobj,				   "thermal_cooling");	if (result) {		printk(KERN_ERR PREFIX "Create sysfs link/n");		goto err_thermal_unregister;	}	result = sysfs_create_link(&pr->cdev->device.kobj,				   &device->dev.kobj,				   "device");	if (result) {		printk(KERN_ERR PREFIX "Create sysfs link/n");		goto err_remove_sysfs;	}	return 0;err_remove_sysfs:	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");err_thermal_unregister:	thermal_cooling_device_unregister(pr->cdev);err_power_exit://.........这里部分代码省略.........
开发者ID:12019,项目名称:kernel_zte_u880,代码行数:101,


示例24: acpi_processor_add

/*
 * Create and register the acpi_processor object for @device: allocate
 * it plus its throttling cpumask, publish it in the per-cpu arrays,
 * bind the ACPI handle to the cpu device, and trigger driver probe.
 *
 * Returns 1 when the processor driver was attached, 0 when the
 * processor is absent/unavailable or a buggy-BIOS duplicate id was
 * detected (scan continues), or a negative errno on hard failure.
 */
static int acpi_processor_add(struct acpi_device *device,
					const struct acpi_device_id *id)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result = 0;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) /* Processor is not physically present or unavailable */
		return 0;

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0)
		return 0;
#endif

	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check.
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 *
	 * NOTE(review): "/n" in the message literals below looks like a
	 * transcription artifact of "\n" — verify against upstream.
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			"BIOS reported wrong ACPI id %d for the processor/n",
			pr->id);
		/* Give up, but do not abort the namespace scan. */
		goto err;
	}
	/*
	 * processor_device_array is not cleared on errors to allow buggy BIOS
	 * checks.
	 */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err;
	}

	result = acpi_bind_one(dev, pr->handle);
	if (result)
		goto err;

	pr->dev = dev;
	/* Hotplugged CPUs stay offline until first onlined. */
	dev->offline = pr->flags.need_hotplug_init;

	/* Trigger the processor driver's .probe() if present. */
	if (device_attach(dev) >= 0)
		return 1;

	dev_err(dev, "Processor driver could not be attached/n");
	acpi_unbind_one(dev);

 err:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	device->driver_data = NULL;
	per_cpu(processors, pr->id) = NULL;
 err_free_pr:
	kfree(pr);
	return result;
}
开发者ID:AnadoluPanteri,项目名称:kernel-plus-harmattan,代码行数:80,


示例25: acpi_get_psd_map

/** * acpi_get_psd_map - Map the CPUs in a common freq domain. * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info. * *	Return: 0 for success or negative value for err. */int acpi_get_psd_map(struct cpudata **all_cpu_data){	int count_target;	int retval = 0;	unsigned int i, j;	cpumask_var_t covered_cpus;	struct cpudata *pr, *match_pr;	struct acpi_psd_package *pdomain;	struct acpi_psd_package *match_pdomain;	struct cpc_desc *cpc_ptr, *match_cpc_ptr;	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))		return -ENOMEM;	/*	 * Now that we have _PSD data from all CPUs, lets setup P-state	 * domain info.	 */	for_each_possible_cpu(i) {		pr = all_cpu_data[i];		if (!pr)			continue;		if (cpumask_test_cpu(i, covered_cpus))			continue;		cpc_ptr = per_cpu(cpc_desc_ptr, i);		if (!cpc_ptr) {			retval = -EFAULT;			goto err_ret;		}		pdomain = &(cpc_ptr->domain_info);		cpumask_set_cpu(i, pr->shared_cpu_map);		cpumask_set_cpu(i, covered_cpus);		if (pdomain->num_processors <= 1)			continue;		/* Validate the Domain info */		count_target = pdomain->num_processors;		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;		for_each_possible_cpu(j) {			if (i == j)				continue;			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);			if (!match_cpc_ptr) {				retval = -EFAULT;				goto err_ret;			}			match_pdomain = &(match_cpc_ptr->domain_info);			if (match_pdomain->domain != pdomain->domain)				continue;			/* Here i and j are in the same domain */			if (match_pdomain->num_processors != count_target) {				retval = -EFAULT;				goto err_ret;			}			if (pdomain->coord_type != match_pdomain->coord_type) {				retval = -EFAULT;				goto err_ret;			}			cpumask_set_cpu(j, covered_cpus);			cpumask_set_cpu(j, pr->shared_cpu_map);		}		
for_each_possible_cpu(j) {			if (i == j)				continue;			match_pr = all_cpu_data[j];			if (!match_pr)				continue;			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);			if (!match_cpc_ptr) {				retval = -EFAULT;				goto err_ret;			}			match_pdomain = &(match_cpc_ptr->domain_info);			if (match_pdomain->domain != pdomain->domain)				continue;//.........这里部分代码省略.........
开发者ID:fromfuture,项目名称:Elizium,代码行数:101,



注:本文中的zalloc_cpumask_var函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


C++ zap函数代码示例
C++ zalloc函数代码示例
万事OK自学网:51自学网_软件自学网_CAD自学网自学excel、自学PS、自学CAD、自学C语言、自学css3实例,是一个通过网络自主学习工作技能的自学平台,网友喜欢的软件自学网站。