这篇教程整理了 C++ CPU_ZERO 函数的实用代码示例,希望能帮到您。
本文整理汇总了C++中CPU_ZERO函数的典型用法代码示例。如果您正苦于以下问题:C++ CPU_ZERO函数的具体用法?C++ CPU_ZERO怎么用?C++ CPU_ZERO使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。 在下文中一共展示了CPU_ZERO函数的27个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。 示例1: __attribute__//.........这里部分代码省略......... liblock_server_cores[2] = topology->nodes[0].cores[0]; liblock_server_cores[3] = topology->nodes[0].cores[0]; liblock_server_cores[4] = topology->nodes[0].cores[0]; liblock_server_cores[5] = topology->nodes[0].cores[0]; liblock_server_cores[6] = topology->nodes[0].cores[0]; liblock_server_cores[7] = topology->nodes[0].cores[0]; liblock_server_cores[8] = topology->nodes[0].cores[0]; liblock_server_cores[9] = topology->nodes[0].cores[0]; liblock_server_cores[10] = topology->nodes[0].cores[0];#else liblock_server_cores[0] = topology->nodes[0].cores[0]; liblock_server_cores[1] = topology->nodes[0].cores[0]; liblock_server_cores[2] = topology->nodes[0].cores[0]; liblock_server_cores[3] = topology->nodes[0].cores[0]; liblock_server_cores[4] = topology->nodes[0].cores[0]; liblock_server_cores[5] = topology->nodes[0].cores[1]; liblock_server_cores[6] = topology->nodes[0].cores[1]; liblock_server_cores[7] = topology->nodes[0].cores[0]; liblock_server_cores[8] = topology->nodes[0].cores[1]; liblock_server_cores[9] = topology->nodes[0].cores[1]; liblock_server_cores[10] = topology->nodes[0].cores[1];#endif liblock_lock_name = getenv("LIBLOCK_LOCK_NAME"); if(!liblock_lock_name) liblock_lock_name = "rcl"; is_rcl = !strcmp(liblock_lock_name, "rcl") || !strcmp(liblock_lock_name, "multircl"); liblock_start_server_threads_by_hand = 1; liblock_servers_always_up = 1; sprintf(get_cmd, "/proc/%d/cmdline", getpid()); FILE* f=fopen(get_cmd, "r"); if(!f) { printf("!!! 
warning: unable to find command line/n"); } char buf[1024]; buf[0] = 0; if(!fgets(buf, 1024, f)) printf("fgets/n"); printf("**** testing %s with lock %s/n", buf, liblock_lock_name); /* Pre-bind */ cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(topology->nodes[0].cores[2]->core_id, &cpuset); if(sched_setaffinity(0, sizeof(cpu_set_t), &cpuset)) fatal("pthread_setaffinity_np"); /* /Pre-bind */ if(is_rcl) { go = 0; liblock_reserve_core_for(topology->nodes[0].cores[0], liblock_lock_name);#ifndef ONE_SERVER liblock_reserve_core_for(topology->nodes[0].cores[1], liblock_lock_name);#endif/* for (i = 0; i < NUM_LOCKS; i++) liblock_reserve_core_for(liblock_server_cores[i], liblock_lock_name);*/ /* launch the liblock threads */ liblock_lookup(liblock_lock_name)->run(do_go); while(!go) PAUSE(); } client_cores = malloc(sizeof(int)*topology->nb_cores); int j, k, z; for(i=0, z=0; i<topology->nb_nodes; i++) { for(j=0; j<topology->nodes[i].nb_cores; j++) { int is_server_core = 0; if (is_rcl) { for (k = 0; k < NUM_LOCKS; k++) if(topology->nodes[i].cores[j] == liblock_server_cores[k]) is_server_core = 1; } if (!is_server_core) client_cores[z++] = topology->nodes[i].cores[j]->core_id; } } n_available_cores = z; printf("**** %d available cores for clients./n", z); liblock_auto_bind();}
开发者ID:SANL-2015,项目名称:SANL-2015,代码行数:101,
示例2: pfring_zc_daq_initializestatic int pfring_zc_daq_initialize(const DAQ_Config_t *config, void **ctxt_ptr, char *errbuf, size_t len) { Pfring_Context_t *context; DAQ_Dict* entry; u_int numCPU = get_nprocs(); int i, max_buffer_len = 0; int num_buffers; context = calloc(1, sizeof(Pfring_Context_t)); if (context == NULL) { snprintf(errbuf, len, "%s: Couldn't allocate memory for context!", __FUNCTION__); return DAQ_ERROR_NOMEM; } context->mode = config->mode; context->snaplen = config->snaplen; context->promisc_flag =(config->flags & DAQ_CFG_PROMISC); context->timeout = (config->timeout > 0) ? (int) config->timeout : -1; context->devices[DAQ_PF_RING_PASSIVE_DEV_IDX] = strdup(config->name); context->num_devices = 1; context->ids_bridge = 0; context->clusterid = 0; context->max_buffer_len = 0; context->bindcpu = 0; if (!context->devices[DAQ_PF_RING_PASSIVE_DEV_IDX]) { snprintf(errbuf, len, "%s: Couldn't allocate memory for the device string!", __FUNCTION__); free(context); return DAQ_ERROR_NOMEM; } for (entry = config->values; entry; entry = entry->next) { if (!entry->value || !*entry->value) { snprintf(errbuf, len, "%s: variable needs value(%s)/n", __FUNCTION__, entry->key); return DAQ_ERROR; } else if (!strcmp(entry->key, "bindcpu")) { char *end = entry->value; context->bindcpu = (int) strtol(entry->value, &end, 0); if(*end || (context->bindcpu >= numCPU)) { snprintf(errbuf, len, "%s: bad bindcpu(%s)/n", __FUNCTION__, entry->value); return DAQ_ERROR; } else { cpu_set_t mask; CPU_ZERO(&mask); CPU_SET((int)context->bindcpu, &mask); if (sched_setaffinity(0, sizeof(mask), &mask) < 0) { snprintf(errbuf, len, "%s:failed to set bindcpu(%u) on pid %i/n", __FUNCTION__, context->bindcpu, getpid()); return DAQ_ERROR; } } } else if (!strcmp(entry->key, "timeout")) { char *end = entry->value; context->timeout = (int) strtol(entry->value, &end, 0); if (*end || (context->timeout < 0)) { snprintf(errbuf, len, "%s: bad timeout(%s)/n", __FUNCTION__, entry->value); return DAQ_ERROR; } 
} else if (!strcmp(entry->key, "idsbridge")) { if (context->mode == DAQ_MODE_PASSIVE) { char* end = entry->value; context->ids_bridge = (int) strtol(entry->value, &end, 0); if (*end || (context->ids_bridge < 0) || (context->ids_bridge > 2)) { snprintf(errbuf, len, "%s: bad ids bridge mode(%s)/n", __FUNCTION__, entry->value); return DAQ_ERROR; } } else { snprintf(errbuf, len, "%s: idsbridge is for passive mode only/n", __FUNCTION__); return DAQ_ERROR; } } else if (!strcmp(entry->key, "clusterid")) { char *end = entry->value; context->clusterid = (int) strtol(entry->value, &end, 0); if (*end || (context->clusterid < 0)) { snprintf(errbuf, len, "%s: bad clusterid(%s)/n", __FUNCTION__, entry->value); return DAQ_ERROR; } } else { snprintf(errbuf, len, "%s: unsupported variable(%s=%s)/n", __FUNCTION__, entry->key, entry->value); return DAQ_ERROR; } } if (context->mode == DAQ_MODE_READ_FILE) { snprintf(errbuf, len, "%s: function not supported on PF_RING", __FUNCTION__); free(context); return DAQ_ERROR; } else if (context->mode == DAQ_MODE_INLINE || (context->mode == DAQ_MODE_PASSIVE && context->ids_bridge)) { /* zc:ethX+zc:ethY,zc:ethZ+zc:ethJ */ char *twins, *twins_pos = NULL; context->num_devices = 0; twins = strtok_r(context->devices[DAQ_PF_RING_PASSIVE_DEV_IDX], ",", &twins_pos); while(twins != NULL) { char *dev, *dev_pos = NULL, *tx_dev; int last_twin = 0; dev = strtok_r(twins, "+", &dev_pos);//.........这里部分代码省略.........
开发者ID:nakuljavali,项目名称:pfring,代码行数:101,
示例3: start
/**
 * Start the wrapped POSIX thread, pinned to the given set of processors.
 *
 * @param procs processor ids to include in the thread's CPU affinity mask
 * @throws ::libmaus2::exception::LibMausException if the thread is already
 *         running or if any pthread attribute/creation call fails
 */
void start(std::vector<uint64_t> const & procs)
{
	// The thread handle doubles as the "already running" flag:
	// refuse a second start() on the same object.
	if ( thread.get() )
	{
		::libmaus2::exception::LibMausException se;
		se.getStream() << "PosixThread::start() called but object is already in use.";
		se.finish();
		throw se;
	}

	thread_ptr_type tthread(new pthread_t);
	thread = UNIQUE_PTR_MOVE(tthread);

	pthread_attr_t thread_attr;
	if ( pthread_attr_init(&thread_attr) )
	{
		::libmaus2::exception::LibMausException se;
		se.getStream() << "pthread_attr_init failed:" << strerror(errno);
		se.finish();
		throw se;
	}

	// Build the affinity mask from the requested processor ids.
	cpu_set_t affinity_mask;
	CPU_ZERO(&affinity_mask);
	for ( uint64_t p = 0; p < procs.size(); ++p )
		CPU_SET(procs[p], &affinity_mask);

	if ( pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &affinity_mask) )
	{
		pthread_attr_destroy(&thread_attr);
		::libmaus2::exception::LibMausException se;
		se.getStream() << "pthread_attr_setaffinity_np failed:" << strerror(errno);
		se.finish();
		throw se;
	}

	#if 0
	std::cerr << "Creating thread with affinity." << std::endl;
	std::cerr << ::libmaus2::util::StackTrace::getStackTrace() << std::endl;
	#endif

	// Launch the thread; dispatch() receives this object as its argument.
	if ( pthread_create(thread.get(), &thread_attr, dispatch, this) )
	{
		pthread_attr_destroy(&thread_attr);
		::libmaus2::exception::LibMausException se;
		se.getStream() << "pthread_create() failed in PosixThread::start()";
		se.finish();
		throw se;
	}

	// The attribute object is no longer needed once the thread exists.
	if ( pthread_attr_destroy(&thread_attr) )
	{
		::libmaus2::exception::LibMausException se;
		se.getStream() << "pthread_attr_destroy failed:" << strerror(errno);
		se.finish();
		throw se;
	}
}
开发者ID:whitwham,项目名称:libmaus2,代码行数:62,
示例4: CPU_ZERO
/// Default constructor: initialize `set` to an empty CPU mask
/// (CPU_ZERO clears every bit, so no CPU is selected yet).
affinity_set::affinity_set()
{
	CPU_ZERO(&set);
}
开发者ID:cjy7117,项目名称:FT-MAGMA,代码行数:4,
示例5: scheprocessint scheprocess(MTN *mtn, MTNJOB *job, int job_max, int cpu_lim, int cpu_num){ int i; int cpu_id; int cpu_use; cpu_set_t cpumask; cpu_id = 0; cpu_use = 0; scanprocess(job, job_max); for(i=0;i<job_max;i++){ if(!job[i].pid){ continue; } getjobusage(job + i); if(cpu_id != job[i].cid){ CPU_ZERO(&cpumask); CPU_SET(cpu_id, &cpumask); if(sched_setaffinity(job[i].pid, cpu_num, &cpumask) == -1){ mtnlogger(mtn, 0, "[error] %s: sched_setaffinity: %s/n", __func__, strerror(errno)); job->cid = -1; }else{ job->cid = cpu_id; } } cpu_id += 1; cpu_id %= cpu_num; cpu_use += job[i].cpu; //MTNDEBUG("CMD=%s STATE=%c CPU=%d.%d/n", job->cmd, job->pstat[0].state, job->cpu / 10, job->cpu % 10); } //MTNDEBUG("[CPU=%d.%d%% LIM=%d CPU=%d]/n", ctx->cpu_use / 10, ctx->cpu_use % 10, ctx->cpu_lim / 10, ctx->cnt.cpu); if(!cpu_lim){ return(cpu_use); } for(i=0;i<job_max;i++){ if(!job[i].pid){ continue; } if(cpu_lim * cpu_num < cpu_use){ // C++ CP_HASH_PAIR函数代码示例 C++ CPU_TO_LE16函数代码示例
|