这篇教程整理的 C++ xcgroup_destroy 函数代码示例十分实用，希望能帮到您。
本文整理汇总了C++中xcgroup_destroy函数的典型用法代码示例。如果您正苦于以下问题:C++ xcgroup_destroy函数的具体用法?C++ xcgroup_destroy怎么用?C++ xcgroup_destroy使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。 在下文中一共展示了xcgroup_destroy函数的29个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。 示例1: _slurm_cgroup_destroy
/*
 * Tear down the freezer cgroup hierarchy (step -> job -> user -> slurm)
 * built for this job step, then release the namespace.
 *
 * Returns SLURM_SUCCESS, or SLURM_ERROR if the step cgroup cannot be
 * deleted (e.g. it still contains tasks).
 *
 * FIX: the scraped source had '/0' (a multi-character constant) where the
 * empty-string sentinel '\0' is intended; restored throughout.
 */
int _slurm_cgroup_destroy(void)
{
	/* Hold the root freezer lock so a concurrent step cannot race
	 * with the removal of the shared intermediate directories. */
	xcgroup_lock(&freezer_cg);

	/* The step cgroup must go first; if it cannot be removed we
	 * abort and leave the rest of the hierarchy in place. */
	if (jobstep_cgroup_path[0] != '\0') {
		if (xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS) {
			error("_slurm_cgroup_destroy: problem deleting step "
			      "cgroup path %s: %m", step_freezer_cg.path);
			xcgroup_unlock(&freezer_cg);
			return SLURM_ERROR;
		}
		xcgroup_destroy(&step_freezer_cg);
	}

	/* Job and user cgroups are removed best-effort: failure is
	 * expected when other steps/jobs still use them. */
	if (job_cgroup_path[0] != '\0') {
		xcgroup_delete(&job_freezer_cg);
		xcgroup_destroy(&job_freezer_cg);
	}

	if (user_cgroup_path[0] != '\0') {
		xcgroup_delete(&user_freezer_cg);
		xcgroup_destroy(&user_freezer_cg);
	}

	if (slurm_freezer_init) {
		xcgroup_destroy(&slurm_freezer_cg);
	}

	xcgroup_unlock(&freezer_cg);
	xcgroup_destroy(&freezer_cg);
	xcgroup_ns_destroy(&freezer_ns);

	return SLURM_SUCCESS;
}
开发者ID:nqn,项目名称:slurm-mesos,代码行数:34,
示例2: _slurm_cgroup_destroy
/*
 * Variant of the freezer-hierarchy teardown that locks the slurm-level
 * freezer cgroup (when it was initialized) instead of the root cgroup.
 *
 * Returns SLURM_SUCCESS, or SLURM_ERROR if the step cgroup cannot be
 * deleted.
 *
 * FIX: restored '\0' where the scrape produced '/0'.
 */
int _slurm_cgroup_destroy(void)
{
	if (slurm_freezer_init)
		xcgroup_lock(&slurm_freezer_cg);

	/* Step cgroup removal is the only fatal failure. */
	if (jobstep_cgroup_path[0] != '\0') {
		if (xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS) {
			if (slurm_freezer_init)
				xcgroup_unlock(&slurm_freezer_cg);
			return SLURM_ERROR;
		}
		xcgroup_destroy(&step_freezer_cg);
	}

	/* Best-effort removal of shared job/user levels. */
	if (job_cgroup_path[0] != '\0') {
		xcgroup_delete(&job_freezer_cg);
		xcgroup_destroy(&job_freezer_cg);
	}

	if (user_cgroup_path[0] != '\0') {
		xcgroup_delete(&user_freezer_cg);
		xcgroup_destroy(&user_freezer_cg);
	}

	if (slurm_freezer_init) {
		xcgroup_unlock(&slurm_freezer_cg);
		xcgroup_destroy(&slurm_freezer_cg);
	}

	xcgroup_ns_destroy(&freezer_ns);

	return SLURM_SUCCESS;
}
开发者ID:Cray,项目名称:slurm,代码行数:33,
示例3: jobacct_gather_cgroup_cpuacct_fini
/*
 * Release the cpuacct cgroup hierarchy used by the jobacct_gather/cgroup
 * plugin: move slurmstepd back to the root cpuacct cgroup so the
 * release_agent can clean up the step cgroup asynchronously, then drop
 * our local handles and namespace.
 *
 * Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0' (both in the
 * emptiness tests and in the path-reset assignments).
 */
extern int jobacct_gather_cgroup_cpuacct_fini(
	slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t cpuacct_cg;

	/* Nothing to do if the hierarchy was never built. */
	if (user_cgroup_path[0] == '\0'
	    || job_cgroup_path[0] == '\0'
	    || jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root cpuacct cg.
	 * The release_agent will asynchroneously be called for the step
	 * cgroup. It will do the necessary cleanup.
	 */
	if (xcgroup_create(&cpuacct_ns, &cpuacct_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		xcgroup_set_uint32_param(&cpuacct_cg, "tasks", getpid());
		xcgroup_destroy(&cpuacct_cg);
	}

	xcgroup_destroy(&user_cpuacct_cg);
	xcgroup_destroy(&job_cpuacct_cg);
	xcgroup_destroy(&step_cpuacct_cg);

	/* Mark the cached relative paths as unset. */
	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	xcgroup_ns_destroy(&cpuacct_ns);

	return SLURM_SUCCESS;
}
开发者ID:IFCA,项目名称:slurm,代码行数:32,
示例4: fini_system_cgroup
/*
 * Drop the handles for the system-level cpuset and memory cgroups and
 * release both cgroup namespaces. Does not remove anything on disk.
 */
extern void fini_system_cgroup(void)
{
	/* Free the two system cgroup handles... */
	xcgroup_destroy(&system_cpuset_cg);
	xcgroup_destroy(&system_memory_cg);

	/* ...then tear down their namespaces. */
	xcgroup_ns_destroy(&cpuset_ns);
	xcgroup_ns_destroy(&memory_ns);
}
开发者ID:jtfrey,项目名称:slurm,代码行数:7,
示例5: jobacct_cgroup_create_slurm_cg
/*
 * Create (or re-use) the slurm-level cgroup inside namespace 'ns' and
 * return its relative path as a newly xstrdup'ed string the caller must
 * free. The path comes from slurm_cgroup_conf.cgroup_prepend, with "%n"
 * substituted by the node name under MULTIPLE_SLURMD builds.
 */
extern char* jobacct_cgroup_create_slurm_cg(xcgroup_ns_t* ns)
{
	/* we do it here as we do not have access to the conf structure */
	/* in libslurm (src/common/xcgroup.c) */
	xcgroup_t cg;
	char* cg_path = (char*) xstrdup(slurm_cgroup_conf.cgroup_prepend);

#ifdef MULTIPLE_SLURMD
	if (conf->node_name != NULL)
		xstrsubstitute(cg_path, "%n", conf->node_name);
	else {
		xfree(cg_path);
		cg_path = (char*) xstrdup("/slurm");
	}
#endif

	/* create slurm cgroup in the ns (it could already exist) */
	if (xcgroup_create(ns, &cg, cg_path,
			   getuid(), getgid()) != XCGROUP_SUCCESS)
		return cg_path;

	if (xcgroup_instanciate(&cg) != XCGROUP_SUCCESS)
		error("unable to build slurm cgroup for ns %s: %m",
		      ns->subsystems);
	else
		debug3("slurm cgroup %s successfully created for ns %s: %m",
		       cg_path, ns->subsystems);

	/* Both outcomes release the local handle and hand back the path. */
	xcgroup_destroy(&cg);
	return cg_path;
}
开发者ID:BYUHPC,项目名称:slurm,代码行数:34,
示例6: task_cgroup_memory_fini
/*
 * Remove the step/job/user memory cgroups created by task/cgroup while
 * holding the root memcg lock, then release all local handles and the
 * memory namespace. Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0'; the step-delete
 * result is now compared against XCGROUP_SUCCESS like the job/user
 * deletes (xcgroup_delete() returns an XCGROUP_* status).
 */
extern int task_cgroup_memory_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t memory_cg;

	if (user_cgroup_path[0] == '\0' ||
	    job_cgroup_path[0] == '\0' ||
	    jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Lock the root memcg and try to remove the different memcgs.
	 * The reason why we are locking here is that if a concurrent
	 * step is in the process of being executed, he could try to
	 * create the step memcg just after we remove the job memcg,
	 * resulting in a failure.
	 * First, delete step memcg as all the tasks have now exited.
	 * Then, try to remove the job memcg.
	 * If it fails, it is due to the fact that it is still in use by an
	 * other running step.
	 * After that, try to remove the user memcg. If it fails, it is due
	 * to jobs that are still running for the same user on the node or
	 * because of tasks attached directly to the user cg by an other
	 * component (PAM). The user memcg was created with the
	 * notify_on_release=1 flag (default) so it will be removed
	 * automatically after that.
	 * For now, do not try to detect if only externally attached tasks
	 * are present to see if they can be be moved to an orhpan memcg.
	 * That could be done in the future, if it is necessary.
	 */
	if (xcgroup_create(&memory_ns, &memory_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		if (xcgroup_lock(&memory_cg) == XCGROUP_SUCCESS) {
			if (xcgroup_delete(&step_memory_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: unable to remove step "
				       "memcg : %m");
			if (xcgroup_delete(&job_memory_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "job memcg : %m");
			if (xcgroup_delete(&user_memory_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "user memcg : %m");
			xcgroup_unlock(&memory_cg);
		} else
			error("task/cgroup: unable to lock root memcg : %m");
		xcgroup_destroy(&memory_cg);
	} else
		error("task/cgroup: unable to create root memcg : %m");

	xcgroup_destroy(&user_memory_cg);
	xcgroup_destroy(&job_memory_cg);
	xcgroup_destroy(&step_memory_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	xcgroup_ns_destroy(&memory_ns);

	return SLURM_SUCCESS;
}
开发者ID:johntconklin,项目名称:slurm,代码行数:58,
示例7: task_cgroup_devices_fini
/*
 * Remove the step/job/user devices cgroups under the root devices lock,
 * after first moving slurmstepd out of them, then release the handles,
 * the namespace and the xcpuinfo state. Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0'; the step-delete
 * result is now compared against XCGROUP_SUCCESS like the job/user
 * deletes (xcgroup_delete() returns an XCGROUP_* status).
 */
extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t devices_cg;

	/* Similarly to task_cgroup_{memory,cpuset}_fini(), we must lock the
	 * root cgroup so we don't race with another job step that is
	 * being started. */
	if (xcgroup_create(&devices_ns, &devices_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		if (xcgroup_lock(&devices_cg) == XCGROUP_SUCCESS) {
			/* First move slurmstepd to the root devices cg
			 * so we can remove the step/job/user devices
			 * cg's.  */
			xcgroup_move_process(&devices_cg, getpid());

			if (xcgroup_delete(&step_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: unable to remove step "
				       "devices : %m");
			if (xcgroup_delete(&job_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "job devices : %m");
			if (xcgroup_delete(&user_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "user devices : %m");
			xcgroup_unlock(&devices_cg);
		} else
			error("task/cgroup: unable to lock root devices : %m");
		xcgroup_destroy(&devices_cg);
	} else
		error("task/cgroup: unable to create root devices : %m");

	if (user_cgroup_path[0] != '\0')
		xcgroup_destroy(&user_devices_cg);
	if (job_cgroup_path[0] != '\0')
		xcgroup_destroy(&job_devices_cg);
	if (jobstep_cgroup_path[0] != '\0')
		xcgroup_destroy(&step_devices_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';
	cgroup_allowed_devices_file[0] = '\0';

	xcgroup_ns_destroy(&devices_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
开发者ID:Q-Leap-Networks,项目名称:qlustar-slurm,代码行数:49,
示例8: memcg_initialize
/*
 * Create and instantiate a memory cgroup at 'path' owned by uid/gid,
 * enable hierarchical accounting and apply the RAM and RAM+swap limits
 * derived from 'mem_limit' (MB). Returns 0 on success, -1 on failure.
 */
static int memcg_initialize (xcgroup_ns_t *ns, xcgroup_t *cg,
			     char *path, uint64_t mem_limit,
			     uid_t uid, gid_t gid)
{
	uint64_t ram_bytes = mem_limit_in_bytes (mem_limit);
	uint64_t memsw_bytes = swap_limit_in_bytes (mem_limit);

	if (xcgroup_create (ns, cg, path, uid, gid) != XCGROUP_SUCCESS)
		return -1;

	if (xcgroup_instanciate (cg) != XCGROUP_SUCCESS) {
		/* Creation succeeded but materialization failed:
		 * release the handle before reporting the error. */
		xcgroup_destroy (cg);
		return -1;
	}

	xcgroup_set_param (cg, "memory.use_hierarchy","1");
	xcgroup_set_uint64_param (cg, "memory.limit_in_bytes", ram_bytes);
	xcgroup_set_uint64_param (cg, "memory.memsw.limit_in_bytes",
				  memsw_bytes);

	info ("task/cgroup: %s: alloc=%luMB mem.limit=%luMB memsw.limit=%luMB",
	      path,
	      (unsigned long) mem_limit,
	      (unsigned long) ram_bytes/(1024*1024),
	      (unsigned long) memsw_bytes/(1024*1024));

	return 0;
}
开发者ID:Xarthisius,项目名称:slurm,代码行数:26,
示例9: task_cgroup_cpuset_fini
/*
 * Release the cpuset cgroup handles created for the user/job/step levels
 * (when their paths were set), reset the cached relative paths and tear
 * down the cpuset namespace. Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0'.
 */
extern int task_cgroup_cpuset_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	if (user_cgroup_path[0] != '\0')
		xcgroup_destroy(&user_cpuset_cg);
	if (job_cgroup_path[0] != '\0')
		xcgroup_destroy(&job_cpuset_cg);
	if (jobstep_cgroup_path[0] != '\0')
		xcgroup_destroy(&step_cpuset_cg);

	/* Mark all cached paths as unset. */
	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	xcgroup_ns_destroy(&cpuset_ns);

	return SLURM_SUCCESS;
}
开发者ID:kwangiit,项目名称:SLURMPP,代码行数:18,
示例10: _system_cgroup_create_slurm_cg
/*
 * Create (or re-use) the slurm-level cgroup in namespace 'ns' with
 * release notification disabled, and return its relative path as an
 * xstrdup'ed string owned by the caller.
 *
 * NOTE(review): on the early failure paths the code does xfree(pre) and
 * then returns pre — this relies on SLURM's xfree() macro NULLing the
 * pointer, so callers receive NULL; confirm against src/common/xmalloc.h.
 */
static char* _system_cgroup_create_slurm_cg (xcgroup_ns_t* ns)
{
	/* we do it here as we do not have access to the conf structure */
	/* in libslurm (src/common/xcgroup.c) */
	xcgroup_t cg;
	char* pre = (char*) xstrdup(slurm_cgroup_conf.cgroup_prepend);

#ifdef MULTIPLE_SLURMD
	if ( conf->node_name != NULL )
		xstrsubstitute(pre, "%n", conf->node_name);
	else {
		xfree(pre);
		pre = (char*) xstrdup("/slurm");
	}
#endif

	/* create slurm cgroup in the ns
	 * disable notify_on_release to avoid the removal/creation
	 * of this cgroup for each last/first running job on the node */
	if (xcgroup_create(ns, &cg, pre,
			   getuid(), getgid()) != XCGROUP_SUCCESS) {
		xfree(pre);
		return pre;
	}
	cg.notify = 0;

	if (xcgroup_instantiate(&cg) != XCGROUP_SUCCESS) {
		error("system cgroup: unable to build slurm cgroup for "
		      "ns %s: %m",
		      ns->subsystems);
		xcgroup_destroy(&cg);
		xfree(pre);
		return pre;
	}

	debug3("system cgroup: slurm cgroup %s successfully created "
	       "for ns %s: %m",
	       pre, ns->subsystems);
	xcgroup_destroy(&cg);

	return pre;
}
开发者ID:HPCNow,项目名称:slurm,代码行数:42,
示例11: _slurm_cgroup_destroy
/*
 * Freezer-hierarchy teardown that first moves slurmstepd itself out of
 * the hierarchy so the subsequent rmdir(2)-based deletes can succeed.
 *
 * Returns SLURM_SUCCESS, or SLURM_ERROR if the step cgroup cannot be
 * deleted.
 *
 * FIX: restored '\0' where the scrape produced '/0'.
 */
int _slurm_cgroup_destroy(void)
{
	xcgroup_lock(&freezer_cg);

	/*
	 * First move slurmstepd process to the root cgroup, otherwise
	 * the rmdir(2) triggered by the calls below will always fail,
	 * because slurmstepd is still in the cgroup!
	 */
	_move_current_to_root_cgroup(&freezer_ns);

	if (jobstep_cgroup_path[0] != '\0') {
		if (xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS) {
			debug("_slurm_cgroup_destroy: problem deleting step cgroup path %s: %m",
			      step_freezer_cg.path);
			xcgroup_unlock(&freezer_cg);
			return SLURM_ERROR;
		}
		xcgroup_destroy(&step_freezer_cg);
	}

	/* Best-effort removal of the shared job/user levels. */
	if (job_cgroup_path[0] != '\0') {
		xcgroup_delete(&job_freezer_cg);
		xcgroup_destroy(&job_freezer_cg);
	}

	if (user_cgroup_path[0] != '\0') {
		xcgroup_delete(&user_freezer_cg);
		xcgroup_destroy(&user_freezer_cg);
	}

	if (slurm_freezer_init) {
		xcgroup_destroy(&slurm_freezer_cg);
	}

	xcgroup_unlock(&freezer_cg);
	xcgroup_destroy(&freezer_cg);
	xcgroup_ns_destroy(&freezer_ns);

	return SLURM_SUCCESS;
}
开发者ID:rohgarg,项目名称:slurm,代码行数:41,
示例12: task_cgroup_devices_fini
/*
 * Simpler devices teardown: release the handles for the levels whose
 * paths were set, reset the cached paths and the allowed-devices file
 * name, then drop the namespace and xcpuinfo state.
 * Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0'.
 */
extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	if (user_cgroup_path[0] != '\0')
		xcgroup_destroy(&user_devices_cg);
	if (job_cgroup_path[0] != '\0')
		xcgroup_destroy(&job_devices_cg);
	if (jobstep_cgroup_path[0] != '\0')
		xcgroup_destroy(&step_devices_cg);

	/* Mark all cached state as unset. */
	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';
	cgroup_allowed_devices_file[0] = '\0';

	xcgroup_ns_destroy(&devices_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
开发者ID:FredHutch,项目名称:slurm,代码行数:21,
示例13: task_cgroup_memory_fini
/*
 * Memory teardown variant that moves slurmstepd to the root memcg and
 * rmdir(2)s the step cgroup so its charged pages migrate to the job
 * cgroup. Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0'.
 */
extern int task_cgroup_memory_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t memory_cg;

	if (user_cgroup_path[0] == '\0' ||
	    job_cgroup_path[0] == '\0' ||
	    jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root memory cg and remove[*]
	 * the step cgroup to move its allocated pages to its parent.
	 *
	 * [*] Calling rmdir(2) on an empty cgroup moves all resident charged
	 *  pages to the parent (i.e. the job cgroup). (If force_empty were
	 *  used instead, only clean pages would be flushed). This keeps
	 *  resident pagecache pages associated with the job. It is expected
	 *  that the job epilog will then optionally force_empty the
	 *  job cgroup (to flush pagecache), and then rmdir(2) the cgroup
	 *  or wait for release notification from kernel.
	 */
	if (xcgroup_create(&memory_ns, &memory_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		xcgroup_move_process(&memory_cg, getpid());
		xcgroup_destroy(&memory_cg);
		if (xcgroup_delete(&step_memory_cg) != XCGROUP_SUCCESS)
			error ("cgroup: rmdir step memcg failed: %m");
	}

	xcgroup_destroy(&user_memory_cg);
	xcgroup_destroy(&job_memory_cg);
	xcgroup_destroy(&step_memory_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	xcgroup_ns_destroy(&memory_ns);

	return SLURM_SUCCESS;
}
开发者ID:Xarthisius,项目名称:slurm,代码行数:40,
示例14: _move_current_to_root_cgroup
/*
 * Attach the calling process to the root cgroup of namespace 'ns'.
 * Returns the xcgroup_move_process() status, or SLURM_ERROR when the
 * root cgroup handle cannot be obtained.
 */
static int _move_current_to_root_cgroup(xcgroup_ns_t *ns)
{
	xcgroup_t root_cg;
	int rc;

	/* An empty relative path designates the namespace root. */
	if (xcgroup_create(ns, &root_cg, "", 0, 0) != XCGROUP_SUCCESS)
		return SLURM_ERROR;

	rc = xcgroup_move_process(&root_cg, getpid());
	xcgroup_destroy(&root_cg);

	return rc;
}
开发者ID:rohgarg,项目名称:slurm,代码行数:13,
示例15: jobacct_gather_cgroup_memory_fini
/*
 * Release the memory cgroup hierarchy used by jobacct_gather/cgroup:
 * move slurmstepd to the root memcg, force_empty the step memcg so its
 * pages are returned, then drop the local handles and namespace. The
 * release_agent performs the actual directory removal asynchronously.
 * Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0'.
 */
extern int jobacct_gather_cgroup_memory_fini(
	slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t memory_cg;

	if (user_cgroup_path[0] == '\0'
	    || job_cgroup_path[0] == '\0'
	    || jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root memory cg and force empty
	 * the step cgroup to move its allocated pages to its parent.
	 * The release_agent will asynchroneously be called for the step
	 * cgroup. It will do the necessary cleanup.
	 * It should be good if this force_empty mech could be done directly
	 * by the memcg implementation at the end of the last task managed
	 * by a cgroup. It is too difficult and near impossible to handle
	 * that cleanup correctly with current memcg.
	 */
	if (xcgroup_create(&memory_ns, &memory_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		xcgroup_set_uint32_param(&memory_cg, "tasks", getpid());
		xcgroup_destroy(&memory_cg);
		xcgroup_set_param(&step_memory_cg, "memory.force_empty", "1");
	}

	xcgroup_destroy(&user_memory_cg);
	xcgroup_destroy(&job_memory_cg);
	xcgroup_destroy(&step_memory_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	xcgroup_ns_destroy(&memory_ns);

	return SLURM_SUCCESS;
}
开发者ID:IFCA,项目名称:slurm,代码行数:39,
示例16: memcg_initialize
/*
 * Extended memcg setup: create the cgroup at 'path' with the requested
 * notify_on_release flag, enable hierarchical accounting, and apply RAM,
 * kernel-memory and (optionally) swap limits derived from 'mem_limit'
 * (MB). Returns 0 on success, -1 on failure.
 */
static int memcg_initialize (xcgroup_ns_t *ns, xcgroup_t *cg,
			     char *path, uint64_t mem_limit, uid_t uid,
			     gid_t gid, uint32_t notify)
{
	uint64_t ram_bytes = mem_limit_in_bytes (mem_limit);
	uint64_t memsw_bytes = swap_limit_in_bytes (mem_limit);

	if (xcgroup_create (ns, cg, path, uid, gid) != XCGROUP_SUCCESS)
		return -1;

	cg->notify = notify;

	if (xcgroup_instantiate (cg) != XCGROUP_SUCCESS) {
		xcgroup_destroy (cg);
		return -1;
	}

	xcgroup_set_param (cg, "memory.use_hierarchy", "1");

	/* when RAM space has not to be constrained and we are here, it
	 * means that only Swap space has to be constrained. Thus set
	 * RAM space limit to the mem+swap limit too */
	if ( ! constrain_ram_space )
		ram_bytes = memsw_bytes;
	xcgroup_set_uint64_param (cg, "memory.limit_in_bytes", ram_bytes);

	/*
	 * Also constrain kernel memory (if available).
	 * See https://lwn.net/Articles/516529/
	 */
	xcgroup_set_uint64_param (cg, "memory.kmem.limit_in_bytes",
				  ram_bytes);

	/* this limit has to be set only if ConstrainSwapSpace is set to yes */
	if ( constrain_swap_space ) {
		xcgroup_set_uint64_param (cg, "memory.memsw.limit_in_bytes",
					  memsw_bytes);
		info ("task/cgroup: %s: alloc=%luMB mem.limit=%luMB "
		      "memsw.limit=%luMB", path,
		      (unsigned long) mem_limit,
		      (unsigned long) ram_bytes/(1024*1024),
		      (unsigned long) memsw_bytes/(1024*1024));
	} else {
		info ("task/cgroup: %s: alloc=%luMB mem.limit=%luMB "
		      "memsw.limit=unlimited", path,
		      (unsigned long) mem_limit,
		      (unsigned long) ram_bytes/(1024*1024));
	}

	return 0;
}
开发者ID:A1ve5,项目名称:slurm,代码行数:50,
示例17: task_cgroup_memory_check_oom
/*
 * After a step finishes, inspect the memcg fail counters of the step and
 * job cgroups (under the root memcg lock) and log whether the memory or
 * memory+swap limits were ever hit. Always returns SLURM_SUCCESS.
 */
extern int task_cgroup_memory_check_oom(stepd_step_rec_t *job)
{
	xcgroup_t root_memcg;

	if (xcgroup_create(&memory_ns, &root_memcg, "", 0, 0)
	    != XCGROUP_SUCCESS) {
		error("task/cgroup task_cgroup_memory_check_oom: "
		      "unable to create root memcg : %m");
		return SLURM_SUCCESS;
	}

	if (xcgroup_lock(&root_memcg) != XCGROUP_SUCCESS) {
		error("task/cgroup task_cgroup_memory_check_oom: "
		      "task_cgroup_memory_check_oom: unable to lock "
		      "root memcg : %m");
		xcgroup_destroy(&root_memcg);
		return SLURM_SUCCESS;
	}

	/* for some reason the job cgroup limit is hit
	 * for a step and vice versa...
	 * can't tell which is which so we'll treat
	 * them the same */
	if (failcnt_non_zero(&step_memory_cg, "memory.memsw.failcnt"))
		/* reports the number of times that the
		 * memory plus swap space limit has
		 * reached the value set in
		 * memory.memsw.limit_in_bytes.
		 */
		error("Exceeded step memory limit at some point.");
	else if (failcnt_non_zero(&step_memory_cg, "memory.failcnt"))
		/* reports the number of times that the
		 * memory limit has reached the value set
		 * in memory.limit_in_bytes.
		 */
		error("Exceeded step memory limit at some point.");

	if (failcnt_non_zero(&job_memory_cg, "memory.memsw.failcnt"))
		error("Exceeded job memory limit at some point.");
	else if (failcnt_non_zero(&job_memory_cg, "memory.failcnt"))
		error("Exceeded job memory limit at some point.");

	xcgroup_unlock(&root_memcg);
	xcgroup_destroy(&root_memcg);

	return SLURM_SUCCESS;
}
开发者ID:A1ve5,项目名称:slurm,代码行数:44,
示例18: task_cgroup_memory_check_oom
/*
 * Variant of the OOM check with more descriptive log messages: memsw
 * failures suggest the oom-killer fired, plain memory failures suggest
 * swapping. Always returns SLURM_SUCCESS.
 */
extern int task_cgroup_memory_check_oom(stepd_step_rec_t *job)
{
	xcgroup_t root_memcg;

	if (xcgroup_create(&memory_ns, &root_memcg, "", 0, 0)
	    != XCGROUP_SUCCESS) {
		error("task/cgroup task_cgroup_memory_check_oom: "
		      "unable to create root memcg : %m");
		return SLURM_SUCCESS;
	}

	if (xcgroup_lock(&root_memcg) != XCGROUP_SUCCESS) {
		error("task/cgroup task_cgroup_memory_check_oom: "
		      "task_cgroup_memory_check_oom: unable to lock "
		      "root memcg : %m");
		xcgroup_destroy(&root_memcg);
		return SLURM_SUCCESS;
	}

	/* for some reason the job cgroup limit is hit
	 * for a step and vice versa...
	 * can't tell which is which so we'll treat
	 * them the same */
	if (failcnt_non_zero(&step_memory_cg, "memory.memsw.failcnt"))
		error("Exceeded step memory limit at some "
		      "point. oom-killer likely killed a "
		      "process.");
	else if (failcnt_non_zero(&step_memory_cg, "memory.failcnt"))
		error("Exceeded step memory limit at some "
		      "point. Step may have been partially "
		      "swapped out to disk.");

	if (failcnt_non_zero(&job_memory_cg, "memory.memsw.failcnt"))
		error("Exceeded job memory limit at some "
		      "point. oom-killer likely killed a "
		      "process.");
	else if (failcnt_non_zero(&job_memory_cg, "memory.failcnt"))
		error("Exceeded job memory limit at some "
		      "point. Job may have been partially "
		      "swapped out to disk.");

	xcgroup_unlock(&root_memcg);
	xcgroup_destroy(&root_memcg);

	return SLURM_SUCCESS;
}
开发者ID:johntconklin,项目名称:slurm,代码行数:43,
示例19: _slurm_cgroup_has_pid
/*
 * Return true when 'pid' currently belongs to this step's freezer
 * cgroup, false otherwise (including when the pid cannot be located in
 * the freezer namespace).
 *
 * FIX: the original stored the int XCGROUP_* return code of
 * xcgroup_ns_find_by_pid() in a bool before comparing it against
 * XCGROUP_SUCCESS, which only works by accident when XCGROUP_SUCCESS
 * is 0; use a separate int for the return code.
 */
bool
_slurm_cgroup_has_pid(pid_t pid)
{
	bool fstatus;
	int rc;
	xcgroup_t cg;

	rc = xcgroup_ns_find_by_pid(&freezer_ns, &cg, pid);
	if (rc != XCGROUP_SUCCESS)
		return false;

	/* The pid is ours only if its cgroup is exactly the step cgroup. */
	fstatus = (strcmp(cg.path, step_freezer_cg.path) == 0);

	xcgroup_destroy(&cg);
	return fstatus;
}
开发者ID:nqn,项目名称:slurm-mesos,代码行数:20,
示例20: init_system_memory_cgroup//.........这里部分代码省略......... /* * as the swap space threshold will be configured with a * mem+swp parameter value, if RAM space is not monitored, * set allowed RAM space to 100% of the job requested memory. * It will help to construct the mem+swp value that will be * used for both mem and mem+swp limit during memcg creation. */ if ( constrain_ram_space ) allowed_ram_space = slurm_cgroup_conf.allowed_ram_space; else allowed_ram_space = 100.0; allowed_swap_space = slurm_cgroup_conf.allowed_swap_space; if ((totalram = (uint64_t) conf->real_memory_size) == 0) error ("system cgroup: Unable to get RealMemory size"); max_kmem = _percent_in_bytes(totalram, slurm_cgroup_conf.max_kmem_percent); max_ram = _percent_in_bytes(totalram, slurm_cgroup_conf.max_ram_percent); max_swap = _percent_in_bytes(totalram, slurm_cgroup_conf.max_swap_percent); max_swap += max_ram; min_ram_space = slurm_cgroup_conf.min_ram_space * 1024 * 1024; debug ("system cgroup: memory: total:%luM allowed:%.4g%%(%s), " "swap:%.4g%%(%s), max:%.4g%%(%luM) " "max+swap:%.4g%%(%luM) min:%luM " "kmem:%.4g%%(%luM %s) min:%luM", (unsigned long) totalram, allowed_ram_space, constrain_ram_space?"enforced":"permissive", allowed_swap_space, constrain_swap_space?"enforced":"permissive", slurm_cgroup_conf.max_ram_percent, (unsigned long) (max_ram/(1024*1024)), slurm_cgroup_conf.max_swap_percent, (unsigned long) (max_swap/(1024*1024)), (unsigned long) slurm_cgroup_conf.min_ram_space, slurm_cgroup_conf.max_kmem_percent, (unsigned long)(max_kmem/(1024*1024)), constrain_kmem_space?"enforced":"permissive", (unsigned long) slurm_cgroup_conf.min_kmem_space); /* * Warning: OOM Killer must be disabled for slurmstepd * or it would be destroyed if the application use * more memory than permitted * * If an env value is already set for slurmstepd * OOM killer behavior, keep it, otherwise set the * -1000 value, wich means do not let OOM killer kill it * * FYI, setting "export SLURMSTEPD_OOM_ADJ=-1000" * 
in /etc/sysconfig/slurm would be the same */ setenv("SLURMSTEPD_OOM_ADJ", "-1000", 0); /* create slurm root cg in this cg namespace */ slurm_cgpath = _system_cgroup_create_slurm_cg(&memory_ns); if ( slurm_cgpath == NULL ) { xcgroup_ns_destroy(&memory_ns); free_slurm_cgroup_conf(&slurm_cgroup_conf); return SLURM_ERROR; } /* build system cgroup relative path */ snprintf(system_cgroup_path, PATH_MAX, "%s/system", slurm_cgpath); xfree(slurm_cgpath); /* create system cgroup in the cpuset ns */ if (xcgroup_create(&memory_ns, &system_memory_cg, system_cgroup_path, getuid(), getgid()) != XCGROUP_SUCCESS) { goto error; } if (xcgroup_instantiate(&system_memory_cg) != XCGROUP_SUCCESS) { goto error; } if ( xcgroup_set_param(&system_memory_cg, "memory.use_hierarchy", "1") != XCGROUP_SUCCESS ) { error("system cgroup: unable to ask for hierarchical accounting" "of system memcg '%s'", system_memory_cg.path); goto error; } free_slurm_cgroup_conf(&slurm_cgroup_conf); debug("system cgroup: system memory cgroup initialized"); return SLURM_SUCCESS;error: xcgroup_unlock(&system_memory_cg); xcgroup_destroy(&system_memory_cg); xcgroup_ns_destroy(&memory_ns); free_slurm_cgroup_conf(&slurm_cgroup_conf); return fstatus;}
开发者ID:jtfrey,项目名称:slurm,代码行数:101,
示例21: task_cgroup_devices_createextern int task_cgroup_devices_create(stepd_step_rec_t *job){ int f, k, rc, gres_conf_lines, allow_lines; int fstatus = SLURM_ERROR; char **gres_name = NULL; char **gres_cgroup = NULL, **dev_path = NULL; char *allowed_devices[PATH_MAX], *allowed_dev_major[PATH_MAX]; int *gres_job_bit_alloc = NULL; int *gres_step_bit_alloc = NULL; int *gres_count = NULL; xcgroup_t devices_cg; uint32_t jobid = job->jobid; uint32_t stepid = job->stepid; uid_t uid = job->uid; uid_t gid = job->gid; List job_gres_list = job->job_gres_list; List step_gres_list = job->step_gres_list; char* slurm_cgpath ; /* create slurm root cgroup in this cgroup namespace */ slurm_cgpath = task_cgroup_create_slurm_cg(&devices_ns); if (slurm_cgpath == NULL) return SLURM_ERROR; /* build user cgroup relative path if not set (should not be) */ if (*user_cgroup_path == '/0') { if (snprintf(user_cgroup_path, PATH_MAX, "%s/uid_%u", slurm_cgpath, uid) >= PATH_MAX) { error("unable to build uid %u cgroup relative path : %m", uid); xfree(slurm_cgpath); return SLURM_ERROR; } } xfree(slurm_cgpath); /* build job cgroup relative path if no set (should not be) */ if (*job_cgroup_path == '/0') { if (snprintf(job_cgroup_path, PATH_MAX, "%s/job_%u", user_cgroup_path, jobid) >= PATH_MAX) { error("task/cgroup: unable to build job %u devices " "cgroup relative path : %m", jobid); return SLURM_ERROR; } } /* build job step cgroup relative path (should not be) */ if (*jobstep_cgroup_path == '/0') { int cc; if (stepid == SLURM_BATCH_SCRIPT) { cc = snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_batch", job_cgroup_path); } else if (stepid == SLURM_EXTERN_CONT) { cc = snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_extern", job_cgroup_path); } else { cc = snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u", job_cgroup_path, stepid); } if (cc >= PATH_MAX) { error("task/cgroup: unable to build job step %u.%u " "devices cgroup relative path : %m", jobid, stepid); return SLURM_ERROR; } } /* * create 
devices root cgroup and lock it * * we will keep the lock until the end to avoid the effect of a release * agent that would remove an existing cgroup hierarchy while we are * setting it up. As soon as the step cgroup is created, we can release * the lock. * Indeed, consecutive slurm steps could result in cgroup being removed * between the next EEXIST instanciation and the first addition of * a task. The release_agent will have to lock the root devices cgroup * to avoid this scenario. */ if (xcgroup_create(&devices_ns, &devices_cg, "", 0, 0) != XCGROUP_SUCCESS ) { error("task/cgroup: unable to create root devices cgroup"); return SLURM_ERROR; } if (xcgroup_lock(&devices_cg) != XCGROUP_SUCCESS) { xcgroup_destroy(&devices_cg); error("task/cgroup: unable to lock root devices cgroup"); return SLURM_ERROR; } info("task/cgroup: manage devices jor job '%u'", jobid); /* * collect info concerning the gres.conf file * the GRES devices paths and the GRES names */ gres_conf_lines = gres_plugin_node_config_devices_path(&dev_path,//.........这里部分代码省略.........
开发者ID:Q-Leap-Networks,项目名称:qlustar-slurm,代码行数:101,
示例22: jobacct_gather_cgroup_memory_attach_taskextern int jobacct_gather_cgroup_memory_attach_task( pid_t pid, jobacct_id_t *jobacct_id){ xcgroup_t memory_cg; slurmd_job_t *job; uid_t uid; gid_t gid; uint32_t jobid; uint32_t stepid; uint32_t taskid; int fstatus = SLURM_SUCCESS; int rc; char* slurm_cgpath; job = jobacct_id->job; uid = job->uid; gid = job->gid; jobid = job->jobid; stepid = job->stepid; taskid = jobacct_id->taskid; /* create slurm root cg in this cg namespace */ slurm_cgpath = jobacct_cgroup_create_slurm_cg(&memory_ns); if (!slurm_cgpath) { return SLURM_ERROR; } /* build user cgroup relative path if not set (should not be) */ if (*user_cgroup_path == '/0') { if (snprintf(user_cgroup_path, PATH_MAX, "%s/uid_%u", slurm_cgpath, uid) >= PATH_MAX) { error("unable to build uid %u cgroup relative " "path : %m", uid); xfree(slurm_cgpath); return SLURM_ERROR; } } /* build job cgroup relative path if not set (may not be) */ if (*job_cgroup_path == '/0') { if (snprintf(job_cgroup_path, PATH_MAX, "%s/job_%u", user_cgroup_path, jobid) >= PATH_MAX) { error("jobacct_gather/cgroup: unable to build job %u " "memory cg relative path : %m", jobid); return SLURM_ERROR; } } /* build job step cgroup relative path if not set (may not be) */ if (*jobstep_cgroup_path == '/0') { if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u", job_cgroup_path, stepid) >= PATH_MAX) { error("jobacct_gather/cgroup: unable to build job step " "%u memory cg relative path : %m", stepid); return SLURM_ERROR; } } /* build task cgroup relative path */ if (snprintf(task_cgroup_path, PATH_MAX, "%s/task_%u", jobstep_cgroup_path, taskid) >= PATH_MAX) { error("jobacct_gather/cgroup: unable to build task %u " "memory cg relative path : %m", taskid); return SLURM_ERROR; } fstatus = SLURM_SUCCESS; /* * create memory root cg and lock it * * we will keep the lock until the end to avoid the effect of a release * agent that would remove an existing cgroup hierarchy while we are * setting it up. 
As soon as the step cgroup is created, we can release * the lock. * Indeed, consecutive slurm steps could result in cg being removed * between the next EEXIST instanciation and the first addition of * a task. The release_agent will have to lock the root memory cgroup * to avoid this scenario. */ if (xcgroup_create(&memory_ns, &memory_cg, "", 0, 0) != XCGROUP_SUCCESS) { error("jobacct_gather/cgroup: unable to create root memory " "xcgroup"); return SLURM_ERROR; } if (xcgroup_lock(&memory_cg) != XCGROUP_SUCCESS) { xcgroup_destroy(&memory_cg); error("jobacct_gather/cgroup: unable to lock root memory cg"); return SLURM_ERROR; } /* * Create user cgroup in the memory ns (it could already exist) * Ask for hierarchical memory accounting starting from the user * container in order to track the memory consumption up to the * user. */ if (xcgroup_create(&memory_ns, &user_memory_cg,//.........这里部分代码省略.........
开发者ID:IFCA,项目名称:slurm,代码行数:101,
示例23: jobacct_gather_cgroup_cpuacct_fini
/*
 * Extended cpuacct teardown: move slurmstepd to the root cpuacct cgroup,
 * lock the root, rmdir every per-task cgroup this stepd created (killing
 * extern-step processes first), then the step/job/user levels, and
 * finally release all handles and the namespace.
 * Always returns SLURM_SUCCESS.
 *
 * FIX: restored '\0' where the scrape produced '/0'; replaced the
 * unbounded sprintf() into buf[PATH_MAX] with snprintf(); the step-level
 * delete failure now logs step_cpuacct_cg.path instead of the root
 * cpuacct_cg.path (copy/paste error in the original message).
 *
 * NOTE(review): if xcgroup_create() fails, cpuacct_cg is used
 * uninitialized by the xcgroup_lock() below — preserved from the
 * original; confirm upstream behavior before restructuring.
 */
extern intjobacct_gather_cgroup_cpuacct_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t cpuacct_cg;
	bool lock_ok;
	int cc;

	if (user_cgroup_path[0] == '\0'
	    || job_cgroup_path[0] == '\0'
	    || jobstep_cgroup_path[0] == '\0'
	    || task_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root cpuacct cg.
	 * The release_agent will asynchroneously be called for the step
	 * cgroup. It will do the necessary cleanup.
	 */
	if (xcgroup_create(&cpuacct_ns, &cpuacct_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		xcgroup_set_uint32_param(&cpuacct_cg, "tasks", getpid());
	}

	/* Lock the root of the cgroup and remove the subdirectories
	 * related to this job.
	 */
	lock_ok = true;
	if (xcgroup_lock(&cpuacct_cg) != XCGROUP_SUCCESS) {
		error("%s: failed to flock() %s %m",
		      __func__, cpuacct_cg.path);
		lock_ok = false;
	}

	/* Clean up starting from the leaves way up, the
	 * reverse order in which the cgroups were created.
	 */
	for (cc = 0; cc <= max_task_id; cc++) {
		xcgroup_t cgroup;
		char buf[PATH_MAX];

		/* rmdir all tasks this running slurmstepd
		 * was responsible for.
		 */
		snprintf(buf, sizeof(buf), "%s%s/task_%d",
			 cpuacct_ns.mnt_point, jobstep_cgroup_path, cc);
		cgroup.path = buf;

		if (strstr(buf, "step_extern"))
			kill_extern_procs(cgroup.path);

		if (xcgroup_delete(&cgroup) != XCGROUP_SUCCESS) {
			debug2("%s: failed to delete %s %m", __func__, buf);
		}
	}

	if (xcgroup_delete(&step_cpuacct_cg) != XCGROUP_SUCCESS) {
		debug2("%s: failed to delete %s %m",
		       __func__, step_cpuacct_cg.path);
	}

	if (xcgroup_delete(&job_cpuacct_cg) != XCGROUP_SUCCESS) {
		debug2("%s: failed to delete %s %m",
		       __func__, job_cpuacct_cg.path);
	}

	if (xcgroup_delete(&user_cpuacct_cg) != XCGROUP_SUCCESS) {
		debug2("%s: failed to delete %s %m",
		       __func__, user_cpuacct_cg.path);
	}

	if (lock_ok == true)
		xcgroup_unlock(&cpuacct_cg);

	xcgroup_destroy(&task_cpuacct_cg);
	xcgroup_destroy(&user_cpuacct_cg);
	xcgroup_destroy(&job_cpuacct_cg);
	xcgroup_destroy(&step_cpuacct_cg);
	xcgroup_destroy(&cpuacct_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';
	task_cgroup_path[0] = '\0';

	xcgroup_ns_destroy(&cpuacct_ns);

	return SLURM_SUCCESS;
}
开发者ID:acrognale,项目名称:slurm,代码行数:87,
示例24: _slurm_cgroup_createint _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gid){ /* we do it here as we do not have access to the conf structure */ /* in libslurm (src/common/xcgroup.c) */ char *pre = (char *)xstrdup(slurm_cgroup_conf.cgroup_prepend);#ifdef MULTIPLE_SLURMD if ( conf->node_name != NULL ) xstrsubstitute(pre,"%n", conf->node_name); else { xfree(pre); pre = (char*) xstrdup("/slurm"); }#endif if (xcgroup_create(&freezer_ns, &slurm_freezer_cg, pre, getuid(), getgid()) != XCGROUP_SUCCESS) { return SLURM_ERROR; } /* * While creating the cgroup hierarchy of the step, lock the root * cgroup directory. The same lock is hold during removal of the * hierarchies of other jobs/steps. This helps to avoid the race * condition with concurrent creation/removal of the intermediate * shared directories that could result in the failure of the * hierarchy setup */ xcgroup_lock(&freezer_cg); /* create slurm cgroup in the freezer ns (it could already exist) */ if (xcgroup_instanciate(&slurm_freezer_cg) != XCGROUP_SUCCESS) goto bail; /* build user cgroup relative path if not set (should not be) */ if (*user_cgroup_path == '/0') { if (snprintf(user_cgroup_path, PATH_MAX, "%s/uid_%u", pre, uid) >= PATH_MAX) { error("unable to build uid %u cgroup relative " "path : %m", uid); xfree(pre); goto bail; } } xfree(pre); /* build job cgroup relative path if no set (should not be) */ if (*job_cgroup_path == '/0') { if (snprintf(job_cgroup_path, PATH_MAX, "%s/job_%u", user_cgroup_path, job->jobid) >= PATH_MAX) { error("unable to build job %u cgroup relative " "path : %m", job->jobid); goto bail; } } /* build job step cgroup relative path (should not be) */ if (*jobstep_cgroup_path == '/0') { if (job->stepid == NO_VAL) { if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_batch", job_cgroup_path) >= PATH_MAX) { error("proctrack/cgroup unable to build job step" " %u.batch freezer cg relative path: %m", job->jobid); goto bail; } } else { if 
(snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u", job_cgroup_path, job->stepid) >= PATH_MAX) { error("proctrack/cgroup unable to build job step" " %u.%u freezer cg relative path: %m", job->jobid, job->stepid); goto bail; } } } /* create user cgroup in the freezer ns (it could already exist) */ if (xcgroup_create(&freezer_ns, &user_freezer_cg, user_cgroup_path, getuid(), getgid()) != XCGROUP_SUCCESS) { xcgroup_destroy(&slurm_freezer_cg); goto bail; } /* create job cgroup in the freezer ns (it could already exist) */ if (xcgroup_create(&freezer_ns, &job_freezer_cg, job_cgroup_path, getuid(), getgid()) != XCGROUP_SUCCESS) { xcgroup_destroy(&slurm_freezer_cg); xcgroup_destroy(&user_freezer_cg); goto bail; } /* create step cgroup in the freezer ns (it should not exists) */ if (xcgroup_create(&freezer_ns, &step_freezer_cg, jobstep_cgroup_path, getuid(), getgid()) != XCGROUP_SUCCESS) { xcgroup_destroy(&slurm_freezer_cg); xcgroup_destroy(&user_freezer_cg);//.........这里部分代码省略.........
开发者ID:nqn,项目名称:slurm-mesos,代码行数:101,
示例25: _xcgroup_cpuset_init
/* when cgroups are configured with cpuset, at least
 * cpuset.cpus and cpuset.mems must be set or the cgroup
 * will not be available at all.
 * we duplicate the ancestor configuration in the init step
 *
 * Returns XCGROUP_SUCCESS when both "cpuset.cpus" and "cpuset.mems"
 * were copied from the ancestor cgroup into 'cg', XCGROUP_ERROR
 * otherwise (no ancestor path, ancestor not loadable, or the
 * parameters could not be read/written).
 */
static int _xcgroup_cpuset_init(xcgroup_t* cg)
{
	int fstatus, i;
	char* cpuset_metafiles[] = {
		"cpuset.cpus",
		"cpuset.mems"
	};
	char* cpuset_meta;
	char* cpuset_conf = NULL;
	size_t csize = 0;
	xcgroup_t acg;
	char* acg_name = NULL;
	char* p;

	fstatus = XCGROUP_ERROR;

	/* load ancestor cg: strip the last path component of cg->name */
	acg_name = (char*) xstrdup(cg->name);
	p = rindex(acg_name, '/');
	if (p == NULL) {
		debug2("task/cgroup: unable to get ancestor path for "
		       "cpuset cg '%s' : %m", cg->path);
		/* fix: acg_name was leaked on this path */
		xfree(acg_name);
		return fstatus;
	} else {
		/* fix: was the multi-char constant '/0', which does not
		 * NUL-terminate; '\0' truncates at the parent directory */
		*p = '\0';
	}
	if (xcgroup_load(cg->ns, &acg, acg_name) != XCGROUP_SUCCESS) {
		debug2("task/cgroup: unable to load ancestor for "
		       "cpuset cg '%s' : %m", cg->path);
		/* fix: acg_name was leaked on this path too */
		xfree(acg_name);
		return fstatus;
	}
	/* fix: ancestor name no longer needed once loaded */
	xfree(acg_name);

	/* inherits ancestor params */
	for (i = 0 ; i < 2 ; i++) {
		cpuset_meta = cpuset_metafiles[i];
		if (xcgroup_get_param(&acg, cpuset_meta,
				      &cpuset_conf, &csize)
		    != XCGROUP_SUCCESS) {
			debug2("task/cgroup: assuming no cpuset cg "
			       "support for '%s'", acg.path);
			xcgroup_destroy(&acg);
			return fstatus;
		}
		if (csize > 0)
			/* fix: '\0' instead of '/0'; drops the trailing
			 * char (presumably a newline from the cgroup fs —
			 * TODO confirm) */
			cpuset_conf[csize-1] = '\0';
		if (xcgroup_set_param(cg, cpuset_meta, cpuset_conf)
		    != XCGROUP_SUCCESS) {
			debug2("task/cgroup: unable to write %s configuration "
			       "(%s) for cpuset cg '%s'", cpuset_meta,
			       cpuset_conf, cg->path);
			xcgroup_destroy(&acg);
			xfree(cpuset_conf);
			return fstatus;
		}
		xfree(cpuset_conf);
	}

	xcgroup_destroy(&acg);
	return XCGROUP_SUCCESS;
}
开发者ID:tpatki,项目名称:slurm_test,代码行数:65,
示例26: task_cgroup_cpuset_createextern int task_cgroup_cpuset_create(slurmd_job_t *job){ int rc; int fstatus = SLURM_ERROR; xcgroup_t cpuset_cg; uint32_t jobid = job->jobid; uint32_t stepid = job->stepid; uid_t uid = job->uid; uid_t gid = job->gid; char* user_alloc_cores = NULL; char* job_alloc_cores = NULL; char* step_alloc_cores = NULL; char* cpus = NULL; size_t cpus_size; char* slurm_cgpath ; xcgroup_t slurm_cg; /* create slurm root cg in this cg namespace */ slurm_cgpath = task_cgroup_create_slurm_cg(&cpuset_ns); if ( slurm_cgpath == NULL ) { return SLURM_ERROR; } /* check that this cgroup has cpus allowed or initialize them */ if (xcgroup_load(&cpuset_ns,&slurm_cg,slurm_cgpath) != XCGROUP_SUCCESS) { error("task/cgroup: unable to load slurm cpuset xcgroup"); xfree(slurm_cgpath); return SLURM_ERROR; } rc = xcgroup_get_param(&slurm_cg,"cpuset.cpus",&cpus,&cpus_size); if (rc != XCGROUP_SUCCESS || cpus_size == 1) { /* initialize the cpusets as it was inexistant */ if (_xcgroup_cpuset_init(&slurm_cg) != XCGROUP_SUCCESS) { xfree(slurm_cgpath); xcgroup_destroy(&slurm_cg); return SLURM_ERROR; } } xfree(cpus); /* build user cgroup relative path if not set (should not be) */ if (*user_cgroup_path == '/0') { if (snprintf(user_cgroup_path, PATH_MAX, "%s/uid_%u", slurm_cgpath, uid) >= PATH_MAX) { error("unable to build uid %u cgroup relative " "path : %m", uid); xfree(slurm_cgpath); return SLURM_ERROR; } } xfree(slurm_cgpath); /* build job cgroup relative path if no set (should not be) */ if (*job_cgroup_path == '/0') { if (snprintf(job_cgroup_path,PATH_MAX,"%s/job_%u", user_cgroup_path,jobid) >= PATH_MAX) { error("task/cgroup: unable to build job %u cpuset " "cg relative path : %m",jobid); return SLURM_ERROR; } } /* build job step cgroup relative path (should not be) */ if (*jobstep_cgroup_path == '/0') { if (stepid == NO_VAL) { if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_batch", job_cgroup_path) >= PATH_MAX) { error("task/cgroup: unable to build job step" " 
%u.batch cpuset cg relative path: %m", jobid); return SLURM_ERROR; } } else { if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u", job_cgroup_path, stepid) >= PATH_MAX) { error("task/cgroup: unable to build job step" " %u.%u cpuset cg relative path: %m", jobid, stepid); return SLURM_ERROR; } } } /* * create cpuset root cg and lock it * * we will keep the lock until the end to avoid the effect of a release * agent that would remove an existing cgroup hierarchy while we are * setting it up. As soon as the step cgroup is created, we can release * the lock. * Indeed, consecutive slurm steps could result in cg being removed * between the next EEXIST instanciation and the first addition of * a task. The release_agent will have to lock the root cpuset cgroup//.........这里部分代码省略.........
开发者ID:tpatki,项目名称:slurm_test,代码行数:101,
示例27: task_cgroup_cpuset_createextern int task_cgroup_cpuset_create(stepd_step_rec_t *job){ int rc; int fstatus = SLURM_ERROR; xcgroup_t cpuset_cg; uint32_t jobid = job->jobid; uint32_t stepid = job->stepid; uid_t uid = job->uid; uid_t gid = job->gid; char* user_alloc_cores = NULL; char* job_alloc_cores = NULL; char* step_alloc_cores = NULL; char cpuset_meta[PATH_MAX]; char* cpus = NULL; size_t cpus_size; char* slurm_cgpath; xcgroup_t slurm_cg;#ifdef HAVE_NATIVE_CRAY char expected_usage[32];#endif /* create slurm root cg in this cg namespace */ slurm_cgpath = task_cgroup_create_slurm_cg(&cpuset_ns); if ( slurm_cgpath == NULL ) { return SLURM_ERROR; } /* check that this cgroup has cpus allowed or initialize them */ if (xcgroup_load(&cpuset_ns,&slurm_cg,slurm_cgpath) != XCGROUP_SUCCESS) { error("task/cgroup: unable to load slurm cpuset xcgroup"); xfree(slurm_cgpath); return SLURM_ERROR; }again: snprintf(cpuset_meta, sizeof(cpuset_meta), "%scpus", cpuset_prefix); rc = xcgroup_get_param(&slurm_cg, cpuset_meta, &cpus,&cpus_size); if (rc != XCGROUP_SUCCESS || cpus_size == 1) { if (!cpuset_prefix_set && (rc != XCGROUP_SUCCESS)) { cpuset_prefix_set = 1; cpuset_prefix = "cpuset."; goto again; } /* initialize the cpusets as it was inexistant */ if (_xcgroup_cpuset_init(&slurm_cg) != XCGROUP_SUCCESS) { xfree(slurm_cgpath); xcgroup_destroy(&slurm_cg); return SLURM_ERROR; } } xfree(cpus); /* build user cgroup relative path if not set (should not be) */ if (*user_cgroup_path == '/0') { if (snprintf(user_cgroup_path, PATH_MAX, "%s/uid_%u", slurm_cgpath, uid) >= PATH_MAX) { error("task/cgroup: unable to build uid %u cgroup " "relative path : %m", uid); xfree(slurm_cgpath); return SLURM_ERROR; } } xfree(slurm_cgpath); /* build job cgroup relative path if no set (should not be) */ if (*job_cgroup_path == '/0') { if (snprintf(job_cgroup_path,PATH_MAX,"%s/job_%u", user_cgroup_path,jobid) >= PATH_MAX) { error("task/cgroup: unable to build job %u cpuset " "cg relative path : %m",jobid); 
return SLURM_ERROR; } } /* build job step cgroup relative path (should not be) */ if (*jobstep_cgroup_path == '/0') { int cc; if (stepid == SLURM_BATCH_SCRIPT) { cc = snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_batch", job_cgroup_path); } else if (stepid == SLURM_EXTERN_CONT) { cc = snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_extern", job_cgroup_path); } else { cc = snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u", job_cgroup_path, stepid); } if (cc >= PATH_MAX) { error("task/cgroup: unable to build job step %u.%u " "cpuset cg relative path: %m", jobid, stepid); return SLURM_ERROR; }//.........这里部分代码省略.........
开发者ID:Qilewuqiong,项目名称:slurm,代码行数:101,
示例28: init_system_cpuset_cgroup
/*
 * Initialize the "system" cpuset cgroup hierarchy for slurmd.
 *
 * Steps (each failure path releases everything acquired so far):
 *   1. read the slurm cgroup configuration,
 *   2. create the cpuset cgroup namespace,
 *   3. create/load the slurm root cgroup inside that namespace,
 *   4. make sure the root cgroup has cpus/mems populated
 *      (inheriting them from the ancestor if needed),
 *   5. create and initialize the "<slurm_root>/system" cpuset cgroup.
 *
 * Returns SLURM_SUCCESS on success, an error code otherwise.
 * Side effects: fills the file-scope system_cgroup_path, cpuset_ns and
 * system_cpuset_cg objects, and may switch the file-scope cpuset_prefix
 * globals (see below).
 */
extern int init_system_cpuset_cgroup(void)
{
	int rc;
	int fstatus = SLURM_ERROR;
	char* cpus = NULL;
	size_t cpus_size;
	char* slurm_cgpath;
	xcgroup_t slurm_cg;

	/* read cgroup configuration */
	if (read_slurm_cgroup_conf(&slurm_cgroup_conf))
		return SLURM_ERROR;

	/* initialize cpuset cgroup namespace */
	if (xcgroup_ns_create(&slurm_cgroup_conf, &cpuset_ns, "", "cpuset")
	    != XCGROUP_SUCCESS) {
		error("system cgroup: unable to create cpuset namespace");
		free_slurm_cgroup_conf(&slurm_cgroup_conf);
		return SLURM_ERROR;
	}

	/* create slurm root cg in this cg namespace */
	slurm_cgpath = _system_cgroup_create_slurm_cg(&cpuset_ns);
	if ( slurm_cgpath == NULL ) {
		xcgroup_ns_destroy(&cpuset_ns);
		free_slurm_cgroup_conf(&slurm_cgroup_conf);
		return SLURM_ERROR;
	}

	/* check that this cgroup has cpus allowed or initialize them */
	if (xcgroup_load(&cpuset_ns, &slurm_cg, slurm_cgpath)
	    != XCGROUP_SUCCESS) {
		error("system cgroup: unable to load slurm cpuset xcgroup");
		xfree(slurm_cgpath);
		xcgroup_ns_destroy(&cpuset_ns);
		free_slurm_cgroup_conf(&slurm_cgroup_conf);
		return SLURM_ERROR;
	}

again:
	/* probe the "<prefix>cpus" parameter; on first failure retry once
	 * with the "cpuset." prefix (some mounts name the files
	 * "cpuset.cpus" rather than "cpus" — the cpuset_prefix globals
	 * remember the working form for the rest of the process) */
	snprintf(cpuset_meta, sizeof(cpuset_meta), "%scpus", cpuset_prefix);
	rc = xcgroup_get_param(&slurm_cg, cpuset_meta, &cpus, &cpus_size);
	if (rc != XCGROUP_SUCCESS || cpus_size == 1) {
		if (!cpuset_prefix_set && (rc != XCGROUP_SUCCESS)) {
			cpuset_prefix_set = 1;
			cpuset_prefix = "cpuset.";
			goto again;
		}
		/* initialize the cpusets as it was nonexistent */
		if (_xcgroup_cpuset_init(&slurm_cg) != XCGROUP_SUCCESS) {
			xfree(slurm_cgpath);
			xcgroup_destroy(&slurm_cg);
			xcgroup_ns_destroy(&cpuset_ns);
			free_slurm_cgroup_conf(&slurm_cgroup_conf);
			xfree(cpus);
			return SLURM_ERROR;
		}
	}
	xcgroup_destroy(&slurm_cg);
	xfree(cpus);

	/* build system cgroup relative path */
	snprintf(system_cgroup_path, PATH_MAX, "%s/system", slurm_cgpath);
	xfree(slurm_cgpath);

	/* create system cgroup in the cpuset ns */
	if (xcgroup_create(&cpuset_ns, &system_cpuset_cg, system_cgroup_path,
			   getuid(),getgid()) != XCGROUP_SUCCESS) {
		goto error;
	}
	if (xcgroup_instantiate(&system_cpuset_cg) != XCGROUP_SUCCESS) {
		goto error;
	}
	if (_xcgroup_cpuset_init(&system_cpuset_cg) != XCGROUP_SUCCESS) {
		goto error;
	}

	free_slurm_cgroup_conf(&slurm_cgroup_conf);
	debug("system cgroup: system cpuset cgroup initialized");
	return SLURM_SUCCESS;

error:
	/* NOTE(review): no xcgroup_lock is visible in this function, yet
	 * the error path unlocks system_cpuset_cg — presumably
	 * xcgroup_unlock is a no-op on an unlocked cgroup; confirm */
	xcgroup_unlock(&system_cpuset_cg);
	xcgroup_destroy(&system_cpuset_cg);
	xcgroup_ns_destroy(&cpuset_ns);
	free_slurm_cgroup_conf(&slurm_cgroup_conf);
	return fstatus;
}
开发者ID:jtfrey,项目名称:slurm,代码行数:89,
示例29: _xcgroup_cpuset_init
/* when cgroups are configured with cpuset, at least
 * cpuset.cpus and cpuset.mems must be set or the cgroup
 * will not be available at all.
 * we duplicate the ancestor configuration in the init step
 *
 * Returns XCGROUP_SUCCESS when both "<prefix>cpus" and "<prefix>mems"
 * were copied from the ancestor cgroup into 'cg', XCGROUP_ERROR
 * otherwise. May flip the file-scope cpuset_prefix/cpuset_prefix_set
 * globals the first time a parameter read fails (retrying once with the
 * "cpuset." file-name prefix).
 */
static int _xcgroup_cpuset_init(xcgroup_t* cg)
{
	int fstatus, i;
	char* cpuset_metafiles[] = {
		"cpus",
		"mems"
	};
	char* cpuset_conf = NULL;
	size_t csize = 0;
	xcgroup_t acg;
	char* acg_name = NULL;
	char* p;

	fstatus = XCGROUP_ERROR;

	/* load ancestor cg: strip the last path component of cg->name */
	acg_name = (char*) xstrdup(cg->name);
	p = xstrrchr(acg_name, '/');
	if (p == NULL) {
		debug2("system cgroup: unable to get ancestor path for "
		       "cpuset cg '%s' : %m", cg->path);
		xfree(acg_name);
		return fstatus;
	} else {
		/* fix: was the multi-char constant '/0', which does not
		 * NUL-terminate; '\0' truncates at the parent directory */
		*p = '\0';
	}
	if (xcgroup_load(cg->ns, &acg, acg_name) != XCGROUP_SUCCESS) {
		debug2("system cgroup: unable to load ancestor for "
		       "cpuset cg '%s' : %m", cg->path);
		xfree(acg_name);
		return fstatus;
	}
	xfree(acg_name);

	/* inherits ancestor params */
	for (i = 0 ; i < 2 ; i++) {
	again:
		snprintf(cpuset_meta, sizeof(cpuset_meta), "%s%s",
			 cpuset_prefix, cpuset_metafiles[i]);
		if (xcgroup_get_param(&acg, cpuset_meta,
				      &cpuset_conf, &csize)
		    != XCGROUP_SUCCESS) {
			/* retry once with the "cpuset." file-name prefix
			 * before concluding there is no cpuset support */
			if (!cpuset_prefix_set) {
				cpuset_prefix_set = 1;
				cpuset_prefix = "cpuset.";
				goto again;
			}
			debug("system cgroup: assuming no cpuset cg "
			      "support for '%s'", acg.path);
			xcgroup_destroy(&acg);
			return fstatus;
		}
		if (csize > 0)
			/* fix: '\0' instead of '/0'; drops the trailing
			 * char (presumably a newline from the cgroup fs —
			 * TODO confirm) */
			cpuset_conf[csize-1] = '\0';
		if (xcgroup_set_param(cg, cpuset_meta, cpuset_conf)
		    != XCGROUP_SUCCESS) {
			debug("system cgroup: unable to write %s configuration "
			      "(%s) for cpuset cg '%s'", cpuset_meta,
			      cpuset_conf, cg->path);
			xcgroup_destroy(&acg);
			xfree(cpuset_conf);
			return fstatus;
		}
		xfree(cpuset_conf);
	}

	xcgroup_destroy(&acg);
	return XCGROUP_SUCCESS;
}
开发者ID:jtfrey,项目名称:slurm,代码行数:75,
注:本文中的xcgroup_destroy函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 C++ xchat_print函数代码示例 C++ xceiv_to_twl函数代码示例 |