This hands-on tutorial collects practical C++ code examples for the xbt_malloc function; we hope it helps you.
This article collects typical usage examples of the C++ xbt_malloc function. If you have been wondering how exactly xbt_malloc is used in C++, what it does, or what real-world code calling it looks like, the hand-picked examples below should help. The article presents 30 code examples of the xbt_malloc function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code examples.

Example 1: smpi_coll_tuned_reduce_flat_tree

int smpi_coll_tuned_reduce_flat_tree(void *sbuf, void *rbuf, int count,
                                     MPI_Datatype dtype, MPI_Op op,
                                     int root, MPI_Comm comm)
{
  int i, tag = 4321;
  int size;
  int rank;
  MPI_Aint extent;
  char *origin = 0;
  char *inbuf;
  MPI_Status status;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  /* If not root, send data to the root. */
  extent = smpi_datatype_get_extent(dtype);

  if (rank != root) {
    smpi_mpi_send(sbuf, count, dtype, root, tag, comm);
    return 0;
  }

  /* Root receives and reduces messages. Allocate buffer to receive messages. */
  if (size > 1)
    origin = (char *) xbt_malloc(count * extent);

  /* Initialize the receive buffer. */
  if (rank == (size - 1))
    smpi_mpi_sendrecv(sbuf, count, dtype, rank, tag,
                      rbuf, count, dtype, rank, tag, comm, &status);
  else
    smpi_mpi_recv(rbuf, count, dtype, size - 1, tag, comm, &status);

  /* Loop receiving and calling reduction function (C or Fortran). */
  for (i = size - 2; i >= 0; --i) {
    if (rank == i)
      inbuf = (char *) sbuf;    /* cast added: sbuf is a void* */
    else {
      smpi_mpi_recv(origin, count, dtype, i, tag, comm, &status);
      inbuf = origin;
    }

    /* Call reduction function. */
    smpi_op_apply(op, inbuf, rbuf, &count, &dtype);
  }

  if (origin)
    free(origin);

  /* All done */
  return 0;
}
Developer ID: dhascome, Project: simgrid, Lines of code: 59
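Before moving on, here is a minimal sketch of the allocate/use/release contract that example 1 and all the snippets below rely on. As SimGrid's malloc-or-die wrapper, xbt_malloc is generally expected to abort on allocation failure rather than return NULL, which is why most examples skip the NULL check; the declaration is assumed to live in xbt/sysdep.h as in SimGrid's xbt headers:

#include "xbt/sysdep.h"   /* where SimGrid declares xbt_malloc/xbt_free */

void basic_pattern(void)
{
  /* no NULL check: xbt_malloc is a malloc-or-die wrapper */
  int *buf = (int *) xbt_malloc(128 * sizeof(int));
  for (int i = 0; i < 128; i++)
    buf[i] = i;
  xbt_free(buf);   /* release with the matching xbt function */
}

Some snippets below pair xbt_malloc with plain free instead of xbt_free; both appear in the wild since xbt_malloc wraps the system allocator.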
Example 2: main

int main(int argc, char *argv[])
{
  int rank, size;
  int i;
  int *sb;
  int *rb;
  int status;
  int mult = 1;

  MPI_Init(&argc, &argv);
  int maxlen = argc >= 2 ? atoi(argv[1]) : 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  if (maxlen > 1)
    mult = size;
  sb = (int *) xbt_malloc(size * maxlen * sizeof(int));
  rb = (int *) xbt_malloc(size * maxlen * sizeof(int));

  for (i = 0; i < size * maxlen; ++i) {
    sb[i] = rank * size + i;
    rb[i] = 0;
  }

  printf("[%d] sndbuf=[", rank);
  for (i = 0; i < size * mult; i++)
    printf("%d ", sb[i]);
  printf("]\n");

  status = MPI_Allreduce(sb, rb, size * maxlen, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  printf("[%d] rcvbuf=[", rank);
  for (i = 0; i < size * mult; i++)  /* do not print everything */
    printf("%d ", rb[i]);
  printf("]\n");

  if (rank == 0) {
    if (status != MPI_SUCCESS) {
      printf("allreduce returned %d\n", status);  /* message fixed: this calls MPI_Allreduce, not all_to_all */
      fflush(stdout);
    }
  }
  free(sb);
  free(rb);
  MPI_Finalize();
  return (EXIT_SUCCESS);
}
Developer ID: R7R8, Project: simgrid, Lines of code: 46
Example 3: main

/**
 * Main function
 * Create the platform, list the available hosts and give them some work
 */
int main(int argc, char **argv)
{
  unsigned long seed[] = {134, 233445, 865, 2634, 424242, 876543};
  int connected;
  int max_tries = 10;

  // MSG initialization
  MSG_init(&argc, argv);

  // Set up the seed for the platform generation
  platf_random_seed(seed);

  XBT_INFO("creating nodes...");
  platf_graph_uniform(50);

  do {
    max_tries--;
    XBT_INFO("creating links...");
    platf_graph_clear_links();
    platf_graph_interconnect_uniform(0.07); // Unrealistic, but simple
    XBT_INFO("done. Check connectedness...");
    connected = platf_graph_is_connected();
    XBT_INFO("Is it connected : %s", connected ? "yes" : (max_tries ? "no, retrying" : "no"));
  } while (!connected && max_tries);

  if (!connected && !max_tries) {
    xbt_die("Impossible to connect the graph, aborting.");
  }

  XBT_INFO("registering callbacks...");
  platf_graph_promoter(promoter_1);
  platf_graph_labeler(labeler_1);

  XBT_INFO("promoting...");   /* typo fixed: "protmoting" */
  platf_do_promote();

  XBT_INFO("labeling...");
  platf_do_label();

  XBT_INFO("Putting it in surf...");
  platf_generate();

  XBT_INFO("Let's get the available hosts and dispatch work:");

  unsigned int i;
  msg_host_t host = NULL;
  msg_host_t host_master = NULL;
  xbt_dynar_t host_dynar = MSG_hosts_as_dynar();
  char** hostname_list = xbt_malloc(sizeof(char*) * xbt_dynar_length(host_dynar));

  xbt_dynar_foreach(host_dynar, i, host) {
    MSG_process_create("slave", slave, NULL, host);
    if (i == 0) {
      // The first node will also be the master
      XBT_INFO("%s will be the master", MSG_host_get_name(host));
      host_master = host;
    }
    hostname_list[i] = (char*) MSG_host_get_name(host);
  }
Developer ID: apargupta, Project: simgrid, Lines of code: 62
Example 4: main

int main(int argc, char *argv[])
{
  int i, rank, size;
  int *sb, *rb;
  int status;
  int root = 0;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int count = 2;
  sb = (int *) xbt_malloc(count * sizeof(int));
  rb = (int *) xbt_malloc(count * size * sizeof(int));

  for (i = 0; i < count; ++i)
    sb[i] = rank * count + i;
  for (i = 0; i < count * size; ++i)
    rb[i] = 0;

  printf("[%d] sndbuf=[", rank);
  for (i = 0; i < count; i++)
    printf("%d ", sb[i]);
  printf("]\n");

  status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD);

  if (rank == root) {
    printf("[%d] rcvbuf=[", rank);
    for (i = 0; i < count * size; i++)
      printf("%d ", rb[i]);
    printf("]\n");
    if (status != MPI_SUCCESS) {
      printf("gather returned %d\n", status);  /* message fixed: this calls MPI_Gather, not allgather */
      fflush(stdout);
    }
  }
  free(sb);
  free(rb);
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return (EXIT_SUCCESS);
}
Developer ID: R7R8, Project: simgrid, Lines of code: 45
Example 5: smpi_get_tmp_recvbuffer

// allocate a single buffer for all recvs
void* smpi_get_tmp_recvbuffer(int size)
{
  if (!smpi_process_get_replaying())
    return xbt_malloc(size);
  if (recvbuffer_size < size) {
    recvbuffer = static_cast<char*>(xbt_realloc(recvbuffer, size));
    recvbuffer_size = size;
  }
  return recvbuffer;
}
Developer ID: fabienchaix, Project: simgrid, Lines of code: 10
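The pattern above is worth noting: a process-wide scratch buffer that only ever grows, so repeated receives during replay reuse the same allocation instead of allocating per call. A minimal self-contained sketch of the same idea, using plain realloc; get_scratch and the variable names are illustrative, not SimGrid API:

#include <stdlib.h>

/* grow-only scratch buffer: at most a few reallocations ever happen,
 * however many times callers request varying sizes */
static char  *scratch      = NULL;
static size_t scratch_size = 0;

void *get_scratch(size_t size)
{
  if (scratch_size < size) {          /* grow, never shrink */
    scratch      = (char *) realloc(scratch, size);
    scratch_size = size;
  }
  return scratch;
}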
Example 6: smpi_get_tmp_recvbuffer

// allocate a single buffer for all recvs
void* smpi_get_tmp_recvbuffer(int size)
{
  if (!smpi_process_get_replaying())
    return xbt_malloc(size);
  if (recvbuffer_size < size) {
    recvbuffer = xbt_realloc(recvbuffer, size);
    recvbuffer_size = size;
  }
  return recvbuffer;  /* bug fixed: the original snippet returned sendbuffer here */
}
Developer ID: apargupta, Project: simgrid, Lines of code: 10
Example 7: main

int main(int argc, char *argv[])
{
  int rank, size;
  int i;
  int *sb;
  int *rb;
  int status;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  sb = (int *) xbt_malloc(size * sizeof(int) * 2);
  rb = (int *) xbt_malloc(size * sizeof(int) * 2);

  for (i = 0; i < size; ++i) {
    sb[i] = rank * size + i;
    rb[i] = 0;
  }

  printf("[%d] sndbuf=[", rank);
  for (i = 0; i < size; i++)
    printf("%d ", sb[i]);
  printf("]\n");

  status = MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD);

  printf("[%d] rcvbuf=[", rank);
  for (i = 0; i < size; i++)
    printf("%d ", rb[i]);
  printf("]\n");

  if (rank == 0) {
    if (status != MPI_SUCCESS) {
      printf("all_to_all returned %d\n", status);
      fflush(stdout);
    }
  }
  free(sb);
  free(rb);
  MPI_Finalize();
  return (EXIT_SUCCESS);
}
Developer ID: FlorianPO, Project: simgrid, Lines of code: 44
Example 8: smpi_process_init

void smpi_process_init(int *argc, char ***argv)
{
  int index = -1;
  smpi_process_data_t data;
  smx_process_t proc;

  if (argc && argv) {
    proc = SIMIX_process_self();
    // FIXME: dirty cleanup method to avoid using msg cleanup functions on these processes when using MSG+SMPI
    SIMIX_process_set_cleanup_function(proc, MSG_process_cleanup_from_SIMIX);
    char* instance_id = (*argv)[1];
    int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
    index = smpi_process_index_of_smx_process(proc);

    if (!index_to_process_data) {
      index_to_process_data = (int*) xbt_malloc(SIMIX_process_count() * sizeof(int));
    }

    if (smpi_privatize_global_variables) {
      /* Now using the segment index of the process */
      index = proc->segment_index;
      /* Done at the process's creation */
      SMPI_switch_data_segment(index);
    }

    MPI_Comm* temp_comm_world;
    xbt_bar_t temp_bar;
    smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
    data = smpi_process_remote_data(index);
    data->comm_world = temp_comm_world;
    if (temp_bar != NULL)
      data->finalization_barrier = temp_bar;
    data->index = index;
    data->instance_id = instance_id;
    data->replaying = 0;
    // xbt_free(simcall_process_get_data(proc));

    simdata_process_t simdata = static_cast<simdata_process_t>(simcall_process_get_data(proc));
    simdata->data = data;

    if (*argc > 3) {
      free((*argv)[1]);
      memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
      (*argv)[(*argc) - 1] = NULL;
      (*argv)[(*argc) - 2] = NULL;
    }
    (*argc) -= 2;
    data->argc = argc;
    data->argv = argv;
    // set the process attached to the mailbox
    simcall_mbox_set_receiver(data->mailbox_small, proc);
    XBT_DEBUG("<%d> New process in the game: %p", index, proc);
  }
  xbt_assert(smpi_process_data(),
             "smpi_process_data() returned NULL. You probably gave a NULL parameter to MPI_Init. Although it's required by "
             "MPI-2, this is currently not supported by SMPI.");
}
Developer ID: krytarowski, Project: simgrid, Lines of code: 56
Example 9: xbt_matrix_new

/** \brief constructor */
xbt_matrix_t xbt_matrix_new(int lines, int rows, const unsigned long elmsize,
                            void_f_pvoid_t const free_f)
{
  xbt_matrix_t res = xbt_new(s_xbt_matrix_t, 1);
  res->lines = lines;
  res->rows = rows;
  res->elmsize = elmsize;
  res->free_f = free_f;
  res->data = xbt_malloc(elmsize * lines * rows);
  return res;
}
Developer ID: R7R8, Project: simgrid, Lines of code: 11
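For orientation, a hedged usage sketch: allocating a small matrix of doubles and releasing it. The header name and xbt_matrix_free are assumptions here, taken to be the matching declarations and destructor from the same xbt module:

#include "xbt/matrix.h"

void matrix_demo(void)
{
  /* a 3x4 matrix of doubles, no per-element destructor */
  xbt_matrix_t m = xbt_matrix_new(3, 4, sizeof(double), NULL);
  /* m->data points to 3 * 4 * sizeof(double) freshly allocated bytes */
  xbt_matrix_free(m);   /* assumed matching destructor */
}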
Example 10: xbt_str_split_str

/**
 * \brief This function splits a string, using another string as the separator.
 * For example, A!!B!!C split on !! will return the dynar {A,B,C}.
 * \return A dynar containing the string tokens
 */
xbt_dynar_t xbt_str_split_str(const char *s, const char *sep)
{
  xbt_dynar_t res = xbt_dynar_new(sizeof(char *), &xbt_free_ref);
  int done;
  const char *p, *q;

  p = q = s;
  done = 0;

  if (s[0] == '\0')
    return res;
  if (sep[0] == '\0') {
    s = xbt_strdup(s);
    xbt_dynar_push(res, &s);
    return res;
  }

  while (!done) {
    char *to_push;
    int v = 0;
    // get the start of the first occurrence of the substring
    q = strstr(p, sep);
    // if the substring was not found, add the entire remaining string
    if (NULL == q) {
      v = strlen(p);
      to_push = xbt_malloc(v + 1);
      memcpy(to_push, p, v);
      to_push[v] = '\0';
      xbt_dynar_push(res, &to_push);
      done = 1;
    } else {
      // get the occurrence
      to_push = xbt_malloc(q - p + 1);
      memcpy(to_push, p, q - p);
      // add the string terminator
      to_push[q - p] = '\0';
      xbt_dynar_push(res, &to_push);
      p = q + strlen(sep);
    }
  }
  return res;
}
Developer ID: FlorianPO, Project: simgrid, Lines of code: 47
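A short usage sketch matching the doc comment above; the dynar iteration macro, destructor, and header are assumed to be the usual xbt ones:

#include "xbt/str.h"
#include <stdio.h>

void split_demo(void)
{
  xbt_dynar_t parts = xbt_str_split_str("A!!B!!C", "!!");
  unsigned i;
  char *tok;
  xbt_dynar_foreach(parts, i, tok)
    printf("token %u: %s\n", i, tok);   /* prints A, B, C */
  xbt_dynar_free(&parts);               /* frees the tokens too, via xbt_free_ref */
}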
Example 11: remplace

static char *remplace(char *value, const char **src_list, int src_size,
                      const char **dst_list, int dst_size)
{
  char result[BUFFER_SIZE];
  int i_res = 0;
  int i = 0;

  while (value[i]) {
    if (value[i] == '$') {
      i++;                      /* skip the '$' */
      if (value[i] < '0' || value[i] > '9')
        xbt_die("bad string parameter, no number indication, at offset: "
                "%d (\"%s\")", i, value);

      /* solve the number */
      int number = value[i++] - '0';
      while (value[i] >= '0' && value[i] <= '9')
        number = 10 * number + (value[i++] - '0');

      /* solve the indication */
      const char **param_list;
      _XBT_GNUC_UNUSED int param_size;
      if (value[i] == 's' && value[i + 1] == 'r' && value[i + 2] == 'c') {
        param_list = src_list;
        param_size = src_size;
      } else if (value[i] == 'd' && value[i + 1] == 's' && value[i + 2] == 't') {
        param_list = dst_list;
        param_size = dst_size;
      } else {
        xbt_die("bad string parameter, support only \"src\" and \"dst\", "
                "at offset: %d (\"%s\")", i, value);
      }
      i += 3;

      xbt_assert(number < param_size,
                 "bad string parameter, not enough length param_size, "
                 "at offset: %d (\"%s\") %d %d", i, value, param_size, number);

      const char *param = param_list[number];
      int j = 0;
      while (param[j] && i_res < BUFFER_SIZE)
        result[i_res++] = param[j++];
    } else {
      result[i_res++] = value[i++];     /* next char */
    }

    if (i_res >= BUFFER_SIZE)
      xbt_die("solving string \"%s\", small buffer size (%d)", value, BUFFER_SIZE);
  }
  result[i_res++] = '\0';
  char *res = xbt_malloc(i_res);
  return memcpy(res, result, i_res);
}
Developer ID: Shurakai, Project: SimGrid, Lines of code: 54
Example 12: array_new

static void array_new(unsigned **a, xbt_dynar_t *data)
{
  int i;
  *a = xbt_malloc(ARRAY_SIZE * sizeof **a);
  *data = xbt_dynar_new(sizeof *a, NULL);
  xbt_dynar_shrink(*data, ARRAY_SIZE);
  for (i = 0; i < ARRAY_SIZE; i++) {
    (*a)[i] = i;
    xbt_dynar_push_as(*data, void*, &(*a)[i]);
  }
}
Developer ID: Shurakai, Project: SimGrid, Lines of code: 11
Example 13: _xbt_log_event_log

void _xbt_log_event_log(xbt_log_event_t ev, const char *fmt, ...)
{
  xbt_log_category_t cat = ev->cat;

  xbt_assert(ev->priority >= 0, "Negative logging priority naturally forbidden");
  xbt_assert(ev->priority < sizeof(xbt_log_priority_names),
             "Priority %d is greater than the biggest allowed value", ev->priority);

  do {
    xbt_log_appender_t appender = cat->appender;

    if (!appender)
      continue;                 /* No appender, try next */

    xbt_assert(cat->layout, "No valid layout for the appender of category %s", cat->name);

    /* First, try with a static buffer */
    if (XBT_LOG_STATIC_BUFFER_SIZE) {
      char buff[XBT_LOG_STATIC_BUFFER_SIZE];
      int done;
      ev->buffer = buff;
      ev->buffer_size = sizeof buff;
      va_start(ev->ap, fmt);
      done = cat->layout->do_layout(cat->layout, ev, fmt);
      va_end(ev->ap);
      if (done) {
        appender->do_append(appender, buff);
        continue;               /* Ok, that worked: go next */
      }
    }

    /* The static buffer was too small, use a dynamically expanded one */
    ev->buffer_size = XBT_LOG_DYNAMIC_BUFFER_SIZE;
    ev->buffer = xbt_malloc(ev->buffer_size);
    while (1) {
      int done;
      va_start(ev->ap, fmt);
      done = cat->layout->do_layout(cat->layout, ev, fmt);
      va_end(ev->ap);
      if (done)
        break;                  /* Got it */
      ev->buffer_size *= 2;
      ev->buffer = xbt_realloc(ev->buffer, ev->buffer_size);
    }
    appender->do_append(appender, ev->buffer);
    xbt_free(ev->buffer);

  } while (cat->additivity && (cat = cat->parent, 1));
}
Developer ID: Tien-Dat, Project: simgrid, Lines of code: 52
Example 14: smpi_coll_tuned_allgather_spreading_simple

/*****************************************************************************
 * Function: allgather_spreading_simple
 * return: int
 * inputs:
 *   send_buff: send input buffer
 *   send_count: number of elements to send
 *   send_type: data type of elements being sent
 *   recv_buff: receive output buffer
 *   recv_count: number of elements to be received
 *   recv_type: data type of elements being received
 *   comm: communicator
 * Descrp: Let i -> j denote the communication from node i to node j. The
 *         order of communications for node i is i -> i + 1, i -> i + 2, ...,
 *         i -> (i + p - 1) % P.
 *
 * Author: Ahmad Faraj
 ****************************************************************************/
int smpi_coll_tuned_allgather_spreading_simple(void *send_buff, int send_count,
                                               MPI_Datatype send_type,
                                               void *recv_buff, int recv_count,
                                               MPI_Datatype recv_type,
                                               MPI_Comm comm)
{
  MPI_Request *reqs, *req_ptr;
  MPI_Aint extent;
  int i, src, dst, rank, num_procs, num_reqs;
  int tag = COLL_TAG_ALLGATHER;
  MPI_Status status;
  char *recv_ptr = (char *) recv_buff;

  rank = smpi_comm_rank(comm);
  num_procs = smpi_comm_size(comm);
  extent = smpi_datatype_get_extent(send_type);

  num_reqs = (2 * num_procs) - 2;
  reqs = (MPI_Request *) xbt_malloc(num_reqs * sizeof(MPI_Request));
  if (!reqs) {
    printf("allgather-spreading-simple.c:40: cannot allocate memory\n");
    MPI_Finalize();
    exit(0);
  }

  req_ptr = reqs;
  smpi_mpi_sendrecv(send_buff, send_count, send_type, rank, tag,
                    (char *) recv_buff + rank * recv_count * extent, recv_count,
                    recv_type, rank, tag, comm, &status);

  for (i = 0; i < num_procs; i++) {
    src = (rank + i) % num_procs;
    if (src == rank)
      continue;
    *(req_ptr++) = smpi_mpi_irecv(recv_ptr + src * recv_count * extent,
                                  recv_count, recv_type, src, tag, comm);
  }

  for (i = 0; i < num_procs; i++) {
    dst = (rank + i) % num_procs;
    if (dst == rank)
      continue;
    *(req_ptr++) = smpi_mpi_isend(send_buff, send_count, send_type, dst, tag, comm);
  }

  smpi_mpi_waitall(num_reqs, reqs, MPI_STATUSES_IGNORE);
  free(reqs);

  return MPI_SUCCESS;
}
Developer ID: Julio-Anjos, Project: simgrid, Lines of code: 68
Example 15: luaopen_simgrid

/**
 * \brief Opens the simgrid Lua module.
 *
 * This function is called automatically by the Lua interpreter when some
 * Lua code requires the "simgrid" module.
 *
 * \param L the Lua state
 */
int luaopen_simgrid(lua_State *L)
{
  XBT_DEBUG("luaopen_simgrid *****");

  /* Get the command line arguments from the lua interpreter */
  char **argv = xbt_malloc(sizeof(char *) * LUA_MAX_ARGS_COUNT);
  int argc = 1;
  argv[0] = (char *) "/usr/bin/lua"; /* Lie on argv[0] so that the stack-dumping facilities find
                                        the right binary. FIXME: what if lua is not in that location? */

  lua_getglobal(L, "arg");
  /* if arg is a null value, it means lua is used only as a script to init the platform;
   * else it should be a table, and then we take arg into consideration */
  if (lua_istable(L, -1)) {
    int done = 0;
    while (!done) {
      argc++;
      lua_pushinteger(L, argc - 2);
      lua_gettable(L, -2);
      if (lua_isnil(L, -1)) {
        done = 1;
      } else {
        xbt_assert(lua_isstring(L, -1),
                   "argv[%d] got from lua is no string", argc - 1);
        xbt_assert(argc < LUA_MAX_ARGS_COUNT,
                   "Too many arguments, please increase LUA_MAX_ARGS_COUNT in %s before recompiling SimGrid if you insist on having more than %d args on command line",
                   __FILE__, LUA_MAX_ARGS_COUNT - 1);
        argv[argc - 1] = (char *) luaL_checkstring(L, -1);
        lua_pop(L, 1);
        XBT_DEBUG("Got command line argument %s from lua", argv[argc - 1]);
      }
    }
    argv[argc--] = NULL;

    /* Initialize the MSG core */
    MSG_init(&argc, argv);
    MSG_process_set_data_cleanup((void_f_pvoid_t) lua_close);
    XBT_DEBUG("Still %d arguments on command line", argc); // FIXME: update lua's arg table to reflect the changes from SimGrid
  }

  /* Keep the context mechanism informed of our lua world today */
  sglua_maestro_state = L;

  /* initialize access to my tables by children Lua states */
  lua_newtable(L);
  lua_setfield(L, LUA_REGISTRYINDEX, "simgrid.maestro_tables");

  sglua_register_c_functions(L);

  return 1;
}
Developer ID: dhascome, Project: simgrid, Lines of code: 59
Example 16: PMPI_Type_create_hindexed_block

int PMPI_Type_create_hindexed_block(int count, int blocklength, const MPI_Aint* indices,
                                    MPI_Datatype old_type, MPI_Datatype* new_type)
{
  if (old_type == MPI_DATATYPE_NULL) {
    return MPI_ERR_TYPE;
  } else if (count < 0) {
    return MPI_ERR_COUNT;
  } else {
    /* one entry per block; the original snippet allocated blocklength*count
     * ints, which under-allocates when blocklength is 0 */
    int* blocklens = (int*) xbt_malloc(count * sizeof(int));
    for (int i = 0; i < count; i++)
      blocklens[i] = blocklength;
    int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
    xbt_free(blocklens);
    return retval;
  }
}
Developer ID: simgrid, Project: simgrid, Lines of code: 15
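For context, a hedged sketch of the standard MPI-3 call this PMPI entry point implements, building a datatype of three blocks of four ints at fixed byte offsets (the displacement values are illustrative):

#include <mpi.h>

void build_blocks_type(void)
{
  /* three blocks of four MPI_INTs at byte displacements 0, 64, 160 */
  MPI_Aint disps[3] = {0, 64, 160};
  MPI_Datatype blocks;
  MPI_Type_create_hindexed_block(3, 4, disps, MPI_INT, &blocks);
  MPI_Type_commit(&blocks);
  /* ... use the datatype in sends/receives ... */
  MPI_Type_free(&blocks);
}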
Example 17: PMPI_Win_allocate

int PMPI_Win_allocate(MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm,
                      void *base, MPI_Win *win)
{
  int retval = 0;
  smpi_bench_end();
  if (comm == MPI_COMM_NULL) {
    retval = MPI_ERR_COMM;
  } else if (disp_unit <= 0 || size < 0) {
    retval = MPI_ERR_OTHER;
  } else {
    void* ptr = xbt_malloc(size);
    if (ptr == nullptr)
      return MPI_ERR_NO_MEM;
    *static_cast<void**>(base) = ptr;
    *win = new simgrid::smpi::Win(ptr, size, disp_unit, info, comm, 1);
    retval = MPI_SUCCESS;
  }
  smpi_bench_begin();
  return retval;
}
Developer ID: simgrid, Project: simgrid, Lines of code: 18
Example 18: RngStream_CopyStream

RngStream RngStream_CopyStream (const RngStream src)
{
  RngStream g;

  if (src == NULL) {
    printf("RngStream_CopyStream: 'src' not initialized\n\n");
    exit(EXIT_FAILURE);
  }
  g = (RngStream) xbt_malloc(sizeof(struct RngStream_InfoState));
  if (g == NULL) {
    printf("RngStream_CopyStream: No more memory\n\n");
    exit(EXIT_FAILURE);
  }
  memcpy((void*) g, (void*) src, sizeof(struct RngStream_InfoState));

  return g;
}
Developer ID: FlorianPO, Project: simgrid, Lines of code: 18
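A hedged usage sketch based on the standard RngStream API (RngStream_CreateStream and RngStream_RandU01 are assumed to come from the same library, as in L'Ecuyer's original package): copying a stream duplicates its full internal state, so both streams then produce identical sequences:

#include "RngStream.h"
#include <stdio.h>

void copy_demo(void)
{
  RngStream s1 = RngStream_CreateStream("master");
  RngStream s2 = RngStream_CopyStream(s1);   /* byte-for-byte copy of s1's state */
  double a = RngStream_RandU01(s1);
  double b = RngStream_RandU01(s2);
  printf("%g == %g\n", a, b);                /* both draws are identical */
}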
Example 19: smpi_coll_tuned_scatter_ompi

int smpi_coll_tuned_scatter_ompi(void *sbuf, int scount, MPI_Datatype sdtype,
                                 void* rbuf, int rcount, MPI_Datatype rdtype,
                                 int root, MPI_Comm comm)
{
  const size_t small_block_size = 300;
  const int small_comm_size = 10;
  int communicator_size, rank;
  size_t dsize, block_size;

  XBT_DEBUG("smpi_coll_tuned_scatter_ompi");

  communicator_size = smpi_comm_size(comm);
  rank = smpi_comm_rank(comm);

  // Determine block size
  if (root == rank) {
    dsize = smpi_datatype_size(sdtype);
    block_size = dsize * scount;
  } else {
    dsize = smpi_datatype_size(rdtype);
    block_size = dsize * rcount;
  }

  if ((communicator_size > small_comm_size) && (block_size < small_block_size)) {
    if (rank != root) {
      sbuf = xbt_malloc(rcount * smpi_datatype_get_extent(rdtype));
      scount = rcount;
      sdtype = rdtype;
    }
    int ret = smpi_coll_tuned_scatter_ompi_binomial(sbuf, scount, sdtype,
                                                    rbuf, rcount, rdtype,
                                                    root, comm);
    if (rank != root) {
      xbt_free(sbuf);
    }
    return ret;
  }
  return smpi_coll_tuned_scatter_ompi_basic_linear(sbuf, scount, sdtype,
                                                   rbuf, rcount, rdtype,
                                                   root, comm);
}
Developer ID: cemsbr, Project: simgrid, Lines of code: 44
Example 20: xbt_str_split

xbt_dynar_t xbt_str_split(const char *s, const char *sep)
{
  xbt_dynar_t res = xbt_dynar_new(sizeof(char *), &xbt_free_ref);
  const char *p, *q;
  int done;
  const char *sep_dflt = " \t\n\r\x0B";
  char is_sep[256] = { 1, 0 };

  /* check what are the separators */
  memset(is_sep, 0, sizeof(is_sep));
  if (!sep) {
    while (*sep_dflt)
      is_sep[(unsigned char) *sep_dflt++] = 1;
  } else {
    while (*sep)
      is_sep[(unsigned char) *sep++] = 1;
  }
  is_sep[0] = 1;                /* End of string is also a separator */

  /* Do the job */
  p = q = s;
  done = 0;

  if (s[0] == '\0')
    return res;

  while (!done) {
    char *topush;
    while (!is_sep[(unsigned char) *q]) {
      q++;
    }
    if (*q == '\0')
      done = 1;

    topush = xbt_malloc(q - p + 1);
    memcpy(topush, p, q - p);
    topush[q - p] = '\0';
    xbt_dynar_push(res, &topush);
    p = ++q;
  }

  return res;
}
Developer ID: FlorianPO, Project: simgrid, Lines of code: 43
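A brief hedged usage note: passing NULL as sep selects the default whitespace separator set declared above, so tokenizing a command line is a one-liner (the header name is an assumption, matching where the xbt string helpers usually live):

#include "xbt/str.h"

void tokenize_demo(void)
{
  /* NULL selects the default separators " \t\n\r\x0B" set up above */
  xbt_dynar_t words = xbt_str_split("ping -c 4 localhost", NULL);
  /* words now holds {"ping", "-c", "4", "localhost"} */
  xbt_dynar_free(&words);
}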
Example 21: vasprintf

int vasprintf(char **ptr, const char *fmt, va_list ap)
{
  size_t str_m;
  int str_l;

  *ptr = NULL;
  {
    va_list ap2;
    va_copy(ap2, ap);           /* don't consume the original ap, we'll need it again */
    str_l = vsnprintf(NULL, (size_t) 0, fmt, ap2);  /* get the required size */
    va_end(ap2);
  }
  xbt_assert(str_l >= 0);       /* possible integer overflow if str_m > INT_MAX */
  *ptr = (char *) xbt_malloc(str_m = (size_t) str_l + 1);
  int str_l2 = vsnprintf(*ptr, str_m, fmt, ap);
  assert(str_l2 == str_l);
  return str_l;
}
Developer ID: R7R8, Project: simgrid, Lines of code: 20
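A hedged usage sketch: the classic asprintf-style wrapper built on top of the vasprintf shown above. The wrapper name is illustrative, not a SimGrid function; the caller must free the returned string:

#include <stdarg.h>
#include <stdlib.h>

int my_asprintf(char **ptr, const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  int len = vasprintf(ptr, fmt, ap);  /* the function shown above */
  va_end(ap);
  return len;                         /* caller free()s *ptr */
}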
Example 22: smpi_comm_copy_buffer_callback

void smpi_comm_copy_buffer_callback(smx_synchro_t comm, void *buff, size_t buff_size)
{
  XBT_DEBUG("Copy the data over");
  void* tmpbuff = buff;

  if ((smpi_privatize_global_variables) && ((char*)buff >= smpi_start_data_exe)
      && ((char*)buff < smpi_start_data_exe + smpi_size_data_exe)) {
    XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
    smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->comm.src_proc))->data))->index);
    tmpbuff = (void*)xbt_malloc(buff_size);
    memcpy(tmpbuff, buff, buff_size);
  }

  if ((smpi_privatize_global_variables) && ((char*)comm->comm.dst_buff >= smpi_start_data_exe)
      && ((char*)comm->comm.dst_buff < smpi_start_data_exe + smpi_size_data_exe)) {
    XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
    smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->comm.dst_proc))->data))->index);
  }

  memcpy(comm->comm.dst_buff, tmpbuff, buff_size);
  if (comm->comm.detached) {
    // if this is a detached send, the source buffer was duplicated by SMPI
    // sender to make the original buffer available to the application ASAP
    xbt_free(buff);
    // It seems that the request is used after this call; it should
    // be freed somewhere else, but where???
    // xbt_free(comm->comm.src_data); // inside SMPI the request is kept
    // inside the user data and should be freed there
    comm->comm.src_buff = NULL;
  }

  if (tmpbuff != buff)
    xbt_free(tmpbuff);
}
Developer ID: RockyMeadow, Project: simgrid, Lines of code: 41
Example 23: test_parmap_basic

static int test_parmap_basic(e_xbt_parmap_mode_t mode)
{
  int ret = 0;
  unsigned num_workers;

  for (num_workers = 1; num_workers <= 16; num_workers *= 2) {
    const unsigned len = 1033;
    const unsigned num = 5;
    unsigned *a;
    xbt_dynar_t data;
    xbt_parmap_t parmap;
    unsigned i;

    parmap = xbt_parmap_new(num_workers, mode);

    a = xbt_malloc(len * sizeof *a);
    data = xbt_dynar_new(sizeof a, NULL);
    for (i = 0; i < len; i++) {
      a[i] = i;
      xbt_dynar_push_as(data, void *, &a[i]);
    }

    for (i = 0; i < num; i++)
      xbt_parmap_apply(parmap, fun_double, data);

    for (i = 0; i < len; i++) {
      unsigned expected = (1U << num) * (i + 1) - 1;
      if (a[i] != expected) {
        XBT_CRITICAL("with %u threads, a[%u]: expected %u, got %u",
                     num_workers, i, expected, a[i]);
        ret = 1;
        break;
      }
    }

    xbt_dynar_free(&data);
    xbt_free(a);
    xbt_parmap_destroy(parmap);
  }
  return ret;
}
Developer ID: dhascome, Project: simgrid, Lines of code: 40
Example 24: SMPI_app_instance_register

/** \ingroup smpi_simulation
 * \brief Registers a running instance of an MPI program.
 *
 * FIXME: remove MSG from the loop at some point.
 * \param name the reference name of the function.
 * \param code the main mpi function (must have an int ..(int argc, char *argv[]) prototype)
 * \param num_processes the size of the instance we want to deploy
 */
void SMPI_app_instance_register(const char *name, xbt_main_func_t code, int num_processes)
{
  SIMIX_function_register(name, code);

  s_smpi_mpi_instance_t* instance = (s_smpi_mpi_instance_t*) xbt_malloc(sizeof(s_smpi_mpi_instance_t));

  instance->name = name;
  instance->size = num_processes;
  instance->present_processes = 0;
  instance->index = process_count;
  instance->comm_world = MPI_COMM_NULL;
  instance->finalization_barrier = xbt_barrier_init(num_processes);

  process_count += num_processes;

  if (smpi_instances == nullptr) {
    smpi_instances = xbt_dict_new_homogeneous(xbt_free_f);
  }

  xbt_dict_set(smpi_instances, name, (void*)instance, nullptr);
  return;
}
Developer ID: dindon-sournois, Project: simgrid, Lines of code: 30
Example 25: new_xbt_graph_edge

xbt_edge_t new_xbt_graph_edge(xbt_graph_t graph, xbt_node_t s, xbt_node_t d, xbt_dict_t edges)
{
  const char *sn = instr_node_name(s);
  const char *dn = instr_node_name(d);
  int len = strlen(sn) + strlen(dn) + 1;
  char *name = (char *) xbt_malloc(len * sizeof(char));

  snprintf(name, len, "%s%s", sn, dn);
  xbt_edge_t ret = (xbt_edge_t) xbt_dict_get_or_null(edges, name);
  if (ret == nullptr) {
    snprintf(name, len, "%s%s", dn, sn);
    ret = (xbt_edge_t) xbt_dict_get_or_null(edges, name);
  }
  if (ret == nullptr) {
    ret = xbt_graph_new_edge(graph, s, d, nullptr);
    xbt_dict_set(edges, name, ret, nullptr);
  }
  free(name);
  return ret;
}
Developer ID: dindon-sournois, Project: simgrid, Lines of code: 22
Example 26: test_parmap_extended

static int test_parmap_extended(e_xbt_parmap_mode_t mode)
{
  int ret = 0;
  unsigned num_workers;

  for (num_workers = 1; num_workers <= 16; num_workers *= 2) {
    const unsigned len = 2 * num_workers;
    uintptr_t *a;
    xbt_parmap_t parmap;
    xbt_dynar_t data;
    unsigned i;
    unsigned count;

    parmap = xbt_parmap_new(num_workers, mode);

    a = xbt_malloc(len * sizeof *a);
    data = xbt_dynar_new(sizeof a, NULL);
    for (i = 0; i < len; i++)
      xbt_dynar_push_as(data, void *, &a[i]);

    xbt_parmap_apply(parmap, fun_get_id, data);

    qsort(a, len, sizeof a[0], fun_compare);
    count = 1;
    for (i = 1; i < len; i++)
      if (a[i] != a[i - 1])
        count++;
    if (count != num_workers) {
      XBT_CRITICAL("only %u/%u threads did some work", count, num_workers);
      ret = 1;
    }

    xbt_dynar_free(&data);
    xbt_free(a);
    xbt_parmap_destroy(parmap);
  }
  return ret;
}
Developer ID: dhascome, Project: simgrid, Lines of code: 38
Example 27: smpi_coll_tuned_gather_mvapich2_two_level

//......... part of the code omitted .........

    int *displs = NULL;
    int *recvcnts = NULL;
    int *node_sizes;
    int i = 0;
    /* Node leaders have all the data. But, different nodes can have
     * different number of processes. Do a Gather first to get the
     * buffer lengths at each leader, followed by a Gatherv to move
     * the actual data */

    if (leader_comm_rank == leader_root && root != leader_of_root) {
      /* The root of the Gather operation is not a node-level
       * leader and this process's rank in the leader_comm
       * is the same as leader_root */
      if (rank == root) {
        leader_gather_buf = smpi_get_tmp_recvbuffer(recvcnt * MAX(recvtype_extent, recvtype_true_extent) * comm_size);
      } else {
        leader_gather_buf = smpi_get_tmp_sendbuffer(sendcnt * MAX(sendtype_extent, sendtype_true_extent) * comm_size);
      }
      if (leader_gather_buf == NULL) {
        mpi_errno = MPI_ERR_OTHER;
        return mpi_errno;
      }
    }

    node_sizes = smpi_comm_get_non_uniform_map(comm);

    if (leader_comm_rank == leader_root) {
      displs = xbt_malloc(sizeof(int) * leader_comm_size);
      recvcnts = xbt_malloc(sizeof(int) * leader_comm_size);
      if (!displs || !recvcnts) {
        mpi_errno = MPI_ERR_OTHER;
        return mpi_errno;
      }
    }

    if (root == leader_of_root) {
      /* The root of the gather operation is also the node
       * leader. Receive into recvbuf and we are done */
      if (leader_comm_rank == leader_root) {
        recvcnts[0] = node_sizes[0] * recvcnt;
        displs[0] = 0;

        for (i = 1; i < leader_comm_size; i++) {
          displs[i] = displs[i - 1] + node_sizes[i - 1] * recvcnt;
          recvcnts[i] = node_sizes[i] * recvcnt;
        }
      }
      smpi_mpi_gatherv(tmp_buf, local_size * nbytes, MPI_BYTE,
                       recvbuf, recvcnts, displs, recvtype,
                       leader_root, leader_comm);
    } else {
      /* The root of the gather operation is not the node leader.
       * Receive into leader_gather_buf and then send to the root */
      if (leader_comm_rank == leader_root) {
        recvcnts[0] = node_sizes[0] * nbytes;
        displs[0] = 0;
Developer ID: FlorianPO, Project: simgrid, Lines of code: 66
Example 28: smpi_coll_tuned_bcast_arrival_pattern_aware_wait

/* Non-topology-specific pipelined linear-bcast function */
int smpi_coll_tuned_bcast_arrival_pattern_aware_wait(void *buf, int count,
                                                     MPI_Datatype datatype,
                                                     int root, MPI_Comm comm)
{
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;
  MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int rank, size;
  int i, j, k;
  int tag = -COLL_TAG_BCAST;
  int will_send[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int sent_count;
  int header_index;
  int flag_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int already_sent[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int header_buf[BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE];
  char temp_buf[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int max_node = BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE;
  int header_size = BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE;

  MPI_Aint extent;
  extent = smpi_datatype_get_extent(datatype);

  /* source and destination */
  int to, from;

  rank = smpi_comm_rank(MPI_COMM_WORLD);
  size = smpi_comm_size(MPI_COMM_WORLD);

  /* segment is segment size in number of elements (not bytes) */
  int segment = bcast_arrival_pattern_aware_wait_segment_size_in_byte / extent;

  /* pipeline length */
  int pipe_length = count / segment;

  /* used as the buffer offset for sending and receiving data = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size =>
     the small remainder will be done with the native implementation */
  int remainder = count % segment;

  /* if root is not zero, send to rank zero first.
     This can be modified to make it faster by using logical src, dst. */
  if (root != 0) {
    if (rank == root) {
      smpi_mpi_send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      smpi_mpi_recv(buf, count, datatype, root, tag, comm, &status);
    }
  }

  /* value == 0 means root has not sent data (or header) to the node yet */
  for (i = 0; i < max_node; i++) {
    already_sent[i] = 0;
  }

  /* when a message is smaller than a block size => no pipeline */
  if (count <= segment) {
    segment = count;
    pipe_length = 1;
  }

  /* start pipeline bcast */
  send_request_array = (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  recv_request_array = (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  send_status_array = (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
  recv_status_array = (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));

  /* root */
  if (rank == 0) {
    sent_count = 0;
    int iteration = 0;

    for (i = 0; i < BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE; i++)
      will_send[i] = 0;
    while (sent_count < (size - 1)) {
      iteration++;
//......... part of the code omitted .........
Developer ID: ricardojrdez, Project: simgrid, Lines of code: 101
Example 29: Coll_allgather_mvapich2_smp::allgather

int Coll_allgather_mvapich2_smp::allgather(void *sendbuf, int sendcnt, MPI_Datatype sendtype,
                                           void *recvbuf, int recvcnt, MPI_Datatype recvtype,
                                           MPI_Comm comm)
{
  int rank, size;
  int local_rank, local_size;
  int leader_comm_size = 0;
  int mpi_errno = MPI_SUCCESS;
  MPI_Aint recvtype_extent = 0;  /* Datatype extent */
  MPI_Comm shmem_comm, leader_comm;

  if (comm->get_leaders_comm() == MPI_COMM_NULL) {
    comm->init_smp();
  }

  if (not comm->is_uniform() || not comm->is_blocked())
    THROWF(arg_error, 0, "allgather MVAPICH2 smp algorithm can't be used with irregular deployment. Please ensure that processes deployed on the same node are contiguous and that each node has the same number of processes");

  if (recvcnt == 0) {
    return MPI_SUCCESS;
  }

  rank = comm->rank();
  size = comm->size();

  /* extract the rank,size information for the intra-node communicator */
  recvtype_extent = recvtype->get_extent();

  shmem_comm = comm->get_intra_comm();
  local_rank = shmem_comm->rank();
  local_size = shmem_comm->size();

  if (local_rank == 0) {
    /* Node leader. Extract the rank, size information for the leader communicator */
    leader_comm = comm->get_leaders_comm();
    if (leader_comm == MPI_COMM_NULL) {
      leader_comm = MPI_COMM_WORLD;
    }
    leader_comm_size = leader_comm->size();
  }

  /* If there is just one node, after the gather itself,
   * root has all the data and it can do the bcast */
  if (local_rank == 0) {
    mpi_errno = Colls::gather(sendbuf, sendcnt, sendtype,
                              (void*)((char*)recvbuf + (rank * recvcnt * recvtype_extent)),
                              recvcnt, recvtype, 0, shmem_comm);
  } else {
    /* Since in allgather all the processes could have
     * their own data in place */
    if (sendbuf == MPI_IN_PLACE) {
      mpi_errno = Colls::gather((void*)((char*)recvbuf + (rank * recvcnt * recvtype_extent)),
                                recvcnt, recvtype, recvbuf, recvcnt, recvtype, 0, shmem_comm);
    } else {
      mpi_errno = Colls::gather(sendbuf, sendcnt, sendtype,
                                recvbuf, recvcnt, recvtype, 0, shmem_comm);
    }
  }

  /* Exchange the data between the node leaders */
  if (local_rank == 0 && (leader_comm_size > 1)) {
    /* When data in each socket is different */
    if (comm->is_uniform() != 1) {
      int *displs = NULL;
      int *recvcnts = NULL;
      int *node_sizes = NULL;
      int i = 0;

      node_sizes = comm->get_non_uniform_map();

      displs = static_cast<int *>(xbt_malloc(sizeof(int) * leader_comm_size));
      recvcnts = static_cast<int *>(xbt_malloc(sizeof(int) * leader_comm_size));
      if (not displs || not recvcnts) {
        return MPI_ERR_OTHER;
      }
      recvcnts[0] = node_sizes[0] * recvcnt;
      displs[0] = 0;

      for (i = 1; i < leader_comm_size; i++) {
        displs[i] = displs[i - 1] + node_sizes[i - 1] * recvcnt;
        recvcnts[i] = node_sizes[i] * recvcnt;
      }

      void* sendbuf = ((char*)recvbuf) + recvtype->get_extent() * displs[leader_comm->rank()];

      mpi_errno = Colls::allgatherv(sendbuf, (recvcnt * local_size), recvtype,
                                    recvbuf, recvcnts, displs, recvtype,
                                    leader_comm);
      xbt_free(displs);
      xbt_free(recvcnts);
    } else {
      void* sendtmpbuf = ((char*)recvbuf) + recvtype->get_extent() * (recvcnt * local_size) * leader_comm->rank();
//......... part of the code omitted .........
Developer ID: mpoquet, Project: simgrid, Lines of code: 101
Example 30: MPIR_Scatter_MV2_Direct

//......... part of the code omitted .........

    leader_root = leader_comm->group()->rank(leaders_map[root]);
    /* leader_root is the rank of the leader of the root in leader_comm.
     * leader_root is to be used as the root of the inter-leader gather ops */

    if ((local_rank == 0) && (root != rank) && (leader_of_root == rank)) {
      /* The root of the scatter operation is not the node leader.
       * Recv data from the node leader */
      leader_scatter_buf = smpi_get_tmp_sendbuffer(nbytes * comm_size);
      Request::recv(leader_scatter_buf, nbytes * comm_size, MPI_BYTE,
                    root, COLL_TAG_SCATTER, comm, &status);
    }

    if (rank == root && local_rank != 0) {
      /* The root of the scatter operation is not the node leader.
       * Send data to the node leader */
      Request::send(sendbuf, sendcnt * comm_size, sendtype,
                    leader_of_root, COLL_TAG_SCATTER, comm);
    }

    if (leader_comm_size > 1 && local_rank == 0) {
      if (not comm->is_uniform()) {
        int* displs = NULL;
        int* sendcnts = NULL;
        int* node_sizes;
        int i = 0;
        node_sizes = comm->get_non_uniform_map();

        if (root != leader_of_root) {
          if (leader_comm_rank == leader_root) {
            displs = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
            sendcnts = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
            sendcnts[0] = node_sizes[0] * nbytes;
            displs[0] = 0;

            for (i = 1; i < leader_comm_size; i++) {
              displs[i] = displs[i - 1] + node_sizes[i - 1] * nbytes;
              sendcnts[i] = node_sizes[i] * nbytes;
            }
          }
          Colls::scatterv(leader_scatter_buf, sendcnts, displs, MPI_BYTE,
                          tmp_buf, nbytes * local_size, MPI_BYTE,
                          leader_root, leader_comm);
        } else {
          if (leader_comm_rank == leader_root) {
            displs = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
            sendcnts = static_cast<int*>(xbt_malloc(sizeof(int) * leader_comm_size));
            sendcnts[0] = node_sizes[0] * sendcnt;
            displs[0] = 0;

            for (i = 1; i < leader_comm_size; i++) {
              displs[i] = displs[i - 1] + node_sizes[i - 1] * sendcnt;
              sendcnts[i] = node_sizes[i] * sendcnt;
            }
          }
          Colls::scatterv(sendbuf, sendcnts, displs, sendtype,
                          tmp_buf, nbytes * local_size, MPI_BYTE,
                          leader_root, leader_comm);
        }
        if (leader_comm_rank == leader_root) {
          xbt_free(displs);
          xbt_free(sendcnts);
        }
      } else {
        if (leader_of_root != root) {
Developer ID: mpoquet, Project: simgrid, Lines of code: 67
Note: the xbt_malloc examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various programmers; copyright remains with the original authors, and any distribution or reuse should follow the corresponding project's license. Do not reproduce without permission.