This tutorial on C++ zfs_alloc code examples is very practical; we hope it helps you.
This article collects typical usage examples of the C++ zfs_alloc function. If you have been wondering how exactly zfs_alloc is used, or what real-world calls to it look like, the curated examples below should help. In total, 28 zfs_alloc code examples are presented, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.

Example 1: zfs_sort_snaps

static int
zfs_sort_snaps(zfs_handle_t *zhp, void *data)
{
    avl_tree_t *avl = data;
    zfs_node_t *node;
    zfs_node_t search;

    search.zn_handle = zhp;
    node = avl_find(avl, &search, NULL);
    if (node) {
        /*
         * If this snapshot was renamed while we were creating the
         * AVL tree, it's possible that we already inserted it under
         * its old name. Remove the old handle before adding the new
         * one.
         */
        zfs_close(node->zn_handle);
        avl_remove(avl, node);
        free(node);
    }

    node = zfs_alloc(zhp->zfs_hdl, sizeof (zfs_node_t));
    node->zn_handle = zhp;
    avl_add(avl, node);

    return (0);
}
Developer ID: Acidburn0zzz, Project: zfs, Lines of code: 27
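Every example below relies on the same contract: zfs_alloc() hands back zeroed memory tied to a libzfs handle and reports allocation failure through that handle. For orientation, here is a minimal sketch of the function itself, modeled on the OpenSolaris-era libzfs_util.c (newer OpenZFS releases abort on failure instead of returning NULL, so treat the exact failure behavior as version-dependent):

void *
zfs_alloc(libzfs_handle_t *hdl, size_t size)
{
    void *data;

    /* calloc so callers always receive zero-filled memory */
    if ((data = calloc(1, size)) == NULL)
        (void) no_memory(hdl);    /* record the error on the handle */

    return (data);
}

This is why several of the examples use the returned buffer without clearing it first.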
Example 2: zpool_open_silent

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
    zpool_handle_t *zhp;
    boolean_t missing;

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
        return (-1);

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
        zpool_close(zhp);
        return (-1);
    }

    if (missing) {
        zpool_close(zhp);
        *ret = NULL;
        return (0);
    }

    *ret = zhp;

    return (0);
}
Developer ID: andreiw, Project: polaris, Lines of code: 30
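For context, a caller iterating over cached pool names might use zpool_open_silent() roughly like this (a sketch; probe_pool and the surrounding iteration are hypothetical):

/* Hypothetical caller: probe one pool without printing errors. */
static int
probe_pool(libzfs_handle_t *hdl, const char *name)
{
    zpool_handle_t *zhp;

    if (zpool_open_silent(hdl, name, &zhp) != 0)
        return (-1);    /* hard failure, e.g. allocation */
    if (zhp == NULL)
        return (0);     /* pool is missing; skip it quietly */

    /* ... inspect the pool here ... */
    zpool_close(zhp);
    return (0);
}

Note the three-way outcome: -1 for real errors, a NULL handle for a missing pool, and a valid handle otherwise.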
Example 3: zpool_enable_datasets

int
zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
{
    get_all_cb_t cb = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    zfs_handle_t *zfsp;
    int i, ret = -1;
    int *good;

    /*
     * Gather all non-snap datasets within the pool.
     */
    if ((zfsp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_DATASET)) == NULL)
        goto out;

    libzfs_add_handle(&cb, zfsp);
    if (zfs_iter_filesystems(zfsp, mount_cb, &cb) != 0)
        goto out;

    /*
     * Sort the datasets by mountpoint.
     */
    qsort(cb.cb_handles, cb.cb_used, sizeof (void *),
        libzfs_dataset_cmp);

    /*
     * And mount all the datasets, keeping track of which ones
     * succeeded or failed.
     */
    if ((good = zfs_alloc(zhp->zpool_hdl,
        cb.cb_used * sizeof (int))) == NULL)
        goto out;

    ret = 0;
    for (i = 0; i < cb.cb_used; i++) {
        if (zfs_mount(cb.cb_handles[i], mntopts, flags) != 0)
            ret = -1;
        else
            good[i] = 1;
    }

    /*
     * Then share all the ones that need to be shared. This needs
     * to be a separate pass in order to avoid excessive reloading
     * of the configuration. Good should never be NULL since
     * zfs_alloc is supposed to exit if memory isn't available.
     */
    for (i = 0; i < cb.cb_used; i++) {
        if (good[i] && zfs_share(cb.cb_handles[i]) != 0)
            ret = -1;
    }

    free(good);

out:
    for (i = 0; i < cb.cb_used; i++)
        zfs_close(cb.cb_handles[i]);
    free(cb.cb_handles);

    return (ret);
}
Developer ID: kelsieflynn, Project: SamFlynnOS, Lines of code: 60
Example 4: zfs_graph_create

/*
 * Construct a new graph object. We allow the size to be specified as a
 * parameter so in the future we can size the hash according to the number of
 * datasets in the pool.
 */
static zfs_graph_t *
zfs_graph_create(libzfs_handle_t *hdl, size_t size)
{
    zfs_graph_t *zgp = zfs_alloc(hdl, sizeof (zfs_graph_t));

    if (zgp == NULL)
        return (NULL);

    zgp->zg_size = size;
    if ((zgp->zg_hash = zfs_alloc(hdl,
        size * sizeof (zfs_vertex_t *))) == NULL) {
        free(zgp);
        return (NULL);
    }

    return (zgp);
}
Developer ID: BjoKaSH, Project: mac-zfs, Lines of code: 22
Example 5: zfs_graph_create

/*
 * Construct a new graph object. We allow the size to be specified as a
 * parameter so in the future we can size the hash according to the number of
 * datasets in the pool.
 */
static zfs_graph_t *
zfs_graph_create(libzfs_handle_t *hdl, const char *dataset, size_t size)
{
    zfs_graph_t *zgp = zfs_alloc(hdl, sizeof (zfs_graph_t));

    if (zgp == NULL)
        return (NULL);

    zgp->zg_size = size;
    if ((zgp->zg_hash = zfs_alloc(hdl,
        size * sizeof (zfs_vertex_t *))) == NULL) {
        free(zgp);
        return (NULL);
    }

    zgp->zg_root = dataset;
    zgp->zg_clone_count = 0;

    return (zgp);
}
Developer ID: YaroslavLitvinov, Project: zfs-port, Lines of code: 25
Example 6: zfs_edge_create

/*
 * Allocate a new edge pointing to the target vertex.
 */
static zfs_edge_t *
zfs_edge_create(libzfs_handle_t *hdl, zfs_vertex_t *dest)
{
    zfs_edge_t *zep = zfs_alloc(hdl, sizeof (zfs_edge_t));

    if (zep == NULL)
        return (NULL);

    zep->ze_dest = dest;

    return (zep);
}
Developer ID: BjoKaSH, Project: mac-zfs, Lines of code: 15
Example 7: derive_key

static int
derive_key(libzfs_handle_t *hdl, zfs_keyformat_t format, uint64_t iters,
    uint8_t *key_material, size_t key_material_len, uint64_t salt,
    uint8_t **key_out)
{
    int ret;
    uint8_t *key;

    *key_out = NULL;

    key = zfs_alloc(hdl, WRAPPING_KEY_LEN);
    if (!key)
        return (ENOMEM);

    switch (format) {
    case ZFS_KEYFORMAT_RAW:
        bcopy(key_material, key, WRAPPING_KEY_LEN);
        break;
    case ZFS_KEYFORMAT_HEX:
        ret = hex_key_to_raw((char *)key_material,
            WRAPPING_KEY_LEN * 2, key);
        if (ret != 0) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "Invalid hex key provided."));
            goto error;
        }
        break;
    case ZFS_KEYFORMAT_PASSPHRASE:
        salt = LE_64(salt);
        ret = pbkdf2(key_material, strlen((char *)key_material),
            ((uint8_t *)&salt), sizeof (uint64_t), iters,
            key, WRAPPING_KEY_LEN);
        if (ret != 0) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "Failed to generate key from passphrase."));
            goto error;
        }
        break;
    default:
        ret = EINVAL;
        goto error;
    }

    *key_out = key;
    return (0);

error:
    free(key);

    *key_out = NULL;
    return (ret);
}
Developer ID: cbreak-black, Project: zfs, Lines of code: 52
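Since derive_key() is static, its callers live in the same file; a hedged sketch of a passphrase-based call follows (the iteration count and salt are made-up values for illustration — in real code they come from the dataset's encryption properties):

uint8_t *wkey = NULL;
uint64_t salt = 0x0123456789abcdefULL;            /* illustrative only */
const char *pass = "correct horse battery staple";

if (derive_key(hdl, ZFS_KEYFORMAT_PASSPHRASE, 350000,
    (uint8_t *)pass, strlen(pass), salt, &wkey) == 0) {
    /* wkey now holds WRAPPING_KEY_LEN bytes of key material */
    free(wkey);
}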
Example 8: construct_graph

/*
 * Construct a complete graph of all necessary vertices. First, we iterate over
 * only our object's children. If we don't find any cloned snapshots, then we
 * simply return that. Otherwise, we have to start at the pool root and iterate
 * over all datasets.
 */
static zfs_graph_t *
construct_graph(libzfs_handle_t *hdl, const char *dataset)
{
    zfs_graph_t *zgp = zfs_graph_create(hdl, ZFS_GRAPH_SIZE);
    zfs_cmd_t zc = { 0 };
    int ret = 0;

    if (zgp == NULL)
        return (zgp);

    /*
     * We need to explicitly check whether this dataset has clones or not,
     * since iterate_children() only checks the children.
     */
    (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
    (void) ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc);

    if (zc.zc_objset_stats.dds_num_clones != 0 ||
        (ret = iterate_children(hdl, zgp, dataset)) != 0) {
        /*
         * Determine pool name and try again.
         */
        char *pool, *slash;

        if ((slash = strchr(dataset, '/')) != NULL ||
            (slash = strchr(dataset, '@')) != NULL) {
            pool = zfs_alloc(hdl, slash - dataset + 1);
            if (pool == NULL) {
                zfs_graph_destroy(zgp);
                return (NULL);
            }
            (void) strncpy(pool, dataset, slash - dataset);
            pool[slash - dataset] = '\0';

            if (iterate_children(hdl, zgp, pool) == -1 ||
                zfs_graph_add(hdl, zgp, pool, NULL, 0) != 0) {
                free(pool);
                zfs_graph_destroy(zgp);
                return (NULL);
            }

            free(pool);
        }
    }

    if (ret == -1 || zfs_graph_add(hdl, zgp, dataset, NULL, 0) != 0) {
        zfs_graph_destroy(zgp);
        return (NULL);
    }

    return (zgp);
}
Developer ID: BjoKaSH, Project: mac-zfs, Lines of code: 58
Example 9: zfs_vertex_create

/*
 * Allocate a new vertex with the given name.
 */
static zfs_vertex_t *
zfs_vertex_create(libzfs_handle_t *hdl, const char *dataset)
{
    zfs_vertex_t *zvp = zfs_alloc(hdl, sizeof (zfs_vertex_t));

    if (zvp == NULL)
        return (NULL);

    assert(strlen(dataset) < ZFS_MAXNAMELEN);

    (void) strlcpy(zvp->zv_dataset, dataset, sizeof (zvp->zv_dataset));

    if ((zvp->zv_edges = zfs_alloc(hdl,
        MIN_EDGECOUNT * sizeof (void *))) == NULL) {
        free(zvp);
        return (NULL);
    }

    zvp->zv_edgealloc = MIN_EDGECOUNT;

    return (zvp);
}
Developer ID: BjoKaSH, Project: mac-zfs, Lines of code: 25
Example 10: topo_sort

/*
 * Given a graph, do a recursive topological sort into the given array. This is
 * really just a depth first search, so that the deepest nodes appear first.
 * We hijack the 'zv_visited' marker to avoid visiting the same vertex twice.
 */
static int
topo_sort(libzfs_handle_t *hdl, boolean_t allowrecursion, char **result,
    size_t *idx, zfs_vertex_t *zgv)
{
    int i;

    if (zgv->zv_visited == VISIT_SORT_PRE && !allowrecursion) {
        /*
         * If we've already seen this vertex as part of our depth-first
         * search, then we have a cyclic dependency, and we must return
         * an error.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "recursive dependency at '%s'"),
            zgv->zv_dataset);
        return (zfs_error(hdl, EZFS_RECURSIVE,
            dgettext(TEXT_DOMAIN,
            "cannot determine dependent datasets")));
    } else if (zgv->zv_visited >= VISIT_SORT_PRE) {
        /*
         * If we've already processed this as part of the topological
         * sort, then don't bother doing so again.
         */
        return (0);
    }

    zgv->zv_visited = VISIT_SORT_PRE;

    /* avoid doing a search if we don't have to */
    zfs_vertex_sort_edges(zgv);
    for (i = 0; i < zgv->zv_edgecount; i++) {
        if (topo_sort(hdl, allowrecursion, result, idx,
            zgv->zv_edges[i]->ze_dest) != 0)
            return (-1);
    }

    /* we may have visited this in the course of the above */
    if (zgv->zv_visited == VISIT_SORT_POST)
        return (0);

    if ((result[*idx] = zfs_alloc(hdl,
        strlen(zgv->zv_dataset) + 1)) == NULL)
        return (-1);

    (void) strcpy(result[*idx], zgv->zv_dataset);
    *idx += 1;

    zgv->zv_visited = VISIT_SORT_POST;
    return (0);
}
Developer ID: BjoKaSH, Project: mac-zfs, Lines of code: 54
Example 11: get_keydata_curl

/*ARGSUSED*/
static size_t
get_keydata_curl(void *ptr, size_t size, size_t nmemb, void *arg)
{
    struct cb_arg_curl *cb = arg;
    size_t datalen = size * nmemb;

    if (ptr == NULL || datalen == 0)
        return (0);

    cb->cb_keydatalen = datalen;
    cb->cb_keydata = zfs_alloc(cb->cb_hdl, datalen);
    bcopy(ptr, cb->cb_keydata, datalen);

    return (datalen);
}
Developer ID: corbosman, Project: zfs-crypto, Lines of code: 16
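The function above has exactly the signature libcurl expects of a write callback. Wiring it up would look roughly like this (a sketch; the URL and the cb_arg_curl initialization are illustrative assumptions):

struct cb_arg_curl cb = { 0 };
CURL *curl = curl_easy_init();

cb.cb_hdl = hdl;    /* libzfs handle, assumed to exist */
if (curl != NULL) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://keys.example/wrapping.key");
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, get_keydata_curl);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &cb);
    if (curl_easy_perform(curl) == CURLE_OK) {
        /* cb.cb_keydata / cb.cb_keydatalen hold the fetched key */
    }
    curl_easy_cleanup(curl);
}

One design note: as written, each invocation of the callback overwrites cb_keydata, so it only handles responses that arrive in a single chunk (and leaks the previous buffer otherwise).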
Example 12: zpool_handle

/*
 * Returns a handle to the pool that contains the provided dataset.
 * If a handle to that pool already exists then that handle is returned.
 * Otherwise, a new handle is created and added to the list of handles.
 */
static zpool_handle_t *
zpool_handle(zfs_handle_t *zhp)
{
    char *pool_name;
    int len;
    zpool_handle_t *zph;

    len = strcspn(zhp->zfs_name, "/@") + 1;
    pool_name = zfs_alloc(zhp->zfs_hdl, len);
    (void) strlcpy(pool_name, zhp->zfs_name, len);

    zph = zpool_find_handle(zhp, pool_name, len);
    if (zph == NULL)
        zph = zpool_add_handle(zhp, pool_name);

    free(pool_name);
    return (zph);
}
Developer ID: Anuradha-Talur, Project: nfs-ganesha, Lines of code: 23
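The strcspn(name, "/@") + 1 idiom computes the length of the leading pool component (everything before the first '/' or '@') plus one byte for the terminating NUL, which strlcpy then enforces. A standalone illustration of the same trick (assumes strlcpy is available, as on Solaris/BSD or glibc 2.38+):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *dataset = "tank/home/user@snap1";
    char pool[64];
    size_t len = strcspn(dataset, "/@") + 1;    /* 4 + 1 */

    if (len > sizeof (pool))
        len = sizeof (pool);
    (void) strlcpy(pool, dataset, len);
    (void) printf("%s\n", pool);                /* prints "tank" */
    return (0);
}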
Example 13: get_dependents

/*
 * The only public interface for this file. Do the dirty work of constructing a
 * child list for the given object. Construct the graph, do the topological
 * sort, and then return the array of strings to the caller.
 *
 * The 'allowrecursion' parameter controls behavior when cycles are found. If
 * it is set, then the cycle is ignored and the results returned as if the
 * cycle did not exist. If it is not set, then the routine will generate an
 * error if a cycle is found.
 */
int
get_dependents(libzfs_handle_t *hdl, boolean_t allowrecursion,
    const char *dataset, char ***result, size_t *count)
{
    zfs_graph_t *zgp;
    zfs_vertex_t *zvp;

    if ((zgp = construct_graph(hdl, dataset)) == NULL)
        return (-1);

    if ((*result = zfs_alloc(hdl,
        zgp->zg_nvertex * sizeof (char *))) == NULL) {
        zfs_graph_destroy(zgp);
        return (-1);
    }

    if ((zvp = zfs_graph_lookup(hdl, zgp, dataset, 0)) == NULL) {
        free(*result);
        zfs_graph_destroy(zgp);
        return (-1);
    }

    *count = 0;
    if (topo_sort(hdl, allowrecursion, *result, count, zvp) != 0) {
        free(*result);
        zfs_graph_destroy(zgp);
        return (-1);
    }

    /*
     * Get rid of the last entry, which is our starting vertex and not
     * strictly a dependent.
     */
    assert(*count > 0);
    free((*result)[*count - 1]);
    (*count)--;

    zfs_graph_destroy(zgp);

    return (0);
}
Developer ID: BjoKaSH, Project: mac-zfs, Lines of code: 51
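A hedged sketch of how a caller might consume get_dependents() (handle acquisition omitted; note that the caller owns both the strings and the array, since each entry was allocated with zfs_alloc):

char **deps;
size_t count, i;

if (get_dependents(hdl, B_FALSE, "tank/fs", &deps, &count) == 0) {
    for (i = 0; i < count; i++) {
        (void) printf("dependent: %s\n", deps[i]);
        free(deps[i]);
    }
    free(deps);
}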
Example 14: zcmd_write_src_nvlist

int
zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl,
    size_t *size)
{
    char *packed;
    size_t len;

    verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);

    if ((packed = zfs_alloc(hdl, len)) == NULL)
        return (-1);

    verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

    zc->zc_nvlist_src = (uint64_t)(uintptr_t)packed;
    zc->zc_nvlist_src_size = len;

    if (size)
        *size = len;
    return (0);
}
Developer ID: roddi, Project: mac-zfs, Lines of code: 21
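The size/pack/point sequence above is the standard way libzfs marshals an nvlist into the zfs_cmd_t it hands to the kernel. A hedged caller sketch (the property and ioctl are chosen for illustration; error handling trimmed):

zfs_cmd_t zc = { 0 };
nvlist_t *nvl;

verify(nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_uint64(nvl, "quota", 1ULL << 30) == 0);

(void) strlcpy(zc.zc_name, "tank/fs", sizeof (zc.zc_name));
if (zcmd_write_src_nvlist(hdl, &zc, nvl, NULL) == 0) {
    (void) ioctl(hdl->libzfs_fd, ZFS_IOC_SET_PROP, &zc);
    free((void *)(uintptr_t)zc.zc_nvlist_src);    /* the packed buffer */
}
nvlist_free(nvl);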
Example 15: zpool_open_canfail

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
    zpool_handle_t *zhp;
    boolean_t missing;

    /*
     * Make sure the pool name is valid.
     */
    if (!zpool_name_valid(hdl, B_TRUE, pool)) {
        (void) zfs_error(hdl, EZFS_INVALIDNAME,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
            pool);
        return (NULL);
    }

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
        return (NULL);

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
        zpool_close(zhp);
        return (NULL);
    }

    if (missing) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
        (void) zfs_error(hdl, EZFS_NOENT,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
        zpool_close(zhp);
        return (NULL);
    }

    return (zhp);
}
Developer ID: andreiw, Project: polaris, Lines of code: 43
Example 16: construct_graph

/*
 * Construct a complete graph of all necessary vertices. First, iterate over
 * only our object's children. If no cloned snapshots are found, or all of
 * the cloned snapshots are in this subtree then return a graph of the subtree.
 * Otherwise, start at the root of the pool and iterate over all datasets.
 */
static zfs_graph_t *
construct_graph(libzfs_handle_t *hdl, const char *dataset)
{
    zfs_graph_t *zgp = zfs_graph_create(hdl, dataset, ZFS_GRAPH_SIZE);
    int ret = 0;

    if (zgp == NULL)
        return (zgp);

    if ((strchr(dataset, '/') == NULL) ||
        (external_dependents(hdl, zgp, dataset))) {
        /*
         * Determine pool name and try again.
         */
        int len = strcspn(dataset, "/@") + 1;
        char *pool = zfs_alloc(hdl, len);

        if (pool == NULL) {
            zfs_graph_destroy(zgp);
            return (NULL);
        }
        (void) strlcpy(pool, dataset, len);

        if (iterate_children(hdl, zgp, pool) == -1 ||
            zfs_graph_add(hdl, zgp, pool, NULL, 0) != 0) {
            free(pool);
            zfs_graph_destroy(zgp);
            return (NULL);
        }
        free(pool);
    }

    if (ret == -1 || zfs_graph_add(hdl, zgp, dataset, NULL, 0) != 0) {
        zfs_graph_destroy(zgp);
        return (NULL);
    }

    return (zgp);
}
Developer ID: YaroslavLitvinov, Project: zfs-port, Lines of code: 45
Example 17: zpool_find_import_cached

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
    char *buf;
    int fd;
    struct stat statbuf;
    nvlist_t *raw, *src, *dst;
    nvlist_t *pools;
    nvpair_t *elem;
    char *name;
    uint64_t this_guid;
    boolean_t active;

    verify(poolname == NULL || guid == 0);

    if ((fd = open(cachefile, O_RDONLY)) < 0) {
        zfs_error_aux(hdl, "%s", strerror(errno));
        (void) zfs_error(hdl, EZFS_BADCACHE,
            dgettext(TEXT_DOMAIN, "failed to open cache file"));
        return (NULL);
    }

    if (fstat(fd, &statbuf) != 0) {
        zfs_error_aux(hdl, "%s", strerror(errno));
        (void) close(fd);
        (void) zfs_error(hdl, EZFS_BADCACHE,
            dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
        return (NULL);
    }

    if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
        (void) close(fd);
        return (NULL);
    }

    if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
        (void) close(fd);
        free(buf);
        (void) zfs_error(hdl, EZFS_BADCACHE,
            dgettext(TEXT_DOMAIN,
            "failed to read cache file contents"));
        return (NULL);
    }

    (void) close(fd);

    if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
        free(buf);
        (void) zfs_error(hdl, EZFS_BADCACHE,
            dgettext(TEXT_DOMAIN,
            "invalid or corrupt cache file contents"));
        return (NULL);
    }

    free(buf);

    /*
     * Go through and get the current state of the pools and refresh their
     * state.
     */
    if (nvlist_alloc(&pools, 0, 0) != 0) {
        (void) no_memory(hdl);
        nvlist_free(raw);
        return (NULL);
    }

    elem = NULL;
    while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
        src = fnvpair_value_nvlist(elem);

        name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
        if (poolname != NULL && strcmp(poolname, name) != 0)
            continue;

        this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
        if (guid != 0 && guid != this_guid)
            continue;

        if (pool_active(hdl, name, this_guid, &active) != 0) {
            nvlist_free(raw);
            nvlist_free(pools);
            return (NULL);
        }

        if (active)
            continue;

        if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
            cachefile) != 0) {
            (void) no_memory(hdl);
            nvlist_free(raw);
            nvlist_free(pools);
            return (NULL);
        }
/* ... part of the code omitted here ... */
Developer ID: cbreak-black, Project: zfs, Lines of code: 101
Example 18: zpool_find_import_impl

/* ... part of the code omitted here ... */

        /*
         * Using raw devices instead of block devices when we're
         * reading the labels skips a bunch of slow operations during
         * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
         */
        if (strcmp(path, ZFS_DISK_ROOTD) == 0)
            (void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
        else
            (void) strlcpy(rdsk, path, sizeof (rdsk));

        if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
            (dirp = fdopendir(dfd)) == NULL) {
            if (dfd >= 0)
                (void) close(dfd);
            zfs_error_aux(hdl, strerror(errno));
            (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                rdsk);
            goto error;
        }

        avl_create(&slice_cache, slice_cache_compare,
            sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

        /*
         * This is not MT-safe, but we have no MT consumers of libzfs
         */
        while ((dp = readdir(dirp)) != NULL) {
            const char *name = dp->d_name;
            if (name[0] == '.' &&
                (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                continue;

            slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
            slice->rn_name = zfs_strdup(hdl, name);
            slice->rn_avl = &slice_cache;
            slice->rn_dfd = dfd;
            slice->rn_hdl = hdl;
            slice->rn_nozpool = B_FALSE;
            avl_add(&slice_cache, slice);
        }

        /*
         * create a thread pool to do all of this in parallel;
         * rn_nozpool is not protected, so this is racy in that
         * multiple tasks could decide that the same slice can
         * not hold a zpool, which is benign. Also choose
         * double the number of processors; we hold a lot of
         * locks in the kernel, so going beyond this doesn't
         * buy us much.
         */
        t = taskq_create("z_import", 2 * max_ncpus, defclsyspri,
            2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
        for (slice = avl_first(&slice_cache); slice;
            (slice = avl_walk(&slice_cache, slice, AVL_AFTER)))
            (void) taskq_dispatch(t, zpool_open_func, slice,
                TQ_SLEEP);
        taskq_wait(t);
        taskq_destroy(t);

        cookie = NULL;
        while ((slice = avl_destroy_nodes(&slice_cache,
            &cookie)) != NULL) {
            if (slice->rn_config != NULL && !config_failed) {
                nvlist_t *config = slice->rn_config;
/* ... part of the code omitted here ... */
Developer ID: cbreak-black, Project: zfs, Lines of code: 67
Example 19: zpool_vdev_attach

/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk, nvlist_t *nvroot, int replacing)
{
    zfs_cmd_t zc = { 0 };
    char msg[1024];
    char *packed;
    int ret;
    size_t len;
    nvlist_t *tgt;
    boolean_t avail_spare;
    uint64_t val;
    char *path;
    nvlist_t **child;
    uint_t children;
    nvlist_t *config_root;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    if (replacing)
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot replace %s with %s"), old_disk, new_disk);
    else
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot attach %s to %s"), new_disk, old_disk);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
    zc.zc_cookie = replacing;

    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0 || children != 1) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "new device must be a single disk"));
        return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
    }

    verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
        ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

    /*
     * If the target is a hot spare that has been swapped in, we can only
     * replace it with another hot spare.
     */
    if (replacing &&
        nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
        nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
        (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
        !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "can only be replaced by another hot spare"));
        return (zfs_error(hdl, EZFS_BADTARGET, msg));
    }

    /*
     * If we are attempting to replace a spare, it cannot be applied to an
     * already spared device.
     */
    if (replacing &&
        nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
        zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
        is_replacing_spare(config_root, tgt, 0)) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "device has already been replaced with a spare"));
        return (zfs_error(hdl, EZFS_BADTARGET, msg));
    }

    verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

    if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
        return (-1);

    verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

    zc.zc_config_src = (uint64_t)(uintptr_t)packed;
    zc.zc_config_src_size = len;

    ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);

    free(packed);

    if (ret == 0)
        return (0);

    switch (errno) {
    case ENOTSUP:
        /*
         * Can't attach to or replace this type of vdev.
         */
        if (replacing)
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
/* ... part of the code omitted here ... */
Developer ID: andreiw, Project: polaris, Lines of code: 101
Example 20: zpool_import

/*
 * Import the given pool using the known configuration. The configuration
 * should have come from zpool_find_import(). The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    const char *altroot)
{
    zfs_cmd_t zc;
    char *packed;
    size_t len;
    char *thename;
    char *origname;
    int ret;

    verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
        &origname) == 0);

    if (newname != NULL) {
        if (!zpool_name_valid(hdl, B_FALSE, newname))
            return (zfs_error(hdl, EZFS_INVALIDNAME,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                newname));
        thename = (char *)newname;
    } else {
        thename = origname;
    }

    if (altroot != NULL && altroot[0] != '/')
        return (zfs_error(hdl, EZFS_BADPATH,
            dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
            altroot));

    (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

    if (altroot != NULL)
        (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
    else
        zc.zc_root[0] = '\0';

    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
        &zc.zc_guid) == 0);

    verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);

    if ((packed = zfs_alloc(hdl, len)) == NULL)
        return (-1);

    verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

    zc.zc_config_src = (uint64_t)(uintptr_t)packed;
    zc.zc_config_src_size = len;

    ret = 0;
    if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
        char desc[1024];
        if (newname == NULL)
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                thename);
        else
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
                origname, thename);

        switch (errno) {
        case ENOTSUP:
            /*
             * Unsupported version.
             */
            (void) zfs_error(hdl, EZFS_BADVERSION, desc);
            break;

        case EINVAL:
            (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
            break;

        default:
            (void) zpool_standard_error(hdl, errno, desc);
        }

        ret = -1;
    } else {
        zpool_handle_t *zhp;
        /*
         * This should never fail, but play it safe anyway.
         */
        if (zpool_open_silent(hdl, thename, &zhp) != 0) {
            ret = -1;
        } else if (zhp != NULL) {
            ret = zpool_create_zvol_links(zhp);
            zpool_close(zhp);
        }
    }

    free(packed);
    return (ret);
}
Developer ID: andreiw, Project: polaris, Lines of code: 100
Example 21: zpool_add

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
    char *packed;
    size_t len;
    zfs_cmd_t zc;
    int ret;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    char msg[1024];
    nvlist_t **spares;
    uint_t nspares;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot add to '%s'"), zhp->zpool_name);

    if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add hot spares"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));
    }

    verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

    if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
        return (-1);

    verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_config_src = (uint64_t)(uintptr_t)packed;
    zc.zc_config_src_size = len;

    if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
        switch (errno) {
        case EBUSY:
            /*
             * This can happen if the user has specified the same
             * device multiple times. We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;

        case EOVERFLOW:
            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE. Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            {
                char buf[64];

                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "device is less than the minimum "
                    "size (%s)"), buf);
            }
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;

        case ENOTSUP:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "pool must be upgraded to add raidz2 vdevs"));
            (void) zfs_error(hdl, EZFS_BADVERSION, msg);
            break;

        default:
            (void) zpool_standard_error(hdl, errno, msg);
        }

        ret = -1;
    } else {
        ret = 0;
    }

    free(packed);

    return (ret);
}
Developer ID: andreiw, Project: polaris, Lines of code: 90
Example 22: zpool_create

/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
    zfs_cmd_t zc = { 0 };
    char *packed;
    size_t len;
    char msg[1024];

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot create '%s'"), pool);

    if (!zpool_name_valid(hdl, B_FALSE, pool))
        return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

    if (altroot != NULL && altroot[0] != '/')
        return (zfs_error(hdl, EZFS_BADPATH,
            dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

    if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
        return (no_memory(hdl));

    if ((packed = zfs_alloc(hdl, len)) == NULL)
        return (-1);

    if (nvlist_pack(nvroot, &packed, &len,
        NV_ENCODE_NATIVE, 0) != 0) {
        free(packed);
        return (no_memory(hdl));
    }

    (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
    zc.zc_config_src = (uint64_t)(uintptr_t)packed;
    zc.zc_config_src_size = len;

    if (altroot != NULL)
        (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
        free(packed);

        switch (errno) {
        case EBUSY:
            /*
             * This can happen if the user has specified the same
             * device multiple times. We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        case EOVERFLOW:
            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE. Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            {
                char buf[64];

                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "one or more devices is less than the "
                    "minimum size (%s)"), buf);
            }
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        case ENOSPC:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is out of space"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        default:
            return (zpool_standard_error(hdl, errno, msg));
        }
    }

    free(packed);

    /*
     * If this is an alternate root pool, then we automatically set the
     * mountpoint of the root dataset to be '/'.
     */
    if (altroot != NULL) {
        zfs_handle_t *zhp;

        verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
        verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

        zfs_close(zhp);
    }
/* ... part of the code omitted here ... */
Developer ID: andreiw, Project: polaris, Lines of code: 101
Example 23: zpool_get_errlog

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
{
    zfs_cmd_t zc = { 0 };
    uint64_t count;
    zbookmark_t *zb;
    int i, j;

    if (zhp->zpool_error_log != NULL) {
        *list = zhp->zpool_error_log;
        *nelem = zhp->zpool_error_count;
        return (0);
    }

    /*
     * Retrieve the raw error list from the kernel. If the number of errors
     * has increased, allocate more space and continue until we get the
     * entire list.
     */
    verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
        &count) == 0);
    if ((zc.zc_config_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
        count * sizeof (zbookmark_t))) == NULL)
        return (-1);
    zc.zc_config_dst_size = count;
    (void) strcpy(zc.zc_name, zhp->zpool_name);
    for (;;) {
        if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
            &zc) != 0) {
            free((void *)(uintptr_t)zc.zc_config_dst);
            if (errno == ENOMEM) {
                if ((zc.zc_config_dst = (uintptr_t)
                    zfs_alloc(zhp->zpool_hdl,
                    zc.zc_config_dst_size)) == NULL)
                    return (-1);
            } else {
                return (-1);
            }
        } else {
            break;
        }
    }

    /*
     * Sort the resulting bookmarks. This is a little confusing due to the
     * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
     * to first, and 'zc_config_dst_size' indicates the number of bookmarks
     * _not_ copied as part of the process. So we point the start of our
     * array appropriately and decrement the total number of elements.
     */
    zb = ((zbookmark_t *)(uintptr_t)zc.zc_config_dst) +
        zc.zc_config_dst_size;
    count -= zc.zc_config_dst_size;

    qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

    /*
     * Count the number of unique elements
     */
    j = 0;
    for (i = 0; i < count; i++) {
        if (i > 0 && memcmp(&zb[i - 1], &zb[i],
            sizeof (zbookmark_t)) == 0)
            continue;
        j++;
    }

    /*
     * If the user has only requested the number of items, return it now
     * without bothering with the extra work.
     */
    if (list == NULL) {
        *nelem = j;
        free((void *)(uintptr_t)zc.zc_config_dst);
        return (0);
    }

    zhp->zpool_error_count = j;

    /*
     * Allocate an array of nvlists to hold the results
     */
    if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
        j * sizeof (nvlist_t *))) == NULL) {
        free((void *)(uintptr_t)zc.zc_config_dst);
        return (-1);
    }

    /*
     * Fill in the results with names from the kernel.
     */
    j = 0;
    for (i = 0; i < count; i++) {
        char buf[64];
        nvlist_t *nv;
/* ... part of the code omitted here ... */
Developer ID: andreiw, Project: polaris, Lines of code: 101
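In the version shown above, passing a NULL list asks only for the count, and once the full query runs, the uniquified results are cached on the pool handle. A brief usage sketch under those assumptions:

size_t nerr;
nvlist_t **errlist;

/* First query just the number of persistent errors... */
if (zpool_get_errlog(zhp, NULL, &nerr) == 0 && nerr > 0) {
    /* ...then fetch the uniquified entries themselves. */
    if (zpool_get_errlog(zhp, &errlist, &nerr) == 0) {
        /* entries are cached on zhp; do not free them here */
    }
}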
Example 24: zpool_disable_datasets

/*
 * Unshare and unmount all datasets within the given pool. We don't want to
 * rely on traversing the DSL to discover the filesystems within the pool,
 * because this may be expensive (if not all of them are mounted), and can fail
 * arbitrarily (on I/O error, for example). Instead, we walk /etc/mtab and
 * gather all the filesystems that are currently mounted.
 */
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
    int used, alloc;
    struct mnttab entry;
    size_t namelen;
    char **mountpoints = NULL;
    zfs_handle_t **datasets = NULL;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    int i;
    int ret = -1;
    int flags = (force ? MS_FORCE : 0);

    namelen = strlen(zhp->zpool_name);

    rewind(hdl->libzfs_mnttab);
    used = alloc = 0;
    while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
        /*
         * Ignore non-ZFS entries.
         */
        if (entry.mnt_fstype == NULL ||
            strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
            continue;

        /*
         * Ignore filesystems not within this pool.
         */
        if (entry.mnt_mountp == NULL ||
            strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
            (entry.mnt_special[namelen] != '/' &&
            entry.mnt_special[namelen] != '\0'))
            continue;

        /*
         * At this point we've found a filesystem within our pool. Add
         * it to our growing list.
         */
        if (used == alloc) {
            if (alloc == 0) {
                if ((mountpoints = zfs_alloc(hdl,
                    8 * sizeof (void *))) == NULL)
                    goto out;

                if ((datasets = zfs_alloc(hdl,
                    8 * sizeof (void *))) == NULL)
                    goto out;

                alloc = 8;
            } else {
                void *ptr;

                if ((ptr = zfs_realloc(hdl, mountpoints,
                    alloc * sizeof (void *),
                    alloc * 2 * sizeof (void *))) == NULL)
                    goto out;
                mountpoints = ptr;

                if ((ptr = zfs_realloc(hdl, datasets,
                    alloc * sizeof (void *),
                    alloc * 2 * sizeof (void *))) == NULL)
                    goto out;
                datasets = ptr;

                alloc *= 2;
            }
        }

        if ((mountpoints[used] = zfs_strdup(hdl,
            entry.mnt_mountp)) == NULL)
            goto out;

        /*
         * This is allowed to fail, in case there is some I/O error. It
         * is only used to determine if we need to remove the underlying
         * mountpoint, so failure is not fatal.
         */
        datasets[used] = make_dataset_handle(hdl, entry.mnt_special);

        used++;
    }

    /*
     * At this point, we have the entire list of filesystems, so sort it by
     * mountpoint.
     */
    qsort(mountpoints, used, sizeof (char *), mountpoint_compare);

    /*
     * Walk through and first unshare everything.
     */
    for (i = 0; i < used; i++) {
        zfs_share_proto_t *curr_proto;
/* ... part of the code omitted here ... */
Developer ID: tommiatplayfish, Project: zfs-crypto, Lines of code: 101
Example 25: changelist_gather

/*
 * Given a ZFS handle and a property, construct a complete list of datasets
 * that need to be modified as part of this process. For anything but the
 * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
 * Otherwise, we iterate over all children and look for any datasets that
 * inherit the property. For each such dataset, we add it to the list and
 * mark whether it was shared beforehand.
 */
prop_changelist_t *
changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
{
    prop_changelist_t *clp;
    prop_changenode_t *cn;
    zfs_handle_t *temp;
    char property[ZFS_MAXPROPLEN];
    uu_compare_fn_t *compare = NULL;

    if ((clp = zfs_alloc(zhp->zfs_hdl, sizeof (prop_changelist_t))) == NULL)
        return (NULL);

    /*
     * For mountpoint-related tasks, we want to sort everything by
     * mountpoint, so that we mount and unmount them in the appropriate
     * order, regardless of their position in the hierarchy.
     */
    if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
        prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS) {
        compare = compare_mountpoints;
        clp->cl_sorted = B_TRUE;
    }

    clp->cl_pool = uu_list_pool_create("changelist_pool",
        sizeof (prop_changenode_t),
        offsetof(prop_changenode_t, cn_listnode),
        compare, 0);
    if (clp->cl_pool == NULL) {
        assert(uu_error() == UU_ERROR_NO_MEMORY);
        (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
        changelist_free(clp);
        return (NULL);
    }

    clp->cl_list = uu_list_create(clp->cl_pool, NULL,
        clp->cl_sorted ? UU_LIST_SORTED : 0);
    clp->cl_flags = flags;

    if (clp->cl_list == NULL) {
        assert(uu_error() == UU_ERROR_NO_MEMORY);
        (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
        changelist_free(clp);
        return (NULL);
    }

    /*
     * If this is a rename or the 'zoned' property, we pretend we're
     * changing the mountpoint and flag it so we can catch all children in
     * change_one().
     *
     * Flag cl_alldependents to catch all children plus the dependents
     * (clones) that are not in the hierarchy.
     */
    if (prop == ZFS_PROP_NAME) {
        clp->cl_prop = ZFS_PROP_MOUNTPOINT;
        clp->cl_alldependents = B_TRUE;
    } else if (prop == ZFS_PROP_ZONED) {
        clp->cl_prop = ZFS_PROP_MOUNTPOINT;
        clp->cl_allchildren = B_TRUE;
    } else if (prop == ZFS_PROP_CANMOUNT) {
        clp->cl_prop = ZFS_PROP_MOUNTPOINT;
    } else if (prop == ZFS_PROP_VOLSIZE) {
        clp->cl_prop = ZFS_PROP_MOUNTPOINT;
    } else if (prop == ZFS_PROP_VERSION) {
        clp->cl_prop = ZFS_PROP_MOUNTPOINT;
    } else {
        clp->cl_prop = prop;
    }
    clp->cl_realprop = prop;

    if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
        clp->cl_prop != ZFS_PROP_SHARENFS &&
        clp->cl_prop != ZFS_PROP_SHAREISCSI)
        return (clp);

    if (clp->cl_alldependents) {
        if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
            changelist_free(clp);
            return (NULL);
        }
    } else if (zfs_iter_children(zhp, change_one, clp) != 0) {
        changelist_free(clp);
        return (NULL);
    }

    /*
     * We have to re-open ourselves because we auto-close all the handles
     * and can't tell the difference.
     */
    if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
        ZFS_TYPE_ANY)) == NULL) {
        changelist_free(clp);
/* ... part of the code omitted here ... */
Developer ID: roddi, Project: mac-zfs, Lines of code: 101
Example 26: add_config

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    int order, int num_labels, nvlist_t *config)
{
    uint64_t pool_guid, vdev_guid, top_guid, txg, state;
    pool_entry_t *pe;
    vdev_entry_t *ve;
    config_entry_t *ce;
    name_entry_t *ne;

    /*
     * If this is a hot spare not currently in use or level 2 cache
     * device, add it to the list of names to translate, but don't do
     * anything else.
     */
    if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
        &state) == 0 &&
        (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
        nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
            return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
            free(ne);
            return (-1);
        }
        ne->ne_guid = vdev_guid;
        ne->ne_order = order;
        ne->ne_num_labels = num_labels;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
    }

    /*
     * If we have a valid config but cannot read any of these fields, then
     * it means we have a half-initialized label. In vdev_label_init()
     * we write a label with txg == 0 so that we can identify the device
     * in case the user refers to the same disk later on. If we fail to
     * create the pool, we'll be left with a label in this state
     * which should not be considered part of a valid pool.
     */
    if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
        &pool_guid) != 0 ||
        nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
        &vdev_guid) != 0 ||
        nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
        &top_guid) != 0 ||
        nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
        &txg) != 0 || txg == 0) {
        nvlist_free(config);
        return (0);
    }

    /*
     * First, see if we know about this pool. If not, then add it to the
     * list of known pools.
     */
    for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
        if (pe->pe_guid == pool_guid)
            break;
    }

    if (pe == NULL) {
        if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
            nvlist_free(config);
            return (-1);
        }
        pe->pe_guid = pool_guid;
        pe->pe_next = pl->pools;
        pl->pools = pe;
    }

    /*
     * Second, see if we know about this toplevel vdev. Add it if it's
     * missing.
     */
    for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
        if (ve->ve_guid == top_guid)
            break;
    }

    if (ve == NULL) {
        if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
            nvlist_free(config);
            return (-1);
        }
        ve->ve_guid = top_guid;
        ve->ve_next = pe->pe_vdevs;
        pe->pe_vdevs = ve;
    }

    /*
     * Third, see if we have a config with a matching transaction group. If
     * so, then we do nothing. Otherwise, add it to the list of known
     * configs.
     */
/* ... part of the code omitted here ... */
Developer ID: cbreak-black, Project: zfs, Lines of code: 101
Example 27: get_configs

/* ... part of the code omitted here ... */

            fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
                comment);

            state = fnvlist_lookup_uint64(tmp,
                ZPOOL_CONFIG_POOL_STATE);
            fnvlist_add_uint64(config,
                ZPOOL_CONFIG_POOL_STATE, state);

            hostid = 0;
            if (nvlist_lookup_uint64(tmp,
                ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                fnvlist_add_uint64(config,
                    ZPOOL_CONFIG_HOSTID, hostid);
                hostname = fnvlist_lookup_string(tmp,
                    ZPOOL_CONFIG_HOSTNAME);
                fnvlist_add_string(config,
                    ZPOOL_CONFIG_HOSTNAME, hostname);
            }

            config_seen = B_TRUE;
        }

        /*
         * Add this top-level vdev to the child array.
         */
        verify(nvlist_lookup_nvlist(tmp,
            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
            &id) == 0);

        if (id >= children) {
            nvlist_t **newchild;

            newchild = zfs_alloc(hdl, (id + 1) *
                sizeof (nvlist_t *));
            if (newchild == NULL)
                goto nomem;

            for (c = 0; c < children; c++)
                newchild[c] = child[c];

            free(child);
            child = newchild;
            children = id + 1;
        }
        if (nvlist_dup(nvtop, &child[id], 0) != 0)
            goto nomem;

        /*
         * If we have information about all the top-levels then
         * clean up the nvlist which we've constructed. This
         * means removing any extraneous devices that are
         * beyond the valid range or adding devices to the end
         * of our array which appear to be missing.
         */
        if (valid_top_config) {
            if (max_id < children) {
                for (c = max_id; c < children; c++)
                    nvlist_free(child[c]);
                children = max_id;
            } else if (max_id > children) {
                nvlist_t **newchild;

                newchild = zfs_alloc(hdl, (max_id) *
/* ... part of the code omitted here ... */
Developer ID: cbreak-black, Project: zfs, Lines of code: 67
Example 28: namespace_reload

/*
 * Loads the pool namespace, or re-loads it if the cache has changed.
 */
static int
namespace_reload(libzfs_handle_t *hdl)
{
    nvlist_t *config;
    config_node_t *cn;
    nvpair_t *elem;
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    void *cookie;

    if (hdl->libzfs_ns_gen == 0) {
        /*
         * This is the first time we've accessed the configuration
         * cache. Initialize the AVL tree and then fall through to the
         * common code.
         */
        if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool",
            sizeof (config_node_t),
            offsetof(config_node_t, cn_avl),
            config_node_compare, UU_DEFAULT)) == NULL)
            return (no_memory(hdl));

        if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool,
            NULL, UU_DEFAULT)) == NULL)
            return (no_memory(hdl));
    }

    if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
        return (-1);

    for (;;) {
        zc.zc_cookie = hdl->libzfs_ns_gen;
        /* if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) { */
        if (zfs_ioctl(hdl, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
            switch (errno) {
            case EEXIST:
                /*
                 * The namespace hasn't changed.
                 */
                zcmd_free_nvlists(&zc);
                return (0);

            case ENOMEM:
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                    zcmd_free_nvlists(&zc);
                    return (-1);
                }
                break;

            default:
                zcmd_free_nvlists(&zc);
                return (zfs_standard_error(hdl, errno,
                    dgettext(TEXT_DOMAIN, "failed to read "
                    "pool configuration")));
            }
        } else {
            hdl->libzfs_ns_gen = zc.zc_cookie;
            break;
        }
    }

    if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
        zcmd_free_nvlists(&zc);
        return (-1);
    }

    zcmd_free_nvlists(&zc);

    /*
     * Clear out any existing configuration information.
     */
    cookie = NULL;
    while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, &cookie)) != NULL) {
        nvlist_free(cn->cn_config);
        free(cn->cn_name);
        free(cn);
    }

    elem = NULL;
    while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
        nvlist_t *child;
        uu_avl_index_t where;

        if ((cn = zfs_alloc(hdl, sizeof (config_node_t))) == NULL) {
            nvlist_free(config);
            return (-1);
        }

        if ((cn->cn_name = zfs_strdup(hdl,
            nvpair_name(elem))) == NULL) {
            free(cn);
            nvlist_free(config);
            return (-1);
        }

        verify(nvpair_value_nvlist(elem, &child) == 0);
        if (nvlist_dup(child, &cn->cn_config, 0) != 0) {
            free(cn->cn_name);
/* ... part of the code omitted here ... */
Developer ID: dariaphoebe, Project: zfs-1, Lines of code: 101
Note: the zfs_alloc examples in this article are compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in the source code belongs to the original authors, and distribution and use should follow the license of the corresponding project. Please do not repost without permission.