This article collects typical usage examples of the C++ txg_wait_synced function. If you have been wondering what exactly txg_wait_synced does, how to use it, or what real-world callers look like, the curated examples below should help. 27 txg_wait_synced code examples are shown, sorted by popularity by default.

Example 1: dsl_sync_task_group_wait

int
dsl_sync_task_group_wait(dsl_sync_task_group_t *dstg)
{
    dmu_tx_t *tx;
    uint64_t txg;
    dsl_sync_task_t *dst;

top:
    tx = dmu_tx_create_dd(dstg->dstg_pool->dp_mos_dir);
    VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

    txg = dmu_tx_get_txg(tx);

    /* Do a preliminary error check. */
    dstg->dstg_err = 0;
#ifdef ZFS_DEBUG
    /*
     * Only check half the time, otherwise, the sync-context
     * check will almost never fail.
     */
    if (spa_get_random(2) == 0)
        goto skip;
#endif
    rw_enter(&dstg->dstg_pool->dp_config_rwlock, RW_READER);
    for (dst = list_head(&dstg->dstg_tasks); dst;
        dst = list_next(&dstg->dstg_tasks, dst)) {
        dst->dst_err =
            dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
        if (dst->dst_err)
            dstg->dstg_err = dst->dst_err;
    }
    rw_exit(&dstg->dstg_pool->dp_config_rwlock);

    if (dstg->dstg_err) {
        dmu_tx_commit(tx);
        return (dstg->dstg_err);
    }
skip:
    /*
     * We don't generally have many sync tasks, so pay the price of
     * add_tail to get the tasks executed in the right order.
     */
    VERIFY(0 == txg_list_add_tail(&dstg->dstg_pool->dp_sync_tasks,
        dstg, txg));

    dmu_tx_commit(tx);

    txg_wait_synced(dstg->dstg_pool, txg);

    if (dstg->dstg_err == EAGAIN) {
        txg_wait_synced(dstg->dstg_pool, txg + TXG_DEFER_SIZE);
        goto top;
    }

    return (dstg->dstg_err);
}
Developer ID: vadimcomanescu, Project: illumos-gate, Lines of code: 57
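A note on the shared pattern: almost every example on this page boils down to the same four steps — open a DMU transaction, remember which txg it was assigned to, commit, then call txg_wait_synced() to block until that txg is durably on disk. The sketch below distills this from Example 1; the wrapper function and its error handling are invented for illustration, and only the DMU/txg calls themselves come from the examples.

/*
 * Minimal sketch (hypothetical helper): block until one specific
 * change has reached stable storage.
 */
static int
wait_for_my_change(objset_t *os)
{
    dmu_tx_t *tx = dmu_tx_create(os);
    uint64_t txg;
    int error;

    error = dmu_tx_assign(tx, TXG_WAIT);   /* block until some txg accepts the tx */
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }

    txg = dmu_tx_get_txg(tx);              /* the txg that carries our change */
    /* ... dirty buffers / update objects here ... */
    dmu_tx_commit(tx);

    /* Does not return until txg (and everything before it) has synced. */
    txg_wait_synced(dmu_objset_pool(os), txg);
    return (0);
}

Examples 1 and 2 add one refinement: if the queued sync task fails with EAGAIN, they wait a further TXG_DEFER_SIZE txgs and retry from the top.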
Example 2: dsl_sync_task

/*
 * Called from open context to perform a callback in syncing context.  Waits
 * for the operation to complete.
 *
 * The checkfunc will be called from open context as a preliminary check
 * which can quickly fail.  If it succeeds, it will be called again from
 * syncing context.  The checkfunc should generally be designed to work
 * properly in either context, but if necessary it can check
 * dmu_tx_is_syncing(tx).
 *
 * The synctask infrastructure enforces proper locking strategy with respect
 * to the dp_config_rwlock -- the lock will always be held when the callbacks
 * are called.  It will be held for read during the open-context (preliminary)
 * call to the checkfunc, and then held for write from syncing context during
 * the calls to the check and sync funcs.
 *
 * A dataset or pool name can be passed as the first argument.  Typically,
 * the check func will hold, check the return value of the hold, and then
 * release the dataset.  The sync func will VERIFY0(hold()) the dataset.
 * This is safe because no changes can be made between the check and sync funcs,
 * and the sync func will only be called if the check func successfully opened
 * the dataset.
 */
int
dsl_sync_task(const char *pool, dsl_checkfunc_t *checkfunc,
    dsl_syncfunc_t *syncfunc, void *arg,
    int blocks_modified, zfs_space_check_t space_check)
{
    spa_t *spa;
    dmu_tx_t *tx;
    int err;
    dsl_sync_task_t dst = { { { NULL } } };
    dsl_pool_t *dp;

    err = spa_open(pool, &spa, FTAG);
    if (err != 0)
        return (err);
    dp = spa_get_dsl(spa);

top:
    tx = dmu_tx_create_dd(dp->dp_mos_dir);
    VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

    dst.dst_pool = dp;
    dst.dst_txg = dmu_tx_get_txg(tx);
    dst.dst_space = blocks_modified << DST_AVG_BLKSHIFT;
    dst.dst_space_check = space_check;
    dst.dst_checkfunc = checkfunc != NULL ? checkfunc : dsl_null_checkfunc;
    dst.dst_syncfunc = syncfunc;
    dst.dst_arg = arg;
    dst.dst_error = 0;
    dst.dst_nowaiter = B_FALSE;

    dsl_pool_config_enter(dp, FTAG);
    err = dst.dst_checkfunc(arg, tx);
    dsl_pool_config_exit(dp, FTAG);

    if (err != 0) {
        dmu_tx_commit(tx);
        spa_close(spa, FTAG);
        return (err);
    }

    VERIFY(txg_list_add_tail(&dp->dp_sync_tasks, &dst, dst.dst_txg));

    dmu_tx_commit(tx);

    txg_wait_synced(dp, dst.dst_txg);

    if (dst.dst_error == EAGAIN) {
        txg_wait_synced(dp, dst.dst_txg + TXG_DEFER_SIZE);
        goto top;
    }

    spa_close(spa, FTAG);
    return (dst.dst_error);
}
Developer ID: 64116278, Project: zfs, Lines of code: 77
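The header comment of Example 2 fully specifies the calling convention, so a caller-side sketch may help. Everything named below is hypothetical — the callbacks, the argument, the pool name "tank" — only the dsl_sync_task() and callback signatures are taken from the example, and the space-check constant is assumed from the zfs_space_check_t parameter type.

/* Hypothetical check function: run once in open context as a cheap
 * preliminary test, then again in syncing context. */
static int
my_check(void *arg, dmu_tx_t *tx)
{
    return (0);     /* 0 = proceed to the sync func */
}

/* Hypothetical sync function: the actual modification, executed exactly
 * once in syncing context if my_check() succeeded both times. */
static void
my_sync(void *arg, dmu_tx_t *tx)
{
}

/*
 * A caller would then write something like:
 *
 *     error = dsl_sync_task("tank", my_check, my_sync,
 *         &my_arg, 3, ZFS_SPACE_CHECK_NORMAL);
 *
 * dsl_sync_task() queues the task and calls txg_wait_synced()
 * internally, so by the time it returns the change is on disk.
 */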
Example 3: txg_sync_stop

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
    tx_state_t *tx = &dp->dp_tx;

    dprintf("pool %p\n", dp);

    /*
     * Finish off any work in progress.
     */
    ASSERT(tx->tx_threads == 2);
    txg_wait_synced(dp, 0);

    /*
     * Wake all sync threads and wait for them to die.
     */
    mutex_enter(&tx->tx_sync_lock);

    ASSERT(tx->tx_threads == 2);

    tx->tx_exiting = 1;

    cv_broadcast(&tx->tx_quiesce_more_cv);
    cv_broadcast(&tx->tx_quiesce_done_cv);
    cv_broadcast(&tx->tx_sync_more_cv);

    while (tx->tx_threads != 0)
        cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

    tx->tx_exiting = 0;

    mutex_exit(&tx->tx_sync_lock);
}
Developer ID: YaroslavLitvinov, Project: zfs-prezerovm, Lines of code: 35
Example 4: zvol_update_volsize

/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
    dmu_tx_t *tx;
    int error;
    uint64_t txg;

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    dmu_tx_mark_netfree(tx);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (SET_ERROR(error));
    }
    txg = dmu_tx_get_txg(tx);

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
        &volsize, tx);
    dmu_tx_commit(tx);

    txg_wait_synced(dmu_objset_pool(os), txg);

    if (error == 0)
        error = dmu_free_long_range(os,
            ZVOL_OBJ, volsize, DMU_OBJECT_END);

    return (error);
}
Developer ID: alek-p, Project: zfs, Lines of code: 34
Example 5: txg_sync_stop

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
    tx_state_t *tx = &dp->dp_tx;

    /*
     * Finish off any work in progress.
     */
    ASSERT(tx->tx_threads == 2);

    /*
     * We need to ensure that we've vacated the deferred space_maps.
     */
    txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

    /*
     * Wake all sync threads and wait for them to die.
     */
    mutex_enter(&tx->tx_sync_lock);

    ASSERT(tx->tx_threads == 2);

    tx->tx_exiting = 1;

    cv_broadcast(&tx->tx_quiesce_more_cv);
    cv_broadcast(&tx->tx_quiesce_done_cv);
    cv_broadcast(&tx->tx_sync_more_cv);

    while (tx->tx_threads != 0)
        cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

    tx->tx_exiting = 0;

    mutex_exit(&tx->tx_sync_lock);
}
Developer ID: Zak-Adelman, Project: zfs, Lines of code: 38
Example 6: zil_destroy

/*
 * In one tx, free all log blocks and clear the log header.
 * If keep_first is set, then we're replaying a log with no content.
 * We want to keep the first block, however, so that the first
 * synchronous transaction doesn't require a txg_wait_synced()
 * in zil_create().  We don't need to txg_wait_synced() here either
 * when keep_first is set, because both zil_create() and zil_destroy()
 * will wait for any in-progress destroys to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    dmu_tx_t *tx;
    uint64_t txg;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    if (BP_IS_HOLE(&zh->zh_log))
        return;

    tx = dmu_tx_create(zilog->zl_os);
    (void) dmu_tx_assign(tx, TXG_WAIT);
    dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
    txg = dmu_tx_get_txg(tx);

    mutex_enter(&zilog->zl_lock);

    /*
     * It is possible for the ZIL to get the previously mounted zilog
     * structure of the same dataset if quickly remounted and the dbuf
     * eviction has not completed. In this case we can see a non
     * empty lwb list and keep_first will be set. We fix this by
     * clearing the keep_first. This will be slower but it's very rare.
     */
    if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
        keep_first = B_FALSE;

    ASSERT3U(zilog->zl_destroy_txg, <, txg);
    zilog->zl_destroy_txg = txg;
    zilog->zl_keep_first = keep_first;

    if (!list_is_empty(&zilog->zl_lwb_list)) {
        ASSERT(zh->zh_claim_txg == 0);
        ASSERT(!keep_first);
        while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
            list_remove(&zilog->zl_lwb_list, lwb);
            if (lwb->lwb_buf != NULL)
                zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
            zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
            kmem_cache_free(zil_lwb_cache, lwb);
        }
    } else {
        if (!keep_first) {
            (void) zil_parse(zilog, zil_free_log_block,
                zil_free_log_record, tx, zh->zh_claim_txg);
        }
    }
    mutex_exit(&zilog->zl_lock);

    dmu_tx_commit(tx);
}
Developer ID: harshada, Project: zfs, Lines of code: 66
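One detail worth pulling out of Example 6 (and Example 20 below): zl_destroy_txg acts as a rendezvous point. The destroyer records the txg that carries the destroy, and any later zil_create() or zil_destroy() waits on that field before touching the log. A compressed sketch, with the wrapper function invented and both sides shown together for clarity:

static void
zil_destroy_handshake(zilog_t *zilog, uint64_t destroy_txg)
{
    /* Producer side (done under zl_lock in zil_destroy()): remember
     * which txg the destroy rides in. */
    zilog->zl_destroy_txg = destroy_txg;

    /* Consumer side (zil_create(), or the next zil_destroy()): block
     * until that txg -- and therefore the destroy -- has synced. */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
}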
Example 7: zfs_vfs_sync

static int
zfs_vfs_sync(struct mount *mp, __unused int waitfor, __unused vfs_context_t context)
{
    zfsvfs_t *zfsvfs = vfs_fsprivate(mp);

    ZFS_ENTER(zfsvfs);

    /*
     * Mac OS X needs a file system modify time
     *
     * We use the mtime of the "com.apple.system.mtime"
     * extended attribute, which is associated with the
     * file system root directory.
     *
     * Here we sync any mtime changes to this attribute.
     */
    if (zfsvfs->z_mtime_vp != NULL) {
        timestruc_t mtime;
        znode_t *zp;
top:
        zp = VTOZ(zfsvfs->z_mtime_vp);
        ZFS_TIME_DECODE(&mtime, zp->z_phys->zp_mtime);
        if (zfsvfs->z_last_mtime_synced < mtime.tv_sec) {
            dmu_tx_t *tx;
            int error;

            tx = dmu_tx_create(zfsvfs->z_os);
            dmu_tx_hold_bonus(tx, zp->z_id);
            error = dmu_tx_assign(tx, zfsvfs->z_assign);
            if (error) {
                if (error == ERESTART &&
                    zfsvfs->z_assign == TXG_NOWAIT) {
                    dmu_tx_wait(tx);
                    dmu_tx_abort(tx);
                    goto top;
                }
                dmu_tx_abort(tx);
            } else {
                dmu_buf_will_dirty(zp->z_dbuf, tx);
                dmu_tx_commit(tx);
                zfsvfs->z_last_mtime_synced = mtime.tv_sec;
            }
        }
    }

    if (zfsvfs->z_log != NULL)
        zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
    else
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);

    ZFS_EXIT(zfsvfs);

    return (0);
}
Developer ID: roddi, Project: mac-zfs, Lines of code: 52
Example 8: osd_object_sync

static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
                           __u64 start, __u64 end)
{
    struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
    ENTRY;

    /* XXX: no other option than syncing the whole filesystem until we
     * support ZIL.  If the object tracked the txg that it was last
     * modified in, it could pass that txg here instead of "0".  Maybe
     * the changes are already committed, so no wait is needed at all? */
    txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);

    RETURN(0);
}
Developer ID: sdsc, Project: lustre-release, Lines of code: 14
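The XXX comment in Example 8 describes an optimization the code does not implement: track the txg in which each object was last modified and wait only for that. A sketch of what it might look like — the oo_sync_txg value is entirely hypothetical; only the txg_wait_synced() semantics are real:

static void
osd_object_sync_sketch(struct osd_device *osd, uint64_t oo_sync_txg)
{
    if (oo_sync_txg != 0) {
        /* Wait only for the txg carrying this object's last change;
         * returns immediately if that txg has already synced. */
        txg_wait_synced(dmu_objset_pool(osd->od_os), oo_sync_txg);
    } else {
        /* Today's behaviour: wait for everything currently dirty. */
        txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
    }
}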
Example 9: zil_replay

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
    zilog_t *zilog = dmu_objset_zil(os);
    const zil_header_t *zh = zilog->zl_header;
    zil_replay_arg_t zr;

    if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
        zil_destroy(zilog, B_TRUE);
        return;
    }

    zr.zr_os = os;
    zr.zr_replay = replay_func;
    zr.zr_arg = arg;
    zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
    /* XXX: Changed to use vmem_alloc instead of kmem_alloc for
     * large allocation size (I think this is safe here). */
    zr.zr_lrbuf = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

    /*
     * Wait for in-progress removes to sync before starting replay.
     */
    txg_wait_synced(zilog->zl_dmu_pool, 0);

    zilog->zl_replay = B_TRUE;
    zilog->zl_replay_time = lbolt;
    ASSERT(zilog->zl_replay_blks == 0);
    (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
        zh->zh_claim_txg);
    vmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

    zil_destroy(zilog, B_FALSE);
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
    zilog->zl_replay = B_FALSE;
}
Developer ID: harshada, Project: zfs, Lines of code: 40
Example 10: dsl_sync_task_group_wait

int
dsl_sync_task_group_wait(dsl_sync_task_group_t *dstg)
{
    dmu_tx_t *tx;
    uint64_t txg;
    dsl_sync_task_t *dst;

top:
    tx = dmu_tx_create_dd(dstg->dstg_pool->dp_mos_dir);
    VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

    txg = dmu_tx_get_txg(tx);

    /* Do a preliminary error check. */
    dstg->dstg_err = 0;
    rw_enter(&dstg->dstg_pool->dp_config_rwlock, RW_READER);
    for (dst = list_head(&dstg->dstg_tasks); dst;
        dst = list_next(&dstg->dstg_tasks, dst)) {
#ifdef ZFS_DEBUG
        /*
         * Only check half the time, otherwise, the sync-context
         * check will almost never fail.
         */
        if (spa_get_random(2) == 0)
            continue;
#endif
        dst->dst_err =
            dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
        if (dst->dst_err)
            dstg->dstg_err = dst->dst_err;
    }
    rw_exit(&dstg->dstg_pool->dp_config_rwlock);

    if (dstg->dstg_err) {
        dmu_tx_commit(tx);
        return (dstg->dstg_err);
    }

    VERIFY(0 == txg_list_add(&dstg->dstg_pool->dp_sync_tasks, dstg, txg));

    dmu_tx_commit(tx);

    txg_wait_synced(dstg->dstg_pool, txg);

    if (dstg->dstg_err == EAGAIN)
        goto top;

    return (dstg->dstg_err);
}
Developer ID: harshada, Project: zfs, Lines of code: 49
Example 11: zfs_vnop_ioctl_fullfsync

int
zfs_vnop_ioctl_fullfsync(struct vnode *vp, vfs_context_t ct, zfsvfs_t *zfsvfs)
{
    int error;

    error = zfs_fsync(vp, /*syncflag*/0, NULL, (caller_context_t *)ct);
    if (error)
        return (error);

    if (zfsvfs->z_log != NULL)
        zil_commit(zfsvfs->z_log, 0);
    else
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
    return (0);
}
Developer ID: RJVB, Project: zfs, Lines of code: 15
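Examples 11, 14 and 16 all use the same two-branch idiom, distilled below: if the dataset has a ZIL, committing the intent log is enough to make the data durable; only a dataset without one needs a full txg_wait_synced(). The wrapper name is invented, and note that the zil_commit() signature varies across ZFS versions (two arguments here, as in Example 11; three in the older Examples 14 and 16).

static void
make_durable(zfsvfs_t *zfsvfs)
{
    if (zfsvfs->z_log != NULL)
        zil_commit(zfsvfs->z_log, 0);   /* cheap: flush the intent log */
    else
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);  /* full sync */
}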
Example 12: dmu_objset_userspace_upgrade

int
dmu_objset_userspace_upgrade(objset_t *os)
{
    uint64_t obj;
    int err = 0;

    if (dmu_objset_userspace_present(os))
        return (0);
    if (!dmu_objset_userused_enabled(os->os))
        return (ENOTSUP);
    if (dmu_objset_is_snapshot(os))
        return (EINVAL);

    /*
     * We simply need to mark every object dirty, so that it will be
     * synced out and now accounted.  If this is called
     * concurrently, or if we already did some work before crashing,
     * that's fine, since we track each object's accounted state
     * independently.
     */

    for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
        dmu_tx_t *tx;
        dmu_buf_t *db;
        int objerr;

        if (issig(JUSTLOOKING) && issig(FORREAL))
            return (EINTR);

        objerr = dmu_bonus_hold(os, obj, FTAG, &db);
        if (objerr)
            continue;
        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, obj);
        objerr = dmu_tx_assign(tx, TXG_WAIT);
        if (objerr) {
            dmu_tx_abort(tx);
            continue;
        }
        dmu_buf_will_dirty(db, tx);
        dmu_buf_rele(db, FTAG);
        dmu_tx_commit(tx);
    }

    os->os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
    txg_wait_synced(dmu_objset_pool(os), 0);
    return (0);
}
Developer ID: harshada, Project: zfs, Lines of code: 48
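Example 12 also demonstrates a batching idiom: dirty any number of objects, each in its own transaction, and pay for txg_wait_synced() only once at the end. Passing 0 as the txg waits for everything dirty at the time of the call, which by then covers the whole batch. A reduced sketch — the helper and its parameters are invented; the DMU calls come from the example:

static int
dirty_objects_then_wait(objset_t *os, uint64_t *objs, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        dmu_tx_t *tx = dmu_tx_create(os);
        int err;

        dmu_tx_hold_bonus(tx, objs[i]);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err) {
            dmu_tx_abort(tx);
            return (err);
        }
        /* ... dmu_buf_will_dirty() the object's bonus buffer ... */
        dmu_tx_commit(tx);
    }

    /* One wait covers every txg dirtied by the loop above. */
    txg_wait_synced(dmu_objset_pool(os), 0);
    return (0);
}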
Example 13: spa_config_update

/*
 * Update all disk labels, generate a fresh config based on the current
 * in-core state, and sync the global config cache (do not sync the config
 * cache if this is a booting rootpool).
 */
void
spa_config_update(spa_t *spa, int what)
{
    vdev_t *rvd = spa->spa_root_vdev;
    uint64_t txg;
    int c;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
    txg = spa_last_synced_txg(spa) + 1;
    if (what == SPA_CONFIG_UPDATE_POOL) {
        vdev_config_dirty(rvd);
    } else {
        /*
         * If we have top-level vdevs that were added but have
         * not yet been prepared for allocation, do that now.
         * (It's safe now because the config cache is up to date,
         * so it will be able to translate the new DVAs.)
         * See comments in spa_vdev_add() for full details.
         */
        for (c = 0; c < rvd->vdev_children; c++) {
            vdev_t *tvd = rvd->vdev_child[c];

            if (tvd->vdev_ms_array == 0)
                vdev_metaslab_set_size(tvd);
            vdev_expand(tvd, txg);
        }
    }
    spa_config_exit(spa, SCL_ALL, FTAG);

    /*
     * Wait for the mosconfig to be regenerated and synced.
     */
    txg_wait_synced(spa->spa_dsl_pool, txg);

    /*
     * Update the global config cache to reflect the new mosconfig.
     */
    if (!spa->spa_is_root) {
        spa_write_cachefile(spa, B_FALSE,
            what != SPA_CONFIG_UPDATE_POOL);
    }

    if (what == SPA_CONFIG_UPDATE_POOL)
        spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}
Developer ID: openzfsonosx, Project: zfs, Lines of code: 51
Example 14: zfs_sync

/*ARGSUSED*/
int
zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
{
    /*
     * Data integrity is job one.  We don't want a compromised kernel
     * writing to the storage pool, so we never sync during panic.
     */
    if (panicstr)
        return (0);

    /*
     * SYNC_ATTR is used by fsflush() to force old filesystems like UFS
     * to sync metadata, which they would otherwise cache indefinitely.
     * Semantically, the only requirement is that the sync be initiated.
     * The DMU syncs out txgs frequently, so there's nothing to do.
     */
    if (flag & SYNC_ATTR)
        return (0);

    if (vfsp != NULL) {
        /*
         * Sync a specific filesystem.
         */
        zfsvfs_t *zfsvfs = vfsp->vfs_data;

        ZFS_ENTER(zfsvfs);
        if (zfsvfs->z_log != NULL)
            zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
        else
            txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
        ZFS_EXIT(zfsvfs);
    } else {
        /*
         * Sync all ZFS filesystems.  This is what happens when you
         * run sync(1M).  Unlike other filesystems, ZFS honors the
         * request by waiting for all pools to commit all dirty data.
         */
        spa_sync_allpools();
    }

    return (0);
}
Developer ID: andreiw, Project: polaris, Lines of code: 43
Example 15: zvol_last_close

static void
zvol_last_close(zvol_state_t *zv)
{
    zil_close(zv->zv_zilog);
    zv->zv_zilog = NULL;

    dmu_buf_rele(zv->zv_dbuf, zvol_tag);
    zv->zv_dbuf = NULL;

    /*
     * Evict cached data
     */
    if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
        !(zv->zv_flags & ZVOL_RDONLY))
        txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
    (void) dmu_objset_evict_dbufs(zv->zv_objset);

    dmu_objset_disown(zv->zv_objset, zvol_tag);
    zv->zv_objset = NULL;
}
Developer ID: alek-p, Project: zfs, Lines of code: 20
Example 16: zfs_sync

/*ARGSUSED*/
static int
zfs_sync(vfs_t *vfsp, int waitfor)
{
    /*
     * Data integrity is job one.  We don't want a compromised kernel
     * writing to the storage pool, so we never sync during panic.
     */
    if (panicstr)
        return (0);

    if (vfsp != NULL) {
        /*
         * Sync a specific filesystem.
         */
        zfsvfs_t *zfsvfs = vfsp->vfs_data;
        int error;

        error = vfs_stdsync(vfsp, waitfor);
        if (error != 0)
            return (error);

        ZFS_ENTER(zfsvfs);
        if (zfsvfs->z_log != NULL)
            zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
        else
            txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
        ZFS_EXIT(zfsvfs);
    } else {
        /*
         * Sync all ZFS filesystems.  This is what happens when you
         * run sync(1M).  Unlike other filesystems, ZFS honors the
         * request by waiting for all pools to commit all dirty data.
         */
        spa_sync_allpools();
    }

    return (0);
}
Developer ID: DangerDexter, Project: FreeBSD-8.0-dyntick, Lines of code: 40
Example 17: zil_close

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
    /*
     * If the log isn't already committed, mark the objset dirty
     * (so zil_sync() will be called) and wait for that txg to sync.
     */
    if (!zil_is_committed(zilog)) {
        uint64_t txg;
        dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    taskq_destroy(zilog->zl_clean_taskq);
    zilog->zl_clean_taskq = NULL;
    zilog->zl_get_data = NULL;

    zil_itx_clean(zilog);
    ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}
Developer ID: harshada, Project: zfs, Lines of code: 27
Example 18: spa_history_get

/*
 * Read out the command history.
 */
int
spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
{
    objset_t *mos = spa->spa_meta_objset;
    dmu_buf_t *dbp;
    uint64_t read_len, phys_read_off, phys_eof;
    uint64_t leftover = 0;
    spa_history_phys_t *shpp;
    int err;

    /*
     * If the command history doesn't exist (older pool),
     * that's ok, just return ENOENT.
     */
    if (!spa->spa_history)
        return (ENOENT);

    /*
     * The history is logged asynchronously, so when they request
     * the first chunk of history, make sure everything has been
     * synced to disk so that we get it.
     */
    if (*offp == 0 && spa_writeable(spa))
        txg_wait_synced(spa_get_dsl(spa), 0);

    if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0)
        return (err);
    shpp = dbp->db_data;

#ifdef ZFS_DEBUG
    {
        dmu_object_info_t doi;
        dmu_object_info_from_db(dbp, &doi);
        ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
    }
#endif

    mutex_enter(&spa->spa_history_lock);
    phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);

    if (*offp < shpp->sh_pool_create_len) {
        /* read in just the zpool create history */
        phys_read_off = *offp;
        read_len = MIN(*len, shpp->sh_pool_create_len -
            phys_read_off);
    } else {
        /*
         * Need to reset passed in offset to BOF if the passed in
         * offset has since been overwritten.
         */
        *offp = MAX(*offp, shpp->sh_bof);
        phys_read_off = spa_history_log_to_phys(*offp, shpp);

        /*
         * Read up to the minimum of what the user passed down or
         * the EOF (physical or logical).  If we hit physical EOF,
         * use 'leftover' to read from the physical BOF.
         */
        if (phys_read_off <= phys_eof) {
            read_len = MIN(*len, phys_eof - phys_read_off);
        } else {
            read_len = MIN(*len,
                shpp->sh_phys_max_off - phys_read_off);
            if (phys_read_off + *len > shpp->sh_phys_max_off) {
                leftover = MIN(*len - read_len,
                    phys_eof - shpp->sh_pool_create_len);
            }
        }
    }

    /* offset for consumer to use next */
    *offp += read_len + leftover;

    /* tell the consumer how much you actually read */
    *len = read_len + leftover;

    if (read_len == 0) {
        mutex_exit(&spa->spa_history_lock);
        dmu_buf_rele(dbp, FTAG);
        return (0);
    }

    err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf,
        DMU_READ_PREFETCH);
    if (leftover && err == 0) {
        err = dmu_read(mos, spa->spa_history,
            shpp->sh_pool_create_len, leftover, buf + read_len,
            DMU_READ_PREFETCH);
    }
    mutex_exit(&spa->spa_history_lock);

    dmu_buf_rele(dbp, FTAG);
    return (err);
}
Developer ID: bluemutedwisdom, Project: zfs, Lines of code: 96
Example 19: zil_replay_log_record

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
    zil_replay_arg_t *zr = zra;
    const zil_header_t *zh = zilog->zl_header;
    uint64_t reclen = lr->lrc_reclen;
    uint64_t txtype = lr->lrc_txtype;
    char *name;
    int pass, error;

    if (!zilog->zl_replay)          /* giving up */
        return;

    if (lr->lrc_txg < claim_txg)    /* already committed */
        return;

    if (lr->lrc_seq <= zh->zh_replay_seq)   /* already replayed */
        return;

    /* Strip case-insensitive bit, still present in log record */
    txtype &= ~TX_CI;

    if (txtype == 0 || txtype >= TX_MAX_TYPE) {
        error = EINVAL;
        goto bad;
    }

    /*
     * Make a copy of the data so we can revise and extend it.
     */
    bcopy(lr, zr->zr_lrbuf, reclen);

    /*
     * The log block containing this lr may have been byteswapped
     * so that we can easily examine common fields like lrc_txtype.
     * However, the log is a mix of different data types, and only the
     * replay vectors know how to byteswap their records.  Therefore, if
     * the lr was byteswapped, undo it before invoking the replay vector.
     */
    if (zr->zr_byteswap)
        byteswap_uint64_array(zr->zr_lrbuf, reclen);

    /*
     * If this is a TX_WRITE with a blkptr, suck in the data.
     */
    if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
        lr_write_t *lrw = (lr_write_t *)lr;
        blkptr_t *wbp = &lrw->lr_blkptr;
        uint64_t wlen = lrw->lr_length;
        char *wbuf = zr->zr_lrbuf + reclen;

        if (BP_IS_HOLE(wbp)) {  /* compressed to a hole */
            bzero(wbuf, wlen);
        } else {
            /*
             * A subsequent write may have overwritten this block,
             * in which case wbp may have been freed and
             * reallocated, and our read of wbp may fail with a
             * checksum error.  We can safely ignore this because
             * the later write will provide the correct data.
             */
            zbookmark_t zb;

            zb.zb_objset = dmu_objset_id(zilog->zl_os);
            zb.zb_object = lrw->lr_foid;
            zb.zb_level = -1;
            zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

            (void) zio_wait(zio_read(NULL, zilog->zl_spa,
                wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
                ZIO_PRIORITY_SYNC_READ,
                ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
            (void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
        }
    }

    /*
     * We must now do two things atomically: replay this log record,
     * and update the log header sequence number to reflect the fact that
     * we did so. At the end of each replay function the sequence number
     * is updated if we are in replay mode.
     */
    for (pass = 1; pass <= 2; pass++) {
        zilog->zl_replaying_seq = lr->lrc_seq;
        /* Only byteswap (if needed) on the 1st pass.  */
        error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
            zr->zr_byteswap && pass == 1);
        if (!error)
            return;

        /*
         * The DMU's dnode layer doesn't see removes until the txg
         * commits, so a subsequent claim can spuriously fail with
         * EEXIST.  So if we receive any error we try syncing out
         * any removes then retry the transaction.
         */
        if (pass == 1)
            txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
    }
// ... part of the code omitted here ...
Developer ID: harshada, Project: zfs, Lines of code: 101
Example 20: zil_create

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
    const zil_header_t *zh = zilog->zl_header;
    lwb_t *lwb;
    uint64_t txg = 0;
    dmu_tx_t *tx = NULL;
    blkptr_t blk;
    int error = 0;

    /*
     * Wait for any previous destroy to complete.
     */
    txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

    ASSERT(zh->zh_claim_txg == 0);
    ASSERT(zh->zh_replay_seq == 0);

    blk = zh->zh_log;

    /*
     * If we don't already have an initial log block or we have one
     * but it's the wrong endianness then allocate one.
     */
    if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
        tx = dmu_tx_create(zilog->zl_os);
        (void) dmu_tx_assign(tx, TXG_WAIT);
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
        txg = dmu_tx_get_txg(tx);

        if (!BP_IS_HOLE(&blk)) {
            zio_free_blk(zilog->zl_spa, &blk, txg);
            BP_ZERO(&blk);
        }

        error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
            NULL, txg);

        if (error == 0)
            zil_init_log_chain(zilog, &blk);
    }

    /*
     * Allocate a log write buffer (lwb) for the first log block.
     */
    if (error == 0) {
        lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
        lwb->lwb_zilog = zilog;
        lwb->lwb_blk = blk;
        lwb->lwb_nused = 0;
        lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
        lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
        lwb->lwb_max_txg = txg;
        lwb->lwb_zio = NULL;

        mutex_enter(&zilog->zl_lock);
        list_insert_tail(&zilog->zl_lwb_list, lwb);
        mutex_exit(&zilog->zl_lock);
    }

    /*
     * If we just allocated the first log block, commit our transaction
     * and wait for zil_sync() to stuff the block pointer into zh_log.
     * (zh is part of the MOS, so we cannot modify it in open context.)
     */
    if (tx != NULL) {
        dmu_tx_commit(tx);
        txg_wait_synced(zilog->zl_dmu_pool, txg);
    }

    ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
}
Developer ID: harshada, Project: zfs, Lines of code: 75
Example 21: zil_lwb_commit

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
    lr_t *lrc = &itx->itx_lr; /* common log record */
    lr_write_t *lr = (lr_write_t *)lrc;
    uint64_t txg = lrc->lrc_txg;
    uint64_t reclen = lrc->lrc_reclen;
    uint64_t dlen;

    if (lwb == NULL)
        return (NULL);
    ASSERT(lwb->lwb_buf != NULL);

    if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
        dlen = P2ROUNDUP_TYPED(
            lr->lr_length, sizeof (uint64_t), uint64_t);
    else
        dlen = 0;

    zilog->zl_cur_used += (reclen + dlen);

    zil_lwb_write_init(zilog, lwb);

    /*
     * If this record won't fit in the current log block, start a new one.
     */
    if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
        lwb = zil_lwb_write_start(zilog, lwb);
        if (lwb == NULL)
            return (NULL);
        zil_lwb_write_init(zilog, lwb);
        ASSERT(lwb->lwb_nused == 0);
        if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
            txg_wait_synced(zilog->zl_dmu_pool, txg);
            return (lwb);
        }
    }

    /*
     * Update the lrc_seq to be the log record sequence number (see zil.h),
     * then copy the record to the log buffer.
     */
    lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
    bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);

    /*
     * If it's a write, fetch the data or get its blkptr as appropriate.
     */
    if (lrc->lrc_txtype == TX_WRITE) {
        if (txg > spa_freeze_txg(zilog->zl_spa))
            txg_wait_synced(zilog->zl_dmu_pool, txg);

        if (itx->itx_wr_state != WR_COPIED) {
            char *dbuf;
            int error;

            /* alignment is guaranteed */
            lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
            if (dlen) {
                ASSERT(itx->itx_wr_state == WR_NEED_COPY);
                dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
                lr->lr_common.lrc_reclen += dlen;
            } else {
                ASSERT(itx->itx_wr_state == WR_INDIRECT);
                dbuf = NULL;
            }
            error = zilog->zl_get_data(
                itx->itx_private, lr, dbuf, lwb->lwb_zio);
            if (error == EIO) {
                txg_wait_synced(zilog->zl_dmu_pool, txg);
                return (lwb);
            }
            if (error) {
                ASSERT(error == ENOENT || error == EEXIST ||
                    error == EALREADY);
                return (lwb);
            }
        }
    }

    lwb->lwb_nused += reclen + dlen;
    lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
    ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
    ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

    return (lwb);
}
Developer ID: harshada, Project: zfs, Lines of code: 86
Example 22: zfs_sb_teardown

// ... beginning of the function omitted here ...
         * We can safely read z_nr_znodes without locking because the
         * VFS has already blocked operations which add to the
         * z_all_znodes list and thus increment z_nr_znodes.
         */
        int round = 0;
        while (zsb->z_nr_znodes > 0) {
            taskq_wait_outstanding(dsl_pool_iput_taskq(
                dmu_objset_pool(zsb->z_os)), 0);
            if (++round > 1 && !unmounting)
                break;
        }
    }

    rrm_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

    if (!unmounting) {
        /*
         * We purge the parent filesystem's super block as the
         * parent filesystem and all of its snapshots have their
         * inode's super block set to the parent's filesystem's
         * super block. Note, 'z_parent' is self referential
         * for non-snapshots.
         */
        shrink_dcache_sb(zsb->z_parent->z_sb);
    }

    /*
     * Close the zil. NB: Can't close the zil while zfs_inactive
     * threads are blocked as zil_close can call zfs_inactive.
     */
    if (zsb->z_log) {
        zil_close(zsb->z_log);
        zsb->z_log = NULL;
    }

    rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

    /*
     * If we are not unmounting (ie: online recv) and someone already
     * unmounted this file system while we were doing the switcheroo,
     * or a reopen of z_os failed then just bail out now.
     */
    if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
        rw_exit(&zsb->z_teardown_inactive_lock);
        rrm_exit(&zsb->z_teardown_lock, FTAG);
        return (SET_ERROR(EIO));
    }

    /*
     * At this point there are no VFS ops active, and any new VFS ops
     * will fail with EIO since we have z_teardown_lock for writer (only
     * relevant for forced unmount).
     *
     * Release all holds on dbufs.
     */
    if (!unmounting) {
        mutex_enter(&zsb->z_znodes_lock);
        for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
            zp = list_next(&zsb->z_all_znodes, zp)) {
            if (zp->z_sa_hdl)
                zfs_znode_dmu_fini(zp);
        }
        mutex_exit(&zsb->z_znodes_lock);
    }

    /*
     * If we are unmounting, set the unmounted flag and let new VFS ops
     * unblock.  zfs_inactive will have the unmounted behavior, and all
     * other VFS ops will fail with EIO.
     */
    if (unmounting) {
        zsb->z_unmounted = B_TRUE;
        rrm_exit(&zsb->z_teardown_lock, FTAG);
        rw_exit(&zsb->z_teardown_inactive_lock);
    }

    /*
     * z_os will be NULL if there was an error in attempting to reopen
     * zsb, so just return as the properties had already been
     * unregistered and cached data had been evicted before.
     */
    if (zsb->z_os == NULL)
        return (0);

    /*
     * Unregister properties.
     */
    zfs_unregister_callbacks(zsb);

    /*
     * Evict cached data
     */
    if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
        !zfs_is_readonly(zsb))
        txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
    dmu_objset_evict_dbufs(zsb->z_os);

    return (0);
}
Developer ID: nwf, Project: zfs, Lines of code: 101
Example 23: zfs_vfs_unmount

// ... beginning of the function omitted here ...
        }
    }
    mutex_exit(&zfsvfs->z_znodes_lock);

    /*
     * Set the unmounted flag and let new vops unblock.
     * zfs_inactive will have the unmounted behavior, and all other
     * vops will fail with EIO.
     */
    zfsvfs->z_unmounted = B_TRUE;
    rw_exit(&zfsvfs->z_unmount_lock);
    rw_exit(&zfsvfs->z_unmount_inactive_lock);

    /*
     * Unregister properties.
     */
#ifndef __APPLE__
    if (!dmu_objset_is_snapshot(os))
        zfs_unregister_callbacks(zfsvfs);
#endif

    /*
     * Close the zil. NB: Can't close the zil while zfs_inactive
     * threads are blocked as zil_close can call zfs_inactive.
     */
    if (zfsvfs->z_log) {
        zil_close(zfsvfs->z_log);
        zfsvfs->z_log = NULL;
    }

    /*
     * Evict all dbufs so that cached znodes will be freed
     */
    if (dmu_objset_evict_dbufs(os, B_TRUE)) {
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
        (void) dmu_objset_evict_dbufs(os, B_FALSE);
    }

    /*
     * Finally close the objset
     */
    dmu_objset_close(os);

    /*
     * We can now safely destroy the '.zfs' directory node.
     */
#if 0
    if (zfsvfs->z_ctldir != NULL)
        zfsctl_destroy(zfsvfs);
#endif

    /*
     * Note that this work is normally done in zfs_freevfs, but since
     * there is no VOP_FREEVFS in OSX, we free VFS items here
     */
    OSDecrementAtomic((SInt32 *)&zfs_active_fs_count);
    for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
        mutex_destroy(&zfsvfs->z_hold_mtx[i]);
    mutex_destroy(&zfsvfs->z_znodes_lock);
    list_destroy(&zfsvfs->z_all_znodes);
    rw_destroy(&zfsvfs->z_unmount_lock);
    rw_destroy(&zfsvfs->z_unmount_inactive_lock);

    return (0);
}
Developer ID: roddi, Project: mac-zfs, Lines of code: 66
Example 24: zfsvfs_teardown

// ... beginning of the function omitted here ...
    rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);

    if (!unmounting) {
        /*
         * We purge the parent filesystem's vfsp as the parent
         * filesystem and all of its snapshots have their vnode's
         * v_vfsp set to the parent's filesystem's vfsp. Note,
         * 'z_parent' is self referential for non-snapshots.
         */
        (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
#ifdef FREEBSD_NAMECACHE
        cache_purgevfs(zfsvfs->z_parent->z_vfs);
#endif
    }

    /*
     * Close the zil. NB: Can't close the zil while zfs_inactive
     * threads are blocked as zil_close can call zfs_inactive.
     */
    if (zfsvfs->z_log) {
        zil_close(zfsvfs->z_log);
        zfsvfs->z_log = NULL;
    }

    rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);

    /*
     * If we are not unmounting (ie: online recv) and someone already
     * unmounted this file system while we were doing the switcheroo,
     * or a reopen of z_os failed then just bail out now.
     */
    if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
        rw_exit(&zfsvfs->z_teardown_inactive_lock);
        rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
        return (EIO);
    }

    /*
     * At this point there are no vops active, and any new vops will
     * fail with EIO since we have z_teardown_lock for writer (only
     * relevant for forced unmount).
     *
     * Release all holds on dbufs.
     */
    mutex_enter(&zfsvfs->z_znodes_lock);
    for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
        zp = list_next(&zfsvfs->z_all_znodes, zp))
        if (zp->z_dbuf) {
            ASSERT(ZTOV(zp)->v_count >= 0);
            zfs_znode_dmu_fini(zp);
        }
    mutex_exit(&zfsvfs->z_znodes_lock);

    /*
     * If we are unmounting, set the unmounted flag and let new vops
     * unblock.  zfs_inactive will have the unmounted behavior, and all
     * other vops will fail with EIO.
     */
    if (unmounting) {
        zfsvfs->z_unmounted = B_TRUE;
        rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
        rw_exit(&zfsvfs->z_teardown_inactive_lock);

#ifdef __FreeBSD__
        /*
         * Some znodes might not be fully reclaimed, wait for them.
         */
        mutex_enter(&zfsvfs->z_znodes_lock);
        while (list_head(&zfsvfs->z_all_znodes) != NULL) {
            msleep(zfsvfs, &zfsvfs->z_znodes_lock, 0,
                "zteardown", 0);
        }
        mutex_exit(&zfsvfs->z_znodes_lock);
#endif
    }

    /*
     * z_os will be NULL if there was an error in attempting to reopen
     * zfsvfs, so just return as the properties had already been
     * unregistered and cached data had been evicted before.
     */
    if (zfsvfs->z_os == NULL)
        return (0);

    /*
     * Unregister properties.
     */
    zfs_unregister_callbacks(zfsvfs);

    /*
     * Evict cached data
     */
    if (dmu_objset_evict_dbufs(zfsvfs->z_os)) {
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
        (void) dmu_objset_evict_dbufs(zfsvfs->z_os);
    }

    return (0);
}
Developer ID: DangerDexter, Project: FreeBSD-8.0-dyntick, Lines of code: 101
Example 25: zfs_sb_teardown

// ... part of the code omitted here ...
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
    znode_t *zp;

    rrw_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

    if (!unmounting) {
        /*
         * We purge the parent filesystem's super block as the
         * parent filesystem and all of its snapshots have their
         * inode's super block set to the parent's filesystem's
         * super block. Note, 'z_parent' is self referential
         * for non-snapshots.
         */
        shrink_dcache_sb(zsb->z_parent->z_sb);
    }

    /*
     * If someone has not already unmounted this file system,
     * drain the iput_taskq to ensure all active references to the
     * zfs_sb_t have been handled only then can it be safely destroyed.
     */
    if (zsb->z_os)
        taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));

    /*
     * Close the zil. NB: Can't close the zil while zfs_inactive
     * threads are blocked as zil_close can call zfs_inactive.
     */
    if (zsb->z_log) {
        zil_close(zsb->z_log);
        zsb->z_log = NULL;
    }

    rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

    /*
     * If we are not unmounting (ie: online recv) and someone already
     * unmounted this file system while we were doing the switcheroo,
     * or a reopen of z_os failed then just bail out now.
     */
    if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
        rw_exit(&zsb->z_teardown_inactive_lock);
        rrw_exit(&zsb->z_teardown_lock, FTAG);
        return (EIO);
    }

    /*
     * At this point there are no VFS ops active, and any new VFS ops
     * will fail with EIO since we have z_teardown_lock for writer (only
     * relevant for forced unmount).
     *
     * Release all holds on dbufs.
     */
    mutex_enter(&zsb->z_znodes_lock);
    for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
        zp = list_next(&zsb->z_all_znodes, zp)) {
        if (zp->z_sa_hdl) {
            ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
            zfs_znode_dmu_fini(zp);
        }
    }
    mutex_exit(&zsb->z_znodes_lock);

    /*
     * If we are unmounting, set the unmounted flag and let new VFS ops
     * unblock.  zfs_inactive will have the unmounted behavior, and all
     * other VFS ops will fail with EIO.
     */
    if (unmounting) {
        zsb->z_unmounted = B_TRUE;
        rrw_exit(&zsb->z_teardown_lock, FTAG);
        rw_exit(&zsb->z_teardown_inactive_lock);
    }

    /*
     * z_os will be NULL if there was an error in attempting to reopen
     * zsb, so just return as the properties had already been
     * unregistered and cached data had been evicted before.
     */
    if (zsb->z_os == NULL)
        return (0);

    /*
     * Unregister properties.
     */
    zfs_unregister_callbacks(zsb);

    /*
     * Evict cached data
     */
    if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
        !zfs_is_readonly(zsb))
        txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
    dmu_objset_evict_dbufs(zsb->z_os);

    return (0);
}
Developer ID: AB17, Project: zfs, Lines of code: 101
Example 26: zfs_objset_close

static void
zfs_objset_close(zfsvfs_t *zfsvfs)
{
    zfs_delete_t *zd = &zfsvfs->z_delete_head;
    znode_t *zp, *nextzp;
    objset_t *os = zfsvfs->z_os;

    /*
     * Stop all delete threads.
     */
    (void) zfs_delete_thread_target(zfsvfs, 0);

    /*
     * For forced unmount, at this point all vops except zfs_inactive
     * are erroring EIO. We need to now suspend zfs_inactive threads
     * while we are freeing dbufs before switching zfs_inactive
     * to use behaviour without a objset.
     */
    rw_enter(&zfsvfs->z_um_lock, RW_WRITER);

    /*
     * Release all delete in progress znodes
     * They will be processed when the file system remounts.
     */
    mutex_enter(&zd->z_mutex);
    while (zp = list_head(&zd->z_znodes)) {
        list_remove(&zd->z_znodes, zp);
        zp->z_dbuf_held = 0;
        dmu_buf_rele(zp->z_dbuf, NULL);
    }
    mutex_exit(&zd->z_mutex);

    /*
     * Release all holds on dbufs
     * Note, although we have stopped all other vop threads and
     * zfs_inactive(), the dmu can callback via znode_pageout_func()
     * which can zfs_znode_free() the znode.
     * So we lock z_all_znodes; search the list for a held
     * dbuf; drop the lock (we know zp can't disappear if we hold
     * a dbuf lock; then regrab the lock and restart.
     */
    mutex_enter(&zfsvfs->z_znodes_lock);
    for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
        nextzp = list_next(&zfsvfs->z_all_znodes, zp);
        if (zp->z_dbuf_held) {
            /* dbufs should only be held when force unmounting */
            zp->z_dbuf_held = 0;
            mutex_exit(&zfsvfs->z_znodes_lock);
            dmu_buf_rele(zp->z_dbuf, NULL);
            /* Start again */
            mutex_enter(&zfsvfs->z_znodes_lock);
            nextzp = list_head(&zfsvfs->z_all_znodes);
        }
    }
    mutex_exit(&zfsvfs->z_znodes_lock);

    /*
     * Unregister properties.
     */
    if (!dmu_objset_is_snapshot(os))
        zfs_unregister_callbacks(zfsvfs);

    /*
     * Switch zfs_inactive to behaviour without an objset.
     * It just tosses cached pages and frees the znode & vnode.
     * Then re-enable zfs_inactive threads in that new behaviour.
     */
    zfsvfs->z_unmounted2 = B_TRUE;
    rw_exit(&zfsvfs->z_um_lock); /* re-enable any zfs_inactive threads */

    /*
     * Close the zil. Can't close the zil while zfs_inactive
     * threads are blocked as zil_close can call zfs_inactive.
     */
    if (zfsvfs->z_log) {
        zil_close(zfsvfs->z_log);
        zfsvfs->z_log = NULL;
    }

    /*
     * Evict all dbufs so that cached znodes will be freed
     */
    if (dmu_objset_evict_dbufs(os, 1)) {
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
        (void) dmu_objset_evict_dbufs(os, 0);
    }

    /*
     * Finally close the objset
     */
    dmu_objset_close(os);

    /*
     * We can now safely destroy the '.zfs' directory node.
     */
    if (zfsvfs->z_ctldir != NULL)
        zfsctl_destroy(zfsvfs);
}
Developer ID: andreiw, Project: polaris, Lines of code: 99
Example 27: zvol_strategy

int
zvol_strategy(buf_t *bp)
{
    zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
    uint64_t off, volsize;
    size_t size, resid;
    char *addr;
    objset_t *os;
    int error = 0;
    int sync;
    int reading;
    int txg_sync_needed = B_FALSE;

    if (zv == NULL) {
        bioerror(bp, ENXIO);
        biodone(bp);
        return (0);
    }

    if (getminor(bp->b_edev) == 0) {
        bioerror(bp, EINVAL);
        biodone(bp);
        return (0);
    }

    if (zv->zv_readonly && !(bp->b_flags & B_READ)) {
        bioerror(bp, EROFS);
        biodone(bp);
        return (0);
    }

    off = ldbtob(bp->b_blkno);
    volsize = zv->zv_volsize;

    os = zv->zv_objset;
    ASSERT(os != NULL);
    sync = !(bp->b_flags & B_ASYNC) && !(zil_disable);

    bp_mapin(bp);
    addr = bp->b_un.b_addr;
    resid = bp->b_bcount;

    /*
     * There must be no buffer changes when doing a dmu_sync() because
     * we can't change the data whilst calculating the checksum.
     * A better approach than a per zvol rwlock would be to lock ranges.
     */
    reading = bp->b_flags & B_READ;
    if (reading || resid <= zvol_immediate_write_sz)
        rw_enter(&zv->zv_dslock, RW_READER);
    else
        rw_enter(&zv->zv_dslock, RW_WRITER);

    while (resid != 0 && off < volsize) {

        size = MIN(resid, 1UL << 20);   /* cap at 1MB per tx */

        if (size > volsize - off)       /* don't write past the end */
            size = volsize - off;

        if (reading) {
            error = dmu_read(os, ZVOL_OBJ, off, size, addr);
        } else {
            dmu_tx_t *tx = dmu_tx_create(os);
            dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
            error = dmu_tx_assign(tx, TXG_WAIT);
            if (error) {
                dmu_tx_abort(tx);
            } else {
                dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
                if (sync) {
                    /* use the ZIL to commit this write */
                    if (zvol_log_write(zv, tx, off, size,
                        addr) != 0) {
                        txg_sync_needed = B_TRUE;
                    }
                }
                dmu_tx_commit(tx);
            }
        }
        if (error)
            break;
        off += size;
        addr += size;
        resid -= size;
    }
    rw_exit(&zv->zv_dslock);

    if ((bp->b_resid = resid) == bp->b_bcount)
        bioerror(bp, off > volsize ? EINVAL : error);

    biodone(bp);

    if (sync) {
        if (txg_sync_needed)
            txg_wait_synced(dmu_objset_pool(os), 0);
        else
            zil_commit(zv->zv_zilog, UINT64_MAX, 0);
    }
// ... part of the code omitted here ...
Developer ID: andreiw, Project: polaris, Lines of code: 101
Note: The txg_wait_synced function examples in this article were collected from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective authors, and the copyright of each snippet belongs to its original author. Before redistributing or reusing the code, please consult the license of the corresponding project; do not republish without permission.