这篇教程C++ FLD_ISSET函数代码示例写得很实用,希望能帮到您。
/*
 * __wt_log_truncate_files --
 *	Truncate log files via archive once. Requires that the server is not
 *	currently running.
 */
int
__wt_log_truncate_files(
    WT_SESSION_IMPL *session, WT_CURSOR *cursor, const char *cfg[])
{
    WT_CONNECTION_IMPL *conn;
    WT_DECL_RET;
    WT_LOG *log;
    uint32_t backup_file;

    WT_UNUSED(cfg);
    conn = S2C(session);
    /* Nothing to do if logging isn't enabled on this connection. */
    if (!FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED))
        return (0);
    /*
     * Manual archiving conflicts with the archive server thread; refuse
     * rather than race with it.
     */
    if (F_ISSET(conn, WT_CONN_SERVER_RUN) &&
        FLD_ISSET(conn->log_flags, WT_CONN_LOG_ARCHIVE))
        WT_RET_MSG(session, EINVAL,
            "Attempt to archive manually while a server is running");

    log = conn->log;

    backup_file = 0;
    if (cursor != NULL)
        backup_file = WT_CURSOR_BACKUP_ID(cursor);
    WT_ASSERT(session, backup_file <= log->alloc_lsn.l.file);
    __wt_verbose(session, WT_VERB_LOG,
        "log_truncate_files: Archive once up to %" PRIu32, backup_file);

    /* Hold the archive lock so log cursors can't open mid-archive. */
    __wt_writelock(session, log->log_archive_lock);
    ret = __log_archive_once(session, backup_file);
    __wt_writeunlock(session, log->log_archive_lock);
    return (ret);
}
开发者ID:judahschvimer,项目名称:mongo,代码行数:37,
/*
 * __wt_btcur_range_truncate --
 *	Discard a cursor range from the tree.
 */
int
__wt_btcur_range_truncate(WT_CURSOR_BTREE *start, WT_CURSOR_BTREE *stop)
{
    WT_BTREE *btree;
    WT_CURSOR_BTREE *cbt;
    WT_DECL_RET;
    WT_SESSION_IMPL *session;

    /* Either endpoint may be NULL (open-ended range); use whichever is set. */
    cbt = (start != NULL) ? start : stop;
    session = (WT_SESSION_IMPL *)cbt->iface.session;
    btree = cbt->btree;

    /*
     * For recovery, we log the start and stop keys for a truncate
     * operation, not the individual records removed. On the other hand,
     * for rollback we need to keep track of all the in-memory operations.
     *
     * We deal with this here by logging the truncate range first, then (in
     * the logging code) disabling writing of the in-memory remove records
     * to disk.
     */
    if (FLD_ISSET(S2C(session)->log_flags, WT_CONN_LOG_ENABLED))
        WT_RET(__wt_txn_truncate_log(session, start, stop));

    switch (btree->type) {
    case BTREE_COL_FIX:
        WT_ERR(__cursor_truncate_fix(
            session, start, stop, __cursor_col_modify));
        break;
    case BTREE_COL_VAR:
        WT_ERR(__cursor_truncate(
            session, start, stop, __cursor_col_modify));
        break;
    case BTREE_ROW:
        /*
         * The underlying cursor comparison routine requires cursors be
         * fully instantiated when truncating row-store objects because
         * it's comparing page and/or skiplist positions, not keys. (Key
         * comparison would work, it's only that a key comparison would
         * be relatively expensive. Column-store objects have record
         * number keys, so the key comparison is cheap.) Cursors may
         * have only had their keys set, so we must ensure the cursors
         * are positioned in the tree.
         */
        if (start != NULL)
            WT_ERR(__wt_btcur_search(start));
        if (stop != NULL)
            WT_ERR(__wt_btcur_search(stop));
        WT_ERR(__cursor_truncate(
            session, start, stop, __cursor_row_modify));
        break;
    }

err:    /* Re-enable writing of in-memory removes, even on error. */
    if (FLD_ISSET(S2C(session)->log_flags, WT_CONN_LOG_ENABLED))
        WT_TRET(__wt_txn_truncate_end(session));
    return (ret);
}
开发者ID:3rf,项目名称:mongo,代码行数:61,
/*
 * __wt_meta_track_off --
 *	Turn off metadata operation tracking, unrolling on error.
 */
int
__wt_meta_track_off(WT_SESSION_IMPL *session, int need_sync, int unroll)
{
    WT_DECL_RET;
    WT_META_TRACK *trk, *trk_orig;

    WT_ASSERT(session,
        WT_META_TRACKING(session) && session->meta_track_nest > 0);

    trk_orig = session->meta_track;
    trk = session->meta_track_next;

    /* If it was a nested transaction, there is nothing to do. */
    if (--session->meta_track_nest != 0)
        return (0);

    /* Turn off tracking for unroll. */
    session->meta_track_next = session->meta_track_sub = NULL;

    /*
     * If there were no operations logged, return now and avoid unnecessary
     * metadata checkpoints. For example, this happens if attempting to
     * create a data source that already exists (or drop one that doesn't).
     */
    if (trk == trk_orig)
        return (0);

    /* Apply (or unroll) the tracked operations, newest first. */
    while (--trk >= trk_orig)
        WT_TRET(__meta_track_apply(session, trk, unroll));

    /*
     * Unroll operations don't need to flush the metadata.
     *
     * Also, if we don't have the metadata handle (e.g, we're in the
     * process of creating the metadata), we can't sync it.
     */
    if (unroll || ret != 0 || !need_sync || session->meta_dhandle == NULL)
        return (ret);

    /* If we're logging, make sure the metadata update was flushed. */
    if (FLD_ISSET(S2C(session)->log_flags, WT_CONN_LOG_ENABLED)) {
        if (!FLD_ISSET(S2C(session)->txn_logsync,
            WT_LOG_DSYNC | WT_LOG_FSYNC))
            WT_WITH_DHANDLE(session, session->meta_dhandle,
                ret = __wt_txn_checkpoint_log(session,
                0, WT_TXN_LOG_CKPT_SYNC, NULL));
    } else {
        /* No logging: checkpoint the metadata file and sync it. */
        WT_WITH_DHANDLE(session, session->meta_dhandle,
            ret = __wt_checkpoint(session, NULL));
        WT_RET(ret);
        WT_WITH_DHANDLE(session, session->meta_dhandle,
            ret = __wt_checkpoint_sync(session, NULL));
    }

    return (ret);
}
开发者ID:7segments,项目名称:mongo-1,代码行数:60,
/*
 * __wt_btcur_range_truncate --
 *	Discard a cursor range from the tree.
 */
int
__wt_btcur_range_truncate(WT_CURSOR_BTREE *start, WT_CURSOR_BTREE *stop)
{
    WT_BTREE *btree;
    WT_DECL_RET;
    WT_SESSION_IMPL *session;

    session = (WT_SESSION_IMPL *)start->iface.session;
    btree = start->btree;
    WT_STAT_DATA_INCR(session, cursor_truncate);

    /*
     * For recovery, log the start and stop keys for a truncate operation,
     * not the individual records removed. On the other hand, for rollback
     * we need to keep track of all the in-memory operations.
     *
     * We deal with this here by logging the truncate range first, then (in
     * the logging code) disabling writing of the in-memory remove records
     * to disk.
     */
    if (FLD_ISSET(S2C(session)->log_flags, WT_CONN_LOG_ENABLED))
        WT_RET(__wt_txn_truncate_log(session, start, stop));

    switch (btree->type) {
    case BTREE_COL_FIX:
        WT_ERR(__cursor_truncate_fix(
            session, start, stop, __cursor_col_modify));
        break;
    case BTREE_COL_VAR:
        WT_ERR(__cursor_truncate(
            session, start, stop, __cursor_col_modify));
        break;
    case BTREE_ROW:
        /*
         * The underlying cursor comparison routine requires cursors be
         * fully instantiated when truncating row-store objects because
         * it's comparing page and/or skiplist positions, not keys. (Key
         * comparison would work, it's only that a key comparison would
         * be relatively expensive, especially with custom collators.
         * Column-store objects have record number keys, so the key
         * comparison is cheap.) The session truncate code did cursor
         * searches when setting up the truncate so we're good to go: if
         * that ever changes, we'd need to do something here to ensure a
         * fully instantiated cursor.
         */
        WT_ERR(__cursor_truncate(
            session, start, stop, __cursor_row_modify));
        break;
    }

err:    /* Re-enable writing of in-memory removes, even on error. */
    if (FLD_ISSET(S2C(session)->log_flags, WT_CONN_LOG_ENABLED))
        __wt_txn_truncate_end(session);
    return (ret);
}
开发者ID:ksuarz,项目名称:mongo,代码行数:58,
/*
 * __wt_verbose_dump_log --
 *	Dump information about the logging subsystem.
 */
int
__wt_verbose_dump_log(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_LOG *log;

    conn = S2C(session);
    log = conn->log;

    WT_RET(__wt_msg(session, "%s", WT_DIVIDER));
    WT_RET(__wt_msg(session, "Logging subsystem: Enabled: %s",
        FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED) ? "yes" : "no"));
    if (!FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED))
        return (0);
    /*
     * Logging is enabled, print out the other information.
     */
    WT_RET(__wt_msg(session, "Archiving: %s",
        FLD_ISSET(conn->log_flags, WT_CONN_LOG_ARCHIVE) ? "yes" : "no"));
    WT_RET(__wt_msg(session, "Running downgraded: %s",
        FLD_ISSET(conn->log_flags, WT_CONN_LOG_DOWNGRADED) ? "yes" : "no"));
    WT_RET(__wt_msg(session, "Zero fill files: %s",
        FLD_ISSET(conn->log_flags, WT_CONN_LOG_ZERO_FILL) ? "yes" : "no"));
    WT_RET(__wt_msg(session, "Pre-allocate files: %s",
        conn->log_prealloc > 0 ? "yes" : "no"));
    WT_RET(__wt_msg(session, "Logging directory: %s", conn->log_path));
    WT_RET(__wt_msg(session, "Logging maximum file size: %" PRId64,
        (int64_t)conn->log_file_max));
    /* The sync tests are checked in decreasing order of strictness. */
    WT_RET(__wt_msg(session, "Log sync setting: %s",
        !FLD_ISSET(conn->txn_logsync, WT_LOG_SYNC_ENABLED) ? "none" :
        FLD_ISSET(conn->txn_logsync, WT_LOG_DSYNC) ? "dsync" :
        FLD_ISSET(conn->txn_logsync, WT_LOG_FLUSH) ? "write to OS" :
        FLD_ISSET(conn->txn_logsync, WT_LOG_FSYNC) ?
        "fsync to disk": "unknown sync setting"));
    WT_RET(__wt_msg(session, "Log record allocation alignment: %" PRIu32,
        log->allocsize));
    WT_RET(__wt_msg(session, "Current log file number: %" PRIu32,
        log->fileid));
    WT_RET(__wt_msg(session, "Current log version number: %" PRIu16,
        log->log_version));
    WT_RET(WT_LSN_MSG(&log->alloc_lsn, "Next allocation"));
    WT_RET(WT_LSN_MSG(&log->bg_sync_lsn, "Last background sync"));
    WT_RET(WT_LSN_MSG(&log->ckpt_lsn, "Last checkpoint"));
    WT_RET(WT_LSN_MSG(&log->sync_dir_lsn, "Last directory sync"));
    WT_RET(WT_LSN_MSG(&log->sync_lsn, "Last sync"));
    WT_RET(WT_LSN_MSG(&log->trunc_lsn, "Recovery truncate"));
    WT_RET(WT_LSN_MSG(&log->write_lsn, "Last written"));
    WT_RET(WT_LSN_MSG(&log->write_start_lsn, "Start of last written"));
    /*
     * If we wanted a dump of the slots, it would go here. Walking
     * the slot pool may not require a lock since they're statically
     * allocated, but output could be inconsistent without it.
     */

    return (0);
}
开发者ID:DINKIN,项目名称:mongo,代码行数:60,
/*
 * __curlog_close --
 *	WT_CURSOR.close method for the log cursor type.
 */
static int
__curlog_close(WT_CURSOR *cursor)
{
    WT_CONNECTION_IMPL *conn;
    WT_CURSOR_LOG *cl;
    WT_DECL_RET;
    WT_LOG *log;
    WT_SESSION_IMPL *session;

    CURSOR_API_CALL(cursor, session, close, NULL);
    cl = (WT_CURSOR_LOG *)cursor;
    conn = S2C(session);
    WT_ASSERT(session, FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED));
    log = conn->log;
    /*
     * Release the archive lock the open cursor held (it blocks log
     * archiving while a log cursor is open), then free the cursor's
     * buffers and scratch memory.
     */
    WT_TRET(__wt_readunlock(session, log->log_archive_lock));
    WT_TRET(__curlog_reset(cursor));
    __wt_free(session, cl->cur_lsn);
    __wt_free(session, cl->next_lsn);
    __wt_scr_free(session, &cl->logrec);
    __wt_scr_free(session, &cl->opkey);
    __wt_scr_free(session, &cl->opvalue);
    __wt_free(session, cl->packed_key);
    __wt_free(session, cl->packed_value);
    WT_TRET(__wt_cursor_close(cursor));

err:    API_END_RET(session, ret);
}
开发者ID:brianleepzx,项目名称:mongo,代码行数:31,
/*
 * __wt_direct_io_size_check --
 *	Return a size from the configuration, complaining if it's insufficient
 * for direct I/O.
 */
int
__wt_direct_io_size_check(WT_SESSION_IMPL *session,
    const char **cfg, const char *config_name, uint32_t *allocsizep)
{
    WT_CONFIG_ITEM cval;
    WT_CONNECTION_IMPL *conn;
    int64_t align;

    *allocsizep = 0;

    conn = S2C(session);

    WT_RET(__wt_config_gets(session, cfg, config_name, &cval));

    /*
     * This function exists as a place to hang this comment: if direct I/O
     * is configured, page sizes must be at least as large as any buffer
     * alignment as well as a multiple of the alignment. Linux gets unhappy
     * if you configure direct I/O and then don't do I/O in alignments and
     * units of its happy place.
     */
    if (FLD_ISSET(conn->direct_io,
        WT_FILE_TYPE_CHECKPOINT | WT_FILE_TYPE_DATA)) {
        align = (int64_t)conn->buffer_alignment;
        if (align != 0 && (cval.val < align || cval.val % align != 0))
            WT_RET_MSG(session, EINVAL,
                "when direct I/O is configured, the %s size must "
                "be at least as large as the buffer alignment as "
                "well as a multiple of the buffer alignment",
                config_name);
    }
    *allocsizep = (uint32_t)cval.val;
    return (0);
}
开发者ID:alabid,项目名称:mongo,代码行数:39,
/*
 * __ckpt_server_config --
 *	Parse and setup the checkpoint server options.
 */
static int
__ckpt_server_config(WT_SESSION_IMPL *session, const char **cfg, bool *startp)
{
    WT_CONFIG_ITEM cval;
    WT_CONNECTION_IMPL *conn;

    *startp = false;

    conn = S2C(session);

    WT_RET(__wt_config_gets(session, cfg, "checkpoint.wait", &cval));
    conn->ckpt_usecs = (uint64_t)cval.val * WT_MILLION;

    WT_RET(__wt_config_gets(session, cfg, "checkpoint.log_size", &cval));
    conn->ckpt_logsize = (wt_off_t)cval.val;

    /*
     * The checkpoint configuration requires a wait time and/or a log size,
     * if neither is set, we're not running at all. Checkpoints based on log
     * size also require logging be enabled.
     */
    if (conn->ckpt_usecs != 0 ||
        (conn->ckpt_logsize != 0 &&
        FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED))) {
        /*
         * If checkpointing based on log data, use a minimum of the
         * log file size. The logging subsystem has already been
         * initialized.
         */
        if (conn->ckpt_logsize != 0 &&
            FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED))
            conn->ckpt_logsize = WT_MAX(
                conn->ckpt_logsize, conn->log_file_max);
        /* Checkpoints are incompatible with in-memory configuration */
        WT_RET(__wt_config_gets(session, cfg, "in_memory", &cval));
        if (cval.val != 0)
            WT_RET_MSG(session, EINVAL,
                "checkpoint configuration incompatible with "
                "in-memory configuration");

        /* Reset the log-written counter used for log-size triggers. */
        __wt_log_written_reset(session);

        *startp = true;
    }

    return (0);
}
开发者ID:judahschvimer,项目名称:mongo,代码行数:51,
/*
 * __logmgr_version --
 *	Set up the versions in the log manager.
 */
static int
__logmgr_version(WT_SESSION_IMPL *session, bool reconfig)
{
    WT_CONNECTION_IMPL *conn;
    WT_LOG *log;
    bool downgrade;
    uint32_t first_record, lognum;
    uint16_t new_version;

    conn = S2C(session);
    log = conn->log;
    if (log == NULL)
        return (0);

    /*
     * Set the log file format versions based on compatibility versions
     * set in the connection. We must set this before we call log_open
     * to open or create a log file.
     *
     * Since the log version changed at a major release number we only need
     * to check the major number, not the minor number in the compatibility
     * setting.
     */
    if (conn->compat_major < WT_LOG_V2) {
        new_version = 1;
        first_record = WT_LOG_END_HEADER;
        downgrade = true;
    } else {
        new_version = WT_LOG_VERSION;
        first_record = WT_LOG_END_HEADER + log->allocsize;
        downgrade = false;
    }

    /*
     * If the version is the same, there is nothing to do.
     */
    if (log->log_version == new_version)
        return (0);
    /*
     * If we are reconfiguring and at a new version we need to force
     * the log file to advance so that we write out a log file at the
     * correct version. When we are downgrading we must force a checkpoint
     * and finally archive, even if disabled, so that all new version log
     * files are gone.
     *
     * All of the version changes must be handled with locks on reconfigure
     * because other threads may be changing log files, using pre-allocated
     * files.
     */
    /*
     * Set the version. If it is a live change the logging subsystem will
     * do other work as well to move to a new log file.
     */
    WT_RET(__wt_log_set_version(session, new_version,
        first_record, downgrade, reconfig, &lognum));
    if (reconfig && FLD_ISSET(conn->log_flags, WT_CONN_LOG_DOWNGRADED))
        WT_RET(__logmgr_force_ckpt(session, lognum));
    return (0);
}
开发者ID:mpobrien,项目名称:mongo,代码行数:63,
/*
 * __wt_logmgr_create --
 *	Initialize the log subsystem (before running recovery).
 */
int
__wt_logmgr_create(WT_SESSION_IMPL *session, const char *cfg[])
{
    WT_CONNECTION_IMPL *conn;
    WT_LOG *log;
    bool run;

    conn = S2C(session);

    /* Handle configuration. */
    WT_RET(__logmgr_config(session, cfg, &run, false));

    /* If logging is not configured, we're done. */
    if (!run)
        return (0);

    FLD_SET(conn->log_flags, WT_CONN_LOG_ENABLED);
    /*
     * Logging is on, allocate the WT_LOG structure and open the log file.
     */
    WT_RET(__wt_calloc_one(session, &conn->log));
    log = conn->log;
    WT_RET(__wt_spin_init(session, &log->log_lock, "log"));
    WT_RET(__wt_spin_init(session, &log->log_slot_lock, "log slot"));
    WT_RET(__wt_spin_init(session, &log->log_sync_lock, "log sync"));
    WT_RET(__wt_spin_init(session, &log->log_writelsn_lock,
        "log write LSN"));
    WT_RET(__wt_rwlock_alloc(session,
        &log->log_archive_lock, "log archive lock"));
    /* Direct I/O on log files requires buffer-alignment-sized records. */
    if (FLD_ISSET(conn->direct_io, WT_FILE_TYPE_LOG))
        log->allocsize =
            WT_MAX((uint32_t)conn->buffer_alignment, WT_LOG_ALIGN);
    else
        log->allocsize = WT_LOG_ALIGN;
    WT_INIT_LSN(&log->alloc_lsn);
    WT_INIT_LSN(&log->ckpt_lsn);
    WT_INIT_LSN(&log->first_lsn);
    WT_INIT_LSN(&log->sync_lsn);
    /*
     * We only use file numbers for directory sync, so this needs to
     * initialized to zero.
     */
    WT_ZERO_LSN(&log->sync_dir_lsn);
    WT_INIT_LSN(&log->trunc_lsn);
    WT_INIT_LSN(&log->write_lsn);
    WT_INIT_LSN(&log->write_start_lsn);
    log->fileid = 0;
    WT_RET(__wt_cond_alloc(
        session, "log sync", false, &log->log_sync_cond));
    WT_RET(__wt_cond_alloc(
        session, "log write", false, &log->log_write_cond));
    WT_RET(__wt_log_open(session));
    WT_RET(__wt_log_slot_init(session));

    return (0);
}
开发者ID:BobbWu,项目名称:wiredtiger,代码行数:60,
/*
 * __lsm_worker_general_op --
 *	Execute a single bloom, drop or flush work unit.
 */
static int
__lsm_worker_general_op(
    WT_SESSION_IMPL *session, WT_LSM_WORKER_ARGS *cookie, int *completed)
{
    WT_DECL_RET;
    WT_LSM_CHUNK *chunk;
    WT_LSM_WORK_UNIT *entry;
    int force;

    *completed = 0;
    /*
     * Return if this thread cannot process a bloom, drop or flush.
     */
    if (!FLD_ISSET(cookie->type,
        WT_LSM_WORK_BLOOM | WT_LSM_WORK_DROP | WT_LSM_WORK_FLUSH))
        return (WT_NOTFOUND);

    if ((ret = __wt_lsm_manager_pop_entry(
        session, cookie->type, &entry)) != 0 || entry == NULL)
        return (ret);

    if (entry->type == WT_LSM_WORK_FLUSH) {
        /* Remember the force flag, it's cleared before the flush. */
        force = F_ISSET(entry, WT_LSM_WORK_FORCE);
        F_CLR(entry, WT_LSM_WORK_FORCE);
        WT_ERR(__wt_lsm_get_chunk_to_flush(session,
            entry->lsm_tree, force, &chunk));
        /*
         * If we got a chunk to flush, checkpoint it.
         */
        if (chunk != NULL) {
            WT_ERR(__wt_verbose(session, WT_VERB_LSM,
                "Flush%s chunk %d %s",
                force ? " w/ force" : "", chunk->id, chunk->uri));
            ret = __wt_lsm_checkpoint_chunk(
                session, entry->lsm_tree, chunk);
            /* Drop the reference taken when finding the chunk. */
            WT_ASSERT(session, chunk->refcnt > 0);
            (void)WT_ATOMIC_SUB4(chunk->refcnt, 1);
            WT_ERR(ret);
        }
    } else if (entry->type == WT_LSM_WORK_DROP)
        WT_ERR(__wt_lsm_free_chunks(session, entry->lsm_tree));
    else if (entry->type == WT_LSM_WORK_BLOOM)
        WT_ERR(__wt_lsm_work_bloom(session, entry->lsm_tree));
    *completed = 1;

err:    __wt_lsm_manager_free_work_unit(session, entry);
    return (ret);
}
开发者ID:7segments,项目名称:mongo-1,代码行数:53,
/*
 * __wt_txn_log_op --
 *	Write the last logged operation into the in-memory buffer.
 */
int
__wt_txn_log_op(WT_SESSION_IMPL *session, WT_CURSOR_BTREE *cbt)
{
    WT_DECL_RET;
    WT_ITEM *logrec;
    WT_TXN *txn;
    WT_TXN_OP *op;

    txn = &session->txn;

    /* Skip if logging is off globally, for this session, or this tree. */
    if (!FLD_ISSET(S2C(session)->log_flags, WT_CONN_LOG_ENABLED) ||
        F_ISSET(session, WT_SESSION_NO_LOGGING) ||
        F_ISSET(S2BT(session), WT_BTREE_NO_LOGGING))
        return (0);

    /* We'd better have a transaction. */
    WT_ASSERT(session,
        F_ISSET(txn, WT_TXN_RUNNING) && F_ISSET(txn, WT_TXN_HAS_ID));

    WT_ASSERT(session, txn->mod_count > 0);
    /* The operation to log is the most recent modification. */
    op = txn->mod + txn->mod_count - 1;

    WT_RET(__txn_logrec_init(session));
    logrec = txn->logrec;

    switch (op->type) {
    case WT_TXN_OP_BASIC:
    case WT_TXN_OP_BASIC_TS:
        ret = __txn_op_log(session, logrec, op, cbt);
        break;
    case WT_TXN_OP_INMEM:
    case WT_TXN_OP_REF:
        /* Nothing to log, we're done. */
        break;
    case WT_TXN_OP_TRUNCATE_COL:
        ret = __wt_logop_col_truncate_pack(session, logrec,
            op->fileid,
            op->u.truncate_col.start, op->u.truncate_col.stop);
        break;
    case WT_TXN_OP_TRUNCATE_ROW:
        ret = __wt_logop_row_truncate_pack(session, txn->logrec,
            op->fileid,
            &op->u.truncate_row.start, &op->u.truncate_row.stop,
            (uint32_t)op->u.truncate_row.mode);
        break;
    }
    return (ret);
}
开发者ID:DINKIN,项目名称:mongo,代码行数:52,
/*
 * __log_server --
 *	The log server thread.
 */
static WT_THREAD_RET
__log_server(void *arg)
{
    WT_CONNECTION_IMPL *conn;
    WT_DECL_RET;
    WT_LOG *log;
    WT_SESSION_IMPL *session;
    u_int locked;

    session = arg;
    conn = S2C(session);
    log = conn->log;
    /* Track lock ownership so the error path can release it. */
    locked = 0;
    while (F_ISSET(conn, WT_CONN_LOG_SERVER_RUN)) {
        /*
         * Perform log pre-allocation.
         */
        if (conn->log_prealloc > 0)
            WT_ERR(__log_prealloc_once(session));

        /*
         * Perform the archive.
         */
        if (FLD_ISSET(conn->log_flags, WT_CONN_LOG_ARCHIVE)) {
            if (__wt_try_writelock(
                session, log->log_archive_lock) == 0) {
                locked = 1;
                WT_ERR(__log_archive_once(session, 0));
                WT_ERR( __wt_writeunlock(
                    session, log->log_archive_lock));
                locked = 0;
            } else
                WT_ERR(__wt_verbose(session, WT_VERB_LOG,
                    "log_archive: Blocked due to open log "
                    "cursor holding archive lock"));
        }
        /* Wait until the next event. */
        WT_ERR(__wt_cond_wait(session, conn->log_cond, WT_MILLION));
    }

    if (0) {
err:        __wt_err(session, ret, "log server error");
    }
    if (locked)
        (void)__wt_writeunlock(session, log->log_archive_lock);
    return (WT_THREAD_RET_VALUE);
}
开发者ID:christkv,项目名称:mongo,代码行数:51,
/*
 * __lsm_tree_start_worker --
 *	Start the worker thread for an LSM tree.
 */
static int
__lsm_tree_start_worker(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
{
    WT_CONNECTION *wt_conn;
    WT_LSM_WORKER_ARGS *wargs;
    WT_SESSION *wt_session;
    WT_SESSION_IMPL *s;
    uint32_t i;

    wt_conn = &S2C(session)->iface;

    /* The checkpoint worker gets its own internal session. */
    WT_RET(wt_conn->open_session(wt_conn, NULL, NULL, &wt_session));
    lsm_tree->ckpt_session = (WT_SESSION_IMPL *)wt_session;
    F_SET(lsm_tree->ckpt_session, WT_SESSION_INTERNAL);

    F_SET(lsm_tree, WT_LSM_TREE_WORKING);
    /* The new thread will rely on the WORKING value being visible. */
    WT_FULL_BARRIER();
    if (F_ISSET(S2C(session), WT_CONN_LSM_MERGE))
        for (i = 0; i < lsm_tree->merge_threads; i++) {
            WT_RET(wt_conn->open_session(
                wt_conn, NULL, NULL, &wt_session));
            s = (WT_SESSION_IMPL *)wt_session;
            F_SET(s, WT_SESSION_INTERNAL);
            lsm_tree->worker_sessions[i] = s;

            /* wargs ownership passes to the merge worker thread. */
            WT_RET(__wt_calloc_def(session, 1, &wargs));
            wargs->lsm_tree = lsm_tree;
            wargs->id = i;
            WT_RET(__wt_thread_create(session,
                &lsm_tree->worker_tids[i],
                __wt_lsm_merge_worker, wargs));
        }
    if (FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_NEWEST)) {
        WT_RET(wt_conn->open_session(wt_conn, NULL, NULL, &wt_session));
        lsm_tree->bloom_session = (WT_SESSION_IMPL *)wt_session;
        F_SET(lsm_tree->bloom_session, WT_SESSION_INTERNAL);

        WT_RET(__wt_thread_create(session,
            &lsm_tree->bloom_tid, __wt_lsm_bloom_worker, lsm_tree));
    }

    WT_RET(__wt_thread_create(session,
        &lsm_tree->ckpt_tid, __wt_lsm_checkpoint_worker, lsm_tree));

    return (0);
}
开发者ID:niumowm,项目名称:wiredtiger,代码行数:50,
/*
 * __wt_lsm_tree_worker --
 *	Run a schema worker operation on each level of a LSM tree.
 */
int
__wt_lsm_tree_worker(WT_SESSION_IMPL *session,
   const char *uri,
   int (*file_func)(WT_SESSION_IMPL *, const char *[]),
   int (*name_func)(WT_SESSION_IMPL *, const char *, int *),
   const char *cfg[], uint32_t open_flags)
{
    WT_DECL_RET;
    WT_LSM_CHUNK *chunk;
    WT_LSM_TREE *lsm_tree;
    u_int i;
    int exclusive, locked;

    locked = 0;
    exclusive = FLD_ISSET(open_flags, WT_DHANDLE_EXCLUSIVE) ? 1 : 0;
    WT_RET(__wt_lsm_tree_get(session, uri, exclusive, &lsm_tree));

    /*
     * We mark that we're busy using the tree to coordinate
     * with merges so that merging doesn't change the chunk
     * array out from underneath us.
     */
    WT_ERR(exclusive ?
        __wt_lsm_tree_writelock(session, lsm_tree) :
        __wt_lsm_tree_readlock(session, lsm_tree));
    locked = 1;
    for (i = 0; i < lsm_tree->nchunks; i++) {
        chunk = lsm_tree->chunk[i];
        /* On-disk chunks are already checkpointed; skip them. */
        if (file_func == __wt_checkpoint &&
            F_ISSET(chunk, WT_LSM_CHUNK_ONDISK))
            continue;
        WT_ERR(__wt_schema_worker(session, chunk->uri,
            file_func, name_func, cfg, open_flags));
        if (name_func == __wt_backup_list_uri_append &&
            F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
            WT_ERR(__wt_schema_worker(session, chunk->bloom_uri,
                file_func, name_func, cfg, open_flags));
    }
err:    if (locked)
        WT_TRET(exclusive ?
            __wt_lsm_tree_writeunlock(session, lsm_tree) :
            __wt_lsm_tree_readunlock(session, lsm_tree));
    __wt_lsm_tree_release(session, lsm_tree);
    return (ret);
}
开发者ID:RedSunCMX,项目名称:wiredtiger,代码行数:49,
/*
 * __wt_las_stats_update --
 *	Update the lookaside table statistics for return to the application.
 */
void
__wt_las_stats_update(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS **cstats;
    WT_DSRC_STATS **dstats;
    int64_t v;

    conn = S2C(session);

    /*
     * Lookaside table statistics are copied from the underlying lookaside
     * table data-source statistics. If there's no lookaside table, values
     * remain 0.
     */
    if (!F_ISSET(conn, WT_CONN_LAS_OPEN))
        return;

    /*
     * We have a cursor, and we need the underlying data handle; we can get
     * to it by way of the underlying btree handle, but it's a little ugly.
     */
    cstats = conn->stats;
    dstats = ((WT_CURSOR_BTREE *)
        conn->las_session->las_cursor)->btree->dhandle->stats;

    v = WT_STAT_READ(dstats, cursor_insert);
    WT_STAT_SET(session, cstats, cache_lookaside_insert, v);
    v = WT_STAT_READ(dstats, cursor_remove);
    WT_STAT_SET(session, cstats, cache_lookaside_remove, v);

    /*
     * If we're clearing stats we need to clear the cursor values we just
     * read. This does not clear the rest of the statistics in the
     * lookaside data source stat cursor, but we own that namespace so we
     * don't have to worry about users seeing inconsistent data source
     * information.
     */
    if (FLD_ISSET(conn->stat_flags, WT_CONN_STAT_CLEAR)) {
        WT_STAT_SET(session, dstats, cursor_insert, 0);
        WT_STAT_SET(session, dstats, cursor_remove, 0);
    }
}
开发者ID:hgGeorg,项目名称:mongo,代码行数:46,
/*
 * __wt_meta_track_init --
 *	Initialize metadata tracking.
 */
int
__wt_meta_track_init(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;

    conn = S2C(session);
    /*
     * A dedicated metadata-checkpoint session is only needed when logging
     * is disabled (with logging, metadata updates are made durable via
     * the log instead).
     */
    if (!FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED)) {
        WT_RET(__wt_open_internal_session(conn,
            "metadata-ckpt", false, WT_SESSION_NO_DATA_HANDLES,
            &conn->meta_ckpt_session));

        /*
         * Sessions default to read-committed isolation, we rely on
         * that for the correctness of metadata checkpoints.
         */
        WT_ASSERT(session, conn->meta_ckpt_session->txn.isolation ==
            WT_ISO_READ_COMMITTED);
    }

    return (0);
}
开发者ID:GYGit,项目名称:mongo,代码行数:25,
/*
 * __wt_lsm_tree_worker --
 *	Run a schema worker operation on each level of a LSM tree.
 */
int
__wt_lsm_tree_worker(WT_SESSION_IMPL *session,
   const char *uri,
   int (*func)(WT_SESSION_IMPL *, const char *[]),
   const char *cfg[], uint32_t open_flags)
{
    WT_DECL_RET;
    WT_LSM_CHUNK *chunk;
    WT_LSM_TREE *lsm_tree;
    u_int i;

    WT_RET(__wt_lsm_tree_get(session, uri,
        FLD_ISSET(open_flags, WT_BTREE_EXCLUSIVE) ? 1 : 0, &lsm_tree));
    for (i = 0; i < lsm_tree->nchunks; i++) {
        chunk = lsm_tree->chunk[i];
        /* On-disk chunks are already checkpointed; skip them. */
        if (func == __wt_checkpoint &&
            F_ISSET(chunk, WT_LSM_CHUNK_ONDISK))
            continue;
        WT_ERR(__wt_schema_worker(
            session, chunk->uri, func, cfg, open_flags));
    }
err:    __wt_lsm_tree_release(session, lsm_tree);
    return (ret);
}
开发者ID:niumowm,项目名称:wiredtiger,代码行数:28,
/*
 * __wt_logmgr_destroy --
 *	Destroy the log archiving server thread and logging subsystem.
 */
int
__wt_logmgr_destroy(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_DECL_RET;
    WT_SESSION *wt_session;

    conn = S2C(session);

    if (!FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED)) {
        /*
         * We always set up the log_path so printlog can work without
         * recovery. Therefore, always free it, even if logging isn't
         * on.
         */
        __wt_free(session, conn->log_path);
        return (0);
    }
    /* Stop each helper thread before tearing down its resources. */
    if (conn->log_tid_set) {
        WT_TRET(__wt_cond_signal(session, conn->log_cond));
        WT_TRET(__wt_thread_join(session, conn->log_tid));
        conn->log_tid_set = 0;
    }
    if (conn->log_file_tid_set) {
        WT_TRET(__wt_cond_signal(session, conn->log_file_cond));
        WT_TRET(__wt_thread_join(session, conn->log_file_tid));
        conn->log_file_tid_set = 0;
    }
    if (conn->log_file_session != NULL) {
        wt_session = &conn->log_file_session->iface;
        WT_TRET(wt_session->close(wt_session, NULL));
        conn->log_file_session = NULL;
    }
    if (conn->log_wrlsn_tid_set) {
        WT_TRET(__wt_cond_signal(session, conn->log_wrlsn_cond));
        WT_TRET(__wt_thread_join(session, conn->log_wrlsn_tid));
        conn->log_wrlsn_tid_set = 0;
    }
    if (conn->log_wrlsn_session != NULL) {
        wt_session = &conn->log_wrlsn_session->iface;
        WT_TRET(wt_session->close(wt_session, NULL));
        conn->log_wrlsn_session = NULL;
    }

    WT_TRET(__wt_log_slot_destroy(session));
    WT_TRET(__wt_log_close(session));

    /* Close the server thread's session. */
    if (conn->log_session != NULL) {
        wt_session = &conn->log_session->iface;
        WT_TRET(wt_session->close(wt_session, NULL));
        conn->log_session = NULL;
    }

    /* Destroy the condition variables now that all threads are stopped */
    WT_TRET(__wt_cond_destroy(session, &conn->log_cond));
    WT_TRET(__wt_cond_destroy(session, &conn->log_file_cond));
    WT_TRET(__wt_cond_destroy(session, &conn->log_wrlsn_cond));

    WT_TRET(__wt_cond_destroy(session, &conn->log->log_sync_cond));
    WT_TRET(__wt_cond_destroy(session, &conn->log->log_write_cond));
    WT_TRET(__wt_rwlock_destroy(session, &conn->log->log_archive_lock));
    __wt_spin_destroy(session, &conn->log->log_lock);
    __wt_spin_destroy(session, &conn->log->log_slot_lock);
    __wt_spin_destroy(session, &conn->log->log_sync_lock);
    __wt_spin_destroy(session, &conn->log->log_writelsn_lock);
    __wt_free(session, conn->log_path);
    __wt_free(session, conn->log);

    return (ret);
}
开发者ID:pgmarchenko,项目名称:mongo,代码行数:74,
/*
 * __wt_logmgr_open --
 *	Start the log service threads.
 */
int
__wt_logmgr_open(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;

    conn = S2C(session);

    /* If no log thread services are configured, we're done. */
    if (!FLD_ISSET(conn->log_flags, WT_CONN_LOG_ENABLED))
        return (0);

    /*
     * Start the log close thread. It is not configurable.
     * If logging is enabled, this thread runs.
     */
    WT_RET(__wt_open_internal_session(
        conn, "log-close-server", 0, 0, &conn->log_file_session));
    WT_RET(__wt_cond_alloc(conn->log_file_session,
        "log close server", 0, &conn->log_file_cond));

    /*
     * Start the log file close thread.
     */
    WT_RET(__wt_thread_create(conn->log_file_session,
        &conn->log_file_tid, __log_file_server, conn->log_file_session));
    conn->log_file_tid_set = 1;

    /*
     * Start the log write LSN thread. It is not configurable.
     * If logging is enabled, this thread runs.
     */
    WT_RET(__wt_open_internal_session(
        conn, "log-wrlsn-server", 0, 0, &conn->log_wrlsn_session));
    WT_RET(__wt_cond_alloc(conn->log_wrlsn_session,
        "log write lsn server", 0, &conn->log_wrlsn_cond));
    WT_RET(__wt_thread_create(conn->log_wrlsn_session,
        &conn->log_wrlsn_tid, __log_wrlsn_server,
        conn->log_wrlsn_session));
    conn->log_wrlsn_tid_set = 1;

    /* If no log thread services are configured, we're done. */
    if (!FLD_ISSET(conn->log_flags,
        (WT_CONN_LOG_ARCHIVE | WT_CONN_LOG_PREALLOC)))
        return (0);

    /*
     * If a log server thread exists, the user may have reconfigured
     * archiving or pre-allocation. Signal the thread. Otherwise the
     * user wants archiving and/or allocation and we need to start up
     * the thread.
     */
    if (conn->log_session != NULL) {
        WT_ASSERT(session, conn->log_cond != NULL);
        WT_ASSERT(session, conn->log_tid_set != 0);
        WT_RET(__wt_cond_signal(session, conn->log_cond));
    } else {
        /* The log server gets its own session. */
        WT_RET(__wt_open_internal_session(
            conn, "log-server", 0, 0, &conn->log_session));
        WT_RET(__wt_cond_alloc(conn->log_session,
            "log server", 0, &conn->log_cond));

        /*
         * Start the thread.
         */
        WT_RET(__wt_thread_create(conn->log_session,
            &conn->log_tid, __log_server, conn->log_session));
        conn->log_tid_set = 1;
    }

    return (0);
}
开发者ID:pgmarchenko,项目名称:mongo,代码行数:75,
/*
 * __log_server --
 *	The log server thread.
 */
static WT_THREAD_RET
__log_server(void *arg)
{
    WT_CONNECTION_IMPL *conn;
    WT_DECL_RET;
    WT_LOG *log;
    WT_SESSION_IMPL *session;
    int freq_per_sec, signalled;

    session = arg;
    conn = S2C(session);
    log = conn->log;
    signalled = 0;

    /*
     * Set this to the number of times per second we want to force out the
     * log slot buffer.
     */
#define	WT_FORCE_PER_SECOND	20
    freq_per_sec = WT_FORCE_PER_SECOND;

    /*
     * The log server thread does a variety of work. It forces out any
     * buffered log writes. It pre-allocates log files and it performs
     * log archiving. The reason the wrlsn thread does not force out
     * the buffered writes is because we want to process and move the
     * write_lsn forward as quickly as possible. The same reason applies
     * to why the log file server thread does not force out the writes.
     * That thread does fsync calls which can take a long time and we
     * don't want log records sitting in the buffer over the time it
     * takes to sync out an earlier file.
     */
    while (F_ISSET(conn, WT_CONN_LOG_SERVER_RUN)) {
        /*
         * Slots depend on future activity. Force out buffered
         * writes in case we are idle. This cannot be part of the
         * wrlsn thread because of interaction advancing the write_lsn
         * and a buffer may need to wait for the write_lsn to advance
         * in the case of a synchronous buffer. We end up with a hang.
         */
        WT_ERR_BUSY_OK(__wt_log_force_write(session, 0));

        /*
         * We don't want to archive or pre-allocate files as often as
         * we want to force out log buffers. Only do it once per second
         * or if the condition was signalled.
         */
        if (--freq_per_sec <= 0 || signalled != 0) {
            freq_per_sec = WT_FORCE_PER_SECOND;

            /*
             * Perform log pre-allocation.
             */
            if (conn->log_prealloc > 0)
                WT_ERR(__log_prealloc_once(session));

            /*
             * Perform the archive.
             */
            if (FLD_ISSET(conn->log_flags, WT_CONN_LOG_ARCHIVE)) {
                if (__wt_try_writelock(
                    session, log->log_archive_lock) == 0) {
                    ret = __log_archive_once(session, 0);
                    WT_TRET(__wt_writeunlock(
                        session, log->log_archive_lock));
                    WT_ERR(ret);
                } else
                    WT_ERR(
                        __wt_verbose(session, WT_VERB_LOG,
                        "log_archive: Blocked due to open "
                        "log cursor holding archive lock"));
            }
        }

        /* Wait until the next event. */
        WT_ERR(__wt_cond_wait_signal(session, conn->log_cond,
            WT_MILLION / WT_FORCE_PER_SECOND, &signalled));
    }

    if (0) {
err:        __wt_err(session, ret, "log server error");
    }
    return (WT_THREAD_RET_VALUE);
}
开发者ID:pgmarchenko,项目名称:mongo,代码行数:88,
示例22: __wt_lsm_checkpoint_chunk//.........这里部分代码省略......... flush_set = true; WT_ERR(__wt_verbose(session, WT_VERB_LSM, "LSM worker flushing %s", chunk->uri)); /* * Flush the file before checkpointing: this is the expensive part in * terms of I/O. * * !!! * We can wait here for checkpoints and fsyncs to complete, which can * take a long time. */ if ((ret = __wt_session_get_btree( session, chunk->uri, NULL, NULL, 0)) == 0) { /* * Set read-uncommitted: we have already checked that all of the * updates in this chunk are globally visible, use the cheapest * possible check in reconciliation. */ saved_isolation = session->txn.isolation; session->txn.isolation = WT_ISO_READ_UNCOMMITTED; ret = __wt_cache_op(session, NULL, WT_SYNC_WRITE_LEAVES); session->txn.isolation = saved_isolation; WT_TRET(__wt_session_release_btree(session)); } WT_ERR(ret); WT_ERR(__wt_verbose(session, WT_VERB_LSM, "LSM worker checkpointing %s", chunk->uri)); /* * Turn on metadata tracking to ensure the checkpoint gets the * necessary handle locks. * * Ensure that we don't race with a running checkpoint: the checkpoint * lock protects against us racing with an application checkpoint in * this chunk. Don't wait for it, though: checkpoints can take a long * time, and our checkpoint operation should be very quick. */ WT_ERR(__wt_meta_track_on(session)); WT_WITH_CHECKPOINT_LOCK(session, ret, WT_WITH_SCHEMA_LOCK(session, ret, ret = __wt_schema_worker( session, chunk->uri, __wt_checkpoint, NULL, NULL, 0))); WT_TRET(__wt_meta_track_off(session, false, ret != 0)); if (ret != 0) WT_ERR_MSG(session, ret, "LSM checkpoint"); /* Now the file is written, get the chunk size. */ WT_ERR(__wt_lsm_tree_set_chunk_size(session, chunk)); /* Update the flush timestamp to help track ongoing progress. */ WT_ERR(__wt_epoch(session, &lsm_tree->last_flush_ts)); ++lsm_tree->chunks_flushed; /* Lock the tree, mark the chunk as on disk and update the metadata. 
*/ WT_ERR(__wt_lsm_tree_writelock(session, lsm_tree)); F_SET(chunk, WT_LSM_CHUNK_ONDISK); ret = __wt_lsm_meta_write(session, lsm_tree); ++lsm_tree->dsk_gen; /* Update the throttle time. */ __wt_lsm_tree_throttle(session, lsm_tree, true); WT_TRET(__wt_lsm_tree_writeunlock(session, lsm_tree)); if (ret != 0) WT_ERR_MSG(session, ret, "LSM metadata write"); WT_PUBLISH(chunk->flushing, 0); flush_set = false; /* * Clear the no-eviction flag so the primary can be evicted and * eventually closed. Only do this once the checkpoint has succeeded: * otherwise, accessing the leaf page during the checkpoint can trigger * forced eviction. */ WT_ERR(__wt_session_get_btree(session, chunk->uri, NULL, NULL, 0)); __wt_btree_evictable(session, true); WT_ERR(__wt_session_release_btree(session)); /* Make sure we aren't pinning a transaction ID. */ __wt_txn_release_snapshot(session); WT_ERR(__wt_verbose(session, WT_VERB_LSM, "LSM worker checkpointed %s", chunk->uri)); /* Schedule a bloom filter create for our newly flushed chunk. */ if (!FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_OFF)) WT_ERR(__wt_lsm_manager_push_entry( session, WT_LSM_WORK_BLOOM, 0, lsm_tree)); else WT_ERR(__wt_lsm_manager_push_entry( session, WT_LSM_WORK_MERGE, 0, lsm_tree));err: if (flush_set) WT_PUBLISH(chunk->flushing, 0); return (ret);}
开发者ID:brianleepzx,项目名称:mongo,代码行数:101,
示例23: __wt_lsm_work_bloom
/*
 * __wt_lsm_work_bloom --
 *	Try to create a Bloom filter for the newest on-disk chunk that doesn't
 *	have one.
 */
int
__wt_lsm_work_bloom(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
{
	WT_DECL_RET;
	WT_LSM_CHUNK *chunk;
	WT_LSM_WORKER_COOKIE cookie;
	u_int created, slot;

	WT_CLEAR(cookie);
	WT_RET(__lsm_copy_chunks(session, lsm_tree, &cookie, false));

	/* Walk the pinned chunk list looking for a candidate. */
	created = 0;
	for (slot = 0; slot < cookie.nchunks; slot++) {
		chunk = cookie.chunk_array[slot];

		/* Candidates must already be checkpointed. */
		if (!F_ISSET(chunk, WT_LSM_CHUNK_ONDISK))
			continue;
		/* Skip chunks with a filter or with a thread still active. */
		if (F_ISSET(chunk,
		    WT_LSM_CHUNK_BLOOM | WT_LSM_CHUNK_MERGING))
			continue;
		/* Only first-generation, non-empty chunks are suitable. */
		if (chunk->generation > 0 || chunk->count == 0)
			continue;

		/*
		 * Never create a bloom filter on the oldest chunk unless
		 * explicitly configured to do so.
		 */
		if (chunk == lsm_tree->chunk[0] &&
		    !FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_OLDEST))
			continue;

		/*
		 * Race to switch on the "busy" flag; the winner re-checks
		 * that the chunk still needs a Bloom filter before building.
		 */
		if (__wt_atomic_cas32(&chunk->bloom_busy, 0, 1)) {
			if (!F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
				ret = __lsm_bloom_create(
				    session, lsm_tree, chunk, slot);
				/*
				 * Remember success so a merge work unit can
				 * be pushed afterward.
				 */
				if (ret == 0)
					created = 1;
			}
			chunk->bloom_busy = 0;
			break;
		}
	}

	/* A newly created bloom filter makes merge work worthwhile. */
	if (created)
		WT_ERR(__wt_lsm_manager_push_entry(
		    session, WT_LSM_WORK_MERGE, 0, lsm_tree));

err:	__lsm_unpin_chunks(session, &cookie);
	__wt_free(session, cookie.chunk_array);
	return (ret);
}
开发者ID:brianleepzx,项目名称:mongo,代码行数:67,
示例24: __wt_txn_recover/* * __wt_txn_recover -- * Run recovery. */int__wt_txn_recover(WT_SESSION_IMPL *session){ WT_CONNECTION_IMPL *conn; WT_CURSOR *metac; WT_DECL_RET; WT_RECOVERY r; struct WT_RECOVERY_FILE *metafile; char *config; bool eviction_started, needs_rec, was_backup; conn = S2C(session); WT_CLEAR(r); WT_INIT_LSN(&r.ckpt_lsn); eviction_started = false; was_backup = F_ISSET(conn, WT_CONN_WAS_BACKUP); /* We need a real session for recovery. */ WT_RET(__wt_open_internal_session(conn, "txn-recover", false, WT_SESSION_NO_LOGGING, &session)); r.session = session; F_SET(conn, WT_CONN_RECOVERING); WT_ERR(__wt_metadata_search(session, WT_METAFILE_URI, &config)); WT_ERR(__recovery_setup_file(&r, WT_METAFILE_URI, config)); WT_ERR(__wt_metadata_cursor_open(session, NULL, &metac)); metafile = &r.files[WT_METAFILE_ID]; metafile->c = metac; /* * If no log was found (including if logging is disabled), or if the * last checkpoint was done with logging disabled, recovery should not * run. Scan the metadata to figure out the largest file ID. */ if (!FLD_ISSET(S2C(session)->log_flags, WT_CONN_LOG_EXISTED) || WT_IS_MAX_LSN(&metafile->ckpt_lsn)) { WT_ERR(__recovery_file_scan(&r)); conn->next_file_id = r.max_fileid; goto done; } /* * First, do a pass through the log to recover the metadata, and * establish the last checkpoint LSN. Skip this when opening a hot * backup: we already have the correct metadata in that case. */ if (!was_backup) { r.metadata_only = true; /* * If this is a read-only connection, check if the checkpoint * LSN in the metadata file is up to date, indicating a clean * shutdown. 
*/ if (F_ISSET(conn, WT_CONN_READONLY)) { WT_ERR(__wt_log_needs_recovery( session, &metafile->ckpt_lsn, &needs_rec)); if (needs_rec) WT_ERR_MSG(session, WT_RUN_RECOVERY, "Read-only database needs recovery"); } if (WT_IS_INIT_LSN(&metafile->ckpt_lsn)) WT_ERR(__wt_log_scan(session, NULL, WT_LOGSCAN_FIRST, __txn_log_recover, &r)); else { /* * Start at the last checkpoint LSN referenced in the * metadata. If we see the end of a checkpoint while * scanning, we will change the full scan to start from * there. */ r.ckpt_lsn = metafile->ckpt_lsn; ret = __wt_log_scan(session, &metafile->ckpt_lsn, 0, __txn_log_recover, &r); if (ret == ENOENT) ret = 0; WT_ERR(ret); } } /* Scan the metadata to find the live files and their IDs. */ WT_ERR(__recovery_file_scan(&r)); /* * We no longer need the metadata cursor: close it to avoid pinning any * resources that could block eviction during recovery. */ r.files[0].c = NULL; WT_ERR(metac->close(metac)); /* * Now, recover all the files apart from the metadata. * Pass WT_LOGSCAN_RECOVER so that old logs get truncated. */ r.metadata_only = false; WT_ERR(__wt_verbose(session, WT_VERB_RECOVERY, "Main recovery loop: starting at %" PRIu32 "/%" PRIu32,//.........这里部分代码省略.........
开发者ID:ChineseDr,项目名称:mongo,代码行数:101,
示例25: __wt_open/* * __wt_open -- * Open a file handle. */int__wt_open(WT_SESSION_IMPL *session, const char *name, int ok_create, int exclusive, int dio_type, WT_FH **fhp){ DWORD dwCreationDisposition; HANDLE filehandle, filehandle_secondary; WT_CONNECTION_IMPL *conn; WT_DECL_RET; WT_FH *fh, *tfh; uint64_t bucket, hash; int direct_io, f, matched, share_mode; char *path; conn = S2C(session); fh = NULL; path = NULL; filehandle = INVALID_HANDLE_VALUE; filehandle_secondary = INVALID_HANDLE_VALUE; direct_io = 0; hash = __wt_hash_city64(name, strlen(name)); bucket = hash % WT_HASH_ARRAY_SIZE; WT_RET(__wt_verbose(session, WT_VERB_FILEOPS, "%s: open", name)); /* Increment the reference count if we already have the file open. */ matched = 0; __wt_spin_lock(session, &conn->fh_lock); SLIST_FOREACH(tfh, &conn->fhhash[bucket], l) if (strcmp(name, tfh->name) == 0) { ++tfh->ref; *fhp = tfh; matched = 1; break; } __wt_spin_unlock(session, &conn->fh_lock); if (matched) return (0); /* For directories, create empty file handles with invalid handles */ if (dio_type == WT_FILE_TYPE_DIRECTORY) { goto setupfh; } WT_RET(__wt_filename(session, name, &path)); share_mode = FILE_SHARE_READ | FILE_SHARE_WRITE; /* * Security: * The application may spawn a new process, and we don't want another * process to have access to our file handles. * * TODO: Set tighter file permissions but set bInheritHandle to false * to prevent inheritance */ f = FILE_ATTRIBUTE_NORMAL; dwCreationDisposition = 0; if (ok_create) { dwCreationDisposition = CREATE_NEW; if (exclusive) dwCreationDisposition = CREATE_ALWAYS; } else dwCreationDisposition = OPEN_EXISTING; if (dio_type && FLD_ISSET(conn->direct_io, dio_type)) { f |= FILE_FLAG_NO_BUFFERING | FILE_FLAG_WRITE_THROUGH; direct_io = 1; } if (dio_type == WT_FILE_TYPE_LOG && FLD_ISSET(conn->txn_logsync, WT_LOG_DSYNC)) { f |= FILE_FLAG_WRITE_THROUGH; } /* Disable read-ahead on trees: it slows down random read workloads. 
*/ if (dio_type == WT_FILE_TYPE_DATA || dio_type == WT_FILE_TYPE_CHECKPOINT) f |= FILE_FLAG_RANDOM_ACCESS; filehandle = CreateFileA(path, (GENERIC_READ | GENERIC_WRITE), share_mode, NULL, dwCreationDisposition, f, NULL); if (filehandle == INVALID_HANDLE_VALUE) { if (GetLastError() == ERROR_FILE_EXISTS && ok_create) filehandle = CreateFileA(path, (GENERIC_READ | GENERIC_WRITE), share_mode, NULL, OPEN_EXISTING, f, NULL);//.........这里部分代码省略.........
开发者ID:deepinit-arek,项目名称:wiredtiger,代码行数:101,
示例26: __lsm_worker
/*
 * __lsm_worker --
 *	A thread that executes work units for all open LSM trees.
 *
 * The "entry" work unit is freed at the err: label, so it must always be
 * either NULL or owned by this thread when an error path is taken.
 */
static WT_THREAD_RET
__lsm_worker(void *arg)
{
	WT_CONNECTION_IMPL *conn;
	WT_DECL_RET;
	WT_LSM_WORK_UNIT *entry;
	WT_LSM_WORKER_ARGS *cookie;
	WT_SESSION_IMPL *session;
	int progress, ran;

	cookie = (WT_LSM_WORKER_ARGS *)arg;
	session = cookie->session;
	conn = S2C(session);

	entry = NULL;
	while (F_ISSET(conn, WT_CONN_SERVER_RUN) &&
	    F_ISSET(cookie, WT_LSM_WORKER_RUN)) {
		progress = 0;

		/*
		 * Workers process the different LSM work queues.  Some workers
		 * can handle several or all work unit types, so the code is
		 * prioritized: important operations happen first and switches
		 * are the highest priority.
		 */
		while (FLD_ISSET(cookie->type, WT_LSM_WORK_SWITCH) &&
		    (ret = __wt_lsm_manager_pop_entry(
		    session, WT_LSM_WORK_SWITCH, &entry)) == 0 &&
		    entry != NULL)
			WT_ERR(
			    __wt_lsm_work_switch(session, &entry, &progress));
		/* Flag an error if the pop failed. */
		WT_ERR(ret);

		/*
		 * Next the general operations.  EBUSY and WT_NOTFOUND mean
		 * "nothing to do right now", not failure.
		 */
		ret = __lsm_worker_general_op(session, cookie, &ran);
		if (ret == EBUSY || ret == WT_NOTFOUND)
			ret = 0;
		WT_ERR(ret);
		progress = progress || ran;

		/*
		 * Finally see if there is any merge work we can do.  This is
		 * last because the earlier operations may result in adding
		 * merge work to the queue.
		 */
		if (FLD_ISSET(cookie->type, WT_LSM_WORK_MERGE) &&
		    (ret = __wt_lsm_manager_pop_entry(
		    session, WT_LSM_WORK_MERGE, &entry)) == 0 &&
		    entry != NULL) {
			WT_ASSERT(session, entry->type == WT_LSM_WORK_MERGE);
			ret = __wt_lsm_merge(session,
			    entry->lsm_tree, cookie->id);
			/*
			 * A WT_NOTFOUND merge clears the tree's compacting
			 * flag; both it and EBUSY are treated as non-fatal.
			 */
			if (ret == WT_NOTFOUND) {
				F_CLR(entry->lsm_tree, WT_LSM_TREE_COMPACTING);
				ret = 0;
			} else if (ret == EBUSY)
				ret = 0;

			/* Paranoia: clear session state. */
			session->dhandle = NULL;

			__wt_lsm_manager_free_work_unit(session, entry);
			entry = NULL;
			progress = 1;
		}
		/* Flag an error if the pop failed. */
		WT_ERR(ret);

		/* Don't busy wait if there was any work to do. */
		if (!progress) {
			WT_ERR(
			    __wt_cond_wait(session, cookie->work_cond, 10000));
			continue;
		}
	}

	if (ret != 0) {
		/* Any error in an LSM worker thread is fatal. */
err:		__wt_lsm_manager_free_work_unit(session, entry);
		WT_PANIC_MSG(session, ret,
		    "Error in LSM worker thread %d", cookie->id);
	}
	return (WT_THREAD_RET_VALUE);
}
开发者ID:7segments,项目名称:mongo-1,代码行数:90,
示例27: __lsm_tree_close
/*
 * __lsm_tree_close --
 *	Close an LSM tree structure: stop its worker/checkpoint/bloom
 *	threads, then close their sessions.
 */
static int
__lsm_tree_close(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
{
	WT_DECL_RET;
	WT_SESSION *wt_session;
	WT_SESSION_IMPL *s;
	uint32_t i;

	/* If the tree is active, signal shutdown and join every thread. */
	if (F_ISSET(lsm_tree, WT_LSM_TREE_WORKING)) {
		F_CLR(lsm_tree, WT_LSM_TREE_WORKING);
		if (F_ISSET(S2C(session), WT_CONN_LSM_MERGE))
			for (i = 0; i < lsm_tree->merge_threads; i++)
				WT_TRET(__wt_thread_join(
				    session, lsm_tree->worker_tids[i]));
		WT_TRET(__wt_thread_join(session, lsm_tree->ckpt_tid));
		/* The bloom thread only exists for the "newest" config. */
		if (FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_NEWEST))
			WT_TRET(__wt_thread_join(session, lsm_tree->bloom_tid));
	}

	/*
	 * Close the worker thread sessions and free their hazard arrays
	 * (necessary because we set WT_SESSION_INTERNAL to simplify shutdown
	 * ordering).
	 *
	 * Do this in the main thread to avoid deadlocks.
	 */
	for (i = 0; i < lsm_tree->merge_threads; i++) {
		if ((s = lsm_tree->worker_sessions[i]) == NULL)
			continue;
		lsm_tree->worker_sessions[i] = NULL;

		/* Propagate the caller's schema-lock flag before closing. */
		F_SET(s, F_ISSET(session, WT_SESSION_SCHEMA_LOCKED));
		wt_session = &s->iface;
		WT_TRET(wt_session->close(wt_session, NULL));

		/*
		 * This is safe after the close because session handles are
		 * not freed, but are managed by the connection.
		 */
		__wt_free(NULL, s->hazard);
	}

	if (lsm_tree->bloom_session != NULL) {
		F_SET(lsm_tree->bloom_session,
		    F_ISSET(session, WT_SESSION_SCHEMA_LOCKED));
		wt_session = &lsm_tree->bloom_session->iface;
		WT_TRET(wt_session->close(wt_session, NULL));

		/*
		 * This is safe after the close because session handles are
		 * not freed, but are managed by the connection.
		 */
		__wt_free(NULL, lsm_tree->bloom_session->hazard);
	}

	if (lsm_tree->ckpt_session != NULL) {
		F_SET(lsm_tree->ckpt_session,
		    F_ISSET(session, WT_SESSION_SCHEMA_LOCKED));
		wt_session = &lsm_tree->ckpt_session->iface;
		WT_TRET(wt_session->close(wt_session, NULL));

		/*
		 * This is safe after the close because session handles are
		 * not freed, but are managed by the connection.
		 */
		__wt_free(NULL, lsm_tree->ckpt_session->hazard);
	}

	/* Any accumulated close/join failure during shutdown is fatal. */
	if (ret != 0) {
		__wt_err(session, ret, "shutdown error while cleaning up LSM");
		(void)__wt_panic(session);
	}

	return (ret);
}
开发者ID:niumowm,项目名称:wiredtiger,代码行数:79,
示例28: __wt_lsm_tree_create/* * __wt_lsm_tree_create -- * Create an LSM tree structure for the given name. */int__wt_lsm_tree_create(WT_SESSION_IMPL *session, const char *uri, int exclusive, const char *config){ WT_CONFIG_ITEM cval; WT_DECL_ITEM(buf); WT_DECL_RET; WT_LSM_TREE *lsm_tree; const char *cfg[] = API_CONF_DEFAULTS(session, create, config); const char *tmpconfig; /* If the tree is open, it already exists. */ if ((ret = __wt_lsm_tree_get(session, uri, 0, &lsm_tree)) == 0) { __wt_lsm_tree_release(session, lsm_tree); return (exclusive ? EEXIST : 0); } WT_RET_NOTFOUND_OK(ret); /* * If the tree has metadata, it already exists. * * !!! * Use a local variable: we don't care what the existing configuration * is, but we don't want to overwrite the real config. */ if (__wt_metadata_read(session, uri, &tmpconfig) == 0) { __wt_free(session, tmpconfig); return (exclusive ? EEXIST : 0); } WT_RET_NOTFOUND_OK(ret); WT_RET(__wt_config_gets(session, cfg, "key_format", &cval)); if (WT_STRING_MATCH("r", cval.str, cval.len)) WT_RET_MSG(session, EINVAL, "LSM trees cannot be configured as column stores"); WT_RET(__wt_calloc_def(session, 1, &lsm_tree)); WT_RET(__lsm_tree_set_name(session, lsm_tree, uri)); WT_ERR(__wt_config_gets(session, cfg, "key_format", &cval)); WT_ERR(__wt_strndup(session, cval.str, cval.len, &lsm_tree->key_format)); WT_ERR(__wt_config_gets(session, cfg, "value_format", &cval)); WT_ERR(__wt_strndup(session, cval.str, cval.len, &lsm_tree->value_format)); WT_ERR(__wt_config_gets(session, cfg, "lsm_bloom", &cval)); FLD_SET(lsm_tree->bloom, (cval.val == 0 ? 
WT_LSM_BLOOM_OFF : WT_LSM_BLOOM_MERGED)); WT_ERR(__wt_config_gets(session, cfg, "lsm_bloom_newest", &cval)); if (cval.val != 0) FLD_SET(lsm_tree->bloom, WT_LSM_BLOOM_NEWEST); WT_ERR(__wt_config_gets(session, cfg, "lsm_bloom_oldest", &cval)); if (cval.val != 0) FLD_SET(lsm_tree->bloom, WT_LSM_BLOOM_OLDEST); if (FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_OFF) && (FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_NEWEST) || FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_OLDEST))) WT_ERR_MSG(session, EINVAL, "Bloom filters can only be created on newest and oldest " "chunks if bloom filters are enabled"); WT_ERR(__wt_config_gets(session, cfg, "lsm_bloom_config", &cval)); if (cval.type == ITEM_STRUCT) { cval.str++; cval.len -= 2; } WT_ERR(__wt_strndup(session, cval.str, cval.len, &lsm_tree->bloom_config)); WT_ERR(__wt_config_gets(session, cfg, "lsm_bloom_bit_count", &cval)); lsm_tree->bloom_bit_count = (uint32_t)cval.val; WT_ERR(__wt_config_gets(session, cfg, "lsm_bloom_hash_count", &cval)); lsm_tree->bloom_hash_count = (uint32_t)cval.val; WT_ERR(__wt_config_gets(session, cfg, "lsm_chunk_size", &cval)); lsm_tree->chunk_size = (uint32_t)cval.val; WT_ERR(__wt_config_gets(session, cfg, "lsm_merge_max", &cval)); lsm_tree->merge_max = (uint32_t)cval.val; WT_ERR(__wt_config_gets(session, cfg, "lsm_merge_threads", &cval)); lsm_tree->merge_threads = (uint32_t)cval.val; /* Sanity check that api_data.py is in sync with lsm.h */ WT_ASSERT(session, lsm_tree->merge_threads <= WT_LSM_MAX_WORKERS); WT_ERR(__wt_scr_alloc(session, 0, &buf)); WT_ERR(__wt_buf_fmt(session, buf, "%s,key_format=u,value_format=u", config)); lsm_tree->file_config = __wt_buf_steal(session, buf, NULL); /* Create the first chunk and flush the metadata. */ WT_ERR(__wt_lsm_meta_write(session, lsm_tree)); /* Discard our partially populated handle. */ ret = __lsm_tree_discard(session, lsm_tree); lsm_tree = NULL;//.........这里部分代码省略.........
开发者ID:niumowm,项目名称:wiredtiger,代码行数:101,
注:本文中的FLD_ISSET函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 C++ FLIGHT_MODE函数代码示例 C++ FLDATA函数代码示例 |