
Tutorial: C++ verify Function Code Examples

51自学网 2021-06-03 09:37:45

This article collects typical usage examples of the verify function in C++. If you have been wondering how verify is used in C++, what concrete calls look like, or where to find real-world examples, the hand-picked code samples below may help.

A total of 28 verify code examples are shown below, sorted by popularity by default.
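Note that verify is not part of the C++ standard library: each project quoted below (MongoDB, ZFS, RPCS3, reicast, and so on) ships its own verify, typically an assertion that remains active in release builds. The sketch below is a hypothetical definition for illustration only, not any of these projects' actual macro; it just shows the general shape such an assertion usually takes.

// A hypothetical verify-style assertion macro (illustrative only; every
// project in the examples defines its own variant with different behavior).
#include <cstdio>
#include <cstdlib>

#define verify(expr)                                                \
    do {                                                            \
        if (!(expr)) {                                              \
            std::fprintf(stderr, "verify failed: %s (%s:%d)\n",     \
                         #expr, __FILE__, __LINE__);                \
            std::abort();                                           \
        }                                                           \
    } while (0)

int main()
{
    int value = 42;
    verify(value == 42);              // condition holds, execution continues
    // verify(!"should not happen");  // !"..." is always false, so this would abort (compare Example 8)
    return 0;
}

The exact behavior differs from project to project (some log and throw, others abort outright), so treat this sketch only as a reading aid for the examples that follow.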

Example 1: main

//.........这里部分代码省略.........	prot_r = (MFS_Prot_t*)malloc(sizeof(MFS_Prot_t));	printf("Started listening at port %d/n", portnum);	    while (1) {		struct sockaddr_in s;		rc = UDP_Read(sd, &s, (char*)prot_r, sizeof(MFS_Prot_t));		if (rc > 0) {						//Special case for shutdown			if(prot_r->cmd == CMD_INIT){				printf("Server initialized/n");				prot_r->ret = 0;			} else if(prot_r->cmd == CMD_LOOKUP){								prot_r->ret = -1;				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);				prot_r->ret = lookup(block_ptr, parent_inode, &(prot_r->datapacket[0]));			} else if(prot_r->cmd == CMD_SHUTDOWN){				//Close file				rc = close(fd);				if(rc < 0){					fprintf(stderr, "Cannot open file");					exit(1);				}				prot_r->ret = 0;				if(UDP_Write(sd, &s, (char*)prot_r, sizeof(MFS_Prot_t)) < -1){					fprintf(stderr, "Unable to send result");					exit(1);				}				exit(0);			} else if(prot_r->cmd == CMD_UNLINK){								verify(&header, &block_ptr, 16384);				prot_r->ret = -1;				MFS_Inode_t* parent_inode = fix_inode(header, prot_r->pinum);				if(parent_inode != NULL && parent_inode->type == MFS_DIRECTORY){					int exist = lookup(block_ptr, parent_inode, &(prot_r->datapacket[0]));					if(exist != -1){						//Check if empty						MFS_Inode_t* this_inode = fix_inode(header, exist);						if(!(this_inode->type == MFS_DIRECTORY && this_inode->size != 0)){							//Need to remove							MFS_DirEnt_t* new_dir_entry = allot_space(&header, MFS_BLOCK_SIZE, &entry_offset);							MFS_Inode_t* new_parent_inode = allot_space(&header, sizeof(MFS_Inode_t), &parent_inode_offset);							prepare_inode(new_parent_inode, 0, parent_inode);							update_inode(&header, prot_r->pinum, parent_inode_offset);							i = 0, done = 0;							while(i < 14) {								if(parent_inode->data[i] != -1){									j = 0;									while(j < MFS_BLOCK_SIZE / sizeof(MFS_DirEnt_t)){										//printf("Parent node %d %d/n", inode->data[i], MFS_BLOCK_SIZE / sizeof(MFS_DirEnt_t) );										MFS_DirEnt_t* entry = (MFS_DirEnt_t*)(block_ptr + parent_inode->data[i] + (j * sizeof(MFS_DirEnt_t)));													if(entry->inum != -1 && strcmp(entry->name, prot_r->datapacket) == 0 ){											memcpy(new_dir_entry, block_ptr + parent_inode->data[i] , MFS_BLOCK_SIZE);											//We now know which entry											new_parent_inode->data[i] = entry_offset;											new_dir_entry[j].inum = -1;											update_inode(&header, exist, -1);											prot_r->ret = 0;											new_parent_inode->size--;											done = 1;											break;										}
Author: samgooi4189, Project: cs537, Lines: 67


Example 2: game_name

FString FSkookumScriptEditor::make_project_editable()  {  FString error_msg;  FString game_name(FApp::GetGameName());  if (game_name.IsEmpty())    {    error_msg = TEXT("Tried to make project editable but engine has no project loaded!");    }  else    {    // Check if maybe already editable - if so, silently do nothing    FString editable_scripts_path = FPaths::GameDir() / TEXT("Scripts");    FString editable_project_path(editable_scripts_path / TEXT("Skookum-project.ini"));    if (!FPaths::FileExists(editable_project_path))      {      // Check temporary location (in `Intermediate` folder)      FString temp_root_path(FPaths::GameIntermediateDir() / TEXT("SkookumScript"));      FString temp_scripts_path(temp_root_path / TEXT("Scripts"));      FString temp_project_path = temp_scripts_path / TEXT("Skookum-project.ini");      if (!FPaths::FileExists(temp_project_path))        {        error_msg = TEXT("Tried to make project editable but neither an editable nor a non-editable project was found!");        }      else        {        if (!IFileManager::Get().Move(*editable_scripts_path, *temp_scripts_path, true, true))          {          error_msg = TEXT("Failed moving project information from temporary to editable location!");          }        else          {          // Move compiled binaries for convenience          // We don't care if this succeeds          FString temp_binary_folder_path = temp_root_path / TEXT("Content/skookumscript");          FString editable_binary_folder_path = FPaths::GameDir() / TEXT("Content/skookumscript");          IFileManager::Get().Move(*editable_binary_folder_path, *temp_binary_folder_path, true, true);          // Change project packaging settings to include Sk binaries          UProjectPackagingSettings * packaging_settings_p = Cast<UProjectPackagingSettings>(UProjectPackagingSettings::StaticClass()->GetDefaultObject());          const TCHAR * binary_path_name_p = TEXT("skookumscript");          for (TArray<FDirectoryPath>::TConstIterator dir_path(packaging_settings_p->DirectoriesToAlwaysStageAsNonUFS); dir_path; ++dir_path)            {            if (dir_path->Path == binary_path_name_p)              {              binary_path_name_p = nullptr;              break;              }            }          if (binary_path_name_p)            {            FDirectoryPath binary_path;            binary_path.Path = binary_path_name_p;            packaging_settings_p->DirectoriesToAlwaysStageAsNonUFS.Add(binary_path);            FString config_file_name = FPaths::GameConfigDir() / TEXT("DefaultGame.ini");            if (ISourceControlModule::Get().IsEnabled())              {              SourceControlHelpers::CheckOutFile(config_file_name);              }            packaging_settings_p->SaveConfig(CPF_Config, *config_file_name);            }          // Create Project overlay folder          IFileManager::Get().MakeDirectory(*(editable_scripts_path / TEXT("Project/Object")), true);          // Change project to be editable          FString proj_ini;          verify(FFileHelper::LoadFileToString(proj_ini, *editable_project_path));          proj_ini = proj_ini.Replace(m_editable_ini_settings_p, TEXT("")); // Remove editable settings          proj_ini += TEXT("Overlay7=Project|Project/r/n"); // Create Project overlay definition          verify(FFileHelper::SaveStringToFile(proj_ini, *editable_project_path, FFileHelper::EEncodingOptions::ForceAnsi));          }        }      }    }  return error_msg;  }
Author: marynate, Project: SkookumScript-UnrealEngine-1, Lines: 78


Example 3: verify

    void* MemoryMappedFile::map(const char *filenameIn, unsigned long long &length, int options) {        verify( fd == 0 && len == 0 ); // can't open more than once        setFilename(filenameIn);        FileAllocator::get()->allocateAsap( filenameIn, length );        /* big hack here: Babble uses db names with colons.  doesn't seem to work on windows.  temporary perhaps. */        char filename[256];        strncpy(filename, filenameIn, 255);        filename[255] = 0;        {            size_t len = strlen( filename );            for ( size_t i=len-1; i>=0; i-- ) {                if ( filename[i] == '/' ||                        filename[i] == '//' )                    break;                if ( filename[i] == ':' )                    filename[i] = '_';            }        }        updateLength( filename, length );        {            DWORD createOptions = FILE_ATTRIBUTE_NORMAL;            if ( options & SEQUENTIAL )                createOptions |= FILE_FLAG_SEQUENTIAL_SCAN;            DWORD rw = GENERIC_READ | GENERIC_WRITE;            fd = CreateFileW(                     toWideString(filename).c_str(),                     rw, // desired access                     FILE_SHARE_WRITE | FILE_SHARE_READ, // share mode                     NULL, // security                     OPEN_ALWAYS, // create disposition                     createOptions , // flags                     NULL); // hTempl            if ( fd == INVALID_HANDLE_VALUE ) {                DWORD dosError = GetLastError();                log() << "CreateFileW for " << filename                        << " failed with " << errnoWithDescription( dosError )                        << " (file size is " << length << ")"                        << " in MemoryMappedFile::map"                        << endl;                return 0;            }        }        mapped += length;        {            DWORD flProtect = PAGE_READWRITE; //(options & READONLY)?PAGE_READONLY:PAGE_READWRITE;            maphandle = CreateFileMappingW(fd, NULL, flProtect,                                          length >> 32 /*maxsizehigh*/,                                          (unsigned) length /*maxsizelow*/,                                          NULL/*lpName*/);            if ( maphandle == NULL ) {                DWORD dosError = GetLastError();                log() << "CreateFileMappingW for " << filename                        << " failed with " << errnoWithDescription( dosError )                        << " (file size is " << length << ")"                        << " in MemoryMappedFile::map"                        << endl;                close();                fassertFailed( 16225 );            }        }        void *view = 0;        {            scoped_lock lk(mapViewMutex);            DWORD access = ( options & READONLY ) ? 
FILE_MAP_READ : FILE_MAP_ALL_ACCESS;            int current_retry = 0;            while (true) {                LPVOID thisAddress = getNextMemoryMappedFileLocation(length);                view = MapViewOfFileEx(                    maphandle,      // file mapping handle                    access,         // access                    0, 0,           // file offset, high and low                    0,              // bytes to map, 0 == all                    thisAddress);  // address to place file                if (view == 0) {                    DWORD dosError = GetLastError();                    ++current_retry;                    // If we failed to allocate a memory mapped file, try again in case we picked                    // an address that Windows is also trying to use for some other VM allocations                    if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {                        continue;                    }#ifndef _WIN64                    // Warn user that if they are running a 32-bit app on 64-bit Windows                    if (dosError == ERROR_NOT_ENOUGH_MEMORY) {                        BOOL wow64Process;                        BOOL retWow64 = IsWow64Process(GetCurrentProcess(), &wow64Process);                        if (retWow64 && wow64Process) {//.........这里部分代码省略.........
Author: Aaron20141021, Project: mongo, Lines: 101


Example 4: sceNpTrophyGetGameInfo

s32 sceNpTrophyGetGameInfo(u32 context, u32 handle, vm::ptr<SceNpTrophyGameDetails> details, vm::ptr<SceNpTrophyGameData> data)
{
    sceNpTrophy.error("sceNpTrophyGetGameInfo(context=0x%x, handle=0x%x, details=*0x%x, data=*0x%x)", context, handle, details, data);

    const auto ctxt = idm::get<trophy_context_t>(context);

    if (!ctxt)
    {
        return SCE_NP_TROPHY_ERROR_UNKNOWN_CONTEXT;
    }

    const auto hndl = idm::get<trophy_handle_t>(handle);

    if (!hndl)
    {
        return SCE_NP_TROPHY_ERROR_UNKNOWN_HANDLE;
    }

    // TODO: Get the path of the current user
    const std::string& path = vfs::get("/dev_hdd0/home/00000001/trophy/" + ctxt->trp_name + "/TROPCONF.SFM");

    // TODO: rXmlDocument can open only real file
    verify(HERE), !fs::get_virtual_device(path);

    rXmlDocument doc;
    doc.Load(path);

    std::string titleName;
    std::string titleDetail;

    for (std::shared_ptr<rXmlNode> n = doc.GetRoot()->GetChildren(); n; n = n->GetNext())
    {
        if (n->GetName() == "title-name")
            titleName = n->GetNodeContent();
        if (n->GetName() == "title-detail")
            titleDetail = n->GetNodeContent();
        if (n->GetName() == "trophy")
        {
            u32 trophy_id = atoi(n->GetAttribute("id").c_str());

            details->numTrophies++;
            switch (n->GetAttribute("ttype")[0]) {
            case 'B': details->numBronze++;   break;
            case 'S': details->numSilver++;   break;
            case 'G': details->numGold++;     break;
            case 'P': details->numPlatinum++; break;
            }

            if (ctxt->tropusr->GetTrophyUnlockState(trophy_id))
            {
                data->unlockedTrophies++;
                switch (n->GetAttribute("ttype")[0]) {
                case 'B': data->unlockedBronze++;   break;
                case 'S': data->unlockedSilver++;   break;
                case 'G': data->unlockedGold++;     break;
                case 'P': data->unlockedPlatinum++; break;
                }
            }
        }
    }

    strcpy_trunc(details->title, titleName);
    strcpy_trunc(details->description, titleDetail);

    return CELL_OK;
}
Author: O1L, Project: rpcs3, Lines: 63


Example 5: receivedQuery

    static bool receivedQuery(Client& c, DbResponse& dbresponse, Message& m ) {        bool ok = true;        MSGID responseTo = m.header()->id;        DbMessage d(m);        QueryMessage q(d);        auto_ptr< Message > resp( new Message() );        CurOp& op = *(c.curop());        shared_ptr<AssertionException> ex;        try {            if (!NamespaceString::isCommand(d.getns())) {                // Auth checking for Commands happens later.                Status status = cc().getAuthorizationManager()->checkAuthForQuery(d.getns());                uassert(16550, status.reason(), status.isOK());            }            dbresponse.exhaustNS = runQuery(m, q, op, *resp);            verify( !resp->empty() );        }        catch ( SendStaleConfigException& e ){            ex.reset( new SendStaleConfigException( e.getns(), e.getInfo().msg, e.getVersionReceived(), e.getVersionWanted() ) );            ok = false;        }        catch ( AssertionException& e ) {            ex.reset( new AssertionException( e.getInfo().msg, e.getCode() ) );            ok = false;        }        if( ex ){            op.debug().exceptionInfo = ex->getInfo();            LOGWITHRATELIMIT {                log() << "assertion " << ex->toString() << " ns:" << q.ns << " query:" <<                (q.query.valid() ? q.query.toString() : "query object is corrupt") << endl;                if( q.ntoskip || q.ntoreturn )                    log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << endl;            }            SendStaleConfigException* scex = NULL;            if ( ex->getCode() == SendStaleConfigCode ) scex = static_cast<SendStaleConfigException*>( ex.get() );            BSONObjBuilder err;            ex->getInfo().append( err );            if( scex ){                err.append( "ns", scex->getns() );                scex->getVersionReceived().addToBSON( err, "vReceived" );                scex->getVersionWanted().addToBSON( err, "vWanted" );            }            BSONObj errObj = err.done();            if( scex ){                log() << "stale version detected during query over "                      << q.ns << " : " << errObj << endl;            }            else{                log() << "problem detected during query over "                      << q.ns << " : " << errObj << endl;            }            BufBuilder b;            b.skip(sizeof(QueryResult));            b.appendBuf((void*) errObj.objdata(), errObj.objsize());            // todo: call replyToQuery() from here instead of this!!! see dbmessage.h            QueryResult * msgdata = (QueryResult *) b.buf();            b.decouple();            QueryResult *qr = msgdata;            qr->_resultFlags() = ResultFlag_ErrSet;            if( scex ) qr->_resultFlags() |= ResultFlag_ShardConfigStale;            qr->len = b.len();            qr->setOperation(opReply);            qr->cursorId = 0;            qr->startingFrom = 0;            qr->nReturned = 1;            resp.reset( new Message() );            resp->setData( msgdata, true );        }        op.debug().responseLength = resp->header()->dataLen();        dbresponse.response = resp.release();        dbresponse.responseTo = responseTo;        return ok;    }
Author: nvdnkpr, Project: mongo, Lines: 88


Example 6: dbConfigVersion

    int ConfigServer::checkConfigVersion( bool upgrade ) {        int cur = dbConfigVersion();        if ( cur == VERSION )            return 0;        if ( cur == 0 ) {            ScopedDbConnection conn( _primary );            conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );            pool.flush();            verify( VERSION == dbConfigVersion( conn.conn() ) );            conn.done();            return 0;        }        if ( cur == 2 ) {            // need to upgrade            verify( VERSION == 3 );            if ( ! upgrade ) {                log() << "newer version of mongo meta data/n"                      << "need to --upgrade after shutting all mongos down"                      << endl;                return -9;            }            ScopedDbConnection conn( _primary );            // do a backup            string backupName;            {                stringstream ss;                ss << "config-backup-" << terseCurrentTime(false);                backupName = ss.str();            }            log() << "backing up config to: " << backupName << endl;            conn->copyDatabase( "config" , backupName );            map<string,string> hostToShard;            set<string> shards;            // shards            {                unsigned n = 0;                auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , BSONObj() );                while ( c->more() ) {                    BSONObj o = c->next();                    string host = o["host"].String();                    string name = "";                    BSONElement id = o["_id"];                    if ( id.type() == String ) {                        name = id.String();                    }                    else {                        stringstream ss;                        ss << "shard" << hostToShard.size();                        name = ss.str();                    }                    hostToShard[host] = name;                    shards.insert( name );                    n++;                }                verify( n == hostToShard.size() );                verify( n == shards.size() );                conn->remove( ShardNS::shard , BSONObj() );                for ( map<string,string>::iterator i=hostToShard.begin(); i != hostToShard.end(); i++ ) {                    conn->insert( ShardNS::shard , BSON( "_id" << i->second << "host" << i->first ) );                }            }            // databases            {                auto_ptr<DBClientCursor> c = conn->query( ShardNS::database , BSONObj() );                map<string,BSONObj> newDBs;                unsigned n = 0;                while ( c->more() ) {                    BSONObj old = c->next();                    n++;                    if ( old["name"].eoo() ) {                        // already done                        newDBs[old["_id"].String()] = old;                        continue;                    }                    BSONObjBuilder b(old.objsize());                    b.appendAs( old["name"] , "_id" );                    BSONObjIterator i(old);                    while ( i.more() ) {                        BSONElement e = i.next();                        if ( strcmp( "_id" , e.fieldName() ) == 0 ||                                strcmp( "name" , e.fieldName() ) == 0 ) {                            continue;                        }//.........这里部分代码省略.........
Author: matulef, Project: mongo, Lines: 101


Example 7: invariant

    PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {        if (_killed) { return PlanExecutor::DEAD; }        for (;;) {            WorkingSetID id = WorkingSet::INVALID_ID;            PlanStage::StageState code = _root->work(&id);            if (PlanStage::ADVANCED == code) {                // Fast count.                if (WorkingSet::INVALID_ID == id) {                    invariant(NULL == objOut);                    invariant(NULL == dlOut);                    return PlanExecutor::ADVANCED;                }                WorkingSetMember* member = _workingSet->get(id);                bool hasRequestedData = true;                if (NULL != objOut) {                    if (WorkingSetMember::LOC_AND_IDX == member->state) {                        if (1 != member->keyData.size()) {                            _workingSet->free(id);                            hasRequestedData = false;                        }                        else {                            *objOut = member->keyData[0].keyData;                        }                    }                    else if (member->hasObj()) {                        *objOut = member->obj;                    }                    else {                        _workingSet->free(id);                        hasRequestedData = false;                    }                }                if (NULL != dlOut) {                    if (member->hasLoc()) {                        *dlOut = member->loc;                    }                    else {                        _workingSet->free(id);                        hasRequestedData = false;                    }                }                if (hasRequestedData) {                    _workingSet->free(id);                    return PlanExecutor::ADVANCED;                }                // This result didn't have the data the caller wanted, try again.            }            else if (PlanStage::NEED_TIME == code) {                // Fall through to yield check at end of large conditional.            }            else if (PlanStage::IS_EOF == code) {                return PlanExecutor::IS_EOF;            }            else if (PlanStage::DEAD == code) {                return PlanExecutor::DEAD;            }            else {                verify(PlanStage::FAILURE == code);                if (NULL != objOut) {                    WorkingSetCommon::getStatusMemberObject(*_workingSet, id, objOut);                }                return PlanExecutor::EXEC_ERROR;            }        }    }
Author: DieterLutz, Project: mongo, Lines: 71


Example 8: verify

void DBDirectClient::killCursor(long long id) {
    // The killCursor command on the DB client is only used by sharding,
    // so no need to have it for MongoD.
    verify(!"killCursor should not be used in MongoD");
}
Author: DieterLutz, Project: mongo, Lines: 5


Example 9: change_one

static intchange_one(zfs_handle_t *zhp, void *data){	prop_changelist_t *clp = data;	char property[ZFS_MAXPROPLEN];	char where[64];	prop_changenode_t *cn;	zprop_source_t sourcetype = ZPROP_SRC_NONE;	zprop_source_t share_sourcetype = ZPROP_SRC_NONE;	/*	 * We only want to unmount/unshare those filesystems that may inherit	 * from the target filesystem.  If we find any filesystem with a	 * locally set mountpoint, we ignore any children since changing the	 * property will not affect them.  If this is a rename, we iterate	 * over all children regardless, since we need them unmounted in	 * order to do the rename.  Also, if this is a volume and we're doing	 * a rename, then always add it to the changelist.	 */	if (!(ZFS_IS_VOLUME(zhp) && clp->cl_realprop == ZFS_PROP_NAME) &&	    zfs_prop_get(zhp, clp->cl_prop, property,	    sizeof (property), &sourcetype, where, sizeof (where),	    B_FALSE) != 0) {		zfs_close(zhp);		return (0);	}	/*	 * If we are "watching" sharenfs or sharesmb	 * then check out the companion property which is tracked	 * in cl_shareprop	 */	if (clp->cl_shareprop != ZPROP_INVAL &&	    zfs_prop_get(zhp, clp->cl_shareprop, property,	    sizeof (property), &share_sourcetype, where, sizeof (where),	    B_FALSE) != 0) {		zfs_close(zhp);		return (0);	}	if (clp->cl_alldependents || clp->cl_allchildren ||	    sourcetype == ZPROP_SRC_DEFAULT ||	    sourcetype == ZPROP_SRC_INHERITED ||	    (clp->cl_shareprop != ZPROP_INVAL &&	    (share_sourcetype == ZPROP_SRC_DEFAULT ||	    share_sourcetype == ZPROP_SRC_INHERITED))) {		if ((cn = zfs_alloc(zfs_get_handle(zhp),		    sizeof (prop_changenode_t))) == NULL) {			zfs_close(zhp);			return (-1);		}		cn->cn_handle = zhp;		cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||		    zfs_is_mounted(zhp, NULL);		cn->cn_shared = zfs_is_shared(zhp);		cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);		cn->cn_needpost = B_TRUE;		/* Indicate if any child is exported to a local zone. */		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)			clp->cl_haszonedchild = B_TRUE;		uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);		if (clp->cl_sorted) {			uu_list_index_t idx;			(void) uu_list_find(clp->cl_list, cn, NULL,			    &idx);			uu_list_insert(clp->cl_list, cn, idx);		} else {			/*			 * Add this child to beginning of the list. Children			 * below this one in the hierarchy will get added above			 * this one in the list. This produces a list in			 * reverse dataset name order.			 * This is necessary when the original mountpoint			 * is legacy or none.			 */			ASSERT(!clp->cl_alldependents);			verify(uu_list_insert_before(clp->cl_list,			    uu_list_first(clp->cl_list), cn) == 0);		}		if (!clp->cl_alldependents)			return (zfs_iter_children(zhp, change_one, data));	} else {		zfs_close(zhp);	}	return (0);}
Author: ColinIanKing, Project: zfs, Lines: 94


Example 10: zpool_in_use

/* * Determines if the pool is in use.  If so, it returns true and the state of * the pool as well as the name of the pool.  Both strings are allocated and * must be freed by the caller. */intzpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,    boolean_t *inuse){	nvlist_t *config;	char *name;	boolean_t ret;	uint64_t guid, vdev_guid;	zpool_handle_t *zhp;	nvlist_t *pool_config;	uint64_t stateval, isspare;	aux_cbdata_t cb = { 0 };	boolean_t isactive;	*inuse = B_FALSE;	if (zpool_read_label(fd, &config) != 0) {		(void) no_memory(hdl);		return (-1);	}	if (config == NULL)		return (0);	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,	    &stateval) == 0);	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,	    &vdev_guid) == 0);	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,		    &name) == 0);		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,		    &guid) == 0);	}	switch (stateval) {	case POOL_STATE_EXPORTED:		/*		 * A pool with an exported state may in fact be imported		 * read-only, so check the in-core state to see if it's		 * active and imported read-only.  If it is, set		 * its state to active.		 */		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))			stateval = POOL_STATE_ACTIVE;		ret = B_TRUE;		break;	case POOL_STATE_ACTIVE:		/*		 * For an active pool, we have to determine if it's really part		 * of a currently active pool (in which case the pool will exist		 * and the guid will be the same), or whether it's part of an		 * active pool that was disconnected without being explicitly		 * exported.		 */		if (pool_active(hdl, name, guid, &isactive) != 0) {			nvlist_free(config);			return (-1);		}		if (isactive) {			/*			 * Because the device may have been removed while			 * offlined, we only report it as active if the vdev is			 * still present in the config.  Otherwise, pretend like			 * it's not in use.			 */			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&			    (pool_config = zpool_get_config(zhp, NULL))			    != NULL) {				nvlist_t *nvroot;				verify(nvlist_lookup_nvlist(pool_config,				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);				ret = find_guid(nvroot, vdev_guid);			} else {				ret = B_FALSE;			}			/*			 * If this is an active spare within another pool, we			 * treat it like an unused hot spare.  This allows the			 * user to create a pool with a hot spare that currently			 * in use within another pool.  Since we return B_TRUE,			 * libdiskmgt will continue to prevent generic consumers			 * from using the device.			 */			if (ret && nvlist_lookup_uint64(config,			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)				stateval = POOL_STATE_SPARE;//.........这里部分代码省略.........
Author: AB17, Project: zfs, Lines: 101


Example 11: get_configs

/* * Convert our list of pools into the definitive set of configurations.  We * start by picking the best config for each toplevel vdev.  Once that's done, * we assemble the toplevel vdevs into a full config for the pool.  We make a * pass to fix up any incorrect paths, and then add it to the main list to * return to the user. */static nvlist_t *get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok){	pool_entry_t *pe;	vdev_entry_t *ve;	config_entry_t *ce;	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;	nvlist_t **spares, **l2cache;	uint_t i, nspares, nl2cache;	boolean_t config_seen;	uint64_t best_txg;	char *name, *hostname = NULL;	uint64_t guid;	uint_t children = 0;	nvlist_t **child = NULL;	uint_t holes;	uint64_t *hole_array, max_id;	uint_t c;	boolean_t isactive;	uint64_t hostid;	nvlist_t *nvl;	boolean_t found_one = B_FALSE;	boolean_t valid_top_config = B_FALSE;	if (nvlist_alloc(&ret, 0, 0) != 0)		goto nomem;	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {		uint64_t id, max_txg = 0;		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)			goto nomem;		config_seen = B_FALSE;		/*		 * Iterate over all toplevel vdevs.  Grab the pool configuration		 * from the first one we find, and then go through the rest and		 * add them as necessary to the 'vdevs' member of the config.		 */		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {			/*			 * Determine the best configuration for this vdev by			 * selecting the config with the latest transaction			 * group.			 */			best_txg = 0;			for (ce = ve->ve_configs; ce != NULL;			    ce = ce->ce_next) {				if (ce->ce_txg > best_txg) {					tmp = ce->ce_config;					best_txg = ce->ce_txg;				}			}			/*			 * We rely on the fact that the max txg for the			 * pool will contain the most up-to-date information			 * about the valid top-levels in the vdev namespace.			 */			if (best_txg > max_txg) {				(void) nvlist_remove(config,				    ZPOOL_CONFIG_VDEV_CHILDREN,				    DATA_TYPE_UINT64);				(void) nvlist_remove(config,				    ZPOOL_CONFIG_HOLE_ARRAY,				    DATA_TYPE_UINT64_ARRAY);				max_txg = best_txg;				hole_array = NULL;				holes = 0;				max_id = 0;				valid_top_config = B_FALSE;				if (nvlist_lookup_uint64(tmp,				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {					verify(nvlist_add_uint64(config,					    ZPOOL_CONFIG_VDEV_CHILDREN,					    max_id) == 0);					valid_top_config = B_TRUE;				}				if (nvlist_lookup_uint64_array(tmp,				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,				    &holes) == 0) {					verify(nvlist_add_uint64_array(config,					    ZPOOL_CONFIG_HOLE_ARRAY,					    hole_array, holes) == 0);				}			}			if (!config_seen) {//.........这里部分代码省略.........
Author: AB17, Project: zfs, Lines: 101


Example 12: fix_paths

/* * Go through and fix up any path and/or devid information for the given vdev * configuration. */static intfix_paths(nvlist_t *nv, name_entry_t *names){	nvlist_t **child;	uint_t c, children;	uint64_t guid;	name_entry_t *ne, *best;	char *path, *devid;	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,	    &child, &children) == 0) {		for (c = 0; c < children; c++)			if (fix_paths(child[c], names) != 0)				return (-1);		return (0);	}	/*	 * This is a leaf (file or disk) vdev.  In either case, go through	 * the name list and see if we find a matching guid.  If so, replace	 * the path and see if we can calculate a new devid.	 *	 * There may be multiple names associated with a particular guid, in	 * which case we have overlapping partitions or multiple paths to the	 * same disk.  In this case we prefer to use the path name which	 * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we	 * use the lowest order device which corresponds to the first match	 * while traversing the ZPOOL_IMPORT_PATH search path.	 */	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)		path = NULL;	best = NULL;	for (ne = names; ne != NULL; ne = ne->ne_next) {		if (ne->ne_guid == guid) {			if (path == NULL) {				best = ne;				break;			}			if ((strlen(path) == strlen(ne->ne_name)) &&			    !strncmp(path, ne->ne_name, strlen(path))) {				best = ne;				break;			}			if (best == NULL || ne->ne_order < best->ne_order)				best = ne;		}	}	if (best == NULL)		return (0);	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)		return (-1);	if ((devid = get_devid(best->ne_name)) == NULL) {		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);	} else {		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)			return (-1);		devid_str_free(devid);	}	return (0);}
Author: AB17, Project: zfs, Lines: 73


Example 13: zpool_find_import_cached

/* * Given a cache file, return the contents as a list of importable pools. * poolname or guid (but not both) are provided by the caller when trying * to import a specific pool. */nvlist_t *zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,    char *poolname, uint64_t guid){	char *buf;	int fd;	struct stat64 statbuf;	nvlist_t *raw, *src, *dst;	nvlist_t *pools;	nvpair_t *elem;	char *name;	uint64_t this_guid;	boolean_t active;	verify(poolname == NULL || guid == 0);	if ((fd = open(cachefile, O_RDONLY)) < 0) {		zfs_error_aux(hdl, "%s", strerror(errno));		(void) zfs_error(hdl, EZFS_BADCACHE,		    dgettext(TEXT_DOMAIN, "failed to open cache file"));		return (NULL);	}	if (fstat64(fd, &statbuf) != 0) {		zfs_error_aux(hdl, "%s", strerror(errno));		(void) close(fd);		(void) zfs_error(hdl, EZFS_BADCACHE,		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));		return (NULL);	}	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {		(void) close(fd);		return (NULL);	}	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {		(void) close(fd);		free(buf);		(void) zfs_error(hdl, EZFS_BADCACHE,		    dgettext(TEXT_DOMAIN,		    "failed to read cache file contents"));		return (NULL);	}	(void) close(fd);	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {		free(buf);		(void) zfs_error(hdl, EZFS_BADCACHE,		    dgettext(TEXT_DOMAIN,		    "invalid or corrupt cache file contents"));		return (NULL);	}	free(buf);	/*	 * Go through and get the current state of the pools and refresh their	 * state.	 */	if (nvlist_alloc(&pools, 0, 0) != 0) {		(void) no_memory(hdl);		nvlist_free(raw);		return (NULL);	}	elem = NULL;	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {		verify(nvpair_value_nvlist(elem, &src) == 0);		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,		    &name) == 0);		if (poolname != NULL && strcmp(poolname, name) != 0)			continue;		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,		    &this_guid) == 0);		if (guid != 0) {			verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,			    &this_guid) == 0);			if (guid != this_guid)				continue;		}		if (pool_active(hdl, name, this_guid, &active) != 0) {			nvlist_free(raw);			nvlist_free(pools);			return (NULL);		}		if (active)			continue;		if ((dst = refresh_config(hdl, src)) == NULL) {//.........这里部分代码省略.........
Author: AB17, Project: zfs, Lines: 101


Example 14: zpool_find_import_impl

/* * Given a list of directories to search, find all pools stored on disk.  This * includes partial pools which are not available to import.  If no args are * given (argc is 0), then the default directory (/dev/dsk) is searched. * poolname or guid (but not both) are provided by the caller when trying * to import a specific pool. */static nvlist_t *zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg){	int i, dirs = iarg->paths;	DIR *dirp = NULL;	struct dirent64 *dp;	char path[MAXPATHLEN];	char *end, **dir = iarg->path;	size_t pathleft;	struct stat64 statbuf;	nvlist_t *ret = NULL, *config;	int fd;	pool_list_t pools = { 0 };	pool_entry_t *pe, *penext;	vdev_entry_t *ve, *venext;	config_entry_t *ce, *cenext;	name_entry_t *ne, *nenext;	verify(iarg->poolname == NULL || iarg->guid == 0);	if (dirs == 0) {#ifdef HAVE_LIBBLKID		/* Use libblkid to scan all device for their type */		if (zpool_find_import_blkid(hdl, &pools) == 0)			goto skip_scanning;		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,		    dgettext(TEXT_DOMAIN, "blkid failure falling back "		    "to manual probing"));#endif /* HAVE_LIBBLKID */		dir = zpool_default_import_path;		dirs = DEFAULT_IMPORT_PATH_SIZE;	}	/*	 * Go through and read the label configuration information from every	 * possible device, organizing the information according to pool GUID	 * and toplevel GUID.	 */	for (i = 0; i < dirs; i++) {		char *rdsk;		int dfd;		/* use realpath to normalize the path */		if (realpath(dir[i], path) == 0) {			/* it is safe to skip missing search paths */			if (errno == ENOENT)				continue;			zfs_error_aux(hdl, strerror(errno));			(void) zfs_error_fmt(hdl, EZFS_BADPATH,			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);			goto error;		}		end = &path[strlen(path)];		*end++ = '/';		*end = 0;		pathleft = &path[sizeof (path)] - end;		/*		 * Using raw devices instead of block devices when we're		 * reading the labels skips a bunch of slow operations during		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.		 */		if (strcmp(path, "/dev/dsk/") == 0)			rdsk = "/dev/rdsk/";		else			rdsk = path;		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||		    (dirp = fdopendir(dfd)) == NULL) {			zfs_error_aux(hdl, strerror(errno));			(void) zfs_error_fmt(hdl, EZFS_BADPATH,			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),			    rdsk);			goto error;		}		/*		 * This is not MT-safe, but we have no MT consumers of libzfs		 */		while ((dp = readdir64(dirp)) != NULL) {			const char *name = dp->d_name;			if (name[0] == '.' &&			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))				continue;			/*			 * Skip checking devices with well known prefixes:			 * watchdog - A special close is required to avoid			 *            triggering it and resetting the system.//.........这里部分代码省略.........
Author: AB17, Project: zfs, Lines: 101


Example 15: soln

    // static    QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& query,                                                           const QueryPlannerParams& params,                                                           QuerySolutionNode* solnRoot) {        auto_ptr<QuerySolution> soln(new QuerySolution());        soln->filterData = query.getQueryObj();        verify(soln->filterData.isOwned());        soln->indexFilterApplied = params.indexFiltersApplied;        solnRoot->computeProperties();        // solnRoot finds all our results.  Let's see what transformations we must perform to the        // data.        // If we're answering a query on a sharded system, we need to drop documents that aren't        // logically part of our shard.        if (params.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {            // TODO: We could use params.shardKey to do fetch analysis instead of always fetching.            if (!solnRoot->fetched()) {                FetchNode* fetch = new FetchNode();                fetch->children.push_back(solnRoot);                solnRoot = fetch;            }            ShardingFilterNode* sfn = new ShardingFilterNode();            sfn->children.push_back(solnRoot);            solnRoot = sfn;        }        bool hasSortStage = false;        solnRoot = analyzeSort(query, params, solnRoot, &hasSortStage);        // This can happen if we need to create a blocking sort stage and we're not allowed to.        if (NULL == solnRoot) { return NULL; }        // A solution can be blocking if it has a blocking sort stage or        // a hashed AND stage.        bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);        soln->hasBlockingStage = hasSortStage || hasAndHashStage;        // If we can (and should), add the keep mutations stage.        // We cannot keep mutated documents if:        //        // 1. The query requires an index to evaluate the predicate ($text).  We can't tell whether        // or not the doc actually satisfies the $text predicate since we can't evaluate a        // text MatchExpression.        //        // 2. The query implies a sort ($geoNear).  It would be rather expensive and hacky to merge        // the document at the right place.        //        // 3. There is an index-provided sort.  Ditto above comment about merging.        //        // TODO: do we want some kind of pre-planning step where we look for certain nodes and cache        // them?  We do lookups in the tree a few times.  This may not matter as most trees are        // shallow in terms of query nodes.        bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT)                              || hasNode(solnRoot, STAGE_GEO_NEAR_2D)                              || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE)                              || (!query.getParsed().getSort().isEmpty() && !hasSortStage);        // Only these stages can produce flagged results.  A stage has to hold state past one call        // to work(...) in order to possibly flag a result.        bool couldProduceFlagged = hasAndHashStage                                || hasNode(solnRoot, STAGE_AND_SORTED)                                || hasNode(solnRoot, STAGE_FETCH);        bool shouldAddMutation = !cannotKeepFlagged && couldProduceFlagged;        if (shouldAddMutation && (params.options & QueryPlannerParams::KEEP_MUTATIONS)) {            KeepMutationsNode* keep = new KeepMutationsNode();            // We must run the entire expression tree to make sure the document is still valid.  
          keep->filter.reset(query.root()->shallowClone());            if (STAGE_SORT == solnRoot->getType()) {                // We want to insert the invalidated results before the sort stage, if there is one.                verify(1 == solnRoot->children.size());                keep->children.push_back(solnRoot->children[0]);                solnRoot->children[0] = keep;            }            else {                keep->children.push_back(solnRoot);                solnRoot = keep;            }        }        // Project the results.        if (NULL != query.getProj()) {            QLOG() << "PROJECTION: fetched status: " << solnRoot->fetched() << endl;            QLOG() << "PROJECTION: Current plan is:/n" << solnRoot->toString() << endl;            ProjectionNode::ProjectionType projType = ProjectionNode::DEFAULT;            BSONObj coveredKeyObj;            if (query.getProj()->requiresDocument()) {                QLOG() << "PROJECTION: claims to require doc adding fetch./n";                // If the projection requires the entire document, somebody must fetch.                if (!solnRoot->fetched()) {                    FetchNode* fetch = new FetchNode();                    fetch->children.push_back(solnRoot);//.........这里部分代码省略.........
Author: AndrewCEmil, Project: mongo, Lines: 101


Example 16: changelist_gather

//.........这里部分代码省略.........	 */	if (prop == ZFS_PROP_NAME) {		clp->cl_prop = ZFS_PROP_MOUNTPOINT;		clp->cl_alldependents = B_TRUE;	} else if (prop == ZFS_PROP_ZONED) {		clp->cl_prop = ZFS_PROP_MOUNTPOINT;		clp->cl_allchildren = B_TRUE;	} else if (prop == ZFS_PROP_CANMOUNT) {		clp->cl_prop = ZFS_PROP_MOUNTPOINT;	} else if (prop == ZFS_PROP_VOLSIZE) {		clp->cl_prop = ZFS_PROP_MOUNTPOINT;	} else {		clp->cl_prop = prop;	}	clp->cl_realprop = prop;	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&	    clp->cl_prop != ZFS_PROP_SHARENFS &&	    clp->cl_prop != ZFS_PROP_SHARESMB)		return (clp);	/*	 * If watching SHARENFS or SHARESMB then	 * also watch its companion property.	 */	if (clp->cl_prop == ZFS_PROP_SHARENFS)		clp->cl_shareprop = ZFS_PROP_SHARESMB;	else if (clp->cl_prop == ZFS_PROP_SHARESMB)		clp->cl_shareprop = ZFS_PROP_SHARENFS;	if (clp->cl_alldependents) {		if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {			changelist_free(clp);			return (NULL);		}	} else if (zfs_iter_children(zhp, change_one, clp) != 0) {		changelist_free(clp);		return (NULL);	}	/*	 * We have to re-open ourselves because we auto-close all the handles	 * and can't tell the difference.	 */	if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),	    ZFS_TYPE_DATASET)) == NULL) {		changelist_free(clp);		return (NULL);	}	/*	 * Always add ourself to the list.  We add ourselves to the end so that	 * we're the last to be unmounted.	 */	if ((cn = zfs_alloc(zhp->zfs_hdl,	    sizeof (prop_changenode_t))) == NULL) {		zfs_close(temp);		changelist_free(clp);		return (NULL);	}	cn->cn_handle = temp;	cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||	    zfs_is_mounted(temp, NULL);	cn->cn_shared = zfs_is_shared(temp);	cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);	cn->cn_needpost = B_TRUE;	uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);	if (clp->cl_sorted) {		uu_list_index_t idx;		(void) uu_list_find(clp->cl_list, cn, NULL, &idx);		uu_list_insert(clp->cl_list, cn, idx);	} else {		/*		 * Add the target dataset to the end of the list.		 * The list is not really unsorted. The list will be		 * in reverse dataset name order. This is necessary		 * when the original mountpoint is legacy or none.		 */		verify(uu_list_insert_after(clp->cl_list,		    uu_list_last(clp->cl_list), cn) == 0);	}	/*	 * If the mountpoint property was previously 'legacy', or 'none',	 * record it as the behavior of changelist_postfix() will be different.	 */	if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) && legacy) {		/*		 * do not automatically mount ex-legacy datasets if		 * we specifically set canmount to noauto		 */		if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) !=		    ZFS_CANMOUNT_NOAUTO)			clp->cl_waslegacy = B_TRUE;	}	return (clp);}
Author: ColinIanKing, Project: zfs, Lines: 101


Example 17: if

    PlanStage::StageState MergeSortStage::work(WorkingSetID* out) {        ++_commonStats.works;        if (isEOF()) { return PlanStage::IS_EOF; }        if (!_noResultToMerge.empty()) {            // We have some child that we don't have a result from.  Each child must have a result            // in order to pick the minimum result among all our children.  Work a child.            PlanStage* child = _noResultToMerge.front();            WorkingSetID id = WorkingSet::INVALID_ID;            StageState code = child->work(&id);            if (PlanStage::ADVANCED == code) {                // If we're deduping...                if (_dedup) {                    WorkingSetMember* member = _ws->get(id);                    if (!member->hasLoc()) {                        // Can't dedup data unless there's a DiskLoc.  We go ahead and use its                        // result.                        _noResultToMerge.pop();                    }                    else {                        ++_specificStats.dupsTested;                        // ...and there's a diskloc and and we've seen the DiskLoc before                        if (_seen.end() != _seen.find(member->loc)) {                            // ...drop it.                            _ws->free(id);                            ++_commonStats.needTime;                            ++_specificStats.dupsDropped;                            return PlanStage::NEED_TIME;                        }                        else {                            // Otherwise, note that we've seen it.                            _seen.insert(member->loc);                            // We're going to use the result from the child, so we remove it from                            // the queue of children without a result.                            _noResultToMerge.pop();                        }                    }                }                else {                    // Not deduping.  We use any result we get from the child.  Remove the child                    // from the queue of things without a result.                    _noResultToMerge.pop();                }                // Store the result in our list.                StageWithValue value;                value.id = id;                value.stage = child;                _mergingData.push_front(value);                // Insert the result (indirectly) into our priority queue.                _merging.push(_mergingData.begin());                ++_commonStats.needTime;                return PlanStage::NEED_TIME;            }            else if (PlanStage::IS_EOF == code) {                // There are no more results possible from this child.  Don't bother with it                // anymore.                _noResultToMerge.pop();                ++_commonStats.needTime;                return PlanStage::NEED_TIME;            }            else if (PlanStage::FAILURE == code) {                *out = id;                return code;            }            else {                if (PlanStage::NEED_FETCH == code) {                    *out = id;                    ++_commonStats.needFetch;                }                else if (PlanStage::NEED_TIME == code) {                    ++_commonStats.needTime;                }                return code;            }        }        // If we're here, for each non-EOF child, we have a valid WSID.        verify(!_merging.empty());        // Get the 'min' WSID.  _merging is a priority queue so its top is the smallest.        
MergingRef top = _merging.top();        _merging.pop();        // Since we're returning the WSID that came from top->stage, we need to work(...) it again        // to get a new result.        _noResultToMerge.push(top->stage);        // Save the ID that we're returning and remove the returned result from our data.        WorkingSetID idToTest = top->id;        _mergingData.erase(top);        // Return the min.        *out = idToTest;        ++_commonStats.advanced;//.........这里部分代码省略.........
Author: EddieWu, Project: mongo, Lines: 101


Example 18: main

int main(int argd, char* args[])
{
    init();
    connected_comp();
    verify();
    return 0;
}
Author: cudaida, Project: mmix, Lines: 6


Example 19: pPipeline

    intrusive_ptr<Pipeline> Pipeline::parseCommand(        string &errmsg, BSONObj &cmdObj,        const intrusive_ptr<ExpressionContext> &pCtx) {        intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));        vector<BSONElement> pipeline;        /* gather the specification for the aggregation */        for(BSONObj::iterator cmdIterator = cmdObj.begin();                cmdIterator.more(); ) {            BSONElement cmdElement(cmdIterator.next());            const char *pFieldName = cmdElement.fieldName();            // ignore top-level fields prefixed with $. They are for the command processor, not us.            if (pFieldName[0] == '$') {                continue;            }            // ignore cursor options since they are handled externally.            if (str::equals(pFieldName, "cursor")) {                continue;            }            /* look for the aggregation command */            if (!strcmp(pFieldName, commandName)) {                pPipeline->collectionName = cmdElement.String();                continue;            }            /* check for the collection name */            if (!strcmp(pFieldName, pipelineName)) {                pipeline = cmdElement.Array();                continue;            }            /* check for explain option */            if (!strcmp(pFieldName, explainName)) {                pPipeline->explain = cmdElement.Bool();                continue;            }            /* if the request came from the router, we're in a shard */            if (!strcmp(pFieldName, fromRouterName)) {                pCtx->setInShard(cmdElement.Bool());                continue;            }            /* check for debug options */            if (!strcmp(pFieldName, splitMongodPipelineName)) {                pPipeline->splitMongodPipeline = true;                continue;            }            /* we didn't recognize a field in the command */            ostringstream sb;            sb <<               "unrecognized field /"" <<               cmdElement.fieldName();            errmsg = sb.str();            return intrusive_ptr<Pipeline>();        }        /*          If we get here, we've harvested the fields we expect for a pipeline.          Set up the specified document source pipeline.        */        SourceContainer& sources = pPipeline->sources; // shorthand        /* iterate over the steps in the pipeline */        const size_t nSteps = pipeline.size();        for(size_t iStep = 0; iStep < nSteps; ++iStep) {            /* pull out the pipeline element as an object */            BSONElement pipeElement(pipeline[iStep]);            uassert(15942, str::stream() << "pipeline element " <<                    iStep << " is not an object",                    pipeElement.type() == Object);            BSONObj bsonObj(pipeElement.Obj());            // Parse a pipeline stage from 'bsonObj'.            uassert(16435, "A pipeline stage specification object must contain exactly one field.",                    bsonObj.nFields() == 1);            BSONElement stageSpec = bsonObj.firstElement();            const char* stageName = stageSpec.fieldName();            // Create a DocumentSource pipeline stage from 'stageSpec'.            
StageDesc key;            key.pName = stageName;            const StageDesc* pDesc = (const StageDesc*)                    bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc),                            stageDescCmp);            uassert(16436,                    str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",                    pDesc);            intrusive_ptr<DocumentSource> stage = (*pDesc->pFactory)(&stageSpec, pCtx);            verify(stage);            stage->setPipelineStep(iStep);            sources.push_back(stage);        }//.........这里部分代码省略.........
Author: lucciano, Project: mongo-1, Lines: 101


Example 20: run_tests

void run_tests(void) {  int i;  int64_t arr[SIZE];  int64_t dst[SIZE];  double start_time;  double end_time;  double total_time;  printf("Running tests/n");  srand48(SEED);  total_time = 0.0;  for (i = 0; i < RUNS; i++) {    fill(arr, SIZE);    memcpy(dst, arr, sizeof(int64_t) * SIZE);    start_time = utime();    qsort(dst, SIZE, sizeof(int64_t), simple_cmp);    end_time = utime();    total_time += end_time - start_time;    verify(dst, SIZE);  }  printf("stdlib qsort time:          %10.2f us per iteration/n", total_time / RUNS);#ifndef __linux__  srand48(SEED);  total_time = 0.0;  for (i = 0; i < RUNS; i++) {    fill(arr, SIZE);    memcpy(dst, arr, sizeof(int64_t) * SIZE);    start_time = utime();    heapsort(dst, SIZE, sizeof(int64_t), simple_cmp);    end_time = utime();    total_time += end_time - start_time;    verify(dst, SIZE);  }  printf("stdlib heapsort time:       %10.2f us per iteration/n", total_time / RUNS);  srand48(SEED);  total_time = 0.0;  for (i = 0; i < RUNS; i++) {    fill(arr, SIZE);    memcpy(dst, arr, sizeof(int64_t) * SIZE);    start_time = utime();    mergesort(dst, SIZE, sizeof(int64_t), simple_cmp);    end_time = utime();    total_time += end_time - start_time;    verify(dst, SIZE);  }  printf("stdlib mergesort time:      %10.2f us per iteration/n", total_time / RUNS);#endif  srand48(SEED);  total_time = 0.0;  for (i = 0; i < RUNS; i++) {    fill(arr, SIZE);    memcpy(dst, arr, sizeof(int64_t) * SIZE);    start_time = utime();    sorter_quick_sort(dst, SIZE);    end_time = utime();    total_time += end_time - start_time;    verify(dst, SIZE);  }  printf("quick sort time:            %10.2f us per iteration/n", total_time / RUNS);  srand48(SEED);  total_time = 0.0;  for (i = 0; i < RUNS; i++) {    fill(arr, SIZE);    memcpy(dst, arr, sizeof(int64_t) * SIZE);    start_time = utime();    sorter_selection_sort(dst, SIZE);    end_time = utime();    total_time += end_time - start_time;    verify(dst, SIZE);  }  printf("selection sort time:        %10.2f us per iteration/n", total_time / RUNS);  srand48(SEED);  total_time = 0.0;  for (i = 0; i < RUNS; i++) {    fill(arr, SIZE);    memcpy(dst, arr, sizeof(int64_t) * SIZE);    start_time = utime();    sorter_merge_sort(dst, SIZE);    end_time = utime();    total_time += end_time - start_time;    verify(dst, SIZE);  }  printf("merge sort time:            %10.2f us per iteration/n", total_time / RUNS);  srand48(SEED);  total_time = 0.0;  for (i = 0; i < RUNS; i++) {    fill(arr, SIZE);    memcpy(dst, arr, sizeof(int64_t) * SIZE);//.........这里部分代码省略.........
Author: 627656505, Project: sort, Lines: 101


Example 21: utimecmp

intutimecmp (char const *dst_name,          struct stat const *dst_stat,          struct stat const *src_stat,          int options){    /* Things to watch out for:       The code uses a static hash table internally and is not safe in the       presence of signals, multiple threads, etc.       int and long int might be 32 bits.  Many of the calculations store       numbers up to 2 billion, and multiply by 10; they have to avoid       multiplying 2 billion by 10, as this exceeds 32-bit capabilities.       time_t might be unsigned.  */    verify (TYPE_IS_INTEGER (time_t));    verify (TYPE_TWOS_COMPLEMENT (int));    /* Destination and source time stamps.  */    time_t dst_s = dst_stat->st_mtime;    time_t src_s = src_stat->st_mtime;    int dst_ns = get_stat_mtime_ns (dst_stat);    int src_ns = get_stat_mtime_ns (src_stat);    if (options & UTIMECMP_TRUNCATE_SOURCE)    {        /* Look up the time stamp resolution for the destination device.  */        /* Hash table for devices.  */        static Hash_table *ht;        /* Information about the destination file system.  */        static struct fs_res *new_dst_res;        struct fs_res *dst_res;        /* Time stamp resolution in nanoseconds.  */        int res;        /* Quick exit, if possible.  Since the worst resolution is 2           seconds, anything that differs by more than that does not           needs source truncation.  */        if (dst_s == src_s && dst_ns == src_ns)            return 0;        if (dst_s <= src_s - 2)            return -1;        if (src_s <= dst_s - 2)            return 1;        if (! ht)            ht = hash_initialize (16, NULL, dev_info_hash, dev_info_compare, free);        if (! new_dst_res)        {            new_dst_res = xmalloc (sizeof *new_dst_res);            new_dst_res->resolution = 2 * BILLION;            new_dst_res->exact = false;        }        new_dst_res->dev = dst_stat->st_dev;        dst_res = hash_insert (ht, new_dst_res);        if (! dst_res)            xalloc_die ();        if (dst_res == new_dst_res)        {            /* NEW_DST_RES is now in use in the hash table, so allocate a               new entry next time.  */            new_dst_res = NULL;        }        res = dst_res->resolution;#ifdef _PC_TIMESTAMP_RESOLUTION        /* If the system will tell us the resolution, we're set!  */        if (! dst_res->exact)        {            res = pathconf (dst_name, _PC_TIMESTAMP_RESOLUTION);            if (0 < res)            {                dst_res->resolution = res;                dst_res->exact = true;            }        }#endif        if (! dst_res->exact)        {            /* This file system's resolution is not known exactly.               Deduce it, and store the result in the hash table.  */            time_t dst_a_s = dst_stat->st_atime;            time_t dst_c_s = dst_stat->st_ctime;            time_t dst_m_s = dst_s;            int dst_a_ns = get_stat_atime_ns (dst_stat);            int dst_c_ns = get_stat_ctime_ns (dst_stat);            int dst_m_ns = dst_ns;            /* Set RES to an upper bound on the file system resolution               (after truncation due to SYSCALL_RESOLUTION) by inspecting               the atime, ctime and mtime of the existing destination.//.........这里部分代码省略.........
Developer ID: ystk, Project: debian-gnulib, Lines of code: 101
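Unlike the runtime checks in most of the other examples, the verify used here is gnulib's compile-time assertion macro (from its verify.h): it rejects the build when the condition is not a true integer constant expression, and it generates no code, which is why it can sit at the top of utimecmp to pin down assumptions such as TYPE_IS_INTEGER(time_t). A rough, simplified stand-in in modern C++ might look like this (the macro body below is an illustrative assumption; gnulib's real implementation is considerably more elaborate and portable):

// Simplified stand-in for gnulib's verify(): a pure compile-time check.
#define verify(cond) static_assert(cond, "verify failed: " #cond)

verify(sizeof(long) >= 4);            // usable at namespace/file scope

int main() {
  verify(sizeof(int) * 8 >= 16);      // or inside a function body
  return 0;
}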


Example 22: ngen_CC_Param

void ngen_CC_Param(shil_opcode* op,shil_param* par,CanonicalParamType tp)
{
    switch(tp)
    {
    //push the contents
    case CPT_u32:
    case CPT_f32:
        if (par->is_reg())
        {
            if (reg.IsAllocg(*par))
                x86e->Emit(op_push32,reg.mapg(*par));
            else if (reg.IsAllocf(*par))
            {
                x86e->Emit(op_sub32,ESP,4);
                x86e->Emit(op_movss,x86_mrm(ESP), reg.mapf(*par));
            }
            else
            {
                die("Must not happen !\n");
                x86e->Emit(op_push32,x86_ptr(par->reg_ptr()));
            }
        }
        else if (par->is_imm())
            x86e->Emit(op_push,par->_imm);
        else
            die("invalid combination");
        ngen_CC_BytesPushed+=4;
        break;

    //push the ptr itself
    case CPT_ptr:
        verify(par->is_reg());

        die("FAIL");
        x86e->Emit(op_push,(unat)par->reg_ptr());

        for (u32 ri=0; ri<(*par).count(); ri++)
        {
            if (reg.IsAllocf(*par,ri))
            {
                x86e->Emit(op_sub32,ESP,4);
                x86e->Emit(op_movss,x86_mrm(ESP),reg.mapfv(*par,ri));
            }
            else
            {
                verify(!reg.IsAllocAny((Sh4RegType)(par->_reg+ri)));
            }
        }

        ngen_CC_BytesPushed+=4;
        break;

    //store from EAX
    case CPT_u64rvL:
    case CPT_u32rv:
        if (reg.IsAllocg(*par))
            x86e->Emit(op_mov32,reg.mapg(*par),EAX);
        /*else if (reg.IsAllocf(*par))
            x86e->Emit(op_movd_xmm_from_r32,reg.mapf(*par),EAX);*/
        else
            die("Must not happen!\n");
        break;

    case CPT_u64rvH:
        if (reg.IsAllocg(*par))
            x86e->Emit(op_mov32,reg.mapg(*par),EDX);
        else
            die("Must not happen!\n");
        break;

    //Store from ST(0)
    case CPT_f32rv:
        verify(reg.IsAllocf(*par));
        x86e->Emit(op_fstp32f,x86_ptr(par->reg_ptr()));
        x86e->Emit(op_movss,reg.mapf(*par),x86_ptr(par->reg_ptr()));
        break;
    }
}
Developer ID: joolswills, Project: reicast-emulator, Lines of code: 79
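In this emulator code, verify and die are runtime assertion helpers: verify aborts with a diagnostic when its condition is false, and die aborts unconditionally with a message. A minimal sketch of such a pair, as commonly defined in C++ projects (these exact definitions are an assumption for illustration, not reicast's actual macros):

#include <cstdio>
#include <cstdlib>

// Assumed sketch of die()/verify()-style runtime assertions.
#define die(msg) \
    do { std::fprintf(stderr, "fatal: %s (%s:%d)\n", msg, __FILE__, __LINE__); std::abort(); } while (0)

#define verify(cond) \
    do { if (!(cond)) die("verify failed: " #cond); } while (0)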


Example 23: verify

intrusive_ptr<DocumentSource> DocumentSourceSort::getShardSource() {
    verify(!_mergingPresorted);
    return this;
}
Developer ID: CeperaCPP, Project: mongo, Lines of code: 4


Example 24: create

//......... part of the code omitted .........
        // Populate node->element connectivity:

        std::vector<size_t> node_elem_work( node_count_total , (size_t) 0 );

        for ( size_t i = 0 ; i < elem_count_total ; ++i ) {
            for ( size_t n = 0 ; n < element_node_count  ; ++n ) {
                ++node_elem_work[ elem_node_ids(i,n) ];
            }
        }

        mesh.node_elem_ids =
            Kokkos::create_staticcrsgraph< node_elem_ids_type >( "node_elem_ids" , node_elem_work );

        typename node_elem_ids_type::HostMirror
        node_elem_ids = Kokkos::create_mirror( mesh.node_elem_ids );

        for ( size_t i = 0 ; i < node_count_total ; ++i ) {
            node_elem_work[i] = node_elem_ids.row_map[i];
        }

        // Looping in element order insures the list of elements
        // is sorted by element index.

        for ( size_t i = 0 ; i < elem_count_total ; ++i ) {
            for ( size_t n = 0 ; n < element_node_count ; ++n ) {
                const unsigned nid = elem_node_ids(i, n);
                const unsigned j = node_elem_work[nid] ;
                ++node_elem_work[nid] ;

                node_elem_ids.entries( j , 0 ) = i ;
                node_elem_ids.entries( j , 1 ) = n ;
            }
        }

        //------------------------------------
        // Verify setup with node coordinates matching grid indices.

        verify( node_coords , elem_node_ids , node_elem_ids );

        //------------------------------------
        // Scale node coordinates to problem extent with
        // nonlinear mapping.

        {
            const double problem_extent[3] =
            {   static_cast<double>( vertex_box_global[0][1] - 1 ) ,
                static_cast<double>( vertex_box_global[1][1] - 1 ) ,
                static_cast<double>( vertex_box_global[2][1] - 1 )
            };

            const double grid_extent[3] =
            {   static_cast<double>( node_box_global[0][1] - 1 ) ,
                static_cast<double>( node_box_global[1][1] - 1 ) ,
                static_cast<double>( node_box_global[2][1] - 1 )
            };

            for ( size_t i = 0 ; i < node_count_total ; ++i ) {
                const double x_unit = node_coords(i,0) / grid_extent[0] ;
                const double y_unit = node_coords(i,1) / grid_extent[1] ;
                const double z_unit = node_coords(i,2) / grid_extent[2] ;

                node_coords(i,0) = coordinate_scalar_type( problem_extent[0] * std::pow( x_unit , x_coord_curve ) );
                node_coords(i,1) = coordinate_scalar_type( problem_extent[1] * std::pow( y_unit , y_coord_curve ) );
                node_coords(i,2) = coordinate_scalar_type( problem_extent[2] * std::pow( z_unit , z_coord_curve ) );
            }
        }

        Kokkos::deep_copy( mesh.node_coords ,   node_coords );
        Kokkos::deep_copy( mesh.elem_node_ids , elem_node_ids );
        Kokkos::deep_copy( mesh.node_elem_ids.entries , node_elem_ids.entries );

        //------------------------------------
        // Communication lists:

        {
            recv_msg_count = 0 ;
            send_msg_count = 0 ;
            send_count = 0 ;

            for ( size_t i = 1 ; i < proc_count ; ++i ) {

                // Order sending starting with the local processor rank
                // to try to smooth out the amount of messages simultaneously
                // send to a particular processor.

                const int proc = ( proc_local + i ) % proc_count ;

                if ( node_part_counts[i] ) {
                    mesh.parallel_data_map.host_recv(recv_msg_count,0) = proc ;
                    mesh.parallel_data_map.host_recv(recv_msg_count,1) = node_part_counts[i] ;

                    ++recv_msg_count ;
                }

                if ( node_send_map[i].size() ) {
                    mesh.parallel_data_map.host_send(send_msg_count,0) = proc ;
                    mesh.parallel_data_map.host_send(send_msg_count,1) = node_send_map[i].size() ;

                    for ( size_t j = 0 ; j < node_send_map[i].size() ; ++j , ++send_count ) {
                        mesh.parallel_data_map.host_send_item(send_count) = node_send_map[i][j] - node_count_interior ;
                    }

                    ++send_msg_count ;
                }
            }
        }

        return mesh ;
    }
Developer ID: athomps, Project: lammps, Lines of code: 101
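Here verify is not an assertion macro but a helper in the same example that cross-checks the mesh that was just built: according to the comment, it verifies the setup against the grid indices, which implies, among other things, that every (element, local-node) pair recorded in elem_node_ids also appears in that node's node_elem_ids list. One plausible piece of such a check, written against simplified containers rather than the original Kokkos views (layout and names below are assumptions for illustration):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Assumed simplified layout: elem_node[e] lists the node ids of element e,
// node_elem[n] lists (element, local-node-index) pairs touching node n.
void verify(const std::vector<std::vector<std::size_t>>& elem_node,
            const std::vector<std::vector<std::pair<std::size_t, std::size_t>>>& node_elem) {
  for (std::size_t e = 0; e < elem_node.size(); ++e) {
    for (std::size_t k = 0; k < elem_node[e].size(); ++k) {
      const std::size_t n = elem_node[e][k];
      bool found = false;
      for (const auto& en : node_elem[n]) {
        if (en.first == e && en.second == k) { found = true; break; }
      }
      assert(found && "node->element connectivity is inconsistent");
      (void)found;
    }
  }
}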


Example 25: main

int main(int argc, char *argv[])
{
  int i, g=0, s=0, v=0;
  char opt;

  printf ("\n\n  [ RSA key generation/signing/verifcation\n\n");

  for (i=1; i<argc; i++)
  {
    if (argv[i][0]=='-' || argv[i][0]=='/')
    {
      opt=argv[i][1];
      switch (opt)
      {
        case 'g': // generate RSA key pair
          g=1;
          break;
        case 'm': // sign a message using RSA (just for testing)
          input=getparam (argc, argv, &i);
          s=1;
          break;
        case 'k': // key length (max is 1024-bits)
          keylen=atoi(getparam(argc, argv, &i));
          break;
        case 'v': // verify RSA signature (just for testing)
          signature=getparam (argc, argv, &i);
          v=1;
          break;
        default:
          usage();
          break;
      }
    }
  }

  // generate keys?
  if (g)
  {
    printf ("  [ generating RSA key pair of %i-bits\n", keylen);
    genkeys();
  } else
  // generate signature of message using RSA private key?
  if (s==1 && v==0) {
    // have input?
    if (input==NULL)
    {
      printf ("  [ signing requires a message, use -m option\n");
      return 0;
    }
    printf ("  [ signing message using RSA\n");
    sign ();
  } else
  // verify signature using RSA public key?
  if (v) {
    // have input + signature?
    if (input==NULL || signature==NULL)
    {
      printf ("  [ verification requires message and signature\n");
      return 0;
    }
    printf ("  [ verifying message and signature using RSA\n");
    verify ();
  } else {
    usage();
  }
  return 0;
}
Developer ID: odzhan, Project: shells, Lines of code: 66
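Based on the option handling above, the program is driven entirely from the command line: -g generates a key pair (with -k selecting the key length), -m supplies the message to sign, and -v supplies a signature to verify against that message. A hypothetical session might look like the following (the binary name rsa_test is an assumption; whether -m and -v take literal strings or file names depends on the sign() and verify() implementations, which are not shown here):

rsa_test -g -k 1024                      (generate a 1024-bit RSA key pair)
rsa_test -m <message>                    (sign <message> with the private key)
rsa_test -m <message> -v <signature>     (verify <signature> against <message>)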


Example 26: operator()

        void operator()( DBClientCursorBatchIterator &i ) {
            Lock::GlobalWrite lk;
            DurTransaction txn;
            context.relocked();

            bool createdCollection = false;
            Collection* collection = NULL;

            while( i.moreInCurrentBatch() ) {
                if ( numSeen % 128 == 127 /*yield some*/ ) {
                    collection = NULL;

                    time_t now = time(0);
                    if( now - lastLog >= 60 ) {
                        // report progress
                        if( lastLog )
                            log() << "clone " << to_collection << ' ' << numSeen << endl;
                        lastLog = now;
                    }
                    mayInterrupt( _mayBeInterrupted );
                    dbtempreleaseif t( _mayYield );
                }

                if ( isindex == false && collection == NULL ) {
                    collection = context.db()->getCollection( to_collection );
                    if ( !collection ) {
                        massert( 17321,
                                 str::stream()
                                 << "collection dropped during clone ["
                                 << to_collection << "]",
                                 !createdCollection );
                        createdCollection = true;
                        collection = context.db()->createCollection( &txn, to_collection );
                        verify( collection );
                    }
                }

                BSONObj tmp = i.nextSafe();

                /* assure object is valid.  note this will slow us down a little. */
                const Status status = validateBSON(tmp.objdata(), tmp.objsize());
                if (!status.isOK()) {
                    out() << "Cloner: skipping corrupt object from " << from_collection
                          << ": " << status.reason();
                    continue;
                }

                ++numSeen;

                BSONObj js = tmp;
                if ( isindex ) {
                    verify(nsToCollectionSubstring(from_collection) == "system.indexes");
                    js = fixindex(context.db()->name(), tmp);
                    indexesToBuild->push_back( js.getOwned() );
                    continue;
                }

                verify(nsToCollectionSubstring(from_collection) != "system.indexes");
                StatusWith<DiskLoc> loc = collection->insertDocument( &txn, js, true );
                if ( !loc.isOK() ) {
                    error() << "error: exception cloning object in " << from_collection
                            << ' ' << loc.toString() << " obj:" << js;
                }
                uassertStatusOK( loc.getStatus() );
                if ( logForRepl )
                    logOp("i", to_collection, js);

                getDur().commitIfNeeded();

                RARELY if ( time( 0 ) - saveLast > 60 ) {
                    log() << numSeen << " objects cloned so far from collection " << from_collection;
                    saveLast = time( 0 );
                }
            }
        }
Developer ID: pooyadavoodi, Project: mongo, Lines of code: 75


Example 27: verify

Client::Context::~Context() {
    DEV verify( _client == currentClient.get() );
    _client->_curOp->recordGlobalTime( _timer.micros() );
    _client->_curOp->leave( this );
    _client->_context = _oldContext; // note: _oldContext may be null
}
Developer ID: ChrisKozak, Project: mongo, Lines of code: 6
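The DEV prefix restricts the check to development/debug builds, so the invariant is validated while developing without paying for it in release builds. A rough sketch of how such a guard can be expressed (the macros and the DEBUG_BUILD flag below are illustrative assumptions, not MongoDB's actual definitions):

#include <cassert>

// Assumed sketch: DEV runs the statement that follows only in debug builds,
// while that statement still compiles in every build.
#ifdef DEBUG_BUILD
#define DEV if (true)
#else
#define DEV if (false)
#endif

#define verify(cond) assert(cond)   // simplified stand-in, not MongoDB's verify()

int main() {
  int checks_run = 0;
  DEV { verify(1 + 1 == 2); ++checks_run; }   // executed only when DEBUG_BUILD is defined
  return checks_run;
}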


Example 28: zfs_sort

/*
 * Sort datasets by specified columns.
 *
 * o  Numeric types sort in ascending order.
 * o  String types sort in alphabetical order.
 * o  Types inappropriate for a row sort that row to the literal
 *    bottom, regardless of the specified ordering.
 *
 * If no sort columns are specified, or two datasets compare equally
 * across all specified columns, they are sorted alphabetically by name
 * with snapshots grouped under their parents.
 */
static int
zfs_sort(const void *larg, const void *rarg, void *data)
{
	zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle;
	zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle;
	zfs_sort_column_t *sc = (zfs_sort_column_t *)data;
	zfs_sort_column_t *psc;

	for (psc = sc; psc != NULL; psc = psc->sc_next) {
		char lbuf[ZFS_MAXPROPLEN], rbuf[ZFS_MAXPROPLEN];
		char *lstr, *rstr;
		uint64_t lnum, rnum;
		boolean_t lvalid, rvalid;
		int ret = 0;

		/*
		 * We group the checks below the generic code.  If 'lstr' and
		 * 'rstr' are non-NULL, then we do a string based comparison.
		 * Otherwise, we compare 'lnum' and 'rnum'.
		 */
		lstr = rstr = NULL;
		if (psc->sc_prop == ZPROP_INVAL) {
			nvlist_t *luser, *ruser;
			nvlist_t *lval, *rval;

			luser = zfs_get_user_props(l);
			ruser = zfs_get_user_props(r);

			lvalid = (nvlist_lookup_nvlist(luser,
			    psc->sc_user_prop, &lval) == 0);
			rvalid = (nvlist_lookup_nvlist(ruser,
			    psc->sc_user_prop, &rval) == 0);

			if (lvalid)
				verify(nvlist_lookup_string(lval,
				    ZPROP_VALUE, &lstr) == 0);
			if (rvalid)
				verify(nvlist_lookup_string(rval,
				    ZPROP_VALUE, &rstr) == 0);
		} else if (zfs_prop_is_string(psc->sc_prop)) {
			lvalid = (zfs_prop_get(l, psc->sc_prop, lbuf,
			    sizeof (lbuf), NULL, NULL, 0, B_TRUE) == 0);
			rvalid = (zfs_prop_get(r, psc->sc_prop, rbuf,
			    sizeof (rbuf), NULL, NULL, 0, B_TRUE) == 0);

			lstr = lbuf;
			rstr = rbuf;
		} else {
			lvalid = zfs_prop_valid_for_type(psc->sc_prop,
			    zfs_get_type(l));
			rvalid = zfs_prop_valid_for_type(psc->sc_prop,
			    zfs_get_type(r));

			if (lvalid)
				(void) zfs_prop_get_numeric(l, psc->sc_prop,
				    &lnum, NULL, NULL, 0);
			if (rvalid)
				(void) zfs_prop_get_numeric(r, psc->sc_prop,
				    &rnum, NULL, NULL, 0);
		}

		if (!lvalid && !rvalid)
			continue;
		else if (!lvalid)
			return (1);
		else if (!rvalid)
			return (-1);

		if (lstr)
			ret = strcmp(lstr, rstr);
		else if (lnum < rnum)
			ret = -1;
		else if (lnum > rnum)
			ret = 1;

		if (ret != 0) {
			if (psc->sc_reverse == B_TRUE)
				ret = (ret < 0) ? 1 : -1;
			return (ret);
		}
	}

	return (zfs_compare(larg, rarg, NULL));
}
Developer ID: FirmOS, Project: fre, Lines of code: 97
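Note why verify() rather than assert() wraps the nvlist lookups above: in the ZFS userland code, verify() always evaluates its expression, so the lookups (whose side effects fill in lstr and rstr) still run even when assertions are compiled out; only the failure check disappears. The ZFS sources commonly define it along these lines (the snippet below is a simplified sketch of that convention, not the exact header):

#include <cassert>

/* Assumed sketch: verify() always evaluates its expression; the result is
   only checked when assertions are enabled (NDEBUG not defined). */
#ifdef NDEBUG
#define verify(EX)	((void)(EX))
#else
#define verify(EX)	assert(EX)
#endif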



Note: The verify function examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective developers. Copyright in the source code remains with the original authors, and any redistribution or use should follow the corresponding project's License. Do not republish without permission.

