
Self-study tutorial: C++ vacuum_delay_point function code examples

51自学网 | 2021-06-03 09:32:26 | C++
This tutorial on C++ vacuum_delay_point function code examples is meant to be practical; we hope it helps you.

This article collects typical usage examples of the vacuum_delay_point function in C++. If you are wondering what vacuum_delay_point does, how to call it, or what real-world uses look like, the hand-picked examples below should help. In PostgreSQL, vacuum_delay_point() is the cost-based vacuum delay hook: maintenance code calls it periodically so the process can check for interrupts and, when vacuum cost accounting is active, sleep once the accumulated cost exceeds the configured limit.

Below are 30 code examples of the vacuum_delay_point function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
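Before the individual examples, here is a minimal sketch (not taken from any of them) of the pattern nearly all of them share: a maintenance loop over the blocks of a relation that calls vacuum_delay_point() once per page, before any buffer lock is taken. The buffer-manager calls and vacuum_delay_point() itself (declared in commands/vacuum.h in the sources these examples come from) are real PostgreSQL APIs; the wrapper function scan_relation_throttled and the placeholder body are illustrative only.

#include "postgres.h"

#include "commands/vacuum.h"	/* vacuum_delay_point() */
#include "storage/bufmgr.h"		/* ReadBufferExtended(), LockBuffer(), ... */
#include "utils/rel.h"			/* Relation */

/*
 * Illustrative sketch: scan every block of "rel", throttled by the
 * cost-based vacuum delay machinery.
 */
static void
scan_relation_throttled(Relation rel, BufferAccessStrategy strategy)
{
	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
	BlockNumber blkno;

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		Buffer		buf;
		Page		page;

		/* Nap here if cost accounting says so -- before any buffer lock. */
		vacuum_delay_point();

		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
								 strategy);
		LockBuffer(buf, BUFFER_LOCK_SHARE);
		page = BufferGetPage(buf);	/* one-argument form, as in most examples */

		/* ... inspect or clean "page" here (placeholder) ... */
		(void) page;

		UnlockReleaseBuffer(buf);
	}
}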

Example 1: ginVacuumPostingTree

static void
ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
{
	Buffer		rootBuffer = InvalidBuffer;
	DataPageDeleteStack root,
			   *ptr,
			   *tmp;

	if (ginVacuumPostingTreeLeaves(gvs, rootBlkno, TRUE, &rootBuffer) == FALSE)
	{
		Assert(rootBuffer == InvalidBuffer);
		return;
	}

	memset(&root, 0, sizeof(DataPageDeleteStack));
	root.leftBlkno = InvalidBlockNumber;
	root.isRoot = TRUE;

	vacuum_delay_point();

	ginScanToDelete(gvs, rootBlkno, TRUE, &root, InvalidOffsetNumber);

	ptr = root.child;
	while (ptr)
	{
		tmp = ptr->child;
		pfree(ptr);
		ptr = tmp;
	}

	UnlockReleaseBuffer(rootBuffer);
}
Developer: Epictetus, Project: postgres, Lines: 32


Example 2: blvacuumcleanup

/*
 * Post-VACUUM cleanup.
 *
 * Result: a palloc'd struct containing statistical info for VACUUM displays.
 */
IndexBulkDeleteResult *
blvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
	Relation	index = info->index;
	BlockNumber npages,
				blkno;
	BlockNumber totFreePages;

	if (info->analyze_only)
		return stats;

	if (stats == NULL)
		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

	/*
	 * Iterate over the pages: insert deleted pages into FSM and collect
	 * statistics.
	 */
	npages = RelationGetNumberOfBlocks(index);
	totFreePages = 0;
	for (blkno = BLOOM_HEAD_BLKNO; blkno < npages; blkno++)
	{
		Buffer		buffer;
		Page		page;

		vacuum_delay_point();

		buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
									RBM_NORMAL, info->strategy);
		LockBuffer(buffer, BUFFER_LOCK_SHARE);
		page = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);

		if (BloomPageIsDeleted(page))
		{
			RecordFreeIndexPage(index, blkno);
			totFreePages++;
		}
		else
		{
			stats->num_index_tuples += BloomPageGetMaxOffset(page);
			stats->estimated_count += BloomPageGetMaxOffset(page);
		}

		UnlockReleaseBuffer(buffer);
	}

	IndexFreeSpaceMapVacuum(info->index);
	stats->pages_free = totFreePages;
	stats->num_pages = RelationGetNumberOfBlocks(index);

	return stats;
}
Developer: Hu1-Li, Project: postgres, Lines: 57


Example 3: lazy_vacuum_heap

/*
 *	lazy_vacuum_heap() -- second pass over the heap
 *
 *		This routine marks dead tuples as unused and compacts out free
 *		space on their pages.  Pages not having dead tuples recorded from
 *		lazy_scan_heap are not visited at all.
 *
 * Note: the reason for doing this as a second pass is we cannot remove
 * the tuples until we've removed their index entries, and we want to
 * process index entry removal in batches as large as possible.
 */
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
	MIRROREDLOCK_BUFMGR_DECLARE;

	int			tupindex;
	int			npages;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);
	npages = 0;

	tupindex = 0;

	/* Fetch gp_persistent_relation_node information that will be added to XLOG record. */
	RelationFetchGpRelationNodeForXLog(onerel);

	while (tupindex < vacrelstats->num_dead_tuples)
	{
		BlockNumber tblk;
		Buffer		buf;
		Page		page;

		vacuum_delay_point();

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);

		/* -------- MirroredLock ---------- */
		MIRROREDLOCK_BUFMGR_LOCK;

		buf = ReadBufferWithStrategy(onerel, tblk, vac_strategy);
		LockBufferForCleanup(buf);
		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);

		/* Now that we've compacted the page, record its available space */
		page = BufferGetPage(buf);
		lazy_record_free_space(vacrelstats, tblk,
							   PageGetHeapFreeSpace(page));
		UnlockReleaseBuffer(buf);

		MIRROREDLOCK_BUFMGR_UNLOCK;
		/* -------- MirroredLock ---------- */

		npages++;
	}

	ereport(elevel,
			(errmsg("\"%s\": removed %d row versions in %d pages",
					RelationGetRelationName(onerel),
					tupindex, npages),
			 errdetail("%s.",
					   pg_rusage_show(&ru0))));
}
Developer: phan-pivotal, Project: gpdb, Lines: 63


Example 4: lazy_vacuum_heap

/*
 *	lazy_vacuum_heap() -- second pass over the heap
 *
 *		This routine marks dead tuples as unused and compacts out free
 *		space on their pages.  Pages not having dead tuples recorded from
 *		lazy_scan_heap are not visited at all.
 *
 * Note: the reason for doing this as a second pass is we cannot remove
 * the tuples until we've removed their index entries, and we want to
 * process index entry removal in batches as large as possible.
 */
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
	int			tupindex;
	int			npages;
	PGRUsage	ru0;

	pg_rusage_init(&ru0);
	npages = 0;

	tupindex = 0;
	while (tupindex < vacrelstats->num_dead_tuples)
	{
		BlockNumber tblk;
		Buffer		buf;
		Page		page;
		Size		freespace;

		vacuum_delay_point();

		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
								 vac_strategy);
		if (!ConditionalLockBufferForCleanup(buf))
		{
			ReleaseBuffer(buf);
			++tupindex;
			continue;
		}
		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);

		/* Now that we've compacted the page, record its available space */
		page = BufferGetPage(buf);
		freespace = PageGetHeapFreeSpace(page);

		UnlockReleaseBuffer(buf);
		RecordPageWithFreeSpace(onerel, tblk, freespace);
		npages++;
	}

	ereport(elevel,
			(errmsg("\"%s\": removed %d row versions in %d pages",
					RelationGetRelationName(onerel),
					tupindex, npages),
			 errdetail("%s.",
					   pg_rusage_show(&ru0))));
}
Developer: dankrusi, Project: postgres, Lines: 58


Example 5: hashbucketcleanup

/* * Helper function to perform deletion of index entries from a bucket. * * This function expects that the caller has acquired a cleanup lock on the * primary bucket page, and will return with a write lock again held on the * primary bucket page.  The lock won't necessarily be held continuously, * though, because we'll release it when visiting overflow pages. * * It would be very bad if this function cleaned a page while some other * backend was in the midst of scanning it, because hashgettuple assumes * that the next valid TID will be greater than or equal to the current * valid TID.  There can't be any concurrent scans in progress when we first * enter this function because of the cleanup lock we hold on the primary * bucket page, but as soon as we release that lock, there might be.  We * handle that by conspiring to prevent those scans from passing our cleanup * scan.  To do that, we lock the next page in the bucket chain before * releasing the lock on the previous page.  (This type of lock chaining is * not ideal, so we might want to look for a better solution at some point.) * * We need to retain a pin on the primary bucket to ensure that no concurrent * split can start. */voidhashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,				  BlockNumber bucket_blkno, BufferAccessStrategy bstrategy,				  uint32 maxbucket, uint32 highmask, uint32 lowmask,				  double *tuples_removed, double *num_index_tuples,				  bool split_cleanup,				  IndexBulkDeleteCallback callback, void *callback_state){	BlockNumber blkno;	Buffer		buf;	Bucket		new_bucket PG_USED_FOR_ASSERTS_ONLY = InvalidBucket;	bool		bucket_dirty = false;	blkno = bucket_blkno;	buf = bucket_buf;	if (split_cleanup)		new_bucket = _hash_get_newbucket_from_oldbucket(rel, cur_bucket,														lowmask, maxbucket);	/* Scan each page in bucket */	for (;;)	{		HashPageOpaque opaque;		OffsetNumber offno;		OffsetNumber maxoffno;		Buffer		next_buf;		Page		page;		OffsetNumber deletable[MaxOffsetNumber];		int			ndeletable = 0;		bool		retain_pin = false;		bool		clear_dead_marking = false;		vacuum_delay_point();		page = BufferGetPage(buf);		opaque = (HashPageOpaque) PageGetSpecialPointer(page);		/* Scan each tuple in page */		maxoffno = PageGetMaxOffsetNumber(page);		for (offno = FirstOffsetNumber;			 offno <= maxoffno;			 offno = OffsetNumberNext(offno))		{			ItemPointer htup;			IndexTuple	itup;			Bucket		bucket;			bool		kill_tuple = false;			itup = (IndexTuple) PageGetItem(page,											PageGetItemId(page, offno));			htup = &(itup->t_tid);			/*			 * To remove the dead tuples, we strictly want to rely on results			 * of callback function.  refer btvacuumpage for detailed reason.			 */			if (callback && callback(htup, callback_state))			{				kill_tuple = true;				if (tuples_removed)					*tuples_removed += 1;			}			else if (split_cleanup)			{				/* delete the tuples that are moved by split. */				bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),											  maxbucket,											  highmask,											  lowmask);				/* mark the item for deletion */				if (bucket != cur_bucket)				{					/*					 * We expect tuples to either belong to current bucket or					 * new_bucket.  This is ensured because we don't allow					 * further splits from bucket that contains garbage. See					 * comments in _hash_expandtable.//.........这里部分代码省略.........
Developer: BertrandAreal, Project: postgres, Lines: 101
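The header comment of Example 5 describes the key trick: lock the next page in the bucket chain before releasing the lock on the current one, so that a concurrent scan can never overtake the cleanup scan. Below is a minimal sketch of just that hand-over-hand step, using the same hash-index helpers and variable names as the excerpt; next_blkno is a local name introduced here for clarity, and the surrounding loop (including flushing any accumulated deletions first) is omitted.

/* Sketch of the lock-chaining step at the bottom of the per-page loop. */
BlockNumber next_blkno = opaque->hasho_nextblkno;	/* overflow-page link */

/* End of the bucket chain: stop scanning. */
if (!BlockNumberIsValid(next_blkno))
	break;

/* Lock the NEXT page in the chain first... */
next_buf = _hash_getbuf_with_strategy(rel, next_blkno, HASH_WRITE,
									  LH_OVERFLOW_PAGE, bstrategy);

/*
 * ...and only then let go of the current page.  The primary bucket page
 * keeps its pin (retain_pin) so no concurrent split can start.
 */
if (retain_pin)
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
else
	_hash_relbuf(rel, buf);

buf = next_buf;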


Example 6: ginInsertCleanup

/* * Move tuples from pending pages into regular GIN structure. * * This can be called concurrently by multiple backends, so it must cope. * On first glance it looks completely not concurrent-safe and not crash-safe * either.	The reason it's okay is that multiple insertion of the same entry * is detected and treated as a no-op by gininsert.c.  If we crash after * posting entries to the main index and before removing them from the * pending list, it's okay because when we redo the posting later on, nothing * bad will happen.  Likewise, if two backends simultaneously try to post * a pending entry into the main index, one will succeed and one will do * nothing.  We try to notice when someone else is a little bit ahead of * us in the process, but that's just to avoid wasting cycles.  Only the * action of removing a page from the pending list really needs exclusive * lock. * * vac_delay indicates that ginInsertCleanup is called from vacuum process, * so call vacuum_delay_point() periodically. * If stats isn't null, we count deleted pending pages into the counts. */voidginInsertCleanup(GinState *ginstate,				 bool vac_delay, IndexBulkDeleteResult *stats){	Relation	index = ginstate->index;	Buffer		metabuffer,				buffer;	Page		metapage,				page;	GinMetaPageData *metadata;	MemoryContext opCtx,				oldCtx;	BuildAccumulator accum;	KeyArray	datums;	BlockNumber blkno;	metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);	LockBuffer(metabuffer, GIN_SHARE);	metapage = BufferGetPage(metabuffer);	metadata = GinPageGetMeta(metapage);	if (metadata->head == InvalidBlockNumber)	{		/* Nothing to do */		UnlockReleaseBuffer(metabuffer);		return;	}	/*	 * Read and lock head of pending list	 */	blkno = metadata->head;	buffer = ReadBuffer(index, blkno);	LockBuffer(buffer, GIN_SHARE);	page = BufferGetPage(buffer);	LockBuffer(metabuffer, GIN_UNLOCK);	/*	 * Initialize.	All temporary space will be in opCtx	 */	opCtx = AllocSetContextCreate(CurrentMemoryContext,								  "GIN insert cleanup temporary context",								  ALLOCSET_DEFAULT_MINSIZE,								  ALLOCSET_DEFAULT_INITSIZE,								  ALLOCSET_DEFAULT_MAXSIZE);	oldCtx = MemoryContextSwitchTo(opCtx);	initKeyArray(&datums, 128);	ginInitBA(&accum);	accum.ginstate = ginstate;	/*	 * At the top of this loop, we have pin and lock on the current page of	 * the pending list.  However, we'll release that before exiting the loop.	 * Note we also have pin but not lock on the metapage.	 */	for (;;)	{		if (GinPageIsDeleted(page))		{			/* another cleanup process is running concurrently */			UnlockReleaseBuffer(buffer);			break;		}		/*		 * read page's datums into accum		 */		processPendingPage(&accum, &datums, page, FirstOffsetNumber);		if (vac_delay)			vacuum_delay_point();		/*		 * Is it time to flush memory to disk?	Flush if we are at the end of		 * the pending list, or if we have a full row and memory is getting		 * full.		 *//.........这里部分代码省略.........
Developer: ASchurman, Project: BufStrat, Lines: 101


Example 7: compute_tsvector_stats

//.........这里部分代码省略.........	 */	bucket_width = (num_mcelem + 10) * 1000 / 7;	/*	 * Create the hashtable. It will be in local memory, so we don't need to	 * worry about overflowing the initial size. Also we don't need to pay any	 * attention to locking and memory management.	 */	MemSet(&hash_ctl, 0, sizeof(hash_ctl));	hash_ctl.keysize = sizeof(LexemeHashKey);	hash_ctl.entrysize = sizeof(TrackItem);	hash_ctl.hash = lexeme_hash;	hash_ctl.match = lexeme_match;	hash_ctl.hcxt = CurrentMemoryContext;	lexemes_tab = hash_create("Analyzed lexemes table",							  num_mcelem,							  &hash_ctl,							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);	/* Initialize counters. */	b_current = 1;	lexeme_no = 0;	/* Loop over the tsvectors. */	for (vector_no = 0; vector_no < samplerows; vector_no++)	{		Datum		value;		bool		isnull;		TSVector	vector;		WordEntry  *curentryptr;		char	   *lexemesptr;		int			j;		vacuum_delay_point();		value = fetchfunc(stats, vector_no, &isnull);		/*		 * Check for null/nonnull.		 */		if (isnull)		{			null_cnt++;			continue;		}		/*		 * Add up widths for average-width calculation.  Since it's a		 * tsvector, we know it's varlena.  As in the regular		 * compute_minimal_stats function, we use the toasted width for this		 * calculation.		 */		total_width += VARSIZE_ANY(DatumGetPointer(value));		/*		 * Now detoast the tsvector if needed.		 */		vector = DatumGetTSVector(value);		/*		 * We loop through the lexemes in the tsvector and add them to our		 * tracking hashtable.		 */		lexemesptr = STRPTR(vector);		curentryptr = ARRPTR(vector);		for (j = 0; j < vector->size; j++)
Developer: eubide, Project: postgres, Lines: 67


Example 8: AOCSSegmentFileFullCompaction

//.........这里部分代码省略.........						   aorel->rd_appendonly->visimaprelid,						   aorel->rd_appendonly->visimapidxid,						   ShareLock,						   snapshot);	elogif(Debug_appendonly_print_compaction,		   LOG, "Compact AO segfile %d, relation %sd",		   compact_segno, relname);	proj = palloc0(sizeof(bool) * RelationGetNumberOfAttributes(aorel));	for (i = 0; i < RelationGetNumberOfAttributes(aorel); ++i)	{		proj[i] = true;	}	scanDesc = aocs_beginrangescan(aorel,								   snapshot, snapshot,								   &compact_segno, 1, NULL, proj);	tupDesc = RelationGetDescr(aorel);	slot = MakeSingleTupleTableSlot(tupDesc);	mt_bind = create_memtuple_binding(tupDesc);	/*	 * We need a ResultRelInfo and an EState so we can use the regular	 * executor's index-entry-making machinery.	 */	estate = CreateExecutorState();	resultRelInfo = makeNode(ResultRelInfo);	resultRelInfo->ri_RangeTableIndex = 1;	/* dummy */	resultRelInfo->ri_RelationDesc = aorel;	resultRelInfo->ri_TrigDesc = NULL;	/* we don't fire triggers */	ExecOpenIndices(resultRelInfo);	estate->es_result_relations = resultRelInfo;	estate->es_num_result_relations = 1;	estate->es_result_relation_info = resultRelInfo;	while (aocs_getnext(scanDesc, ForwardScanDirection, slot))	{		CHECK_FOR_INTERRUPTS();		aoTupleId = (AOTupleId *) slot_get_ctid(slot);		if (AppendOnlyVisimap_IsVisible(&scanDesc->visibilityMap, aoTupleId))		{			AOCSMoveTuple(slot,						  insertDesc,						  resultRelInfo,						  estate);			movedTupleCount++;		}		else		{			/* Tuple is invisible and needs to be dropped */			AppendOnlyThrowAwayTuple(aorel,									 slot,									 mt_bind);		}		/*		 * Check for vacuum delay point after approximatly a var block		 */		tupleCount++;		if (VacuumCostActive && tupleCount % tuplePerPage == 0)		{			vacuum_delay_point();		}	}	SetAOCSFileSegInfoState(aorel, compact_segno,							AOSEG_STATE_AWAITING_DROP);	AppendOnlyVisimap_DeleteSegmentFile(&visiMap,										compact_segno);	/* Delete all mini pages of the segment files if block directory exists */	if (OidIsValid(aorel->rd_appendonly->blkdirrelid))	{		AppendOnlyBlockDirectory_DeleteSegmentFile(aorel,												   snapshot,												   compact_segno,												   0);	}	elogif(Debug_appendonly_print_compaction, LOG,		   "Finished compaction: "		   "AO segfile %d, relation %s, moved tuple count " INT64_FORMAT,		   compact_segno, relname, movedTupleCount);	AppendOnlyVisimap_Finish(&visiMap, NoLock);	ExecCloseIndices(resultRelInfo);	FreeExecutorState(estate);	ExecDropSingleTupleTableSlot(slot);	destroy_memtuple_binding(mt_bind);	aocs_endscan(scanDesc);	pfree(proj);	return true;}
Developer: adam8157, Project: gpdb, Lines: 101
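Example 8 shows a common refinement for per-tuple (rather than per-page) work: the delay point is reached only every so many tuples, gated on VacuumCostActive, instead of once per iteration. A minimal sketch of that throttling pattern follows; fetch_next_tuple() and process_tuple() are hypothetical placeholders, and tuplesPerCheck is an arbitrary illustrative batch size.

/* Sketch: throttle the delay point in a per-tuple loop. */
int64		tupleCount = 0;
const int	tuplesPerCheck = 100;		/* illustrative batch size */

while (fetch_next_tuple(scan, slot))	/* hypothetical scan helper */
{
	CHECK_FOR_INTERRUPTS();
	process_tuple(slot);				/* hypothetical per-tuple work */

	tupleCount++;
	if (VacuumCostActive && tupleCount % tuplesPerCheck == 0)
		vacuum_delay_point();
}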


Example 9: spgprocesspending

/* * Process the pending-TID list between pages of the main scan */static voidspgprocesspending(spgBulkDeleteState *bds){	Relation	index = bds->info->index;	spgVacPendingItem *pitem;	spgVacPendingItem *nitem;	BlockNumber blkno;	Buffer		buffer;	Page		page;	for (pitem = bds->pendingList; pitem != NULL; pitem = pitem->next)	{		if (pitem->done)			continue;			/* ignore already-done items */		/* call vacuum_delay_point while not holding any buffer lock */		vacuum_delay_point();		/* examine the referenced page */		blkno = ItemPointerGetBlockNumber(&pitem->tid);		buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,									RBM_NORMAL, bds->info->strategy);		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);		page = (Page) BufferGetPage(buffer);		if (PageIsNew(page) || SpGistPageIsDeleted(page))		{			/* Probably shouldn't happen, but ignore it */		}		else if (SpGistPageIsLeaf(page))		{			if (SpGistBlockIsRoot(blkno))			{				/* this should definitely not happen */				elog(ERROR, "redirection leads to root page of index /"%s/"",					 RelationGetRelationName(index));			}			/* deal with any deletable tuples */			vacuumLeafPage(bds, index, buffer, true);			/* might as well do this while we are here */			vacuumRedirectAndPlaceholder(index, buffer);			SpGistSetLastUsedPage(index, buffer);			/*			 * We can mark as done not only this item, but any later ones			 * pointing at the same page, since we vacuumed the whole page.			 */			pitem->done = true;			for (nitem = pitem->next; nitem != NULL; nitem = nitem->next)			{				if (ItemPointerGetBlockNumber(&nitem->tid) == blkno)					nitem->done = true;			}		}		else		{			/*			 * On an inner page, visit the referenced inner tuple and add all			 * its downlinks to the pending list.  We might have pending items			 * for more than one inner tuple on the same page (in fact this is			 * pretty likely given the way space allocation works), so get			 * them all while we are here.			 */			for (nitem = pitem; nitem != NULL; nitem = nitem->next)			{				if (nitem->done)					continue;				if (ItemPointerGetBlockNumber(&nitem->tid) == blkno)				{					OffsetNumber offset;					SpGistInnerTuple innerTuple;					offset = ItemPointerGetOffsetNumber(&nitem->tid);					innerTuple = (SpGistInnerTuple) PageGetItem(page,												PageGetItemId(page, offset));					if (innerTuple->tupstate == SPGIST_LIVE)					{						SpGistNodeTuple node;						int			i;						SGITITERATE(innerTuple, i, node)						{							if (ItemPointerIsValid(&node->t_tid))								spgAddPendingTID(bds, &node->t_tid);						}					}					else if (innerTuple->tupstate == SPGIST_REDIRECT)					{						/* transfer attention to redirect point */						spgAddPendingTID(bds,								   &((SpGistDeadTuple) innerTuple)->pointer);					}					else						elog(ERROR, "unexpected SPGiST tuple state: %d",							 innerTuple->tupstate);//.........这里部分代码省略.........
Developer: BioBD, Project: Hypothetical_Indexes, Lines: 101


Example 10: ginInsertCleanup

//.........这里部分代码省略.........								  ALLOCSET_DEFAULT_MINSIZE,								  ALLOCSET_DEFAULT_INITSIZE,								  ALLOCSET_DEFAULT_MAXSIZE);	oldCtx = MemoryContextSwitchTo(opCtx);	initKeyArray(&datums, 128);	ginInitBA(&accum);	accum.ginstate = ginstate;	/*	 * At the top of this loop, we have pin and lock on the current page of	 * the pending list.  However, we'll release that before exiting the loop.	 * Note we also have pin but not lock on the metapage.	 */	for (;;)	{		Assert(!GinPageIsDeleted(page));		/*		 * Are we walk through the page which as we remember was a tail when		 * we start our cleanup?  But if caller asks us to clean up whole		 * pending list then ignore old tail, we will work until list becomes		 * empty.		 */		if (blkno == blknoFinish && full_clean == false)			cleanupFinish = true;		/*		 * read page's datums into accum		 */		processPendingPage(&accum, &datums, page, FirstOffsetNumber);		vacuum_delay_point();		/*		 * Is it time to flush memory to disk?	Flush if we are at the end of		 * the pending list, or if we have a full row and memory is getting		 * full.		 */		if (GinPageGetOpaque(page)->rightlink == InvalidBlockNumber ||			(GinPageHasFullRow(page) &&			 (accum.allocatedMemory >= workMemory * 1024L)))		{			ItemPointerData *list;			uint32		nlist;			Datum		key;			GinNullCategory category;			OffsetNumber maxoff,						attnum;			/*			 * Unlock current page to increase performance. Changes of page			 * will be checked later by comparing maxoff after completion of			 * memory flush.			 */			maxoff = PageGetMaxOffsetNumber(page);			LockBuffer(buffer, GIN_UNLOCK);			/*			 * Moving collected data into regular structure can take			 * significant amount of time - so, run it without locking pending			 * list.			 */			ginBeginBAScan(&accum);			while ((list = ginGetBAEntry(&accum,
Developer: 0x0FFF, Project: postgres, Lines: 67


Example 11: btvacuumpage

/* * btvacuumpage --- VACUUM one page * * This processes a single page for btvacuumscan().  In some cases we * must go back and re-examine previously-scanned pages; this routine * recurses when necessary to handle that case. * * blkno is the page to process.  orig_blkno is the highest block number * reached by the outer btvacuumscan loop (the same as blkno, unless we * are recursing to re-examine a previous page). */static voidbtvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno){	IndexVacuumInfo *info = vstate->info;	IndexBulkDeleteResult *stats = vstate->stats;	IndexBulkDeleteCallback callback = vstate->callback;	void	   *callback_state = vstate->callback_state;	Relation	rel = info->index;	bool		delete_now;	BlockNumber recurse_to;	Buffer		buf;	Page		page;	BTPageOpaque opaque = NULL;restart:	delete_now = false;	recurse_to = P_NONE;	/* call vacuum_delay_point while not holding any buffer lock */	vacuum_delay_point();	/*	 * We can't use _bt_getbuf() here because it always applies	 * _bt_checkpage(), which will barf on an all-zero page. We want to	 * recycle all-zero pages, not fail.  Also, we want to use a nondefault	 * buffer access strategy.	 */	buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,							 info->strategy);	LockBuffer(buf, BT_READ);	page = BufferGetPage(buf);	if (!PageIsNew(page))	{		_bt_checkpage(rel, buf);		opaque = (BTPageOpaque) PageGetSpecialPointer(page);	}	/*	 * If we are recursing, the only case we want to do anything with is a	 * live leaf page having the current vacuum cycle ID.  Any other state	 * implies we already saw the page (eg, deleted it as being empty).	 */	if (blkno != orig_blkno)	{		if (_bt_page_recyclable(page) ||			P_IGNORE(opaque) ||			!P_ISLEAF(opaque) ||			opaque->btpo_cycleid != vstate->cycleid)		{			_bt_relbuf(rel, buf);			return;		}	}	/* Page is valid, see what to do with it */	if (_bt_page_recyclable(page))	{		/* Okay to recycle this page */		RecordFreeIndexPage(rel, blkno);		vstate->totFreePages++;		stats->pages_deleted++;	}	else if (P_ISDELETED(opaque))	{		/* Already deleted, but can't recycle yet */		stats->pages_deleted++;	}	else if (P_ISHALFDEAD(opaque))	{		/* Half-dead, try to delete */		delete_now = true;	}	else if (P_ISLEAF(opaque))	{		OffsetNumber deletable[MaxOffsetNumber];		int			ndeletable;		OffsetNumber offnum,					minoff,					maxoff;		/*		 * Trade in the initial read lock for a super-exclusive write lock on		 * this page.  We must get such a lock on every leaf page over the		 * course of the vacuum scan, whether or not it actually contains any		 * deletable tuples --- see nbtree/README.		 */		LockBuffer(buf, BUFFER_LOCK_UNLOCK);		LockBufferForCleanup(buf);//.........这里部分代码省略.........
Developer: AmiGanguli, Project: postgres, Lines: 101


Example 12: compute_tsvector_stats

//.........这里部分代码省略.........	 */	bucket_width = num_mcelem;	/*	 * Create the hashtable. It will be in local memory, so we don't need to	 * worry about initial size too much. Also we don't need to pay any	 * attention to locking and memory management.	 */	MemSet(&hash_ctl, 0, sizeof(hash_ctl));	hash_ctl.keysize = sizeof(LexemeHashKey);	hash_ctl.entrysize = sizeof(TrackItem);	hash_ctl.hash = lexeme_hash;	hash_ctl.match = lexeme_match;	hash_ctl.hcxt = CurrentMemoryContext;	lexemes_tab = hash_create("Analyzed lexemes table",							  bucket_width * 4,							  &hash_ctl,					HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);	/* Initialize counters. */	b_current = 1;	lexeme_no = 1;	/* Loop over the tsvectors. */	for (vector_no = 0; vector_no < samplerows; vector_no++)	{		Datum		value;		bool		isnull;		TSVector	vector;		WordEntry  *curentryptr;		char	   *lexemesptr;		int			j;		vacuum_delay_point();		value = fetchfunc(stats, vector_no, &isnull);		/*		 * Check for null/nonnull.		 */		if (isnull)		{			null_cnt++;			continue;		}		/*		 * Add up widths for average-width calculation.  Since it's a		 * tsvector, we know it's varlena.  As in the regular		 * compute_minimal_stats function, we use the toasted width for this		 * calculation.		 */		total_width += VARSIZE_ANY(DatumGetPointer(value));		/*		 * Now detoast the tsvector if needed.		 */		vector = DatumGetTSVector(value);		/*		 * We loop through the lexemes in the tsvector and add them to our		 * tracking hashtable.	Note: the hashtable entries will point into		 * the (detoasted) tsvector value, therefore we cannot free that		 * storage until we're done.		 */		lexemesptr = STRPTR(vector);
Developer: badalex, Project: postgresql-scratchpad, Lines: 67


Example 13: ginvacuumcleanup

Datumginvacuumcleanup(PG_FUNCTION_ARGS){	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);	IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);	Relation	index = info->index;	bool		needLock;	BlockNumber npages,				blkno;	BlockNumber totFreePages;	GinState	ginstate;	GinStatsData idxStat;	/*	 * In an autovacuum analyze, we want to clean up pending insertions.	 * Otherwise, an ANALYZE-only call is a no-op.	 */	if (info->analyze_only)	{		if (IsAutoVacuumWorkerProcess())		{			initGinState(&ginstate, index);			ginInsertCleanup(&ginstate, true, stats);		}		PG_RETURN_POINTER(stats);	}	/*	 * Set up all-zero stats and cleanup pending inserts if ginbulkdelete	 * wasn't called	 */	if (stats == NULL)	{		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));		initGinState(&ginstate, index);		ginInsertCleanup(&ginstate, true, stats);	}	memset(&idxStat, 0, sizeof(idxStat));	/*	 * XXX we always report the heap tuple count as the number of index	 * entries.  This is bogus if the index is partial, but it's real hard to	 * tell how many distinct heap entries are referenced by a GIN index.	 */	stats->num_index_tuples = info->num_heap_tuples;	stats->estimated_count = info->estimated_count;	/*	 * Need lock unless it's local to this backend.	 */	needLock = !RELATION_IS_LOCAL(index);	if (needLock)		LockRelationForExtension(index, ExclusiveLock);	npages = RelationGetNumberOfBlocks(index);	if (needLock)		UnlockRelationForExtension(index, ExclusiveLock);	totFreePages = 0;	for (blkno = GIN_ROOT_BLKNO; blkno < npages; blkno++)	{		Buffer		buffer;		Page		page;		vacuum_delay_point();		buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,									RBM_NORMAL, info->strategy);		LockBuffer(buffer, GIN_SHARE);		page = (Page) BufferGetPage(buffer);		if (GinPageIsDeleted(page))		{			Assert(blkno != GIN_ROOT_BLKNO);			RecordFreeIndexPage(index, blkno);			totFreePages++;		}		else if (GinPageIsData(page))		{			idxStat.nDataPages++;		}		else if (!GinPageIsList(page))		{			idxStat.nEntryPages++;			if (GinPageIsLeaf(page))				idxStat.nEntries += PageGetMaxOffsetNumber(page);		}		UnlockReleaseBuffer(buffer);	}	/* Update the metapage with accurate page and entry counts */	idxStat.nTotalPages = npages;	ginUpdateStats(info->index, &idxStat);	/* Finally, vacuum the FSM */	IndexFreeSpaceMapVacuum(info->index);//.........这里部分代码省略.........
Developer: Epictetus, Project: postgres, Lines: 101


Example 14: rtbulkdelete

/* * Bulk deletion of all index entries pointing to a set of heap tuples. * The set of target tuples is specified via a callback routine that tells * whether any given heap tuple (identified by ItemPointer) is being deleted. * * Result: a palloc'd struct containing statistical info for VACUUM displays. */Datumrtbulkdelete(PG_FUNCTION_ARGS){	Relation	rel = (Relation) PG_GETARG_POINTER(0);	IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);	void	   *callback_state = (void *) PG_GETARG_POINTER(2);	IndexBulkDeleteResult *result;	BlockNumber num_pages;	double		tuples_removed;	double		num_index_tuples;	IndexScanDesc iscan;	tuples_removed = 0;	num_index_tuples = 0;	/*	 * Since rtree is not marked "amconcurrent" in pg_am, caller should have	 * acquired exclusive lock on index relation.  We need no locking here.	 */	/*	 * XXX generic implementation --- should be improved!	 */	/* walk through the entire index */	iscan = index_beginscan(NULL, rel, SnapshotAny, 0, NULL);	/* including killed tuples */	iscan->ignore_killed_tuples = false;	while (index_getnext_indexitem(iscan, ForwardScanDirection))	{		vacuum_delay_point();		if (callback(&iscan->xs_ctup.t_self, callback_state))		{			ItemPointerData indextup = iscan->currentItemData;			BlockNumber blkno;			OffsetNumber offnum;			Buffer		buf;			Page		page;			blkno = ItemPointerGetBlockNumber(&indextup);			offnum = ItemPointerGetOffsetNumber(&indextup);			/* adjust any scans that will be affected by this deletion */			/* (namely, my own scan) */			rtadjscans(rel, RTOP_DEL, blkno, offnum);			/* delete the index tuple */			buf = ReadBuffer(rel, blkno);			page = BufferGetPage(buf);			PageIndexTupleDelete(page, offnum);			WriteBuffer(buf);			tuples_removed += 1;		}		else			num_index_tuples += 1;	}	index_endscan(iscan);	/* return statistics */	num_pages = RelationGetNumberOfBlocks(rel);	result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));	result->num_pages = num_pages;	result->num_index_tuples = num_index_tuples;	result->tuples_removed = tuples_removed;	PG_RETURN_POINTER(result);}
Developer: CraigBryan, Project: PostgresqlFun, Lines: 81


Example 15: lazy_scan_heap

/* *	lazy_scan_heap() -- scan an open heap relation * *		This routine sets commit status bits, builds lists of dead tuples *		and pages with free space, and calculates statistics on the number *		of live tuples in the heap.  When done, or when we run low on space *		for dead-tuple TIDs, invoke vacuuming of indexes and heap. * *		If there are no indexes then we just vacuum each dirty page as we *		process it, since there's no point in gathering many tuples. */static voidlazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,			   Relation *Irel, int nindexes, List *updated_stats){	MIRROREDLOCK_BUFMGR_DECLARE;	BlockNumber nblocks,				blkno;	HeapTupleData tuple;	char	   *relname;	BlockNumber empty_pages,				vacuumed_pages;	double		num_tuples,				tups_vacuumed,				nkeep,				nunused;	IndexBulkDeleteResult **indstats;	int			i;	int reindex_count = 1;	PGRUsage	ru0;	/* Fetch gp_persistent_relation_node information that will be added to XLOG record. */	RelationFetchGpRelationNodeForXLog(onerel);	pg_rusage_init(&ru0);	relname = RelationGetRelationName(onerel);	ereport(elevel,			(errmsg("vacuuming /"%s.%s/"",					get_namespace_name(RelationGetNamespace(onerel)),					relname)));	empty_pages = vacuumed_pages = 0;	num_tuples = tups_vacuumed = nkeep = nunused = 0;	indstats = (IndexBulkDeleteResult **)		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));	nblocks = RelationGetNumberOfBlocks(onerel);	vacrelstats->rel_pages = nblocks;	vacrelstats->nonempty_pages = 0;	lazy_space_alloc(vacrelstats, nblocks);	for (blkno = 0; blkno < nblocks; blkno++)	{		Buffer		buf;		Page		page;		OffsetNumber offnum,					maxoff;		bool		tupgone,					hastup;		int			prev_dead_count;		OffsetNumber frozen[MaxOffsetNumber];		int			nfrozen;		vacuum_delay_point();		/*		 * If we are close to overrunning the available space for dead-tuple		 * TIDs, pause and do a cycle of vacuuming before we tackle this page.		 */		if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&			vacrelstats->num_dead_tuples > 0)		{			/* Remove index entries */			for (i = 0; i < nindexes; i++)				lazy_vacuum_index(Irel[i], &indstats[i], vacrelstats);			reindex_count++;			/* Remove tuples from heap */			lazy_vacuum_heap(onerel, vacrelstats);			/* Forget the now-vacuumed tuples, and press on */			vacrelstats->num_dead_tuples = 0;			vacrelstats->num_index_scans++;		}		/* -------- MirroredLock ---------- */		MIRROREDLOCK_BUFMGR_LOCK;		buf = ReadBufferWithStrategy(onerel, blkno, vac_strategy);		/* We need buffer cleanup lock so that we can prune HOT chains. */		LockBufferForCleanup(buf);		page = BufferGetPage(buf);		if (PageIsNew(page))//.........这里部分代码省略.........
Developer: phan-pivotal, Project: gpdb, Lines: 101


Example 16: ginvacuumcleanup

Datumginvacuumcleanup(PG_FUNCTION_ARGS){	MIRROREDLOCK_BUFMGR_DECLARE;	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);	IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);	Relation	index = info->index;	bool		needLock;	BlockNumber npages,				blkno;	BlockNumber totFreePages,				nFreePages,			   *freePages,				maxFreePages;	BlockNumber lastBlock = GIN_ROOT_BLKNO,				lastFilledBlock = GIN_ROOT_BLKNO;	/* Set up all-zero stats if ginbulkdelete wasn't called */	if (stats == NULL)		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));	/*	 * XXX we always report the heap tuple count as the number of index	 * entries.  This is bogus if the index is partial, but it's real hard to	 * tell how many distinct heap entries are referenced by a GIN index.	 */	stats->num_index_tuples = info->num_heap_tuples;	/*	 * If vacuum full, we already have exclusive lock on the index. Otherwise,	 * need lock unless it's local to this backend.	 */	if (info->vacuum_full)		needLock = false;	else		needLock = !RELATION_IS_LOCAL(index);	if (needLock)		LockRelationForExtension(index, ExclusiveLock);	npages = RelationGetNumberOfBlocks(index);	if (needLock)		UnlockRelationForExtension(index, ExclusiveLock);	maxFreePages = npages;	if (maxFreePages > MaxFSMPages)		maxFreePages = MaxFSMPages;	totFreePages = nFreePages = 0;	freePages = (BlockNumber *) palloc(sizeof(BlockNumber) * maxFreePages);	for (blkno = GIN_ROOT_BLKNO + 1; blkno < npages; blkno++)	{		Buffer		buffer;		Page		page;		vacuum_delay_point();				// -------- MirroredLock ----------		MIRROREDLOCK_BUFMGR_LOCK;				buffer = ReadBuffer(index, blkno);		LockBuffer(buffer, GIN_SHARE);		page = (Page) BufferGetPage(buffer);		if (GinPageIsDeleted(page))		{			if (nFreePages < maxFreePages)				freePages[nFreePages++] = blkno;			totFreePages++;		}		else			lastFilledBlock = blkno;		UnlockReleaseBuffer(buffer);				MIRROREDLOCK_BUFMGR_UNLOCK;		// -------- MirroredLock ----------			}	lastBlock = npages - 1;	if (info->vacuum_full && nFreePages > 0)	{		/* try to truncate index */		int			i;		for (i = 0; i < nFreePages; i++)			if (freePages[i] >= lastFilledBlock)			{				totFreePages = nFreePages = i;				break;			}		if (lastBlock > lastFilledBlock)			RelationTruncate(					index, 					lastFilledBlock + 1,					/* markPersistentAsPhysicallyTruncated */ true);//.........这里部分代码省略.........
Developer: AnLingm, Project: gpdb, Lines: 101


Example 17: ginbulkdelete

Datumginbulkdelete(PG_FUNCTION_ARGS){	MIRROREDLOCK_BUFMGR_DECLARE;	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);	IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);	IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);	void	   *callback_state = (void *) PG_GETARG_POINTER(3);	Relation	index = info->index;	BlockNumber blkno = GIN_ROOT_BLKNO;	GinVacuumState gvs;	Buffer		buffer;	BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))];	uint32		nRoot;	/* first time through? */	if (stats == NULL)		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));	/* we'll re-count the tuples each time */	stats->num_index_tuples = 0;	gvs.index = index;	gvs.result = stats;	gvs.callback = callback;	gvs.callback_state = callback_state;	initGinState(&gvs.ginstate, index);		// -------- MirroredLock ----------	MIRROREDLOCK_BUFMGR_LOCK;		buffer = ReadBuffer(index, blkno);	/* find leaf page */	for (;;)	{		Page		page = BufferGetPage(buffer);		IndexTuple	itup;		LockBuffer(buffer, GIN_SHARE);		Assert(!GinPageIsData(page));		if (GinPageIsLeaf(page))		{			LockBuffer(buffer, GIN_UNLOCK);			LockBuffer(buffer, GIN_EXCLUSIVE);			if (blkno == GIN_ROOT_BLKNO && !GinPageIsLeaf(page))			{				LockBuffer(buffer, GIN_UNLOCK);				continue;		/* check it one more */			}			break;		}		Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);		itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));		blkno = GinItemPointerGetBlockNumber(&(itup)->t_tid);		Assert(blkno != InvalidBlockNumber);		LockBuffer(buffer, GIN_UNLOCK);		buffer = ReleaseAndReadBuffer(buffer, index, blkno);	}	/* right now we found leftmost page in entry's BTree */	for (;;)	{		Page		page = BufferGetPage(buffer);		Page		resPage;		uint32		i;		Assert(!GinPageIsData(page));		resPage = ginVacuumEntryPage(&gvs, buffer, rootOfPostingTree, &nRoot);		blkno = GinPageGetOpaque(page)->rightlink;		if (resPage)		{			START_CRIT_SECTION();			PageRestoreTempPage(resPage, page);			MarkBufferDirty(buffer);			xlogVacuumPage(gvs.index, buffer);			UnlockReleaseBuffer(buffer);			END_CRIT_SECTION();		}		else		{			UnlockReleaseBuffer(buffer);		}		vacuum_delay_point();		for (i = 0; i < nRoot; i++)		{			ginVacuumPostingTree(&gvs, rootOfPostingTree[i]);			vacuum_delay_point();//.........这里部分代码省略.........
Developer: AnLingm, Project: gpdb, Lines: 101


Example 18: hashbucketcleanup

/* * Helper function to perform deletion of index entries from a bucket. * * This function expects that the caller has acquired a cleanup lock on the * primary bucket page, and will return with a write lock again held on the * primary bucket page.  The lock won't necessarily be held continuously, * though, because we'll release it when visiting overflow pages. * * It would be very bad if this function cleaned a page while some other * backend was in the midst of scanning it, because hashgettuple assumes * that the next valid TID will be greater than or equal to the current * valid TID.  There can't be any concurrent scans in progress when we first * enter this function because of the cleanup lock we hold on the primary * bucket page, but as soon as we release that lock, there might be.  We * handle that by conspiring to prevent those scans from passing our cleanup * scan.  To do that, we lock the next page in the bucket chain before * releasing the lock on the previous page.  (This type of lock chaining is * not ideal, so we might want to look for a better solution at some point.) * * We need to retain a pin on the primary bucket to ensure that no concurrent * split can start. */voidhashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,                  BlockNumber bucket_blkno, BufferAccessStrategy bstrategy,                  uint32 maxbucket, uint32 highmask, uint32 lowmask,                  double *tuples_removed, double *num_index_tuples,                  bool split_cleanup,                  IndexBulkDeleteCallback callback, void *callback_state){    BlockNumber blkno;    Buffer		buf;    Bucket new_bucket PG_USED_FOR_ASSERTS_ONLY = InvalidBucket;    bool		bucket_dirty = false;    blkno = bucket_blkno;    buf = bucket_buf;    if (split_cleanup)        new_bucket = _hash_get_newbucket_from_oldbucket(rel, cur_bucket,                     lowmask, maxbucket);    /* Scan each page in bucket */    for (;;)    {        HashPageOpaque opaque;        OffsetNumber offno;        OffsetNumber maxoffno;        Buffer		next_buf;        Page		page;        OffsetNumber deletable[MaxOffsetNumber];        int			ndeletable = 0;        bool		retain_pin = false;        bool		curr_page_dirty = false;        vacuum_delay_point();        page = BufferGetPage(buf);        opaque = (HashPageOpaque) PageGetSpecialPointer(page);        /* Scan each tuple in page */        maxoffno = PageGetMaxOffsetNumber(page);        for (offno = FirstOffsetNumber;                offno <= maxoffno;                offno = OffsetNumberNext(offno))        {            ItemPointer htup;            IndexTuple	itup;            Bucket		bucket;            bool		kill_tuple = false;            itup = (IndexTuple) PageGetItem(page,                                            PageGetItemId(page, offno));            htup = &(itup->t_tid);            /*             * To remove the dead tuples, we strictly want to rely on results             * of callback function.  refer btvacuumpage for detailed reason.             */            if (callback && callback(htup, callback_state))            {                kill_tuple = true;                if (tuples_removed)                    *tuples_removed += 1;            }            else if (split_cleanup)            {                /* delete the tuples that are moved by split. 
*/                bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),                                              maxbucket,                                              highmask,                                              lowmask);                /* mark the item for deletion */                if (bucket != cur_bucket)                {                    /*                     * We expect tuples to either belong to curent bucket or                     * new_bucket.  This is ensured because we don't allow                     * further splits from bucket that contains garbage. See                     * comments in _hash_expandtable.//.........这里部分代码省略.........
Developer: michaelpq, Project: postgres, Lines: 101


Example 19: gistbulkdelete

//.........这里部分代码省略.........			LockBuffer(buffer, GIST_UNLOCK);			LockBuffer(buffer, GIST_EXCLUSIVE);			page = (Page) BufferGetPage(buffer);			if (stack->blkno == GIST_ROOT_BLKNO && !GistPageIsLeaf(page))			{				/* only the root can become non-leaf during relock */				UnlockReleaseBuffer(buffer);				/* one more check */				continue;			}			/*			 * check for split proceeded after look at parent, we should check			 * it after relock			 */			pushStackIfSplited(page, stack);			/*			 * Remove deletable tuples from page			 */			maxoff = PageGetMaxOffsetNumber(page);			for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))			{				iid = PageGetItemId(page, i);				idxtuple = (IndexTuple) PageGetItem(page, iid);				if (callback(&(idxtuple->t_tid), callback_state))					todelete[ntodelete++] = i;				else					stats->num_index_tuples += 1;			}			stats->tuples_removed += ntodelete;			if (ntodelete)			{				START_CRIT_SECTION();				MarkBufferDirty(buffer);				PageIndexMultiDelete(page, todelete, ntodelete);				GistMarkTuplesDeleted(page);				if (RelationNeedsWAL(rel))				{					XLogRecPtr	recptr;					recptr = gistXLogUpdate(buffer,											todelete, ntodelete,											NULL, 0, InvalidBuffer);					PageSetLSN(page, recptr);				}				else					PageSetLSN(page, gistGetFakeLSN(rel));				END_CRIT_SECTION();			}		}		else		{			/* check for split proceeded after look at parent */			pushStackIfSplited(page, stack);			maxoff = PageGetMaxOffsetNumber(page);			for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))			{				iid = PageGetItemId(page, i);				idxtuple = (IndexTuple) PageGetItem(page, iid);				ptr = (GistBDItem *) palloc(sizeof(GistBDItem));				ptr->blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));				ptr->parentlsn = BufferGetLSNAtomic(buffer);				ptr->next = stack->next;				stack->next = ptr;				if (GistTupleIsInvalid(idxtuple))					ereport(LOG,							(errmsg("index /"%s/" contains an inner tuple marked as invalid",									RelationGetRelationName(rel)),							 errdetail("This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1."),							 errhint("Please REINDEX it.")));			}		}		UnlockReleaseBuffer(buffer);		ptr = stack->next;		pfree(stack);		stack = ptr;		vacuum_delay_point();	}	return stats;}
Developer: maksm90, Project: postgresql, Lines: 101


Example 20: hashbulkdelete

/* * Bulk deletion of all index entries pointing to a set of heap tuples. * The set of target tuples is specified via a callback routine that tells * whether any given heap tuple (identified by ItemPointer) is being deleted. * * Result: a palloc'd struct containing statistical info for VACUUM displays. */IndexBulkDeleteResult *hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,			   IndexBulkDeleteCallback callback, void *callback_state){	Relation	rel = info->index;	double		tuples_removed;	double		num_index_tuples;	double		orig_ntuples;	Bucket		orig_maxbucket;	Bucket		cur_maxbucket;	Bucket		cur_bucket;	Buffer		metabuf;	HashMetaPage metap;	HashMetaPageData local_metapage;	tuples_removed = 0;	num_index_tuples = 0;	/*	 * Read the metapage to fetch original bucket and tuple counts.  Also, we	 * keep a copy of the last-seen metapage so that we can use its	 * hashm_spares[] values to compute bucket page addresses.  This is a bit	 * hokey but perfectly safe, since the interesting entries in the spares	 * array cannot change under us; and it beats rereading the metapage for	 * each bucket.	 */	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);	metap = HashPageGetMeta(BufferGetPage(metabuf));	orig_maxbucket = metap->hashm_maxbucket;	orig_ntuples = metap->hashm_ntuples;	memcpy(&local_metapage, metap, sizeof(local_metapage));	_hash_relbuf(rel, metabuf);	/* Scan the buckets that we know exist */	cur_bucket = 0;	cur_maxbucket = orig_maxbucket;loop_top:	while (cur_bucket <= cur_maxbucket)	{		BlockNumber bucket_blkno;		BlockNumber blkno;		bool		bucket_dirty = false;		/* Get address of bucket's start page */		bucket_blkno = BUCKET_TO_BLKNO(&local_metapage, cur_bucket);		/* Exclusive-lock the bucket so we can shrink it */		_hash_getlock(rel, bucket_blkno, HASH_EXCLUSIVE);		/* Shouldn't have any active scans locally, either */		if (_hash_has_active_scan(rel, cur_bucket))			elog(ERROR, "hash index has active scan during VACUUM");		/* Scan each page in bucket */		blkno = bucket_blkno;		while (BlockNumberIsValid(blkno))		{			Buffer		buf;			Page		page;			HashPageOpaque opaque;			OffsetNumber offno;			OffsetNumber maxoffno;			OffsetNumber deletable[MaxOffsetNumber];			int			ndeletable = 0;			vacuum_delay_point();			buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,										   LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,											 info->strategy);			page = BufferGetPage(buf);			opaque = (HashPageOpaque) PageGetSpecialPointer(page);			Assert(opaque->hasho_bucket == cur_bucket);			/* Scan each tuple in page */			maxoffno = PageGetMaxOffsetNumber(page);			for (offno = FirstOffsetNumber;				 offno <= maxoffno;				 offno = OffsetNumberNext(offno))			{				IndexTuple	itup;				ItemPointer htup;				itup = (IndexTuple) PageGetItem(page,												PageGetItemId(page, offno));				htup = &(itup->t_tid);				if (callback(htup, callback_state))				{					/* mark the item for deletion */					deletable[ndeletable++] = offno;					tuples_removed += 1;				}//.........这里部分代码省略.........
Developer: Gordiychuk, Project: postgres, Lines: 101


Example 21: gistvacuumcleanup

/*
 * VACUUM cleanup: update FSM
 */
IndexBulkDeleteResult *
gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
	Relation	rel = info->index;
	BlockNumber npages,
				blkno;
	BlockNumber totFreePages;
	bool		needLock;

	/* No-op in ANALYZE ONLY mode */
	if (info->analyze_only)
		return stats;

	/* Set up all-zero stats if gistbulkdelete wasn't called */
	if (stats == NULL)
	{
		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
		/* use heap's tuple count */
		stats->num_index_tuples = info->num_heap_tuples;
		stats->estimated_count = info->estimated_count;

		/*
		 * XXX the above is wrong if index is partial.  Would it be OK to just
		 * return NULL, or is there work we must do below?
		 */
	}

	/*
	 * Need lock unless it's local to this backend.
	 */
	needLock = !RELATION_IS_LOCAL(rel);

	/* try to find deleted pages */
	if (needLock)
		LockRelationForExtension(rel, ExclusiveLock);
	npages = RelationGetNumberOfBlocks(rel);
	if (needLock)
		UnlockRelationForExtension(rel, ExclusiveLock);

	totFreePages = 0;
	for (blkno = GIST_ROOT_BLKNO + 1; blkno < npages; blkno++)
	{
		Buffer		buffer;
		Page		page;

		vacuum_delay_point();

		buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
									info->strategy);
		LockBuffer(buffer, GIST_SHARE);
		page = (Page) BufferGetPage(buffer);

		if (PageIsNew(page) || GistPageIsDeleted(page))
		{
			totFreePages++;
			RecordFreeIndexPage(rel, blkno);
		}

		UnlockReleaseBuffer(buffer);
	}

	/* Finally, vacuum the FSM */
	IndexFreeSpaceMapVacuum(info->index);

	/* return statistics */
	stats->pages_free = totFreePages;
	if (needLock)
		LockRelationForExtension(rel, ExclusiveLock);
	stats->num_pages = RelationGetNumberOfBlocks(rel);
	if (needLock)
		UnlockRelationForExtension(rel, ExclusiveLock);

	return stats;
}
Developer: maksm90, Project: postgresql, Lines: 76


Example 22: ginVacuumPostingTreeLeaves

/* * Scan through posting tree, delete empty tuples from leaf pages. * Also, this function collects empty subtrees (with all empty leafs). * For parents of these subtrees CleanUp lock is taken, then we call * ScanToDelete. This is done for every inner page, which points to * empty subtree. */static boolginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot){	Buffer		buffer;	Page		page;	bool		hasVoidPage = FALSE;	MemoryContext oldCxt;	buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,								RBM_NORMAL, gvs->strategy);	page = BufferGetPage(buffer);	ginTraverseLock(buffer, false);	Assert(GinPageIsData(page));	if (GinPageIsLeaf(page))	{		oldCxt = MemoryContextSwitchTo(gvs->tmpCxt);		ginVacuumPostingTreeLeaf(gvs->index, buffer, gvs);		MemoryContextSwitchTo(oldCxt);		MemoryContextReset(gvs->tmpCxt);		/* if root is a leaf page, we don't desire further processing */		if (GinDataLeafPageIsEmpty(page))			hasVoidPage = TRUE;		UnlockReleaseBuffer(buffer);		return hasVoidPage;	}	else	{		OffsetNumber i;		bool		hasEmptyChild = FALSE;		bool		hasNonEmptyChild = FALSE;		OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;		BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));		/*		 * Read all children BlockNumbers. Not sure it is safe if there are		 * many concurrent vacuums.		 */		for (i = FirstOffsetNumber; i <= maxoff; i++)		{			PostingItem *pitem = GinDataPageGetPostingItem(page, i);			children[i] = PostingItemGetBlockNumber(pitem);		}		UnlockReleaseBuffer(buffer);		for (i = FirstOffsetNumber; i <= maxoff; i++)		{			if (ginVacuumPostingTreeLeaves(gvs, children[i], FALSE))				hasEmptyChild = TRUE;			else				hasNonEmptyChild = TRUE;		}		pfree(children);		vacuum_delay_point();		/*		 * All subtree is empty - just return TRUE to indicate that parent		 * must do a cleanup. Unless we are ROOT an there is way to go upper.		 */		if (hasEmptyChild && !hasNonEmptyChild && !isRoot)			return TRUE;		if (hasEmptyChild)		{			DataPageDeleteStack root,					   *ptr,					   *tmp;			buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,										RBM_NORMAL, gvs->strategy);			LockBufferForCleanup(buffer);			memset(&root, 0, sizeof(DataPageDeleteStack));			root.leftBlkno = InvalidBlockNumber;			root.isRoot = TRUE;			ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber);			ptr = root.child;			while (ptr)			{//.........这里部分代码省略.........
Developer: BertrandAreal, Project: postgres, Lines: 101


Example 23: ginbulkdelete

Datumginbulkdelete(PG_FUNCTION_ARGS){	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);	IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);	IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);	void	   *callback_state = (void *) PG_GETARG_POINTER(3);	Relation	index = info->index;	BlockNumber blkno = GIN_ROOT_BLKNO;	GinVacuumState gvs;	Buffer		buffer;	BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))];	uint32		nRoot;	gvs.index = index;	gvs.callback = callback;	gvs.callback_state = callback_state;	gvs.strategy = info->strategy;	initGinState(&gvs.ginstate, index);	/* first time through? */	if (stats == NULL)	{		/* Yes, so initialize stats to zeroes */		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));		/* and cleanup any pending inserts */		ginInsertCleanup(&gvs.ginstate, true, stats);	}	/* we'll re-count the tuples each time */	stats->num_index_tuples = 0;	gvs.result = stats;	buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,								RBM_NORMAL, info->strategy);	/* find leaf page */	for (;;)	{		Page		page = BufferGetPage(buffer);		IndexTuple	itup;		LockBuffer(buffer, GIN_SHARE);		Assert(!GinPageIsData(page));		if (GinPageIsLeaf(page))		{			LockBuffer(buffer, GIN_UNLOCK);			LockBuffer(buffer, GIN_EXCLUSIVE);			if (blkno == GIN_ROOT_BLKNO && !GinPageIsLeaf(page))			{				LockBuffer(buffer, GIN_UNLOCK);				continue;		/* check it one more */			}			break;		}		Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);		itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));		blkno = GinGetDownlink(itup);		Assert(blkno != InvalidBlockNumber);		UnlockReleaseBuffer(buffer);		buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,									RBM_NORMAL, info->strategy);	}	/* right now we found leftmost page in entry's BTree */	for (;;)	{		Page		page = BufferGetPage(buffer);		Page		resPage;		uint32		i;		Assert(!GinPageIsData(page));		resPage = ginVacuumEntryPage(&gvs, buffer, rootOfPostingTree, &nRoot);		blkno = GinPageGetOpaque(page)->rightlink;		if (resPage)		{			START_CRIT_SECTION();			PageRestoreTempPage(resPage, page);			MarkBufferDirty(buffer);			xlogVacuumPage(gvs.index, buffer);			UnlockReleaseBuffer(buffer);			END_CRIT_SECTION();		}		else		{			UnlockReleaseBuffer(buffer);		}		vacuum_delay_point();//.........这里部分代码省略.........
Developer: Epictetus, Project: postgres, Lines: 101


Example 24: gistVacuumUpdate

static ArrayTuplegistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion){	ArrayTuple	res = {NULL, 0, false};	Buffer		buffer;	Page		page,				tempPage = NULL;	OffsetNumber i,				maxoff;	ItemId		iid;	int			lenaddon = 4,				curlenaddon = 0,				nOffToDelete = 0,				nBlkToDelete = 0;	IndexTuple	idxtuple,			   *addon = NULL;	bool		needwrite = false;	OffsetNumber offToDelete[MaxOffsetNumber];	BlockNumber blkToDelete[MaxOffsetNumber];	ItemPointerData *completed = NULL;	int			ncompleted = 0,				lencompleted = 16;	vacuum_delay_point();	buffer = ReadBufferWithStrategy(gv->index, blkno, gv->strategy);	LockBuffer(buffer, GIST_EXCLUSIVE);	gistcheckpage(gv->index, buffer);	page = (Page) BufferGetPage(buffer);	maxoff = PageGetMaxOffsetNumber(page);	if (GistPageIsLeaf(page))	{		if (GistTuplesDeleted(page))			needunion = needwrite = true;	}	else	{		completed = (ItemPointerData *) palloc(sizeof(ItemPointerData) * lencompleted);		addon = (IndexTuple *) palloc(sizeof(IndexTuple) * lenaddon);		/* get copy of page to work */		tempPage = GistPageGetCopyPage(page);		for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))		{			ArrayTuple	chldtuple;			bool		needchildunion;			iid = PageGetItemId(tempPage, i);			idxtuple = (IndexTuple) PageGetItem(tempPage, iid);			needchildunion = (GistTupleIsInvalid(idxtuple)) ? true : false;			if (needchildunion)				elog(DEBUG2, "gistVacuumUpdate: need union for block %u",					 ItemPointerGetBlockNumber(&(idxtuple->t_tid)));			chldtuple = gistVacuumUpdate(gv, ItemPointerGetBlockNumber(&(idxtuple->t_tid)),										 needchildunion);			if (chldtuple.ituplen || chldtuple.emptypage)			{				/* update tuple or/and inserts new */				if (chldtuple.emptypage)					blkToDelete[nBlkToDelete++] = ItemPointerGetBlockNumber(&(idxtuple->t_tid));				offToDelete[nOffToDelete++] = i;				PageIndexTupleDelete(tempPage, i);				i--;				maxoff--;				needwrite = needunion = true;				if (chldtuple.ituplen)				{					Assert(chldtuple.emptypage == false);					while (curlenaddon + chldtuple.ituplen >= lenaddon)					{						lenaddon *= 2;						addon = (IndexTuple *) repalloc(addon, sizeof(IndexTuple) * lenaddon);					}					memcpy(addon + curlenaddon, chldtuple.itup, chldtuple.ituplen * sizeof(IndexTuple));					curlenaddon += chldtuple.ituplen;					if (chldtuple.ituplen > 1)					{						/*						 * child was split, so we need mark completion						 * insert(split)						 */						int			j;						while (ncompleted + chldtuple.ituplen > lencompleted)						{							lencompleted *= 2;							completed = (ItemPointerData *) repalloc(completed, sizeof(ItemPointerData) * lencompleted);						}						for (j = 0; j < chldtuple.ituplen; j++)						{							ItemPointerCopy(&(chldtuple.itup[j]->t_tid), completed + ncompleted);//.........这里部分代码省略.........
Developer ID: KMU-embedded, Project: mosbench-ext, Lines of code: 101


Example 25: gistvacuumcleanup

Datumgistvacuumcleanup(PG_FUNCTION_ARGS){	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);	GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(1);	Relation	rel = info->index;	BlockNumber npages,				blkno;	BlockNumber totFreePages,				nFreePages,			   *freePages,				maxFreePages;	BlockNumber lastBlock = GIST_ROOT_BLKNO,				lastFilledBlock = GIST_ROOT_BLKNO;	bool		needLock;	/* Set up all-zero stats if gistbulkdelete wasn't called */	if (stats == NULL)	{		stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));		/* use heap's tuple count */		Assert(info->num_heap_tuples >= 0);		stats->std.num_index_tuples = info->num_heap_tuples;		/*		 * XXX the above is wrong if index is partial.	Would it be OK to just		 * return NULL, or is there work we must do below?		 */	}	/* gistVacuumUpdate may cause hard work */	if (info->vacuum_full)	{		GistVacuum	gv;		ArrayTuple	res;		/* note: vacuum.c already acquired AccessExclusiveLock on index */		gv.index = rel;		initGISTstate(&(gv.giststate), rel);		gv.opCtx = createTempGistContext();		gv.result = stats;		gv.strategy = info->strategy;		/* walk through the entire index for update tuples */		res = gistVacuumUpdate(&gv, GIST_ROOT_BLKNO, false);		/* cleanup */		if (res.itup)		{			int			i;			for (i = 0; i < res.ituplen; i++)				pfree(res.itup[i]);			pfree(res.itup);		}		freeGISTstate(&(gv.giststate));		MemoryContextDelete(gv.opCtx);	}	else if (stats->needFullVacuum)		ereport(NOTICE,				(errmsg("index /"%s/" needs VACUUM FULL or REINDEX to finish crash recovery",						RelationGetRelationName(rel))));	/*	 * If vacuum full, we already have exclusive lock on the index. Otherwise,	 * need lock unless it's local to this backend.	 */	if (info->vacuum_full)		needLock = false;	else		needLock = !RELATION_IS_LOCAL(rel);	/* try to find deleted pages */	if (needLock)		LockRelationForExtension(rel, ExclusiveLock);	npages = RelationGetNumberOfBlocks(rel);	if (needLock)		UnlockRelationForExtension(rel, ExclusiveLock);	maxFreePages = npages;	if (maxFreePages > MaxFSMPages)		maxFreePages = MaxFSMPages;	totFreePages = nFreePages = 0;	freePages = (BlockNumber *) palloc(sizeof(BlockNumber) * maxFreePages);	for (blkno = GIST_ROOT_BLKNO + 1; blkno < npages; blkno++)	{		Buffer		buffer;		Page		page;		vacuum_delay_point();		buffer = ReadBufferWithStrategy(rel, blkno, info->strategy);		LockBuffer(buffer, GIST_SHARE);		page = (Page) BufferGetPage(buffer);		if (PageIsNew(page) || GistPageIsDeleted(page))		{			if (nFreePages < maxFreePages)//.........这里部分代码省略.........
Developer ID: KMU-embedded, Project: mosbench-ext, Lines of code: 101


Example 26: gistbulkdelete

//.........这里部分代码省略.........				continue;			}			/*			 * check for split proceeded after look at parent, we should check			 * it after relock			 */			pushStackIfSplited(page, stack);			/*			 * Remove deletable tuples from page			 */			maxoff = PageGetMaxOffsetNumber(page);			for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))			{				iid = PageGetItemId(page, i);				idxtuple = (IndexTuple) PageGetItem(page, iid);				if (callback(&(idxtuple->t_tid), callback_state))				{					todelete[ntodelete] = i - ntodelete;					ntodelete++;					stats->std.tuples_removed += 1;				}				else					stats->std.num_index_tuples += 1;			}			if (ntodelete)			{				START_CRIT_SECTION();				MarkBufferDirty(buffer);				for (i = 0; i < ntodelete; i++)					PageIndexTupleDelete(page, todelete[i]);				GistMarkTuplesDeleted(page);				if (!rel->rd_istemp)				{					XLogRecData *rdata;					XLogRecPtr	recptr;					gistxlogPageUpdate *xlinfo;					rdata = formUpdateRdata(rel->rd_node, buffer,											todelete, ntodelete,											NULL, 0,											NULL);					xlinfo = (gistxlogPageUpdate *) rdata->next->data;					recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE, rdata);					PageSetLSN(page, recptr);					PageSetTLI(page, ThisTimeLineID);					pfree(xlinfo);					pfree(rdata);				}				else					PageSetLSN(page, XLogRecPtrForTemp);				END_CRIT_SECTION();			}		}		else		{			/* check for split proceeded after look at parent */			pushStackIfSplited(page, stack);			maxoff = PageGetMaxOffsetNumber(page);			for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))			{				iid = PageGetItemId(page, i);				idxtuple = (IndexTuple) PageGetItem(page, iid);				ptr = (GistBDItem *) palloc(sizeof(GistBDItem));				ptr->blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));				ptr->parentlsn = PageGetLSN(page);				ptr->next = stack->next;				stack->next = ptr;				if (GistTupleIsInvalid(idxtuple))					stats->needFullVacuum = true;			}		}		UnlockReleaseBuffer(buffer);		ptr = stack->next;		pfree(stack);		stack = ptr;		vacuum_delay_point();	}	PG_RETURN_POINTER(stats);}
Developer ID: KMU-embedded, Project: mosbench-ext, Lines of code: 101


Example 27: btvacuumpage

/* * btvacuumpage --- VACUUM one page * * This processes a single page for btvacuumscan().  In some cases we * must go back and re-examine previously-scanned pages; this routine * recurses when necessary to handle that case. * * blkno is the page to process.  orig_blkno is the highest block number * reached by the outer btvacuumscan loop (the same as blkno, unless we * are recursing to re-examine a previous page). */static voidbtvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno){	MIRROREDLOCK_BUFMGR_DECLARE;	IndexVacuumInfo *info = vstate->info;	IndexBulkDeleteResult *stats = vstate->stats;	IndexBulkDeleteCallback callback = vstate->callback;	void	   *callback_state = vstate->callback_state;	Relation	rel = info->index;	bool		delete_now;	BlockNumber recurse_to;	Buffer		buf;	Page		page;	BTPageOpaque opaque;restart:	delete_now = false;	recurse_to = P_NONE;	/* call vacuum_delay_point while not holding any buffer lock */	vacuum_delay_point();	/*	 * We can't use _bt_getbuf() here because it always applies	 * _bt_checkpage(), which will barf on an all-zero page. We want to	 * recycle all-zero pages, not fail.  Also, we want to use a nondefault	 * buffer access strategy.	 */		// -------- MirroredLock ----------	MIRROREDLOCK_BUFMGR_LOCK;		buf = ReadBufferWithStrategy(rel, blkno, info->strategy);	LockBuffer(buf, BT_READ);	page = BufferGetPage(buf);	opaque = (BTPageOpaque) PageGetSpecialPointer(page);	if (!PageIsNew(page))		_bt_checkpage(rel, buf);	/*	 * If we are recursing, the only case we want to do anything with is a	 * live leaf page having the current vacuum cycle ID.  Any other state	 * implies we already saw the page (eg, deleted it as being empty). In	 * particular, we don't want to risk adding it to freePages twice.	 */	if (blkno != orig_blkno)	{		if (_bt_page_recyclable(page) ||			P_IGNORE(opaque) ||			!P_ISLEAF(opaque) ||			opaque->btpo_cycleid != vstate->cycleid)		{			_bt_relbuf(rel, buf);			MIRROREDLOCK_BUFMGR_UNLOCK;			// -------- MirroredLock ----------			return;		}	}	/* Page is valid, see what to do with it */	if (_bt_page_recyclable(page))	{		/* Okay to recycle this page */		if (vstate->nFreePages < vstate->maxFreePages)			vstate->freePages[vstate->nFreePages++] = blkno;		vstate->totFreePages++;		stats->pages_deleted++;	}	else if (P_ISDELETED(opaque))	{		/* Already deleted, but can't recycle yet */		stats->pages_deleted++;	}	else if (P_ISHALFDEAD(opaque))	{		/* Half-dead, try to delete */		delete_now = true;	}	else if (P_ISLEAF(opaque))	{		OffsetNumber deletable[MaxOffsetNumber];		int			ndeletable;		OffsetNumber offnum,					minoff,					maxoff;		/*//.........这里部分代码省略.........
Developer ID: LJoNe, Project: gpdb, Lines of code: 101


Example 28: spgvacuumpage

/* * Process one page during a bulkdelete scan */static voidspgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno){	Relation	index = bds->info->index;	Buffer		buffer;	Page		page;	/* call vacuum_delay_point while not holding any buffer lock */	vacuum_delay_point();	buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,								RBM_NORMAL, bds->info->strategy);	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);	page = (Page) BufferGetPage(buffer);	if (PageIsNew(page))	{		/*		 * We found an all-zero page, which could happen if the database		 * crashed just after extending the file.  Initialize and recycle it.		 */		SpGistInitBuffer(buffer, 0);		SpGistPageSetDeleted(page);		/* We don't bother to WAL-log this action; easy to redo */		MarkBufferDirty(buffer);	}	else if (SpGistPageIsDeleted(page))	{		/* nothing to do */	}	else if (SpGistPageIsLeaf(page))	{		if (SpGistBlockIsRoot(blkno))		{			vacuumLeafRoot(bds, index, buffer);			/* no need for vacuumRedirectAndPlaceholder */		}		else		{			vacuumLeafPage(bds, index, buffer, false);			vacuumRedirectAndPlaceholder(index, buffer);		}	}	else	{		/* inner page */		vacuumRedirectAndPlaceholder(index, buffer);	}	/*	 * The root pages must never be deleted, nor marked as available in FSM,	 * because we don't want them ever returned by a search for a place to put	 * a new tuple.  Otherwise, check for empty/deletable page, and make sure	 * FSM knows about it.	 */	if (!SpGistBlockIsRoot(blkno))	{		/* If page is now empty, mark it deleted */		if (PageIsEmpty(page) && !SpGistPageIsDeleted(page))		{			SpGistPageSetDeleted(page);			/* We don't bother to WAL-log this action; easy to redo */			MarkBufferDirty(buffer);		}		if (SpGistPageIsDeleted(page))		{			RecordFreeIndexPage(index, blkno);			bds->stats->pages_deleted++;		}		else			bds->lastFilledBlock = blkno;	}	SpGistSetLastUsedPage(index, buffer);	UnlockReleaseBuffer(buffer);}
Developer ID: BioBD, Project: Hypothetical_Indexes, Lines of code: 81


Example 29: lazy_scan_heap

/* *	lazy_scan_heap() -- scan an open heap relation * *		This routine sets commit status bits, builds lists of dead tuples *		and pages with free space, and calculates statistics on the number *		of live tuples in the heap.  When done, or when we run low on space *		for dead-tuple TIDs, invoke vacuuming of indexes and heap. * *		If there are no indexes then we just vacuum each dirty page as we *		process it, since there's no point in gathering many tuples. */static voidlazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,			   Relation *Irel, int nindexes, bool scan_all){	BlockNumber nblocks,				blkno;	HeapTupleData tuple;	char	   *relname;	BlockNumber empty_pages,				vacuumed_pages;	double		num_tuples,				tups_vacuumed,				nkeep,				nunused;	IndexBulkDeleteResult **indstats;	int			i;	PGRUsage	ru0;	Buffer		vmbuffer = InvalidBuffer;	BlockNumber next_not_all_visible_block;	bool		skipping_all_visible_blocks;	pg_rusage_init(&ru0);	relname = RelationGetRelationName(onerel);	ereport(elevel,			(errmsg("vacuuming /"%s.%s/"",					get_namespace_name(RelationGetNamespace(onerel)),					relname)));	empty_pages = vacuumed_pages = 0;	num_tuples = tups_vacuumed = nkeep = nunused = 0;	indstats = (IndexBulkDeleteResult **)		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));	nblocks = RelationGetNumberOfBlocks(onerel);	vacrelstats->rel_pages = nblocks;	vacrelstats->scanned_pages = 0;	vacrelstats->nonempty_pages = 0;	vacrelstats->latestRemovedXid = InvalidTransactionId;	lazy_space_alloc(vacrelstats, nblocks);	/*	 * We want to skip pages that don't require vacuuming according to the	 * visibility map, but only when we can skip at least SKIP_PAGES_THRESHOLD	 * consecutive pages.  Since we're reading sequentially, the OS should be	 * doing readahead for us, so there's no gain in skipping a page now and	 * then; that's likely to disable readahead and so be counterproductive.	 * Also, skipping even a single page means that we can't update	 * relfrozenxid, so we only want to do it if we can skip a goodly number	 * of pages.	 *	 * Before entering the main loop, establish the invariant that	 * next_not_all_visible_block is the next block number >= blkno that's not	 * all-visible according to the visibility map, or nblocks if there's no	 * such block.	Also, we set up the skipping_all_visible_blocks flag,	 * which is needed because we need hysteresis in the decision: once we've	 * started skipping blocks, we may as well skip everything up to the next	 * not-all-visible block.	 *	 * Note: if scan_all is true, we won't actually skip any pages; but we	 * maintain next_not_all_visible_block anyway, so as to set up the	 * all_visible_according_to_vm flag correctly for each page.	 */	for (next_not_all_visible_block = 0;		 next_not_all_visible_block < nblocks;		 next_not_all_visible_block++)	{		if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer))			break;		vacuum_delay_point();	}	if (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD)		skipping_all_visible_blocks = true;	else		skipping_all_visible_blocks = false;	for (blkno = 0; blkno < nblocks; blkno++)	{		Buffer		buf;		Page		page;		OffsetNumber offnum,					maxoff;		bool		tupgone,					hastup;		int			prev_dead_count;		OffsetNumber frozen[MaxOffsetNumber];		int			nfrozen;//.........这里部分代码省略.........
Developer ID: hl0103, Project: pgxc, Lines of code: 101


Example 30: gistvacuumcleanup

/* * VACUUM cleanup: update FSM */Datumgistvacuumcleanup(PG_FUNCTION_ARGS){	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);	GistBulkDeleteResult *stats = (GistBulkDeleteResult *) PG_GETARG_POINTER(1);	Relation	rel = info->index;	BlockNumber npages,				blkno;	BlockNumber totFreePages;	BlockNumber lastBlock = GIST_ROOT_BLKNO,				lastFilledBlock = GIST_ROOT_BLKNO;	bool		needLock;	/* No-op in ANALYZE ONLY mode */	if (info->analyze_only)		PG_RETURN_POINTER(stats);	/* Set up all-zero stats if gistbulkdelete wasn't called */	if (stats == NULL)	{		stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult));		/* use heap's tuple count */		stats->std.num_index_tuples = info->num_heap_tuples;		stats->std.estimated_count = info->estimated_count;		/*		 * XXX the above is wrong if index is partial.	Would it be OK to just		 * return NULL, or is there work we must do below?		 */	}	if (stats->needReindex)		ereport(NOTICE,				(errmsg("index /"%s/" needs VACUUM FULL or REINDEX to finish crash recovery",						RelationGetRelationName(rel))));	/*	 * Need lock unless it's local to this backend.	 */	needLock = !RELATION_IS_LOCAL(rel);	/* try to find deleted pages */	if (needLock)		LockRelationForExtension(rel, ExclusiveLock);	npages = RelationGetNumberOfBlocks(rel);	if (needLock)		UnlockRelationForExtension(rel, ExclusiveLock);	totFreePages = 0;	for (blkno = GIST_ROOT_BLKNO + 1; blkno < npages; blkno++)	{		Buffer		buffer;		Page		page;		vacuum_delay_point();		buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,									info->strategy);		LockBuffer(buffer, GIST_SHARE);		page = (Page) BufferGetPage(buffer);		if (PageIsNew(page) || GistPageIsDeleted(page))		{			totFreePages++;			RecordFreeIndexPage(rel, blkno);		}		else			lastFilledBlock = blkno;		UnlockReleaseBuffer(buffer);	}	lastBlock = npages - 1;	/* Finally, vacuum the FSM */	IndexFreeSpaceMapVacuum(info->index);	/* return statistics */	stats->std.pages_free = totFreePages;	if (needLock)		LockRelationForExtension(rel, ExclusiveLock);	stats->std.num_pages = RelationGetNumberOfBlocks(rel);	if (needLock)		UnlockRelationForExtension(rel, ExclusiveLock);	PG_RETURN_POINTER(stats);}
Developer ID: reith2004, Project: postgres, Lines of code: 88
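For readers who want to know what the call exercised above actually does: vacuum_delay_point() is part of PostgreSQL's cost-based vacuum delay machinery and lives in the backend's commands/vacuum.c. The sketch below is only a simplified paraphrase of its logic for illustration, using the real global variable names (VacuumCostActive, VacuumCostBalance, VacuumCostLimit, VacuumCostDelay) but omitting version-specific details; consult the source of your PostgreSQL version for the authoritative implementation.

/*
 * Simplified paraphrase of vacuum_delay_point() -- for illustration only,
 * not the real source.  When cost-based delay is active and the cost
 * balance has reached the limit, the backend naps briefly and resets
 * the balance; interrupts are always checked.
 */
void
vacuum_delay_point(void)
{
	/* always give pending interrupts a chance */
	CHECK_FOR_INTERRUPTS();

	/* nap only if cost accounting is enabled and the budget is used up */
	if (VacuumCostActive && VacuumCostBalance >= VacuumCostLimit)
	{
		int			msec;

		/* sleep time scales with how far past the limit we went */
		msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit;
		if (msec > VacuumCostDelay * 4)
			msec = VacuumCostDelay * 4;

		pg_usleep(msec * 1000L);

		VacuumCostBalance = 0;

		/* we might have received an interrupt while sleeping */
		CHECK_FOR_INTERRUPTS();
	}
}

This is why every example above calls vacuum_delay_point() from inside its page loop: each call is a potential sleep, so it has to be placed at a point where sleeping is safe, i.e. while no buffer lock is held.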



Note: The vacuum_delay_point function examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; please consult the corresponding project's License before redistributing or using the code. Do not reproduce without permission.

