
Self-study tutorial: C++ END_CRIT_SECTION function code examples

51自学网 · 2021-06-01 20:32:23 · C++

This tutorial on C++ END_CRIT_SECTION function code examples is meant to be practical; we hope it helps you.

This article collects typical usage examples of the END_CRIT_SECTION function in C++. If you have been struggling with questions like "What exactly does END_CRIT_SECTION do?", "How is it called?", or "What does real code using it look like?", the hand-picked examples below should help.

The following presents 26 code examples of the END_CRIT_SECTION function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code examples.
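All of the examples below come from the PostgreSQL source tree (and forks such as Greenplum), where START_CRIT_SECTION()/END_CRIT_SECTION() bracket code that must not fail partway through: inside a critical section, any ERROR is escalated to a PANIC, so the usual pattern is "mutate the page and write the WAL record inside the section, do everything that can fail outside it". Before diving into the real examples, here is a minimal stand-alone sketch that only imitates that contract so you can see the shape of the API. The macro bodies, the CritSectionCount counter, and report_error() are simplified illustrations assumed for this sketch, not the real definitions from PostgreSQL's miscadmin.h.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for PostgreSQL's macros (cf. src/include/miscadmin.h). */
static int CritSectionCount = 0;    /* nesting depth of open critical sections */

#define START_CRIT_SECTION() (CritSectionCount++)
#define END_CRIT_SECTION() \
    do { \
        if (CritSectionCount <= 0) { \
            fprintf(stderr, "END_CRIT_SECTION without matching START\n"); \
            abort(); \
        } \
        CritSectionCount--; \
    } while (0)

/* Inside a critical section, an error must crash (PANIC), not roll back. */
static void report_error(const char *msg)
{
    if (CritSectionCount > 0)
    {
        fprintf(stderr, "PANIC: %s\n", msg);
        abort();    /* the real server would PANIC and restart */
    }
    fprintf(stderr, "ERROR: %s\n", msg);    /* recoverable outside a section */
}

int main(void)
{
    /* Typical shape of the examples below: mutate shared state atomically. */
    START_CRIT_SECTION();
    /* ... modify the page / emit the WAL record here; nothing may fail ... */
    END_CRIT_SECTION();

    report_error("harmless error outside any critical section");
    printf("done, open sections: %d\n", CritSectionCount);
    return 0;
}

Compiled and run as-is, the report_error() call merely prints ERROR because it happens after END_CRIT_SECTION(); moving it between the two macros would abort the process, mirroring the ERROR-to-PANIC promotion the real macros enforce.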

Example 1: _bt_delitems

/*
 * Delete item(s) from a btree page.
 *
 * This must only be used for deleting leaf items.  Deleting an item on a
 * non-leaf page has to be done as part of an atomic action that includes
 * deleting the page it points to.
 *
 * This routine assumes that the caller has pinned and locked the buffer.
 * Also, the given itemnos *must* appear in increasing order in the array.
 */
void
_bt_delitems(Relation rel, Buffer buf,
             OffsetNumber *itemnos, int nitems,
             bool inVacuum)
{
    Page        page;
    BTPageOpaque opaque;

    MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;

    page = BufferGetPage(buf);

    // Fetch gp_persistent_relation_node information that will be added to XLOG record.
    RelationFetchGpRelationNodeForXLog(rel);

    /* No ereport(ERROR) until changes are logged */
    START_CRIT_SECTION();

    /* Fix the page */
    PageIndexMultiDelete(page, itemnos, nitems);

    /*
     * If this is within VACUUM, we can clear the vacuum cycleID since this
     * page has certainly been processed by the current vacuum scan.
     */
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    if (inVacuum)
        opaque->btpo_cycleid = 0;

    /*
     * Mark the page as not containing any LP_DELETE items.  This is not
     * certainly true (there might be some that have recently been marked, but
     * weren't included in our target-item list), but it will almost always be
     * true and it doesn't seem worth an additional page scan to check it.
     * Remember that BTP_HAS_GARBAGE is only a hint anyway.
     */
    opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

    MarkBufferDirty(buf);

    /* XLOG stuff */
    if (!rel->rd_istemp)
    {
        xl_btree_delete xlrec;
        XLogRecPtr  recptr;
        XLogRecData rdata[2];

        xl_btreenode_set(&(xlrec.btreenode), rel);
        xlrec.block = BufferGetBlockNumber(buf);

        rdata[0].data = (char *) &xlrec;
        rdata[0].len = SizeOfBtreeDelete;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        /*
         * The target-offsets array is not in the buffer, but pretend that it
         * is.  When XLogInsert stores the whole buffer, the offsets array
         * need not be stored too.
         */
        if (nitems > 0)
        {
            rdata[1].data = (char *) itemnos;
            rdata[1].len = nitems * sizeof(OffsetNumber);
        }
        else
        {
            rdata[1].data = NULL;
            rdata[1].len = 0;
        }
        rdata[1].buffer = buf;
        rdata[1].buffer_std = true;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();
}
Contributor: ricky-wu, Project: gpdb, Lines of code: 92


Example 2: SaveSlotToPath

// ... (part of the code omitted here) ...

    LWLockAcquire(slot->io_in_progress_lock, LW_EXCLUSIVE);

    /* silence valgrind :( */
    memset(&cp, 0, sizeof(ReplicationSlotOnDisk));

    sprintf(tmppath, "%s/state.tmp", dir);
    sprintf(path, "%s/state", dir);

    fd = OpenTransientFile(tmppath,
                           O_CREAT | O_EXCL | O_WRONLY | PG_BINARY,
                           S_IRUSR | S_IWUSR);
    if (fd < 0)
    {
        ereport(elevel,
                (errcode_for_file_access(),
                 errmsg("could not create file \"%s\": %m",
                        tmppath)));
        return;
    }

    cp.magic = SLOT_MAGIC;
    INIT_CRC32C(cp.checksum);
    cp.version = SLOT_VERSION;
    cp.length = ReplicationSlotOnDiskV2Size;

    SpinLockAcquire(&slot->mutex);
    memcpy(&cp.slotdata, &slot->data, sizeof(ReplicationSlotPersistentData));
    SpinLockRelease(&slot->mutex);

    COMP_CRC32C(cp.checksum,
                (char *) (&cp) + SnapBuildOnDiskNotChecksummedSize,
                SnapBuildOnDiskChecksummedSize);
    FIN_CRC32C(cp.checksum);

    if ((write(fd, &cp, sizeof(cp))) != sizeof(cp))
    {
        int         save_errno = errno;

        CloseTransientFile(fd);
        errno = save_errno;
        ereport(elevel,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m",
                        tmppath)));
        return;
    }

    /* fsync the temporary file */
    if (pg_fsync(fd) != 0)
    {
        int         save_errno = errno;

        CloseTransientFile(fd);
        errno = save_errno;
        ereport(elevel,
                (errcode_for_file_access(),
                 errmsg("could not fsync file \"%s\": %m",
                        tmppath)));
        return;
    }

    CloseTransientFile(fd);

    /* rename to permanent file, fsync file and directory */
    if (rename(tmppath, path) != 0)
    {
        ereport(elevel,
                (errcode_for_file_access(),
                 errmsg("could not rename file \"%s\" to \"%s\": %m",
                        tmppath, path)));
        return;
    }

    /* Check CreateSlot() for the reasoning of using a crit. section. */
    START_CRIT_SECTION();

    fsync_fname(path, false);
    fsync_fname((char *) dir, true);
    fsync_fname("pg_replslot", true);

    END_CRIT_SECTION();

    /*
     * Successfully wrote, unset dirty bit, unless somebody dirtied again
     * already.
     */
    {
        volatile ReplicationSlot *vslot = slot;

        SpinLockAcquire(&vslot->mutex);
        if (!vslot->just_dirtied)
            vslot->dirty = false;
        SpinLockRelease(&vslot->mutex);
    }

    LWLockRelease(slot->io_in_progress_lock);
}
Contributor: EccentricLoggers, Project: peloton, Lines of code: 101


Example 3: SlruPhysicalWritePage

/*
 * Physical write of a page from a buffer slot
 *
 * On failure, we cannot just ereport(ERROR) since caller has put state in
 * shared memory that must be undone.  So, we return FALSE and save enough
 * info in static variables to let SlruReportIOError make the report.
 *
 * For now, assume it's not worth keeping a file pointer open across
 * independent read/write operations.  We do batch operations during
 * SimpleLruFlush, though.
 *
 * fdata is NULL for a standalone write, pointer to open-file info during
 * SimpleLruFlush.
 */
static bool
SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
{
    SlruShared  shared = ctl->shared;
    int         segno = pageno / SLRU_PAGES_PER_SEGMENT;
    int         rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
    int         offset = rpageno * BLCKSZ;
    char        path[MAXPGPATH];
    int         fd = -1;

    /*
     * Honor the write-WAL-before-data rule, if appropriate, so that we do not
     * write out data before associated WAL records.  This is the same action
     * performed during FlushBuffer() in the main buffer manager.
     */
    if (shared->group_lsn != NULL)
    {
        /*
         * We must determine the largest async-commit LSN for the page. This
         * is a bit tedious, but since this entire function is a slow path
         * anyway, it seems better to do this here than to maintain a per-page
         * LSN variable (which'd need an extra comparison in the
         * transaction-commit path).
         */
        XLogRecPtr  max_lsn;
        int         lsnindex,
                    lsnoff;

        lsnindex = slotno * shared->lsn_groups_per_page;
        max_lsn = shared->group_lsn[lsnindex++];
        for (lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
        {
            XLogRecPtr  this_lsn = shared->group_lsn[lsnindex++];

            if (max_lsn < this_lsn)
                max_lsn = this_lsn;
        }

        if (!XLogRecPtrIsInvalid(max_lsn))
        {
            /*
             * As noted above, elog(ERROR) is not acceptable here, so if
             * XLogFlush were to fail, we must PANIC.  This isn't much of a
             * restriction because XLogFlush is just about all critical
             * section anyway, but let's make sure.
             */
            START_CRIT_SECTION();
            XLogFlush(max_lsn);
            END_CRIT_SECTION();
        }
    }

    /*
     * During a Flush, we may already have the desired file open.
     */
    if (fdata)
    {
        int         i;

        for (i = 0; i < fdata->num_files; i++)
        {
            if (fdata->segno[i] == segno)
            {
                fd = fdata->fd[i];
                break;
            }
        }
    }

    if (fd < 0)
    {
        /*
         * If the file doesn't already exist, we should create it.  It is
         * possible for this to need to happen when writing a page that's not
         * first in its segment; we assume the OS can cope with that. (Note:
         * it might seem that it'd be okay to create files only when
         * SimpleLruZeroPage is called for the first page of a segment.
         * However, if after a crash and restart the REDO logic elects to
         * replay the log from a checkpoint before the latest one, then it's
         * possible that we will get commands to set transaction status of
         * transactions that have already been truncated from the commit log.
         * Easiest way to deal with that is to accept references to
         * nonexistent files here and in SlruPhysicalReadPage.)
         *
         * Note: it is possible for more than one backend to be executing this
         * code simultaneously for different pages of the same file. Hence,
// ... (part of the code omitted here) ...
Contributor: 5A68656E67, Project: postgres, Lines of code: 101


Example 4: writeListPage

/*
 * Build a pending-list page from the given array of tuples, and write it out.
 *
 * Returns amount of free space left on the page.
 */
static int32
writeListPage(Relation index, Buffer buffer,
              IndexTuple *tuples, int32 ntuples, BlockNumber rightlink)
{
    Page        page = BufferGetPage(buffer);
    int32       i,
                freesize,
                size = 0;
    OffsetNumber l,
                off;
    char       *workspace;
    char       *ptr;

    /* workspace could be a local array; we use palloc for alignment */
    workspace = palloc(BLCKSZ);

    START_CRIT_SECTION();

    GinInitBuffer(buffer, GIN_LIST);

    off = FirstOffsetNumber;
    ptr = workspace;

    for (i = 0; i < ntuples; i++)
    {
        int         this_size = IndexTupleSize(tuples[i]);

        memcpy(ptr, tuples[i], this_size);
        ptr += this_size;
        size += this_size;

        l = PageAddItem(page, (Item) tuples[i], this_size, off, false, false);

        if (l == InvalidOffsetNumber)
            elog(ERROR, "failed to add item to index page in \"%s\"",
                 RelationGetRelationName(index));

        off++;
    }

    Assert(size <= BLCKSZ);     /* else we overran workspace */

    GinPageGetOpaque(page)->rightlink = rightlink;

    /*
     * tail page may contain only whole row(s) or final part of row placed on
     * previous pages (a "row" here meaning all the index tuples generated for
     * one heap tuple)
     */
    if (rightlink == InvalidBlockNumber)
    {
        GinPageSetFullRow(page);
        GinPageGetOpaque(page)->maxoff = 1;
    }
    else
    {
        GinPageGetOpaque(page)->maxoff = 0;
    }

    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(index))
    {
        ginxlogInsertListPage data;
        XLogRecPtr  recptr;

        data.rightlink = rightlink;
        data.ntuples = ntuples;

        XLogBeginInsert();
        XLogRegisterData((char *) &data, sizeof(ginxlogInsertListPage));
        XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT);
        XLogRegisterBufData(0, workspace, size);

        recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_INSERT_LISTPAGE);
        PageSetLSN(page, recptr);
    }

    /* get free space before releasing buffer */
    freesize = PageGetExactFreeSpace(page);

    UnlockReleaseBuffer(buffer);

    END_CRIT_SECTION();

    pfree(workspace);

    return freesize;
}
Contributor: dividedmind, Project: postgres, Lines of code: 95


Example 5: RestoreSlotFromDisk

/*
 * Load a single slot from disk into memory.
 */
static void
RestoreSlotFromDisk(const char *name)
{
    ReplicationSlotOnDisk cp;
    int         i;
    char        path[MAXPGPATH];
    int         fd;
    bool        restored = false;
    int         readBytes;
    pg_crc32c   checksum;

    /* no need to lock here, no concurrent access allowed yet */

    /* delete temp file if it exists */
    sprintf(path, "pg_replslot/%s/state.tmp", name);
    if (unlink(path) < 0 && errno != ENOENT)
        ereport(PANIC,
                (errcode_for_file_access(),
                 errmsg("could not remove file \"%s\": %m", path)));

    sprintf(path, "pg_replslot/%s/state", name);

    elog(DEBUG1, "restoring replication slot from \"%s\"", path);

    fd = OpenTransientFile(path, O_RDWR | PG_BINARY, 0);

    /*
     * We do not need to handle this as we are rename()ing the directory into
     * place only after we fsync()ed the state file.
     */
    if (fd < 0)
        ereport(PANIC,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\": %m", path)));

    /*
     * Sync state file before we're reading from it. We might have crashed
     * while it wasn't synced yet and we shouldn't continue on that basis.
     */
    if (pg_fsync(fd) != 0)
    {
        CloseTransientFile(fd);
        ereport(PANIC,
                (errcode_for_file_access(),
                 errmsg("could not fsync file \"%s\": %m",
                        path)));
    }

    /* Also sync the parent directory */
    START_CRIT_SECTION();
    fsync_fname(path, true);
    END_CRIT_SECTION();

    /* read part of statefile that's guaranteed to be version independent */
    readBytes = read(fd, &cp, ReplicationSlotOnDiskConstantSize);
    if (readBytes != ReplicationSlotOnDiskConstantSize)
    {
        int         saved_errno = errno;

        CloseTransientFile(fd);
        errno = saved_errno;
        ereport(PANIC,
                (errcode_for_file_access(),
                 errmsg("could not read file \"%s\", read %d of %u: %m",
                        path, readBytes,
                        (uint32) ReplicationSlotOnDiskConstantSize)));
    }

    /* verify magic */
    if (cp.magic != SLOT_MAGIC)
        ereport(PANIC,
                (errcode_for_file_access(),
                 errmsg("replication slot file \"%s\" has wrong magic %u instead of %u",
                        path, cp.magic, SLOT_MAGIC)));

    /* verify version */
    if (cp.version != SLOT_VERSION)
        ereport(PANIC,
                (errcode_for_file_access(),
                 errmsg("replication slot file \"%s\" has unsupported version %u",
                        path, cp.version)));

    /* boundary check on length */
    if (cp.length != ReplicationSlotOnDiskV2Size)
        ereport(PANIC,
                (errcode_for_file_access(),
                 errmsg("replication slot file \"%s\" has corrupted length %u",
                        path, cp.length)));

    /* Now that we know the size, read the entire file */
    readBytes = read(fd,
                     (char *) &cp + ReplicationSlotOnDiskConstantSize,
                     cp.length);
    if (readBytes != cp.length)
    {
        int         saved_errno = errno;
// ... (part of the code omitted here) ...
Contributor: EccentricLoggers, Project: peloton, Lines of code: 101


Example 6: ginbuild

IndexBuildResult *
ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
    IndexBuildResult *result;
    double      reltuples;
    GinBuildState buildstate;
    Buffer      RootBuffer,
                MetaBuffer;
    ItemPointerData *list;
    Datum       key;
    GinNullCategory category;
    uint32      nlist;
    MemoryContext oldCtx;
    OffsetNumber attnum;

    if (RelationGetNumberOfBlocks(index) != 0)
        elog(ERROR, "index \"%s\" already contains data",
             RelationGetRelationName(index));

    initGinState(&buildstate.ginstate, index);
    buildstate.indtuples = 0;
    memset(&buildstate.buildStats, 0, sizeof(GinStatsData));

    /* initialize the meta page */
    MetaBuffer = GinNewBuffer(index);

    /* initialize the root page */
    RootBuffer = GinNewBuffer(index);

    START_CRIT_SECTION();
    GinInitMetabuffer(MetaBuffer);
    MarkBufferDirty(MetaBuffer);
    GinInitBuffer(RootBuffer, GIN_LEAF);
    MarkBufferDirty(RootBuffer);

    if (RelationNeedsWAL(index))
    {
        XLogRecPtr  recptr;
        Page        page;

        XLogBeginInsert();
        XLogRegisterBuffer(0, MetaBuffer, REGBUF_WILL_INIT | REGBUF_STANDARD);
        XLogRegisterBuffer(1, RootBuffer, REGBUF_WILL_INIT);

        recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX);

        page = BufferGetPage(RootBuffer);
        PageSetLSN(page, recptr);

        page = BufferGetPage(MetaBuffer);
        PageSetLSN(page, recptr);
    }

    UnlockReleaseBuffer(MetaBuffer);
    UnlockReleaseBuffer(RootBuffer);
    END_CRIT_SECTION();

    /* count the root as first entry page */
    buildstate.buildStats.nEntryPages++;

    /*
     * create a temporary memory context that is used to hold data not yet
     * dumped out to the index
     */
    buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
                                              "Gin build temporary context",
                                              ALLOCSET_DEFAULT_SIZES);

    /*
     * create a temporary memory context that is used for calling
     * ginExtractEntries(), and can be reset after each tuple
     */
    buildstate.funcCtx = AllocSetContextCreate(CurrentMemoryContext,
                                               "Gin build temporary context for user-defined function",
                                               ALLOCSET_DEFAULT_SIZES);

    buildstate.accum.ginstate = &buildstate.ginstate;
    ginInitBA(&buildstate.accum);

    /*
     * Do the heap scan.  We disallow sync scan here because dataPlaceToPage
     * prefers to receive tuples in TID order.
     */
    reltuples = IndexBuildHeapScan(heap, index, indexInfo, false,
                                   ginBuildCallback, (void *) &buildstate);

    /* dump remaining entries to the index */
    oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx);
    ginBeginBAScan(&buildstate.accum);
    while ((list = ginGetBAEntry(&buildstate.accum,
                                 &attnum, &key, &category, &nlist)) != NULL)
    {
        /* there could be many entries, so be willing to abort here */
        CHECK_FOR_INTERRUPTS();
        ginEntryInsert(&buildstate.ginstate, attnum, key, category,
                       list, nlist, &buildstate.buildStats);
    }
    MemoryContextSwitchTo(oldCtx);

    MemoryContextDelete(buildstate.funcCtx);
// ... (part of the code omitted here) ...
Contributor: paullmc, Project: postgres, Lines of code: 101


Example 7: ginHeapTupleFastInsert

// ... (part of the code omitted here) ...
        char       *ptr;
        char       *collectordata;

        buffer = ReadBuffer(index, metadata->tail);
        LockBuffer(buffer, GIN_EXCLUSIVE);
        page = BufferGetPage(buffer);

        off = (PageIsEmpty(page)) ? FirstOffsetNumber :
            OffsetNumberNext(PageGetMaxOffsetNumber(page));

        collectordata = ptr = (char *) palloc(collector->sumsize);

        data.ntuples = collector->ntuples;

        if (needWal)
            XLogBeginInsert();

        START_CRIT_SECTION();

        /*
         * Increase counter of heap tuples
         */
        Assert(GinPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples);
        GinPageGetOpaque(page)->maxoff++;
        metadata->nPendingHeapTuples++;

        for (i = 0; i < collector->ntuples; i++)
        {
            tupsize = IndexTupleSize(collector->tuples[i]);
            l = PageAddItem(page, (Item) collector->tuples[i], tupsize, off, false, false);

            if (l == InvalidOffsetNumber)
                elog(ERROR, "failed to add item to index page in \"%s\"",
                     RelationGetRelationName(index));

            memcpy(ptr, collector->tuples[i], tupsize);
            ptr += tupsize;

            off++;
        }

        Assert((ptr - collectordata) <= collector->sumsize);
        if (needWal)
        {
            XLogRegisterBuffer(1, buffer, REGBUF_STANDARD);
            XLogRegisterBufData(1, collectordata, collector->sumsize);
        }

        metadata->tailFreeSize = PageGetExactFreeSpace(page);

        MarkBufferDirty(buffer);
    }

    /*
     * Write metabuffer, make xlog entry
     */
    MarkBufferDirty(metabuffer);

    if (needWal)
    {
        XLogRecPtr  recptr;

        memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));

        XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT);
        XLogRegisterData((char *) &data, sizeof(ginxlogUpdateMeta));

        recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_UPDATE_META_PAGE);
        PageSetLSN(metapage, recptr);

        if (buffer != InvalidBuffer)
        {
            PageSetLSN(page, recptr);
        }
    }

    if (buffer != InvalidBuffer)
        UnlockReleaseBuffer(buffer);

    /*
     * Force pending list cleanup when it becomes too long. And,
     * ginInsertCleanup could take significant amount of time, so we prefer to
     * call it when it can do all the work in a single collection cycle. In
     * non-vacuum mode, it shouldn't require maintenance_work_mem, so fire it
     * while pending list is still small enough to fit into
     * gin_pending_list_limit.
     *
     * ginInsertCleanup() should not be called inside our CRIT_SECTION.
     */
    cleanupSize = GinGetPendingListCleanupSize(index);
    if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * 1024L)
        needCleanup = true;

    UnlockReleaseBuffer(metabuffer);

    END_CRIT_SECTION();

    if (needCleanup)
        ginInsertCleanup(ginstate, true, NULL);
}
Contributor: dividedmind, Project: postgres, Lines of code: 101


Example 8: _bt_pagedel

// ... (part of the code omitted here) ...
        nextrdata->data = NULL;
        nextrdata->len = 0;
        nextrdata->next = nextrdata + 1;
        nextrdata->buffer = pbuf;
        nextrdata->buffer_std = true;

        nextrdata++;
        nextrdata->data = NULL;
        nextrdata->len = 0;
        nextrdata->buffer = rbuf;
        nextrdata->buffer_std = true;
        nextrdata->next = NULL;

        if (BufferIsValid(lbuf))
        {
            nextrdata->next = nextrdata + 1;
            nextrdata++;
            nextrdata->data = NULL;
            nextrdata->len = 0;
            nextrdata->buffer = lbuf;
            nextrdata->buffer_std = true;
            nextrdata->next = NULL;
        }

        recptr = XLogInsert(RM_BTREE_ID, xlinfo, rdata);

        if (BufferIsValid(metabuf))
        {
            PageSetLSN(metapg, recptr);
            PageSetTLI(metapg, ThisTimeLineID);
        }
        page = BufferGetPage(pbuf);
        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
        page = BufferGetPage(rbuf);
        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
        page = BufferGetPage(buf);
        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
        if (BufferIsValid(lbuf))
        {
            page = BufferGetPage(lbuf);
            PageSetLSN(page, recptr);
            PageSetTLI(page, ThisTimeLineID);
        }
    }

    END_CRIT_SECTION();

    /* release metapage; send out relcache inval if metapage changed */
    if (BufferIsValid(metabuf))
    {
        CacheInvalidateRelcache(rel);
        _bt_relbuf(rel, metabuf);
    }

    /* can always release leftsib immediately */
    if (BufferIsValid(lbuf))
        _bt_relbuf(rel, lbuf);

    /*
     * If parent became half dead, recurse to delete it. Otherwise, if right
     * sibling is empty and is now the last child of the parent, recurse to
     * try to delete it.  (These cases cannot apply at the same time, though
     * the second case might itself recurse to the first.)
     *
     * When recursing to parent, we hold the lock on the target page until
     * done.  This delays any insertions into the keyspace that was just
     * effectively reassigned to the parent's right sibling.  If we allowed
     * that, and there were enough such insertions before we finish deleting
     * the parent, page splits within that keyspace could lead to inserting
     * out-of-order keys into the grandparent level.  It is thought that that
     * wouldn't have any serious consequences, but it still seems like a
     * pretty bad idea.
     */
    if (parent_half_dead)
    {
        /* recursive call will release pbuf */
        _bt_relbuf(rel, rbuf);
        result = _bt_pagedel(rel, pbuf, stack->bts_parent) + 1;
        _bt_relbuf(rel, buf);
    }
    else if (parent_one_child && rightsib_empty)
    {
        _bt_relbuf(rel, pbuf);
        _bt_relbuf(rel, buf);
        /* recursive call will release rbuf */
        result = _bt_pagedel(rel, rbuf, stack) + 1;
    }
    else
    {
        _bt_relbuf(rel, pbuf);
        _bt_relbuf(rel, buf);
        _bt_relbuf(rel, rbuf);
        result = 1;
    }

    return result;
}
Contributor: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 101


Example 9: createPostingTree

/*
 * Creates new posting tree containing the given TIDs. Returns the page
 * number of the root of the new posting tree.
 *
 * items[] must be in sorted order with no duplicates.
 */
BlockNumber
createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
                  GinStatsData *buildStats)
{
    BlockNumber blkno;
    Buffer      buffer;
    Page        page;
    int         nrootitems;

    /* Calculate how many TIDs will fit on first page. */
    nrootitems = Min(nitems, GinMaxLeafDataItems);

    /*
     * Create the root page.
     */
    buffer = GinNewBuffer(index);
    page = BufferGetPage(buffer);
    blkno = BufferGetBlockNumber(buffer);

    START_CRIT_SECTION();

    GinInitBuffer(buffer, GIN_DATA | GIN_LEAF);
    memcpy(GinDataPageGetData(page), items, sizeof(ItemPointerData) * nrootitems);
    GinPageGetOpaque(page)->maxoff = nrootitems;

    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(index))
    {
        XLogRecPtr  recptr;
        XLogRecData rdata[2];
        ginxlogCreatePostingTree data;

        data.node = index->rd_node;
        data.blkno = blkno;
        data.nitem = nrootitems;

        rdata[0].buffer = InvalidBuffer;
        rdata[0].data = (char *) &data;
        rdata[0].len = sizeof(ginxlogCreatePostingTree);
        rdata[0].next = &rdata[1];

        rdata[1].buffer = InvalidBuffer;
        rdata[1].data = (char *) items;
        rdata[1].len = sizeof(ItemPointerData) * nrootitems;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_PTREE, rdata);
        PageSetLSN(page, recptr);
    }

    UnlockReleaseBuffer(buffer);

    END_CRIT_SECTION();

    /* During index build, count the newly-added data page */
    if (buildStats)
        buildStats->nDataPages++;

    /*
     * Add any remaining TIDs to the newly-created posting tree.
     */
    if (nitems > nrootitems)
    {
        ginInsertItemPointers(index, blkno,
                              items + nrootitems,
                              nitems - nrootitems,
                              buildStats);
    }

    return blkno;
}
Contributor: 42penguins, Project: postgres, Lines of code: 78


Example 10: _bt_getroot

// ... (part of the code omitted here) ...
        metad->btm_root = rootblkno;
        metad->btm_level = 0;
        metad->btm_fastroot = rootblkno;
        metad->btm_fastlevel = 0;

        MarkBufferDirty(rootbuf);
        MarkBufferDirty(metabuf);

        /* XLOG stuff */
        if (!rel->rd_istemp)
        {
            xl_btree_newroot xlrec;
            XLogRecPtr  recptr;
            XLogRecData rdata;

            xlrec.node = rel->rd_node;
            xlrec.rootblk = rootblkno;
            xlrec.level = 0;

            rdata.data = (char *) &xlrec;
            rdata.len = SizeOfBtreeNewroot;
            rdata.buffer = InvalidBuffer;
            rdata.next = NULL;

            recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, &rdata);

            PageSetLSN(rootpage, recptr);
            PageSetTLI(rootpage, ThisTimeLineID);
            PageSetLSN(metapg, recptr);
            PageSetTLI(metapg, ThisTimeLineID);
        }

        END_CRIT_SECTION();

        /*
         * Send out relcache inval for metapage change (probably unnecessary
         * here, but let's be safe).
         */
        CacheInvalidateRelcache(rel);

        /*
         * swap root write lock for read lock.  There is no danger of anyone
         * else accessing the new root page while it's unlocked, since no one
         * else knows where it is yet.
         */
        LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
        LockBuffer(rootbuf, BT_READ);

        /* okay, metadata is correct, release lock on it */
        _bt_relbuf(rel, metabuf);
    }
    else
    {
        rootblkno = metad->btm_fastroot;
        Assert(rootblkno != P_NONE);
        rootlevel = metad->btm_fastlevel;

        /*
         * Cache the metapage data for next time
         */
        rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
                                             sizeof(BTMetaPageData));
        memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));

        /*
// ... (part of the code omitted here) ...
Contributor: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 67


Example 11: _bt_delitems_delete

void
_bt_delitems_delete(Relation rel, Buffer buf,
                    OffsetNumber *itemnos, int nitems, Relation heapRel)
{
    Page        page = BufferGetPage(buf);
    BTPageOpaque opaque;

    Assert(nitems > 0);

    /* No ereport(ERROR) until changes are logged */
    START_CRIT_SECTION();

    /* Fix the page */
    PageIndexMultiDelete(page, itemnos, nitems);

    /*
     * We can clear the vacuum cycle ID since this page has certainly been
     * processed by the current vacuum scan.
     */
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    opaque->btpo_cycleid = 0;

    /*
     * Mark the page as not containing any LP_DEAD items.  This is not
     * certainly true (there might be some that have recently been marked, but
     * weren't included in our target-item list), but it will almost always be
     * true and it doesn't seem worth an additional page scan to check it.
     * Remember that BTP_HAS_GARBAGE is only a hint anyway.
     */
    opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

    MarkBufferDirty(buf);

    /* XLOG stuff */
    if (!rel->rd_istemp)
    {
        XLogRecPtr  recptr;
        XLogRecData rdata[3];
        xl_btree_delete xlrec_delete;

        xlrec_delete.node = rel->rd_node;
        xlrec_delete.hnode = heapRel->rd_node;
        xlrec_delete.block = BufferGetBlockNumber(buf);
        xlrec_delete.nitems = nitems;

        rdata[0].data = (char *) &xlrec_delete;
        rdata[0].len = SizeOfBtreeDelete;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        /*
         * We need the target-offsets array whether or not we store the whole
         * buffer, to allow us to find the latestRemovedXid on a standby
         * server.
         */
        rdata[1].data = (char *) itemnos;
        rdata[1].len = nitems * sizeof(OffsetNumber);
        rdata[1].buffer = InvalidBuffer;
        rdata[1].next = &(rdata[2]);

        rdata[2].data = NULL;
        rdata[2].len = 0;
        rdata[2].buffer = buf;
        rdata[2].buffer_std = true;
        rdata[2].next = NULL;

        recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();
}
Contributor: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 74


Example 12: _bt_delitems_vacuum

/*
 * Delete item(s) from a btree page.
 *
 * This must only be used for deleting leaf items.  Deleting an item on a
 * non-leaf page has to be done as part of an atomic action that includes
 * deleting the page it points to.
 *
 * This routine assumes that the caller has pinned and locked the buffer.
 * Also, the given itemnos *must* appear in increasing order in the array.
 *
 * We record VACUUMs and b-tree deletes differently in WAL. InHotStandby
 * we need to be able to pin all of the blocks in the btree in physical
 * order when replaying the effects of a VACUUM, just as we do for the
 * original VACUUM itself. lastBlockVacuumed allows us to tell whether an
 * intermediate range of blocks has had no changes at all by VACUUM,
 * and so must be scanned anyway during replay. We always write a WAL record
 * for the last block in the index, whether or not it contained any items
 * to be removed. This allows us to scan right up to end of index to
 * ensure correct locking.
 */
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
            OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed)
{
    Page        page = BufferGetPage(buf);
    BTPageOpaque opaque;

    /* No ereport(ERROR) until changes are logged */
    START_CRIT_SECTION();

    /* Fix the page */
    if (nitems > 0)
        PageIndexMultiDelete(page, itemnos, nitems);

    /*
     * We can clear the vacuum cycle ID since this page has certainly been
     * processed by the current vacuum scan.
     */
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    opaque->btpo_cycleid = 0;

    /*
     * Mark the page as not containing any LP_DEAD items.  This is not
     * certainly true (there might be some that have recently been marked, but
     * weren't included in our target-item list), but it will almost always be
     * true and it doesn't seem worth an additional page scan to check it.
     * Remember that BTP_HAS_GARBAGE is only a hint anyway.
     */
    opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

    MarkBufferDirty(buf);

    /* XLOG stuff */
    if (!rel->rd_istemp)
    {
        XLogRecPtr  recptr;
        XLogRecData rdata[2];
        xl_btree_vacuum xlrec_vacuum;

        xlrec_vacuum.node = rel->rd_node;
        xlrec_vacuum.block = BufferGetBlockNumber(buf);
        xlrec_vacuum.lastBlockVacuumed = lastBlockVacuumed;

        rdata[0].data = (char *) &xlrec_vacuum;
        rdata[0].len = SizeOfBtreeVacuum;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        /*
         * The target-offsets array is not in the buffer, but pretend that it
         * is.  When XLogInsert stores the whole buffer, the offsets array
         * need not be stored too.
         */
        if (nitems > 0)
        {
            rdata[1].data = (char *) itemnos;
            rdata[1].len = nitems * sizeof(OffsetNumber);
        }
        else
        {
            rdata[1].data = NULL;
            rdata[1].len = 0;
        }
        rdata[1].buffer = buf;
        rdata[1].buffer_std = true;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();
}
Contributor: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 96


Example 13: ginDeletePage

/*
 * Delete a posting tree page.
 */
static void
ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
              BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
    Buffer      dBuffer;
    Buffer      lBuffer;
    Buffer      pBuffer;
    Page        page,
                parentPage;
    BlockNumber rightlink;

    /*
     * Lock the pages in the same order as an insertion would, to avoid
     * deadlocks: left, then right, then parent.
     */
    lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
                                 RBM_NORMAL, gvs->strategy);
    dBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, deleteBlkno,
                                 RBM_NORMAL, gvs->strategy);
    pBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, parentBlkno,
                                 RBM_NORMAL, gvs->strategy);

    LockBuffer(lBuffer, GIN_EXCLUSIVE);
    LockBuffer(dBuffer, GIN_EXCLUSIVE);
    if (!isParentRoot)          /* parent is already locked by
                                 * LockBufferForCleanup() */
        LockBuffer(pBuffer, GIN_EXCLUSIVE);

    START_CRIT_SECTION();

    /* Unlink the page by changing left sibling's rightlink */
    page = BufferGetPage(dBuffer);
    rightlink = GinPageGetOpaque(page)->rightlink;

    page = BufferGetPage(lBuffer);
    GinPageGetOpaque(page)->rightlink = rightlink;

    /* Delete downlink from parent */
    parentPage = BufferGetPage(pBuffer);
#ifdef USE_ASSERT_CHECKING
    do
    {
        PostingItem *tod = GinDataPageGetPostingItem(parentPage, myoff);

        Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
    } while (0);
#endif
    GinPageDeletePostingItem(parentPage, myoff);

    page = BufferGetPage(dBuffer);

    /*
     * we shouldn't change rightlink field to save workability of running
     * search scan
     */
    GinPageGetOpaque(page)->flags = GIN_DELETED;

    MarkBufferDirty(pBuffer);
    MarkBufferDirty(lBuffer);
    MarkBufferDirty(dBuffer);

    if (RelationNeedsWAL(gvs->index))
    {
        XLogRecPtr  recptr;
        ginxlogDeletePage data;

        /*
         * We can't pass REGBUF_STANDARD for the deleted page, because we
         * didn't set pd_lower on pre-9.4 versions. The page might've been
         * binary-upgraded from an older version, and hence not have pd_lower
         * set correctly. Ditto for the left page, but removing the item from
         * the parent updated its pd_lower, so we know that's OK at this
         * point.
         */
        XLogBeginInsert();
        XLogRegisterBuffer(0, dBuffer, 0);
        XLogRegisterBuffer(1, pBuffer, REGBUF_STANDARD);
        XLogRegisterBuffer(2, lBuffer, 0);

        data.parentOffset = myoff;
        data.rightLink = GinPageGetOpaque(page)->rightlink;

        XLogRegisterData((char *) &data, sizeof(ginxlogDeletePage));

        recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_PAGE);
        PageSetLSN(page, recptr);
        PageSetLSN(parentPage, recptr);
        PageSetLSN(BufferGetPage(lBuffer), recptr);
    }

    if (!isParentRoot)
        LockBuffer(pBuffer, GIN_UNLOCK);
    ReleaseBuffer(pBuffer);
    UnlockReleaseBuffer(lBuffer);
    UnlockReleaseBuffer(dBuffer);

    END_CRIT_SECTION();
// ... (part of the code omitted here) ...
Contributor: Gordiychuk, Project: postgres, Lines of code: 101


Example 14: ginbulkdelete

IndexBulkDeleteResult *
ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
              IndexBulkDeleteCallback callback, void *callback_state)
{
    Relation    index = info->index;
    BlockNumber blkno = GIN_ROOT_BLKNO;
    GinVacuumState gvs;
    Buffer      buffer;
    BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))];
    uint32      nRoot;

    gvs.tmpCxt = AllocSetContextCreate(CurrentMemoryContext,
                                       "Gin vacuum temporary context",
                                       ALLOCSET_DEFAULT_MINSIZE,
                                       ALLOCSET_DEFAULT_INITSIZE,
                                       ALLOCSET_DEFAULT_MAXSIZE);
    gvs.index = index;
    gvs.callback = callback;
    gvs.callback_state = callback_state;
    gvs.strategy = info->strategy;
    initGinState(&gvs.ginstate, index);

    /* first time through? */
    if (stats == NULL)
    {
        /* Yes, so initialize stats to zeroes */
        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));

        /*
         * and cleanup any pending inserts
         */
        ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
                         false, stats);
    }

    /* we'll re-count the tuples each time */
    stats->num_index_tuples = 0;
    gvs.result = stats;

    buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
                                RBM_NORMAL, info->strategy);

    /* find leaf page */
    for (;;)
    {
        Page        page = BufferGetPage(buffer);
        IndexTuple  itup;

        LockBuffer(buffer, GIN_SHARE);

        Assert(!GinPageIsData(page));

        if (GinPageIsLeaf(page))
        {
            LockBuffer(buffer, GIN_UNLOCK);
            LockBuffer(buffer, GIN_EXCLUSIVE);

            if (blkno == GIN_ROOT_BLKNO && !GinPageIsLeaf(page))
            {
                LockBuffer(buffer, GIN_UNLOCK);
                continue;       /* check it one more */
            }
            break;
        }

        Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);

        itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));
        blkno = GinGetDownlink(itup);
        Assert(blkno != InvalidBlockNumber);

        UnlockReleaseBuffer(buffer);
        buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
                                    RBM_NORMAL, info->strategy);
    }

    /* right now we found leftmost page in entry's BTree */
    for (;;)
    {
        Page        page = BufferGetPage(buffer);
        Page        resPage;
        uint32      i;

        Assert(!GinPageIsData(page));

        resPage = ginVacuumEntryPage(&gvs, buffer, rootOfPostingTree, &nRoot);

        blkno = GinPageGetOpaque(page)->rightlink;

        if (resPage)
        {
            START_CRIT_SECTION();
            PageRestoreTempPage(resPage, page);
            MarkBufferDirty(buffer);
            xlogVacuumPage(gvs.index, buffer);
            UnlockReleaseBuffer(buffer);
            END_CRIT_SECTION();
        }
        else
        {
            UnlockReleaseBuffer(buffer);
// ... (part of the code omitted here) ...
Contributor: Gordiychuk, Project: postgres, Lines of code: 101


Example 15: shiftList

// ... (part of the code omitted here) ...
    Page        metapage;
    GinMetaPageData *metadata;
    BlockNumber blknoToDelete;

    metapage = BufferGetPage(metabuffer);
    metadata = GinPageGetMeta(metapage);
    blknoToDelete = metadata->head;

    do
    {
        Page        page;
        int         i;
        int64       nDeletedHeapTuples = 0;
        ginxlogDeleteListPages data;
        XLogRecData rdata[1];
        Buffer      buffers[GIN_NDELETE_AT_ONCE];

        data.node = index->rd_node;

        rdata[0].buffer = InvalidBuffer;
        rdata[0].data = (char *) &data;
        rdata[0].len = sizeof(ginxlogDeleteListPages);
        rdata[0].next = NULL;

        data.ndeleted = 0;
        while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
        {
            data.toDelete[data.ndeleted] = blknoToDelete;
            buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete);
            LockBuffer(buffers[data.ndeleted], GIN_EXCLUSIVE);
            page = BufferGetPage(buffers[data.ndeleted]);

            data.ndeleted++;

            if (GinPageIsDeleted(page))
            {
                /* concurrent cleanup process is detected */
                for (i = 0; i < data.ndeleted; i++)
                    UnlockReleaseBuffer(buffers[i]);
                return true;
            }

            nDeletedHeapTuples += GinPageGetOpaque(page)->maxoff;
            blknoToDelete = GinPageGetOpaque(page)->rightlink;
        }

        if (stats)
            stats->pages_deleted += data.ndeleted;

        START_CRIT_SECTION();

        metadata->head = blknoToDelete;

        Assert(metadata->nPendingPages >= data.ndeleted);
        metadata->nPendingPages -= data.ndeleted;
        Assert(metadata->nPendingHeapTuples >= nDeletedHeapTuples);
        metadata->nPendingHeapTuples -= nDeletedHeapTuples;

        if (blknoToDelete == InvalidBlockNumber)
        {
            metadata->tail = InvalidBlockNumber;
            metadata->tailFreeSize = 0;
            metadata->nPendingPages = 0;
            metadata->nPendingHeapTuples = 0;
        }

        MarkBufferDirty(metabuffer);

        for (i = 0; i < data.ndeleted; i++)
        {
            page = BufferGetPage(buffers[i]);
            GinPageGetOpaque(page)->flags = GIN_DELETED;
            MarkBufferDirty(buffers[i]);
        }

        if (RelationNeedsWAL(index))
        {
            XLogRecPtr  recptr;

            memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));

            recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_LISTPAGE, rdata);
            PageSetLSN(metapage, recptr);

            for (i = 0; i < data.ndeleted; i++)
            {
                page = BufferGetPage(buffers[i]);
                PageSetLSN(page, recptr);
            }
        }

        for (i = 0; i < data.ndeleted; i++)
            UnlockReleaseBuffer(buffers[i]);

        END_CRIT_SECTION();
    } while (blknoToDelete != newHead);

    return false;
}
Contributor: 42penguins, Project: postgres, Lines of code: 101


Example 16: visibilitymap_set

/*
 *  visibilitymap_set - set a bit on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running.  The page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value.  cutoff_xid is the largest xmin on the page being
 * marked all-visible; it is needed for Hot Standby, and can be
 * InvalidTransactionId if the page contains no tuples.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function. Except in recovery, caller should also pass the heap
 * buffer. When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O.
 */
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
                  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
    Page        page;
    char       *map;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
    Assert(InRecovery || BufferIsValid(heapBuf));

    /* Check that we have the right heap page pinned, if present */
    if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
        elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

    /* Check that we have the right VM page pinned */
    if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
        elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

    page = BufferGetPage(vmBuf);
    map = PageGetContents(page);
    LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

    if (!(map[mapByte] & (1 << mapBit)))
    {
        START_CRIT_SECTION();

        map[mapByte] |= (1 << mapBit);
        MarkBufferDirty(vmBuf);

        if (RelationNeedsWAL(rel))
        {
            if (XLogRecPtrIsInvalid(recptr))
            {
                Assert(!InRecovery);
                recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
                                          cutoff_xid);

                /*
                 * If data checksums are enabled, we need to protect the heap
                 * page from being torn.
                 */
                if (DataChecksumsEnabled())
                {
                    Page        heapPage = BufferGetPage(heapBuf);

                    /* caller is expected to set PD_ALL_VISIBLE first */
                    Assert(PageIsAllVisible(heapPage));
                    PageSetLSN(heapPage, recptr);
                }
            }
            PageSetLSN(page, recptr);
        }

        END_CRIT_SECTION();
    }

    LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
}
Contributor: EMARQUIS, Project: postgres, Lines of code: 84


Example 17: shiftList

// ... (part of the code omitted here) ...
        ginxlogDeleteListPages data;
        Buffer      buffers[GIN_NDELETE_AT_ONCE];
        BlockNumber freespace[GIN_NDELETE_AT_ONCE];

        data.ndeleted = 0;
        while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
        {
            freespace[data.ndeleted] = blknoToDelete;
            buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete);
            LockBuffer(buffers[data.ndeleted], GIN_EXCLUSIVE);
            page = BufferGetPage(buffers[data.ndeleted]);

            data.ndeleted++;

            if (GinPageIsDeleted(page))
            {
                /* concurrent cleanup process is detected */
                for (i = 0; i < data.ndeleted; i++)
                    UnlockReleaseBuffer(buffers[i]);

                return true;
            }

            nDeletedHeapTuples += GinPageGetOpaque(page)->maxoff;
            blknoToDelete = GinPageGetOpaque(page)->rightlink;
        }

        if (stats)
            stats->pages_deleted += data.ndeleted;

        /*
         * This operation touches an unusually large number of pages, so
         * prepare the XLogInsert machinery for that before entering the
         * critical section.
         */
        if (RelationNeedsWAL(index))
            XLogEnsureRecordSpace(data.ndeleted, 0);

        START_CRIT_SECTION();

        metadata->head = blknoToDelete;

        Assert(metadata->nPendingPages >= data.ndeleted);
        metadata->nPendingPages -= data.ndeleted;
        Assert(metadata->nPendingHeapTuples >= nDeletedHeapTuples);
        metadata->nPendingHeapTuples -= nDeletedHeapTuples;

        if (blknoToDelete == InvalidBlockNumber)
        {
            metadata->tail = InvalidBlockNumber;
            metadata->tailFreeSize = 0;
            metadata->nPendingPages = 0;
            metadata->nPendingHeapTuples = 0;
        }

        MarkBufferDirty(metabuffer);

        for (i = 0; i < data.ndeleted; i++)
        {
            page = BufferGetPage(buffers[i]);
            GinPageGetOpaque(page)->flags = GIN_DELETED;
            MarkBufferDirty(buffers[i]);
        }

        if (RelationNeedsWAL(index))
        {
            XLogRecPtr  recptr;

            XLogBeginInsert();
            XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT);
            for (i = 0; i < data.ndeleted; i++)
                XLogRegisterBuffer(i + 1, buffers[i], REGBUF_WILL_INIT);

            memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));

            XLogRegisterData((char *) &data,
                             sizeof(ginxlogDeleteListPages));

            recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_LISTPAGE);
            PageSetLSN(metapage, recptr);

            for (i = 0; i < data.ndeleted; i++)
            {
                page = BufferGetPage(buffers[i]);
                PageSetLSN(page, recptr);
            }
        }

        for (i = 0; i < data.ndeleted; i++)
            UnlockReleaseBuffer(buffers[i]);

        END_CRIT_SECTION();

        for (i = 0; fill_fsm && i < data.ndeleted; i++)
            RecordFreeIndexPage(index, freespace[i]);
    } while (blknoToDelete != newHead);

    return false;
}
Contributor: dividedmind, Project: postgres, Lines of code: 101


Example 18: _hash_expandtable

// ... (part of the code omitted here) ...
     * where we are going to put a new splitpoint's worth of buckets.
     */
    start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

    if (_hash_has_active_scan(rel, new_bucket))
        elog(ERROR, "scan in progress on supposedly new bucket");

    if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
        elog(ERROR, "could not get lock on supposedly new bucket");

    /*
     * If the split point is increasing (hashm_maxbucket's log base 2
     * increases), we need to allocate a new batch of bucket pages.
     */
    spare_ndx = _hash_log2(new_bucket + 1);
    if (spare_ndx > metap->hashm_ovflpoint)
    {
        Assert(spare_ndx == metap->hashm_ovflpoint + 1);

        /*
         * The number of buckets in the new splitpoint is equal to the total
         * number already in existence, i.e. new_bucket.  Currently this maps
         * one-to-one to blocks required, but someday we may need a more
         * complicated calculation here.
         */
        if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
        {
            /* can't split due to BlockNumber overflow */
            _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
            _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);
            goto fail;
        }
    }

    /*
     * Okay to proceed with split.  Update the metapage bucket mapping info.
     *
     * Since we are scribbling on the metapage data right in the shared
     * buffer, any failure in this next little bit leaves us with a big
     * problem: the metapage is effectively corrupt but could get written back
     * to disk.  We don't really expect any failure, but just to be sure,
     * establish a critical section.
     */
    START_CRIT_SECTION();

    metap->hashm_maxbucket = new_bucket;

    if (new_bucket > metap->hashm_highmask)
    {
        /* Starting a new doubling */
        metap->hashm_lowmask = metap->hashm_highmask;
        metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
    }

    /*
     * If the split point is increasing (hashm_maxbucket's log base 2
     * increases), we need to adjust the hashm_spares[] array and
     * hashm_ovflpoint so that future overflow pages will be created beyond
     * this new batch of bucket pages.
     */
    if (spare_ndx > metap->hashm_ovflpoint)
    {
        metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
        metap->hashm_ovflpoint = spare_ndx;
    }

    /* Done mucking with metapage */
    END_CRIT_SECTION();

    /*
     * Copy bucket mapping info now; this saves re-accessing the meta page
     * inside _hash_splitbucket's inner loop.  Note that once we drop the
     * split lock, other splits could begin, so these values might be out of
     * date before _hash_splitbucket finishes.  That's okay, since all it
     * needs is to tell which of these two buckets to map hashkeys into.
     */
    maxbucket = metap->hashm_maxbucket;
    highmask = metap->hashm_highmask;
    lowmask = metap->hashm_lowmask;

    /* Write out the metapage and drop lock, but keep pin */
    _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

    /* Relocate records to the new bucket */
    _hash_splitbucket(rel, metabuf, old_bucket, new_bucket,
                      start_oblkno, start_nblkno,
                      maxbucket, highmask, lowmask);

    /* Release bucket locks, allowing others to access them */
    _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
    _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);

    return;

    /* Here if decide not to split or fail to acquire old bucket lock */
fail:

    /* We didn't write the metapage, so just drop lock */
    _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
}
Contributor: adunstan, Project: postgresql-dev, Lines of code: 101


Example 19: gistbuild

/*
 * Main entry point to GiST index build. Initially calls insert over and over,
 * but switches to more efficient buffering build algorithm after a certain
 * number of tuples (unless buffering mode is disabled).
 */
Datum
gistbuild(PG_FUNCTION_ARGS)
{
    Relation    heap = (Relation) PG_GETARG_POINTER(0);
    Relation    index = (Relation) PG_GETARG_POINTER(1);
    IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
    IndexBuildResult *result;
    double      reltuples;
    GISTBuildState buildstate;
    Buffer      buffer;
    Page        page;
    MemoryContext oldcxt = CurrentMemoryContext;
    int         fillfactor;

    buildstate.indexrel = index;
    if (index->rd_options)
    {
        /* Get buffering mode from the options string */
        GiSTOptions *options = (GiSTOptions *) index->rd_options;
        char       *bufferingMode = (char *) options + options->bufferingModeOffset;

        if (strcmp(bufferingMode, "on") == 0)
            buildstate.bufferingMode = GIST_BUFFERING_STATS;
        else if (strcmp(bufferingMode, "off") == 0)
            buildstate.bufferingMode = GIST_BUFFERING_DISABLED;
        else
            buildstate.bufferingMode = GIST_BUFFERING_AUTO;

        fillfactor = options->fillfactor;
    }
    else
    {
        /*
         * By default, switch to buffering mode when the index grows too large
         * to fit in cache.
         */
        buildstate.bufferingMode = GIST_BUFFERING_AUTO;
        fillfactor = GIST_DEFAULT_FILLFACTOR;
    }
    /* Calculate target amount of free space to leave on pages */
    buildstate.freespace = BLCKSZ * (100 - fillfactor) / 100;

    /*
     * We expect to be called exactly once for any index relation. If that's
     * not the case, big trouble's what we have.
     */
    if (RelationGetNumberOfBlocks(index) != 0)
        elog(ERROR, "index \"%s\" already contains data",
             RelationGetRelationName(index));

    /* no locking is needed */
    buildstate.giststate = initGISTstate(index);

    /*
     * Create a temporary memory context that is reset once for each tuple
     * processed.  (Note: we don't bother to make this a child of the
     * giststate's scanCxt, so we have to delete it separately at the end.)
     */
    buildstate.giststate->tempCxt = createTempGistContext();

    /* initialize the root page */
    buffer = gistNewBuffer(index);
    Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
    page = BufferGetPage(buffer);

    START_CRIT_SECTION();

    GISTInitBuffer(buffer, F_LEAF);

    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(index))
    {
        XLogRecPtr  recptr;
        XLogRecData rdata;

        rdata.data = (char *) &(index->rd_node);
        rdata.len = sizeof(RelFileNode);
        rdata.buffer = InvalidBuffer;
        rdata.next = NULL;

        recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX, &rdata);
        PageSetLSN(page, recptr);
    }
    else
        PageSetLSN(page, gistGetFakeLSN(heap));

    UnlockReleaseBuffer(buffer);

    END_CRIT_SECTION();

    /* build the index */
    buildstate.indtuples = 0;
    buildstate.indtuplesSize = 0;
// ... (part of the code omitted here) ...
Contributor: amulsul, Project: postgres, Lines of code: 101


Example 20: vacuumLeafPage

// ... (part of the code omitted here) ...
                prevLive = j;
                interveningDeletable = false;
            }

            j = lt->nextOffset;
        }

        if (prevLive == InvalidOffsetNumber)
        {
            /* The chain is entirely removable, so we need a DEAD tuple */
            toDead[xlrec.nDead] = i;
            xlrec.nDead++;
        }
        else if (interveningDeletable)
        {
            /* One or more deletions at end of chain, so close it off */
            chainSrc[xlrec.nChain] = prevLive;
            chainDest[xlrec.nChain] = InvalidOffsetNumber;
            xlrec.nChain++;
        }
    }

    /* sanity check ... */
    if (nDeletable != xlrec.nDead + xlrec.nPlaceholder + xlrec.nMove)
        elog(ERROR, "inconsistent counts of deletable tuples");

    /* Do the updates */
    START_CRIT_SECTION();

    spgPageIndexMultiDelete(&bds->spgstate, page,
                            toDead, xlrec.nDead,
                            SPGIST_DEAD, SPGIST_DEAD,
                            InvalidBlockNumber, InvalidOffsetNumber);

    spgPageIndexMultiDelete(&bds->spgstate, page,
                            toPlaceholder, xlrec.nPlaceholder,
                            SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
                            InvalidBlockNumber, InvalidOffsetNumber);

    /*
     * We implement the move step by swapping the item pointers of the source
     * and target tuples, then replacing the newly-source tuples with
     * placeholders.  This is perhaps unduly friendly with the page data
     * representation, but it's fast and doesn't risk page overflow when a
     * tuple to be relocated is large.
     */
    for (i = 0; i < xlrec.nMove; i++)
    {
        ItemId      idSrc = PageGetItemId(page, moveSrc[i]);
        ItemId      idDest = PageGetItemId(page, moveDest[i]);
        ItemIdData  tmp;

        tmp = *idSrc;
        *idSrc = *idDest;
        *idDest = tmp;
    }

    spgPageIndexMultiDelete(&bds->spgstate, page,
                            moveSrc, xlrec.nMove,
                            SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
                            InvalidBlockNumber, InvalidOffsetNumber);

    for (i = 0; i < xlrec.nChain; i++)
    {
        SpGistLeafTuple lt;

        lt = (SpGistLeafTuple) PageGetItem(page,
                                           PageGetItemId(page, chainSrc[i]));
        Assert(lt->tupstate == SPGIST_LIVE);
        lt->nextOffset = chainDest[i];
    }

    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(index))
    {
        XLogRecPtr  recptr;

        XLogBeginInsert();

        STORE_STATE(&bds->spgstate, xlrec.stateSrc);

        XLogRegisterData((char *) &xlrec, SizeOfSpgxlogVacuumLeaf);
        /* sizeof(xlrec) should be a multiple of sizeof(OffsetNumber) */
        XLogRegisterData((char *) toDead, sizeof(OffsetNumber) * xlrec.nDead);
        XLogRegisterData((char *) toPlaceholder, sizeof(OffsetNumber) * xlrec.nPlaceholder);
        XLogRegisterData((char *) moveSrc, sizeof(OffsetNumber) * xlrec.nMove);
        XLogRegisterData((char *) moveDest, sizeof(OffsetNumber) * xlrec.nMove);
        XLogRegisterData((char *) chainSrc, sizeof(OffsetNumber) * xlrec.nChain);
        XLogRegisterData((char *) chainDest, sizeof(OffsetNumber) * xlrec.nChain);

        XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

        recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_VACUUM_LEAF);

        PageSetLSN(page, recptr);
    }

    END_CRIT_SECTION();
}
Contributor: hasegeli, Project: postgres, Lines of code: 101


Example 21: ReplicationSlotDropAcquired

/*
 * Permanently drop the currently acquired replication slot which will be
 * released by the point this function returns.
 */
static void
ReplicationSlotDropAcquired(void)
{
    char        path[MAXPGPATH];
    char        tmppath[MAXPGPATH];
    ReplicationSlot *slot = MyReplicationSlot;

    Assert(MyReplicationSlot != NULL);

    /* slot isn't acquired anymore */
    MyReplicationSlot = NULL;

    /*
     * If some other backend ran this code concurrently with us, we might try
     * to delete a slot with a certain name while someone else was trying to
     * create a slot with the same name.
     */
    LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);

    /* Generate pathnames. */
    sprintf(path, "pg_replslot/%s", NameStr(slot->data.name));
    sprintf(tmppath, "pg_replslot/%s.tmp", NameStr(slot->data.name));

    /*
     * Rename the slot directory on disk, so that we'll no longer recognize
     * this as a valid slot.  Note that if this fails, we've got to mark the
     * slot inactive before bailing out.  If we're dropping an ephemeral slot,
     * we better never fail hard as the caller won't expect the slot to
     * survive and this might get called during error handling.
     */
    if (rename(path, tmppath) == 0)
    {
        /*
         * We need to fsync() the directory we just renamed and its parent to
         * make sure that our changes are on disk in a crash-safe fashion.  If
         * fsync() fails, we can't be sure whether the changes are on disk or
         * not.  For now, we handle that by panicking;
         * StartupReplicationSlots() will try to straighten it out after
         * restart.
         */
        START_CRIT_SECTION();
        fsync_fname(tmppath, true);
        fsync_fname("pg_replslot", true);
        END_CRIT_SECTION();
    }
    else
    {
        volatile ReplicationSlot *vslot = slot;
        bool        fail_softly = slot->data.persistency == RS_EPHEMERAL;

        SpinLockAcquire(&slot->mutex);
        vslot->active_pid = 0;
        SpinLockRelease(&slot->mutex);

        ereport(fail_softly ? WARNING : ERROR,
                (errcode_for_file_access(),
                 errmsg("could not rename file \"%s\" to \"%s\": %m",
                        path, tmppath)));
    }

    /*
     * The slot is definitely gone.  Lock out concurrent scans of the array
     * long enough to kill it.  It's OK to clear the active flag here without
     * grabbing the mutex because nobody else can be scanning the array here,
     * and nobody can be attached to this slot and thus access it without
     * scanning the array.
     */
    LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);
    slot->active_pid = 0;
    slot->in_use = false;
    LWLockRelease(ReplicationSlotControlLock);

    /*
     * Slot is dead and doesn't prevent resource removal anymore, recompute
     * limits.
     */
    ReplicationSlotsComputeRequiredXmin(false);
    ReplicationSlotsComputeRequiredLSN();

    /*
     * If removing the directory fails, the worst thing that will happen is
     * that the user won't be able to create a new slot with the same name
     * until the next server restart.  We warn about it, but that's all.
     */
    if (!rmtree(tmppath, true))
        ereport(WARNING,
                (errcode_for_file_access(),
                 errmsg("could not remove directory \"%s\"", tmppath)));

    /*
     * We release this at the very end, so that nobody starts trying to create
     * a slot while we're still cleaning up the detritus of the old one.
     */
    LWLockRelease(ReplicationSlotAllocationLock);
}
Contributor: EccentricLoggers, Project: peloton, Lines of code: 99


Example 22: vacuumLeafRoot

/*
 * Vacuum a root page when it is also a leaf
 *
 * On the root, we just delete any dead leaf tuples; no fancy business
 */
static void
vacuumLeafRoot(spgBulkDeleteState *bds, Relation index, Buffer buffer)
{
    Page        page = BufferGetPage(buffer);
    spgxlogVacuumRoot xlrec;
    OffsetNumber toDelete[MaxIndexTuplesPerPage];
    OffsetNumber i,
                max = PageGetMaxOffsetNumber(page);

    xlrec.nDelete = 0;

    /* Scan page, identify tuples to delete, accumulate stats */
    for (i = FirstOffsetNumber; i <= max; i++)
    {
        SpGistLeafTuple lt;

        lt = (SpGistLeafTuple) PageGetItem(page,
                                           PageGetItemId(page, i));
        if (lt->tupstate == SPGIST_LIVE)
        {
            Assert(ItemPointerIsValid(&lt->heapPtr));

            if (bds->callback(&lt->heapPtr, bds->callback_state))
            {
                bds->stats->tuples_removed += 1;
                toDelete[xlrec.nDelete] = i;
                xlrec.nDelete++;
            }
            else
            {
                bds->stats->num_index_tuples += 1;
            }
        }
        else
        {
            /* all tuples on root should be live */
            elog(ERROR, "unexpected SPGiST tuple state: %d",
                 lt->tupstate);
        }
    }

    if (xlrec.nDelete == 0)
        return;                 /* nothing more to do */

    /* Do the update */
    START_CRIT_SECTION();

    /* The tuple numbers are in order, so we can use PageIndexMultiDelete */
    PageIndexMultiDelete(page, toDelete, xlrec.nDelete);

    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(index))
    {
        XLogRecPtr  recptr;

        XLogBeginInsert();

        /* Prepare WAL record */
        STORE_STATE(&bds->spgstate, xlrec.stateSrc);

        XLogRegisterData((char *) &xlrec, SizeOfSpgxlogVacuumRoot);
        /* sizeof(xlrec) should be a multiple of sizeof(OffsetNumber) */
        XLogRegisterData((char *) toDelete,
                         sizeof(OffsetNumber) * xlrec.nDelete);

        XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

        recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_VACUUM_ROOT);

        PageSetLSN(page, recptr);
    }

    END_CRIT_SECTION();
}
Contributor: hasegeli, Project: postgres, Lines of code: 80


Example 23: ginInsertValue

/*
 * Insert value (stored in GinBtree) to tree described by stack
 *
 * During an index build, buildStats is non-null and the counters
 * it contains should be incremented as needed.
 *
 * NB: the passed-in stack is freed, as though by freeGinBtreeStack.
 */
void
ginInsertValue(GinBtree btree, GinBtreeStack *stack, GinStatsData *buildStats)
{
    GinBtreeStack *parent;
    BlockNumber rootBlkno;
    Page        page,
                rpage,
                lpage;

    /* extract root BlockNumber from stack */
    Assert(stack != NULL);
    parent = stack;
    while (parent->parent)
        parent = parent->parent;
    rootBlkno = parent->blkno;
    Assert(BlockNumberIsValid(rootBlkno));

    /* this loop crawls up the stack until the insertion is complete */
    for (;;)
    {
        XLogRecData *rdata;
        BlockNumber savedRightLink;

        page = BufferGetPage(stack->buffer);
        savedRightLink = GinPageGetOpaque(page)->rightlink;

        if (btree->isEnoughSpace(btree, stack->buffer, stack->off))
        {
            START_CRIT_SECTION();
            btree->placeToPage(btree, stack->buffer, stack->off, &rdata);

            MarkBufferDirty(stack->buffer);

            if (RelationNeedsWAL(btree->index))
            {
                XLogRecPtr  recptr;

                recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_INSERT, rdata);
                PageSetLSN(page, recptr);
            }

            LockBuffer(stack->buffer, GIN_UNLOCK);
            END_CRIT_SECTION();

            freeGinBtreeStack(stack);

            return;
        }
        else
        {
            Buffer      rbuffer = GinNewBuffer(btree->index);
            Page        newlpage;

            /*
             * newlpage is a pointer to memory page, it doesn't associate with
             * buffer, stack->buffer should be untouched
             */
            newlpage = btree->splitPage(btree, stack->buffer, rbuffer, stack->off, &rdata);

            ((ginxlogSplit *) (rdata->data))->rootBlkno = rootBlkno;

            /* During index build, count the newly-split page */
            if (buildStats)
            {
                if (btree->isData)
                    buildStats->nDataPages++;
                else
                    buildStats->nEntryPages++;
            }

            parent = stack->parent;

            if (parent == NULL)
            {
                /*
                 * split root, so we need to allocate new left page and place
                 * pointer on root to left and right page
                 */
                Buffer      lbuffer = GinNewBuffer(btree->index);

                ((ginxlogSplit *) (rdata->data))->isRootSplit = TRUE;
                ((ginxlogSplit *) (rdata->data))->rrlink = InvalidBlockNumber;

                page = BufferGetPage(stack->buffer);
                lpage = BufferGetPage(lbuffer);
                rpage = BufferGetPage(rbuffer);

                GinPageGetOpaque(rpage)->rightlink = InvalidBlockNumber;
                GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
                ((ginxlogSplit *) (rdata->data))->lblkno = BufferGetBlockNumber(lbuffer);

                START_CRIT_SECTION();
// ... (part of the code omitted here) ...
Developer ID: amulsul, Project: postgres, Lines of code: 101
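Note that this GIN example comes from an older source tree than the SP-GiST ones: it hands XLogInsert a hand-chained XLogRecData array, an API that PostgreSQL 9.5 replaced with the XLogBeginInsert/XLogRegisterData/XLogRegisterBuffer calls seen in the other examples. For code that has no WAL resource manager of its own, the simplest way to honor the same critical-section discipline is to log a full-page image. The sketch below is a minimal backend-internal illustration, assuming PostgreSQL 9.5 or later (it compiles only inside the server tree); the PageClearFull call is just a placeholder for a real page modification:

#include "postgres.h"

#include "access/xloginsert.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

/* Sketch only: WAL-log an arbitrary page change as a full-page image. */
static void
overwrite_page_sketch(Relation rel, Buffer buffer)
{
	Page		page = BufferGetPage(buffer);

	/* all failure-prone preparation must happen before this point */
	START_CRIT_SECTION();

	PageClearFull(page);		/* placeholder for a real page change */

	MarkBufferDirty(buffer);	/* dirty the buffer before logging */

	if (RelationNeedsWAL(rel))
	{
		/* logs a full image of the page and, for an initialized page,
		 * sets its LSN itself, so no separate PageSetLSN is needed */
		(void) log_newpage_buffer(buffer, true);
	}

	END_CRIT_SECTION();
}

log_newpage_buffer asserts that it is called inside a critical section, so this sketch keeps the same START/END bracketing as the real index code above.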


Example 24: vacuumRedirectAndPlaceholder

//......... part of the code omitted here .........
	OffsetNumber itemnos[MaxIndexTuplesPerPage];
	spgxlogVacuumRedirect xlrec;

	xlrec.nToPlaceholder = 0;
	xlrec.newestRedirectXid = InvalidTransactionId;

	START_CRIT_SECTION();

	/*
	 * Scan backwards to convert old redirection tuples to placeholder tuples,
	 * and identify location of last non-placeholder tuple while at it.
	 */
	for (i = max;
		 i >= FirstOffsetNumber &&
		 (opaque->nRedirection > 0 || !hasNonPlaceholder);
		 i--)
	{
		SpGistDeadTuple dt;

		dt = (SpGistDeadTuple) PageGetItem(page, PageGetItemId(page, i));

		if (dt->tupstate == SPGIST_REDIRECT &&
			TransactionIdPrecedes(dt->xid, RecentGlobalXmin))
		{
			dt->tupstate = SPGIST_PLACEHOLDER;
			Assert(opaque->nRedirection > 0);
			opaque->nRedirection--;
			opaque->nPlaceholder++;

			/* remember newest XID among the removed redirects */
			if (!TransactionIdIsValid(xlrec.newestRedirectXid) ||
				TransactionIdPrecedes(xlrec.newestRedirectXid, dt->xid))
				xlrec.newestRedirectXid = dt->xid;

			ItemPointerSetInvalid(&dt->pointer);

			itemToPlaceholder[xlrec.nToPlaceholder] = i;
			xlrec.nToPlaceholder++;

			hasUpdate = true;
		}

		if (dt->tupstate == SPGIST_PLACEHOLDER)
		{
			if (!hasNonPlaceholder)
				firstPlaceholder = i;
		}
		else
		{
			hasNonPlaceholder = true;
		}
	}

	/*
	 * Any placeholder tuples at the end of page can safely be removed.  We
	 * can't remove ones before the last non-placeholder, though, because we
	 * can't alter the offset numbers of non-placeholder tuples.
	 */
	if (firstPlaceholder != InvalidOffsetNumber)
	{
		/*
		 * We do not store this array to rdata because it's easy to recreate.
		 */
		for (i = firstPlaceholder; i <= max; i++)
			itemnos[i - firstPlaceholder] = i;

		i = max - firstPlaceholder + 1;
		Assert(opaque->nPlaceholder >= i);
		opaque->nPlaceholder -= i;

		/* The array is surely sorted, so can use PageIndexMultiDelete */
		PageIndexMultiDelete(page, itemnos, i);

		hasUpdate = true;
	}

	xlrec.firstPlaceholder = firstPlaceholder;

	if (hasUpdate)
		MarkBufferDirty(buffer);

	if (hasUpdate && RelationNeedsWAL(index))
	{
		XLogRecPtr	recptr;

		XLogBeginInsert();

		XLogRegisterData((char *) &xlrec, SizeOfSpgxlogVacuumRedirect);
		XLogRegisterData((char *) itemToPlaceholder,
						 sizeof(OffsetNumber) * xlrec.nToPlaceholder);

		XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

		recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_VACUUM_REDIRECT);

		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();
}
Developer ID: hasegeli, Project: postgres, Lines of code: 101
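The conversion above may only happen once no running transaction can still follow a redirect, hence the TransactionIdPrecedes(dt->xid, RecentGlobalXmin) test. XIDs are 32-bit counters that wrap around, so "precedes" is modular arithmetic, not a plain `<`. The standalone toy below mirrors only the core arithmetic; the real function in access/transam.c additionally special-cases the permanent (bootstrap/frozen) XIDs:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ToyTransactionId;

/*
 * Same core arithmetic as TransactionIdPrecedes: compare 32-bit XIDs
 * via a signed modular difference, so ordering survives wraparound.
 */
static int
toy_xid_precedes(ToyTransactionId id1, ToyTransactionId id2)
{
	int32_t		diff = (int32_t) (id1 - id2);

	return diff < 0;
}

int
main(void)
{
	/* the obvious case */
	printf("%d\n", toy_xid_precedes(100, 200));			/* prints 1 */
	/* an XID just before wraparound still precedes one just after it */
	printf("%d\n", toy_xid_precedes(4294967000u, 50));	/* prints 1 */
	return 0;
}

Both lines print 1: the same signed-difference trick orders ordinary XIDs and XIDs on opposite sides of the wraparound boundary.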


Example 25: heap_page_prune

//......... part of the code omitted here .........
	/* Any error while applying the changes is critical */
	START_CRIT_SECTION();

	/* Have we found any prunable items? */
	if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
	{
		/*
		 * Apply the planned item changes, then repair page fragmentation, and
		 * update the page's hint bit about whether it has free line pointers.
		 */
		heap_page_prune_execute(buffer,
								prstate.redirected, prstate.nredirected,
								prstate.nowdead, prstate.ndead,
								prstate.nowunused, prstate.nunused);

		/*
		 * Update the page's pd_prune_xid field to either zero, or the lowest
		 * XID of any soon-prunable tuple.
		 */
		((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;

		/*
		 * Also clear the "page is full" flag, since there's no point in
		 * repeating the prune/defrag process until something else happens to
		 * the page.
		 */
		PageClearFull(page);

		MarkBufferDirty(buffer);

		/*
		 * Emit a WAL HEAP_CLEAN record showing what we did
		 */
		if (RelationNeedsWAL(relation))
		{
			XLogRecPtr	recptr;

			recptr = log_heap_clean(relation, buffer,
									prstate.redirected, prstate.nredirected,
									prstate.nowdead, prstate.ndead,
									prstate.nowunused, prstate.nunused,
									prstate.latestRemovedXid);

			PageSetLSN(BufferGetPage(buffer), recptr);
			PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
		}
	}
	else
	{
		/*
		 * If we didn't prune anything, but have found a new value for the
		 * pd_prune_xid field, update it and mark the buffer dirty. This is
		 * treated as a non-WAL-logged hint.
		 *
		 * Also clear the "page is full" flag if it is set, since there's no
		 * point in repeating the prune/defrag process until something else
		 * happens to the page.
		 */
		if (((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
			PageIsFull(page))
		{
			((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
			PageClearFull(page);
			SetBufferCommitInfoNeedsSave(buffer);
		}
	}

	END_CRIT_SECTION();

	/*
	 * If requested, report the number of tuples reclaimed to pgstats. This is
	 * ndeleted minus ndead, because we don't want to count a now-DEAD root
	 * item as a deletion for this purpose.
	 */
	if (report_stats && ndeleted > prstate.ndead)
		pgstat_update_heap_dead_tuples(relation, ndeleted - prstate.ndead);

	*latestRemovedXid = prstate.latestRemovedXid;

	/*
	 * XXX Should we update the FSM information of this page ?
	 *
	 * There are two schools of thought here. We may not want to update FSM
	 * information so that the page is not used for unrelated UPDATEs/INSERTs
	 * and any free space in this page will remain available for further
	 * UPDATEs in *this* page, thus improving chances for doing HOT updates.
	 *
	 * But for a large table and where a page does not receive further UPDATEs
	 * for a long time, we might waste this space by not updating the FSM
	 * information. The relation may get extended and fragmented further.
	 *
	 * One possibility is to leave "fillfactor" worth of space in this page
	 * and update FSM with the remaining space.
	 *
	 * In any case, the current FSM implementation doesn't accept
	 * one-page-at-a-time updates, so this is all academic for now.
	 */

	return ndeleted;
}
Developer ID: AllenDou, Project: postgresql, Lines of code: 101
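This example is from a pre-9.3 source tree: PageSetTLI was removed in PostgreSQL 9.3 (that header field now holds a checksum), and SetBufferCommitInfoNeedsSave was renamed MarkBufferDirtyHint. Its else-branch shows the other half of the pattern: a pure hint, which is safe to lose, gets no WAL record of its own. Below is a minimal backend-internal sketch of that path under the newer name, as an illustration rather than the heap code itself (it compiles only inside the server tree):

#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/* Sketch only: record a lost-update-tolerant hint on a page. */
static void
set_page_hint_sketch(Buffer buffer)
{
	Page		page = BufferGetPage(buffer);

	PageClearFull(page);		/* the hint itself */

	/*
	 * Ask the buffer manager to write the page opportunistically.  No WAL
	 * record is emitted here, although with data checksums enabled the
	 * buffer manager may itself log a full-page image on our behalf.
	 */
	MarkBufferDirtyHint(buffer, true);
}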


Example 26: gistplacetopage

//......... part of the code omitted here .........
		if (is_rootsplit)
		{
			for (ptr = dist->next; ptr; ptr = ptr->next)
				UnlockReleaseBuffer(ptr->buffer);
		}
	}
	else
	{
		/*
		 * Enough space.  We always get here if ntup==0.
		 */
		START_CRIT_SECTION();

		/*
		 * Delete old tuple if any, then insert new tuple(s) if any.  If
		 * possible, use the fast path of PageIndexTupleOverwrite.
		 */
		if (OffsetNumberIsValid(oldoffnum))
		{
			if (ntup == 1)
			{
				/* One-for-one replacement, so use PageIndexTupleOverwrite */
				if (!PageIndexTupleOverwrite(page, oldoffnum, (Item) *itup,
											 IndexTupleSize(*itup)))
					elog(ERROR, "failed to add item to index page in \"%s\"",
						 RelationGetRelationName(rel));
			}
			else
			{
				/* Delete old, then append new tuple(s) to page */
				PageIndexTupleDelete(page, oldoffnum);
				gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
			}
		}
		else
		{
			/* Just append new tuples at the end of the page */
			gistfillbuffer(page, itup, ntup, InvalidOffsetNumber);
		}

		MarkBufferDirty(buffer);

		if (BufferIsValid(leftchildbuf))
			MarkBufferDirty(leftchildbuf);

		if (is_build)
			recptr = GistBuildLSN;
		else
		{
			if (RelationNeedsWAL(rel))
			{
				OffsetNumber ndeloffs = 0,
							deloffs[1];

				if (OffsetNumberIsValid(oldoffnum))
				{
					deloffs[0] = oldoffnum;
					ndeloffs = 1;
				}

				recptr = gistXLogUpdate(buffer,
										deloffs, ndeloffs, itup, ntup,
										leftchildbuf);
			}
			else
				recptr = gistGetFakeLSN(rel);
		}
		PageSetLSN(page, recptr);

		if (newblkno)
			*newblkno = blkno;
	}

	/*
	 * If we inserted the downlink for a child page, set NSN and clear
	 * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
	 * follow the rightlink if and only if they looked at the parent page
	 * before we inserted the downlink.
	 *
	 * Note that we do this *after* writing the WAL record. That means that
	 * the possible full page image in the WAL record does not include these
	 * changes, and they must be replayed even if the page is restored from
	 * the full page image. There's a chicken-and-egg problem: if we updated
	 * the child pages first, we wouldn't know the recptr of the WAL record
	 * we're about to write.
	 */
	if (BufferIsValid(leftchildbuf))
	{
		Page		leftpg = BufferGetPage(leftchildbuf);

		GistPageSetNSN(leftpg, recptr);
		GistClearFollowRight(leftpg);

		PageSetLSN(leftpg, recptr);
	}

	END_CRIT_SECTION();

	return is_split;
}
Developer ID: davidfetter, Project: postgresql_projects, Lines of code: 101
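The one-for-one branch above uses PageIndexTupleOverwrite, a newer bufpage.c primitive that rewrites the tuple behind an existing line pointer in place; unlike delete-plus-reinsert it leaves every other offset number on the page untouched, so nothing else pointing into the page needs adjusting. A hedged sketch of that fast path (replace_item_sketch and its parameters are illustrative names, not PostgreSQL functions; it compiles only inside the server tree):

#include "postgres.h"

#include "storage/bufpage.h"

/* Sketch only: replace the tuple at offnum, preferring in-place overwrite. */
static void
replace_item_sketch(Page page, OffsetNumber offnum, Item newitem, Size newsize)
{
	/*
	 * PageIndexTupleOverwrite returns false only when the new tuple does
	 * not fit; like the GiST code above, treat that as a can't-happen error.
	 */
	if (!PageIndexTupleOverwrite(page, offnum, newitem, newsize))
		elog(ERROR, "failed to overwrite item at offset %u", offnum);
}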



Note: The END_CRIT_SECTION function examples in this article are compiled from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by their developers, and copyright in the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's License; do not reproduce without permission.

