This article collects typical usage examples of the C++ BufferGetBlockNumber function, gathered from open-source projects. If you have been wondering exactly how BufferGetBlockNumber is used in practice, the hand-picked examples below should help. In total, 30 code examples are shown, sorted by popularity by default.
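Before the full examples, here is a minimal sketch of the function's basic contract: given a pinned buffer, BufferGetBlockNumber returns the block number of the page that buffer holds. This is a sketch only; the function name show_block_number and the choice of block 0 are illustrative assumptions, and the relation is assumed to be already opened and locked by the caller.

#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

/*
 * Hypothetical sketch (runs inside a backend). Reads block 0 of the
 * relation and confirms that BufferGetBlockNumber() reports the block
 * we asked for.
 */
static void
show_block_number(Relation rel)
{
    Buffer      buf = ReadBuffer(rel, 0);   /* pin block 0 */

    /* The buffer must be pinned; the return value is its block number. */
    Assert(BufferGetBlockNumber(buf) == 0);

    elog(LOG, "buffer holds block %u of \"%s\"",
         BufferGetBlockNumber(buf), RelationGetRelationName(rel));

    ReleaseBuffer(buf);                     /* drop the pin */
}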
Example 1: spgbuild

/*
 * Build an SP-GiST index.
 */
Datum
spgbuild(PG_FUNCTION_ARGS)
{
    Relation    heap = (Relation) PG_GETARG_POINTER(0);
    Relation    index = (Relation) PG_GETARG_POINTER(1);
    IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
    IndexBuildResult *result;
    double      reltuples;
    SpGistBuildState buildstate;
    Buffer      metabuffer,
                rootbuffer;

    if (RelationGetNumberOfBlocks(index) != 0)
        elog(ERROR, "index \"%s\" already contains data",
             RelationGetRelationName(index));

    /*
     * Initialize the meta page and root page
     */
    metabuffer = SpGistNewBuffer(index);
    rootbuffer = SpGistNewBuffer(index);

    Assert(BufferGetBlockNumber(metabuffer) == SPGIST_METAPAGE_BLKNO);
    Assert(BufferGetBlockNumber(rootbuffer) == SPGIST_HEAD_BLKNO);

    START_CRIT_SECTION();

    SpGistInitMetapage(BufferGetPage(metabuffer));
    MarkBufferDirty(metabuffer);
    SpGistInitBuffer(rootbuffer, SPGIST_LEAF);
    MarkBufferDirty(rootbuffer);

    if (RelationNeedsWAL(index))
    {
        XLogRecPtr  recptr;
        XLogRecData rdata;

        /* WAL data is just the relfilenode */
        rdata.data = (char *) &(index->rd_node);
        rdata.len = sizeof(RelFileNode);
        rdata.buffer = InvalidBuffer;
        rdata.next = NULL;

        recptr = XLogInsert(RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, &rdata);

        PageSetLSN(BufferGetPage(metabuffer), recptr);
        PageSetTLI(BufferGetPage(metabuffer), ThisTimeLineID);
        PageSetLSN(BufferGetPage(rootbuffer), recptr);
        PageSetTLI(BufferGetPage(rootbuffer), ThisTimeLineID);
    }

    END_CRIT_SECTION();

    UnlockReleaseBuffer(metabuffer);
    UnlockReleaseBuffer(rootbuffer);

    /*
     * Now insert all the heap data into the index
     */
    initSpGistState(&buildstate.spgstate, index);
    buildstate.spgstate.isBuild = true;

    buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
                                              "SP-GiST build temporary context",
                                              ALLOCSET_DEFAULT_MINSIZE,
                                              ALLOCSET_DEFAULT_INITSIZE,
                                              ALLOCSET_DEFAULT_MAXSIZE);

    reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
                                   spgistBuildCallback, (void *) &buildstate);

    MemoryContextDelete(buildstate.tmpCtx);

    SpGistUpdateMetaPage(index);

    result = (IndexBuildResult *) palloc0(sizeof(IndexBuildResult));
    result->heap_tuples = result->index_tuples = reltuples;

    PG_RETURN_POINTER(result);
}
Developer ID: avontd2868, Project: postgres, Lines: 83
Example 2: gistXLogSplit

/*
 * Write WAL record of a page split.
 */
XLogRecPtr
gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf,
              SplitedPageLayout *dist,
              BlockNumber origrlink, GistNSN orignsn,
              Buffer leftchildbuf)
{
    XLogRecData *rdata;
    gistxlogPageSplit xlrec;
    SplitedPageLayout *ptr;
    int         npage = 0,
                cur;
    XLogRecPtr  recptr;

    for (ptr = dist; ptr; ptr = ptr->next)
        npage++;

    rdata = (XLogRecData *) palloc(sizeof(XLogRecData) * (npage * 2 + 2));

    xlrec.node = node;
    xlrec.origblkno = blkno;
    xlrec.origrlink = origrlink;
    xlrec.orignsn = orignsn;
    xlrec.origleaf = page_is_leaf;
    xlrec.npage = (uint16) npage;
    xlrec.leftchild =
        BufferIsValid(leftchildbuf) ? BufferGetBlockNumber(leftchildbuf) : InvalidBlockNumber;

    rdata[0].data = (char *) &xlrec;
    rdata[0].len = sizeof(gistxlogPageSplit);
    rdata[0].buffer = InvalidBuffer;

    cur = 1;

    /*
     * Include a full page image of the child buf. (only necessary if a
     * checkpoint happened since the child page was split)
     */
    if (BufferIsValid(leftchildbuf))
    {
        rdata[cur - 1].next = &(rdata[cur]);
        rdata[cur].data = NULL;
        rdata[cur].len = 0;
        rdata[cur].buffer = leftchildbuf;
        rdata[cur].buffer_std = true;
        cur++;
    }

    for (ptr = dist; ptr; ptr = ptr->next)
    {
        rdata[cur - 1].next = &(rdata[cur]);
        rdata[cur].buffer = InvalidBuffer;
        rdata[cur].data = (char *) &(ptr->block);
        rdata[cur].len = sizeof(gistxlogPage);
        cur++;

        rdata[cur - 1].next = &(rdata[cur]);
        rdata[cur].buffer = InvalidBuffer;
        rdata[cur].data = (char *) (ptr->list);
        rdata[cur].len = ptr->lenlist;
        cur++;
    }
    rdata[cur - 1].next = NULL;

    recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT, rdata);

    pfree(rdata);
    return recptr;
}
Developer ID: GisKook, Project: Gis, Lines: 71
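A pattern worth noting in Example 2: a buffer that may or may not be supplied is mapped to a block number behind a BufferIsValid() guard, with InvalidBlockNumber as the "absent" value. Here is a hedged sketch of that idiom; the struct DemoXlrec and the function record_optional_child are made up for illustration and are not part of PostgreSQL.

#include "postgres.h"
#include "storage/bufmgr.h"

/* Hypothetical WAL-record-style struct, for illustration only. */
typedef struct DemoXlrec
{
    BlockNumber leftchild;  /* block of left child, or InvalidBlockNumber */
} DemoXlrec;

static void
record_optional_child(DemoXlrec *xlrec, Buffer leftchildbuf)
{
    /*
     * Only a valid (pinned) buffer has a block number; encode "no child"
     * as InvalidBlockNumber, as gistXLogSplit does above.
     */
    xlrec->leftchild = BufferIsValid(leftchildbuf)
        ? BufferGetBlockNumber(leftchildbuf)
        : InvalidBlockNumber;
}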
Example 3: btvacuumpage

//......... (portions of code omitted) .........

            /*
             * During Hot Standby we currently assume that
             * XLOG_BTREE_VACUUM records do not produce conflicts. That is
             * only true as long as the callback function depends only
             * upon whether the index tuple refers to heap tuples removed
             * in the initial heap scan. When vacuum starts it derives a
             * value of OldestXmin. Backends taking later snapshots could
             * have a RecentGlobalXmin with a later xid than the vacuum's
             * OldestXmin, so it is possible that row versions deleted
             * after OldestXmin could be marked as killed by other
             * backends. The callback function *could* look at the index
             * tuple state in isolation and decide to delete the index
             * tuple, though currently it does not. If it ever did, we
             * would need to reconsider whether XLOG_BTREE_VACUUM records
             * should cause conflicts. If they did cause conflicts they
             * would be fairly harsh conflicts, since we haven't yet
             * worked out a way to pass a useful value for
             * latestRemovedXid on the XLOG_BTREE_VACUUM records. This
             * applies to *any* type of index that marks index tuples as
             * killed.
             */
            if (callback(htup, callback_state))
                deletable[ndeletable++] = offnum;
        }
    }

    /*
     * Apply any needed deletes. We issue just one _bt_delitems() call
     * per page, so as to minimize WAL traffic.
     */
    if (ndeletable > 0)
    {
        BlockNumber lastBlockVacuumed = BufferGetBlockNumber(buf);

        _bt_delitems_vacuum(rel, buf, deletable, ndeletable,
                            vstate->lastBlockVacuumed);

        /*
         * Keep track of the block number of the lastBlockVacuumed, so we
         * can scan those blocks as well during WAL replay. This then
         * provides concurrency protection and allows btrees to be used
         * while in recovery.
         */
        if (lastBlockVacuumed > vstate->lastBlockVacuumed)
            vstate->lastBlockVacuumed = lastBlockVacuumed;

        stats->tuples_removed += ndeletable;
        /* must recompute maxoff */
        maxoff = PageGetMaxOffsetNumber(page);
    }
    else
    {
        /*
         * If the page has been split during this vacuum cycle, it seems
         * worth expending a write to clear btpo_cycleid even if we don't
         * have any deletions to do. (If we do, _bt_delitems takes care
         * of this.) This ensures we won't process the page again.
         *
         * We treat this like a hint-bit update because there's no need to
         * WAL-log it.
         */
        if (vstate->cycleid != 0 &&
            opaque->btpo_cycleid == vstate->cycleid)
        {
            opaque->btpo_cycleid = 0;
            SetBufferCommitInfoNeedsSave(buf);
Developer ID: GisKook, Project: Gis, Lines: 67
Example 4: _bt_pagedel

/*
 * _bt_pagedel() -- Delete a page from the b-tree, if legal to do so.
 *
 * This action unlinks the page from the b-tree structure, removing all
 * pointers leading to it --- but not touching its own left and right links.
 * The page cannot be physically reclaimed right away, since other processes
 * may currently be trying to follow links leading to the page; they have to
 * be allowed to use its right-link to recover. See nbtree/README.
 *
 * On entry, the target buffer must be pinned and locked (either read or
 * write lock is OK). This lock and pin will be dropped before exiting.
 *
 * The "stack" argument can be a search stack leading (approximately) to the
 * target page, or NULL --- outside callers typically pass NULL since they
 * have not done such a search, but internal recursion cases pass the stack
 * to avoid duplicated search effort.
 *
 * Returns the number of pages successfully deleted (zero if page cannot
 * be deleted now; could be more than one if parent pages were deleted too).
 *
 * NOTE: this leaks memory. Rather than trying to clean up everything
 * carefully, it's better to run it in a temp context that can be reset
 * frequently.
 */
int
_bt_pagedel(Relation rel, Buffer buf, BTStack stack)
{
    int         result;
    BlockNumber target,
                leftsib,
                rightsib,
                parent;
    OffsetNumber poffset,
                maxoff;
    uint32      targetlevel,
                ilevel;
    ItemId      itemid;
    IndexTuple  targetkey,
                itup;
    ScanKey     itup_scankey;
    Buffer      lbuf,
                rbuf,
                pbuf;
    bool        parent_half_dead;
    bool        parent_one_child;
    bool        rightsib_empty;
    Buffer      metabuf = InvalidBuffer;
    Page        metapg = NULL;
    BTMetaPageData *metad = NULL;
    Page        page;
    BTPageOpaque opaque;

    /*
     * We can never delete rightmost pages nor root pages. While at it, check
     * that page is not already deleted and is empty.
     */
    page = BufferGetPage(buf);
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
        P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
    {
        /* Should never fail to delete a half-dead page */
        Assert(!P_ISHALFDEAD(opaque));

        _bt_relbuf(rel, buf);
        return 0;
    }

    /*
     * Save info about page, including a copy of its high key (it must have
     * one, being non-rightmost).
     */
    target = BufferGetBlockNumber(buf);
    targetlevel = opaque->btpo.level;
    leftsib = opaque->btpo_prev;
    itemid = PageGetItemId(page, P_HIKEY);
    targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));

    /*
     * To avoid deadlocks, we'd better drop the target page lock before going
     * further.
     */
    _bt_relbuf(rel, buf);

    /*
     * We need an approximate pointer to the page's parent page. We use the
     * standard search mechanism to search for the page's high key; this will
     * give us a link to either the current parent or someplace to its left
     * (if there are multiple equal high keys). In recursion cases, the
     * caller already generated a search stack and we can just re-use that
     * work.
     */
    if (stack == NULL)
    {
        if (!InRecovery)
        {
            /* we need an insertion scan key to do our search, so build one */
            itup_scankey = _bt_mkscankey(rel, targetkey);
            /* find the leftmost leaf page containing this key */
            stack = _bt_search(rel, rel->rd_rel->relnatts, itup_scankey,
                               false,
//......... (portions of code omitted) .........
Developer ID: Epictetus, Project: postgres, Lines: 101
Example 5: _bt_getroot

/*
 * _bt_getroot() -- Get the root page of the btree.
 *
 * Since the root page can move around the btree file, we have to read
 * its location from the metadata page, and then read the root page
 * itself. If no root page exists yet, we have to create one. The
 * standard class of race conditions exists here; I think I covered
 * them all in the Hopi Indian rain dance of lock requests below.
 *
 * The access type parameter (BT_READ or BT_WRITE) controls whether
 * a new root page will be created or not. If access = BT_READ,
 * and no root page exists, we just return InvalidBuffer. For
 * BT_WRITE, we try to create the root page if it doesn't exist.
 * NOTE that the returned root page will have only a read lock set
 * on it even if access = BT_WRITE!
 *
 * The returned page is not necessarily the true root --- it could be
 * a "fast root" (a page that is alone in its level due to deletions).
 * Also, if the root page is split while we are "in flight" to it,
 * what we will return is the old root, which is now just the leftmost
 * page on a probably-not-very-wide level. For most purposes this is
 * as good as or better than the true root, so we do not bother to
 * insist on finding the true root. We do, however, guarantee to
 * return a live (not deleted or half-dead) page.
 *
 * On successful return, the root page is pinned and read-locked.
 * The metadata page is not locked or pinned on exit.
 */
Buffer
_bt_getroot(Relation rel, int access)
{
    Buffer      metabuf;
    Page        metapg;
    BTPageOpaque metaopaque;
    Buffer      rootbuf;
    Page        rootpage;
    BTPageOpaque rootopaque;
    BlockNumber rootblkno;
    uint32      rootlevel;
    BTMetaPageData *metad;

    metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
    metapg = BufferGetPage(metabuf);
    metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
    metad = BTPageGetMeta(metapg);

    /* sanity-check the metapage */
    if (!(metaopaque->btpo_flags & BTP_META) ||
        metad->btm_magic != BTREE_MAGIC)
        ereport(ERROR,
                (errcode(ERRCODE_INDEX_CORRUPTED),
                 errmsg("index \"%s\" is not a btree",
                        RelationGetRelationName(rel))));

    if (metad->btm_version != BTREE_VERSION)
        ereport(ERROR,
                (errcode(ERRCODE_INDEX_CORRUPTED),
                 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
                        RelationGetRelationName(rel),
                        metad->btm_version, BTREE_VERSION)));

    /* if no root page initialized yet, do it */
    if (metad->btm_root == P_NONE)
    {
        /* If access = BT_READ, caller doesn't want us to create root yet */
        if (access == BT_READ)
        {
            _bt_relbuf(rel, metabuf);
            return InvalidBuffer;
        }

        /* trade in our read lock for a write lock */
        LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
        LockBuffer(metabuf, BT_WRITE);

        /*
         * Race condition: if someone else initialized the metadata between
         * the time we released the read lock and acquired the write lock, we
         * must avoid doing it again.
         */
        if (metad->btm_root != P_NONE)
        {
            /*
             * Metadata initialized by someone else. In order to guarantee no
             * deadlocks, we have to release the metadata page and start all
             * over again. (Is that really true? But it's hardly worth trying
             * to optimize this case.)
             */
            _bt_relbuf(rel, metabuf);
            return _bt_getroot(rel, access);
        }

        /*
         * Get, initialize, write, and leave a lock of the appropriate type
         * on the new root page. Since this is the first page in the tree,
         * it's a leaf as well as the root.
         */
        rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
        rootblkno = BufferGetBlockNumber(rootbuf);
        rootpage = BufferGetPage(rootbuf);
//......... (portions of code omitted) .........
Developer ID: CraigBryan, Project: PostgresqlFun, Lines: 101
Example 6: gistbuild

/*
 * Routine to build an index. Basically calls insert over and over.
 *
 * XXX: it would be nice to implement some sort of bulk-loading
 * algorithm, but it is not clear how to do that.
 */
Datum
gistbuild(PG_FUNCTION_ARGS)
{
    Relation    heap = (Relation) PG_GETARG_POINTER(0);
    Relation    index = (Relation) PG_GETARG_POINTER(1);
    IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
    IndexBuildResult *result;
    double      reltuples;
    GISTBuildState buildstate;
    Buffer      buffer;
    Page        page;

    /*
     * We expect to be called exactly once for any index relation. If that's
     * not the case, big trouble's what we have.
     */
    if (RelationGetNumberOfBlocks(index) != 0)
        elog(ERROR, "index \"%s\" already contains data",
             RelationGetRelationName(index));

    /* no locking is needed */
    initGISTstate(&buildstate.giststate, index);

    /* initialize the root page */
    buffer = gistNewBuffer(index);
    Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO);
    page = BufferGetPage(buffer);

    START_CRIT_SECTION();

    GISTInitBuffer(buffer, F_LEAF);

    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(index))
    {
        XLogRecPtr  recptr;
        XLogRecData rdata;

        rdata.data = (char *) &(index->rd_node);
        rdata.len = sizeof(RelFileNode);
        rdata.buffer = InvalidBuffer;
        rdata.next = NULL;

        recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_CREATE_INDEX, &rdata);
        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }
    else
        PageSetLSN(page, GetXLogRecPtrForTemp());

    UnlockReleaseBuffer(buffer);

    END_CRIT_SECTION();

    /* build the index */
    buildstate.numindexattrs = indexInfo->ii_NumIndexAttrs;
    buildstate.indtuples = 0;

    /*
     * create a temporary memory context that is reset once for each tuple
     * inserted into the index
     */
    buildstate.tmpCtx = createTempGistContext();

    /* do the heap scan */
    reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
                                   gistbuildCallback, (void *) &buildstate);

    /* okay, all heap tuples are indexed */
    MemoryContextDelete(buildstate.tmpCtx);

    freeGISTstate(&buildstate.giststate);

    /*
     * Return statistics
     */
    result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));

    result->heap_tuples = reltuples;
    result->index_tuples = buildstate.indtuples;

    PG_RETURN_POINTER(result);
}
Developer ID: gurjeet, Project: postgres, Lines: 90
Example 7: _bt_delitems_delete

void
_bt_delitems_delete(Relation rel, Buffer buf,
                    OffsetNumber *itemnos, int nitems, Relation heapRel)
{
    Page        page = BufferGetPage(buf);
    BTPageOpaque opaque;

    Assert(nitems > 0);

    /* No ereport(ERROR) until changes are logged */
    START_CRIT_SECTION();

    /* Fix the page */
    PageIndexMultiDelete(page, itemnos, nitems);

    /*
     * We can clear the vacuum cycle ID since this page has certainly been
     * processed by the current vacuum scan.
     */
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    opaque->btpo_cycleid = 0;

    /*
     * Mark the page as not containing any LP_DEAD items. This is not
     * certainly true (there might be some that have recently been marked,
     * but weren't included in our target-item list), but it will almost
     * always be true and it doesn't seem worth an additional page scan to
     * check it. Remember that BTP_HAS_GARBAGE is only a hint anyway.
     */
    opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

    MarkBufferDirty(buf);

    /* XLOG stuff */
    if (RelationNeedsWAL(rel))
    {
        XLogRecPtr  recptr;
        XLogRecData rdata[3];
        xl_btree_delete xlrec_delete;

        xlrec_delete.node = rel->rd_node;
        xlrec_delete.hnode = heapRel->rd_node;
        xlrec_delete.block = BufferGetBlockNumber(buf);
        xlrec_delete.nitems = nitems;

        rdata[0].data = (char *) &xlrec_delete;
        rdata[0].len = SizeOfBtreeDelete;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        /*
         * We need the target-offsets array whether or not we store the whole
         * buffer, to allow us to find the latestRemovedXid on a standby
         * server.
         */
        rdata[1].data = (char *) itemnos;
        rdata[1].len = nitems * sizeof(OffsetNumber);
        rdata[1].buffer = InvalidBuffer;
        rdata[1].next = &(rdata[2]);

        rdata[2].data = NULL;
        rdata[2].len = 0;
        rdata[2].buffer = buf;
        rdata[2].buffer_std = true;
        rdata[2].next = NULL;

        recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();
}
Developer ID: Epictetus, Project: postgres, Lines: 74
Example 8: _bt_endpoint

/*
 * _bt_endpoint() -- Find the first or last page in the index, and scan
 * from there to the first key satisfying all the quals.
 *
 * This is used by _bt_first() to set up a scan when we've determined
 * that the scan must start at the beginning or end of the index (for
 * a forward or backward scan respectively). Exit conditions are the
 * same as for _bt_first().
 */
static bool
_bt_endpoint(IndexScanDesc scan, ScanDirection dir)
{
    Relation    rel = scan->indexRelation;
    BTScanOpaque so = (BTScanOpaque) scan->opaque;
    Buffer      buf;
    Page        page;
    BTPageOpaque opaque;
    OffsetNumber start;
    BTScanPosItem *currItem;

    /*
     * Scan down to the leftmost or rightmost leaf page. This is a
     * simplified version of _bt_search(). We don't maintain a stack since
     * we know we won't need it.
     */
    buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir));

    if (!BufferIsValid(buf))
    {
        /*
         * Empty index. Lock the whole relation, as nothing finer to lock
         * exists.
         */
        PredicateLockRelation(rel, scan->xs_snapshot);
        so->currPos.buf = InvalidBuffer;
        return false;
    }

    PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
    page = BufferGetPage(buf);
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    Assert(P_ISLEAF(opaque));

    if (ScanDirectionIsForward(dir))
    {
        /* There could be dead pages to the left, so not this: */
        /* Assert(P_LEFTMOST(opaque)); */
        start = P_FIRSTDATAKEY(opaque);
    }
    else if (ScanDirectionIsBackward(dir))
    {
        Assert(P_RIGHTMOST(opaque));
        start = PageGetMaxOffsetNumber(page);
    }
    else
    {
        elog(ERROR, "invalid scan direction: %d", (int) dir);
        start = 0;              /* keep compiler quiet */
    }

    /* remember which buffer we have pinned */
    so->currPos.buf = buf;

    /* initialize moreLeft/moreRight appropriately for scan direction */
    if (ScanDirectionIsForward(dir))
    {
        so->currPos.moreLeft = false;
        so->currPos.moreRight = true;
    }
    else
    {
        so->currPos.moreLeft = true;
        so->currPos.moreRight = false;
    }
    so->numKilled = 0;          /* just paranoia */
    so->markItemIndex = -1;     /* ditto */

    /*
     * Now load data from the first page of the scan.
     */
    if (!_bt_readpage(scan, dir, start))
    {
        /*
         * There's no actually-matching data on this page. Try to advance to
         * the next page. Return false if there's no matching data at all.
         */
        if (!_bt_steppage(scan, dir))
            return false;
    }

    /* Drop the lock, but not pin, on the current page */
    LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);

    /* OK, itemIndex says what to return */
    currItem = &so->currPos.items[so->currPos.itemIndex];
    scan->xs_ctup.t_self = currItem->heapTid;
    if (scan->xs_want_itup)
        scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
//......... (portions of code omitted) .........
Developer ID: Epictetus, Project: postgres, Lines: 101
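Example 8 also shows BufferGetBlockNumber() feeding the serializable-isolation machinery: a page-level predicate (SIREAD) lock is keyed by block number, not by buffer. A minimal sketch of that idiom, assuming a backend context where the relation, a pinned buffer, and a snapshot are already set up by the caller; the wrapper function name is hypothetical.

#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/predicate.h"

/*
 * Sketch: take a page-level predicate lock on whatever page "buf"
 * currently holds. PredicateLockPage() wants a BlockNumber, so the
 * pinned buffer is translated with BufferGetBlockNumber().
 */
static void
predicate_lock_current_page(Relation rel, Buffer buf, Snapshot snapshot)
{
    PredicateLockPage(rel, BufferGetBlockNumber(buf), snapshot);
}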
Example 9: _bt_first

//......... (portions of code omitted) .........

                /*
                 * Find first item > scankey. (This is only used for forward
                 * scans.)
                 */
                nextkey = true;
                goback = false;
                break;

            default:
                /* can't get here, but keep compiler quiet */
                elog(ERROR, "unrecognized strat_total: %d", (int) strat_total);
                return false;
    }

    /*
     * Use the manufactured insertion scan key to descend the tree and
     * position ourselves on the target leaf page.
     */
    stack = _bt_search(rel, keysCount, scankeys, nextkey, &buf, BT_READ);

    /* don't need to keep the stack around... */
    _bt_freestack(stack);

    /* remember which buffer we have pinned, if any */
    so->currPos.buf = buf;

    if (!BufferIsValid(buf))
    {
        /*
         * We only get here if the index is completely empty. Lock relation
         * because nothing finer to lock exists.
         */
        PredicateLockRelation(rel, scan->xs_snapshot);
        return false;
    }
    else
        PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);

    /* initialize moreLeft/moreRight appropriately for scan direction */
    if (ScanDirectionIsForward(dir))
    {
        so->currPos.moreLeft = false;
        so->currPos.moreRight = true;
    }
    else
    {
        so->currPos.moreLeft = true;
        so->currPos.moreRight = false;
    }
    so->numKilled = 0;          /* just paranoia */
    so->markItemIndex = -1;     /* ditto */

    /* position to the precise item on the page */
    offnum = _bt_binsrch(rel, buf, keysCount, scankeys, nextkey);

    /*
     * If nextkey = false, we are positioned at the first item >= scan key,
     * or possibly at the end of a page on which all the existing items are
     * less than the scan key and we know that everything on later pages is
     * greater than or equal to scan key.
     *
     * If nextkey = true, we are positioned at the first item > scan key, or
     * possibly at the end of a page on which all the existing items are less
     * than or equal to the scan key and we know that everything on later
     * pages is greater than scan key.
     *
     * The actually desired starting point is either this item or the prior
     * one, or in the end-of-page case it's the first item on the next page
     * or the last item on this page. Adjust the starting offset if needed.
     * (If this results in an offset before the first item or after the last
     * one, _bt_readpage will report no items found, and then we'll step to
     * the next page as needed.)
     */
    if (goback)
        offnum = OffsetNumberPrev(offnum);

    /*
     * Now load data from the first page of the scan.
     */
    if (!_bt_readpage(scan, dir, offnum))
    {
        /*
         * There's no actually-matching data on this page. Try to advance to
         * the next page. Return false if there's no matching data at all.
         */
        if (!_bt_steppage(scan, dir))
            return false;
    }

    /* Drop the lock, but not pin, on the current page */
    LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);

    /* OK, itemIndex says what to return */
    currItem = &so->currPos.items[so->currPos.itemIndex];
    scan->xs_ctup.t_self = currItem->heapTid;
    if (scan->xs_want_itup)
        scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);

    return true;
}
Developer ID: Epictetus, Project: postgres, Lines: 101
Example 10: _bt_steppage

//......... (portions of code omitted) .........

    {
        /* bump pin on current buffer for assignment to mark buffer */
        IncrBufferRefCount(so->currPos.buf);
        memcpy(&so->markPos, &so->currPos,
               offsetof(BTScanPosData, items[1]) +
               so->currPos.lastItem * sizeof(BTScanPosItem));
        if (so->markTuples)
            memcpy(so->markTuples, so->currTuples,
                   so->currPos.nextTupleOffset);
        so->markPos.itemIndex = so->markItemIndex;
        so->markItemIndex = -1;
    }

    rel = scan->indexRelation;

    if (ScanDirectionIsForward(dir))
    {
        /* Walk right to the next page with data */
        /* We must rely on the previously saved nextPage link! */
        BlockNumber blkno = so->currPos.nextPage;

        /* Remember we left a page with data */
        so->currPos.moreLeft = true;

        for (;;)
        {
            /* release the previous buffer */
            _bt_relbuf(rel, so->currPos.buf);
            so->currPos.buf = InvalidBuffer;
            /* if we're at end of scan, give up */
            if (blkno == P_NONE || !so->currPos.moreRight)
                return false;
            /* check for interrupts while we're not holding any buffer lock */
            CHECK_FOR_INTERRUPTS();
            /* step right one page */
            so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
            /* check for deleted page */
            page = BufferGetPage(so->currPos.buf);
            opaque = (BTPageOpaque) PageGetSpecialPointer(page);
            if (!P_IGNORE(opaque))
            {
                PredicateLockPage(rel, blkno, scan->xs_snapshot);
                /* see if there are any matches on this page */
                /* note that this will clear moreRight if we can stop */
                if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque)))
                    break;
            }
            /* nope, keep going */
            blkno = opaque->btpo_next;
        }
    }
    else
    {
        /* Remember we left a page with data */
        so->currPos.moreRight = true;

        /*
         * Walk left to the next page with data. This is much more complex
         * than the walk-right case because of the possibility that the page
         * to our left splits while we are in flight to it, plus the
         * possibility that the page we were on gets deleted after we leave
         * it. See nbtree/README for details.
         */
        for (;;)
        {
            /* Done if we know there are no matching keys to the left */
            if (!so->currPos.moreLeft)
            {
                _bt_relbuf(rel, so->currPos.buf);
                so->currPos.buf = InvalidBuffer;
                return false;
            }

            /* Step to next physical page */
            so->currPos.buf = _bt_walk_left(rel, so->currPos.buf);

            /* if we're physically at end of index, return failure */
            if (so->currPos.buf == InvalidBuffer)
                return false;

            /*
             * Okay, we managed to move left to a non-deleted page. Done if
             * it's not half-dead and contains matching tuples. Else loop
             * back and do it all again.
             */
            page = BufferGetPage(so->currPos.buf);
            opaque = (BTPageOpaque) PageGetSpecialPointer(page);
            if (!P_IGNORE(opaque))
            {
                PredicateLockPage(rel, BufferGetBlockNumber(so->currPos.buf),
                                  scan->xs_snapshot);
                /* see if there are any matches on this page */
                /* note that this will clear moreLeft if we can stop */
                if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page)))
                    break;
            }
        }
    }

    return true;
}
Developer ID: Epictetus, Project: postgres, Lines: 101
Example 11: _bt_walk_left

/*
 * _bt_walk_left() -- step left one page, if possible
 *
 * The given buffer must be pinned and read-locked. This will be dropped
 * before stepping left. On return, we have pin and read lock on the
 * returned page, instead.
 *
 * Returns InvalidBuffer if there is no page to the left (no lock is held
 * in that case).
 *
 * When working on a non-leaf level, it is possible for the returned page
 * to be half-dead; the caller should check that condition and step left
 * again if it's important.
 */
static Buffer
_bt_walk_left(Relation rel, Buffer buf)
{
    Page        page;
    BTPageOpaque opaque;

    page = BufferGetPage(buf);
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);

    for (;;)
    {
        BlockNumber obknum;
        BlockNumber lblkno;
        BlockNumber blkno;
        int         tries;

        /* if we're at end of tree, release buf and return failure */
        if (P_LEFTMOST(opaque))
        {
            _bt_relbuf(rel, buf);
            break;
        }
        /* remember original page we are stepping left from */
        obknum = BufferGetBlockNumber(buf);
        /* step left */
        blkno = lblkno = opaque->btpo_prev;
        _bt_relbuf(rel, buf);
        /* check for interrupts while we're not holding any buffer lock */
        CHECK_FOR_INTERRUPTS();
        buf = _bt_getbuf(rel, blkno, BT_READ);
        page = BufferGetPage(buf);
        opaque = (BTPageOpaque) PageGetSpecialPointer(page);

        /*
         * If this isn't the page we want, walk right till we find what we
         * want --- but go no more than four hops (an arbitrary limit). If we
         * don't find the correct page by then, the most likely bet is that
         * the original page got deleted and isn't in the sibling chain at
         * all anymore, not that its left sibling got split more than four
         * times.
         *
         * Note that it is correct to test P_ISDELETED not P_IGNORE here,
         * because half-dead pages are still in the sibling chain. Caller
         * must reject half-dead pages if wanted.
         */
        tries = 0;
        for (;;)
        {
            if (!P_ISDELETED(opaque) && opaque->btpo_next == obknum)
            {
                /* Found desired page, return it */
                return buf;
            }
            if (P_RIGHTMOST(opaque) || ++tries > 4)
                break;
            blkno = opaque->btpo_next;
            buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
            page = BufferGetPage(buf);
            opaque = (BTPageOpaque) PageGetSpecialPointer(page);
        }

        /* Return to the original page to see what's up */
        buf = _bt_relandgetbuf(rel, buf, obknum, BT_READ);
        page = BufferGetPage(buf);
        opaque = (BTPageOpaque) PageGetSpecialPointer(page);
        if (P_ISDELETED(opaque))
        {
            /*
             * It was deleted. Move right to first nondeleted page (there
             * must be one); that is the page that has acquired the deleted
             * one's keyspace, so stepping left from it will take us where we
             * want to be.
             */
            for (;;)
            {
                if (P_RIGHTMOST(opaque))
                    elog(ERROR, "fell off the end of index \"%s\"",
                         RelationGetRelationName(rel));
                blkno = opaque->btpo_next;
                buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
                page = BufferGetPage(buf);
                opaque = (BTPageOpaque) PageGetSpecialPointer(page);
                if (!P_ISDELETED(opaque))
                    break;
            }

            /*
//......... (portions of code omitted) .........
Developer ID: Epictetus, Project: postgres, Lines: 101
Example 12: _hash_freeovflpage

/*
 * _hash_freeovflpage() -
 *
 * Remove this overflow page from its bucket's chain, and mark the page as
 * free. On entry, ovflbuf is write-locked; it is released before exiting.
 *
 * Add the tuples (itups) to wbuf in this function. We could do that in the
 * caller as well, but the advantage of doing it here is we can easily write
 * the WAL for XLOG_HASH_SQUEEZE_PAGE operation. Addition of tuples and
 * removal of overflow page has to done as an atomic operation, otherwise
 * during replay on standby users might find duplicate records.
 *
 * Since this function is invoked in VACUUM, we provide an access strategy
 * parameter that controls fetches of the bucket pages.
 *
 * Returns the block number of the page that followed the given page
 * in the bucket, or InvalidBlockNumber if no following page.
 *
 * NB: caller must not hold lock on metapage, nor on page, that's next to
 * ovflbuf in the bucket chain. We don't acquire the lock on page that's
 * prior to ovflbuf in chain if it is same as wbuf because the caller already
 * has a lock on same.
 */
BlockNumber
_hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
                   Buffer wbuf, IndexTuple *itups, OffsetNumber *itup_offsets,
                   Size *tups_size, uint16 nitups,
                   BufferAccessStrategy bstrategy)
{
    HashMetaPage metap;
    Buffer      metabuf;
    Buffer      mapbuf;
    BlockNumber ovflblkno;
    BlockNumber prevblkno;
    BlockNumber blkno;
    BlockNumber nextblkno;
    BlockNumber writeblkno;
    HashPageOpaque ovflopaque;
    Page        ovflpage;
    Page        mappage;
    uint32     *freep;
    uint32      ovflbitno;
    int32       bitmappage,
                bitmapbit;
    Bucket      bucket PG_USED_FOR_ASSERTS_ONLY;
    Buffer      prevbuf = InvalidBuffer;
    Buffer      nextbuf = InvalidBuffer;
    bool        update_metap = false;

    /* Get information from the doomed page */
    _hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE);
    ovflblkno = BufferGetBlockNumber(ovflbuf);
    ovflpage = BufferGetPage(ovflbuf);
    ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
    nextblkno = ovflopaque->hasho_nextblkno;
    prevblkno = ovflopaque->hasho_prevblkno;
    writeblkno = BufferGetBlockNumber(wbuf);
    bucket = ovflopaque->hasho_bucket;

    /*
     * Fix up the bucket chain. this is a doubly-linked list, so we must fix
     * up the bucket chain members behind and ahead of the overflow page
     * being deleted. Concurrency issues are avoided by using lock chaining
     * as described atop hashbucketcleanup.
     */
    if (BlockNumberIsValid(prevblkno))
    {
        if (prevblkno == writeblkno)
            prevbuf = wbuf;
        else
            prevbuf = _hash_getbuf_with_strategy(rel,
                                                 prevblkno,
                                                 HASH_WRITE,
                                                 LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
                                                 bstrategy);
    }
    if (BlockNumberIsValid(nextblkno))
        nextbuf = _hash_getbuf_with_strategy(rel,
                                             nextblkno,
                                             HASH_WRITE,
                                             LH_OVERFLOW_PAGE,
                                             bstrategy);

    /* Note: bstrategy is intentionally not used for metapage and bitmap */

    /* Read the metapage so we can determine which bitmap page to use */
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
    metap = HashPageGetMeta(BufferGetPage(metabuf));

    /* Identify which bit to set */
    ovflbitno = _hash_ovflblkno_to_bitno(metap, ovflblkno);

    bitmappage = ovflbitno >> BMPG_SHIFT(metap);
    bitmapbit = ovflbitno & BMPG_MASK(metap);

    if (bitmappage >= metap->hashm_nmaps)
        elog(ERROR, "invalid overflow bit number %u", ovflbitno);
    blkno = metap->hashm_mapp[bitmappage];

    /* Release metapage lock while we access the bitmap page */
//......... (portions of code omitted) .........
Developer ID: bitnine-oss, Project: agens-graph, Lines: 101
Example 13: _hash_addovflpage

//......... (portions of code omitted) .........

     * and other on new overflow page) since there cannot be anyone else
     * contending for access to ovflbuf.
     */
    ovflbuf = _hash_getnewbuf(rel, blkno, MAIN_FORKNUM);

found:

    /*
     * Do the update. No ereport(ERROR) until changes are logged. We want to
     * log the changes for bitmap page and overflow page together to avoid
     * loss of pages in case the new page is added.
     */
    START_CRIT_SECTION();

    if (page_found)
    {
        Assert(BufferIsValid(mapbuf));

        /* mark page "in use" in the bitmap */
        SETBIT(freep, bitmap_page_bit);
        MarkBufferDirty(mapbuf);
    }
    else
    {
        /* update the count to indicate new overflow page is added */
        metap->hashm_spares[splitnum]++;

        if (BufferIsValid(newmapbuf))
        {
            _hash_initbitmapbuffer(newmapbuf, metap->hashm_bmsize, false);
            MarkBufferDirty(newmapbuf);

            /* add the new bitmap page to the metapage's list of bitmaps */
            metap->hashm_mapp[metap->hashm_nmaps] = BufferGetBlockNumber(newmapbuf);
            metap->hashm_nmaps++;
            metap->hashm_spares[splitnum]++;
        }

        MarkBufferDirty(metabuf);

        /*
         * for new overflow page, we don't need to explicitly set the bit in
         * bitmap page, as by default that will be set to "in use".
         */
    }

    /*
     * Adjust hashm_firstfree to avoid redundant searches. But don't risk
     * changing it if someone moved it while we were searching bitmap pages.
     */
    if (metap->hashm_firstfree == orig_firstfree)
    {
        metap->hashm_firstfree = bit + 1;
        MarkBufferDirty(metabuf);
    }

    /* initialize new overflow page */
    ovflpage = BufferGetPage(ovflbuf);
    ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
    ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
    ovflopaque->hasho_nextblkno = InvalidBlockNumber;
    ovflopaque->hasho_bucket = pageopaque->hasho_bucket;
    ovflopaque->hasho_flag = LH_OVERFLOW_PAGE;
    ovflopaque->hasho_page_id = HASHO_PAGE_ID;

    MarkBufferDirty(ovflbuf);
Developer ID: bitnine-oss, Project: agens-graph, Lines: 67
Example 14: heap_prune_chain

//......... (portions of code omitted) .........

            case HEAPTUPLE_LIVE:
            case HEAPTUPLE_INSERT_IN_PROGRESS:

                /*
                 * If we wanted to optimize for aborts, we might consider
                 * marking the page prunable when we see INSERT_IN_PROGRESS.
                 * But we don't. See related decisions about when to mark the
                 * page prunable in heapam.c.
                 */
                break;

            default:
                elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                break;
        }

        /*
         * Remember the last DEAD tuple seen. We will advance past
         * RECENTLY_DEAD tuples just in case there's a DEAD one after them;
         * but we can't advance past anything else. (XXX is it really worth
         * continuing to scan beyond RECENTLY_DEAD? The case where we will
         * find another DEAD tuple is a fairly unusual corner case.)
         */
        if (tupdead)
        {
            latestdead = offnum;
            HeapTupleHeaderAdvanceLatestRemovedXid(htup,
                                                   &prstate->latestRemovedXid);
        }
        else if (!recent_dead)
            break;

        /*
         * If the tuple is not HOT-updated, then we are at the end of this
         * HOT-update chain.
         */
        if (!HeapTupleHeaderIsHotUpdated(htup))
            break;

        /*
         * Advance to next chain member.
         */
        Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
               BufferGetBlockNumber(buffer));
        offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
        priorXmax = HeapTupleHeaderGetUpdateXid(htup);
    }

    /*
     * If we found a DEAD tuple in the chain, adjust the HOT chain so that
     * all the DEAD tuples at the start of the chain are removed and the root
     * line pointer is appropriately redirected.
     */
    if (OffsetNumberIsValid(latestdead))
    {
        /*
         * Mark as unused each intermediate item that we are able to remove
         * from the chain.
         *
         * When the previous item is the last dead tuple seen, we are at the
         * right candidate for redirection.
         */
        for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
        {
            heap_prune_record_unused(prstate, chainitems[i]);
            ndeleted++;
        }

        /*
         * If the root entry had been a normal tuple, we are deleting it, so
         * count it in the result. But changing a redirect (even to DEAD
         * state) doesn't count.
         */
        if (ItemIdIsNormal(rootlp))
            ndeleted++;

        /*
         * If the DEAD tuple is at the end of the chain, the entire chain is
         * dead and the root line pointer can be marked dead. Otherwise just
         * redirect the root to the correct chain member.
         */
        if (i >= nchain)
            heap_prune_record_dead(prstate, rootoffnum);
        else
            heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
    }
    else if (nchain < 2 && ItemIdIsRedirected(rootlp))
    {
        /*
         * We found a redirect item that doesn't point to a valid follow-on
         * item. This can happen if the loop in heap_page_prune caused us to
         * visit the dead successor of a redirect item before visiting the
         * redirect item. We can clean up by setting the redirect item to
         * DEAD state.
         */
        heap_prune_record_dead(prstate, rootoffnum);
    }

    return ndeleted;
}
Developer ID: AXLEproject, Project: postgres, Lines: 101
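Example 14 relies on an invariant that is easy to miss: every member of a HOT chain lives on the same heap page, so the block number stored in a tuple's t_ctid must match the block the buffer holds. Below is a hedged sketch of that sanity check; the helper name next_hot_member is invented for illustration, and htup/buffer are assumed to come from surrounding pruning code like that above.

#include "postgres.h"
#include "access/htup.h"
#include "storage/bufmgr.h"
#include "storage/itemptr.h"

/*
 * Sketch: before following a HOT chain link, verify it stays on the page
 * we have pinned, then extract the next line-pointer offset on that page.
 */
static OffsetNumber
next_hot_member(HeapTupleHeader htup, Buffer buffer)
{
    /* HOT chains never leave the page, so the block numbers must agree. */
    Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
           BufferGetBlockNumber(buffer));

    return ItemPointerGetOffsetNumber(&htup->t_ctid);
}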
Example 15: gistplacetopage

static bool
gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
{
    bool        is_splitted = false;
    bool        is_leaf = (GistPageIsLeaf(state->stack->page)) ? true : false;

    MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;

    /*
     * if (!is_leaf) remove old key: This node's key has been modified,
     * either because a child split occurred or because we needed to adjust
     * our key for an insert in a child node. Therefore, remove the old
     * version of this node's key.
     *
     * for WAL replay, in the non-split case we handle this by setting up a
     * one-element todelete array; in the split case, it's handled implicitly
     * because the tuple vector passed to gistSplit won't include this tuple.
     *
     * XXX: If we want to change fillfactors between node and leaf,
     * fillfactor = (is_leaf ? state->leaf_fillfactor : state->node_fillfactor)
     */
    if (gistnospace(state->stack->page, state->itup, state->ituplen,
                    is_leaf ? InvalidOffsetNumber : state->stack->childoffnum,
                    state->freespace))
    {
        /* no space for insertion */
        IndexTuple *itvec;
        int         tlen;
        SplitedPageLayout *dist = NULL,
                   *ptr;
        BlockNumber rrlink = InvalidBlockNumber;
        GistNSN     oldnsn;

        is_splitted = true;

        /*
         * Form index tuples vector to split: remove old tuple if t's needed
         * and add new tuples to vector
         */
        itvec = gistextractpage(state->stack->page, &tlen);
        if (!is_leaf)
        {
            /* on inner page we should remove old tuple */
            int         pos = state->stack->childoffnum - FirstOffsetNumber;

            tlen--;
            if (pos != tlen)
                memmove(itvec + pos, itvec + pos + 1,
                        sizeof(IndexTuple) * (tlen - pos));
        }
        itvec = gistjoinvector(itvec, &tlen, state->itup, state->ituplen);
        dist = gistSplit(state->r, state->stack->page, itvec, tlen, giststate);

        state->itup = (IndexTuple *) palloc(sizeof(IndexTuple) * tlen);
        state->ituplen = 0;

        if (state->stack->blkno != GIST_ROOT_BLKNO)
        {
            /*
             * if non-root split then we should not allocate new buffer, but
             * we must create temporary page to operate
             */
            dist->buffer = state->stack->buffer;
            dist->page = PageGetTempPage(BufferGetPage(dist->buffer),
                                         sizeof(GISTPageOpaqueData));

            /* clean all flags except F_LEAF */
            GistPageGetOpaque(dist->page)->flags = (is_leaf) ? F_LEAF : 0;
        }

        /* make new pages and fills them */
        for (ptr = dist; ptr; ptr = ptr->next)
        {
            int         i;
            char       *data;

            /* get new page */
            if (ptr->buffer == InvalidBuffer)
            {
                ptr->buffer = gistNewBuffer(state->r);
                GISTInitBuffer(ptr->buffer, (is_leaf) ? F_LEAF : 0);
                ptr->page = BufferGetPage(ptr->buffer);
            }
            ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);

            /*
             * fill page, we can do it because all these pages are new (ie
             * not linked in tree or masked by temp page
             */
            data = (char *) (ptr->list);
            for (i = 0; i < ptr->block.num; i++)
            {
                if (PageAddItem(ptr->page, (Item) data,
                                IndexTupleSize((IndexTuple) data),
                                i + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
                    elog(ERROR, "failed to add item to index page in \"%s\"",
                         RelationGetRelationName(state->r));
                data += IndexTupleSize((IndexTuple) data);
            }

            /* set up ItemPointer and remember it for parent */
            ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
            state->itup[state->ituplen] = ptr->itup;
            state->ituplen++;
        }
//......... (portions of code omitted) .........
Developer ID: ricky-wu, Project: gpdb, Lines: 101
Example 16: _bt_search

/*
 * _bt_search() -- Search the tree for a particular scankey,
 *      or more precisely for the first leaf page it could be on.
 *
 * The passed scankey must be an insertion-type scankey (see nbtree/README),
 * but it can omit the rightmost column(s) of the index.
 *
 * When nextkey is false (the usual case), we are looking for the first
 * item >= scankey. When nextkey is true, we are looking for the first
 * item strictly greater than scankey.
 *
 * Return value is a stack of parent-page pointers. *bufP is set to the
 * address of the leaf-page buffer, which is read-locked and pinned.
 * No locks are held on the parent pages, however!
 *
 * NOTE that the returned buffer is read-locked regardless of the access
 * parameter. However, access = BT_WRITE will allow an empty root page
 * to be created and returned. When access = BT_READ, an empty index
 * will result in *bufP being set to InvalidBuffer.
 */
BTStack
_bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey,
           Buffer *bufP, int access)
{
    BTStack     stack_in = NULL;

    /* Get the root page to start with */
    *bufP = _bt_getroot(rel, access);

    /* If index is empty and access = BT_READ, no root page is created. */
    if (!BufferIsValid(*bufP))
        return (BTStack) NULL;

    /* Loop iterates once per level descended in the tree */
    for (;;)
    {
        Page        page;
        BTPageOpaque opaque;
        OffsetNumber offnum;
        ItemId      itemid;
        IndexTuple  itup;
        BlockNumber blkno;
        BlockNumber par_blkno;
        BTStack     new_stack;

        /*
         * Race -- the page we just grabbed may have split since we read its
         * pointer in the parent (or metapage). If it has, we may need to
         * move right to its new sibling. Do that.
         */
        *bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, BT_READ);

        /* if this is a leaf page, we're done */
        page = BufferGetPage(*bufP);
        opaque = (BTPageOpaque) PageGetSpecialPointer(page);
        if (P_ISLEAF(opaque))
            break;

        /*
         * Find the appropriate item on the internal page, and get the child
         * page that it points to.
         */
        offnum = _bt_binsrch(rel, *bufP, keysz, scankey, nextkey);
        itemid = PageGetItemId(page, offnum);
        itup = (IndexTuple) PageGetItem(page, itemid);
        blkno = ItemPointerGetBlockNumber(&(itup->t_tid));
        par_blkno = BufferGetBlockNumber(*bufP);

        /*
         * We need to save the location of the index entry we chose in the
         * parent page on a stack. In case we split the tree, we'll use the
         * stack to work back up to the parent page. We also save the actual
         * downlink (TID) to uniquely identify the index entry, in case it
         * moves right while we're working lower in the tree. See the paper
         * by Lehman and Yao for how this is detected and handled. (We use
         * the child link to disambiguate duplicate keys in the index --
         * Lehman and Yao disallow duplicate keys.)
         */
        new_stack = (BTStack) palloc(sizeof(BTStackData));
        new_stack->bts_blkno = par_blkno;
        new_stack->bts_offset = offnum;
        memcpy(&new_stack->bts_btentry, itup, sizeof(IndexTupleData));
        new_stack->bts_parent = stack_in;

        /* drop the read lock on the parent page, acquire one on the child */
        *bufP = _bt_relandgetbuf(rel, *bufP, blkno, BT_READ);

        /* okay, all set to move down a level */
        stack_in = new_stack;
    }

    return stack_in;
}
Developer ID: Epictetus, Project: postgres, Lines: 93
Example 17: gistplacetopage

/*
 * Place tuples from 'itup' to 'buffer'. If 'oldoffnum' is valid, the tuple
 * at that offset is atomically removed along with inserting the new tuples.
 * This is used to replace a tuple with a new one.
 *
 * If 'leftchildbuf' is valid, we're inserting the downlink for the page
 * to the right of 'leftchildbuf', or updating the downlink for
 * 'leftchildbuf'. F_FOLLOW_RIGHT flag on 'leftchildbuf' is cleared and NSN
 * is set.
 *
 * If there is not enough room on the page, it is split. All the split
 * pages are kept pinned and locked and returned in *splitinfo, the caller
 * is responsible for inserting the downlinks for them. However, if
 * 'buffer' is the root page and it needs to be split, gistplacetopage()
 * performs the split as one atomic operation, and *splitinfo is set to NIL.
 * In that case, we continue to hold the root page locked, and the child
 * pages are released; note that new tuple(s) are *not* on the root page
 * but in one of the new child pages.
 */
static bool
gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
                Buffer buffer,
                IndexTuple *itup, int ntup, OffsetNumber oldoffnum,
                Buffer leftchildbuf,
                List **splitinfo)
{
    Page        page = BufferGetPage(buffer);
    bool        is_leaf = (GistPageIsLeaf(page)) ? true : false;
    XLogRecPtr  recptr;
    int         i;
    bool        is_split;

    /*
     * Refuse to modify a page that's incompletely split. This should not
     * happen because we finish any incomplete splits while we walk down the
     * tree. However, it's remotely possible that another concurrent inserter
     * splits a parent page, and errors out before completing the split. We
     * will just throw an error in that case, and leave any split we had in
     * progress unfinished too. The next insert that comes along will clean
     * up the mess.
     */
    if (GistFollowRight(page))
        elog(ERROR, "concurrent GiST page split was incomplete");

    *splitinfo = NIL;

    /*
     * if isupdate, remove old key: This node's key has been modified, either
     * because a child split occurred or because we needed to adjust our key
     * for an insert in a child node. Therefore, remove the old version of
     * this node's key.
     *
     * for WAL replay, in the non-split case we handle this by setting up a
     * one-element todelete array; in the split case, it's handled implicitly
     * because the tuple vector passed to gistSplit won't include this tuple.
     */
    is_split = gistnospace(page, itup, ntup, oldoffnum, state->freespace);
    if (is_split)
    {
        /* no space for insertion */
        IndexTuple *itvec;
        int         tlen;
        SplitedPageLayout *dist = NULL,
                   *ptr;
        BlockNumber oldrlink = InvalidBlockNumber;
        GistNSN     oldnsn = {0, 0};
        SplitedPageLayout rootpg;
        BlockNumber blkno = BufferGetBlockNumber(buffer);
        bool        is_rootsplit;

        is_rootsplit = (blkno == GIST_ROOT_BLKNO);

        /*
         * Form index tuples vector to split. If we're replacing an old
         * tuple, remove the old version from the vector.
         */
        itvec = gistextractpage(page, &tlen);
        if (OffsetNumberIsValid(oldoffnum))
        {
            /* on inner page we should remove old tuple */
            int         pos = oldoffnum - FirstOffsetNumber;

            tlen--;
            if (pos != tlen)
                memmove(itvec + pos, itvec + pos + 1,
                        sizeof(IndexTuple) * (tlen - pos));
        }
        itvec = gistjoinvector(itvec, &tlen, itup, ntup);
        dist = gistSplit(state->r, page, itvec, tlen, giststate);

        /*
         * Set up pages to work with. Allocate new buffers for all but the
         * leftmost page. The original page becomes the new leftmost page,
         * and is just replaced with the new contents.
         *
         * For a root-split, allocate new buffers for all child pages, the
         * original page is overwritten with new root page containing
         * downlinks to the new child pages.
         */
        ptr = dist;
        if (!is_rootsplit)
        {
//......... (portions of code omitted) .........
Developer ID: gurjeet, Project: postgres, Lines: 101
Example 18: gistRelocateBuildBuffersOnSplit

/*
 * At page split, distribute tuples from the buffer of the split page to
 * new buffers for the created page halves. This also adjusts the downlinks
 * in 'splitinfo' to include the tuples in the buffers.
 */
void
gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
                                Relation r, int level,
                                Buffer buffer, List *splitinfo)
{
    RelocationBufferInfo *relocationBuffersInfos;
    bool        found;
    GISTNodeBuffer *nodeBuffer;
    BlockNumber blocknum;
    IndexTuple  itup;
    int         splitPagesCount = 0,
                i;
    GISTENTRY   entry[INDEX_MAX_KEYS];
    bool        isnull[INDEX_MAX_KEYS];
    GISTNodeBuffer oldBuf;
    ListCell   *lc;

    /* If the splitted page doesn't have buffers, we have nothing to do. */
    if (!LEVEL_HAS_BUFFERS(level, gfbb))
        return;

    /*
     * Get the node buffer of the splitted page.
     */
    blocknum = BufferGetBlockNumber(buffer);
    nodeBuffer = hash_search(gfbb->nodeBuffersTab, &blocknum,
                             HASH_FIND, &found);
    if (!found)
    {
        /* The page has no buffer, so we have nothing to do. */
        return;
    }

    /*
     * Make a copy of the old buffer, as we're going reuse it as the buffer
     * for the new left page, which is on the same block as the old page.
     * That's not true for the root page, but that's fine because we never
     * have a buffer on the root page anyway. The original algorithm as
     * described by Arge et al did, but it's of no use, as you might as well
     * read the tuples straight from the heap instead of the root buffer.
     */
    Assert(blocknum != GIST_ROOT_BLKNO);
    memcpy(&oldBuf, nodeBuffer, sizeof(GISTNodeBuffer));
    oldBuf.isTemp = true;

    /* Reset the old buffer, used for the new left page from now on */
    nodeBuffer->blocksCount = 0;
    nodeBuffer->pageBuffer = NULL;
    nodeBuffer->pageBlocknum = InvalidBlockNumber;

    /*
     * Allocate memory for information about relocation buffers.
     */
    splitPagesCount = list_length(splitinfo);
    relocationBuffersInfos =
        (RelocationBufferInfo *) palloc(sizeof(RelocationBufferInfo) *
                                        splitPagesCount);

    /*
     * Fill relocation buffers information for node buffers of pages produced
     * by split.
     */
    i = 0;
    foreach(lc, splitinfo)
    {
        GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc);
        GISTNodeBuffer *newNodeBuffer;

        /* Decompress parent index tuple of node buffer page. */
        gistDeCompressAtt(giststate, r,
                          si->downlink, NULL, (OffsetNumber) 0,
                          relocationBuffersInfos[i].entry,
                          relocationBuffersInfos[i].isnull);

        /*
         * Create a node buffer for the page. The leftmost half is on the
         * same block as the old page before split, so for the leftmost half
         * this will return the original buffer. The tuples on the original
         * buffer were relinked to the temporary buffer, so the original one
         * is now empty.
         */
        newNodeBuffer = gistGetNodeBuffer(gfbb, giststate,
                                          BufferGetBlockNumber(si->buf),
                                          level);

        relocationBuffersInfos[i].nodeBuffer = newNodeBuffer;
        relocationBuffersInfos[i].splitinfo = si;

        i++;
    }
Developer ID: mlum, Project: postgres, Lines: 93
Example 19: _bt_delitems_vacuum

/*
 * Delete item(s) from a btree page.
 *
 * This must only be used for deleting leaf items. Deleting an item on a
 * non-leaf page has to be done as part of an atomic action that includes
 * deleting the page it points to.
 *
 * This routine assumes that the caller has pinned and locked the buffer.
 * Also, the given itemnos *must* appear in increasing order in the array.
 *
 * We record VACUUMs and b-tree deletes differently in WAL. InHotStandby
 * we need to be able to pin all of the blocks in the btree in physical
 * order when replaying the effects of a VACUUM, just as we do for the
 * original VACUUM itself. lastBlockVacuumed allows us to tell whether an
 * intermediate range of blocks has had no changes at all by VACUUM,
 * and so must be scanned anyway during replay. We always write a WAL record
 * for the last block in the index, whether or not it contained any items
 * to be removed. This allows us to scan right up to end of index to
 * ensure correct locking.
 */
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
                    OffsetNumber *itemnos, int nitems,
                    BlockNumber lastBlockVacuumed)
{
    Page        page = BufferGetPage(buf);
    BTPageOpaque opaque;

    /* No ereport(ERROR) until changes are logged */
    START_CRIT_SECTION();

    /* Fix the page */
    if (nitems > 0)
        PageIndexMultiDelete(page, itemnos, nitems);

    /*
     * We can clear the vacuum cycle ID since this page has certainly been
     * processed by the current vacuum scan.
     */
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    opaque->btpo_cycleid = 0;

    /*
     * Mark the page as not containing any LP_DEAD items. This is not
     * certainly true (there might be some that have recently been marked,
     * but weren't included in our target-item list), but it will almost
     * always be true and it doesn't seem worth an additional page scan to
     * check it. Remember that BTP_HAS_GARBAGE is only a hint anyway.
     */
    opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

    MarkBufferDirty(buf);

    /* XLOG stuff */
    if (RelationNeedsWAL(rel))
    {
        XLogRecPtr  recptr;
        XLogRecData rdata[2];
        xl_btree_vacuum xlrec_vacuum;

        xlrec_vacuum.node = rel->rd_node;
        xlrec_vacuum.block = BufferGetBlockNumber(buf);
        xlrec_vacuum.lastBlockVacuumed = lastBlockVacuumed;

        rdata[0].data = (char *) &xlrec_vacuum;
        rdata[0].len = SizeOfBtreeVacuum;
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = &(rdata[1]);

        /*
         * The target-offsets array is not in the buffer, but pretend that it
         * is. When XLogInsert stores the whole buffer, the offsets array
         * need not be stored too.
         */
        if (nitems > 0)
        {
            rdata[1].data = (char *) itemnos;
            rdata[1].len = nitems * sizeof(OffsetNumber);
        }
        else
        {
            rdata[1].data = NULL;
            rdata[1].len = 0;
        }
        rdata[1].buffer = buf;
        rdata[1].buffer_std = true;
        rdata[1].next = NULL;

        recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();
}
Developer ID: Epictetus, Project: postgres, Lines: 96
Example 20: brinbuild

/*
 * brinbuild() -- build a new BRIN index.
 */
IndexBuildResult *
brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
    IndexBuildResult *result;
    double      reltuples;
    double      idxtuples;
    BrinRevmap *revmap;
    BrinBuildState *state;
    Buffer      meta;
    BlockNumber pagesPerRange;

    /*
     * We expect to be called exactly once for any index relation.
     */
    if (RelationGetNumberOfBlocks(index) != 0)
        elog(ERROR, "index \"%s\" already contains data",
             RelationGetRelationName(index));

    /*
     * Critical section not required, because on error the creation of the
     * whole relation will be rolled back.
     */
    meta = ReadBuffer(index, P_NEW);
    Assert(BufferGetBlockNumber(meta) == BRIN_METAPAGE_BLKNO);
    LockBuffer(meta, BUFFER_LOCK_EXCLUSIVE);

    brin_metapage_init(BufferGetPage(meta), BrinGetPagesPerRange(index),
                       BRIN_CURRENT_VERSION);
    MarkBufferDirty(meta);

    if (RelationNeedsWAL(index))
    {
        xl_brin_createidx xlrec;
        XLogRecPtr  recptr;
        Page        page;

        xlrec.version = BRIN_CURRENT_VERSION;
        xlrec.pagesPerRange = BrinGetPagesPerRange(index);

        XLogBeginInsert();
        XLogRegisterData((char *) &xlrec, SizeOfBrinCreateIdx);
        XLogRegisterBuffer(0, meta, REGBUF_WILL_INIT);

        recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_CREATE_INDEX);

        page = BufferGetPage(meta);
        PageSetLSN(page, recptr);
    }

    UnlockReleaseBuffer(meta);

    /*
     * Initialize our state, including the deformed tuple state.
     */
    revmap = brinRevmapInitialize(index, &pagesPerRange, NULL);
    state = initialize_brin_buildstate(index, revmap, pagesPerRange);

    /*
     * Now scan the relation. No syncscan allowed here because we want the
     * heap blocks in physical order.
     */
    reltuples = IndexBuildHeapScan(heap, index, indexInfo, false,
                                   brinbuildCallback, (void *) state);

    /* process the final batch */
    form_and_insert_tuple(state);

    /* release resources */
    idxtuples = state->bs_numtuples;
    brinRevmapTerminate(state->bs_rmAccess);
    terminate_brin_buildstate(state);

    /*
     * Return statistics
     */
    result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));

    result->heap_tuples = reltuples;
    result->index_tuples = idxtuples;

    return result;
}
Developer ID: winlibs, Project: postgresql, Lines: 86
Example 21: _bt_getroot

//......... (portions of code omitted) .........

        if (access == BT_READ)
        {
            _bt_relbuf(rel, metabuf);
            return InvalidBuffer;
        }

        /* trade in our read lock for a write lock */
        LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
        LockBuffer(metabuf, BT_WRITE);

        /*
         * Race condition: if someone else initialized the metadata between
         * the time we released the read lock and acquired the write lock, we
         * must avoid doing it again.
         */
        if (metad->btm_root != P_NONE)
        {
            /*
             * Metadata initialized by someone else. In order to guarantee no
             * deadlocks, we have to release the metadata page and start all
             * over again. (Is that really true? But it's hardly worth trying
             * to optimize this case.)
             */
            _bt_relbuf(rel, metabuf);
            return _bt_getroot(rel, access);
        }

        /*
         * Get, initialize, write, and leave a lock of the appropriate type
         * on the new root page. Since this is the first page in the tree,
         * it's a leaf as well as the root.
         */
        rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
        rootblkno = BufferGetBlockNumber(rootbuf);
        rootpage = BufferGetPage(rootbuf);
        rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
        rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
        rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
        rootopaque->btpo.level = 0;
        rootopaque->btpo_cycleid = 0;

        /* NO ELOG(ERROR) till meta is updated */
        START_CRIT_SECTION();

        metad->btm_root = rootblkno;
        metad->btm_level = 0;
        metad->btm_fastroot = rootblkno;
        metad->btm_fastlevel = 0;

        MarkBufferDirty(rootbuf);
        MarkBufferDirty(metabuf);

        /* XLOG stuff */
        if (RelationNeedsWAL(rel))
        {
            xl_btree_newroot xlrec;
            XLogRecPtr  recptr;
            XLogRecData rdata;

            xlrec.node = rel->rd_node;
            xlrec.rootblk = rootblkno;
            xlrec.level = 0;

            rdata.data = (char *) &xlrec;
            rdata.len = SizeOfBtreeNewroot;
            rdata.buffer = InvalidBuffer;
Developer ID: Epictetus, Project: postgres, Lines: 67
Example 22: _bitmap_log_newpage

/*
 * _bitmap_log_newpage() -- log a new page.
 *
 * This function is called before writing a new buffer. If metapage is not
 * NULL, this function also logs the changes to the metapage.
 */
void
_bitmap_log_newpage(Relation rel, uint8 info, Buffer buf,
                    BMMetaPage metapage)
{
    Page        page;

    page = BufferGetPage(buf);

    /* XLOG stuff */
    START_CRIT_SECTION();

    if (!(rel->rd_istemp))
    {
        xl_bm_newpage xlNewPage;
        XLogRecPtr  recptr;

#ifdef BM_DEBUG
        elog(LOG, "call _bitmap_log_newpage: blkno=%d",
             BufferGetBlockNumber(buf));
#endif

        xlNewPage.bm_node = rel->rd_node;
        xlNewPage.bm_new_blkno = BufferGetBlockNumber(buf);

        if (metapage != NULL)
        {
            XLogRecData rdata[2];
            xl_bm_metapage *xlMeta = (xl_bm_metapage *)
                palloc(MAXALIGN(sizeof(xl_bm_metapage)));

            rdata[0].buffer = InvalidBuffer;
            rdata[0].data = (char *) &xlNewPage;
            rdata[0].len = sizeof(xl_bm_newpage);
            rdata[0].next = &(rdata[1]);

            xlMeta->bm_node = rel->rd_node;

            rdata[1].buffer = InvalidBuffer;
            rdata[1].data = (char *) xlMeta;
            rdata[1].len = MAXALIGN(sizeof(xl_bm_metapage));
            rdata[1].next = NULL;

            recptr = XLogInsert(RM_BITMAP_ID, info, rdata);

            PageSetLSN(page, recptr);
            PageSetTLI(page, ThisTimeLineID);

            PageSetLSN(metapage, recptr);
            PageSetTLI(metapage, ThisTimeLineID);

            pfree(xlMeta);
        }
        else
        {
            XLogRecData rdata[1];

            rdata[0].buffer = InvalidBuffer;
            rdata[0].data = (char *) &xlNewPage;
            rdata[0].len = sizeof(xl_bm_newpage);
            rdata[0].next = NULL;

            recptr = XLogInsert(RM_BITMAP_ID, info, rdata);

            PageSetLSN(page, recptr);
            PageSetTLI(page, ThisTimeLineID);

            if (metapage != NULL)
            {
                PageSetLSN(metapage, recptr);
                PageSetTLI(metapage, ThisTimeLineID);
            }
        }
    }

    END_CRIT_SECTION();
}
Developer ID: jaiminpan, Project: bizgres, Lines of code: 81
Example 23: _hash_checkpage

/*
 * _hash_checkpage -- sanity checks on the format of all hash pages
 *
 * If flags is not zero, it is a bitwise OR of the acceptable values of
 * hasho_flag.
 */
void
_hash_checkpage(Relation rel, Buffer buf, int flags)
{
    Page        page = BufferGetPage(buf);

    /*
     * ReadBuffer verifies that every newly-read page passes
     * PageHeaderIsValid, which means it either contains a reasonably sane
     * page header or is all-zero.  We have to defend against the all-zero
     * case, however.
     */
    if (PageIsNew(page))
        ereport(ERROR,
                (errcode(ERRCODE_INDEX_CORRUPTED),
                 errmsg("index \"%s\" contains unexpected zero page at block %u",
                        RelationGetRelationName(rel),
                        BufferGetBlockNumber(buf)),
                 errhint("Please REINDEX it.")));

    /*
     * Additionally check that the special area looks sane.
     */
    if (PageGetSpecialSize(page) != MAXALIGN(sizeof(HashPageOpaqueData)))
        ereport(ERROR,
                (errcode(ERRCODE_INDEX_CORRUPTED),
                 errmsg("index \"%s\" contains corrupted page at block %u",
                        RelationGetRelationName(rel),
                        BufferGetBlockNumber(buf)),
                 errhint("Please REINDEX it.")));

    if (flags)
    {
        HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(page);

        if ((opaque->hasho_flag & flags) == 0)
            ereport(ERROR,
                    (errcode(ERRCODE_INDEX_CORRUPTED),
                     errmsg("index \"%s\" contains corrupted page at block %u",
                            RelationGetRelationName(rel),
                            BufferGetBlockNumber(buf)),
                     errhint("Please REINDEX it.")));
    }

    /*
     * When checking the metapage, also verify magic number and version.
     */
    if (flags == LH_META_PAGE)
    {
        HashMetaPage metap = HashPageGetMeta(page);

        if (metap->hashm_magic != HASH_MAGIC)
            ereport(ERROR,
                    (errcode(ERRCODE_INDEX_CORRUPTED),
                     errmsg("index \"%s\" is not a hash index",
                            RelationGetRelationName(rel))));

        if (metap->hashm_version != HASH_VERSION)
            ereport(ERROR,
                    (errcode(ERRCODE_INDEX_CORRUPTED),
                     errmsg("index \"%s\" has wrong hash version",
                            RelationGetRelationName(rel)),
                     errhint("Please REINDEX it.")));
    }
}
Developer ID: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 70
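In the corruption checks above, BufferGetBlockNumber() supplies the block number for the user-facing message, which is what makes the "Please REINDEX it" hint actionable. A reduced sketch of that reporting idiom, assuming a backend build environment; report_corrupt_page is a made-up helper and the all-zero test is a stand-in for whatever check actually failed:

#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

/*
 * Report a corrupted page, naming both the index and the block where the
 * damage was found, so the user knows exactly what to inspect or REINDEX.
 */
static void
report_corrupt_page(Relation rel, Buffer buf)
{
    Page        page = BufferGetPage(buf);

    if (PageIsNew(page))        /* stand-in check: unexpected all-zero page */
        ereport(ERROR,
                (errcode(ERRCODE_INDEX_CORRUPTED),
                 errmsg("index \"%s\" contains unexpected zero page at block %u",
                        RelationGetRelationName(rel),
                        BufferGetBlockNumber(buf)),
                 errhint("Please REINDEX it.")));
}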
Example 24: _bitmap_log_bitmappage

/*
 * _bitmap_log_bitmappage() -- log the changes to a bitmap page.
 *
 * The changes may be related to either the opaque data or non-opaque data.
 */
void
_bitmap_log_bitmappage(Relation rel, Buffer bitmapBuffer, bool isOpaque)
{
    Page        bitmapPage;
    BMBitmapOpaque bitmapPageOpaque;
    BMBitmap    bitmap;

    bitmapPage = BufferGetPage(bitmapBuffer);
    bitmapPageOpaque = (BMBitmapOpaque) PageGetSpecialPointer(bitmapPage);
    bitmap = (BMBitmap) PageGetContents(bitmapPage);

    /* XLOG stuff */
    START_CRIT_SECTION();

    if (!(rel->rd_istemp))
    {
        xl_bm_bitmappage xlBitmap;
        XLogRecPtr  recptr;
        XLogRecData rdata[1];

#ifdef BM_DEBUG
        elog(LOG, "call _bitmap_log_bitmappage: isOpaque=%d, blkno=%d, lastword_pos=%d",
             isOpaque, BufferGetBlockNumber(bitmapBuffer),
             bitmapPageOpaque->bm_hrl_words_used);
#endif

        xlBitmap.bm_node = rel->rd_node;
        xlBitmap.bm_bitmap_blkno = BufferGetBlockNumber(bitmapBuffer);
        xlBitmap.bm_isOpaque = isOpaque;

        if (!isOpaque)
        {
            xlBitmap.bm_lastword_pos = bitmapPageOpaque->bm_hrl_words_used;
            xlBitmap.bm_lastword_in_block =
                bitmap->bm_contentWords[bitmapPageOpaque->bm_hrl_words_used - 1];
            xlBitmap.bm_isFillWord =
                (((bitmap->bm_headerWords
                   [bitmapPageOpaque->bm_hrl_words_used / BM_HRL_WORD_SIZE]) &
                  (1 << (BM_HRL_WORD_SIZE - 1 -
                         bitmapPageOpaque->bm_hrl_words_used % BM_HRL_WORD_SIZE))) != 0);
            xlBitmap.bm_next_blkno = InvalidBlockNumber;
        }
        else
        {
            xlBitmap.bm_lastword_pos = 0;
            xlBitmap.bm_lastword_in_block = 0;
            xlBitmap.bm_next_blkno = bitmapPageOpaque->bm_bitmap_next;
        }

        rdata[0].buffer = InvalidBuffer;
        rdata[0].data = (char *) &xlBitmap;
        rdata[0].len = sizeof(xl_bm_bitmappage);
        rdata[0].next = NULL;

        recptr = XLogInsert(RM_BITMAP_ID, XLOG_BITMAP_INSERT_BITMAP, rdata);

        PageSetLSN(bitmapPage, recptr);
        PageSetTLI(bitmapPage, ThisTimeLineID);
    }

    END_CRIT_SECTION();
}
Developer ID: jaiminpan, Project: bizgres, Lines of code: 66
Example 25: _bt_pagedel

/*
 * _bt_pagedel() -- Delete a page from the b-tree.
 *
 * This action unlinks the page from the b-tree structure, removing all
 * pointers leading to it --- but not touching its own left and right links.
 * The page cannot be physically reclaimed right away, since other processes
 * may currently be trying to follow links leading to the page; they have to
 * be allowed to use its right-link to recover.  See nbtree/README.
 *
 * On entry, the target buffer must be pinned and read-locked.  This lock and
 * pin will be dropped before exiting.
 *
 * Returns the number of pages successfully deleted (zero on failure; could
 * be more than one if parent blocks were deleted).
 *
 * NOTE: this leaks memory.  Rather than trying to clean up everything
 * carefully, it's better to run it in a temp context that can be reset
 * frequently.
 */
int
_bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
{
    BlockNumber target,
                leftsib,
                rightsib,
                parent;
    OffsetNumber poffset,
                maxoff;
    uint32      targetlevel,
                ilevel;
    ItemId      itemid;
    BTItem      targetkey,
                btitem;
    ScanKey     itup_scankey;
    BTStack     stack;
    Buffer      lbuf,
                rbuf,
                pbuf;
    bool        parent_half_dead;
    bool        parent_one_child;
    bool        rightsib_empty;
    Buffer      metabuf = InvalidBuffer;
    Page        metapg = NULL;
    BTMetaPageData *metad = NULL;
    Page        page;
    BTPageOpaque opaque;

    /*
     * We can never delete rightmost pages nor root pages.  While at it, check
     * that page is not already deleted and is empty.
     */
    page = BufferGetPage(buf);
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
        P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
    {
        _bt_relbuf(rel, buf);
        return 0;
    }

    /*
     * Save info about page, including a copy of its high key (it must have
     * one, being non-rightmost).
     */
    target = BufferGetBlockNumber(buf);
    targetlevel = opaque->btpo.level;
    leftsib = opaque->btpo_prev;
    itemid = PageGetItemId(page, P_HIKEY);
    targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));

    /*
     * We need to get an approximate pointer to the page's parent page. Use
     * the standard search mechanism to search for the page's high key; this
     * will give us a link to either the current parent or someplace to its
     * left (if there are multiple equal high keys).  To avoid deadlocks, we'd
     * better drop the target page lock first.
     */
    _bt_relbuf(rel, buf);
    /* we need a scan key to do our search, so build one */
    itup_scankey = _bt_mkscankey(rel, &(targetkey->bti_itup));
    /* find the leftmost leaf page containing this key */
    stack = _bt_search(rel, rel->rd_rel->relnatts, itup_scankey, false,
                       &lbuf, BT_READ);
    /* don't need a pin on that either */
    _bt_relbuf(rel, lbuf);

    /*
     * If we are trying to delete an interior page, _bt_search did more than
     * we needed.  Locate the stack item pointing to our parent level.
     */
    ilevel = 0;
    for (;;)
    {
        if (stack == NULL)
            elog(ERROR, "not enough stack items");
        if (ilevel == targetlevel)
            break;
        stack = stack->bts_parent;
        ilevel++;
    }
//......... part of the code omitted here .........
Developer ID: CraigBryan, Project: PostgresqlFun, Lines of code: 101
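A detail worth noticing in _bt_pagedel: it captures target = BufferGetBlockNumber(buf) before dropping the buffer lock, because once the pin and lock are released the Buffer handle may be recycled for a different page, while the BlockNumber remains a stable identity. A minimal sketch of that save-then-release idiom, assuming a backend build environment; remember_and_release is a made-up helper name:

#include "postgres.h"
#include "storage/bufmgr.h"

/*
 * Remember which page a buffer currently holds before letting go of it.
 * The Buffer handle itself must not be trusted after release; only the
 * returned BlockNumber still identifies the page.
 */
static BlockNumber
remember_and_release(Buffer buf)
{
    /* capture the page identity while we still hold the pin */
    BlockNumber blkno = BufferGetBlockNumber(buf);

    UnlockReleaseBuffer(buf);

    /* from here on, blkno is the only valid reference to the page */
    return blkno;
}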
Example 26: rumFindParents

/*
 * Try to find parent for current stack position, returns correct
 * parent and child's offset in stack->parent.
 * Function should never release root page to prevent conflicts
 * with vacuum process
 */
void
rumFindParents(RumBtree btree, RumBtreeStack * stack, BlockNumber rootBlkno)
{
    Page        page;
    Buffer      buffer;
    BlockNumber blkno,
                leftmostBlkno;
    OffsetNumber offset;
    RumBtreeStack *root = stack->parent;
    RumBtreeStack *ptr;

    if (!root)
    {
        /* XLog mode... */
        root = (RumBtreeStack *) palloc(sizeof(RumBtreeStack));
        root->blkno = rootBlkno;
        root->buffer = ReadBuffer(btree->index, rootBlkno);
        LockBuffer(root->buffer, RUM_EXCLUSIVE);
        root->parent = NULL;
    }
    else
    {
        /*
         * find root, we should not release root page until update is
         * finished!!
         */
        while (root->parent)
        {
            ReleaseBuffer(root->buffer);
            root = root->parent;
        }

        Assert(root->blkno == rootBlkno);
        Assert(BufferGetBlockNumber(root->buffer) == rootBlkno);
        LockBuffer(root->buffer, RUM_EXCLUSIVE);
    }
    root->off = InvalidOffsetNumber;

    page = BufferGetPage(root->buffer);
    Assert(!RumPageIsLeaf(page));

    /* check trivial case */
    if ((root->off = btree->findChildPtr(btree, page, stack->blkno,
                                         InvalidOffsetNumber)) != InvalidOffsetNumber)
    {
        stack->parent = root;
        return;
    }

    blkno = btree->getLeftMostPage(btree, page);
    LockBuffer(root->buffer, RUM_UNLOCK);
    Assert(blkno != InvalidBlockNumber);

    for (;;)
    {
        buffer = ReadBuffer(btree->index, blkno);
        LockBuffer(buffer, RUM_EXCLUSIVE);
        page = BufferGetPage(buffer);
        if (RumPageIsLeaf(page))
            elog(ERROR, "Lost path");

        leftmostBlkno = btree->getLeftMostPage(btree, page);

        while ((offset = btree->findChildPtr(btree, page, stack->blkno,
                                             InvalidOffsetNumber)) == InvalidOffsetNumber)
        {
            blkno = RumPageGetOpaque(page)->rightlink;
            if (blkno == InvalidBlockNumber)
            {
                UnlockReleaseBuffer(buffer);
                break;
            }
            buffer = rumStep(buffer, btree->index, RUM_EXCLUSIVE,
                             ForwardScanDirection);
            page = BufferGetPage(buffer);
        }

        if (blkno != InvalidBlockNumber)
        {
            ptr = (RumBtreeStack *) palloc(sizeof(RumBtreeStack));
            ptr->blkno = blkno;
            ptr->buffer = buffer;
            ptr->parent = root; /* it's may be wrong, but in next call we will
                                 * correct */
            ptr->off = offset;
            stack->parent = ptr;
            return;
        }

        blkno = leftmostBlkno;
    }
}
Developer ID: postgrespro, Project: rum, Lines of code: 97
Example 27: gistXLogUpdate

/*
 * Write XLOG record describing a page update. The update can include any
 * number of deletions and/or insertions of tuples on a single index page.
 *
 * If this update inserts a downlink for a split page, also record that
 * the F_FOLLOW_RIGHT flag on the child page is cleared and NSN set.
 *
 * Note that both the todelete array and the tuples are marked as belonging
 * to the target buffer; they need not be stored in XLOG if XLogInsert decides
 * to log the whole buffer contents instead.  Also, we take care that there's
 * at least one rdata item referencing the buffer, even when ntodelete and
 * ituplen are both zero; this ensures that XLogInsert knows about the buffer.
 */
XLogRecPtr
gistXLogUpdate(RelFileNode node, Buffer buffer,
               OffsetNumber *todelete, int ntodelete,
               IndexTuple *itup, int ituplen,
               Buffer leftchildbuf)
{
    XLogRecData *rdata;
    gistxlogPageUpdate *xlrec;
    int         cur,
                i;
    XLogRecPtr  recptr;

    rdata = (XLogRecData *) palloc(sizeof(XLogRecData) * (4 + ituplen));
    xlrec = (gistxlogPageUpdate *) palloc(sizeof(gistxlogPageUpdate));

    xlrec->node = node;
    xlrec->blkno = BufferGetBlockNumber(buffer);
    xlrec->ntodelete = ntodelete;
    xlrec->leftchild =
        BufferIsValid(leftchildbuf) ? BufferGetBlockNumber(leftchildbuf) : InvalidBlockNumber;

    rdata[0].buffer = buffer;
    rdata[0].buffer_std = true;
    rdata[0].data = NULL;
    rdata[0].len = 0;
    rdata[0].next = &(rdata[1]);

    rdata[1].data = (char *) xlrec;
    rdata[1].len = sizeof(gistxlogPageUpdate);
    rdata[1].buffer = InvalidBuffer;
    rdata[1].next = &(rdata[2]);

    rdata[2].data = (char *) todelete;
    rdata[2].len = sizeof(OffsetNumber) * ntodelete;
    rdata[2].buffer = buffer;
    rdata[2].buffer_std = true;

    cur = 3;

    /* new tuples */
    for (i = 0; i < ituplen; i++)
    {
        rdata[cur - 1].next = &(rdata[cur]);
        rdata[cur].data = (char *) (itup[i]);
        rdata[cur].len = IndexTupleSize(itup[i]);
        rdata[cur].buffer = buffer;
        rdata[cur].buffer_std = true;
        cur++;
    }

    /*
     * Include a full page image of the child buf. (only necessary if a
     * checkpoint happened since the child page was split)
     */
    if (BufferIsValid(leftchildbuf))
    {
        rdata[cur - 1].next = &(rdata[cur]);
        rdata[cur].data = NULL;
        rdata[cur].len = 0;
        rdata[cur].buffer = leftchildbuf;
        rdata[cur].buffer_std = true;
        cur++;
    }
    rdata[cur - 1].next = NULL;

    recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_UPDATE, rdata);

    pfree(rdata);
    return recptr;
}
Developer ID: GisKook, Project: Gis, Lines of code: 83
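Examples 22, 24, 27, and 29 all follow the same WAL convention: the record payload stores block numbers, not Buffer handles, because a Buffer is a transient in-memory slot that means nothing at redo time. A schematic sketch of that conversion, assuming a backend build environment; xl_my_update and fill_update_record are made-up names, not a real WAL record type:

#include "postgres.h"
#include "storage/block.h"
#include "storage/bufmgr.h"

/* hypothetical WAL record payload: pages are identified by block number */
typedef struct xl_my_update
{
    BlockNumber target_blkno;   /* page being updated */
    BlockNumber child_blkno;    /* optional child page, or InvalidBlockNumber */
} xl_my_update;

/*
 * Fill the payload from live buffers.  An invalid child buffer is encoded
 * as InvalidBlockNumber, mirroring what gistXLogUpdate does above with
 * its leftchild field.
 */
static void
fill_update_record(xl_my_update *xlrec, Buffer target, Buffer child)
{
    xlrec->target_blkno = BufferGetBlockNumber(target);
    xlrec->child_blkno = BufferIsValid(child) ?
        BufferGetBlockNumber(child) : InvalidBlockNumber;
}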
Example 28: rumInsertValue

//......... part of the code omitted here .........
        {
            page = BufferGetPage(stack->buffer);
            rpage = BufferGetPage(rbuffer);
        }
        else
        {
            state = GenericXLogStart(index);
            page = GenericXLogRegisterBuffer(state, stack->buffer, 0);
            rpage = GenericXLogRegisterBuffer(state, rbuffer,
                                              GENERIC_XLOG_FULL_IMAGE);
        }

        /*
         * newlpage is a pointer to memory page, it doesn't associate
         * with buffer, stack->buffer should be untouched
         */
        newlpage = btree->splitPage(btree, stack->buffer, rbuffer, page,
                                    rpage, stack->off);

        /*
         * split root, so we need to allocate new left page and place
         * pointer on root to left and right page
         */
        lbuffer = RumNewBuffer(btree->index);
        if (btree->rumstate->isBuild)
            lpage = BufferGetPage(lbuffer);
        else
            lpage = GenericXLogRegisterBuffer(state, lbuffer,
                                              GENERIC_XLOG_FULL_IMAGE);

        RumPageGetOpaque(rpage)->rightlink = InvalidBlockNumber;
        RumPageGetOpaque(newlpage)->leftlink = InvalidBlockNumber;
        RumPageGetOpaque(rpage)->leftlink = BufferGetBlockNumber(lbuffer);
        RumPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);

        RumInitPage(page, RumPageGetOpaque(newlpage)->flags & ~RUM_LEAF,
                    BufferGetPageSize(stack->buffer));
        PageRestoreTempPage(newlpage, lpage);
        btree->fillRoot(btree, stack->buffer, lbuffer, rbuffer,
                        page, lpage, rpage);

        PredicateLockPageSplit(btree->index,
                               BufferGetBlockNumber(stack->buffer),
                               BufferGetBlockNumber(lbuffer));

        PredicateLockPageSplit(btree->index,
                               BufferGetBlockNumber(stack->buffer),
                               BufferGetBlockNumber(rbuffer));

        if (btree->rumstate->isBuild)
        {
            START_CRIT_SECTION();
            MarkBufferDirty(rbuffer);
            MarkBufferDirty(lbuffer);
            MarkBufferDirty(stack->buffer);
        }
        else
            GenericXLogFinish(state);

        UnlockReleaseBuffer(rbuffer);
        UnlockReleaseBuffer(lbuffer);
        LockBuffer(stack->buffer, RUM_UNLOCK);

        if (btree->rumstate->isBuild)
            END_CRIT_SECTION();
Developer ID: postgrespro, Project: rum, Lines of code: 67
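In the root-split path above, the new left and right pages are chained to each other by block number: an on-disk page can only refer to a sibling through a BlockNumber stored in its special area, never through a Buffer. A stripped-down sketch of that wiring, assuming a backend build environment; MyPageOpaqueData and link_siblings are made-up names standing in for an access method's real special-area layout:

#include "postgres.h"
#include "storage/block.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/* hypothetical special-area layout with sibling pointers */
typedef struct MyPageOpaqueData
{
    BlockNumber leftlink;
    BlockNumber rightlink;
} MyPageOpaqueData;

/*
 * Chain two sibling pages together by block number, the same move the
 * RUM split code makes with its leftlink/rightlink fields above.
 */
static void
link_siblings(Buffer lbuf, Buffer rbuf)
{
    MyPageOpaqueData *lopaque = (MyPageOpaqueData *)
        PageGetSpecialPointer(BufferGetPage(lbuf));
    MyPageOpaqueData *ropaque = (MyPageOpaqueData *)
        PageGetSpecialPointer(BufferGetPage(rbuf));

    lopaque->rightlink = BufferGetBlockNumber(rbuf);
    ropaque->leftlink = BufferGetBlockNumber(lbuf);
}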
Example 29: _bitmap_log_updatewords

/*
 * _bitmap_log_updatewords() -- log updating bitmap words in one or
 * two bitmap pages.
 *
 * If nextBuffer is Invalid, we only update one page.
 */
void
_bitmap_log_updatewords(Relation rel,
                        Buffer lovBuffer, OffsetNumber lovOffset,
                        Buffer firstBuffer, Buffer secondBuffer,
                        bool new_lastpage)
{
    Page        firstPage = NULL;
    Page        secondPage = NULL;
    BMBitmap    firstBitmap;
    BMBitmap    secondBitmap;
    BMBitmapOpaque firstOpaque;
    BMBitmapOpaque secondOpaque;

    xl_bm_updatewords xlBitmapWords;
    XLogRecPtr  recptr;
    XLogRecData rdata[1];

    firstPage = BufferGetPage(firstBuffer);
    firstBitmap = (BMBitmap) PageGetContentsMaxAligned(firstPage);
    firstOpaque = (BMBitmapOpaque) PageGetSpecialPointer(firstPage);
    xlBitmapWords.bm_two_pages = false;
    xlBitmapWords.bm_first_blkno = BufferGetBlockNumber(firstBuffer);
    memcpy(&xlBitmapWords.bm_first_cwords,
           firstBitmap->cwords,
           BM_NUM_OF_HRL_WORDS_PER_PAGE * sizeof(BM_HRL_WORD));
    memcpy(&xlBitmapWords.bm_first_hwords,
           firstBitmap->hwords,
           BM_NUM_OF_HEADER_WORDS * sizeof(BM_HRL_WORD));
    xlBitmapWords.bm_first_last_tid = firstOpaque->bm_last_tid_location;
    xlBitmapWords.bm_first_num_cwords = firstOpaque->bm_hrl_words_used;
    xlBitmapWords.bm_next_blkno = firstOpaque->bm_bitmap_next;

    if (BufferIsValid(secondBuffer))
    {
        secondPage = BufferGetPage(secondBuffer);
        secondBitmap = (BMBitmap) PageGetContentsMaxAligned(secondPage);
        secondOpaque = (BMBitmapOpaque) PageGetSpecialPointer(secondPage);

        xlBitmapWords.bm_two_pages = true;
        xlBitmapWords.bm_second_blkno = BufferGetBlockNumber(secondBuffer);

        memcpy(&xlBitmapWords.bm_second_cwords,
               secondBitmap->cwords,
               BM_NUM_OF_HRL_WORDS_PER_PAGE * sizeof(BM_HRL_WORD));
        memcpy(&xlBitmapWords.bm_second_hwords,
               secondBitmap->hwords,
               BM_NUM_OF_HEADER_WORDS * sizeof(BM_HRL_WORD));
        xlBitmapWords.bm_second_last_tid = secondOpaque->bm_last_tid_location;
        xlBitmapWords.bm_second_num_cwords = secondOpaque->bm_hrl_words_used;
        xlBitmapWords.bm_next_blkno = secondOpaque->bm_bitmap_next;
    }

    // Fetch gp_persistent_relation_node information that will be added to XLOG record.
    RelationFetchGpRelationNodeForXLog(rel);

    xlBitmapWords.bm_node = rel->rd_node;
    xlBitmapWords.bm_persistentTid = rel->rd_relationnodeinfo.persistentTid;
    xlBitmapWords.bm_persistentSerialNum = rel->rd_relationnodeinfo.persistentSerialNum;
    xlBitmapWords.bm_lov_blkno = BufferGetBlockNumber(lovBuffer);
    xlBitmapWords.bm_lov_offset = lovOffset;
    xlBitmapWords.bm_new_lastpage = new_lastpage;

    rdata[0].buffer = InvalidBuffer;
    rdata[0].data = (char *) &xlBitmapWords;
    rdata[0].len = sizeof(xl_bm_updatewords);
    rdata[0].next = NULL;

    recptr = XLogInsert(RM_BITMAP_ID, XLOG_BITMAP_UPDATEWORDS, rdata);

    PageSetLSN(firstPage, recptr);
    PageSetTLI(firstPage, ThisTimeLineID);

    if (BufferIsValid(secondBuffer))
    {
        PageSetLSN(secondPage, recptr);
        PageSetTLI(secondPage, ThisTimeLineID);
    }

    if (new_lastpage)
    {
        Page        lovPage = BufferGetPage(lovBuffer);

        PageSetLSN(lovPage, recptr);
        PageSetTLI(lovPage, ThisTimeLineID);
    }
}
Developer ID: BALDELab, Project: incubator-hawq, Lines of code: 96
Example 30: RelationGetBufferForTuple

/*
 * RelationGetBufferForTuple
 *
 * Returns pinned and exclusive-locked buffer of a page in given relation
 * with free space >= given len.
 *
 * If otherBuffer is not InvalidBuffer, then it references a previously
 * pinned buffer of another page in the same relation; on return, this
 * buffer will also be exclusive-locked.  (This case is used by heap_update;
 * the otherBuffer contains the tuple being updated.)
 *
 * The reason for passing otherBuffer is that if two backends are doing
 * concurrent heap_update operations, a deadlock could occur if they try
 * to lock the same two buffers in opposite orders.  To ensure that this
 * can't happen, we impose the rule that buffers of a relation must be
 * locked in increasing page number order.  This is most conveniently done
 * by having RelationGetBufferForTuple lock them both, with suitable care
 * for ordering.
 *
 * NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
 * same buffer we select for insertion of the new tuple (this could only
 * happen if space is freed in that page after heap_update finds there's not
 * enough there).  In that case, the page will be pinned and locked only once.
 *
 * For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by
 * locking them only after locking the corresponding heap page, and taking
 * no further lwlocks while they are locked.
 *
 * We normally use FSM to help us find free space.  However,
 * if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
 * the end of the relation if the tuple won't fit on the current target page.
 * This can save some cycles when we know the relation is new and doesn't
 * contain useful amounts of free space.
 *
 * HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
 * relation, if the caller holds exclusive lock and is careful to invalidate
 * relation's smgr_targblock before the first insertion --- that ensures that
 * all insertions will occur into newly added pages and not be intermixed
 * with tuples from other transactions.  That way, a crash can't risk losing
 * any committed data of other transactions.  (See heap_insert's comments
 * for additional constraints needed for safe usage of this behavior.)
 *
 * The caller can also provide a BulkInsertState object to optimize many
 * insertions into the same relation.  This keeps a pin on the current
 * insertion target page (to save pin/unpin cycles) and also passes a
 * BULKWRITE buffer selection strategy object to the buffer manager.
 * Passing NULL for bistate selects the default behavior.
 *
 * We always try to avoid filling existing pages further than the fillfactor.
 * This is OK since this routine is not consulted when updating a tuple and
 * keeping it on the same page, which is the scenario fillfactor is meant
 * to reserve space for.
 *
 * ereport(ERROR) is allowed here, so this routine *must* be called
 * before any (unlogged) changes are made in buffer pool.
 */
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
                          Buffer otherBuffer, int options,
                          BulkInsertState bistate,
                          Buffer *vmbuffer, Buffer *vmbuffer_other)
{
    bool        use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
    Buffer      buffer = InvalidBuffer;
    Page        page;
    Size        pageFreeSpace,
                saveFreeSpace;
    BlockNumber targetBlock,
                otherBlock;
    bool        needLock;

    len = MAXALIGN(len);        /* be conservative */

    /* Bulk insert is not supported for updates, only inserts. */
    Assert(otherBuffer == InvalidBuffer || !bistate);

    /*
     * If we're gonna fail for oversize tuple, do it right away
     */
    if (len > MaxHeapTupleSize)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("row is too big: size %lu, maximum size %lu",
                        (unsigned long) len,
                        (unsigned long) MaxHeapTupleSize)));

    /* Compute desired extra freespace due to fillfactor option */
    saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
                                                   HEAP_DEFAULT_FILLFACTOR);

    if (otherBuffer != InvalidBuffer)
        otherBlock = BufferGetBlockNumber(otherBuffer);
    else
        otherBlock = InvalidBlockNumber;    /* just to keep compiler quiet */

    /*
     * We first try to put the tuple on the same page we last inserted a tuple
     * on, as cached in the BulkInsertState or relcache entry.  If that
     * doesn't work, we ask the Free Space Map to locate a suitable page.
     * Since the FSM's info might be out of date, we have to be prepared to
//......... part of the code omitted here .........
Developer ID: a1exsh, Project: postgres, Lines of code: 101
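The deadlock-avoidance rule described in the header comment above — buffers of a relation must be locked in increasing page number order — is expressed directly with BufferGetBlockNumber() comparisons. A simplified sketch of that ordering, assuming a backend build environment and two already-pinned buffers of the same relation; lock_buffer_pair_in_order is a made-up helper name:

#include "postgres.h"
#include "storage/bufmgr.h"

/*
 * Lock two already-pinned buffers in increasing block-number order, the
 * rule RelationGetBufferForTuple relies on so that concurrent
 * heap_update operations cannot deadlock on the same pair of pages.
 */
static void
lock_buffer_pair_in_order(Buffer buf1, Buffer buf2)
{
    if (BufferGetBlockNumber(buf1) < BufferGetBlockNumber(buf2))
    {
        LockBuffer(buf1, BUFFER_LOCK_EXCLUSIVE);
        LockBuffer(buf2, BUFFER_LOCK_EXCLUSIVE);
    }
    else
    {
        LockBuffer(buf2, BUFFER_LOCK_EXCLUSIVE);
        LockBuffer(buf1, BUFFER_LOCK_EXCLUSIVE);
    }
}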
Note: The BufferGetBlockNumber function examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright in the source code belongs to the original authors, and any distribution or use should comply with the corresponding project's license. Please do not reproduce without permission.