这篇教程C++ CACHE_HASH函数代码示例写得很实用,希望能帮到您。
本文整理汇总了C++中CACHE_HASH函数的典型用法代码示例。如果您正苦于以下问题:C++ CACHE_HASH函数的具体用法?C++ CACHE_HASH怎么用?C++ CACHE_HASH使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。 在下文中一共展示了CACHE_HASH函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: unlink_htab_ent

/* unlink BLK from the hash table bucket chain in SET */
static void
unlink_htab_ent(struct cache_t *cp,       /* cache to update */
                struct cache_set_t *set,  /* set containing bkt chain */
                struct cache_blk_t *blk)  /* block to unlink */
{
  int index = CACHE_HASH(cp, blk->tag);
  struct cache_blk_t *prev_ent = NULL;
  struct cache_blk_t *cur_ent = set->hash[index];

  /* walk the bucket chain until BLK is located */
  while (cur_ent != NULL && cur_ent != blk)
    {
      prev_ent = cur_ent;
      cur_ent = cur_ent->hash_next;
    }

  /* BLK must be present in its own bucket */
  assert(cur_ent);

  if (prev_ent == NULL)
    set->hash[index] = cur_ent->hash_next;   /* BLK was the bucket head */
  else
    prev_ent->hash_next = cur_ent->hash_next; /* BLK was interior or tail */

  cur_ent->hash_next = NULL;  /* fully detach the unlinked block */
}
开发者ID:jnaneshm,项目名称:614_hw4,代码行数:32,
示例2: cache_invalidate_symbolstatic inline voidcache_invalidate_symbol (repv symbol){ unsigned int hash = CACHE_HASH (symbol); if (ref_cache[hash].s != 0 && ref_cache[hash].n->symbol == symbol) ref_cache[hash].s = 0;}
开发者ID:OpenInkpot-archive,项目名称:iplinux-librep,代码行数:7,
示例3: enter_cachestatic inline voidenter_cache (rep_struct *s, rep_struct_node *binding){ unsigned int hash = CACHE_HASH (binding->symbol); int i, oldest_i, oldest_age = INT_MAX; for (i = 0; i < CACHE_ASSOC; i++) { if (ref_cache[hash][i].s == 0) { oldest_i = i; break; } else if (ref_cache[hash][i].age < oldest_age) { oldest_i = i; oldest_age = ref_cache[hash][i].age; } } assert (oldest_i < CACHE_ASSOC);#ifdef DEBUG if (ref_cache[hash][oldest_i].s != 0) { if (ref_cache[hash][oldest_i].n->symbol == binding->symbol) ref_cache_conflicts++; else ref_cache_collisions++; }#endif ref_cache[hash][oldest_i].s = s; ref_cache[hash][oldest_i].n = binding; ref_cache[hash][oldest_i].age = ++ref_age;}
开发者ID:OpenInkpot-archive,项目名称:iplinux-librep,代码行数:32,
示例4: cache_flush_addr/* flush the block containing ADDR from the cache CP, returns the latency of the block flush operation */unsigned int /* latency of flush operation */cache_flush_addr(struct cache_t *cp, /* cache instance to flush */ md_addr_t addr, /* address of block to flush */ tick_t now) /* time of cache flush */{ fprintf( stderr, "flush address/n" ); md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); struct cache_blk_t *blk; int lat = cp->hit_latency; /* min latency to probe cache */ if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) break; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) break; } } if (blk) { cp->invalidations++; blk->status &= ~CACHE_BLK_VALID; /* blow away the last block to hit */ cp->last_tagset = 0; cp->last_blk = NULL; if (blk->status & CACHE_BLK_DIRTY) { /* write back the invalidated block */ cp->writebacks++; lat += cp->blk_access_fn(Write, CACHE_MK_BADDR(cp, blk->tag, set), cp->bsize, blk, now+lat); } /* move this block to tail of the way (LRU) list */ update_way_list(&cp->sets[set], blk, Tail); } /* return latency of the operation */ return lat;}
开发者ID:jsarabia1247,项目名称:waylevelcache,代码行数:62,
示例5: cache_probe/* return non-zero if block containing address ADDR is contained in cache CP, this interface is used primarily for debugging and asserting cache invariants */int /* non-zero if access would hit */cache_probe(struct cache_t *cp, /* cache instance to probe */md_addr_t addr) /* address of block to probe */{ md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); struct cache_blk_t *blk; /* permissions are checked on cache misses */ if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk = cp->sets[set].hash[hindex]; blk; blk = blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) return TRUE; } } else { /* low-associativity cache, linear search the way list */ for (blk = cp->sets[set].way_head; blk; blk = blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) return TRUE; } } /* cache block not found */ return FALSE;}
开发者ID:Evangileon,项目名称:SimpleScalar-Tournament,代码行数:32,
示例6: property_cache_refrepvproperty_cache_ref (repv id, repv prop){ unsigned int h, i; if (cache_vec == rep_NULL) return rep_NULL; h = CACHE_HASH (id, prop) * CACHE_ASSOC; DB (("prop ref: 0x%x,%s (%d) -> ", id, rep_STR (rep_SYM (prop)->name), h)); for (i = h; i < h + CACHE_ASSOC; i++) { if (cache_ids[i] == id && cache_props[i] == prop) { cache_hits++; DB (("hit/n")); cache_ages[i] = ++cache_clock; return cache_values[i]; } } DB (("miss/n")); cache_misses++; return rep_NULL;}
开发者ID:baohaojun,项目名称:sawfish,代码行数:27,
示例7: property_cache_invalidatevoidproperty_cache_invalidate (repv id, repv prop){ unsigned int h, i; if (cache_vec == rep_NULL) return; h = CACHE_HASH (id, prop) * CACHE_ASSOC; for (i = h; i < h + CACHE_ASSOC; i++) { if (cache_ids[i] == id && cache_props[i] == prop) { if (cache_updates[i] == 0) { cache_ids[i] = 0; cache_props[i] = Qnil; cache_values[i] = Qnil; } else cache_updates[i]--; } }}
开发者ID:baohaojun,项目名称:sawfish,代码行数:25,
示例8: link_htab_ent/* insert BLK onto the head of the hash table bucket chain in SET */static void link_htab_ent(struct cache_t *cp, /* cache to update */struct cache_set_t *set, /* set containing bkt chain */struct cache_blk_t *blk) /* block to insert */{ int index = CACHE_HASH(cp, blk->tag); /* insert block onto the head of the bucket chain */ blk->hash_next = set->hash[index]; set->hash[index] = blk;}
开发者ID:Evangileon,项目名称:SimpleScalar-Tournament,代码行数:11,
示例9: property_cache_setvoidproperty_cache_set (repv id, repv prop, repv value, int invals){ unsigned int h, i, oldest, oldest_age; if (cache_vec == rep_NULL) { cache_vec = Fmake_vector (rep_MAKE_INT (CACHE_SIZE * 3), Qnil); rep_mark_static (&cache_vec); cache_ids = rep_VECT (cache_vec)->array; cache_props = cache_ids + CACHE_SIZE; cache_values = cache_props + CACHE_SIZE; } h = CACHE_HASH (id, prop) * CACHE_ASSOC; oldest_age = UINT_MAX; oldest = -1; for (i = h; i < h + CACHE_ASSOC; i++) { if (cache_ids[i] == id && cache_props[i] == prop) { cache_values[i] = value; cache_updates[i] += invals; return; } if (cache_ages[i] <= oldest_age) { oldest_age = cache_ages[i]; oldest = i; } } assert (oldest != -1); if (cache_ids[oldest] != 0) DB (("prop eject: 0x%x (%d)/n", cache_ids[oldest], oldest)); cache_ids[oldest] = id; cache_props[oldest] = prop; cache_values[oldest] = value; cache_ages[oldest] = ++cache_clock; cache_updates[oldest] = invals; DB (("set: 0x%x,%s (%d)/n", id, rep_STR (rep_SYM (prop)->name), oldest));}
开发者ID:baohaojun,项目名称:sawfish,代码行数:49,
示例10: lookup_cachestatic inline rep_struct_node *lookup_cache (rep_struct *s, repv var){ unsigned int hash = CACHE_HASH (var); if (ref_cache[hash].s == s && ref_cache[hash].n->symbol == var) {#ifdef DEBUG ref_cache_hits++;#endif return ref_cache[hash].n; } else {#ifdef DEBUG ref_cache_misses++;#endif return 0; }}
开发者ID:OpenInkpot-archive,项目名称:iplinux-librep,代码行数:19,
示例11: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr, /* for address of replaced block */ int prefetch) /* 1 if the access is a prefetch, 0 if it is not */{ byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } else { /* low-associativity cache, linear search the 
way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } /* cache block not found */ /* **MISS** */ if (prefetch == 0 ) { cp->misses++; if (cmd == Read) { cp->read_misses++; } } /* ECE552 Assignment 4 - BEGIN CODE */ if (strcmp(cp->name, "dl1") == 0) { for(std::list<evicted_tag>::iterator it = evicted_blks[set].begin(); it != evicted_blks[set].end(); ++it) { if(it->tag == tag && it->prefetched) { //move element to the front of the list if(it != evicted_blks[set].begin()) { std::list<evicted_tag>::iterator tmp = it; evicted_blks[set].splice(evicted_blks[set].begin(), evicted_blks[set], tmp, ++it); } cp->prefetch_misses++; break; } } } /* ECE552 Assignment 4 - END CODE *///.........这里部分代码省略.........
开发者ID:swlpark,项目名称:ece552,代码行数:101,
示例12: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; if(cp->isL2){ if(set > 512){ fprintf(stderr, "Houston we have a problem, set = %d/n", set); scanf("%d", &lat); } } int pointerLat = 0; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } /*FP-JS Loc will store the last line traversed through the list I want to keep set so I know where the head of the list is for replacement */ unsigned int loc = set; /*FP-BC Modified cache hit checker for new cache structure*/ if(cp->isL2) { 
/*FP-BC continue through each linked set with data*/ while(cp->sets[loc].usageCtr) { //if(cp->isL2) //fprintf(stderr, "ptr = %d, loc = %d", cp->sets[loc].fwdPtr, loc); if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[loc].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)){ //fprintf(stderr, "Hit!"); goto cache_hit; } } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[loc].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)){ // fprintf(stderr, "Hit!"); goto cache_hit; } } } /*FP-BC If the current set has a pointer to another set, follow it and check again for a hit*/ if(cp->sets[loc].fwdPtr){ loc = cp->sets[loc].fwdPtr;//.........这里部分代码省略.........
开发者ID:jsarabia1247,项目名称:waylevelcache,代码行数:101,
示例13: fetch_cache_blk/* ECE552 Assignment 4 - BEGIN CODE */void fetch_cache_blk (struct cache_t *cp, md_addr_t addr) { md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); int lat = 0; struct cache_blk_t *blk, *repl; //check if the block already exists in cache if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next){ if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) return; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) return; } } switch (cp->policy) { case LRU: case FIFO: repl = cp->sets[set].way_tail; update_way_list(&cp->sets[set], repl, Head); break; case Random: { int bindex = myrand() & (cp->assoc - 1); repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex); } break; default: panic("bogus replacement policy"); } /* remove this block from the hash bucket chain, if hash exists */ if (cp->hsize) unlink_htab_ent(cp, &cp->sets[set], repl); /* evicted cache_blk */ if (evicted_blks[set].size() < cp->assoc) { evicted_blks[set].push_front({true, repl->tag}); } else { evicted_blks[set].pop_back(); evicted_blks[set].push_front({true, repl->tag}); } /* write back replaced block data */ if (repl->status & CACHE_BLK_VALID) { cp->replacements++; if (repl->status & CACHE_BLK_DIRTY) { /* write back the cache block */ cp->writebacks++; lat += cp->blk_access_fn(Write, CACHE_MK_BADDR(cp, repl->tag, set), cp->bsize, repl, 0, 0); } } /* update block tags */ repl->tag = tag; repl->status = CACHE_BLK_VALID; /* dirty bit set on update */ repl->prefetched = 1; repl->prefetch_used = 0; /* read data block */ cp->prefetch_cnt += 1; lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize, repl, NULL, 0); /* update block status */ repl->ready = NULL; /* link 
this entry back into the hash table */ if (cp->hsize) link_htab_ent(cp, &cp->sets[set], repl);}
开发者ID:swlpark,项目名称:ece552,代码行数:84,
示例14: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; struct pdp_fifo_node *fnode, *tnode; int lat = 0; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* PDP distance decrement on set access */ if(cp->policy == PDP){ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->rpd > 0) blk->rpd--; } /* PDP counter update */ cp->PDP_Nt++; if((cp->PDP_Nt % 50000) == 0) compute_pd(cp); } /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex 
= CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } /* cache block not found */ /* **MISS** */ cp->misses++; /* select the appropriate block to replace, and re-link this entry to the appropriate place in the way list */ switch (cp->policy) { case LRU: case FIFO: repl = cp->sets[set].way_tail; update_way_list(&cp->sets[set], repl, Head); break; case Random://.........这里部分代码省略.........
开发者ID:kuthulas,项目名称:PDP,代码行数:101,
示例15: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; 
blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } /* cache block not found */ /* **MISS** */ cp->misses++; /* select the appropriate block to replace, and re-link this entry to the appropriate place in the way list */ switch (cp->policy) { case LRU: case FIFO: repl = cp->sets[set].way_tail; update_way_list(&cp->sets[set], repl, Head); break; case DIP: repl = cp->sets[set].way_tail; enum list_loc_t where; /* insert location */ if ( cp->sets[set].DIP_set_type == LRU_set ) { where = Head; // update PSEL to bias BIP int max_PSEL = 1 << cp->width_PSEL - 1; if ( cp->PSEL < max_PSEL ) { cp->PSEL ++; } } else if ( cp->sets[set].DIP_set_type == BIP_set ) { if ( cp->BIPCTR == 0 ) { // use LRU policy, MRU insertion where = Head; }//.........这里部分代码省略.........
开发者ID:tharun-b,项目名称:sim-outorder_DIP-cache,代码行数:101,
示例16: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; int victim; //DRRIP-Used in while loop to check if we found the victim block or not int RRPV_counter; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; //DRRIP-No need to make RRPV=0 here as the block is accessed previously, so in RRIP-HP it is already 0 goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; 
blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) { blk->RRPV=0; //DRRIP-Implementing Re-Reference, when we encounter hit then make RRPV=0 //printf("Its a hit and now RRPV=%d/n", blk->RRPV); goto cache_hit; } } } /* cache block not found */ /* **MISS***/ cp->misses++; /* select the appropriate block to replace, and re-link this entry to the appropriate place in the way list */ switch (cp->policy) { case LRU: case FIFO: repl = cp->sets[set].way_tail; update_way_list(&cp->sets[set], repl, Head); break; case DRRIP: RRPV_counter=1<<(cp->RRPV_bits); //printf("Max choices=%d/n",RRPV_counter); victim=0; while(victim==0) //DRRIP-We keep on looking till the time we don't find the victim block { //DRRIP-Traversing code copied from hit policy line 559 for (blk=cp->sets[set].way_head;blk;blk=blk->way_next) //DRRIP-Resolves the tie breaker automatically { //.........这里部分代码省略.........
开发者ID:shingaridavesh,项目名称:DRRIP_Replacement_Policy,代码行数:101,
示例17: cache_accessunsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ acheck++; //printf("%d /n",acheck); byte_t *p = vp; md_addr_t tag; if (pseudo_check==1) tag = CACHE_TAG_PSEUDOASSOC(cp, addr); else tag= CACHE_TAG(cp,addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); md_addr_t set1=HASH_MASK(set); md_addr_t addr1=addr; addr1 ^=1<<(cp->set_shift); //set1=CACHE_SET(cp,addr1); struct cache_blk_t *blk, *repl; int lat = 0; // if (cp->sets[set].rehash_bit==1) //printf("yo"); //printf("%d",cp->sets[set].way_head->rehash_bit ); /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ //printf("same block hit"); blk = cp->last_blk; //cp->last_blk->rehash_bit=0; goto cache_fast_hit; } if (cp->hsize) { //printf("different block hit"); /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } else { //printf("different 
block hit"); /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) { goto cache_hit; } } } /* cache block not found */ /* **MISS** */ /* select the appropriate block to replace, and re-link this entry to the appropriate place in the way list */ switch (cp->policy) {//.........这里部分代码省略.........
开发者ID:saisujithreddym,项目名称:pseudocache,代码行数:101,
示例18: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; int possible_real_miss = 0; int low_leak_penalty_flag =0; int temp; int decay_caused_miss = FALSE; /* TRUE if it's a decay caused miss */ if (b_in_dispatch) b_in_dispatch = TRUE; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) {if ((blk->status 
& CACHE_BLK_DECAYED) && cache_leak_is_ctrlled())low_leak_penalty_flag = 1; if (blk->tag == tag) { /* Leakage: induced misses only in state losing ctrl techniques */ if ((blk->status & CACHE_BLK_DECAYED) && cache_leak_ctrl_is_state_losing()) { decay_caused_miss = TRUE; induced_decay_misses++; break; } else if ((blk->status & CACHE_BLK_DECAYED) && (blk->status & CACHE_BLK_VALID)/ && cache_leak_is_ctrlled()) { /* * Leakage: update stats * in state preserving ctrl, mode switch to high happens * on a hit to a decayed block too */ mode_switch_l2h_incr (); /* * leakage throughout the cache assumed uniform. Also to model * the effect of settling time of leakage current, the lines * are assumed to be turned off after 'switch_cycles_l2h/2'. * The assumption is that settling is a linear function of time. */ low_leak_ratio_dcr(1.0/(cp->nsets * cp->assoc), get_switch_cycles_l2h()/2); goto cache_hit; } else if( blk->status & CACHE_BLK_VALID) goto cache_hit; } else if (blk->status & CACHE_BLK_DECAYED) possible_real_miss = 1;//.........这里部分代码省略.........
开发者ID:chuzhufei,项目名称:ViewAcdemicLinuxCode,代码行数:101,
示例19: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr, /* for address of replaced block */ tick_t *mem_ready/* ptr to mem_ready of ruu_station */ ) { byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); md_addr_t blk_addr = CACHE_TAGSET(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; int i,mshr_hit = -1; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto 
cache_hit; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } /* cache block not found */ /* **MISS** */ cp->misses++;/// printf("mshr enabled=%d/n",mshr_enabled);//printf("cache_access miss num_mshr=%d/n",cp->num_mshr);//printf("cache_access with MSHR/n");// search mshr first to see if already exists, if so wait// if not in mshr, insert if (cp->num_mshr>0) { cp->mshr_accesses++; for (i = 0; i < cp->num_mshr; i++) { if (cp->mshr[i].ready <= now) { /* we have an empty mshr, so we can proceed with the miss */ mshr_hit = i; cp->mshr_misses++; //printf("MSHR:miss=%d",cp->mshr_misses); break; } } if (mshr_hit == -1) { /* no empty mshr, so stall! */ cp->mshr_full++; if(mem_ready!=NULL)*mem_ready = cp->ready; //if (cp->ready <= now) panic("Should have had empty mshr!");//.........这里部分代码省略.........
开发者ID:jnaneshm,项目名称:614_hw4,代码行数:101,
示例20: allocated/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ byte_t *p = vp; //tag, set is the address want to access md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; //printf("mother fucker/n"); //sprintf(buf, "%s.inv_rate", name); /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } else { /* 
low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } /* cache block not found */ /* **MISS** */ cp->misses++; /* select the appropriate block to replace, and re-link this entry to the appropriate place in the way list */ switch (cp->policy) { case LRU: case FIFO: repl = cp->sets[set].way_tail; update_way_list(&cp->sets[set], repl, Head); //the tail of way list is the victim //move the victim to the head and wait new data block replacing it //each time when cache misses, new fetched cache block will be placed in MRU position //cp->sets[set].way_tail still points to victim cache, but the position of victim cache is moved to the head of way list of this set break; case Random: { int bindex = myrand() & (cp->assoc - 1); repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex); }//.........这里部分代码省略.........
开发者ID:Monster-Zou,项目名称:simplesim-3.0,代码行数:101,
注:本文中的CACHE_HASH函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 C++ CADR函数代码示例 C++ CA2W函数代码示例 |