This tutorial's C++ HEAVY_STAT code examples are quite practical; we hope they help you.
This article collects typical usage examples of the C++ HEAVY_STAT function. If you have been wondering how HEAVY_STAT is used in C++ and what it looks like in practice, the hand-picked examples below should help. This article presents 26 code examples of HEAVY_STAT, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better C++ code samples.

Example 1: mono_sgen_nursery_alloc

```cpp
void*
mono_sgen_nursery_alloc (size_t size)
{
	Fragment *frag;

	DEBUG (4, fprintf (gc_debug_file, "Searching nursery for size: %zd\n", size));
	size = SGEN_ALIGN_UP (size);

	HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_requests));

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

restart:
	for (frag = unmask (nursery_fragments); frag; frag = unmask (frag->next)) {
		HEAVY_STAT (InterlockedIncrement (&stat_alloc_iterations));

		if (size <= (frag->fragment_end - frag->fragment_next)) {
			void *p = alloc_from_fragment (frag, size);
			if (!p) {
				HEAVY_STAT (InterlockedIncrement (&stat_alloc_retries));
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, size, FIXED_ALLOC);
#endif
			return p;
		}
	}
	return NULL;
}
```
Developer ID: Sciumo, Project: mono, Lines: 31
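Before the remaining examples, it helps to know what HEAVY_STAT itself does. In the Mono SGen sources these snippets come from, it is a conditional-compilation macro: the statistics code is emitted only when heavy statistics are enabled at build time, and compiles away to nothing otherwise. A minimal sketch of that pattern (the guard name HEAVY_STATISTICS follows Mono's convention; treat the exact spelling as an assumption):

```cpp
#ifdef HEAVY_STATISTICS
/* Statistics build: the argument is executed as-is. */
#define HEAVY_STAT(x)	x
#else
/* Normal build: the whole expression disappears, costing nothing. */
#define HEAVY_STAT(x)
#endif

/* Usage, as in the examples: the counter update only exists in stats builds. */
/* HEAVY_STAT (++stat_nursery_alloc_requests); */
```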
Example 2: major_alloc_degraded

```cpp
/*
 * size is already rounded up and we hold the GC lock.
 */
static void*
major_alloc_degraded (MonoVTable *vtable, size_t size)
{
	GCMemSection *section;
	void **p = NULL;
	g_assert (size <= SGEN_MAX_SMALL_OBJ_SIZE);
	HEAVY_STAT (++stat_objects_alloced_degraded);
	HEAVY_STAT (stat_bytes_alloced_degraded += size);
	for (section = section_list; section; section = section->block.next) {
		if ((section->end_data - section->next_data) >= size) {
			p = (void**)section->next_data;
			break;
		}
	}
	if (!p) {
		section = alloc_major_section ();
		section->is_to_space = FALSE;
		/* FIXME: handle OOM */
		p = (void**)section->next_data;
		sgen_register_major_sections_alloced (1);
	}
	section->next_data += size;
	DEBUG (3, fprintf (gc_debug_file, "Allocated (degraded) object %p, vtable: %p (%s), size: %zd in section %p\n", p, vtable, vtable->klass->name, size, section));
	*p = vtable;
	return p;
}
```
Developer ID: av8, Project: mono, Lines: 29
Example 3: sgen_fragment_allocator_par_alloc

```cpp
void*
sgen_fragment_allocator_par_alloc (SgenFragmentAllocator *allocator, size_t size)
{
	SgenFragment *frag;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

restart:
	for (frag = (SgenFragment *)unmask (allocator->alloc_head); unmask (frag); frag = (SgenFragment *)unmask (frag->next)) {
		HEAVY_STAT (++stat_alloc_iterations);

		if (size <= (size_t)(frag->fragment_end - frag->fragment_next)) {
			void *p = par_alloc_from_fragment (allocator, frag, size);
			if (!p) {
				HEAVY_STAT (++stat_alloc_retries);
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, size, FIXED_ALLOC);
#endif
			return p;
		}
	}
	return NULL;
}
```
Developer ID: Profit0004, Project: mono, Lines: 27
Example 4: major_copy_or_mark_object

```cpp
static void
major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
{
	void *obj = *ptr;
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, g_assert (obj));
	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	if (ptr_in_nursery (obj)) {
		int word, bit;
		char *forwarded;

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;

		HEAVY_STAT (++stat_objects_copied_major);

		obj = copy_object_no_checks (obj, queue);
		*ptr = obj;

		/*
		 * FIXME: See comment for copy_object_no_checks(). If
		 * we have that, we can let the allocation function
		 * give us the block info, too, and we won't have to
		 * re-fetch it.
		 */
		block = MS_BLOCK_FOR_OBJ (obj);
		MS_CALC_MARK_BIT (word, bit, obj);
		DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
		MS_SET_MARK_BIT (block, word, bit);
	} else {
#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		mword objsize;

		objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			block = MS_BLOCK_FOR_OBJ (obj);
			MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
		} else {
			if (SGEN_OBJECT_IS_PINNED (obj))
				return;
			binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), mono_sgen_safe_object_get_size ((MonoObject*)obj));
			SGEN_PIN_OBJECT (obj);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (queue, obj);
		}
	}
}
```
Developer ID: massimiliano-mantione, Project: mono, Lines: 60
Example 5: global_remset_location_was_not_added

```cpp
/*
 * Tries to check if a given remset location was already added to the global remset.
 *
 * A 2-entry LRU cache of recently seen location remsets.
 *
 * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
 *
 * Returns TRUE if the element was added.
 */
static gboolean
global_remset_location_was_not_added (gpointer ptr)
{
	gpointer first = global_remset_cache [0], second;
	if (first == ptr) {
		HEAVY_STAT (++stat_global_remsets_discarded);
		return FALSE;
	}

	second = global_remset_cache [1];

	if (second == ptr) {
		/* Move the second to the front */
		global_remset_cache [0] = second;
		global_remset_cache [1] = first;

		HEAVY_STAT (++stat_global_remsets_discarded);
		return FALSE;
	}

	global_remset_cache [0] = second;
	global_remset_cache [1] = ptr;
	return TRUE;
}
```
Developer ID: Sectoid, Project: mono, Lines: 35
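To make the cache behavior above concrete, here is a minimal, self-contained sketch of the same two-slot filter (plain C++, hypothetical names, no Mono dependencies), with a few assertions showing which lookups are treated as new:

```cpp
#include <cassert>

static void *cache[2];  // stand-in for global_remset_cache

// Mirrors global_remset_location_was_not_added: returns true when ptr
// was not cached (caller should process it), false on a cache hit.
static bool location_was_not_added (void *ptr)
{
	void *first = cache[0];
	if (first == ptr)
		return false;            // hit in slot 0
	void *second = cache[1];
	if (second == ptr) {         // hit in slot 1: rotate it to slot 0
		cache[0] = second;
		cache[1] = first;
		return false;
	}
	cache[0] = second;           // miss: slot 0's old entry is evicted
	cache[1] = ptr;
	return true;
}

int main ()
{
	int a, b;
	assert (location_was_not_added (&a));    // first sighting
	assert (!location_was_not_added (&a));   // duplicate is filtered
	assert (location_was_not_added (&b));    // different location
	assert (!location_was_not_added (&b));   // now filtered too
}
```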
Example 6: mono_sgen_ssb_wbarrier_generic_nostore

```cpp
static void
mono_sgen_ssb_wbarrier_generic_nostore (gpointer ptr)
{
	gpointer *buffer;
	int index;
	TLAB_ACCESS_INIT;

	LOCK_GC;

	buffer = STORE_REMSET_BUFFER;
	index = STORE_REMSET_BUFFER_INDEX;
	/* This simple optimization eliminates a sizable portion of entries.
	   Comparing it to the last but one entry as well doesn't eliminate
	   significantly more entries. */
	if (buffer [index] == ptr) {
		UNLOCK_GC;
		return;
	}

	HEAVY_STAT (++stat_wbarrier_generic_store_remset);

	++index;
	if (index >= STORE_REMSET_BUFFER_SIZE) {
		evacuate_remset_buffer ();
		index = STORE_REMSET_BUFFER_INDEX;
		g_assert (index == 0);
		++index;
	}
	buffer [index] = ptr;
	STORE_REMSET_BUFFER_INDEX = index;

	UNLOCK_GC;
}
```
Developer ID: Sectoid, Project: mono, Lines: 33
Example 7: sgen_gray_object_enqueue

```cpp
void
sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc)
{
	GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);

	HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);

	SGEN_ASSERT (9, obj, "enqueueing a null object");
	//sgen_check_objref (obj);

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func)
		queue->enqueue_check_func (obj);
#endif

	if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
		if (queue->first) {
			/* Set the current section size back to default, might have been changed by sgen_gray_object_dequeue_section */
			queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
		}

		sgen_gray_object_alloc_queue_section (queue);
	}
	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
	*++queue->cursor = entry;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_enqueue (queue, queue->cursor, obj);
#endif
}
```
Developer ID: Lixire, Project: mono, Lines: 31
Example 8: sgen_gray_object_alloc_queue_section

```cpp
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *section;

	HEAVY_STAT (stat_gray_queue_section_alloc ++);

	if (queue->alloc_prepare_func)
		queue->alloc_prepare_func (queue);

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	section->size = SGEN_GRAY_QUEUE_SECTION_SIZE;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	queue->first = section;
	queue->cursor = section->entries - 1;
}
```
Developer ID: Lixire, Project: mono, Lines: 30
Example 9: sgen_gray_object_dequeue

```cpp
GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue)
{
	GrayQueueEntry entry;

	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section = queue->first;
		queue->first = section->next;
		section->next = queue->free_list;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;
	}

	return entry;
}
```
Developer ID: Lixire, Project: mono, Lines: 34
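Examples 7 through 9 are the serial half of a segmented stack: a cursor bumps through a fixed-size array inside the current section, and sections are linked in and unlinked as the cursor crosses either end. The following is a stripped-down sketch of that structure (hypothetical names; heap-allocated sections instead of SGen's internal allocator and free list):

```cpp
#include <cstddef>

constexpr std::size_t SECTION_SIZE = 128;  // SGen uses a larger, tuned size

struct Section {
	void *entries[SECTION_SIZE];
	Section *next;
};

struct GrayStack {
	Section *first = nullptr;  // top-most section
	void **cursor = nullptr;   // last valid entry within 'first'
};

void push (GrayStack *s, void *obj)
{
	// Allocate a fresh section when empty or when the top one is full.
	if (!s->first || s->cursor == s->first->entries + SECTION_SIZE - 1) {
		Section *sec = new Section ();
		sec->next = s->first;
		s->first = sec;
		s->cursor = sec->entries - 1;  // same "one before start" convention as SGen
	}
	*++s->cursor = obj;
}

void *pop (GrayStack *s)
{
	if (!s->first)
		return nullptr;  // empty
	void *obj = *s->cursor--;
	if (s->cursor < s->first->entries) {  // section drained: unlink it
		Section *sec = s->first;
		s->first = sec->next;
		s->cursor = s->first ? s->first->entries + SECTION_SIZE - 1 : nullptr;
		delete sec;  // SGen instead pushes it onto a free list for reuse
	}
	return obj;
}
```

Keeping drained sections on a free list, as examples 8 and 9 do, avoids going back to the allocator at every section boundary.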
Example 10: sgen_gray_object_enqueue

```cpp
void
sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc, gboolean is_parallel)
{
	GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);

	HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);

	SGEN_ASSERT (9, obj, "enqueueing a null object");
	//sgen_check_objref (obj);

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func)
		queue->enqueue_check_func (obj);
#endif

	if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
		if (queue->first) {
			/*
			 * We don't actively update the section size with each push/pop. For the first
			 * section we determine the size from the cursor position. For the rest of the
			 * sections we need to have the size set.
			 */
			queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
		}

		sgen_gray_object_alloc_queue_section (queue, is_parallel);
	}
	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
	*++queue->cursor = entry;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_enqueue (queue, queue->cursor, obj);
#endif
}
```
Developer ID: medo64, Project: mono, Lines: 35
Example 11: add_nursery_frag

```cpp
/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 */
static void
add_nursery_frag (size_t frag_size, char* frag_start, char* frag_end)
{
	DEBUG (4, fprintf (gc_debug_file, "Found empty fragment: %p-%p, size: %zd\n", frag_start, frag_end, frag_size));
	binary_protocol_empty (frag_start, frag_size);
	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size >= SGEN_MAX_NURSERY_WASTE) {
		/* memsetting just the first chunk start is bound to provide better cache locality */
		if (mono_sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
			memset (frag_start, 0, frag_size);

#ifdef NALLOC_DEBUG
		/* XXX convert this into a flight record entry
		printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
		*/
#endif
		add_fragment (frag_start, frag_end);
		fragment_total += frag_size;
	} else {
		/* Clear unused fragments, pinning depends on this */
		/* TODO place an int[] here instead of the memset if size justifies it */
		memset (frag_start, 0, frag_size);
		HEAVY_STAT (InterlockedExchangeAdd (&stat_wasted_bytes_small_areas, frag_size));
	}
}
```
Developer ID: Sciumo, Project: mono, Lines: 30
Example 12: add_nursery_frag

```cpp
/*
 * We found a fragment of free memory in the nursery: memzero it and if
 * it is big enough, add it to the list of fragments that can be used for
 * allocation.
 */
static void
add_nursery_frag (SgenFragmentAllocator *allocator, size_t frag_size, char* frag_start, char* frag_end)
{
	SGEN_LOG (4, "Found empty fragment: %p-%p, size: %zd", frag_start, frag_end, frag_size);
	binary_protocol_empty (frag_start, frag_size);
	/* Not worth dealing with smaller fragments: need to tune */
	if (frag_size >= SGEN_MAX_NURSERY_WASTE) {
		/* memsetting just the first chunk start is bound to provide better cache locality */
		if (sgen_get_nursery_clear_policy () == CLEAR_AT_GC)
			memset (frag_start, 0, frag_size);
		else if (sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG)
			memset (frag_start, 0xff, frag_size);

#ifdef NALLOC_DEBUG
		/* XXX convert this into a flight record entry
		printf ("\tfragment [%p %p] size %zd\n", frag_start, frag_end, frag_size);
		*/
#endif
		sgen_fragment_allocator_add (allocator, frag_start, frag_end);
		fragment_total += frag_size;
	} else {
		/* Clear unused fragments, pinning depends on this */
		sgen_clear_range (frag_start, frag_end);
		HEAVY_STAT (stat_wasted_bytes_small_areas += frag_size);
	}
}
```
Developer ID: Profit0004, Project: mono, Lines: 31
Example 13: sgen_fragment_allocator_serial_alloc

```cpp
void*
sgen_fragment_allocator_serial_alloc (SgenFragmentAllocator *allocator, size_t size)
{
	SgenFragment *frag;
	SgenFragment **previous;
#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	previous = &allocator->alloc_head;

	for (frag = *previous; frag; frag = *previous) {
		char *p = (char *)serial_alloc_from_fragment (previous, frag, size);

		HEAVY_STAT (++stat_alloc_iterations);
		if (p) {
#ifdef NALLOC_DEBUG
			add_alloc_record (p, size, FIXED_ALLOC);
#endif
			return p;
		}
		previous = &frag->next;
	}
	return NULL;
}
```
Developer ID: Profit0004, Project: mono, Lines: 26
Example 14: sgen_gray_object_free_queue_section

```cpp
void
sgen_gray_object_free_queue_section (GrayQueueSection *section)
{
	HEAVY_STAT (stat_gray_queue_section_free ++);

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
	sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
}
```
Developer ID: medo64, Project: mono, Lines: 8
Example 15: sgen_nursery_alloc_range

```cpp
void*
sgen_nursery_alloc_range (size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SGEN_LOG (4, "Searching for byte range desired size: %zd minimum size %zd", desired_size, minimum_size);

	HEAVY_STAT (++stat_nursery_alloc_range_requests);

	return sgen_fragment_allocator_par_range_alloc (&mutator_allocator, desired_size, minimum_size, out_alloc_size);
}
```
Developer ID: Profit0004, Project: mono, Lines: 9
Example 16: sgen_gray_object_dequeue

```cpp
GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueEntry entry;

	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section;
		gint32 old_num_sections = 0;

		if (is_parallel)
			old_num_sections = mono_atomic_dec_i32 (&queue->num_sections);
		else
			queue->num_sections--;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_lock (&queue->steal_mutex);
		}

		section = queue->first;
		queue->first = section->next;
		if (queue->first) {
			queue->first->prev = NULL;
		} else {
			queue->last = NULL;
			SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections ?");
		}
		section->next = queue->free_list;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_unlock (&queue->steal_mutex);
		}
	}

	return entry;
}
```
Developer ID: medo64, Project: mono, Lines: 56
Example 17: sgen_nursery_alloc

```cpp
void*
sgen_nursery_alloc (size_t size)
{
	SGEN_ASSERT (1, size >= sizeof (MonoObject) && size <= SGEN_MAX_SMALL_OBJ_SIZE, "Invalid nursery object size");

	SGEN_LOG (4, "Searching nursery for size: %zd", size);
	size = SGEN_ALIGN_UP (size);

	HEAVY_STAT (InterlockedIncrement (&stat_nursery_alloc_requests));

	return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
}
```
Developer ID: Adamcbrz, Project: mono, Lines: 12
Example 18: sgen_nursery_alloc

```cpp
void*
sgen_nursery_alloc (size_t size)
{
	SGEN_ASSERT (1, size >= (SGEN_CLIENT_MINIMUM_OBJECT_SIZE + CANARY_SIZE) && size <= (SGEN_MAX_SMALL_OBJ_SIZE + CANARY_SIZE), "Invalid nursery object size");

	SGEN_LOG (4, "Searching nursery for size: %zd", size);
	size = SGEN_ALIGN_UP (size);

	HEAVY_STAT (++stat_nursery_alloc_requests);

	return sgen_fragment_allocator_par_alloc (&mutator_allocator, size);
}
```
Developer ID: Profit0004, Project: mono, Lines: 12
Example 19: sgen_fragment_allocator_serial_range_alloc

```cpp
void*
sgen_fragment_allocator_serial_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SgenFragment *frag, **previous, *min_frag = NULL, **prev_min_frag = NULL;
	size_t current_minimum = minimum_size;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	previous = &allocator->alloc_head;

	for (frag = *previous; frag; frag = *previous) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		HEAVY_STAT (++stat_alloc_range_iterations);

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = serial_alloc_from_fragment (previous, frag, desired_size);
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (current_minimum <= frag_size) {
			min_frag = frag;
			prev_min_frag = previous;
			current_minimum = frag_size;
		}
		previous = &frag->next;
	}

	if (min_frag) {
		void *p;
		size_t frag_size = min_frag->fragment_end - min_frag->fragment_next;
		*out_alloc_size = frag_size;

		p = serial_alloc_from_fragment (prev_min_frag, min_frag, frag_size);
#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}
```
Developer ID: Profit0004, Project: mono, Lines: 50
Example 20: sgen_gray_object_alloc_queue_section

```cpp
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueSection *section;

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		HEAVY_STAT (stat_gray_queue_section_alloc ++);

		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	/* Section is empty */
	section->size = 0;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = section->entries - 1;

	if (is_parallel) {
		mono_memory_write_barrier ();
		/*
		 * FIXME
		 * we could probably optimize the code to only rely on the write barrier
		 * for synchronization with the stealer thread. Additionally we could also
		 * do a write barrier once every other gray queue change, and request
		 * to have a minimum of sections before stealing, to keep consistency.
		 */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}
```
Developer ID: medo64, Project: mono, Lines: 47
Example 21: mono_sgen_ssb_begin_scan_remsets

```cpp
static void
mono_sgen_ssb_begin_scan_remsets (void *start_nursery, void *end_nursery, SgenGrayQueue *queue)
{
	RememberedSet *remset;
	mword *p, *next_p, *store_pos;

	/* the global one */
	for (remset = global_remset; remset; remset = remset->next) {
		DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
		store_pos = remset->data;
		for (p = remset->data; p < remset->store_next; p = next_p) {
			void **ptr = (void**)p [0];

			/* Ignore previously processed remset. */
			if (!global_remset_location_was_not_added (ptr)) {
				next_p = p + 1;
				continue;
			}

			next_p = handle_remset (p, start_nursery, end_nursery, TRUE, queue);

			/*
			 * Clear global remsets of locations which no longer point to the
			 * nursery. Otherwise, they could grow indefinitely between major
			 * collections.
			 *
			 * Since all global remsets are location remsets, we don't need to unmask the pointer.
			 */
			if (mono_sgen_ptr_in_nursery (*ptr)) {
				*store_pos ++ = p [0];
				HEAVY_STAT (++stat_global_remsets_readded);
			}
		}

		/* Truncate the remset */
		remset->store_next = store_pos;
	}
}
```
Developer ID: Sectoid, Project: mono, Lines: 38
Example 22: par_alloc_from_fragment

```cpp
static void*
par_alloc_from_fragment (SgenFragmentAllocator *allocator, SgenFragment *frag, size_t size)
{
	char *p = frag->fragment_next;
	char *end = p + size;

	if (end > frag->fragment_end)
		return NULL;

	/* p = frag->fragment_next must happen before */
	mono_memory_barrier ();

	if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->fragment_next, end, p) != p)
		return NULL;

	if (frag->fragment_end - end < SGEN_MAX_NURSERY_WASTE) {
		SgenFragment *next, **prev_ptr;

		/*
		 * Before we clean the remaining nursery, we must claim the remaining space
		 * as it could end up being used by the range allocator, since it can end up
		 * allocating from this dying fragment as it doesn't respect SGEN_MAX_NURSERY_WASTE
		 * when doing second chance allocation.
		 */
		if ((sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION || sgen_get_nursery_clear_policy () == CLEAR_AT_TLAB_CREATION_DEBUG) && claim_remaining_size (frag, end)) {
			sgen_clear_range (end, frag->fragment_end);
			HEAVY_STAT (stat_wasted_bytes_trailer += frag->fragment_end - end);
#ifdef NALLOC_DEBUG
			add_alloc_record (end, frag->fragment_end - end, BLOCK_ZEROING);
#endif
		}

		prev_ptr = find_previous_pointer_fragment (allocator, frag);

		/* Use Michael's linked list remove */

		/* prev_ptr will be null if the fragment was removed concurrently */
		while (prev_ptr) {
			next = frag->next;

			/* already deleted */
			if (!get_mark (next)) {
				/* frag->next read must happen before the first CAS */
				mono_memory_write_barrier ();

				/* Fail if the next node is removed concurrently and its CAS wins */
				if (InterlockedCompareExchangePointer ((volatile gpointer*)&frag->next, mask (next, 1), next) != next) {
					continue;
				}
			}

			/* The second CAS must happen after the first CAS or frag->next. */
			mono_memory_write_barrier ();

			/* Fail if the previous node was deleted and its CAS wins */
			if (InterlockedCompareExchangePointer ((volatile gpointer*)prev_ptr, unmask (next), frag) != frag) {
				prev_ptr = find_previous_pointer_fragment (allocator, frag);
				continue;
			}
			break;
		}
	}

	return p;
}
```
Developer ID: Profit0004, Project: mono, Lines: 65
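The fast path of par_alloc_from_fragment is a classic compare-and-swap bump allocation: read fragment_next, compute the new bump pointer, and publish it with a CAS; a losing thread simply reports failure and the caller retries. A minimal sketch of that core with C++11 atomics (hypothetical names, leaving out the fragment-retirement logic):

```cpp
#include <atomic>
#include <cstddef>

// Trivially simplified fragment: [next, end) is the free space.
struct Fragment {
	std::atomic<char*> next;
	char *end;
};

// Try to carve 'size' bytes out of 'frag'. Returns the allocation start,
// or nullptr when the fragment is too small or another thread won the race
// (the caller retries, as sgen_fragment_allocator_par_alloc does).
char *try_bump_alloc (Fragment *frag, std::size_t size)
{
	char *p = frag->next.load (std::memory_order_acquire);
	char *new_next = p + size;
	if (new_next > frag->end)
		return nullptr;  // doesn't fit in this fragment
	if (!frag->next.compare_exchange_strong (p, new_next,
	                                         std::memory_order_acq_rel))
		return nullptr;  // lost the race: caller retries
	return p;            // [p, p + size) now belongs to this thread
}
```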
Example 23: mono_gc_alloc_obj_nolock

```cpp
/*
 * Provide a variant that takes just the vtable for small fixed-size objects.
 * The aligned size is already computed and stored in vt->gc_descr.
 * Note: every SGEN_SCAN_START_SIZE or so we are given the chance to do some special
 * processing. We can keep track of where objects start, for example,
 * so when we scan the thread stacks for pinned objects, we can start
 * a search for the pinned object in SGEN_SCAN_START_SIZE chunks.
 */
static void*
mono_gc_alloc_obj_nolock (MonoVTable *vtable, size_t size)
{
	/* FIXME: handle OOM */
	void **p;
	char *new_next;
	TLAB_ACCESS_INIT;

	HEAVY_STAT (++stat_objects_alloced);
	if (size <= SGEN_MAX_SMALL_OBJ_SIZE)
		HEAVY_STAT (stat_bytes_alloced += size);
	else
		HEAVY_STAT (stat_bytes_alloced_los += size);

	size = ALIGN_UP (size);

	g_assert (vtable->gc_descr);

	if (G_UNLIKELY (has_per_allocation_action)) {
		static int alloc_count;
		int current_alloc = InterlockedIncrement (&alloc_count);

		if (collect_before_allocs) {
			if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
				sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered");
				if (!degraded_mode && sgen_can_alloc_size (size) && size <= SGEN_MAX_SMALL_OBJ_SIZE) {
					// FIXME:
					g_assert_not_reached ();
				}
			}
		} else if (verify_before_allocs) {
			if ((current_alloc % verify_before_allocs) == 0)
				sgen_check_whole_heap_stw ();
		}
	}

	/*
	 * We must already have the lock here instead of after the
	 * fast path because we might be interrupted in the fast path
	 * (after confirming that new_next < TLAB_TEMP_END) by the GC,
	 * and we'll end up allocating an object in a fragment which
	 * no longer belongs to us.
	 *
	 * The managed allocator does not do this, but it's treated
	 * specially by the world-stopping code.
	 */

	if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
		p = sgen_los_alloc_large_inner (vtable, size);
	} else {
		/* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */

		p = (void**)TLAB_NEXT;
		/* FIXME: handle overflow */
		new_next = (char*)p + size;
		TLAB_NEXT = new_next;

		if (G_LIKELY (new_next < TLAB_TEMP_END)) {
			/* Fast path */

			/*
			 * FIXME: We might need a memory barrier here so the change to tlab_next is
			 * visible before the vtable store.
			 */

			DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
			binary_protocol_alloc (p , vtable, size);
			if (G_UNLIKELY (MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ()))
				MONO_GC_NURSERY_OBJ_ALLOC ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
			g_assert (*p == NULL);
			mono_atomic_store_seq (p, vtable);

			return p;
		}

		/* Slow path */

		/* there are two cases: the object is too big or we run out of space in the TLAB */
		/* we also reach here when the thread does its first allocation after a minor
		 * collection, since the tlab_ variables are initialized to NULL.
		 * there can be another case (from ORP), if we cooperate with the runtime a bit:
		 * objects that need finalizers can have the high bit set in their size
		 * so the above check fails and we can readily add the object to the queue.
		 * This avoids taking again the GC lock when registering, but this is moot when
		 * doing thread-local allocation, so it may not be a good idea.
		 */

		if (TLAB_NEXT >= TLAB_REAL_END) {
			int available_in_tlab;
			/*
			 * Run out of space in the TLAB. When this happens, some amount of space
			 * remains in the TLAB, but not enough to satisfy the current allocation
			 * request. Currently, we retire the TLAB in all cases, later we could
//......... part of the code omitted here .........
```
Developer ID: Lavesson, Project: mono, Lines: 101
Example 24: mono_gc_try_alloc_obj_nolock

```cpp
static void*
mono_gc_try_alloc_obj_nolock (MonoVTable *vtable, size_t size)
{
	void **p;
	char *new_next;
	TLAB_ACCESS_INIT;

	size = ALIGN_UP (size);

	g_assert (vtable->gc_descr);
	if (size > SGEN_MAX_SMALL_OBJ_SIZE)
		return NULL;

	if (G_UNLIKELY (size > tlab_size)) {
		/* Allocate directly from the nursery */
		p = sgen_nursery_alloc (size);
		if (!p)
			return NULL;
		sgen_set_nursery_scan_start ((char*)p);

		/* FIXME we should use weak memory ops here. Should help specially on x86. */
		if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
			memset (p, 0, size);
	} else {
		int available_in_tlab;
		char *real_end;
		/* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */

		p = (void**)TLAB_NEXT;
		/* FIXME: handle overflow */
		new_next = (char*)p + size;

		real_end = TLAB_REAL_END;
		available_in_tlab = real_end - (char*)p;

		if (G_LIKELY (new_next < real_end)) {
			TLAB_NEXT = new_next;

			/* Second case, we overflowed temp end */
			if (G_UNLIKELY (new_next >= TLAB_TEMP_END)) {
				sgen_set_nursery_scan_start (new_next);
				/* we just bump tlab_temp_end as well */
				TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
				DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
			}
		} else if (available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
			/* Allocate directly from the nursery */
			p = sgen_nursery_alloc (size);
			if (!p)
				return NULL;

			if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
				memset (p, 0, size);
		} else {
			size_t alloc_size = 0;

			sgen_nursery_retire_region (p, available_in_tlab);
			new_next = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
			p = (void**)new_next;
			if (!p)
				return NULL;

			TLAB_START = (char*)new_next;
			TLAB_NEXT = new_next + size;
			TLAB_REAL_END = new_next + alloc_size;
			TLAB_TEMP_END = new_next + MIN (SGEN_SCAN_START_SIZE, alloc_size);
			sgen_set_nursery_scan_start ((char*)p);

			if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
				memset (new_next, 0, alloc_size);

			MONO_GC_NURSERY_TLAB_ALLOC ((mword)new_next, alloc_size);
		}
	}

	HEAVY_STAT (++stat_objects_alloced);
	HEAVY_STAT (stat_bytes_alloced += size);

	DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
	binary_protocol_alloc (p, vtable, size);
	if (G_UNLIKELY (MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ()))
		MONO_GC_NURSERY_OBJ_ALLOC ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
	g_assert (*p == NULL); /* FIXME disable this in non debug builds */

	mono_atomic_store_seq (p, vtable);

	return p;
}
```
Developer ID: Lavesson, Project: mono, Lines: 88
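Examples 23 and 24 both orbit the same TLAB discipline: bump tlab_next and stay on the fast path while the bump stays below tlab_temp_end; otherwise retire or refill the buffer. A compressed sketch of just that fast path (hypothetical names; the real code also handles scan starts, large objects, and statistics):

```cpp
#include <cstddef>

// Hypothetical, stripped-down thread-local allocation buffer.
struct Tlab {
	char *next;      // next free byte in the buffer
	char *temp_end;  // current fast-path limit (<= the buffer's real end)
};

// Returns the new object's address, or nullptr when the caller must take
// the slow path (refill the TLAB, allocate from the nursery, etc.).
void *tlab_fast_alloc (Tlab *tlab, std::size_t size /* already aligned */)
{
	char *p = tlab->next;
	char *new_next = p + size;
	if (new_next < tlab->temp_end) {
		tlab->next = new_next;  // plain bump: no locks or atomics needed,
		return p;               // since the buffer is thread-local
	}
	return nullptr;
}
```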
Example 25: sgen_nursery_retire_region

```cpp
/*** Nursery memory allocation ***/
void
sgen_nursery_retire_region (void *address, ptrdiff_t size)
{
	HEAVY_STAT (stat_wasted_bytes_discarded_fragments += size);
}
```
Developer ID: Profit0004, Project: mono, Lines: 6
Example 26: sgen_fragment_allocator_par_range_alloc

```cpp
void*
sgen_fragment_allocator_par_range_alloc (SgenFragmentAllocator *allocator, size_t desired_size, size_t minimum_size, size_t *out_alloc_size)
{
	SgenFragment *frag, *min_frag;
	size_t current_minimum;

restart:
	min_frag = NULL;
	current_minimum = minimum_size;

#ifdef NALLOC_DEBUG
	InterlockedIncrement (&alloc_count);
#endif

	for (frag = (SgenFragment *)unmask (allocator->alloc_head); frag; frag = (SgenFragment *)unmask (frag->next)) {
		size_t frag_size = frag->fragment_end - frag->fragment_next;

		HEAVY_STAT (++stat_alloc_range_iterations);

		if (desired_size <= frag_size) {
			void *p;
			*out_alloc_size = desired_size;

			p = par_alloc_from_fragment (allocator, frag, desired_size);
			if (!p) {
				HEAVY_STAT (++stat_alloc_range_retries);
				goto restart;
			}
#ifdef NALLOC_DEBUG
			add_alloc_record (p, desired_size, RANGE_ALLOC);
#endif
			return p;
		}
		if (current_minimum <= frag_size) {
			min_frag = frag;
			current_minimum = frag_size;
		}
	}

	/* The second fragment_next read should be ordered in respect to the first code block */
	mono_memory_barrier ();

	if (min_frag) {
		void *p;
		size_t frag_size;

		frag_size = min_frag->fragment_end - min_frag->fragment_next;
		if (frag_size < minimum_size)
			goto restart;

		*out_alloc_size = frag_size;

		mono_memory_barrier ();
		p = par_alloc_from_fragment (allocator, min_frag, frag_size);

		/* XXX restarting here is quite dubious given this is already second chance allocation. */
		if (!p) {
			HEAVY_STAT (++stat_alloc_retries);
			goto restart;
		}
#ifdef NALLOC_DEBUG
		add_alloc_record (p, frag_size, RANGE_ALLOC);
#endif
		return p;
	}

	return NULL;
}
```
Developer ID: Profit0004, Project: mono, Lines: 68
Note: The HEAVY_STAT examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright belongs to the original authors, and any distribution or use should comply with the corresponding project's license. Please do not reproduce without permission.