
github.com/dotnet/runtime.git
author     Maoni Stephens <Maoni0@users.noreply.github.com>   2021-02-27 01:33:00 +0300
committer  GitHub <noreply@github.com>                        2021-02-27 01:33:00 +0300
commit     c41894db1291eb3a700a2f7a5056a76b7800b2ea (patch)
tree       705dc3e6cd79659279dde39f9d5a4ddfafd57620 /src/coreclr/gc
parent     8468909a85418482d47e669d2d3dc6f151b3f036 (diff)
region memory limit (#48691)
+ implemented the logic to handle when we are really running out of memory; we need to be resilient to this during a GC. We might need an empty region per heap, so we try to get one up front, and if we can't get it and later find out that we do need it, we resort to a special sweep mode.
+ also take virtual memory load into consideration (a standalone sketch of this idea follows the diffstat below).
+ fixed the logic that decides whether we should compact based on space.
+ fixed various places so that when we can't get a region, we don't thread 0 into the region list.
+ made allocated_since_last_gc per more_space_lock; otherwise the accounting is not done correctly.
+ moved the pinning for STRESS_REGIONS into a blocking GC; otherwise the BGC can AV trying to mark the pinned object while it's still being constructed.
+ got rid of the places that used the hard-coded REGION_SIZE.
+ included one perf change in reset_memory.
Diffstat (limited to 'src/coreclr/gc')
-rw-r--r--  src/coreclr/gc/gc.cpp    649
-rw-r--r--  src/coreclr/gc/gcpriv.h   75
2 files changed, 484 insertions, 240 deletions
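
Before the diff itself, here is a minimal, self-contained C++ sketch of the "take virtual memory load into consideration" point from the commit message. It is an illustration only, not the runtime's code: the names va_range, va_memory_load_pct and effective_memory_load (and the numbers in main) are made up here; the actual logic lives in region_allocator::get_va_memory_load and gc_heap::generation_to_condemn in the diff below. The idea is that the percentage of the reserved region address range already handed out is treated as a memory load of its own, and the GC reacts to whichever is higher, physical memory load or VA load.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the reserved virtual address range the region
// allocator manages: [start, end), with 'used' as the high-water mark of
// regions handed out so far.
struct va_range
{
    uint8_t* start;
    uint8_t* used;
    uint8_t* end;
};

// Percentage of the reserved VA range already consumed by regions,
// analogous to region_allocator::get_va_memory_load in the diff below.
static uint32_t va_memory_load_pct (const va_range& r)
{
    return (uint32_t)((r.used - r.start) * 100.0 / (r.end - r.start));
}

// The GC drives its decisions off whichever pressure is higher:
// physical memory load or virtual address load.
static uint32_t effective_memory_load (uint32_t physical_load_pct, const va_range& r)
{
    return std::max (physical_load_pct, va_memory_load_pct (r));
}

int main ()
{
    uint8_t reserve[1024];
    va_range r = { reserve, reserve + 768, reserve + 1024 }; // 75% of the range used
    // With physical load at 60% but VA load at 75%, the effective load is 75%.
    std::printf ("effective load: %u%%\n", (unsigned)effective_memory_load (60, r));
    return 0;
}

In the actual change this max is folded into generation_to_condemn, so running low on the reserved region address range is handled the same way as running low on physical memory.
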
diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 873b238f528..5e1b2099a38 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -2323,7 +2323,7 @@ oom_history gc_heap::oomhist_per_heap[max_oom_history_count];
fgm_history gc_heap::fgm_result;
-size_t gc_heap::allocated_since_last_gc = 0;
+size_t gc_heap::allocated_since_last_gc[2];
BOOL gc_heap::ro_segments_in_range;
@@ -2384,7 +2384,7 @@ heap_segment* gc_heap::ephemeral_heap_segment = 0;
OBJECTHANDLE* gc_heap::pinning_handles_for_alloc = 0;
int gc_heap::ph_index_per_heap = 0;
int gc_heap::pinning_seg_interval = 2;
-int gc_heap::num_gen0_segs = 0;
+size_t gc_heap::num_gen0_regions = 0;
#endif //STRESS_REGIONS
heap_segment* gc_heap::free_regions = 0;
@@ -2395,6 +2395,8 @@ int gc_heap::num_free_regions_added = 0;
int gc_heap::num_free_regions_removed = 0;
+int gc_heap::num_regions_freed_in_sweep = 0;
+
heap_segment* gc_heap::free_large_regions = 0;
int gc_heap::num_free_large_regions = 0;
@@ -2635,6 +2637,8 @@ BOOL gc_heap::fgn_last_gc_was_concurrent = FALSE;
VOLATILE(bool) gc_heap::full_gc_approach_event_set;
+bool gc_heap::special_sweep_p = false;
+
size_t gc_heap::full_gc_counts[gc_type_max];
bool gc_heap::maxgen_size_inc_p = false;
@@ -2940,13 +2944,16 @@ gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp)
switch (tp)
{
case tuning_deciding_condemned_gen:
+#ifndef USE_REGIONS
case tuning_deciding_compaction:
case tuning_deciding_expansion:
+#endif //USE_REGIONS
case tuning_deciding_full_gc:
{
ret = (!ephemeral_gen_fit_p (tp));
break;
}
+#ifndef USE_REGIONS
case tuning_deciding_promote_ephemeral:
{
size_t new_gen0size = approximate_new_allocation();
@@ -2960,8 +2967,12 @@ gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp)
ret = ((soh_segment_size - segment_info_size) < (plan_ephemeral_size + new_gen0size));
break;
}
+#endif //USE_REGIONS
default:
+ {
+ assert (!"invalid tuning reason");
break;
+ }
}
return ret;
@@ -3383,22 +3394,10 @@ sorted_table::clear()
#ifdef USE_REGIONS
inline
-uint8_t* align_on_region (uint8_t* add)
-{
- return (uint8_t*)((size_t)(add + (REGION_SIZE - 1)) & ~(REGION_SIZE - 1));
-}
-
-inline
-uint8_t* align_lower_region (uint8_t* add)
-{
- return (uint8_t*)((size_t)add & ~(REGION_SIZE - 1));
-}
-
-inline
size_t get_basic_region_index_for_address (uint8_t* address)
{
size_t basic_region_index = (size_t)address >> gc_heap::min_segment_size_shr;
- return basic_region_index - ((size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr);
+ return (basic_region_index - ((size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr));
}
// Go from a random address to its region info. The random address could be
@@ -3454,20 +3453,21 @@ bool region_allocator::init (uint8_t* start, uint8_t* end, size_t alignment, uin
// Note: I am allocating a map that covers the whole reserved range.
// We can optimize it to only cover the current heap range.
- size_t num_user_heap_units = (global_region_end - global_region_start) / region_alignment;
+ size_t total_num_units = (global_region_end - global_region_start) / region_alignment;
+ total_free_units = (uint32_t)total_num_units;
- uint32_t* unit_map = new (nothrow) uint32_t[num_user_heap_units];
+ uint32_t* unit_map = new (nothrow) uint32_t[total_num_units];
if (unit_map)
{
- memset (unit_map, 0, sizeof (uint32_t) * num_user_heap_units);
+ memset (unit_map, 0, sizeof (uint32_t) * total_num_units);
region_map_start = unit_map;
region_map_end = region_map_start;
- dprintf (1, ("start: %Ix, end: %Ix, total %Idmb(alignment: %Idmb), map units %d",
+ dprintf (REGIONS_LOG, ("start: %Ix, end: %Ix, total %Idmb(alignment: %Idmb), map units %d",
(size_t)start, (size_t)end,
(size_t)((end - start) / 1024 / 1024),
(alignment / 1024 / 1024),
- num_user_heap_units));
+ total_num_units));
*lowest = global_region_start;
*highest = global_region_end;
@@ -3491,7 +3491,7 @@ uint32_t* region_allocator::region_map_index_of (uint8_t* address)
void region_allocator::make_busy_block (uint32_t* index_start, uint32_t num_units)
{
#ifdef _DEBUG
- dprintf (1, ("MBB[B: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_start), (int)(index_start - region_map_start + num_units)));
+ dprintf (REGIONS_LOG, ("MBB[B: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_start), (int)(index_start - region_map_start + num_units)));
#endif //_DEBUG
*index_start = num_units;
}
@@ -3499,7 +3499,7 @@ void region_allocator::make_busy_block (uint32_t* index_start, uint32_t num_unit
void region_allocator::make_free_block (uint32_t* index_start, uint32_t num_units)
{
#ifdef _DEBUG
- dprintf (1, ("MFB[F: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_start), (int)(index_start - region_map_start + num_units)));
+ dprintf (REGIONS_LOG, ("MFB[F: %Id] %d->%d", (size_t)num_units, (int)(index_start - region_map_start), (int)(index_start - region_map_start + num_units)));
#endif //_DEBUG
*index_start = region_alloc_free_bit | num_units;
}
@@ -3521,7 +3521,7 @@ void region_allocator::print_map (const char* msg)
{
#ifdef _DEBUG
const char* heap_type = "UH";
- dprintf (1, ("\n[%s]-----printing----%s", heap_type, msg));
+ dprintf (REGIONS_LOG, ("[%s]-----printing----%s", heap_type, msg));
uint32_t* current_index = region_map_start;
uint32_t* end_index = region_map_end;
@@ -3533,7 +3533,7 @@ void region_allocator::print_map (const char* msg)
uint32_t current_num_units = get_num_units (current_val);
bool free_p = is_unit_memory_free (current_val);
- dprintf (1, ("[%s][%s: %Id]%d->%d", heap_type, (free_p ? "F" : "B"), (size_t)current_num_units,
+ dprintf (REGIONS_LOG, ("[%s][%s: %Id]%d->%d", heap_type, (free_p ? "F" : "B"), (size_t)current_num_units,
(int)(current_index - map_start),
(int)(current_index - map_start + current_num_units)));
@@ -3542,19 +3542,19 @@ void region_allocator::print_map (const char* msg)
uint32_t total_regions = (uint32_t)((global_region_end - global_region_start) / region_alignment);
- dprintf (1, ("[%s]-----end printing----[%d total, used %d]\n", heap_type, total_regions, (end_index - map_start)));
+ dprintf (REGIONS_LOG, ("[%s]-----end printing----[%d total, used %d]\n", heap_type, total_regions, (end_index - map_start)));
#endif //_DEBUG
}
-uint8_t* region_allocator::allocate_end_uh (uint32_t num_units)
+uint8_t* region_allocator::allocate_end (uint32_t num_units)
{
uint8_t* alloc = NULL;
if (global_region_used < global_region_end)
{
- size_t user_heap_remaining = global_region_end - global_region_used;
+ size_t end_remaining = global_region_end - global_region_used;
- if ((user_heap_remaining / region_alignment) >= num_units)
+ if ((end_remaining / region_alignment) >= num_units)
{
make_busy_block (region_map_end, num_units);
region_map_end += num_units;
@@ -3571,7 +3571,7 @@ uint8_t* region_allocator::allocate (uint32_t num_units)
uint32_t* current_index = region_map_start;
uint32_t* end_index = region_map_end;
- dprintf (1, ("\nsearcing %d->%d", (int)(current_index - region_map_start), (int)(end_index - region_map_start)));
+ dprintf (REGIONS_LOG, ("searching %d->%d", (int)(current_index - region_map_start), (int)(end_index - region_map_start)));
uint32_t* current_free_index_start = 0;
uint32_t num_contiguous_free_units = 0;
uint32_t last_num_free_units = 0;
@@ -3583,7 +3583,7 @@ uint8_t* region_allocator::allocate (uint32_t num_units)
uint32_t current_val = *current_index;
uint32_t current_num_units = get_num_units (current_val);
bool free_p = is_unit_memory_free (current_val);
- dprintf (1, ("ALLOC[%s: %Id]%d->%d", (free_p ? "F" : "B"), (size_t)current_num_units,
+ dprintf (REGIONS_LOG, ("ALLOC[%s: %Id]%d->%d", (free_p ? "F" : "B"), (size_t)current_num_units,
(int)(current_index - region_map_start), (int)(current_index + current_num_units - region_map_start)));
if (free_p)
@@ -3598,13 +3598,14 @@ uint8_t* region_allocator::allocate (uint32_t num_units)
if (num_contiguous_free_units >= num_units)
{
- dprintf (1, ("found %Id contiguous free units(%d->%d), sufficient",
+ dprintf (REGIONS_LOG, ("found %Id contiguous free units(%d->%d), sufficient",
(size_t)num_contiguous_free_units,
(int)(current_free_index_start - region_map_start),
(int)(current_free_index_start - region_map_start + num_contiguous_free_units)));
adjust_map (current_free_index_start, num_contiguous_free_units, num_units);
+ total_free_units -= num_units;
print_map ("alloc: found in free");
return region_address_of (current_free_index_start);
}
@@ -3614,7 +3615,7 @@ uint8_t* region_allocator::allocate (uint32_t num_units)
// Take this opportunity to coalesce free blocks.
if (num_contiguous_free_units > last_num_free_units)
{
- dprintf (1, ("Observed %Id free units in multiple blocks(%Id), coalescing",
+ dprintf (REGIONS_LOG, ("Observed %Id free units in multiple blocks(%Id), coalescing",
(size_t)num_contiguous_free_units, (size_t)last_num_free_units));
make_free_block (current_free_index_start, num_contiguous_free_units);
}
@@ -3634,15 +3635,16 @@ uint8_t* region_allocator::allocate (uint32_t num_units)
region_map_end = current_free_index_start;
}
- uint8_t* alloc = allocate_end_uh (num_units);
+ uint8_t* alloc = allocate_end (num_units);
if (alloc)
{
+ total_free_units -= num_units;
print_map ("alloc: found at the end");
}
else
{
- dprintf (1, ("couldn't find memory at the end! only %Id bytes left", (global_region_end - global_region_used)));
+ dprintf (REGIONS_LOG, ("couldn't find memory at the end! only %Id bytes left", (global_region_end - global_region_used)));
}
return alloc;
@@ -3659,7 +3661,7 @@ bool region_allocator::allocate_region (size_t size, uint8_t** start, uint8_t**
uint32_t num_units = (uint32_t)(alloc_size / alignment);
bool ret = false;
uint8_t* alloc = NULL;
- dprintf (1, ("----GET %d-----", num_units));
+ dprintf (REGIONS_LOG, ("----GET %d-----", num_units));
alloc = allocate (num_units);
*start = alloc;
@@ -3691,10 +3693,22 @@ void region_allocator::delete_region (uint8_t* start)
uint32_t current_val = *current_index;
assert (!is_unit_memory_free (current_val));
- dprintf (1, ("----DEL %d-----", (current_index - region_map_start)));
+ dprintf (REGIONS_LOG, ("----DEL %d-----", (current_index - region_map_start)));
- make_free_block (current_index, current_val);
+ uint8_t* region_end = region_address_of (current_index + current_val);
+ if (region_end == global_region_used)
+ {
+ region_map_end = current_index;
+ dprintf (REGIONS_LOG, ("adjust global used from %Ix to %Ix",
+ global_region_used, region_address_of (current_index)));
+ global_region_used = region_address_of (current_index);
+ }
+ else
+ {
+ make_free_block (current_index, current_val);
+ }
+ total_free_units += current_val;
print_map ("after delete");
}
#endif //USE_REGIONS
@@ -3713,20 +3727,11 @@ uint8_t* align_lower_segment (uint8_t* add)
size_t size_seg_mapping_table_of (uint8_t* from, uint8_t* end)
{
-#ifdef USE_REGIONS
- from = align_lower_region (from);
- end = align_on_region (end);
- dprintf (1, ("region from: %Ix, end: %Ix, size: %Id(%Id)",
- from, end,
- (size_t)sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr),
- (size_t)sizeof (seg_mapping)));
- return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr);
-#else
from = align_lower_segment (from);
end = align_on_segment (end);
- dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end, sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr))));
+ dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end,
+ sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr))));
return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr);
-#endif //USE_REGIONS
}
// for seg_mapping_table we want it to start from a pointer sized address.
@@ -10535,12 +10540,13 @@ void gc_heap::return_free_region (heap_segment* region)
uint8_t* region_start = get_region_start (region);
uint8_t* region_end = heap_segment_reserved (region);
- int num_basic_regions = (int)((region_end - region_start) / REGION_SIZE);
+
+ int num_basic_regions = (int)((region_end - region_start) >> min_segment_size_shr);
dprintf (REGIONS_LOG, ("RETURING region %Ix (%d basic regions) to free, total %d",
heap_segment_mem (region), num_basic_regions, num_free_regions));
for (int i = 0; i < num_basic_regions; i++)
{
- uint8_t* basic_region_start = region_start + (i * REGION_SIZE);
+ uint8_t* basic_region_start = region_start + (i << min_segment_size_shr);
heap_segment* basic_region = get_region_info (basic_region_start);
heap_segment_allocated (basic_region) = 0;
#ifdef MULTIPLE_HEAPS
@@ -10569,7 +10575,7 @@ heap_segment* gc_heap::get_free_region (int gen_number)
num_free_regions--;
num_free_regions_removed++;
region = free_regions;
- dprintf (REGIONS_LOG, ("%d free regions left, get %Ix-%Ix-%Ix",
+ dprintf (REGIONS_LOG, ("%d free regions left, get %Ix-%Ix-%Ix",
num_free_regions, heap_segment_mem (region),
heap_segment_committed (region), heap_segment_used (region)));
free_regions = heap_segment_next (free_regions);
@@ -10612,31 +10618,9 @@ heap_segment* gc_heap::get_free_region (int gen_number)
if (region)
{
-#ifdef BACKGROUND_GC
- if (is_bgc_in_progress())
+ if (!init_table_for_region (gen_number, region))
{
- dprintf (GC_TABLE_LOG, ("new seg %Ix, mark_array is %Ix",
- heap_segment_mem (region), mark_array));
- if (!commit_mark_array_new_seg (__this, region))
- {
- dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new region %Ix-%Ix",
- get_region_start (region), heap_segment_reserved (region)));
-
- // We don't have memory to commit the mark array so we cannot use the new region.
- global_region_allocator.delete_region (get_region_start (region));
- region = 0;
- }
- }
-#endif //BACKGROUND_GC
-
- if (gen_number <= max_generation)
- {
- size_t first_brick = brick_of (heap_segment_mem (region));
- set_brick (first_brick, -1);
- }
- else
- {
- assert (brick_table[brick_of (heap_segment_mem (region))] == 0);
+ region = 0;
}
}
@@ -10721,11 +10705,11 @@ void gc_heap::set_region_plan_gen_num (heap_segment* region, int plan_gen_num)
settings.demotion = TRUE;
}
get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
- heap_segment_demoted_p (region) = true;
+ region->flags |= heap_segment_flags_demoted;
}
else
{
- heap_segment_demoted_p (region) = false;
+ region->flags &= ~heap_segment_flags_demoted;
}
heap_segment_plan_gen_num (region) = plan_gen_num;
@@ -10867,18 +10851,19 @@ void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp
#endif //USE_REGIONS
#ifdef USE_REGIONS
- int num_basic_regions = (int)(size / REGION_SIZE);
+ int num_basic_regions = (int)(size >> min_segment_size_shr);
+ size_t basic_region_size = (size_t)1 << min_segment_size_shr;
dprintf (REGIONS_LOG, ("this region contains %d basic regions", num_basic_regions));
if (num_basic_regions > 1)
{
for (int i = 1; i < num_basic_regions; i++)
{
- uint8_t* basic_region_start = start + (i * REGION_SIZE);
+ uint8_t* basic_region_start = start + (i * basic_region_size);
heap_segment* basic_region = get_region_info (basic_region_start);
heap_segment_allocated (basic_region) = (uint8_t*)(ptrdiff_t)-i;
dprintf (REGIONS_LOG, ("Initing basic region %Ix->%Ix(%Idmb) alloc to %Ix",
- basic_region_start, (basic_region_start + REGION_SIZE),
- (size_t)(REGION_SIZE / 1024 / 1024),
+ basic_region_start, (basic_region_start + basic_region_size),
+ (size_t)(basic_region_size / 1024 / 1024),
heap_segment_allocated (basic_region)));
heap_segment_gen_num (basic_region) = gen_num;
@@ -11695,7 +11680,8 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
if (!reserve_range)
return E_OUTOFMEMORY;
- if (!global_region_allocator.init (reserve_range, (reserve_range + reserve_size), REGION_SIZE,
+ if (!global_region_allocator.init (reserve_range, (reserve_range + reserve_size),
+ ((size_t)1 << min_segment_size_shr),
&g_gc_lowest_address, &g_gc_highest_address))
return E_OUTOFMEMORY;
}
@@ -12222,7 +12208,8 @@ gc_heap::init_gc_heap (int h_number)
time_bgc_last = 0;
- allocated_since_last_gc = 0;
+ allocated_since_last_gc[0] = 0;
+ allocated_since_last_gc[1] = 0;
#ifdef SPINLOCK_HISTORY
spinlock_info_index = 0;
@@ -12367,7 +12354,7 @@ gc_heap::init_gc_heap (int h_number)
enable_preemptive();
ph_index_per_heap = 0;
pinning_seg_interval = 2;
- num_gen0_segs = 0;
+ num_gen0_regions = 0;
#endif //STRESS_REGIONS
free_regions = 0;
num_free_regions = 0;
@@ -12381,6 +12368,9 @@ gc_heap::init_gc_heap (int h_number)
end_gen0_region_space = 0;
gen0_pinned_free_space = 0;
gen0_large_chunk_found = false;
+ // REGIONS PERF TODO: we should really allocate the POH regions together just so that
+ // they wouldn't prevent us from coalescing free regions to form a large virtual address
+ // range.
if (!initial_make_soh_regions (__this) ||
!initial_make_uoh_regions (loh_generation, __this) ||
!initial_make_uoh_regions (poh_generation, __this))
@@ -14229,8 +14219,7 @@ BOOL gc_heap::short_on_end_of_seg (heap_segment* seg)
#else
BOOL sufficient_p = sufficient_space_end_seg (allocated,
heap_segment_reserved (seg),
- end_space_after_gc(),
- tuning_deciding_short_on_seg);
+ end_space_after_gc());
#endif //USE_REGIONS
if (!sufficient_p)
{
@@ -14800,27 +14789,6 @@ BOOL gc_heap::soh_try_fit (int gen_number,
#ifdef USE_REGIONS
if (can_allocate)
{
-#ifdef STRESS_REGIONS
- uint8_t* res = acontext->alloc_ptr;
- heap_segment* seg = ephemeral_heap_segment;
- size_t region_size = get_region_size (seg);
- uint8_t* region_start = heap_segment_mem (seg) + (8 * 1024);
- uint8_t* region_mid = get_region_start (seg) + (region_size / 2);
- if (((num_gen0_segs % pinning_seg_interval) == 0) &&
- ((res == heap_segment_mem (seg)) ||
- ((res >= (region_mid - size)) && (res < (region_mid + size)))))
- {
- HndAssignHandleGC(pinning_handles_for_alloc[ph_index_per_heap], res);
- dprintf (REGIONS_LOG, ("h%d pinning object at %Ix on eph seg %Ix (ph#%d)",
- heap_number, res, heap_segment_mem (seg), ph_index_per_heap));
-
- ph_index_per_heap++;
- if (ph_index_per_heap == PINNING_HANDLE_INITIAL_LENGTH)
- {
- ph_index_per_heap = 0;
- }
- }
-#endif //STRESS_REGIONS
break;
}
@@ -14833,6 +14801,12 @@ BOOL gc_heap::soh_try_fit (int gen_number,
heap_segment* next_seg = heap_segment_next (ephemeral_heap_segment);
+ if (!next_seg)
+ {
+ assert (ephemeral_heap_segment == generation_tail_region (generation_of (gen_number)));
+ next_seg = get_new_region (gen_number);
+ }
+
if (next_seg)
{
dprintf (REGIONS_LOG, ("eph seg %Ix -> next %Ix",
@@ -14841,15 +14815,11 @@ BOOL gc_heap::soh_try_fit (int gen_number,
}
else
{
- assert (ephemeral_heap_segment == generation_tail_region (generation_of (gen_number)));
- ephemeral_heap_segment = get_new_region (gen_number);
+ *commit_failed_p = TRUE;
+ dprintf (REGIONS_LOG, ("couldn't get a new ephemeral region"));
+ return FALSE;
}
- assert (ephemeral_heap_segment != 0);
-
-#if defined(STRESS_REGIONS) && defined(STRESS_HEAP)
- num_gen0_segs++;
-#endif //STRESS_REGIONS && STRESS_HEAP
alloc_allocated = heap_segment_allocated (ephemeral_heap_segment);
dprintf (REGIONS_LOG, ("h%d alloc_allocated is now %Ix", heap_number, alloc_allocated));
#endif //USE_REGIONS
@@ -15855,7 +15825,7 @@ allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size
etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;
- allocated_since_last_gc += alloc_context_bytes;
+ allocated_since_last_gc[etw_allocation_index] += alloc_context_bytes;
if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
{
@@ -17788,13 +17758,14 @@ size_t gc_heap::get_total_allocated_since_last_gc()
for (int i = 0; i < gc_heap::n_heaps; i++)
{
gc_heap* hp = gc_heap::g_heaps[i];
- total_allocated_size += hp->allocated_since_last_gc;
- hp->allocated_since_last_gc = 0;
- }
-#else
- total_allocated_size = allocated_since_last_gc;
- allocated_since_last_gc = 0;
+#else //MULTIPLE_HEAPS
+ {
+ gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
+ total_allocated_size += hp->allocated_since_last_gc[0] + hp->allocated_since_last_gc[1];
+ hp->allocated_since_last_gc[0] = 0;
+ hp->allocated_since_last_gc[1] = 0;
+ }
return total_allocated_size;
}
@@ -17976,6 +17947,71 @@ size_t gc_heap::current_generation_size (int gen_number)
return gen_size;
}
+#ifdef USE_REGIONS
+// We may need a new empty region while doing a GC so try to get one now, if we don't have any
+// reserve in the free region list.
+bool gc_heap::try_get_new_free_region()
+{
+ heap_segment* region = 0;
+ if (free_regions)
+ {
+ dprintf (REGIONS_LOG, ("h%d has %d free regions %Ix", heap_number, num_free_regions,
+ heap_segment_mem (free_regions)));
+ return true;
+ }
+ else
+ {
+ region = allocate_new_region (__this, 0, false);
+ if (region)
+ {
+ if (init_table_for_region (0, region))
+ {
+ return_free_region (region);
+ dprintf (REGIONS_LOG, ("h%d got a new empty region %Ix", heap_number, region));
+ }
+ else
+ {
+ region = 0;
+ }
+ }
+ }
+
+ return (region != 0);
+}
+
+bool gc_heap::init_table_for_region (int gen_number, heap_segment* region)
+{
+#ifdef BACKGROUND_GC
+ if (is_bgc_in_progress())
+ {
+ dprintf (GC_TABLE_LOG, ("new seg %Ix, mark_array is %Ix",
+ heap_segment_mem (region), mark_array));
+ if (!commit_mark_array_new_seg (__this, region))
+ {
+ dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new region %Ix-%Ix",
+ get_region_start (region), heap_segment_reserved (region)));
+
+ // We don't have memory to commit the mark array so we cannot use the new region.
+ global_region_allocator.delete_region (get_region_start (region));
+ return false;
+ }
+ }
+#endif //BACKGROUND_GC
+
+ if (gen_number <= max_generation)
+ {
+ size_t first_brick = brick_of (heap_segment_mem (region));
+ set_brick (first_brick, -1);
+ }
+ else
+ {
+ assert (brick_table[brick_of (heap_segment_mem (region))] == 0);
+ }
+
+ return true;
+}
+#endif //USE_REGIONS
+
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function.
@@ -18218,6 +18254,14 @@ int gc_heap::generation_to_condemn (int n_initial,
}
}
+#ifdef USE_REGIONS
+ if (!try_get_new_free_region())
+ {
+ dprintf (GTC_LOG, ("can't get an empty region -> full compacting"));
+ last_gc_before_oom = TRUE;
+ }
+#endif //USE_REGIONS
+
//figure out which ephemeral generation is too fragmented
temp_gen = n;
for (i = n+1; i < max_generation; i++)
@@ -18272,6 +18316,16 @@ int gc_heap::generation_to_condemn (int n_initial,
dprintf (GTC_LOG, ("ml: %d", memory_load));
}
+#ifdef USE_REGIONS
+ // For regions we want to take the VA range into consideration as well.
+ uint32_t va_memory_load = global_region_allocator.get_va_memory_load();
+ if (heap_number == 0)
+ {
+ dprintf (GTC_LOG, ("h%d ML %d, va ML %d", heap_number, memory_load, va_memory_load));
+ }
+ memory_load = max (memory_load, va_memory_load);
+#endif //USE_REGIONS
+
// Need to get it early enough for all heaps to use.
local_settings->entry_available_physical_mem = available_physical;
local_settings->entry_memory_load = memory_load;
@@ -19970,6 +20024,7 @@ void gc_heap::init_records()
end_gen0_region_space = 0;
gen0_pinned_free_space = 0;
gen0_large_chunk_found = false;
+ num_regions_freed_in_sweep = 0;
#endif //USE_REGIONS
sufficient_gen0_space_p = FALSE;
@@ -22264,7 +22319,7 @@ BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p)
if ((heap_segment_mem (region) <= background_max_overflow_address) &&
(heap_segment_allocated (region) >= background_min_overflow_address))
{
- region->flags |= heap_segment_flag_overflow;
+ region->flags |= heap_segment_flags_overflow;
}
region = heap_segment_next (region);
}
@@ -22926,6 +22981,42 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
reset_card_marking_enumerators();
#endif // FEATURE_CARD_MARKING_STEALING
+#ifdef STRESS_REGIONS
+ heap_segment* gen0_region = generation_start_segment (generation_of (0));
+ while (gen0_region)
+ {
+ size_t gen0_region_size = heap_segment_allocated (gen0_region) - heap_segment_mem (gen0_region);
+
+ if (gen0_region_size > 0)
+ {
+ uint8_t* region_mid = heap_segment_mem (gen0_region) + (gen0_region_size / 2);
+
+ if ((num_gen0_regions % pinning_seg_interval) == 0)
+ {
+ int align_const = get_alignment_constant (TRUE);
+ // Pinning the first object in the region.
+ uint8_t* obj_to_pin = heap_segment_mem (gen0_region);
+ pin_by_gc (obj_to_pin);
+
+ obj_to_pin += Align (size (obj_to_pin), align_const);
+ // Pinning the middle object in the region.
+ while (obj_to_pin < heap_segment_allocated (gen0_region))
+ {
+ if (obj_to_pin > region_mid)
+ {
+ pin_by_gc (obj_to_pin);
+ break;
+ }
+ obj_to_pin += Align (size (obj_to_pin), align_const);
+ }
+ }
+ }
+
+ num_gen0_regions++;
+ gen0_region = heap_segment_next (gen0_region);
+ }
+#endif //STRESS_REGIONS
+
#ifdef MULTIPLE_HEAPS
gc_t_join.join(this, gc_join_begin_mark_phase);
if (gc_t_join.joined())
@@ -22933,6 +23024,10 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
{
maxgen_size_inc_p = false;
+#ifdef USE_REGIONS
+ special_sweep_p = false;
+#endif //USE_REGIONS
+
num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
#ifdef MULTIPLE_HEAPS
@@ -24931,14 +25026,6 @@ void gc_heap::process_last_np_surv_region (generation* consing_gen,
heap_segment* next_region = heap_segment_next (alloc_region);
- // PERF TODO: we just use the next region but we should see if we should skip some regions with
- // large pins.
- //
- // However, right now we have this coupling between going through the segs and the pins on
- // the segs, as in, I cannot change the seg order without re-arranging pins on them accordingly.
- // We could record where the index of the 1st pin and last pin in a region as part of the
- // region info.
-
if (!next_region)
{
int gen_num = heap_segment_gen_num (alloc_region);
@@ -24954,8 +25041,17 @@ void gc_heap::process_last_np_surv_region (generation* consing_gen,
{
assert (next_plan_gen_num == 0);
next_region = get_new_region (0);
- dprintf (REGIONS_LOG, ("h%d getting a new region for gen0 plan start seg to %Ix",
- heap_number, heap_segment_mem (next_region)));
+ if (next_region)
+ {
+ dprintf (REGIONS_LOG, ("h%d getting a new region for gen0 plan start seg to %Ix",
+ heap_number, heap_segment_mem (next_region)));
+ }
+ else
+ {
+ dprintf (REGIONS_LOG, ("h%d couldn't get a region to plan gen0, special sweep on",
+ heap_number));
+ special_sweep_p = true;
+ }
}
else
{
@@ -24969,23 +25065,28 @@ void gc_heap::process_last_np_surv_region (generation* consing_gen,
heap_number, heap_segment_mem (next_region), heap_segment_gen_num (next_region)));
}
- assert (next_region != 0);
-
- generation_allocation_segment (consing_gen) = next_region;
- generation_allocation_pointer (consing_gen) = heap_segment_mem (next_region);
- generation_allocation_context_start_region (consing_gen) = generation_allocation_pointer (consing_gen);
- generation_allocation_limit (consing_gen) = generation_allocation_pointer (consing_gen);
+ if (next_region)
+ {
+ generation_allocation_segment (consing_gen) = next_region;
+ generation_allocation_pointer (consing_gen) = heap_segment_mem (next_region);
+ generation_allocation_context_start_region (consing_gen) = generation_allocation_pointer (consing_gen);
+ generation_allocation_limit (consing_gen) = generation_allocation_pointer (consing_gen);
- if (next_plan_gen_num != -1)
+ if (next_plan_gen_num != -1)
+ {
+ generation_plan_start_segment (generation_of (next_plan_gen_num)) = next_region;
+ dprintf (REGIONS_LOG, ("h%d setting gen%d plan start seg to %Ix (new consing)",
+ heap_number, next_plan_gen_num, heap_segment_mem (next_region)));
+ }
+ dprintf (REGIONS_LOG, ("h%d consing(%d) alloc seg: %Ix, ptr: %Ix, planning gen%d",
+ heap_number, consing_gen->gen_num,
+ heap_segment_mem (generation_allocation_segment (consing_gen)),
+ generation_allocation_pointer (consing_gen), next_plan_gen_num));
+ }
+ else
{
- generation_plan_start_segment (generation_of (next_plan_gen_num)) = next_region;
- dprintf (REGIONS_LOG, ("h%d setting gen%d plan start seg to %Ix (new consing)",
- heap_number, next_plan_gen_num, heap_segment_mem (next_region)));
+ assert (special_sweep_p);
}
- dprintf (REGIONS_LOG, ("h%d consing(%d) alloc seg: %Ix, ptr: %Ix, planning gen%d",
- heap_number, consing_gen->gen_num,
- heap_segment_mem (generation_allocation_segment (consing_gen)),
- generation_allocation_pointer (consing_gen), next_plan_gen_num));
}
}
@@ -24993,6 +25094,11 @@ void gc_heap::process_remaining_regions (int current_plan_gen_num, generation* c
{
assert ((current_plan_gen_num == 0) || (!settings.promotion && (current_plan_gen_num == -1)));
+ if (special_sweep_p)
+ {
+ assert (pinned_plug_que_empty_p());
+ }
+
dprintf (REGIONS_LOG, ("h%d PRR: plan %d: consing alloc seg: %Ix, ptr: %Ix",
heap_number, current_plan_gen_num,
heap_segment_mem (generation_allocation_segment (consing_gen)),
@@ -25066,9 +25172,16 @@ void gc_heap::process_remaining_regions (int current_plan_gen_num, generation* c
}
heap_segment* current_region = generation_allocation_segment (consing_gen);
+
+ if (special_sweep_p)
+ {
+ assert (heap_segment_next (current_region) == 0);
+ return;
+ }
+
set_region_plan_gen_num (current_region, current_plan_gen_num);
heap_segment_plan_allocated (current_region) = generation_allocation_pointer (consing_gen);
- dprintf (3, ("h%d setting alloc seg %Ix plan alloc to %Ix",
+ dprintf (REGIONS_LOG, ("h%d setting alloc seg %Ix plan alloc to %Ix",
heap_number, heap_segment_mem (current_region),
heap_segment_plan_allocated (current_region)));
@@ -25119,8 +25232,9 @@ void gc_heap::get_gen0_end_plan_space_worker (heap_segment* region)
}
}
- dprintf (REGIONS_LOG, ("h%d found end space: %Id in region %Ix, total %Id",
- heap_number, end_plan_space, heap_segment_mem (region), end_gen0_region_space));
+ dprintf (REGIONS_LOG, ("h%d found end space: %Id in region %Ix, total %Id->%Id",
+ heap_number, end_plan_space, heap_segment_mem (region), end_gen0_region_space,
+ (end_gen0_region_space + end_plan_space)));
end_gen0_region_space += end_plan_space;
region = heap_segment_next (region);
@@ -25607,6 +25721,13 @@ void gc_heap::plan_phase (int condemned_gen_number)
heap_segment_saved_allocated (seg1) = heap_segment_allocated (seg1);
heap_segment_allocated (seg1) = plug_end;
+#ifdef USE_REGIONS
+ if (heap_segment_mem (seg1) == heap_segment_allocated (seg1))
+ {
+ num_regions_freed_in_sweep++;
+ }
+#endif //USE_REGIONS
+
current_brick = update_brick_table (tree, current_brick, x, plug_end);
dprintf (3, ("end of seg: new tree, sequence# 0"));
sequence_number = 0;
@@ -26336,6 +26457,9 @@ void gc_heap::plan_phase (int condemned_gen_number)
#ifdef HOST_64BIT
if ((!settings.concurrent) &&
+#ifdef USE_REGIONS
+ !special_sweep_p &&
+#endif //USE_REGIONS
!provisional_mode_triggered &&
((condemned_gen_number < max_generation) &&
((settings.gen0_reduction_count > 0) || (settings.entry_memory_load >= 95))))
@@ -26901,7 +27025,10 @@ void gc_heap::plan_phase (int condemned_gen_number)
}
#ifdef FEATURE_PREMORTEM_FINALIZATION
- finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE);
+ if (!special_sweep_p)
+ {
+ finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE);
+ }
#endif // FEATURE_PREMORTEM_FINALIZATION
#ifdef MULTIPLE_HEAPS
@@ -26910,8 +27037,11 @@ void gc_heap::plan_phase (int condemned_gen_number)
if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
{
- GCScan::GcPromotionsGranted(condemned_gen_number,
- max_generation, &sc);
+ if (!special_sweep_p)
+ {
+ GCScan::GcPromotionsGranted(condemned_gen_number,
+ max_generation, &sc);
+ }
#ifndef USE_REGIONS
if (condemned_gen_number >= (max_generation -1))
@@ -26934,7 +27064,10 @@ void gc_heap::plan_phase (int condemned_gen_number)
#endif //MULTIPLE_HEAPS
}
- clear_gen1_cards();
+ if (!special_sweep_p)
+ {
+ clear_gen1_cards();
+ }
}
//verify_partial();
@@ -27310,12 +27443,16 @@ void gc_heap::thread_rest_of_generation (generation* gen, heap_segment* start_re
heap_segment* gc_heap::get_new_region (int gen_number)
{
heap_segment* new_region = get_free_region (gen_number);
- generation* gen = generation_of (gen_number);
- heap_segment_next (generation_tail_region (gen)) = new_region;
- generation_tail_region (gen) = new_region;
if (new_region)
+ {
+ generation* gen = generation_of (gen_number);
+ heap_segment_next (generation_tail_region (gen)) = new_region;
+ generation_tail_region (gen) = new_region;
+
+
verify_regions (gen_number);
+ }
return new_region;
}
@@ -27374,6 +27511,22 @@ void gc_heap::update_start_tail_regions (generation* gen,
verify_regions();
}
+
+#ifdef STRESS_REGIONS
+void gc_heap::pin_by_gc (uint8_t* object)
+{
+ heap_segment* region = region_of (object);
+ HndAssignHandleGC(pinning_handles_for_alloc[ph_index_per_heap], object);
+ dprintf (REGIONS_LOG, ("h%d pinning object at %Ix on eph seg %Ix (ph#%d)",
+ heap_number, object, heap_segment_mem (region), ph_index_per_heap));
+
+ ph_index_per_heap++;
+ if (ph_index_per_heap == PINNING_HANDLE_INITIAL_LENGTH)
+ {
+ ph_index_per_heap = 0;
+ }
+}
+#endif //STRESS_REGIONS
#endif //USE_REGIONS
void gc_heap::make_free_lists (int condemned_gen_number)
@@ -27392,12 +27545,13 @@ void gc_heap::make_free_lists (int condemned_gen_number)
uint8_t* end_address = heap_segment_allocated (current_heap_segment);
size_t end_brick = brick_of (end_address-1);
make_free_args args;
- args.free_list_gen_number = min (max_generation, 1 + condemned_gen_number);
+ args.free_list_gen_number = (special_sweep_p ? condemned_gen_number : (min (max_generation, 1 + condemned_gen_number)));
#ifdef USE_REGIONS
int current_gen_num = condemned_gen_number;
dprintf (REGIONS_LOG, ("starting at gen%d %Ix -> %Ix",
condemned_gen_number, start_address, end_address));
#else
+ assert (!special_sweep_p);
args.current_gen_limit = (((condemned_gen_number == max_generation)) ?
MAX_PTR :
(generation_limit (args.free_list_gen_number)));
@@ -27448,7 +27602,7 @@ void gc_heap::make_free_lists (int condemned_gen_number)
{
current_gen_num--;
- int plan_gen_num = get_plan_gen_num (current_gen_num);
+ int plan_gen_num = (special_sweep_p ? current_gen_num : get_plan_gen_num (current_gen_num));
generation* current_gen = generation_of (current_gen_num);
current_heap_segment = generation_start_segment (current_gen);
@@ -27531,16 +27685,20 @@ void gc_heap::make_free_lists (int condemned_gen_number)
while (current_gen_num >= 0)
{
- int new_gen_num = get_plan_gen_num (current_gen_num);
+ int new_gen_num = (special_sweep_p ? current_gen_num : get_plan_gen_num (current_gen_num));
generation* new_gen = generation_of (new_gen_num);
dprintf (REGIONS_LOG, ("gen%d->%d", current_gen_num, new_gen_num));
// If the new gen is outside the condemned gens, it always already exists,
// unless we are condemning max_gen.
- bool new_gen_exists_p = (new_gen_num > condemned_gen_number);
- if (condemned_gen_number == max_generation)
+ bool new_gen_exists_p = false;
+ if (!special_sweep_p)
{
- new_gen_exists_p = (current_gen_num == (max_generation - 1));
+ new_gen_exists_p = (new_gen_num > condemned_gen_number);
+ if (condemned_gen_number == max_generation)
+ {
+ new_gen_exists_p = (current_gen_num == (max_generation - 1));
+ }
}
dprintf (REGIONS_LOG, ("new_gen%d %s", new_gen_num, (new_gen_exists_p ? "exists" : "doesn't exist")));
@@ -27579,7 +27737,10 @@ void gc_heap::make_free_lists (int condemned_gen_number)
else
{
heap_segment* new_region = get_free_region (new_gen_num);
- thread_start_region (new_gen, new_region);
+ // If we need to get a new region it means we must've returned some regions to free so
+ // we are guaranteed to get a new region.
+ assert (new_region);
+ thread_start_region (new_gen, new_region);
}
uint8_t* new_gen_start = heap_segment_mem (heap_segment_rw (generation_start_segment (new_gen)));
@@ -27589,12 +27750,22 @@ void gc_heap::make_free_lists (int condemned_gen_number)
current_gen_num--;
}
- // We need to get a new region for the new gen0.
generation* gen_gen0 = generation_of (0);
- heap_segment* gen0_region = get_free_region (0);
- thread_start_region (gen_gen0, gen0_region);
- ephemeral_heap_segment = gen0_region;
- reset_allocation_pointers (gen_gen0, heap_segment_mem (gen0_region));
+
+ if (special_sweep_p)
+ {
+ ephemeral_heap_segment = generation_start_segment (gen_gen0);
+ }
+ else
+ {
+ // We need to get a new region for the new gen0.
+ assert (num_free_regions > 0);
+ heap_segment* gen0_region = get_free_region (0);
+ thread_start_region (gen_gen0, gen0_region);
+ ephemeral_heap_segment = gen0_region;
+ reset_allocation_pointers (gen_gen0, heap_segment_mem (gen0_region));
+ }
+
alloc_allocated = heap_segment_allocated (ephemeral_heap_segment);
// Since we didn't compact, we should recalculate the end_gen0_region_space.
end_gen0_region_space = get_gen0_end_space();
@@ -36100,7 +36271,7 @@ size_t gc_heap::generation_fragmentation (generation* gen,
frag += (heap_segment_saved_allocated (seg) -
heap_segment_plan_allocated (seg));
- dprintf (REGIONS_LOG, ("h%d g%d adding seg plan frag: %Ix-%Ix=%Id -> %Id",
+ dprintf (3, ("h%d g%d adding seg plan frag: %Ix-%Ix=%Id -> %Id",
heap_number, gen_num,
heap_segment_saved_allocated (seg),
heap_segment_plan_allocated (seg),
@@ -36205,11 +36376,18 @@ size_t gc_heap::generation_sizes (generation* gen, bool use_saved_p)
}
#ifdef USE_REGIONS
-bool gc_heap::decide_on_expansion()
+bool gc_heap::decide_on_compaction_space()
{
- bool should_expand = false;
size_t gen0size = approximate_new_allocation();
- size_t free_region_size = num_free_regions * REGION_SIZE;
+
+ // If we don't compact, would we have enough space?
+ if (sufficient_space_regions ((num_regions_freed_in_sweep * ((size_t)1 << min_segment_size_shr)),
+ gen0size))
+ {
+ return false;
+ }
+
+ // If we do compact, would we have enough space?
get_gen0_end_plan_space();
if (!gen0_large_chunk_found)
@@ -36217,16 +36395,16 @@ bool gc_heap::decide_on_expansion()
gen0_large_chunk_found = (num_free_regions > 0);
}
- dprintf (REGIONS_LOG, ("gen0_pinned_free_space: %Id, end_gen0_region_space: %Id, free regions: %Id, gen0size: %Id",
- gen0_pinned_free_space, end_gen0_region_space, free_region_size, gen0size));
- // Do we want to add the extra here, see ephemeral_gen_fit_p
- if (((gen0_pinned_free_space + end_gen0_region_space + free_region_size) < gen0size) ||
- !gen0_large_chunk_found)
+ dprintf (REGIONS_LOG, ("gen0_pinned_free_space: %Id, end_gen0_region_space: %Id, gen0size: %Id",
+ gen0_pinned_free_space, end_gen0_region_space, gen0size));
+
+ if (sufficient_space_regions ((gen0_pinned_free_space + end_gen0_region_space), gen0size) &&
+ gen0_large_chunk_found)
{
- should_expand = TRUE;
+ sufficient_gen0_space_p = TRUE;
}
- return should_expand;
+ return true;
}
#endif //USE_REGIONS
@@ -36310,6 +36488,14 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
fragmentation, (int)(fragmentation_burden * 100.0),
gen_sizes));
+#ifdef USE_REGIONS
+ if (special_sweep_p)
+ {
+ last_gc_before_oom = FALSE;
+ return FALSE;
+ }
+#endif //USE_REGIONS
+
#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
// for GC stress runs we need compaction
if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent)
@@ -36352,7 +36538,12 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
should_compact = TRUE;
}
-#ifndef USE_REGIONS
+#ifdef USE_REGIONS
+ if (!should_compact)
+ {
+ should_compact = !!decide_on_compaction_space();
+ }
+#else //USE_REGIONS
if (!should_compact)
{
if (dt_low_ephemeral_space_p (tuning_deciding_compaction))
@@ -36463,15 +36654,6 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
}
}
-#ifdef USE_REGIONS
- if (!should_compact)
- {
- should_expand = decide_on_expansion();
- if (should_expand)
- should_compact = TRUE;
- }
-#endif //USE_REGIONS
-
if (settings.pause_mode == pause_no_gc)
{
should_compact = TRUE;
@@ -36497,48 +36679,63 @@ size_t gc_heap::approximate_new_allocation()
return max (2*dd_min_size (dd0), ((dd_desired_allocation (dd0)*2)/3));
}
+bool gc_heap::check_against_hard_limit (size_t space_required)
+{
+ bool can_fit = TRUE;
+
+ // If hard limit is specified, and if we attributed all that's left in commit to the ephemeral seg
+ // so we treat that as segment end, do we have enough space.
+ if (heap_hard_limit)
+ {
+ size_t left_in_commit = heap_hard_limit - current_total_committed;
+ int num_heaps = 1;
+#ifdef MULTIPLE_HEAPS
+ num_heaps = n_heaps;
+#endif //MULTIPLE_HEAPS
+ left_in_commit /= num_heaps;
+ if (left_in_commit < space_required)
+ {
+ can_fit = FALSE;
+ }
+
+ dprintf (2, ("h%d end seg %Id, but only %Id left in HARD LIMIT commit, required: %Id %s on eph",
+ heap_number, space_required,
+ left_in_commit, space_required,
+ (can_fit ? "ok" : "short")));
+ }
+
+ return can_fit;
+}
+
#ifdef USE_REGIONS
bool gc_heap::sufficient_space_regions (size_t end_space, size_t end_space_required)
{
- size_t total_alloc_space = end_space + (num_free_regions * REGION_SIZE);
- dprintf (REGIONS_LOG, ("h%d required %Id, has %Id",
- heap_number, end_space_required, total_alloc_space));
- return (total_alloc_space > end_space_required);
+ // REGIONS PERF TODO: we can repurpose large regions here too, if needed.
+ size_t free_regions_space = (num_free_regions * ((size_t)1 << min_segment_size_shr)) +
+ global_region_allocator.get_free();
+ size_t total_alloc_space = end_space + free_regions_space;
+ dprintf (REGIONS_LOG, ("h%d required %Id, end %Id + free %Id=%Id",
+ heap_number, end_space_required, end_space, free_regions_space, total_alloc_space));
+ if (total_alloc_space > end_space_required)
+ {
+ return check_against_hard_limit (end_space_required);
+ }
+ else
+ return false;
}
-#endif //USE_REGIONS
-
-BOOL gc_heap::sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end, size_t end_space_required, gc_tuning_point tp)
+#else //USE_REGIONS
+BOOL gc_heap::sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end, size_t end_space_required)
{
BOOL can_fit = FALSE;
size_t end_seg_space = (size_t)(seg_end - start);
if (end_seg_space > end_space_required)
{
- // If hard limit is specified, and if we attributed all that's left in commit to the ephemeral seg
- // so we treat that as segment end, do we have enough space.
- if (heap_hard_limit)
- {
- size_t left_in_commit = heap_hard_limit - current_total_committed;
- int num_heaps = 1;
-#ifdef MULTIPLE_HEAPS
- num_heaps = n_heaps;
-#endif //MULTIPLE_HEAPS
- left_in_commit /= num_heaps;
- if (left_in_commit > end_space_required)
- {
- can_fit = TRUE;
- }
-
- dprintf (2, ("h%d end seg %Id, but only %Id left in HARD LIMIT commit, required: %Id %s on eph (%d)",
- heap_number, end_seg_space,
- left_in_commit, end_space_required,
- (can_fit ? "ok" : "short"), (int)tp));
- }
- else
- can_fit = TRUE;
+ return check_against_hard_limit (end_space_required);
}
-
- return can_fit;
+ else
+ return false;
}
+#endif //USE_REGIONS
// After we did a GC we expect to have at least this
// much space at the end of the segment to satisfy
@@ -36697,7 +36894,7 @@ BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
size_t gen0_end_space = get_gen0_end_space();
BOOL can_fit = sufficient_space_regions (gen0_end_space, end_space);
#else //USE_REGIONS
- BOOL can_fit = sufficient_space_end_seg (start, heap_segment_reserved (ephemeral_heap_segment), end_space, tp);
+ BOOL can_fit = sufficient_space_end_seg (start, heap_segment_reserved (ephemeral_heap_segment), end_space);
#endif //USE_REGIONS
return can_fit;
}
@@ -36805,6 +37002,9 @@ CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int g
void reset_memory (uint8_t* o, size_t sizeo)
{
+ if (gc_heap::use_large_pages_p)
+ return;
+
if (sizeo > 128 * 1024)
{
// We cannot reset the memory for the useful part of a free object.
@@ -38617,11 +38817,12 @@ void gc_heap::descr_generations (const char* msg)
generation* gen = generation_of (curr_gen_number);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
#ifdef USE_REGIONS
- dprintf (1, ("g%d: start seg: %Ix alloc seg: %Ix, plan start seg: %Ix",
+ dprintf (1, ("g%d: start seg: %Ix alloc seg: %Ix, plan start seg: %Ix, tail region: %Ix",
curr_gen_number,
heap_segment_mem (seg),
heap_segment_mem (generation_allocation_segment (gen)),
- (generation_plan_start_segment (gen) ? heap_segment_mem (generation_plan_start_segment (gen)) : 0)));
+ (generation_plan_start_segment (gen) ? heap_segment_mem (generation_plan_start_segment (gen)) : 0),
+ heap_segment_mem (generation_tail_region (gen))));
while (seg)
{
dprintf (GTC_LOG, ("g%d: (%d:p %d) [%Ix %Ix(sa: %Ix, pa: %Ix)[-%Ix[ (%Id) (%Id)",
@@ -42550,7 +42751,7 @@ size_t gc_heap::get_gen0_min_size()
#ifdef STRESS_REGIONS
// This is just so we can test allocation using more than one region on machines with very
// small caches.
- gen0size = REGION_SIZE * 3;
+ gen0size = ((size_t)1 << min_segment_size_shr) * 3;
#endif //STRESS_REGIONS
#endif //USE_REGIONS
diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h
index 937f2654c70..0e9f0a18895 100644
--- a/src/coreclr/gc/gcpriv.h
+++ b/src/coreclr/gc/gcpriv.h
@@ -341,10 +341,13 @@ class recursive_gc_sync;
#endif //BACKGROUND_GC
#ifdef MULTIPLE_HEAPS
+// This feature hasn't been enabled for regions yet.
+#ifndef USE_REGIONS
// card marking stealing only makes sense in server GC
// but it works and is easier to debug for workstation GC
// so turn it on for server GC, turn on for workstation GC if necessary
#define FEATURE_CARD_MARKING_STEALING
+#endif //!USE_REGIONS
#endif //MULTIPLE_HEAPS
#ifdef FEATURE_CARD_MARKING_STEALING
@@ -1306,7 +1309,11 @@ public:
PER_HEAP
size_t get_gen0_end_space();
PER_HEAP
- bool decide_on_expansion();
+ bool decide_on_compaction_space();
+ PER_HEAP
+ bool try_get_new_free_region();
+ PER_HEAP
+ bool init_table_for_region (int gen_number, heap_segment* region);
PER_HEAP
heap_segment* find_first_valid_region (heap_segment* region, int gen_num=-1);
PER_HEAP
@@ -1325,6 +1332,10 @@ public:
heap_segment* region_to_delete,
heap_segment* prev_region,
heap_segment* next_region);
+#ifdef STRESS_REGIONS
+ PER_HEAP
+ void pin_by_gc (uint8_t* object);
+#endif //STRESS_REGIONS
#endif //USE_REGIONS
static
@@ -1365,6 +1376,9 @@ public:
bool should_retry_other_heap (int gen_number, size_t size);
PER_HEAP
+ bool check_against_hard_limit (size_t space_required);
+
+ PER_HEAP
CObjectHeader* allocate (size_t jsize,
alloc_context* acontext,
uint32_t flags);
@@ -3093,10 +3107,12 @@ protected:
BOOL decide_on_compacting (int condemned_gen_number,
size_t fragmentation,
BOOL& should_expand);
+#ifndef USE_REGIONS
PER_HEAP
BOOL sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end,
- size_t end_space_required,
- gc_tuning_point tp);
+ size_t end_space_required);
+#endif //!USE_REGIONS
+
PER_HEAP
BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
PER_HEAP
@@ -3368,7 +3384,7 @@ public:
PER_HEAP
int pinning_seg_interval;
PER_HEAP
- int num_gen0_segs;
+ size_t num_gen0_regions;
#endif //STRESS_REGIONS
PER_HEAP
@@ -3383,6 +3399,11 @@ public:
PER_HEAP
int num_free_regions_removed;
+ // This is the number of regions we would free up if we sweep.
+ // It's used in the decision for compaction so we calculate it in plan.
+ PER_HEAP
+ int num_regions_freed_in_sweep;
+
PER_HEAP
heap_segment* free_large_regions;
@@ -3515,6 +3536,9 @@ public:
PER_HEAP_ISOLATED
VOLATILE(bool) full_gc_approach_event_set;
+ PER_HEAP_ISOLATED
+ bool special_sweep_p;
+
#ifdef BACKGROUND_GC
PER_HEAP_ISOLATED
BOOL fgn_last_gc_was_concurrent;
@@ -4429,7 +4453,7 @@ protected:
size_t num_provisional_triggered;
PER_HEAP
- size_t allocated_since_last_gc;
+ size_t allocated_since_last_gc[2];
#ifdef BACKGROUND_GC
PER_HEAP_ISOLATED
@@ -5140,6 +5164,7 @@ struct loh_padding_obj
#define heap_segment_flags_readonly 1
#define heap_segment_flags_inrange 2
#define heap_segment_flags_loh 8
+
#ifdef BACKGROUND_GC
#define heap_segment_flags_swept 16
#define heap_segment_flags_decommitted 32
@@ -5147,15 +5172,19 @@ struct loh_padding_obj
// for segments whose mark array is only partially committed.
#define heap_segment_flags_ma_pcommitted 128
#define heap_segment_flags_uoh_delete 256
+#endif //BACKGROUND_GC
-#ifdef USE_REGIONS
+#define heap_segment_flags_poh 512
+
+#if defined(BACKGROUND_GC) && defined(USE_REGIONS)
// This means this seg needs to be processed by
// BGC overflow when we process non concurrently.
-#define heap_segment_flag_overflow 1024
-#endif //USE_REGIONS
-#endif //BACKGROUND_GC
+#define heap_segment_flags_overflow 1024
+#endif //BACKGROUND_GC && USE_REGIONS
-#define heap_segment_flags_poh 512
+#ifdef USE_REGIONS
+#define heap_segment_flags_demoted 2048
+#endif //USE_REGIONS
//need to be careful to keep enough pad items to fit a relocation node
//padded to QuadWord before the plug_skew
@@ -5200,8 +5229,6 @@ public:
// When setting it we update the demotion decision accordingly.
int gen_num;
int plan_gen_num;
- // This should be changed to a flag.
- bool demoted_p;
// Fields that we need to provide in response to a
// random address that might land anywhere on the region.
@@ -5280,6 +5307,13 @@ public:
// We do take the opportunity to coalesce free blocks but we do not coalesce busy blocks.
// When we decommit a region, we simply mark its block free. Free blocks are coalesced
// opportunistically when we need to walk them.
+//
+// TODO: to accommodate the large page case, we will need the region allocator to have the
+// concept of committed, ie, if a page is 1GB obviously it can accommodate a lot of regions.
+// And we'll need to indicate that they are committed already.
+//
+// TODO: to accommodate 32-bit processes, we reserve in segment sizes and divide each seg
+// into regions.
class region_allocator
{
private:
@@ -5290,6 +5324,8 @@ private:
uint8_t* global_region_end;
uint8_t* global_region_used;
+ uint32_t total_free_units;
+
size_t region_alignment;
size_t large_region_alignment;
@@ -5300,7 +5336,7 @@ private:
uint32_t* region_map_index_of (uint8_t* address);
uint8_t* allocate (uint32_t num_units);
- uint8_t* allocate_end_uh (uint32_t num_units);
+ uint8_t* allocate_end (uint32_t num_units);
void make_busy_block (uint32_t* index_start, uint32_t num_units);
void make_free_block (uint32_t* index_start, uint32_t num_units);
@@ -5341,6 +5377,13 @@ public:
bool allocate_basic_region (uint8_t** start, uint8_t** end);
bool allocate_large_region (uint8_t** start, uint8_t** end);
void delete_region (uint8_t* start);
+ uint32_t get_va_memory_load()
+ {
+ return (uint32_t)((global_region_used - global_region_start) * 100.0
+ / (global_region_end - global_region_start));
+ }
+ // Returns the amount of free space + end of unused region space
+ size_t get_free() { return (total_free_units * region_alignment) ; }
size_t get_region_alignment () { return region_alignment; }
size_t get_large_region_alignment () { return large_region_alignment; }
};
@@ -5466,7 +5509,7 @@ inline gc_oh_num heap_segment_oh (heap_segment * inst)
inline
bool heap_segment_overflow_p (heap_segment* inst)
{
- return ((inst->flags & heap_segment_flag_overflow) != 0);
+ return ((inst->flags & heap_segment_flags_overflow) != 0);
}
#endif //USE_REGIONS
@@ -5535,9 +5578,9 @@ int& heap_segment_plan_gen_num (heap_segment* inst)
return inst->plan_gen_num;
}
inline
-bool& heap_segment_demoted_p (heap_segment* inst)
+bool heap_segment_demoted_p (heap_segment* inst)
{
- return inst->demoted_p;
+ return ((inst->flags & heap_segment_flags_demoted) != 0);
}
#endif //USE_REGIONS