Sort the overlap map and split entries
The overlap map must be sorted before its entries can be split. If it is not sorted first, the entry length and base calculations come out wrong, and the error cascades into every other overlap for the same usable memory region. Once the overlap map has been sorted, it is looped over and its entries are split off into the memory map.

Also: add explanatory comments and formatting fixes.
This commit is contained in:
parent
5c289f58b4
commit
bb1316ae00
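For readers skimming the diff below, here is a standalone sketch of the pair-wise selection sort this commit introduces. The overlap array layout (a bad entry at each even index, its enclosing usable entry at the following odd index) is taken from the hunks below; the struct fields, example addresses, the helper name sort_overlap_pairs, and the main driver are illustrative assumptions, not the project's code.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the real entry type; only the fields the sort
 * touches are included. */
struct memmap_entry {
    uint64_t base;
    uint64_t length;
};

/* Pair-wise selection sort: overlap[] holds (bad, good) pointer pairs, and
 * whole pairs are reordered by the bad entry's base address, mirroring the
 * loop added in the last hunk of this commit. */
static void sort_overlap_pairs(struct memmap_entry **overlap, size_t num_overlaps) {
    for (size_t p = 0; num_overlaps > 2 && p < num_overlaps - 2; p += 2) {
        uint64_t min = overlap[p]->base;
        size_t min_index = p;
        for (size_t i = p; i < num_overlaps; i += 2) {
            if (overlap[i]->base < min) {
                min = overlap[i]->base;
                min_index = i;
            }
        }
        /* Swap the whole pair so each bad entry keeps its usable partner. */
        struct memmap_entry *bad = overlap[min_index];
        struct memmap_entry *good = overlap[min_index + 1];
        overlap[min_index] = overlap[p];
        overlap[min_index + 1] = overlap[p + 1];
        overlap[p] = bad;
        overlap[p + 1] = good;
    }
}

int main(void) {
    /* One usable region overlapped by two bad regions, listed out of order. */
    struct memmap_entry usable = { 0x100000, 0x100000 };
    struct memmap_entry bad_hi = { 0x180000, 0x1000 };
    struct memmap_entry bad_lo = { 0x120000, 0x1000 };
    struct memmap_entry *overlap[] = { &bad_hi, &usable, &bad_lo, &usable };

    sort_overlap_pairs(overlap, 4);
    printf("first bad base after sort: %#llx\n",
           (unsigned long long)overlap[0]->base); /* prints 0x120000 */
    return 0;
}
```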
@@ -126,13 +126,13 @@ static size_t split_entry(struct memmap_entry *dst, struct memmap_entry *bad, st
     struct memmap_entry *ptr;
     size_t length;
 
-    if(!dst) return 0;
+    if (!dst) return 0;
 
     ptr = dst;
 
     length = bad->base - good->base;
     length = length <= good->length ? length : 0;
-    if(length > 0) {
+    if (length > 0) {
         *ptr++ = (struct memmap_entry) {
             good->base,
             length,
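A quick aside on the length guard in the hunk above: the subtraction is unsigned, so if a bad entry's base were below the usable entry's base, the result would wrap around, exceed good->length, and be zeroed by the ternary, producing no lower fragment. The values in this standalone program are made up for illustration and are not taken from the commit.

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Illustrative values only. */
    uint64_t good_base = 0x100000, good_length = 0x100000; /* usable: 1 MiB..2 MiB  */
    uint64_t bad_base  = 0x180000;                          /* bad region inside it  */

    /* Same computation as the hunk above: size of the usable space below bad. */
    uint64_t length = bad_base - good_base;                 /* 0x80000 */
    length = length <= good_length ? length : 0;
    printf("lower fragment length: %#llx\n", (unsigned long long)length);

    /* If the bad base sat below the usable base, the unsigned subtraction
     * would wrap to a huge value, fail the guard, and yield length 0. */
    bad_base = 0x80000;
    length = bad_base - good_base;
    length = length <= good_length ? length : 0;
    printf("out-of-range case: %#llx\n", (unsigned long long)length);
    return 0;
}
```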
@@ -171,6 +171,8 @@ static void sanitise_entries(struct memmap_entry *m, size_t *_count, bool align_
             uint64_t res_length = m[j].length;
             uint64_t res_top = res_base + res_length;
 
+            // Construct an overlap map if bad memory completely overlaps usable
+            // memory. In this case, the entries are not touched at all yet.
             if ( (res_base >= base && res_base < top)
               && (res_top >= base && res_top < top) ) {
                 overlap[num_overlaps++] = m + j;
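The condition in the hunk above records an overlap only when the bad entry lies entirely within the usable entry's [base, top) range. Here is a minimal sketch of that test in isolation; the helper name completely_inside and the example ranges are hypothetical, not from the source.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: true when [res_base, res_top) lies completely inside
 * the usable range [base, top) -- the case the overlap map records for
 * later splitting. */
static bool completely_inside(uint64_t base, uint64_t top,
                              uint64_t res_base, uint64_t res_top) {
    return (res_base >= base && res_base < top)
        && (res_top  >= base && res_top  < top);
}

int main(void) {
    /* Usable 1 MiB..2 MiB; a 4 KiB bad region at 1.5 MiB is fully inside. */
    printf("%d\n", completely_inside(0x100000, 0x200000, 0x180000, 0x181000)); /* 1 */
    /* A bad region straddling the top of the usable range is not. */
    printf("%d\n", completely_inside(0x100000, 0x200000, 0x1FF000, 0x201000)); /* 0 */
    return 0;
}
```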
@@ -197,6 +199,31 @@ static void sanitise_entries(struct memmap_entry *m, size_t *_count, bool align_
         }
     }
 
+    // Sort the entries according to the bad type's base address, or don't
+    // bother if there's zero or one overlaps. This is just an adapted version
+    // of the sorting algorithm used for the map as a whole.
+    for (size_t p = 0; num_overlaps > 2 && p < num_overlaps - 2; p += 2) {
+        uint64_t min = overlap[p]->base;
+        size_t min_index = p;
+        for (size_t i = p; i < num_overlaps; i += 2) {
+            if (overlap[i]->base < min) {
+                min = overlap[i]->base;
+                min_index = i;
+            }
+        }
+        struct memmap_entry *bad = overlap[min_index];
+        struct memmap_entry *good = overlap[min_index + 1];
+        overlap[min_index] = overlap[p];
+        overlap[min_index + 1] = overlap[p + 1];
+        overlap[p] = bad;
+        overlap[p + 1] = good;
+    }
+
+    // Split the entries into the memory map
+    for (size_t i = 0; count < memmap_max_entries && i < num_overlaps; i += 2) {
+        count += split_entry(m + count, overlap[i], overlap[i + 1]);
+    }
+
     // Remove 0 length usable entries and usable entries below 0x1000
     for (size_t i = 0; i < count; i++) {
         if (m[i].type != MEMMAP_USABLE)