Mirror of https://github.com/limine-bootloader/limine (synced 2024-12-25 07:16:48 +03:00)
stivale: Make bootloader reclaimable entries aligned by spec
This commit is contained in:
parent ed939eb6d9
commit 3d76435d4c
STIVALE.md | 16
@@ -110,7 +110,7 @@ All other general purpose registers are set to 0.
 ## Bootloader-reserved memory
 
 In order for stivale to function, it needs to reserve memory areas for either internal
-usage (such as page tables, GDT, SMP), or for kernel interfacing (such as returned
+usage (such as page tables, GDT), or for kernel interfacing (such as returned
 structures).
 
 stivale ensures that none of these areas are found in any of the sections
@@ -124,8 +124,8 @@ before switching to its own address space, as unmarked memory areas in use by
 the bootloader may become unavailable.
 
 Once the OS is done needing the bootloader, memory map areas marked as "bootloader
-reclaimable" may be used as usable memory. These areas are not guaranteed to be
-aligned, but they are guaranteed to not overlap other sections of the memory map.
+reclaimable" may be used as usable memory. These areas are guaranteed to be
+4096-byte aligned, and they are guaranteed to not overlap other sections of the memory map.
 
 ## stivale header (.stivalehdr)
 
@@ -220,14 +220,14 @@ All other values are undefined.
 The kernel and modules loaded **are not** marked as usable memory. They are marked
 as Kernel/Modules (type 10).
 
-Usable RAM chunks are guaranteed to be 4096 byte aligned for both base and length.
+Usable and bootloader reclaimable entries are guaranteed to be 4096 byte aligned for both base and length.
 
 The entries are guaranteed to be sorted by base address, lowest to highest.
 
-Usable RAM chunks are guaranteed not to overlap with any other entry.
+Usable and bootloader reclaimable entries are guaranteed not to overlap with any other entry.
 
-To the contrary, all non-usable RAM chunks are not guaranteed any alignment, nor
-is it guaranteed that they do not overlap each other (except usable RAM).
+To the contrary, all non-usable entries (including kernel/modules) are not guaranteed any alignment, nor
+is it guaranteed that they do not overlap other entries (except usable and bootloader reclaimable entries).
 
 ## Modules
 
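For illustration only (not part of the commit): a minimal kernel-side sketch of what the tightened guarantee buys. The entry layout, type values, and `pmm_free_range()` below are assumptions made for this sketch, not the literal stivale definitions; the point is that, once the kernel is done with the bootloader, usable and bootloader-reclaimable entries can be handed to a page-frame allocator without any re-alignment fixups.

```c
#include <stddef.h>
#include <stdint.h>

/* Illustrative entry layout and type values -- assumptions for this sketch,
   not the literal stivale structure definitions. */
struct mmap_entry {
    uint64_t base;
    uint64_t length;
    uint32_t type;
    uint32_t unused;
};

#define MMAP_USABLE                 1
#define MMAP_BOOTLOADER_RECLAIMABLE 0x1000

/* Hypothetical page-frame allocator hook. */
extern void pmm_free_range(uint64_t base, uint64_t length);

/* Per the spec change, usable and bootloader-reclaimable entries are
   4096-byte aligned in both base and length, so whole entries can be
   released as page frames with no ALIGN_UP/ALIGN_DOWN fixups. */
void reclaim_memory(const struct mmap_entry *map, size_t count) {
    for (size_t i = 0; i < count; i++) {
        if (map[i].type != MMAP_USABLE &&
            map[i].type != MMAP_BOOTLOADER_RECLAIMABLE)
            continue;

        pmm_free_range(map[i].base, map[i].length);
    }
}
```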
STIVALE2.md | 14
@@ -124,8 +124,8 @@ before switching to its own address space, as unmarked memory areas in use by
 the bootloader may become unavailable.
 
 Once the OS is done needing the bootloader, memory map areas marked as "bootloader
-reclaimable" may be used as usable memory. These areas are not guaranteed to be
-aligned, but they are guaranteed to not overlap other sections of the memory map.
+reclaimable" may be used as usable memory. These areas are guaranteed to be
+4096-byte aligned, and they are guaranteed to not overlap other sections of the memory map.
 
 ## stivale2 header (.stivale2hdr)
 
@@ -305,14 +305,14 @@ All other values are undefined.
 The kernel and modules loaded **are not** marked as usable memory. They are marked
 as Kernel/Modules (type 0x1001).
 
-Usable RAM chunks are guaranteed to be 4096 byte aligned for both base and length.
+Usable and bootloader reclaimable entries are guaranteed to be 4096 byte aligned for both base and length.
 
 The entries are guaranteed to be sorted by base address, lowest to highest.
 
-Usable RAM chunks are guaranteed not to overlap with any other entry.
+Usable and bootloader reclaimable entries are guaranteed not to overlap with any other entry.
 
-To the contrary, all non-usable RAM chunks are not guaranteed any alignment, nor
-is it guaranteed that they do not overlap each other (except usable RAM).
+To the contrary, all non-usable entries (including kernel/modules) are not guaranteed any alignment, nor
+is it guaranteed that they do not overlap other entries (except usable and bootloader reclaimable entries).
 
 #### Framebuffer structure tag
 
limine-pxe.bin | BIN (binary file not shown)
limine.bin     | BIN (binary file not shown)
stage2.map     | BIN (binary file not shown)
@@ -11,8 +11,8 @@
 #define MEMMAP_BASE ((size_t)0x100000)
 #define MEMMAP_MAX_ENTRIES 256
 
-static struct e820_entry_t memmap[MEMMAP_MAX_ENTRIES];
-static size_t memmap_entries = 0;
+struct e820_entry_t memmap[MEMMAP_MAX_ENTRIES];
+size_t memmap_entries = 0;
 
 static const char *memmap_type(uint32_t type) {
     switch (type) {
@@ -152,13 +152,34 @@ static void sanitise_entries(bool align_entries) {
             i--;
         }
     }
+
+    // Align bootloader-reclaimable entries
+    if (align_entries) {
+        for (size_t i = 0; i < memmap_entries; i++) {
+            if (memmap[i].type != MEMMAP_BOOTLOADER_RECLAIMABLE)
+                continue;
+
+            if (!align_entry(&memmap[i].base, &memmap[i].length)) {
+                // Eradicate from memmap
+                for (size_t j = i; j < memmap_entries - 1; j++) {
+                    memmap[j] = memmap[j+1];
+                }
+                memmap_entries--;
+                i--;
+            }
+        }
+    }
 }
 
 static bool allocations_disallowed = true;
 
 struct e820_entry_t *get_memmap(size_t *entries) {
+    sanitise_entries(true);
+
     *entries = memmap_entries;
 
+    allocations_disallowed = true;
+
     return memmap;
 }
 
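`align_entry()` is called above but not shown in this hunk. The sketch below is an assumption about its semantics (shrink the entry inward to 4096-byte boundaries and report failure when nothing page-sized remains), not the actual limine implementation; a false return is what makes the loop above drop the entry from the map instead of leaving an unaligned reclaimable region.

```c
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 0x1000

/* Sketch of an align_entry()-style helper, assumed semantics only:
   shrink [base, base + length) inward to page boundaries and fail if
   the entry collapses to nothing. */
static bool align_entry_sketch(uint64_t *base, uint64_t *length) {
    uint64_t top = *base + *length;

    uint64_t new_base = (*base + (PAGE_SIZE - 1)) & ~(uint64_t)(PAGE_SIZE - 1);
    uint64_t new_top  = top & ~(uint64_t)(PAGE_SIZE - 1);

    if (new_base >= new_top)
        return false;           /* caller eradicates the entry */

    *base   = new_base;
    *length = new_top - new_base;
    return true;
}
```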
@@ -172,6 +193,8 @@ void init_memmap(void) {
     }
 
     sanitise_entries(false);
+
+    allocations_disallowed = false;
 }
 
 void *ext_mem_alloc(size_t count) {
@@ -188,6 +211,9 @@ void *ext_mem_alloc_type(size_t count, uint32_t type) {
 
 // Allocate memory top down, hopefully without bumping into kernel or modules
 void *ext_mem_alloc_aligned_type(size_t count, size_t alignment, uint32_t type) {
+    if (allocations_disallowed)
+        panic("Extended memory allocations disallowed");
+
     for (int i = memmap_entries - 1; i >= 0; i--) {
         if (memmap[i].type != 1)
             continue;
@@ -12,6 +12,9 @@
 #define MEMMAP_BOOTLOADER_RECLAIMABLE 0x1000
 #define MEMMAP_KERNEL_AND_MODULES 0x1001
 
+extern struct e820_entry_t memmap[];
+extern size_t memmap_entries;
+
 void init_memmap(void);
 struct e820_entry_t *get_memmap(size_t *entries);
 void print_memmap(struct e820_entry_t *mm, size_t size);
@@ -229,15 +229,16 @@ pagemap_t stivale_build_pagemap(bool level5pg) {
         map_page(pagemap, higher_half_base + i, i, 0x03);
     }
 
-    size_t memmap_entries;
-    struct e820_entry_t *memmap = get_memmap(&memmap_entries);
+    size_t _memmap_entries = memmap_entries;
+    struct e820_entry_t *_memmap =
+        conv_mem_alloc(_memmap_entries * sizeof(struct e820_entry_t));
+    for (size_t i = 0; i < _memmap_entries; i++)
+        _memmap[i] = memmap[i];
 
     // Map any other region of memory from the memmap
-    for (size_t i = 0; i < memmap_entries; i++) {
-        memmap = get_memmap(&memmap_entries);
-
-        uint64_t base = memmap[i].base;
-        uint64_t length = memmap[i].length;
+    for (size_t i = 0; i < _memmap_entries; i++) {
+        uint64_t base = _memmap[i].base;
+        uint64_t length = _memmap[i].length;
         uint64_t top = base + length;
 
         uint64_t aligned_base = ALIGN_DOWN(base, PAGE_SIZE);
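A note on the rewrite above: as the earlier hunks show, `get_memmap()` sanitises the map with alignment enabled and marks further extended-memory allocations as disallowed, so re-fetching the map on every loop iteration (presumably to pick up changes caused by `map_page()` allocating page tables) no longer fits. Instead the live map is snapshotted into conventional memory once and the snapshot is iterated. The helper below is an illustrative condensation of that pattern, not code from the commit; it reuses `memmap`, `memmap_entries`, `struct e820_entry_t`, and `conv_mem_alloc()` as exposed by the changes above, and assumes `conv_mem_alloc()` hands out memory whose bookkeeping is separate from `memmap[]`.

```c
#include <stddef.h>

/* Relies on the declarations exported by this commit:
   extern struct e820_entry_t memmap[]; extern size_t memmap_entries; */

/* Illustrative helper: copy the live memory map into conventional memory so
   that later allocations (e.g. page tables created by map_page()) cannot
   move or resize the entries being iterated. */
static struct e820_entry_t *snapshot_memmap(size_t *count) {
    *count = memmap_entries;

    struct e820_entry_t *snap =
        conv_mem_alloc(*count * sizeof(struct e820_entry_t));

    for (size_t i = 0; i < *count; i++)
        snap[i] = memmap[i];

    return snap;
}
```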