exec.c: refactor function flatview_add_to_dispatch()
flatview_add_to_dispatch() registers pages based on the layout of
*section*, which may look like this:

    |s|PPPPPPP|s|

where s stands for subpage and P for page.

The procedure of this function can be described as:

    - register first subpage
    - register page
    - register last subpage

This means the procedure can be simplified into these three steps
instead of a loop iteration.

This patch refactors the function into three corresponding steps and
adds some comments to clarify it.

Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Message-Id: <20190311054252.6094-1-richardw.yang@linux.intel.com>
[Paolo: move the early exit before the adjustment of remain.offset_within_*,
 otherwise int128_get64() fails when a region is 2^64 bytes long]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 494d199727
parent e6c165f364

exec.c: 42 lines changed
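The commit message describes splitting an arbitrary range into a leading
subpage, a run of whole pages, and a trailing subpage. Below is a minimal,
self-contained sketch of that arithmetic (not QEMU code; PAGE_SIZE,
split_range() and the sample range in main() are illustrative assumptions):

/*
 * Minimal sketch of the three-step split described in the commit message.
 * PAGE_SIZE, split_range() and the sample range are assumptions made for
 * illustration only; QEMU works on MemoryRegionSection and Int128 instead.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE ((uint64_t)4096)
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void split_range(uint64_t offset, uint64_t size)
{
    /* register first subpage: partial page up to the first page boundary */
    if (offset & ~PAGE_MASK) {
        uint64_t left = ((offset + PAGE_SIZE - 1) & PAGE_MASK) - offset;
        if (left > size) {
            left = size;
        }
        printf("first subpage: offset=0x%" PRIx64 " size=0x%" PRIx64 "\n",
               offset, left);
        if (left == size) {
            return;
        }
        offset += left;
        size -= left;
    }

    /* register whole pages: the page-aligned middle of the range */
    if (size >= PAGE_SIZE) {
        uint64_t pages = size & PAGE_MASK;
        printf("whole pages:   offset=0x%" PRIx64 " size=0x%" PRIx64 "\n",
               offset, pages);
        if (pages == size) {
            return;
        }
        offset += pages;
        size -= pages;
    }

    /* register last subpage: whatever partial page is left at the end */
    printf("last subpage:  offset=0x%" PRIx64 " size=0x%" PRIx64 "\n",
           offset, size);
}

int main(void)
{
    split_range(0x1800, 0x5000);   /* an |s|PPPP|s|-shaped range */
    return 0;
}

For the sample range this prints a first subpage at [0x1800, 0x2000), whole
pages at [0x2000, 0x6000) and a last subpage at [0x6000, 0x6800), mirroring
the |s|PPPPPPP|s| picture used in the commit message and in the new comment.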
@@ -1599,35 +1599,49 @@ static void register_multipage(FlatView *fv,
     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
 }
 
+/*
+ * The range in *section* may look like this:
+ *
+ *      |s|PPPPPPP|s|
+ *
+ * where s stands for subpage and P for page.
+ */
 void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
 {
-    MemoryRegionSection now = *section, remain = *section;
+    MemoryRegionSection remain = *section;
     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
 
-    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
-        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
-                        - now.offset_within_address_space;
+    /* register first subpage */
+    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
+        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
+                        - remain.offset_within_address_space;
 
+        MemoryRegionSection now = remain;
         now.size = int128_min(int128_make64(left), now.size);
         register_subpage(fv, &now);
-    } else {
-        now.size = int128_zero();
-    }
-    while (int128_ne(remain.size, now.size)) {
+        if (int128_eq(remain.size, now.size)) {
+            return;
+        }
         remain.size = int128_sub(remain.size, now.size);
         remain.offset_within_address_space += int128_get64(now.size);
         remain.offset_within_region += int128_get64(now.size);
-        now = remain;
-        if (int128_lt(remain.size, page_size)) {
-            register_subpage(fv, &now);
-        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
-            now.size = page_size;
-            register_subpage(fv, &now);
-        } else {
-            now.size = int128_and(now.size, int128_neg(page_size));
-            register_multipage(fv, &now);
-        }
     }
+
+    /* register whole pages */
+    if (int128_ge(remain.size, page_size)) {
+        MemoryRegionSection now = remain;
+        now.size = int128_and(now.size, int128_neg(page_size));
+        register_multipage(fv, &now);
+        if (int128_eq(remain.size, now.size)) {
+            return;
+        }
+        remain.size = int128_sub(remain.size, now.size);
+        remain.offset_within_address_space += int128_get64(now.size);
+        remain.offset_within_region += int128_get64(now.size);
+    }
+
+    /* register last subpage */
+    register_subpage(fv, &remain);
 }
 
 void qemu_flush_coalesced_mmio_buffer(void)
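The bracketed note from Paolo in the commit message covers a corner case: when
a step consumes the whole remaining section and that section is 2^64 bytes
long, now.size does not fit in 64 bits, so int128_get64() must not be called
on it; that is why the early return sits before the remain.offset_within_*
adjustment in the diff above. A rough standalone sketch of that constraint,
assuming GCC/Clang's unsigned __int128 as a stand-in for QEMU's Int128 and a
hand-written get64() that mirrors the fits-in-64-bits check:

/*
 * Standalone illustration (not QEMU code) of why the early return must
 * come before remain.offset_within_* is adjusted.  unsigned __int128 is
 * a GCC/Clang extension used here as a stand-in for QEMU's Int128.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors int128_get64()'s requirement that the value fit in 64 bits. */
static uint64_t get64(unsigned __int128 v)
{
    assert(v == (unsigned __int128)(uint64_t)v);
    return (uint64_t)v;
}

int main(void)
{
    /* A section spanning the whole 64-bit address space: 2^64 bytes. */
    unsigned __int128 remain_size = (unsigned __int128)1 << 64;
    unsigned __int128 now_size = remain_size;  /* this step consumed it all */

    if (remain_size == now_size) {
        /* With the early return placed first, get64(now_size) is never run. */
        printf("section fully registered, nothing left\n");
        return 0;
    }

    /* Adjusting the offsets first would need this and trip the assert. */
    printf("advance by 0x%" PRIx64 " bytes\n", get64(now_size));
    return 0;
}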