tcg: Init TB's direct jumps before making it visible

Initialize the TB's direct jump list data fields and reset its jumps
before tb_link_page() inserts it into the physical hash table and the
physical page list, so the TB is completely initialized before it
becomes visible.

This is a pure rearrangement of code to a more suitable place, though
it could also serve as preparation for relaxing the locking scheme in
the future.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Authored by Sergey Fedorov on 2016-03-22 19:00:12 +03:00, committed by Richard Henderson
parent e90d96b158
commit 901bc3deb4
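
Editor's note: the sketch below (not QEMU code) illustrates the ordering this
commit enforces: fully initialize a translation block's jump-list fields
first, and only then publish the block through a lookup structure. The struct
and helper names (block_t, hash_insert, block_finalize_and_link) and the hash
table are illustrative assumptions, not QEMU APIs.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct block {
    uintptr_t jmp_list_first;   /* head of the list of incoming jumps */
    uintptr_t jmp_list_next[2]; /* links for the two outgoing jump slots */
    struct block *hash_next;    /* chaining inside a hash bucket */
} block_t;

#define HASH_BUCKETS 1024
static block_t *hash_table[HASH_BUCKETS];

static void hash_insert(block_t *b, unsigned h)
{
    /* Publication point: after these stores the block is reachable by
     * anyone walking the bucket (under the locking scheme described in
     * the commit message). */
    b->hash_next = hash_table[h % HASH_BUCKETS];
    hash_table[h % HASH_BUCKETS] = b;
}

static void block_finalize_and_link(block_t *b, unsigned h)
{
    /* 1. Initialize every field, mirroring the jump-list setup that the
     *    patch moves from tb_link_page() into tb_gen_code(). */
    assert(((uintptr_t)b & 3) == 0);
    b->jmp_list_first = (uintptr_t)b | 2;
    b->jmp_list_next[0] = (uintptr_t)NULL;
    b->jmp_list_next[1] = (uintptr_t)NULL;

    /* 2. Only then make the block visible. */
    hash_insert(b, h);
}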


@@ -1134,19 +1134,6 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
         tb->page_addr[1] = -1;
     }
 
-    assert(((uintptr_t)tb & 3) == 0);
-    tb->jmp_list_first = (uintptr_t)tb | 2;
-    tb->jmp_list_next[0] = (uintptr_t)NULL;
-    tb->jmp_list_next[1] = (uintptr_t)NULL;
-
-    /* init original jump addresses */
-    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
-        tb_reset_jump(tb, 0);
-    }
-    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
-        tb_reset_jump(tb, 1);
-    }
-
 #ifdef DEBUG_TB_CHECK
     tb_page_check();
 #endif
@@ -1255,12 +1242,31 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                  CODE_GEN_ALIGN);
 
+    /* init jump list */
+    assert(((uintptr_t)tb & 3) == 0);
+    tb->jmp_list_first = (uintptr_t)tb | 2;
+    tb->jmp_list_next[0] = (uintptr_t)NULL;
+    tb->jmp_list_next[1] = (uintptr_t)NULL;
+
+    /* init original jump addresses which have been set during tcg_gen_code() */
+    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+        tb_reset_jump(tb, 0);
+    }
+    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+        tb_reset_jump(tb, 1);
+    }
+
     /* check next page if needed */
     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
     phys_page2 = -1;
     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
         phys_page2 = get_page_addr_code(env, virt_page2);
     }
+    /* As long as consistency of the TB stuff is provided by tb_lock in user
+     * mode and is implicit in single-threaded softmmu emulation, no explicit
+     * memory barrier is required before tb_link_page() makes the TB visible
+     * through the physical hash table and physical page list.
+     */
     tb_link_page(tb, phys_pc, phys_page2);
     return tb;
 }
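
Editor's note: the commit message and the comment added before tb_link_page()
note that no memory barrier is needed yet, because tb_lock (user mode) or
single-threaded execution (softmmu) already provides the ordering. The sketch
below is a hedged illustration, using C11 atomics, of how the publish step
could be ordered if that assumption were relaxed; the names (node_t,
bucket_head, publish, lookup) are hypothetical and this is not QEMU's actual
MTTCG code.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct node {
    uintptr_t jmp_list_first;      /* plain fields, written before publish */
    uintptr_t jmp_list_next[2];
    struct node *_Atomic next;     /* bucket chaining */
} node_t;

static node_t *_Atomic bucket_head;

static void publish(node_t *n)
{
    /* Initialize all fields first, as the patch does in tb_gen_code()... */
    n->jmp_list_first = (uintptr_t)n | 2;
    n->jmp_list_next[0] = (uintptr_t)NULL;
    n->jmp_list_next[1] = (uintptr_t)NULL;

    /* ...then publish with release ordering, so a reader that observes the
     * new head also observes the initialized fields. */
    node_t *old = atomic_load_explicit(&bucket_head, memory_order_relaxed);
    do {
        atomic_store_explicit(&n->next, old, memory_order_relaxed);
    } while (!atomic_compare_exchange_weak_explicit(&bucket_head, &old, n,
                                                    memory_order_release,
                                                    memory_order_relaxed));
}

static node_t *lookup(void)
{
    /* Readers pair the release store with an acquire load. */
    return atomic_load_explicit(&bucket_head, memory_order_acquire);
}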