use segment rok4g and wok4g in the fast string optimizations for correctness

Stanislav Shwartsman 2015-09-28 18:37:35 +00:00
parent 256fd99d12
commit 3a563a6573
2 changed files with 134 additions and 73 deletions
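The change is the same in every fast path below: before computing a linear address, the code now checks the segment cache's 4G access bits (SegAccessROK4G for reads, SegAccessWOK4G for writes). When the bit is set, the offset is used directly as the 32-bit linear address with no limit check; this is only sound for a flat segment (base 0, 4G limit), which is evidently what the bit encodes. Otherwise the plain ROK/WOK bit and the page-window limit check are still required before translating with get_laddr32(); the `| 0xfff` rounds the offset up to the end of its 4K page, so the whole page the fast loop may touch must fit inside the segment limit. The v2h_read_byte()/v2h_write_byte() calls also switch from the raw BX_CPU_THIS_PTR user_pl field to the USER_PL macro. Below is a minimal, compilable sketch of the write-side pattern; the seg_cache struct, the flag values, and fast_write_laddr() are illustrative stand-ins, not the real Bochs definitions.

// Simplified model of the new fast-path address check (stand-in types/names).
#include <cstdint>
#include <cstdio>

typedef uint32_t bx_address;

enum {
  SegAccessWOK   = 1 << 0,  // writeable; limit must still be checked
  SegAccessWOK4G = 1 << 1,  // writeable, base 0 and 4G limit: offset == linear
};

struct seg_cache {
  unsigned   valid;         // SegAccess* bits
  bx_address base;          // segment base address
  uint32_t   limit_scaled;  // byte-granular segment limit
};

// Mirrors the pattern in the diff: a flat 4G segment skips the limit check
// entirely; anything else must pass the WOK bit and keep the whole 4K window
// (off | 0xfff) inside the limit before translating to a linear address.
static bool fast_write_laddr(const seg_cache &seg, bx_address off, bx_address &laddr)
{
  if (seg.valid & SegAccessWOK4G) {
    laddr = off;                       // identity translation, no limit check
    return true;
  }
  if (!(seg.valid & SegAccessWOK))
    return false;                      // bail out to the slow path
  if ((off | 0xfff) > seg.limit_scaled)
    return false;                      // 4K window would cross the limit
  laddr = seg.base + off;              // stand-in for get_laddr32()
  return true;
}

int main()
{
  seg_cache flat    = { SegAccessWOK | SegAccessWOK4G, 0, 0xffffffff };
  seg_cache bounded = { SegAccessWOK, 0x10000, 0x7fff };

  bx_address laddr;
  if (fast_write_laddr(flat, 0x12345678, laddr))
    std::printf("flat segment: laddr = %08x (fast path)\n", (unsigned)laddr);
  if (!fast_write_laddr(bounded, 0x8000, laddr))
    std::printf("bounded segment: slow path, window crosses the limit\n");
  return 0;
}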


@@ -38,20 +38,27 @@ Bit32u BX_CPU_C::FastRepINSW(bxInstruction_c *i, Bit32u dstOff, Bit16u port, Bit
   signed int pointerDelta;
   Bit8u *hostAddrDst;
   unsigned count;
+  bx_address laddrDst;

   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK))
-    return 0;
-  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
-    return 0;
+  if (dstSegPtr->cache.valid & SegAccessWOK4G) {
+    laddrDst = dstOff;
+  }
+  else {
+    if (!(dstSegPtr->cache.valid & SegAccessWOK))
+      return 0;
+    if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrDst = get_laddr32(BX_SEG_REG_ES, dstOff);
+  }

-  bx_address laddrDst = get_laddr32(BX_SEG_REG_ES, dstOff);
   // check that the address is word aligned
   if (laddrDst & 1) return 0;

-  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
+  hostAddrDst = v2h_write_byte(laddrDst, USER_PL);

   // Check that native host access was not vetoed for that page
   if (!hostAddrDst) return 0;
@@ -111,21 +118,27 @@ Bit32u BX_CPU_C::FastRepOUTSW(bxInstruction_c *i, unsigned srcSeg, Bit32u srcOff
   signed int pointerDelta;
   Bit8u *hostAddrSrc;
   unsigned count;
+  bx_address laddrSrc;

   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK))
-    return 0;
-  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
-    return 0;
+  if (srcSegPtr->cache.valid & SegAccessROK4G) {
+    laddrSrc = srcOff;
+  }
+  else {
+    if (!(srcSegPtr->cache.valid & SegAccessROK))
+      return 0;
+    if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrSrc = get_laddr32(srcSeg, srcOff);
+  }

-  bx_address laddrSrc = get_laddr32(srcSeg, srcOff);
   // check that the address is word aligned
   if (laddrSrc & 1) return 0;

-  hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
+  hostAddrSrc = v2h_read_byte(laddrSrc, USER_PL);

   // Check that native host access was not vetoed for that page
   if (!hostAddrSrc) return 0;


@@ -39,25 +39,36 @@ Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, Bit32u srcOff
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK))
-    return 0;
-  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK))
-    return 0;
-  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  laddrSrc = get_laddr32(srcSeg, srcOff);
+  if (srcSegPtr->cache.valid & SegAccessROK4G) {
+    laddrSrc = srcOff;
+  }
+  else {
+    if (!(srcSegPtr->cache.valid & SegAccessROK))
+      return 0;
+    if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrSrc = get_laddr32(srcSeg, srcOff);
+  }

-  hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
+  hostAddrSrc = v2h_read_byte(laddrSrc, USER_PL);
   // Check that native host access was not vetoed for that page
   if (! hostAddrSrc) return 0;

-  laddrDst = get_laddr32(dstSeg, dstOff);
+  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
+  if (dstSegPtr->cache.valid & SegAccessWOK4G) {
+    laddrDst = dstOff;
+  }
+  else {
+    if (!(dstSegPtr->cache.valid & SegAccessWOK))
+      return 0;
+    if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrDst = get_laddr32(dstSeg, dstOff);
+  }

-  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
+  hostAddrDst = v2h_write_byte(laddrDst, USER_PL);
   // Check that native host access was not vetoed for that page
   if (!hostAddrDst) return 0;
@@ -109,25 +120,36 @@ Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, Bit32u srcOff
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK))
-    return 0;
-  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK))
-    return 0;
-  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  laddrSrc = get_laddr32(srcSeg, srcOff);
+  if (srcSegPtr->cache.valid & SegAccessROK4G) {
+    laddrSrc = srcOff;
+  }
+  else {
+    if (!(srcSegPtr->cache.valid & SegAccessROK))
+      return 0;
+    if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrSrc = get_laddr32(srcSeg, srcOff);
+  }

-  hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
+  hostAddrSrc = v2h_read_byte(laddrSrc, USER_PL);
   // Check that native host access was not vetoed for that page
   if (! hostAddrSrc) return 0;

-  laddrDst = get_laddr32(dstSeg, dstOff);
+  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
+  if (dstSegPtr->cache.valid & SegAccessWOK4G) {
+    laddrDst = dstOff;
+  }
+  else {
+    if (!(dstSegPtr->cache.valid & SegAccessWOK))
+      return 0;
+    if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrDst = get_laddr32(dstSeg, dstOff);
+  }

-  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
+  hostAddrDst = v2h_write_byte(laddrDst, USER_PL);
   // Check that native host access was not vetoed for that page
   if (!hostAddrDst) return 0;
@@ -182,25 +204,36 @@ Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, Bit32u srcOff
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK))
-    return 0;
-  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK))
-    return 0;
-  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  laddrSrc = get_laddr32(srcSeg, srcOff);
+  if (srcSegPtr->cache.valid & SegAccessROK4G) {
+    laddrSrc = srcOff;
+  }
+  else {
+    if (!(srcSegPtr->cache.valid & SegAccessROK))
+      return 0;
+    if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrSrc = get_laddr32(srcSeg, srcOff);
+  }

-  hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
+  hostAddrSrc = v2h_read_byte(laddrSrc, USER_PL);
   // Check that native host access was not vetoed for that page
   if (! hostAddrSrc) return 0;

-  laddrDst = get_laddr32(dstSeg, dstOff);
+  bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
+  if (dstSegPtr->cache.valid & SegAccessWOK4G) {
+    laddrDst = dstOff;
+  }
+  else {
+    if (!(dstSegPtr->cache.valid & SegAccessWOK))
+      return 0;
+    if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrDst = get_laddr32(dstSeg, dstOff);
+  }

-  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
+  hostAddrDst = v2h_write_byte(laddrDst, USER_PL);
   // Check that native host access was not vetoed for that page
   if (!hostAddrDst) return 0;
@@ -255,14 +288,19 @@ Bit32u BX_CPU_C::FastRepSTOSB(bxInstruction_c *i, unsigned dstSeg, Bit32u dstOff
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK))
-    return 0;
-  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  laddrDst = get_laddr32(dstSeg, dstOff);
+  if (dstSegPtr->cache.valid & SegAccessWOK4G) {
+    laddrDst = dstOff;
+  }
+  else {
+    if (!(dstSegPtr->cache.valid & SegAccessWOK))
+      return 0;
+    if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrDst = get_laddr32(dstSeg, dstOff);
+  }

-  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
+  hostAddrDst = v2h_write_byte(laddrDst, USER_PL);
   // Check that native host access was not vetoed for that page
   if (!hostAddrDst) return 0;
@@ -309,14 +347,19 @@ Bit32u BX_CPU_C::FastRepSTOSW(bxInstruction_c *i, unsigned dstSeg, Bit32u dstOff
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK))
-    return 0;
-  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  laddrDst = get_laddr32(dstSeg, dstOff);
+  if (dstSegPtr->cache.valid & SegAccessWOK4G) {
+    laddrDst = dstOff;
+  }
+  else {
+    if (!(dstSegPtr->cache.valid & SegAccessWOK))
+      return 0;
+    if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrDst = get_laddr32(dstSeg, dstOff);
+  }

-  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
+  hostAddrDst = v2h_write_byte(laddrDst, USER_PL);
   // Check that native host access was not vetoed for that page
   if (!hostAddrDst) return 0;
@@ -365,14 +408,19 @@ Bit32u BX_CPU_C::FastRepSTOSD(bxInstruction_c *i, unsigned dstSeg, Bit32u dstOff
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK))
-    return 0;
-  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
-    return 0;
-
-  laddrDst = get_laddr32(dstSeg, dstOff);
+  if (dstSegPtr->cache.valid & SegAccessWOK4G) {
+    laddrDst = dstOff;
+  }
+  else {
+    if (!(dstSegPtr->cache.valid & SegAccessWOK))
+      return 0;
+    if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
+      return 0;
+
+    laddrDst = get_laddr32(dstSeg, dstOff);
+  }

-  hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
+  hostAddrDst = v2h_write_byte(laddrDst, USER_PL);
   // Check that native host access was not vetoed for that page
   if (!hostAddrDst) return 0;
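For these fast paths to stay correct, the *4G bits must be set only when offset-to-linear translation is an identity, i.e. the cached descriptor has base 0 and a byte-granular limit of 0xFFFFFFFF. Below is a hedged sketch of how such bits could be maintained when the segment cache is revalidated; update_access_flags() and its layout are hypothetical, not the actual Bochs routine.

// Hypothetical helper: recompute the SegAccess* bits after a segment load.
#include <cstdint>

enum {
  SegAccessROK   = 1 << 0,
  SegAccessWOK   = 1 << 1,
  SegAccessROK4G = 1 << 2,
  SegAccessWOK4G = 1 << 3,
};

struct seg_cache {
  unsigned valid;
  uint32_t base;
  uint32_t limit_scaled;
};

// The *4G bits are granted only for a flat segment, where no offset can
// exceed the limit and the base adds nothing, so laddr = offset is safe.
static void update_access_flags(seg_cache &seg, bool readable, bool writeable)
{
  seg.valid &= ~(SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G);
  if (readable)  seg.valid |= SegAccessROK;
  if (writeable) seg.valid |= SegAccessWOK;
  bool flat = (seg.base == 0) && (seg.limit_scaled == 0xffffffff);
  if (flat) {
    if (readable)  seg.valid |= SegAccessROK4G;
    if (writeable) seg.valid |= SegAccessWOK4G;
  }
}

int main()
{
  seg_cache ss = { 0, 0, 0xffffffff };
  update_access_flags(ss, true, true);   // flat and writeable -> WOK4G set
  return (ss.valid & SegAccessWOK4G) ? 0 : 1;
}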