ppc patch queue for 2016-02-18

Merge commit 339b665c88: merge remote-tracking branch
'remotes/dgibson/tags/ppc-for-2.6-20160218' into staging.

Currently accumulated patches for target-ppc, the pseries machine type and
related devices:

 * Some cleanups to the management of SDR1 and the hashed page table
 * Implementations of a number of simple PAPR hypercalls
 * Significant improvements to the Macintosh CUDA device
 * Several bugfixes

# gpg: Signature made Thu 18 Feb 2016 04:16:51 GMT using RSA key ID 20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.6-20160218: (26 commits)
  hw/ppc/spapr: Halt CPU when powering off via RTAS call
  pseries: Include missing pseries-2.5 compat properties in pseries-2.4
  cuda: remove CUDA_GET_SET_IIC/CUDA_COMBINED_FORMAT_IIC commands
  cuda: remove GET_6805_ADDR command
  cuda: port SET_TIME command to new framework
  cuda: port GET_TIME command to new framework
  cuda: port SET_POWER_MESSAGES command to new framework
  cuda: port FILE_SERVER_FLAG command to new framework
  cuda: port RESET_SYSTEM command to new framework
  cuda: port POWERDOWN command to new framework
  cuda: port SET_DEVICE_LIST command to new framework
  cuda: port SET_AUTO_RATE command to new framework
  cuda: port AUTOPOLL command to new framework
  cuda: move unknown commands reject out of switch
  cuda: add a framework to handle commands
  hw/ppc/spapr: Implement the h_set_xdabr hypercall
  hw/ppc/spapr: Implement h_set_dabr
  hw/ppc/spapr: Add h_set_sprg0 hypercall
  migration: ensure htab_save_first completes after timeout
  target-ppc: Remove hack for ppc_hash64_load_hpte*() with HV KVM
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
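The bulk of the CUDA rework below ("cuda: add a framework to handle commands") replaces a large switch statement with a table of command descriptors plus one dispatch loop. The following sketch is an illustrative, self-contained reduction of that pattern, not the QEMU code itself: the Cmd type, the ECHO command and the demo_ names are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* A command descriptor: opcode, human-readable name, and a handler that
 * validates its input length and optionally fills a reply buffer. */
typedef struct Cmd {
    uint8_t command;
    const char *name;
    bool (*handler)(const uint8_t *in, int in_len, uint8_t *out, int *out_len);
} Cmd;

static bool demo_echo(const uint8_t *in, int in_len, uint8_t *out, int *out_len)
{
    if (in_len != 1) {
        return false;           /* wrong parameters */
    }
    out[0] = in[0];
    *out_len = 1;
    return true;
}

static const Cmd handlers[] = {
    { 0x01, "ECHO", demo_echo },
};

/* Dispatch: look the opcode up in the table.  Unknown opcodes and bad
 * parameters are rejected in one central place instead of per-case code. */
static void dispatch(const uint8_t *data, int len)
{
    for (size_t i = 0; i < ARRAY_SIZE(handlers); i++) {
        if (handlers[i].command == data[0]) {
            uint8_t reply[16];
            int out_len = 0;
            if (handlers[i].handler(data + 1, len - 1, reply, &out_len)) {
                printf("%s: ok, %d reply byte(s)\n", handlers[i].name, out_len);
            } else {
                printf("%s: bad parameters\n", handlers[i].name);
            }
            return;
        }
    }
    printf("unknown command 0x%02x\n", data[0]);
}

int main(void)
{
    const uint8_t good[] = { 0x01, 0x42 };
    const uint8_t unknown[] = { 0x7f };
    dispatch(good, 2);
    dispatch(unknown, 1);
    return 0;
}

The cuda.c hunks below follow the same shape: each cuda_cmd_* handler checks in_len, and the error handling for unknown commands and bad parameters lives in two branches of the dispatcher.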
hw/input/adb.c

@@ -89,7 +89,7 @@ int adb_request(ADBBusState *s, uint8_t *obuf, const uint8_t *buf, int len)
 }
 
 /* XXX: move that to cuda ? */
-int adb_poll(ADBBusState *s, uint8_t *obuf)
+int adb_poll(ADBBusState *s, uint8_t *obuf, uint16_t poll_mask)
 {
     ADBDevice *d;
     int olen, i;

@@ -100,13 +100,15 @@ int adb_poll(ADBBusState *s, uint8_t *obuf)
         if (s->poll_index >= s->nb_devices)
             s->poll_index = 0;
         d = s->devices[s->poll_index];
-        buf[0] = ADB_READREG | (d->devaddr << 4);
-        olen = adb_request(s, obuf + 1, buf, 1);
-        /* if there is data, we poll again the same device */
-        if (olen > 0) {
-            obuf[0] = buf[0];
-            olen++;
-            break;
+        if ((1 << d->devaddr) & poll_mask) {
+            buf[0] = ADB_READREG | (d->devaddr << 4);
+            olen = adb_request(s, obuf + 1, buf, 1);
+            /* if there is data, we poll again the same device */
+            if (olen > 0) {
+                obuf[0] = buf[0];
+                olen++;
+                break;
+            }
         }
         s->poll_index++;
     }
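adb_poll() now takes a 16-bit mask with one bit per ADB device address; only devices whose bit is set are polled (CUDA initialises the mask to 0xffff, and SET_DEVICE_LIST narrows it). Below is a hedged illustration of how such a mask can be built and tested. The device addresses 2 (keyboard) and 3 (mouse) are the usual ADB defaults and are used here only as an example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Build a poll mask covering only the listed ADB device addresses. */
static uint16_t adb_mask_for(const int *addrs, int count)
{
    uint16_t mask = 0;
    for (int i = 0; i < count; i++) {
        mask |= (uint16_t)(1 << addrs[i]);
    }
    return mask;
}

/* The same per-device test the new adb_poll() performs. */
static bool should_poll(uint16_t poll_mask, int devaddr)
{
    return (1 << devaddr) & poll_mask;
}

int main(void)
{
    const int devices[] = { 2, 3 };     /* e.g. keyboard at 2, mouse at 3 */
    uint16_t mask = adb_mask_for(devices, 2);

    printf("mask = 0x%04x\n", mask);                    /* prints 0x000c */
    printf("poll addr 3? %d\n", should_poll(mask, 3));  /* prints 1 */
    printf("poll addr 7? %d\n", should_poll(mask, 7));  /* prints 0 */
    return 0;
}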
hw/misc/macio/cuda.c

@@ -106,7 +106,6 @@
 #define CUDA_COMBINED_FORMAT_IIC 0x25
 
 #define CUDA_TIMER_FREQ (4700000 / 6)
-#define CUDA_ADB_POLL_FREQ 50
 
 /* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
 #define RTC_OFFSET 2082844800

@@ -524,7 +523,7 @@ static void cuda_adb_poll(void *opaque)
     uint8_t obuf[ADB_MAX_OUT_LEN + 2];
     int olen;
 
-    olen = adb_poll(&s->adb_bus, obuf + 2);
+    olen = adb_poll(&s->adb_bus, obuf + 2, s->adb_poll_mask);
     if (olen > 0) {
         obuf[0] = ADB_PACKET;
         obuf[1] = 0x40; /* polled data */

@@ -532,87 +531,213 @@ static void cuda_adb_poll(void *opaque)
     }
     timer_mod(s->adb_poll_timer,
               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-              (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
+              (get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
 }
 
+/* description of commands */
+typedef struct CudaCommand {
+    uint8_t command;
+    const char *name;
+    bool (*handler)(CUDAState *s,
+                    const uint8_t *in_args, int in_len,
+                    uint8_t *out_args, int *out_len);
+} CudaCommand;
+
+static bool cuda_cmd_autopoll(CUDAState *s,
+                              const uint8_t *in_data, int in_len,
+                              uint8_t *out_data, int *out_len)
+{
+    int autopoll;
+
+    if (in_len != 1) {
+        return false;
+    }
+
+    autopoll = (in_data[0] != 0);
+    if (autopoll != s->autopoll) {
+        s->autopoll = autopoll;
+        if (autopoll) {
+            timer_mod(s->adb_poll_timer,
+                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                      (get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
+        } else {
+            timer_del(s->adb_poll_timer);
+        }
+    }
+    return true;
+}
+
+static bool cuda_cmd_set_autorate(CUDAState *s,
+                                  const uint8_t *in_data, int in_len,
+                                  uint8_t *out_data, int *out_len)
+{
+    if (in_len != 1) {
+        return false;
+    }
+
+    /* we don't want a period of 0 ms */
+    /* FIXME: check what real hardware does */
+    if (in_data[0] == 0) {
+        return false;
+    }
+
+    s->autopoll_rate_ms = in_data[0];
+    if (s->autopoll) {
+        timer_mod(s->adb_poll_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                  (get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
+    }
+    return true;
+}
+
+static bool cuda_cmd_set_device_list(CUDAState *s,
+                                     const uint8_t *in_data, int in_len,
+                                     uint8_t *out_data, int *out_len)
+{
+    if (in_len != 2) {
+        return false;
+    }
+
+    s->adb_poll_mask = (((uint16_t)in_data[0]) << 8) | in_data[1];
+    return true;
+}
+
+static bool cuda_cmd_powerdown(CUDAState *s,
+                               const uint8_t *in_data, int in_len,
+                               uint8_t *out_data, int *out_len)
+{
+    if (in_len != 0) {
+        return false;
+    }
+
+    qemu_system_shutdown_request();
+    return true;
+}
+
+static bool cuda_cmd_reset_system(CUDAState *s,
+                                  const uint8_t *in_data, int in_len,
+                                  uint8_t *out_data, int *out_len)
+{
+    if (in_len != 0) {
+        return false;
+    }
+
+    qemu_system_reset_request();
+    return true;
+}
+
+static bool cuda_cmd_set_file_server_flag(CUDAState *s,
+                                          const uint8_t *in_data, int in_len,
+                                          uint8_t *out_data, int *out_len)
+{
+    if (in_len != 1) {
+        return false;
+    }
+
+    qemu_log_mask(LOG_UNIMP,
+                  "CUDA: unimplemented command FILE_SERVER_FLAG %d\n",
+                  in_data[0]);
+    return true;
+}
+
+static bool cuda_cmd_set_power_message(CUDAState *s,
+                                       const uint8_t *in_data, int in_len,
+                                       uint8_t *out_data, int *out_len)
+{
+    if (in_len != 1) {
+        return false;
+    }
+
+    qemu_log_mask(LOG_UNIMP,
+                  "CUDA: unimplemented command SET_POWER_MESSAGE %d\n",
+                  in_data[0]);
+    return true;
+}
+
+static bool cuda_cmd_get_time(CUDAState *s,
+                              const uint8_t *in_data, int in_len,
+                              uint8_t *out_data, int *out_len)
+{
+    uint32_t ti;
+
+    if (in_len != 0) {
+        return false;
+    }
+
+    ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
+                           / get_ticks_per_sec());
+    out_data[0] = ti >> 24;
+    out_data[1] = ti >> 16;
+    out_data[2] = ti >> 8;
+    out_data[3] = ti;
+    *out_len = 4;
+    return true;
+}
+
+static bool cuda_cmd_set_time(CUDAState *s,
+                              const uint8_t *in_data, int in_len,
+                              uint8_t *out_data, int *out_len)
+{
+    uint32_t ti;
+
+    if (in_len != 4) {
+        return false;
+    }
+
+    ti = (((uint32_t)in_data[1]) << 24) + (((uint32_t)in_data[2]) << 16)
+         + (((uint32_t)in_data[3]) << 8) + in_data[4];
+    s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
+                           / get_ticks_per_sec());
+    return true;
+}
+
+static const CudaCommand handlers[] = {
+    { CUDA_AUTOPOLL, "AUTOPOLL", cuda_cmd_autopoll },
+    { CUDA_SET_AUTO_RATE, "SET_AUTO_RATE", cuda_cmd_set_autorate },
+    { CUDA_SET_DEVICE_LIST, "SET_DEVICE_LIST", cuda_cmd_set_device_list },
+    { CUDA_POWERDOWN, "POWERDOWN", cuda_cmd_powerdown },
+    { CUDA_RESET_SYSTEM, "RESET_SYSTEM", cuda_cmd_reset_system },
+    { CUDA_FILE_SERVER_FLAG, "FILE_SERVER_FLAG",
+      cuda_cmd_set_file_server_flag },
+    { CUDA_SET_POWER_MESSAGES, "SET_POWER_MESSAGES",
+      cuda_cmd_set_power_message },
+    { CUDA_GET_TIME, "GET_TIME", cuda_cmd_get_time },
+    { CUDA_SET_TIME, "SET_TIME", cuda_cmd_set_time },
+};
+
 static void cuda_receive_packet(CUDAState *s,
                                 const uint8_t *data, int len)
 {
     uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] };
-    int autopoll;
-    uint32_t ti;
+    int i, out_len = 0;
 
-    switch(data[0]) {
-    case CUDA_AUTOPOLL:
-        autopoll = (data[1] != 0);
-        if (autopoll != s->autopoll) {
-            s->autopoll = autopoll;
-            if (autopoll) {
-                timer_mod(s->adb_poll_timer,
-                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-                          (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
-            } else {
-                timer_del(s->adb_poll_timer);
-            }
-        }
-        cuda_send_packet_to_host(s, obuf, 3);
-        break;
-    case CUDA_GET_6805_ADDR:
-        cuda_send_packet_to_host(s, obuf, 3);
-        break;
-    case CUDA_SET_TIME:
-        ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
-        s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
-        cuda_send_packet_to_host(s, obuf, 3);
-        break;
-    case CUDA_GET_TIME:
-        ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
-        obuf[3] = ti >> 24;
-        obuf[4] = ti >> 16;
-        obuf[5] = ti >> 8;
-        obuf[6] = ti;
-        cuda_send_packet_to_host(s, obuf, 7);
-        break;
-    case CUDA_FILE_SERVER_FLAG:
-    case CUDA_SET_DEVICE_LIST:
-    case CUDA_SET_AUTO_RATE:
-    case CUDA_SET_POWER_MESSAGES:
-        cuda_send_packet_to_host(s, obuf, 3);
-        break;
-    case CUDA_POWERDOWN:
-        cuda_send_packet_to_host(s, obuf, 3);
-        qemu_system_shutdown_request();
-        break;
-    case CUDA_RESET_SYSTEM:
-        cuda_send_packet_to_host(s, obuf, 3);
-        qemu_system_reset_request();
-        break;
-    case CUDA_COMBINED_FORMAT_IIC:
-        obuf[0] = ERROR_PACKET;
-        obuf[1] = 0x5;
-        obuf[2] = CUDA_PACKET;
-        obuf[3] = data[0];
-        cuda_send_packet_to_host(s, obuf, 4);
-        break;
-    case CUDA_GET_SET_IIC:
-        if (len == 4) {
-            cuda_send_packet_to_host(s, obuf, 3);
-        } else {
-            obuf[0] = ERROR_PACKET;
-            obuf[1] = 0x2;
-            obuf[2] = CUDA_PACKET;
-            obuf[3] = data[0];
-            cuda_send_packet_to_host(s, obuf, 4);
-        }
-        break;
-    default:
-        obuf[0] = ERROR_PACKET;
-        obuf[1] = 0x2;
-        obuf[2] = CUDA_PACKET;
-        obuf[3] = data[0];
-        cuda_send_packet_to_host(s, obuf, 4);
-        break;
+    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+        const CudaCommand *desc = &handlers[i];
+        if (desc->command == data[0]) {
+            CUDA_DPRINTF("handling command %s\n", desc->name);
+            out_len = 0;
+            if (desc->handler(s, data + 1, len - 1, obuf + 3, &out_len)) {
+                cuda_send_packet_to_host(s, obuf, 3 + out_len);
+            } else {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "CUDA: %s: wrong parameters %d\n",
+                              desc->name, len);
+                obuf[0] = ERROR_PACKET;
+                obuf[1] = 0x5; /* bad parameters */
+                obuf[2] = CUDA_PACKET;
+                obuf[3] = data[0];
+                cuda_send_packet_to_host(s, obuf, 4);
+            }
+            return;
+        }
     }
+
+    qemu_log_mask(LOG_GUEST_ERROR, "CUDA: unknown command 0x%02x\n", data[0]);
+    obuf[0] = ERROR_PACKET;
+    obuf[1] = 0x2; /* unknown command */
+    obuf[2] = CUDA_PACKET;
+    obuf[3] = data[0];
+    cuda_send_packet_to_host(s, obuf, 4);
 }
 
 static void cuda_receive_packet_from_host(CUDAState *s,

@@ -710,8 +835,8 @@ static const VMStateDescription vmstate_cuda_timer = {
 
 static const VMStateDescription vmstate_cuda = {
     .name = "cuda",
-    .version_id = 3,
-    .minimum_version_id = 3,
+    .version_id = 4,
+    .minimum_version_id = 4,
     .fields = (VMStateField[]) {
         VMSTATE_UINT8(a, CUDAState),
         VMSTATE_UINT8(b, CUDAState),

@@ -729,6 +854,8 @@ static const VMStateDescription vmstate_cuda = {
         VMSTATE_INT32(data_in_index, CUDAState),
         VMSTATE_INT32(data_out_index, CUDAState),
         VMSTATE_UINT8(autopoll, CUDAState),
+        VMSTATE_UINT8(autopoll_rate_ms, CUDAState),
+        VMSTATE_UINT16(adb_poll_mask, CUDAState),
         VMSTATE_BUFFER(data_in, CUDAState),
         VMSTATE_BUFFER(data_out, CUDAState),
         VMSTATE_UINT32(tick_offset, CUDAState),

@@ -782,6 +909,8 @@ static void cuda_realizefn(DeviceState *dev, Error **errp)
     s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
 
     s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
+    s->autopoll_rate_ms = 20;
+    s->adb_poll_mask = 0xffff;
 }
 
 static void cuda_initfn(Object *obj)
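The cuda.c hunk above adds two fields to the migrated device state and bumps vmstate_cuda from version 3 to 4, raising minimum_version_id as well so that older migration streams are refused rather than mis-parsed. A minimal sketch of that pattern follows; DemoState and its fields are hypothetical stand-ins for CUDAState, and the fragment would only compile inside the QEMU tree where the VMState macros live.

/* Sketch only: mirrors the vmstate_cuda change, not the real device. */
#include "migration/vmstate.h"

typedef struct DemoState {
    uint8_t autopoll;
    uint8_t autopoll_rate_ms;   /* newly migrated field */
    uint16_t adb_poll_mask;     /* newly migrated field */
} DemoState;

static const VMStateDescription vmstate_demo = {
    .name = "demo",
    /* New fields on the wire => bump the version; raising
     * minimum_version_id too rejects streams from older QEMUs outright. */
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(autopoll, DemoState),
        VMSTATE_UINT8(autopoll_rate_ms, DemoState),
        VMSTATE_UINT16(adb_poll_mask, DemoState),
        VMSTATE_END_OF_LIST()
    }
};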
hw/nvram/mac_nvram.c

@@ -49,7 +49,8 @@ static void macio_nvram_writeb(void *opaque, hwaddr addr,
 
     addr = (addr >> s->it_shift) & (s->size - 1);
     s->data[addr] = value;
-    NVR_DPRINTF("writeb addr %04" PHYS_PRIx " val %" PRIx64 "\n", addr, value);
+    NVR_DPRINTF("writeb addr %04" HWADDR_PRIx " val %" PRIx64 "\n",
+                addr, value);
 }
 
 static uint64_t macio_nvram_readb(void *opaque, hwaddr addr,

@@ -60,7 +61,8 @@ static uint64_t macio_nvram_readb(void *opaque, hwaddr addr,
 
     addr = (addr >> s->it_shift) & (s->size - 1);
     value = s->data[addr];
-    NVR_DPRINTF("readb addr %04x val %x\n", (int)addr, value);
+    NVR_DPRINTF("readb addr %04" HWADDR_PRIx " val %" PRIx32 "\n",
+                addr, value);
 
     return value;
 }
hw/pci-host/uninorth.c

@@ -120,7 +120,7 @@ static void unin_data_write(void *opaque, hwaddr addr,
 {
     UNINState *s = opaque;
     PCIHostState *phb = PCI_HOST_BRIDGE(s);
-    UNIN_DPRINTF("write addr %" TARGET_FMT_plx " len %d val %"PRIx64"\n",
+    UNIN_DPRINTF("write addr " TARGET_FMT_plx " len %d val %"PRIx64"\n",
                  addr, len, val);
     pci_data_write(phb->bus,
                    unin_get_config_reg(phb->config_reg, addr),

@@ -137,7 +137,7 @@ static uint64_t unin_data_read(void *opaque, hwaddr addr,
     val = pci_data_read(phb->bus,
                         unin_get_config_reg(phb->config_reg, addr),
                         len);
-    UNIN_DPRINTF("read addr %" TARGET_FMT_plx " len %d val %x\n",
+    UNIN_DPRINTF("read addr " TARGET_FMT_plx " len %d val %x\n",
                  addr, len, val);
     return val;
 }
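The two debug-print fixes above are about format-string macros: TARGET_FMT_plx and HWADDR_PRIx already carry (or expand next to) the % conversion, so writing an extra "%" in front of them, or using plain %x for a wide type, produces a malformed format. The snippet below shows the underlying string-concatenation idiom using only standard C99 inttypes.h, so it stands alone outside QEMU; QEMU's HWADDR_PRIx works the same way as PRIx64 here.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0x1234;
    uint64_t val  = 0xdeadbeef;

    /* Adjacent string literals are concatenated at compile time, so the
     * macro supplies the correct length modifier for the type; there is
     * no stray % and no 32/64-bit mismatch. */
    printf("writeb addr %04" PRIx64 " val %" PRIx64 "\n", addr, val);
    return 0;
}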
include/hw/misc/macio/cuda.h

@@ -111,6 +111,8 @@ typedef struct CUDAState {
     int data_out_index;
 
     qemu_irq irq;
+    uint16_t adb_poll_mask;
+    uint8_t autopoll_rate_ms;
     uint8_t autopoll;
     uint8_t data_in[128];
     uint8_t data_out[16];
hw/ppc/spapr.c

@@ -1024,84 +1024,94 @@ static void emulate_spapr_hypercall(PowerPCCPU *cpu)
 #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
 #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
 
-static void spapr_alloc_htab(sPAPRMachineState *spapr)
-{
-    long shift;
-    int index;
-
-    /* allocate hash page table. For now we always make this 16mb,
-     * later we should probably make it scale to the size of guest
-     * RAM */
-
-    shift = kvmppc_reset_htab(spapr->htab_shift);
-    if (shift < 0) {
-        /*
-         * For HV KVM, host kernel will return -ENOMEM when requested
-         * HTAB size can't be allocated.
-         */
-        error_setg(&error_abort, "Failed to allocate HTAB of requested size, try with smaller maxmem");
-    } else if (shift > 0) {
-        /*
-         * Kernel handles htab, we don't need to allocate one
-         *
-         * Older kernels can fall back to lower HTAB shift values,
-         * but we don't allow booting of such guests.
-         */
-        if (shift != spapr->htab_shift) {
-            error_setg(&error_abort, "Failed to allocate HTAB of requested size, try with smaller maxmem");
-        }
-
-        kvmppc_kern_htab = true;
-    } else {
-        /* Allocate htab */
-        spapr->htab = qemu_memalign(HTAB_SIZE(spapr), HTAB_SIZE(spapr));
-
-        /* And clear it */
-        memset(spapr->htab, 0, HTAB_SIZE(spapr));
-
-        for (index = 0; index < HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; index++) {
-            DIRTY_HPTE(HPTE(spapr->htab, index));
-        }
-    }
-}
-
-/*
- * Clear HTAB entries during reset.
- *
- * If host kernel has allocated HTAB, KVM_PPC_ALLOCATE_HTAB ioctl is
- * used to clear HTAB. Otherwise QEMU-allocated HTAB is cleared manually.
- */
-static void spapr_reset_htab(sPAPRMachineState *spapr)
-{
-    long shift;
-    int index;
-
-    shift = kvmppc_reset_htab(spapr->htab_shift);
-    if (shift < 0) {
-        error_setg(&error_abort, "Failed to reset HTAB");
-    } else if (shift > 0) {
-        if (shift != spapr->htab_shift) {
-            error_setg(&error_abort, "Requested HTAB allocation failed during reset");
-        }
-
-        /* Tell readers to update their file descriptor */
-        if (spapr->htab_fd >= 0) {
-            spapr->htab_fd_stale = true;
-        }
-    } else {
-        memset(spapr->htab, 0, HTAB_SIZE(spapr));
-
-        for (index = 0; index < HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; index++) {
-            DIRTY_HPTE(HPTE(spapr->htab, index));
-        }
-    }
-
-    /* Update the RMA size if necessary */
-    if (spapr->vrma_adjust) {
-        spapr->rma_size = kvmppc_rma_size(spapr_node0_size(),
-                                          spapr->htab_shift);
-    }
-}
+/*
+ * Get the fd to access the kernel htab, re-opening it if necessary
+ */
+static int get_htab_fd(sPAPRMachineState *spapr)
+{
+    if (spapr->htab_fd >= 0) {
+        return spapr->htab_fd;
+    }
+
+    spapr->htab_fd = kvmppc_get_htab_fd(false);
+    if (spapr->htab_fd < 0) {
+        error_report("Unable to open fd for reading hash table from KVM: %s",
+                     strerror(errno));
+    }
+
+    return spapr->htab_fd;
+}
+
+static void close_htab_fd(sPAPRMachineState *spapr)
+{
+    if (spapr->htab_fd >= 0) {
+        close(spapr->htab_fd);
+    }
+    spapr->htab_fd = -1;
+}
+
+static int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
+{
+    int shift;
+
+    /* We aim for a hash table of size 1/128 the size of RAM (rounded
+     * up). The PAPR recommendation is actually 1/64 of RAM size, but
+     * that's much more than is needed for Linux guests */
+    shift = ctz64(pow2ceil(ramsize)) - 7;
+    shift = MAX(shift, 18); /* Minimum architected size */
+    shift = MIN(shift, 46); /* Maximum architected size */
+    return shift;
+}
+
+static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
+                                 Error **errp)
+{
+    long rc;
+
+    /* Clean up any HPT info from a previous boot */
+    g_free(spapr->htab);
+    spapr->htab = NULL;
+    spapr->htab_shift = 0;
+    close_htab_fd(spapr);
+
+    rc = kvmppc_reset_htab(shift);
+    if (rc < 0) {
+        /* kernel-side HPT needed, but couldn't allocate one */
+        error_setg_errno(errp, errno,
+                         "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
+                         shift);
+        /* This is almost certainly fatal, but if the caller really
+         * wants to carry on with shift == 0, it's welcome to try */
+    } else if (rc > 0) {
+        /* kernel-side HPT allocated */
+        if (rc != shift) {
+            error_setg(errp,
+                       "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
+                       shift, rc);
+        }
+
+        spapr->htab_shift = shift;
+        kvmppc_kern_htab = true;
+    } else {
+        /* kernel-side HPT not needed, allocate in userspace instead */
+        size_t size = 1ULL << shift;
+        int i;
+
+        spapr->htab = qemu_memalign(size, size);
+        if (!spapr->htab) {
+            error_setg_errno(errp, errno,
+                             "Could not allocate HPT of order %d", shift);
+            return;
+        }
+
+        memset(spapr->htab, 0, size);
+        spapr->htab_shift = shift;
+        kvmppc_kern_htab = false;
+
+        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
+            DIRTY_HPTE(HPTE(spapr->htab, i));
+        }
+    }
+}
 
 static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)

@@ -1121,39 +1131,26 @@ static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
     return 0;
 }
 
-/*
- * A guest reset will cause spapr->htab_fd to become stale if being used.
- * Reopen the file descriptor to make sure the whole HTAB is properly read.
- */
-static int spapr_check_htab_fd(sPAPRMachineState *spapr)
-{
-    int rc = 0;
-
-    if (spapr->htab_fd_stale) {
-        close(spapr->htab_fd);
-        spapr->htab_fd = kvmppc_get_htab_fd(false);
-        if (spapr->htab_fd < 0) {
-            error_report("Unable to open fd for reading hash table from KVM: "
-                         "%s", strerror(errno));
-            rc = -1;
-        }
-        spapr->htab_fd_stale = false;
-    }
-
-    return rc;
-}
-
 static void ppc_spapr_reset(void)
 {
-    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+    MachineState *machine = MACHINE(qdev_get_machine());
+    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
     PowerPCCPU *first_ppc_cpu;
     uint32_t rtas_limit;
 
     /* Check for unknown sysbus devices */
     foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
 
-    /* Reset the hash table & recalc the RMA */
-    spapr_reset_htab(spapr);
+    /* Allocate and/or reset the hash page table */
+    spapr_reallocate_hpt(spapr,
+                         spapr_hpt_shift_for_ramsize(machine->maxram_size),
+                         &error_fatal);
+
+    /* Update the RMA size if necessary */
+    if (spapr->vrma_adjust) {
+        spapr->rma_size = kvmppc_rma_size(spapr_node0_size(),
+                                          spapr->htab_shift);
+    }
 
     qemu_devices_reset();
 

@@ -1200,13 +1197,6 @@ static void spapr_cpu_reset(void *opaque)
     env->spr[SPR_HIOR] = 0;
 
     env->external_htab = (uint8_t *)spapr->htab;
-    if (kvm_enabled() && !env->external_htab) {
-        /*
-         * HV KVM, set external_htab to 1 so our ppc_hash64_load_hpte*
-         * functions do the right thing.
-         */
-        env->external_htab = (void *)1;
-    }
     env->htab_base = -1;
     /*
      * htab_mask is the mask used to normalize hash value to PTEG index.

@@ -1313,14 +1303,6 @@ static int htab_save_setup(QEMUFile *f, void *opaque)
         spapr->htab_first_pass = true;
     } else {
         assert(kvm_enabled());
-
-        spapr->htab_fd = kvmppc_get_htab_fd(false);
-        spapr->htab_fd_stale = false;
-        if (spapr->htab_fd < 0) {
-            fprintf(stderr, "Unable to open fd for reading hash table from KVM: %s\n",
-                    strerror(errno));
-            return -1;
-        }
     }
 

@@ -1330,6 +1312,7 @@ static int htab_save_setup(QEMUFile *f, void *opaque)
 static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                  int64_t max_ns)
 {
+    bool has_timeout = max_ns != -1;
     int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
     int index = spapr->htab_save_index;
     int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

@@ -1363,7 +1346,8 @@ static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
             qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                             HASH_PTE_SIZE_64 * n_valid);
 
-            if ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
+            if (has_timeout &&
+                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                 break;
             }
         }

@@ -1460,6 +1444,7 @@ static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
 static int htab_save_iterate(QEMUFile *f, void *opaque)
 {
     sPAPRMachineState *spapr = opaque;
+    int fd;
     int rc = 0;
 
     /* Iteration header */

@@ -1468,13 +1453,12 @@ static int htab_save_iterate(QEMUFile *f, void *opaque)
     if (!spapr->htab) {
         assert(kvm_enabled());
 
-        rc = spapr_check_htab_fd(spapr);
-        if (rc < 0) {
-            return rc;
+        fd = get_htab_fd(spapr);
+        if (fd < 0) {
+            return fd;
         }
 
-        rc = kvmppc_save_htab(f, spapr->htab_fd,
-                              MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
+        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
         if (rc < 0) {
             return rc;
         }

@@ -1495,6 +1479,7 @@ static int htab_save_iterate(QEMUFile *f, void *opaque)
 static int htab_save_complete(QEMUFile *f, void *opaque)
 {
     sPAPRMachineState *spapr = opaque;
+    int fd;
 
     /* Iteration header */
     qemu_put_be32(f, 0);

@@ -1504,18 +1489,20 @@ static int htab_save_complete(QEMUFile *f, void *opaque)
 
         assert(kvm_enabled());
 
-        rc = spapr_check_htab_fd(spapr);
-        if (rc < 0) {
-            return rc;
+        fd = get_htab_fd(spapr);
+        if (fd < 0) {
+            return fd;
         }
 
-        rc = kvmppc_save_htab(f, spapr->htab_fd, MAX_KVM_BUF_SIZE, -1);
+        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
         if (rc < 0) {
             return rc;
         }
-        close(spapr->htab_fd);
-        spapr->htab_fd = -1;
+        close_htab_fd(spapr);
     } else {
         if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
         }
         htab_save_later_pass(f, spapr, -1);
     }
 

@@ -1541,10 +1528,12 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
     section_hdr = qemu_get_be32(f);
 
     if (section_hdr) {
-        /* First section, just the hash shift */
-        if (spapr->htab_shift != section_hdr) {
-            error_report("htab_shift mismatch: source %d target %d",
-                         section_hdr, spapr->htab_shift);
+        Error *local_err;
+
+        /* First section gives the htab size */
+        spapr_reallocate_hpt(spapr, section_hdr, &local_err);
+        if (local_err) {
+            error_report_err(local_err);
             return -EINVAL;
         }
         return 0;

@@ -1797,18 +1786,6 @@ static void ppc_spapr_init(MachineState *machine)
     /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
     load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
 
-    /* We aim for a hash table of size 1/128 the size of RAM. The
-     * normal rule of thumb is 1/64 the size of RAM, but that's much
-     * more than needed for the Linux guests we support. */
-    spapr->htab_shift = 18; /* Minimum architected size */
-    while (spapr->htab_shift <= 46) {
-        if ((1ULL << (spapr->htab_shift + 7)) >= machine->maxram_size) {
-            break;
-        }
-        spapr->htab_shift++;
-    }
-    spapr_alloc_htab(spapr);
-
     /* Set up Interrupt Controller before we create the VCPUs */
     spapr->icp = xics_system_init(machine,
                                   DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(),

@@ -2125,6 +2102,9 @@ static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
 
 static void spapr_machine_initfn(Object *obj)
 {
+    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
+
+    spapr->htab_fd = -1;
     object_property_add_str(obj, "kvm-type",
                             spapr_get_kvm_type, spapr_set_kvm_type, NULL);
     object_property_set_description(obj, "kvm-type",

@@ -2411,6 +2391,7 @@ DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
  * pseries-2.4
  */
 #define SPAPR_COMPAT_2_4 \
+        SPAPR_COMPAT_2_5 \
         HW_COMPAT_2_4
 
 static void spapr_machine_2_4_instance_options(MachineState *machine)
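spapr_hpt_shift_for_ramsize() above encodes the 1/128-of-RAM rule as an order (log2) of the hash page table size, clamped to the architected 2^18..2^46 byte range. Below is a standalone restatement with a worked example: 4 GiB of maxram gives order 25, i.e. a 32 MiB HPT. The helper names here are illustrative, not QEMU's, and plain loops replace ctz64()/pow2ceil() so the snippet compiles on its own.

#include <inttypes.h>
#include <stdio.h>

#define MIN_SHIFT 18    /* minimum architected HPT size (256 KiB) */
#define MAX_SHIFT 46    /* maximum architected HPT size */

/* log2 of the smallest power of two >= x, for x >= 1. */
static int log2_ceil_u64(uint64_t x)
{
    int shift = 0;
    while ((UINT64_C(1) << shift) < x) {
        shift++;
    }
    return shift;
}

/* HPT order for a given maximum RAM size: 1/128 of RAM, rounded up,
 * clamped to the architected range -- the same rule as the spapr.c hunk. */
static int hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift = log2_ceil_u64(ramsize) - 7;
    if (shift < MIN_SHIFT) {
        shift = MIN_SHIFT;
    }
    if (shift > MAX_SHIFT) {
        shift = MAX_SHIFT;
    }
    return shift;
}

int main(void)
{
    uint64_t ram = UINT64_C(4) * 1024 * 1024 * 1024;    /* 4 GiB */
    int shift = hpt_shift_for_ramsize(ram);
    printf("order %d => HPT of %" PRIu64 " MiB\n",
           shift, (UINT64_C(1) << shift) >> 20);        /* order 25 => 32 MiB */
    return 0;
}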
hw/ppc/spapr_hcall.c

@@ -38,6 +38,12 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
     run_on_cpu(cs, do_spr_sync, &s);
 }
 
+static bool has_spr(PowerPCCPU *cpu, int spr)
+{
+    /* We can test whether the SPR is defined by checking for a valid name */
+    return cpu->env.spr_cb[spr].name != NULL;
+}
+
 static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
 {
     /*

@@ -332,11 +338,52 @@ static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     return H_SUCCESS;
 }
 
+static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+                                target_ulong opcode, target_ulong *args)
+{
+    cpu_synchronize_state(CPU(cpu));
+    cpu->env.spr[SPR_SPRG0] = args[0];
+
+    return H_SUCCESS;
+}
+
 static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
 {
-    /* FIXME: actually implement this */
-    return H_HARDWARE;
+    if (!has_spr(cpu, SPR_DABR)) {
+        return H_HARDWARE;              /* DABR register not available */
+    }
+    cpu_synchronize_state(CPU(cpu));
+
+    if (has_spr(cpu, SPR_DABRX)) {
+        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
+    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
+        return H_RESERVED_DABR;
+    }
+
+    cpu->env.spr[SPR_DABR] = args[0];
+    return H_SUCCESS;
+}
+
+static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+                                target_ulong opcode, target_ulong *args)
+{
+    target_ulong dabrx = args[1];
+
+    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
+        return H_HARDWARE;
+    }
+
+    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
+        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
+        return H_PARAMETER;
+    }
+
+    cpu_synchronize_state(CPU(cpu));
+    cpu->env.spr[SPR_DABRX] = dabrx;
+    cpu->env.spr[SPR_DABR] = args[0];
+
+    return H_SUCCESS;
 }
 
 #define FLAGS_REGISTER_VPA 0x0000200000000000ULL

@@ -990,13 +1037,16 @@ static void hypercall_register_types(void)
     /* hcall-bulk */
     spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
 
-    /* hcall-dabr */
-    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
-
     /* hcall-splpar */
     spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
     spapr_register_hypercall(H_CEDE, h_cede);
 
+    /* processor register resource access h-calls */
+    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
+    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
+    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
+    spapr_register_hypercall(H_SET_MODE, h_set_mode);
+
     /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
      * here between the "CI" and the "CACHE" variants, they will use whatever
      * mapping attributes qemu is using. When using KVM, the kernel will

@@ -1013,8 +1063,6 @@ static void hypercall_register_types(void)
     /* qemu/KVM-PPC specific hcalls */
     spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
 
-    spapr_register_hypercall(H_SET_MODE, h_set_mode);
-
     /* ibm,client-architecture-support support */
     spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
 }
hw/ppc/spapr_rtas.c

@@ -113,6 +113,7 @@ static void rtas_power_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
         return;
     }
     qemu_system_shutdown_request();
+    cpu_stop_current();
     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 }
 
include/hw/input/adb.h

@@ -79,7 +79,7 @@ struct ADBBusState {
 
 int adb_request(ADBBusState *s, uint8_t *buf_out,
                 const uint8_t *buf, int len);
-int adb_poll(ADBBusState *s, uint8_t *buf_out);
+int adb_poll(ADBBusState *s, uint8_t *buf_out, uint16_t poll_mask);
 
 #define TYPE_ADB_KEYBOARD "adb-keyboard"
 #define TYPE_ADB_MOUSE "adb-mouse"
include/hw/ppc/spapr.h

@@ -72,7 +72,6 @@ struct sPAPRMachineState {
     int htab_save_index;
     bool htab_first_pass;
     int htab_fd;
-    bool htab_fd_stale;
 
     /* RTAS state */
     QTAILQ_HEAD(, sPAPRConfigureConnectorState) ccs_list;
target-ppc/kvm_ppc.h

@@ -184,11 +184,6 @@ static inline uint64_t kvmppc_rma_size(uint64_t current_size,
     return ram_size;
 }
 
-static inline int kvmppc_update_sdr1(CPUPPCState *env)
-{
-    return 0;
-}
-
 #endif /* !CONFIG_USER_ONLY */
 
 static inline bool kvmppc_has_cap_epr(void)
target-ppc/mmu-hash64.h

@@ -102,7 +102,7 @@ static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
     uint64_t addr;
 
     addr = token + (index * HASH_PTE_SIZE_64);
-    if (env->external_htab) {
+    if (kvmppc_kern_htab || env->external_htab) {
         return ldq_p((const void *)(uintptr_t)addr);
     } else {
         return ldq_phys(CPU(cpu)->as, addr);

@@ -116,7 +116,7 @@ static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
     uint64_t addr;
 
     addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
-    if (env->external_htab) {
+    if (kvmppc_kern_htab || env->external_htab) {
         return ldq_p((const void *)(uintptr_t)addr);
     } else {
         return ldq_phys(CPU(cpu)->as, addr);
target-ppc/translate_init.c

@@ -11352,7 +11352,9 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
     case POWERPC_MMU_64B:
     case POWERPC_MMU_2_03:
     case POWERPC_MMU_2_06:
     case POWERPC_MMU_2_06a:
+    case POWERPC_MMU_2_07:
+    case POWERPC_MMU_2_07a:
 #endif
         cpu_fprintf(f, " SDR1 " TARGET_FMT_lx " DAR " TARGET_FMT_lx
                     " DSISR " TARGET_FMT_lx "\n", env->spr[SPR_SDR1],