spapr/xive: fix EQ page addresses above 64GB
The high order bits of the address of the OS event queue are stored in bits [4-31] of word2 of the XIVE END internal structure, and the low order bits in word3. This structure uses big-endian ordering, and computing the address requires some simple arithmetic, which happens to be wrong: the mask removing bits [0-3] of word2 is applied to the wrong value, so the resulting address is bogus when above 64GB.

Guests with more than 64GB of RAM will allocate pages for the OS event queues that reside above the 64GB limit. In that case, the XIVE device model still wakes up the CPUs on a notification, such as an IPI, but the event queue update is written to the wrong place in memory. The outcome is unpredictable: guest memory is trashed and IPIs are not delivered.

Introduce a helper, xive_end_qaddr(), to compute this value correctly in all the places where it is used.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20190508171946.657-3-clg@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
parent 7f9136f90d
commit 13df93244e
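To make the arithmetic error concrete, here is a small standalone sketch, not part of the patch, that reproduces the computation on a little-endian host (where the two byte orderings differ). It uses be32toh()/htobe32() from glibc's <endian.h> as stand-ins for QEMU's be32_to_cpu()/cpu_to_be32(); the 65GB EQ page address and the content of bits [0-3] of word2 are made-up values for illustration.

/*
 * Standalone sketch (not part of the patch) of the masking bug described
 * in the commit message.  be32toh()/htobe32() stand in for QEMU's
 * be32_to_cpu()/cpu_to_be32(); the EQ page address (65GB) and the
 * non-address bits of word2 are illustrative values only.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <endian.h>

int main(void)
{
    uint64_t qaddr = 65ULL << 30;                                /* EQ page at 65GB */
    uint32_t w2 = htobe32(0xf0000000 | (uint32_t)(qaddr >> 32)); /* bits [0-3] set */
    uint32_t w3 = htobe32((uint32_t)qaddr);                      /* low order bits */

    /* Buggy: the mask is applied to the raw big-endian word, so on a
     * little-endian host it clears address bits instead of bits [0-3]. */
    uint64_t bad = (uint64_t) be32toh(w2 & 0x0fffffff) << 32 | be32toh(w3);

    /* Fixed (what xive_end_qaddr() does): byte-swap first, then mask. */
    uint64_t good = ((uint64_t) be32toh(w2) & 0x0fffffff) << 32 | be32toh(w3);

    printf("expected 0x%" PRIx64 ", buggy 0x%" PRIx64 ", fixed 0x%" PRIx64 "\n",
           qaddr, bad, good);
    return 0;
}

With the mask applied before the byte swap, the 64GB bit of the address is lost and the non-address bits of word2 end up in the top nibble of the result; swapping first, as the new helper does, recovers the expected address.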
@@ -1150,8 +1150,7 @@ static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
     }
 
     if (xive_end_is_enqueue(end)) {
-        args[1] = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
-            | be32_to_cpu(end->w3);
+        args[1] = xive_end_qaddr(end);
         args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
     } else {
         args[1] = 0;
@@ -1042,8 +1042,7 @@ static const TypeInfo xive_source_info = {
 
 void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
 {
-    uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
-        | be32_to_cpu(end->w3);
+    uint64_t qaddr_base = xive_end_qaddr(end);
     uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
     uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
     uint32_t qentries = 1 << (qsize + 10);
@@ -1072,8 +1071,7 @@ void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
 
 void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
 {
-    uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
-        | be32_to_cpu(end->w3);
+    uint64_t qaddr_base = xive_end_qaddr(end);
     uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
     uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
     uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
@@ -1101,8 +1099,7 @@ void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
 
 static void xive_end_enqueue(XiveEND *end, uint32_t data)
 {
-    uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
-        | be32_to_cpu(end->w3);
+    uint64_t qaddr_base = xive_end_qaddr(end);
     uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
     uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
     uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
@@ -208,6 +208,12 @@ typedef struct XiveEND {
 #define xive_end_is_backlog(end)  (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
 #define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL)
 
+static inline uint64_t xive_end_qaddr(XiveEND *end)
+{
+    return ((uint64_t) be32_to_cpu(end->w2) & 0x0fffffff) << 32 |
+        be32_to_cpu(end->w3);
+}
+
 /* Notification Virtual Target (NVT) */
 typedef struct XiveNVT {
         uint32_t        w0;