Some bus_dma(9) fixes for Xen:

- Attempt to gracefully recover from a failed decrease_reservation or
  increase_reservation, by avoiding physical memory loss.
- always store a machine address in ds_addr; this avoids some mistakes
  where a machine address would in some cases be freed as a physical address,
  or mapped as a physical address.
This commit is contained in:
bouyer 2006-08-28 19:58:56 +00:00
parent 4c2420029f
commit aeeb0b33ca
4 changed files with 104 additions and 31 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_private.h,v 1.5 2006/02/16 20:17:15 perry Exp $ */
/* $NetBSD: bus_private.h,v 1.6 2006/08/28 19:58:56 bouyer Exp $ */
/* NetBSD: bus.h,v 1.8 2005/03/09 19:04:46 matt Exp */
/*-
@ -150,10 +150,23 @@ struct x86_bus_dma_cookie {
#define _BUS_PHYS_TO_BUS(pa) ((bus_addr_t)(pa))
#endif /* !defined(_BUS_PHYS_TO_BUS) */
#if !defined(_BUS_BUS_TO_PHYS)
#define _BUS_BUS_TO_PHYS(ba) ((paddr_t)(ba))
#endif /* !defined(_BUS_BUS_TO_PHYS) */
#if !defined(_BUS_VM_PAGE_TO_BUS)
#define _BUS_VM_PAGE_TO_BUS(pg) _BUS_PHYS_TO_BUS(VM_PAGE_TO_PHYS(pg))
#endif /* !defined(_BUS_VM_PAGE_TO_BUS) */
#if !defined(_BUS_BUS_TO_VM_PAGE)
#define _BUS_BUS_TO_VM_PAGE(ba) PHYS_TO_VM_PAGE(ba)
#endif /* !defined(_BUS_BUS_TO_VM_PAGE) */
#if !defined(_BUS_PMAP_ENTER)
#define _BUS_PMAP_ENTER(pmap, va, ba, prot, flags) \
pmap_enter(pmap, va, ba, prot, flags)
#endif /* _BUS_PMAP_ENTER */
#if !defined(_BUS_VIRT_TO_BUS)
#include <uvm/uvm_extern.h>

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_dma.c,v 1.29 2006/03/01 12:38:12 yamt Exp $ */
/* $NetBSD: bus_dma.c,v 1.30 2006/08/28 19:58:57 bouyer Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.29 2006/03/01 12:38:12 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.30 2006/08/28 19:58:57 bouyer Exp $");
/*
* The following is included because _bus_dma_uiomove is derived from
@ -994,7 +994,7 @@ _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += PAGE_SIZE) {
m = PHYS_TO_VM_PAGE(addr);
m = _BUS_BUS_TO_VM_PAGE(addr);
TAILQ_INSERT_TAIL(&mlist, m, pageq);
}
}
@ -1039,7 +1039,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,
_BUS_PMAP_ENTER(pmap_kernel(), va, addr,
VM_PROT_READ | VM_PROT_WRITE,
PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
/*
@ -1129,7 +1129,7 @@ _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
continue;
}
return (x86_btop((caddr_t)segs[i].ds_addr + off));
return (x86_btop(_BUS_BUS_TO_PHYS(segs[i].ds_addr + off)));
}
/* Page not found. */

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_private.h,v 1.6 2006/02/16 20:17:15 perry Exp $ */
/* $NetBSD: bus_private.h,v 1.7 2006/08/28 19:58:57 bouyer Exp $ */
/*-
* Copyright (c)2005 YAMAMOTO Takashi,
@ -29,7 +29,11 @@
#include <uvm/uvm_extern.h>
#define _BUS_PHYS_TO_BUS(pa) ((bus_addr_t)xpmap_ptom(pa))
#define _BUS_BUS_TO_PHYS(ba) ((paddr_t)xpmap_mtop(ba))
#define _BUS_VIRT_TO_BUS(pm, va) _bus_virt_to_bus((pm), (va))
#define _BUS_BUS_TO_VM_PAGE(ba) (PHYS_TO_VM_PAGE(xpmap_mtop(ba)))
#define _BUS_PMAP_ENTER(pmap, va, ba, prot, flags) \
pmap_enter(pmap, va, xpmap_mtop(ba), prot, flags)
static __inline bus_addr_t _bus_virt_to_bus(struct pmap *, vaddr_t);

View File

@ -1,4 +1,4 @@
/* $NetBSD: xen_bus_dma.c,v 1.6 2006/01/15 22:09:52 bouyer Exp $ */
/* $NetBSD: xen_bus_dma.c,v 1.7 2006/08/28 19:58:57 bouyer Exp $ */
/* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */
/*-
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.6 2006/01/15 22:09:52 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.7 2006/08/28 19:58:57 bouyer Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -75,14 +75,11 @@ _xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
unsigned long npagesreq, npages, mfn;
bus_addr_t pa;
struct vm_page *pg, *pgnext;
struct pglist freelist;
int s, error;
#ifdef XEN3
struct xen_memory_reservation res;
#endif
TAILQ_INIT(&freelist);
/*
 * When requesting a contiguous memory region, the hypervisor will
* return a memory range aligned on size. This will automagically
@ -114,14 +111,21 @@ _xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
< 0) {
printf("xen_alloc_contig: XENMEM_decrease_reservation "
"failed!\n");
return ENOMEM;
xpmap_phys_to_machine_mapping[
(pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
error = ENOMEM;
goto failed;
}
#else
if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
&mfn, 1, 0) != 1) {
printf("xen_alloc_contig: MEMOP_decrease_reservation "
"failed!\n");
return ENOMEM;
xpmap_phys_to_machine_mapping[
(pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
error = ENOMEM;
goto failed;
}
#endif
}
@ -135,14 +139,18 @@ _xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res) < 0) {
printf("xen_alloc_contig: XENMEM_increase_reservation "
"failed!\n");
return ENOMEM;
error = ENOMEM;
pg = NULL;
goto failed;
}
#else
if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
&mfn, 1, order) != 1) {
printf("xen_alloc_contig: MEMOP_increase_reservation "
"failed!\n");
return ENOMEM;
error = ENOMEM;
pg = NULL;
goto failed;
}
#endif
s = splvm();
@ -165,6 +173,58 @@ _xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
xpq_flush_queue();
splx(s);
return 0;
failed:
/*
* Attempt to recover from a failed decrease or increase reservation:
 * if decrease_reservation failed, we haven't given all pages
 * back to Xen; return them to UVM, and get the missing pages
 * from Xen.
* if increase_reservation failed, we expect pg to be NULL and we just
* get back the missing pages from Xen one by one.
*/
/* give back remaining pages to UVM */
for (; pg != NULL; pg = pgnext) {
pgnext = pg->pageq.tqe_next;
TAILQ_REMOVE(mlistp, pg, pageq);
uvm_pagefree(pg);
}
	/* replace the pages that we already gave to Xen */
s = splvm();
for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
pgnext = pg->pageq.tqe_next;
#ifdef XEN3
res.extent_start = &mfn;
res.nr_extents = 1;
res.extent_order = 0;
res.address_bits = 31;
res.domid = DOMID_SELF;
if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
< 0) {
printf("xen_alloc_contig: recovery "
"XENMEM_increase_reservation failed!\n");
break;
}
#else
if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
&mfn, 1, 0) != 1) {
printf("xen_alloc_contig: recovery "
"MEMOP_increase_reservation failed!\n");
break;
}
#endif
pa = VM_PAGE_TO_PHYS(pg);
xpmap_phys_to_machine_mapping[
(pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
xpq_queue_machphys_update((mfn) << PAGE_SHIFT, pa);
TAILQ_REMOVE(mlistp, pg, pageq);
uvm_pagefree(pg);
}
/* Flush updates through and flush the TLB */
xpq_queue_tlb_flush();
xpq_flush_queue();
splx(s);
return error;
}
@ -178,8 +238,7 @@ _xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
paddr_t curaddr, lastaddr;
bus_addr_t bus_curaddr, bus_lastaddr;
bus_addr_t curaddr, lastaddr;
struct vm_page *m;
struct pglist mlist;
int curseg, error;
@ -210,18 +269,16 @@ again:
*/
m = mlist.tqh_first;
curseg = 0;
lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
segs[curseg].ds_len = PAGE_SIZE;
m = m->pageq.tqe_next;
if ((_BUS_PHYS_TO_BUS(segs[curseg].ds_addr) & (alignment - 1)) != 0)
if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
goto dorealloc;
for (; m != NULL; m = m->pageq.tqe_next) {
curaddr = VM_PAGE_TO_PHYS(m);
bus_curaddr = _BUS_PHYS_TO_BUS(curaddr);
bus_lastaddr = _BUS_PHYS_TO_BUS(lastaddr);
if ((bus_lastaddr < low || bus_lastaddr >= high) ||
(bus_curaddr < low || bus_curaddr >= high)) {
curaddr = _BUS_VM_PAGE_TO_BUS(m);
if ((lastaddr < low || lastaddr >= high) ||
(curaddr < low || curaddr >= high)) {
/*
* If machine addresses are outside the allowed
* range we have to bail. Xen2 doesn't offer an
@ -233,16 +290,15 @@ again:
uvm_pglistfree(&mlist);
return EINVAL;
}
if (bus_curaddr == (bus_lastaddr + PAGE_SIZE)) {
if (curaddr == (lastaddr + PAGE_SIZE)) {
segs[curseg].ds_len += PAGE_SIZE;
if ((bus_lastaddr & boundary) !=
(bus_curaddr & boundary))
if ((lastaddr & boundary) !=
(curaddr & boundary))
goto dorealloc;
}
else {
} else {
curseg++;
if (curseg >= nsegs ||
(bus_curaddr & (alignment - 1)) != 0) {
(curaddr & (alignment - 1)) != 0) {
dorealloc:
if (doingrealloc == 1)
panic("_xen_bus_dmamem_alloc_range: "