Deal with the machine address space being non-contiguous in bus_dmamem_alloc():

- Define _BUS_AVAIL_END to 0xffffffff, as we don't have an easy way to
  find the upper bound for our machine address space (and this can change
  when we swap pages with the hypervisor).
- implement _xen_bus_dmamem_alloc_range(), which will request a contiguous
  set of pages to the hypervisor if the pages returned by uvm_pglistalloc()
  don't fit the constraints.
We can't deal with the low/high constraints yet, because Xen doesn't offer a
way to get pages in a specific range of addresses.

Based on patches from Dave Thompson (in private mail), with heavy hacking
by me.
This commit is contained in:
bouyer 2005-08-20 19:30:37 +00:00
parent 54ecd3d8a1
commit 0944c0fa9d
3 changed files with 246 additions and 2 deletions

View File

@ -1,4 +1,4 @@
# $NetBSD: files.xen,v 1.27 2005/08/19 16:06:12 bouyer Exp $
# $NetBSD: files.xen,v 1.28 2005/08/20 19:30:37 bouyer Exp $
# NetBSD: files.x86,v 1.10 2003/10/08 17:30:00 bouyer Exp
# NetBSD: files.i386,v 1.254 2004/03/25 23:32:10 jmc Exp
@ -83,6 +83,7 @@ include "dev/ata/files.ata"
file dev/md_root.c memory_disk_hooks
file arch/x86/x86/bus_dma.c pci
file arch/xen/x86/xen_bus_dma.c pci
file arch/xen/x86/bus_space.c pci
file arch/x86/x86/cacheinfo.c
file arch/xen/x86/consinit.c

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_private.h,v 1.1 2005/04/16 08:53:09 yamt Exp $ */
/* $NetBSD: bus_private.h,v 1.2 2005/08/20 19:30:37 bouyer Exp $ */
/*-
* Copyright (c)2005 YAMAMOTO Takashi,
@ -45,4 +45,16 @@ _bus_virt_to_bus(struct pmap *pm, vaddr_t va)
return ba;
}
/* Xen needs its own bus_dmamem_alloc_range; hook it in via this override. */
#define _PRIVATE_BUS_DMAMEM_ALLOC_RANGE _xen_bus_dmamem_alloc_range
int _xen_bus_dmamem_alloc_range(bus_dma_tag_t, bus_size_t, bus_size_t,
bus_size_t, bus_dma_segment_t *, int, int *, int,
bus_addr_t, bus_addr_t);
/*
 * The highest machine address of our allocated range isn't known and can
 * change over time (pages can be swapped with the hypervisor). Just assume
 * it's the largest possible value.
 */
#define _BUS_AVAIL_END ((bus_addr_t)0xffffffff)
#include <x86/bus_private.h>

View File

@ -0,0 +1,231 @@
/* $NetBSD: xen_bus_dma.c,v 1.1 2005/08/20 19:30:38 bouyer Exp $ */
/* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
* Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.1 2005/08/20 19:30:38 bouyer Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <machine/bus.h>
#include <machine/bus_private.h>
#include <uvm/uvm_extern.h>
extern paddr_t avail_end;
/*
 * Pure 2^n version of get_order: smallest order such that
 * (1 << order) pages cover `size` bytes.
 */
static __inline__ int get_order(unsigned long size)
{
	unsigned long chunks;
	int order = 0;

	/* number of half-page units needed, rounded up */
	chunks = (size - 1) >> (PAGE_SHIFT - 1);
	while ((chunks >>= 1) != 0)
		order++;
	return order;
}
/*
 * Obtain a machine-contiguous, size-aligned extent of 2^order pages,
 * hung off *mlistp, by trading pages with the hypervisor: hand back
 * npages pseudo-physical pages, request one contiguous machine extent
 * of the same order in exchange, then rewire the phys-to-machine table
 * so the UVM pages now map the new extent.
 * Returns 0 on success or an errno value.
 */
static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    struct pglist *mlistp, int flags)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on size. This will automagically
	 * handle "boundary", but the only way to enforce alignment
	 * is to request a memory region of size max(alignment, size).
	 */
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(npages << PAGE_SHIFT, 0, avail_end, 0, 0,
	    mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		/* Invalidate the P2M entry before the frame goes away. */
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
		    &mfn, 1, 0) != 1) {
			printf("xen_alloc_contig: MEMOP_decrease_reservation "
			    "failed!\n");
			/*
			 * XXX(review): frames already surrendered to the
			 * hypervisor and the UVM pages on mlistp are leaked
			 * here — confirm whether recovery is possible.
			 */
			return ENOMEM;
		}
	}
	/* Get the new contiguous extent; mfn receives its base frame number. */
	if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
	    &mfn, 1, order) != 1) {
		printf("xen_alloc_contig: MEMOP_increase_reservation "
		    "failed!\n");
		/* XXX(review): the UVM pages on mlistp are leaked here. */
		return ENOMEM;
	}
	s = splvm();
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn+i;
		xpq_queue_machphys_update((mfn+i) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq);
			uvm_pagefree(pg);
		}
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	xpq_flush_queue();
	splx(s);
	return 0;
}
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses:
 * a physically contiguous UVM allocation may be machine-discontiguous,
 * in which case we retry once via _xen_alloc_contig().
 * Fills in segs[0..*rsegs-1]; returns 0 or an errno value.
 */
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	paddr_t curaddr, lastaddr;
	bus_addr_t bus_curaddr, bus_lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;	/* set once we've fallen back to the hypervisor */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, 0, avail_end, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:
	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
		bus_curaddr = _BUS_PHYS_TO_BUS(curaddr);
		bus_lastaddr = _BUS_PHYS_TO_BUS(lastaddr);
		if ((bus_lastaddr < low || bus_lastaddr >= high) ||
		    (bus_curaddr < low || bus_curaddr >= high)) {
			/*
			 * If machine addresses are outside the allowed
			 * range we have to bail. Xen2 doesn't offer an
			 * interface to get memory in a specific address
			 * range.
			 */
			printf("_xen_bus_dmamem_alloc_range: no way to "
			    "enforce address range\n");
			/* Fix: don't leak the page list on failure. */
			uvm_pglistfree(&mlist);
			return EINVAL;
		}
		if (bus_curaddr == (bus_lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			if (curseg >= nsegs) {
				if (doingrealloc == 1)
					panic("_xen_bus_dmamem_alloc_range: "
					    "xen_alloc_contig returned "
					    "too many segments");
				doingrealloc = 1;
				/*
				 * Too many segments. Free this memory and
				 * get a contiguous segment from the
				 * hypervisor.
				 */
				uvm_pglistfree(&mlist);
				for (curseg = 0; curseg < nsegs; curseg++) {
					segs[curseg].ds_addr = 0;
					segs[curseg].ds_len = 0;
				}
				error = _xen_alloc_contig(size, alignment,
				    boundary, &mlist, flags);
				if (error)
					return error;
				goto again;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}