- Use ctob() instead of ptoa() to obtain physical addresses from frame

numbers. Using ptoa() will cast to vaddr_t, which might not be adequate
for architectures where sizeof(paddr_t) > sizeof(vaddr_t) (like i386 PAE).

- small fix inside AGP heuristics to avoid masking high order bits for
systems with more than 4GB.

Reviewed by bouyer@.

See also http://mail-index.netbsd.org/tech-kern/2010/02/22/msg007373.html
This commit is contained in:
jym 2010-02-24 00:01:11 +00:00
parent 49f69a7e5c
commit 7bf36164a7
2 changed files with 11 additions and 11 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: agp.c,v 1.66 2010/01/08 19:56:51 dyoung Exp $ */
/* $NetBSD: agp.c,v 1.67 2010/02/24 00:01:11 jym Exp $ */
/*-
* Copyright (c) 2000 Doug Rabson
@ -65,7 +65,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: agp.c,v 1.66 2010/01/08 19:56:51 dyoung Exp $");
__KERNEL_RCSID(0, "$NetBSD: agp.c,v 1.67 2010/02/24 00:01:11 jym Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -327,7 +327,7 @@ agpattach(device_t parent, device_t self, void *aux)
* Work out an upper bound for agp memory allocation. This
* uses a heuristic table from the Linux driver.
*/
memsize = ptoa(physmem) >> 20;
memsize = physmem >> (20 - PAGE_SHIFT); /* memsize is in MB */
for (i = 0; i < agp_max_size; i++) {
if (memsize <= agp_max[i][0])
break;

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.153 2010/01/27 03:56:33 uebayasi Exp $ */
/* $NetBSD: uvm_page.c,v 1.154 2010/02/24 00:01:12 jym Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153 2010/01/27 03:56:33 uebayasi Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.154 2010/02/24 00:01:12 jym Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@ -339,7 +339,7 @@ uvm_page_init_buckets(struct pgfreelist *pgfl)
void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
vsize_t freepages, pagecount, bucketcount, n;
psize_t freepages, pagecount, bucketcount, n;
struct pgflbucket *bucketarray, *cpuarray;
struct vm_page *pagearray;
int lcv;
@ -439,7 +439,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
/* init and free vm_pages (we've already zeroed them) */
paddr = ptoa(vm_physmem[lcv].start);
paddr = ctob(vm_physmem[lcv].start);
for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
@ -642,7 +642,7 @@ uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
/* try from front */
if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
*paddrp = ptoa(vm_physmem[lcv].avail_start);
*paddrp = ctob(vm_physmem[lcv].avail_start);
vm_physmem[lcv].avail_start++;
vm_physmem[lcv].start++;
/* nothing left? nuke it */
@ -661,7 +661,7 @@ uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
/* try from rear */
if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
*paddrp = ctob(vm_physmem[lcv].avail_end - 1);
vm_physmem[lcv].avail_end--;
vm_physmem[lcv].end--;
/* nothing left? nuke it */
@ -690,7 +690,7 @@ uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
continue; /* nope */
*paddrp = ptoa(vm_physmem[lcv].avail_start);
*paddrp = ctob(vm_physmem[lcv].avail_start);
vm_physmem[lcv].avail_start++;
/* truncate! */
vm_physmem[lcv].start = vm_physmem[lcv].avail_start;
@ -793,7 +793,7 @@ uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
}
/* zero data, init phys_addr and free_list, and free pages */
memset(pgs, 0, sizeof(struct vm_page) * npages);
for (lcv = 0, paddr = ptoa(start) ;
for (lcv = 0, paddr = ctob(start) ;
lcv < npages ; lcv++, paddr += PAGE_SIZE) {
pgs[lcv].phys_addr = paddr;
pgs[lcv].free_list = free_list;