new pmap designed for SH3/SH4.

uch 2002-05-09 12:28:08 +00:00
parent bf93dc9b4c
commit 5cf2727a95
11 changed files with 1345 additions and 3863 deletions

param.h

@ -1,4 +1,4 @@
/* $NetBSD: param.h,v 1.10 2002/04/28 17:10:35 uch Exp $ */
/* $NetBSD: param.h,v 1.11 2002/05/09 12:28:08 uch Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
@ -43,10 +43,18 @@
* SuperH dependent constants.
*/
#ifndef _SH3_PARAM_H_
#define _SH3_PARAM_H_
#if defined(_KERNEL) && !defined(_LOCORE)
#include <machine/cpu.h>
#include <sh3/cpu.h>
#endif
/* NetBSD/sh3 uses 4KB pages */
#define PGSHIFT 12
#define NBPG (1 << PGSHIFT)
#define PGOFSET (NBPG - 1)
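These constants drive the usual mask-based page rounding; the same arithmetic appears later as sh3_round_page()/sh3_trunc_page() in the vmparam.h diff. A quick sketch:

/* Sketch: page rounding with the constants above (va is any address). */
vaddr_t va_up = (va + PGOFSET) & ~PGOFSET;	/* round up to a page boundary */
vaddr_t va_down = va & ~PGOFSET;		/* truncate to a page boundary */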
/*
* Round p (pointer or byte index) up to a correctly-aligned value
* for all data types (int, long, ...). The result is u_int and
@ -62,11 +70,6 @@
#define ALIGN(p) (((u_int)(p) + ALIGNBYTES) & ~ALIGNBYTES)
#define ALIGNED_POINTER(p, t) ((((u_long)(p)) & (sizeof(t) - 1)) == 0)
#define PGSHIFT 12 /* LOG2(NBPG) */
#define NBPG (1 << PGSHIFT) /* bytes/page */
#define PGOFSET (NBPG - 1) /* byte offset into page */
#define NPTEPG (NBPG / (sizeof(pt_entry_t)))
#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
#define DEV_BSIZE (1 << DEV_BSHIFT)
#define BLKDEV_IOSIZE 2048
@ -135,23 +138,4 @@
/* bytes to disk blocks */
#define dbtob(x) ((x) << DEV_BSHIFT)
#define btodb(x) ((x) >> DEV_BSHIFT)
/*
* Map a ``block device block'' to a file system block.
* This should be device dependent, and should use the bsize
* field from the disk label.
* For now though just use DEV_BSIZE.
*/
#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE / DEV_BSIZE))
/*
* Mach derived conversion macros
*/
#define sh3_round_pdr(x) ((((unsigned)(x)) + PDOFSET) & ~PDOFSET)
#define sh3_trunc_pdr(x) ((unsigned)(x) & ~PDOFSET)
#define sh3_btod(x) ((unsigned)(x) >> PDSHIFT)
#define sh3_dtob(x) ((unsigned)(x) << PDSHIFT)
#define sh3_round_page(x) ((((unsigned)(x)) + PGOFSET) & ~PGOFSET)
#define sh3_trunc_page(x) ((unsigned)(x) & ~PGOFSET)
#define sh3_btop(x) ((unsigned)(x) >> PGSHIFT)
#define sh3_ptob(x) ((unsigned)(x) << PGSHIFT)
#endif /* !_SH3_PARAM_H_ */

pcb.h

@ -1,13 +1,11 @@
/* $NetBSD: pcb.h,v 1.6 2002/04/28 17:10:36 uch Exp $ */
/* $NetBSD: pcb.h,v 1.7 2002/05/09 12:28:08 uch Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
* Copyright (c) 1990 The Regents of the University of California.
* Copyright (c) 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -19,30 +17,23 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)pcb.h 5.10 (Berkeley) 5/12/91
*/
/*
* SH3 process control block
* T.Horiuchi Brains Corp. 05/27/1998
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SH3_PCB_H_
@ -51,26 +42,10 @@
#include <sh3/frame.h>
struct pcb {
struct switchframe pcb_sf;
vaddr_t pcb_sp; /* kernel stack top */
vaddr_t pcb_fp; /* frame top */
int pageDirReg; /* Page Directory of this process */
int pcb_flags;
caddr_t pcb_onfault; /* copyin/out fault recovery */
int fusubail;
struct pmap *pcb_pmap; /* back pointer to our pmap */
struct switchframe pcb_sf; /* kernel context for resume */
caddr_t pcb_onfault; /* for copyin/out fault */
int pcb_faultbail; /* bail out before calling uvm_fault. */
};
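pcb_onfault and pcb_faultbail drive copyin/copyout fault recovery; the pattern, as fuswintr() in the Locore.c diff below uses it (gcc computed-goto labels), is roughly:

/* Sketch of the fault-recovery pattern around a user-space access. */
curpcb->pcb_onfault = &&Err999;	/* the trap handler resumes at Err999 */
curpcb->pcb_faultbail = 1;	/* bail out instead of calling uvm_fault() */
rc = *(unsigned short *)uaddr;	/* user access that may fault */
curpcb->pcb_onfault = 0;
curpcb->pcb_faultbail = 0;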
/*
* The pcb is augmented with machine-dependent additional data for
* core dumps. For the SH3, there is nothing to add.
*/
struct md_coredump {
long md_pad[8];
};
#ifdef _KERNEL
extern struct pcb *curpcb; /* our current running pcb */
#endif
extern struct pcb *curpcb;
#endif /* !_SH3_PCB_H_ */

pmap.h

@ -1,9 +1,12 @@
/* $NetBSD: pmap.h,v 1.22 2002/04/28 17:10:36 uch Exp $ */
/* $NetBSD: pmap.h,v 1.23 2002/05/09 12:28:08 uch Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -13,389 +16,65 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgment:
* This product includes software developed by Charles D. Cranor and
* Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmap.h: see pmap.c for the history of this pmap module.
* NetBSD/sh3 pmap:
* pmap.pm_ptp[512] ... 512 slots of page table pages
* each page table page contains 1024 PTEs. (PAGE_SIZE / sizeof(pt_entry_t))
* | PTP 9bit | PTOFSET 10bit | PGOFSET 12bit | (covers the 2GB user space)
*/
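A minimal sketch of the two-level lookup this layout implies (the shifts and masks are inferred from the diagram, cf. the __pmap_pte_lookup() prototype below; not the committed code):

pt_entry_t *
example_pte_lookup(pmap_t pm, vaddr_t va)
{
	/* VA[30:22] selects one of the 512 PTP slots (2GB of user space) */
	pt_entry_t *ptp = pm->pm_ptp[(va >> 22) & (__PMAP_PTP_N - 1)];

	if (ptp == NULL)
		return (NULL);
	/* VA[21:12] selects one of the 1024 PTEs within that page */
	return (ptp + ((va >> 12) & 0x3ff));
}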
#ifndef _SH3_PMAP_H_
#define _SH3_PMAP_H_
#include <sh3/cache.h>
#include <sh3/psl.h>
#include <sys/queue.h>
#include <sh3/pte.h>
#include <uvm/uvm_object.h>
/*
* see pte.h for a description of i386 MMU terminology and hardware
* interface.
*
* a pmap describes a processes' 4GB virtual address space. this
* virtual address space can be broken up into 1024 4MB regions which
* are described by PDEs in the PDP. the PDEs are defined as follows:
*
* (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
* (the following assumes that KERNBASE is 0xf0000000)
*
* PDE#s VA range usage
* 0->959 0x0 -> 0xefc00000 user address space, note that the
* max user address is 0xefbfe000
* the final two pages in the last 4MB
* used to be reserved for the UAREA
* but now are no longer used
* 959 0xefc00000-> recursive mapping of PDP (used for
* 0xf0000000 linear mapping of PTPs)
* 960->1023 0xf0000000-> kernel address space (constant
* 0xffc00000 across all pmap's/processes)
* 1023 0xffc00000-> "alternate" recursive PDP mapping
* <end> (for other pmaps)
*
*
* note: a recursive PDP mapping provides a way to map all the PTEs for
* a 4GB address space into a linear chunk of virtual memory. in other
* words, the PTE for page 0 is the first int mapped into the 4MB recursive
* area. the PTE for page 1 is the second int. the very last int in the
* 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
* address).
*
* all pmap's PD's must have the same values in slots 960->1023 so that
* the kernel is always mapped in every process. these values are loaded
* into the PD at pmap creation time.
*
* at any one time only one pmap can be active on a processor. this is
* the pmap whose PDP is pointed to by processor register %cr3. this pmap
* will have all its PTEs mapped into memory at the recursive mapping
* point (slot #959 as show above). when the pmap code wants to find the
* PTE for a virtual address, all it has to do is the following:
*
* address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
* = 0xefc00000 + (VA / 4096) * 4
*
* what happens if the pmap layer is asked to perform an operation
* on a pmap that is not the one which is currently active? in that
* case we take the PA of the PDP of non-active pmap and put it in
* slot 1023 of the active pmap. this causes the non-active pmap's
* PTEs to get mapped in the final 4MB of the 4GB address space
* (e.g. starting at 0xffc00000).
*
* the following figure shows the effects of the recursive PDP mapping:
*
* PDP (%cr3)
* +----+
* | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
* | |
* | |
* | 959| -> points back to PDP (%cr3) mapping VA 0xefc00000 -> 0xf0000000
* | 960| -> first kernel PTP (maps 0xf0000000 -> 0xf0400000)
* | |
* |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
* +----+
*
* note that the PDE#959 VA (0xefc00000) is defined as "PTE_BASE"
* note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
*
* starting at VA 0xefc00000 the current active PDP (%cr3) acts as a
* PTP:
*
* PTP#959 == PDP(%cr3) => maps VA 0xefc00000 -> 0xf0000000
* +----+
* | 0| -> maps the contents of PTP#0 at VA 0xefc00000->0xefc01000
* | |
* | |
* | 959| -> maps contents of PTP#959 (the PDP) at VA 0xeffbf000
* | 960| -> maps contents of first kernel PTP
* | |
* |1023|
* +----+
*
* note that mapping of the PDP at PTP#959's VA (0xeffbf000) is
* defined as "PDP_BASE".... within that mapping there are two
* defines:
* "PDP_PDE" (0xeffbfefc) is the VA of the PDE in the PDP
* which points back to itself.
* "APDP_PDE" (0xeffbfffc) is the VA of the PDE in the PDP which
* establishes the recursive mapping of the alternate pmap.
* to set the alternate PDP, one just has to put the correct
* PA info in *APDP_PDE.
*
* note that in the APTE_BASE space, the APDP appears at VA
* "APDP_BASE" (0xfffff000).
*/
#define PMAP_NEED_PROCWR
#define PMAP_STEAL_MEMORY
#define PMAP_GROWKERNEL
/*
* the following defines identify the slots used as described above.
*/
#define __PMAP_PTP_N 512 /* # of page table pages; maps 2GB. */
typedef struct pmap {
pt_entry_t **pm_ptp;
int pm_asid;
int pm_refcnt;
struct pmap_statistics pm_stats; /* pmap statistics */
} *pmap_t;
extern struct pmap __pmap_kernel;
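pm_asid tags a pmap's user TLB entries so several address spaces can coexist in the TLB; the kernel uses ASID 0. A hedged sketch of how invalidation is keyed (cf. the sh_tlb_invalidate_addr() calls in pagemove() at the end of this commit):

/* Sketch: the ASID argument selects whose mapping of the VA is dropped. */
sh_tlb_invalidate_addr(pm->pm_asid, va);	/* one user mapping */
sh_tlb_invalidate_addr(0, kva);			/* a kernel mapping */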
#define PDSLOT_PTE ((u_int)0x33f) /* PTDPTDI for recursive PDP map */
#define PDSLOT_KERN ((u_int)0x340) /* KPTDI start of kernel space */
#define PDSLOT_APTE ((u_int)0x37f) /* alternative recursive slot */
/*
* the following defines give the virtual addresses of various MMU
* data structures:
* PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
* PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
* PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
*/
#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
/*
* XXXCDC: tmp xlate from old names:
* PTDPTDI -> PDSLOT_PTE
* KPTDI -> PDSLOT_KERN
* APTDPTDI -> PDSLOT_APTE
*/
/*
* the follow define determines how many PTPs should be set up for the
* kernel by locore.s at boot time. this should be large enough to
* get the VM system running. once the VM system is running, the
* pmap module can add more PTPs to the kernel area on demand.
*/
#ifndef NKPTP
#define NKPTP 8 /* 32MB to start */
#endif
#define NKPTP_MIN 8 /* smallest value we allow */
#define NKPTP_MAX 63 /* (1024 - (0xd0000000/NBPD) - 1) */
/* largest value (-1 for APTP space) */
/*
* various address macros
*
* vtopte: return a pointer to the PTE mapping a VA
* kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
* ptetov: given a pointer to a PTE, return the VA that it maps
* vtophys: translate a VA to the PA mapped to it
*
* plus alternative versions of the above
*/
#define vtopte(VA) (PTE_BASE + sh3_btop(VA))
#define kvtopte(VA) vtopte(VA)
#define ptetov(PT) (sh3_ptob(PT - PTE_BASE))
#define avtopte(VA) (APTE_BASE + sh3_btop(VA))
#define ptetoav(PT) (sh3_ptob(PT - APTE_BASE))
#define avtophys(VA) ((*avtopte(VA) & PG_FRAME) | \
((unsigned)(VA) & ~PG_FRAME))
/*
* pdei/ptei: generate index into PDP/PTP from a VA
*/
#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
/*
* PTP macros:
* a PTP's index is the PD index of the PDE that points to it
* a PTP's offset is the byte-offset in the PTE space that this PTP is at
* a PTP's VA is the first VA mapped by that PTP
*
* note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
* NBPD == number of bytes a PTP can map (4MB)
*/
#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
#ifdef _KERNEL
/*
* pmap data structures: see pmap.c for details of locking.
*/
struct pmap;
typedef struct pmap *pmap_t;
/*
* we maintain a list of all non-kernel pmaps
*/
LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
/*
* the pmap structure
*
* note that the pm_obj contains the simple_lock, the reference count,
* page list, and number of PTPs within the pmap.
*/
struct pmap {
struct uvm_object pm_obj; /* object (lck by object lock) */
#define pm_lock pm_obj.vmobjlock
LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
u_int32_t pm_pdirpa; /* PA of PD (read-only after create) */
struct vm_page *pm_ptphint; /* pointer to a PTP in our pmap */
struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
int pm_flags; /* see below */
};
/* pm_flags */
#define PMF_USER_LDT 0x01 /* pmap has user-set LDT */
/*
* for each managed physical page we maintain a list of <PMAP,VA>'s
* which it is mapped at. the list is headed by a pv_head structure.
* there is one pv_head per managed phys page (allocated at boot time).
* the pv_head structure points to a list of pv_entry structures (each
* describes one mapping).
*/
struct pv_entry;
struct pv_head {
struct simplelock pvh_lock; /* locks every pv on this list */
struct pv_entry *pvh_list; /* head of list (locked by pvh_lock) */
};
/* These are kept in the vm_physseg array. */
#define PGA_REFERENCED 0x01 /* page is referenced */
#define PGA_MODIFIED 0x02 /* page is modified */
struct pv_entry { /* locked by its list's pvh_lock */
struct pv_entry *pv_next; /* next entry */
struct pmap *pv_pmap; /* the pmap */
vaddr_t pv_va; /* the virtual address */
struct vm_page *pv_ptp; /* the vm_page of the PTP */
};
/*
* pv_entrys are dynamically allocated in chunks from a single page.
* we keep track of how many pv_entrys are in use for each page and
* we can free pv_entry pages if needed. there is one lock for the
* entire allocation system.
*/
struct pv_page_info {
TAILQ_ENTRY(pv_page) pvpi_list;
struct pv_entry *pvpi_pvfree;
int pvpi_nfree;
};
/*
* number of pv_entry's in a pv_page
* (note: won't work on systems where NPBG isn't a constant)
*/
#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
sizeof(struct pv_entry))
/*
* a pv_page: where pv_entrys are allocated from
*/
struct pv_page {
struct pv_page_info pvinfo;
struct pv_entry pvents[PVE_PER_PVPAGE];
};
/*
* pmap_remove_record: a record of VAs that have been unmapped, used to
* flush TLB. if we have more than PMAP_RR_MAX then we stop recording.
*/
#define PMAP_RR_MAX 16 /* max of 16 pages (64K) */
struct pmap_remove_record {
int prr_npages;
vaddr_t prr_vas[PMAP_RR_MAX];
};
/*
* global kernel variables
*/
/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;
extern struct pmap kernel_pmap_store; /* kernel pmap */
extern int nkpde; /* current # of PDEs for kernel */
extern int pmap_pg_g; /* do we support PG_G? */
/*
* macros
*/
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
void pmap_bootstrap(void);
void pmap_procwr(struct proc *, vaddr_t, size_t);
#define pmap_kernel() (&__pmap_kernel)
#define pmap_update(pmap) ((void)0)
#define pmap_copy(dp,sp,d,l,s) ((void)0)
#define pmap_collect(pmap) ((void)0)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
#define pmap_update(pmap) /* nothing (yet) */
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_is_referenced(pg) pmap_test_attrs(pg, PGA_REFERENCED)
#define pmap_is_modified(pg) pmap_test_attrs(pg, PGA_MODIFIED)
#define PMAP_MAP_POOLPAGE(pa) SH3_PHYS_TO_P1SEG((pa))
#define PMAP_UNMAP_POOLPAGE(va) SH3_P1SEG_TO_PHYS((va))
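The pool-page hooks sidestep PTEs entirely by using P1, the cached, untranslated SH segment. Assuming the usual segment constants (SH3_P1SEG_BASE 0x80000000, SH3_PHYS_MASK 0x1fffffff, both referenced elsewhere in this commit), the translation is pure arithmetic:

/* Sketch: P1 is a direct-mapped window over physical memory. */
vaddr_t va = pa | 0x80000000;		/* SH3_PHYS_TO_P1SEG(pa) */
paddr_t back = va & 0x1fffffff;		/* SH3_P1SEG_TO_PHYS(va) */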
#define pmap_copy(DP,SP,D,L,S)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn) sh3_ptob(ppn)
#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
/*
* prototypes
*/
void pmap_activate(struct proc *);
void pmap_bootstrap(vaddr_t);
boolean_t pmap_change_attrs(struct vm_page *, int, int);
void pmap_deactivate(struct proc *);
void pmap_page_remove (struct vm_page *);
void pmap_protect(struct pmap *, vaddr_t,
vaddr_t, vm_prot_t);
void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
boolean_t pmap_test_attrs(struct vm_page *, int);
void pmap_update_pg(vaddr_t);
void pmap_update_2pg(vaddr_t,vaddr_t);
void pmap_write_protect(struct pmap *, vaddr_t,
vaddr_t, vm_prot_t);
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
/*
* Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
*/
/*
* XXX Indeed, first, we should refine physical address v.s. virtual
* address mapping.
* See
* uvm_km.c:uvm_km_free_poolpage1,
* vm_page.h:PHYS_TO_VM_PAGE, vm_physseg_find
* machdep.c:pmap_bootstrap (uvm_page_physload, etc)
*/
/* XXX broken */
#define PMAP_MAP_POOLPAGE(pa) (pa)
#define PMAP_UNMAP_POOLPAGE(va) (va)
vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
paddr_t vtophys(vaddr_t);
void pmap_emulate_reference(struct proc *, vaddr_t, int, int);
#endif /* _KERNEL */
#endif /* _SH3_PMAP_H_ */
/* MD pmap utils. */
pt_entry_t *__pmap_pte_lookup(pmap_t, vaddr_t);
pt_entry_t *__pmap_kpte_lookup(vaddr_t);
boolean_t __pmap_pte_load(pmap_t, vaddr_t, int);
#endif /* !_SH3_PMAP_H_ */

proc.h

@ -1,4 +1,4 @@
/* $NetBSD: proc.h,v 1.3 2002/03/17 14:02:04 uch Exp $ */
/* $NetBSD: proc.h,v 1.4 2002/05/09 12:28:08 uch Exp $ */
/*
* Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
@ -50,8 +50,8 @@ struct md_upte {
struct mdproc {
struct trapframe *md_regs; /* user context */
struct pcb *md_pcb; /* pcb access address */
int md_flags; /* machine-dependent flags */
struct user *md_p3; /* P3 address of p_addr */
/* u-area PTEs: x2 because SH4 writes both UTLB address and data arrays */
struct md_upte md_upte[UPAGES * 2];
__volatile int md_astpending; /* AST pending on return to userland */
@ -62,7 +62,7 @@ struct mdproc {
#ifdef _KERNEL
#ifndef _LOCORE
vsize_t sh_proc0_init(vaddr_t, paddr_t, paddr_t);
void sh_proc0_init(void);
extern struct md_upte *curupte; /* SH3 wired u-area hack */
#endif /* _LOCORE */
#endif /* _KERNEL */

pte.h

@ -1,11 +1,11 @@
/* $NetBSD: pte.h,v 1.7 2002/04/28 17:10:36 uch Exp $ */
/* $NetBSD: pte.h,v 1.8 2002/05/09 12:28:08 uch Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* Copyright (c) 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -17,102 +17,71 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)pte.h 5.5 (Berkeley) 5/9/91
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* SH3
*
* Page Table Entry
*
* T.Horiuchi Brains Corp. 05/26/1998
*/
#ifndef _SH3_PTE_H_
#define _SH3_PTE_H_
#define PDSHIFT 22 /* LOG2(NBPDR) */
#define NBPD (1 << PDSHIFT) /* bytes/page dir */
#define PDOFSET (NBPD-1) /* byte offset into page dir */
#define NPTEPD (NBPD / NBPG)
#ifndef _LOCORE
typedef int pd_entry_t; /* page directory entry */
typedef int pt_entry_t; /* Mach page table entry */
#endif
#define PD_MASK 0xffc00000 /* page directory address bits */
#define PT_MASK 0x003ff000 /* page table address bits */
#define PTES_PER_PTP (NBPD / NBPG) /* # of PTEs in a PTP */
/*
*
* NetBSD/sh3 PTE format.
*
* [Hardware bit]
* SH3
* PPN V PR SZ C D SH
* [28:10][8][6:5][4][3][2][1]
* PPN V PR SZ C D SH
* [28:10][8][6:5][4][3][2][1]
*
* SH4
* V SZ PR SZ C D SH WT
* [28:10][8][7][6:5][4][3][2][1][0]
* V SZ PR SZ C D SH WT
* [28:10][8][7][6:5][4][3][2][1][0]
*
* + NetBSD/sh3 page size is 4KB. [11:10] and [7] can be used as SW bits.
* + [31:29] should be available for SW bits...
* + The SH4 WT bit is not stored in the PTE. U0 is always write-back, and
* P3 is always write-through. (see sh3/trap.c::__setup_pte_sh4())
* We use the WT bit as a SW bit.
*
* Software bit assign
* [Software bit]
* [31] - PMAP_WIRED bit (not hardware wired entry)
* [11:9] - SH4 PCMCIA Assistant bit. (space attribute bit only)
* [7] - Wired page bit.
* [0] - PVlist bit.
*/
/*
* Hardware bits
*/
#define PG_FRAME 0xfffff000 /* page frame mask XXX */
#define PG_V 0x00000100 /* present */
#define PG_UW 0x00000060 /* kernel/user read/write */
#define PG_URKR 0x00000040 /* kernel/user read only */
#define PG_KW 0x00000020 /* kernel read/write */
#define PG_KR 0x00000000 /* kernel read only */
#define PG_PPN 0x1ffff000 /* Physical page number mask */
#define PG_V 0x00000100 /* Valid */
#define PG_PR_MASK 0x00000060 /* Page protection mask */
#define PG_PR_URW 0x00000060 /* kernel/user read/write */
#define PG_PR_URO 0x00000040 /* kernel/user read only */
#define PG_PR_KRW 0x00000020 /* kernel read/write */
#define PG_PR_KRO 0x00000000 /* kernel read only */
#define PG_4K 0x00000010 /* page size 4KB */
#define PG_N 0x00000008 /* 0=non-cacheable */
#define PG_M 0x00000004 /* has been modified */
#define PG_G 0x00000002 /* share status */
#define PG_WT 0x00000001 /* write through (SH4) */
#define PG_C 0x00000008 /* Cacheable */
#define PG_D 0x00000004 /* Dirty */
#define PG_SH 0x00000002 /* Share status */
#define PG_WT 0x00000001 /* Write-through (SH4 only) */
#define PG_HW_BITS 0x1ffff17e /* [28:12][8][6:1] */
/*
* Software bits
*/
/* XXX reference bit is not emulated. */
#define PG_U 0 /* referenced bit */
#define PG_W 0x00000080 /* page is wired */
#define PG_PVLIST 0x00000001 /* mapping has entry on pvlist */
#define _PG_WIRED 0x80000000
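Putting the bits together, a valid, cacheable, user read/write 4KB mapping might be composed as follows (an illustrative sketch, not code from this commit):

/* Sketch: _PG_WIRED sits in bit 31, outside PG_HW_BITS, so it is
 * stripped before the entry reaches the TLB. */
pt_entry_t pte = (pa & PG_PPN) | PG_V | PG_PR_URW | PG_4K | PG_C;
if (wired)
	pte |= _PG_WIRED;		/* software-only wired marker */
u_int32_t tlb_data = pte & PG_HW_BITS;	/* what the hardware actually sees */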
/* SH4 PCMCIA MMU support bits */
/* PTEA SA (Space Attribute bit) */
#define _PG_PCMCIA 0x00000e00
#define _PG_PCMCIA 0x00000e00 /* [11:9] */
#define _PG_PCMCIA_SHIFT 9
#define _PG_PCMCIA_NONE 0x00000000 /* Non PCMCIA space */
#define _PG_PCMCIA_IO 0x00000200 /* IOIS16 signal */
@ -123,4 +92,7 @@ typedef int pt_entry_t; /* Mach page table entry */
#define _PG_PCMCIA_ATTR8 0x00000c00 /* 8 bit attribute */
#define _PG_PCMCIA_ATTR16 0x00000e00 /* 16 bit attribute */
#ifndef _LOCORE
typedef u_int32_t pt_entry_t;
#endif /* _LOCORE */
#endif /* !_SH3_PTE_H_ */

vmparam.h

@ -1,11 +1,11 @@
/* $NetBSD: vmparam.h,v 1.12 2002/04/28 17:10:37 uch Exp $ */
/* $NetBSD: vmparam.h,v 1.13 2002/05/09 12:28:08 uch Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* Copyright (c) 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -17,106 +17,98 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)vmparam.h 5.9 (Berkeley) 5/12/91
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SH3_VMPARAM_H_
#define _SH3_VMPARAM_H_
#include <sys/queue.h>
/*
* Machine dependent constants for NetBSD/sh3.
*/
/* Virtual address map. */
#define VM_MIN_ADDRESS ((vaddr_t)0)
#define VM_MAXUSER_ADDRESS ((vaddr_t)0x7ffff000)
#define VM_MAX_ADDRESS ((vaddr_t)0x7ffff000)
#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)0xc0000000)
#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)0xe0000000)
/*
* Virtual address space arrangement.
* USRTEXT is the start of the user text/data space, while USRSTACK
* is the top (end) of the user stack.
*/
#define USRTEXT NBPG
#define USRSTACK VM_MAXUSER_ADDRESS
/* User program text start address and top of stack */
#define USRTEXT 0x00001000
#define USRSTACK VM_MAXUSER_ADDRESS
/*
* Virtual memory related constants, all in bytes
*/
#define MAXTSIZ (64*1024*1024) /* max text size */
#ifndef DFLDSIZ
#define DFLDSIZ (128*1024*1024) /* initial data size limit */
#endif
/* Virtual memory resource limits. */
#define MAXTSIZ (64 * 1024 * 1024) /* max text size */
#ifndef MAXDSIZ
#define MAXDSIZ (1*1024*1024*1024) /* max data size */
#endif
#ifndef DFLSSIZ
#define DFLSSIZ (2*1024*1024) /* initial stack size limit */
#define MAXDSIZ (512 * 1024 * 1024) /* max data size */
#endif
#ifndef MAXSSIZ
#define MAXSSIZ (32*1024*1024) /* max stack size */
#define MAXSSIZ (32 * 1024 * 1024) /* max stack size */
#endif
/* initial data size limit */
#ifndef DFLDSIZ
#define DFLDSIZ (128 * 1024 * 1024)
#endif
/* initial stack size limit */
#ifndef DFLSSIZ
#define DFLSSIZ (2 * 1024 * 1024)
#endif
/*
* Size of shared memory map
*/
#ifndef SHMMAXPGS
#define SHMMAXPGS 1024
#define SHMMAXPGS 1024
#endif
/*
* Size of User Raw I/O map
*/
#define USRIOSIZE 300
/*
* Mach derived constants
*/
/* user/kernel map constants */
#define VM_MIN_ADDRESS ((vaddr_t)0)
/* PTDPTDI<<PDSHIFT - UPAGES*NBPG */
#define VM_MAXUSER_ADDRESS ((vaddr_t)0x7fffe000)
/* PTDPTDI<<PDSHIFT + PTDPTDI<<PGSHIFT */
#define VM_MAX_ADDRESS ((vaddr_t)0xcffbf000)
/* KPTDI<<PDSHIFT */
#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)0xd0000000)
/* APTDPTDI<<PDSHIFT */
#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)0xdfc00000)
/* XXX max. amount of KVM to be used by buffers. */
#ifndef VM_MAX_KERNEL_BUF
#define VM_MAX_KERNEL_BUF \
((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) * 7 / 10)
/* Size of user raw I/O map */
#ifndef USRIOSIZE
#define USRIOSIZE (MAXBSIZE / NBPG * 8)
#endif
/* virtual sizes (bytes) for various kernel submaps */
#define VM_PHYS_SIZE (USRIOSIZE*NBPG)
#define VM_PHYS_SIZE (USRIOSIZE * NBPG)
/* Physical memory segments */
#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
#define VM_PHYSSEG_NOADD /* no more after vm_mem_init */
#define VM_PHYSSEG_NOADD
#define __HAVE_PMAP_PHYSSEG
#define sh3_round_page(x) ((((u_int32_t)(x)) + PGOFSET) & ~PGOFSET)
#define sh3_trunc_page(x) ((u_int32_t)(x) & ~PGOFSET)
#define sh3_btop(x) ((u_int32_t)(x) >> PGSHIFT)
#define sh3_ptob(x) ((u_int32_t)(x) << PGSHIFT)
/*
* pmap specific data stored in the vm_physmem[] array
*/
struct pmap_physseg {
struct pv_head *pvhead; /* pv_head array */
char *attrs; /* attrs array */
/* pmap-specific data stored in the vm_page structure. */
#define __HAVE_VM_PAGE_MD
#define PVH_REFERENCED 1
#define PVH_MODIFIED 2
#ifndef _LOCORE
struct pv_entry;
struct vm_page_md {
SLIST_HEAD(, pv_entry) pvh_head;
int pvh_flags;
};
#define VM_MDPAGE_INIT(pg) \
do { \
struct vm_page_md *pvh = &(pg)->mdpage; \
SLIST_INIT(&pvh->pvh_head); \
pvh->pvh_flags = 0; \
} while (/*CONSTCOND*/0)
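Each vm_page thus carries its own pv list head and referenced/modified flags; walking all mappings of a page looks roughly like this (a sketch; pv_entry and its link member, here assumed to be pv_link, are defined in the suppressed pmap.c diff):

/* Sketch: visit each (pmap, va) mapping recorded for page pg. */
struct pv_entry *pv;
SLIST_FOREACH(pv, &pg->mdpage.pvh_head, pv_link)
	/* pv names one mapping of pg */;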
#endif /* _LOCORE */
#endif /* !_SH3_VMPARAM_H_ */

Locore.c

@ -1,4 +1,4 @@
/* $NetBSD: Locore.c,v 1.11 2002/04/29 09:32:56 uch Exp $ */
/* $NetBSD: Locore.c,v 1.12 2002/05/09 12:28:08 uch Exp $ */
/*-
* Copyright (c) 1996, 1997, 2002 The NetBSD Foundation, Inc.
@ -88,13 +88,14 @@
#include <uvm/uvm_extern.h>
#include <sh3/cpu.h>
#include <sh3/psl.h>
#include <sh3/mmu.h>
#include <sh3/locore.h>
#include <sh3/cpu.h>
#include <sh3/pmap.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>
void (*__sh_switch_resume)(struct proc *);
struct proc *cpu_switch_search(void);
struct proc *cpu_switch_search(struct proc *);
void idle(void);
int want_resched;
@ -107,25 +108,21 @@ int want_resched;
#endif
/*
* struct proc *cpu_switch_search(void):
* struct proc *cpu_switch_search(struct proc *oldproc):
* Find the highest priority process.
*/
struct proc *
cpu_switch_search()
cpu_switch_search(struct proc *oldproc)
{
struct prochd *q;
struct proc *p;
int s;
curproc = 0;
s = _cpu_intr_suspend();
SCHED_LOCK_IDLE();
while (sched_whichqs == 0) {
SCHED_UNLOCK_IDLE();
_cpu_intr_resume(s);
idle();
s = _cpu_intr_suspend();
SCHED_LOCK_IDLE();
}
@ -133,10 +130,14 @@ cpu_switch_search()
p = q->ph_link;
remrunqueue(p);
want_resched = 0;
_cpu_intr_resume(s);
SCHED_UNLOCK_IDLE();
p->p_stat = SONPROC;
if (p != oldproc) {
curpcb = p->p_md.md_pcb;
pmap_activate(p);
}
curproc = p;
return (p);
@ -152,8 +153,9 @@ idle()
{
spl0();
uvm_pageidlezero();
__asm__ __volatile__("sleep");
splhigh();
splsched();
}
/*
@ -217,13 +219,14 @@ sh3_switch_setup(struct proc *p)
u_int32_t vpn;
int i;
vpn = (u_int32_t)p->p_md.md_p3;
vpn = (u_int32_t)p->p_addr;
vpn &= ~PGOFSET;
for (i = 0; i < UPAGES; i++, pte++, vpn += NBPG, md_upte++) {
pte = vtopte(vpn);
for (i = 0; i < UPAGES; i++, vpn += NBPG, md_upte++) {
pte = __pmap_kpte_lookup(vpn);
KDASSERT(pte && *pte != 0);
md_upte->addr = vpn;
md_upte->data = (*pte & PG_HW_BITS) |
SH3_MMUDA_D_D | SH3_MMUDA_D_V;
md_upte->data = (*pte & PG_HW_BITS) | PG_D | PG_V;
}
}
@ -239,11 +242,12 @@ sh4_switch_setup(struct proc *p)
u_int32_t vpn;
int i, e;
vpn = (u_int32_t)p->p_md.md_p3;
pte = vtopte(vpn);
vpn = (u_int32_t)p->p_addr;
vpn &= ~PGOFSET;
e = SH4_UTLB_ENTRY - UPAGES;
for (i = 0; i < UPAGES; i++, pte++, e++, vpn += NBPG) {
for (i = 0; i < UPAGES; i++, e++, vpn += NBPG) {
pte = __pmap_kpte_lookup(vpn);
KDASSERT(pte && *pte != 0);
/* Address array */
md_upte->addr = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
md_upte->data = vpn | SH4_UTLB_AA_D | SH4_UTLB_AA_V;
@ -335,7 +339,7 @@ copyoutstr(const void *kaddr, void *uaddr, size_t maxlen, size_t *lencopied)
else
rc = ENAMETOOLONG;
out:
out:
if (lencopied)
*lencopied = from - from_top;
curpcb->pcb_onfault = 0;
@ -381,7 +385,7 @@ copyinstr(const void *uaddr, void *kaddr, size_t maxlen, size_t *lencopied)
else
rc = ENAMETOOLONG;
out:
out:
if (lencopied)
*lencopied = from - from_top;
curpcb->pcb_onfault = 0;
@ -417,6 +421,7 @@ copystr(const void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
if (lencopied)
*lencopied = i;
return (ENAMETOOLONG);
}
@ -485,17 +490,17 @@ fuswintr(const void *base)
return (-1);
curpcb->pcb_onfault = &&Err999;
curpcb->fusubail = 1;
curpcb->pcb_faultbail = 1;
rc = *(unsigned short *)uaddr;
curpcb->pcb_onfault = 0;
curpcb->fusubail = 0;
curpcb->pcb_faultbail = 0;
return (rc);
Err999:
curpcb->pcb_onfault = 0;
curpcb->fusubail = 0;
curpcb->pcb_faultbail = 0;
return (-1);
}
@ -537,10 +542,9 @@ suword(void *base, long x)
return (-1);
curpcb->pcb_onfault = &&Err999;
*(int *)uaddr = x;
curpcb->pcb_onfault = 0;
return (0);
Err999:
@ -586,17 +590,18 @@ suswintr(void *base, short x)
return (-1);
curpcb->pcb_onfault = &&Err999;
curpcb->fusubail = 1;
curpcb->pcb_faultbail = 1;
*(short *)uaddr = x;
curpcb->pcb_onfault = 0;
curpcb->fusubail = 0;
curpcb->pcb_faultbail = 0;
return (0);
Err999:
curpcb->pcb_onfault = 0;
curpcb->fusubail = 0;
curpcb->pcb_faultbail = 0;
return (-1);
}

locore_subr.S

@ -1,4 +1,4 @@
/* $NetBSD: locore_subr.S,v 1.9 2002/04/28 17:10:38 uch Exp $ */
/* $NetBSD: locore_subr.S,v 1.10 2002/05/09 12:28:08 uch Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
@ -33,18 +33,17 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_cputype.h"
#include "assym.h"
#include <sys/syscall.h> /* SYS___sigreturn14, SYS_exit */
#include <sh3/asm.h>
#include <sh3/locore.h>
#include <sh3/mmu_sh3.h> /* TTB */
#include <sh3/mmu_sh4.h> /* TTB */
#include <sh3/param.h> /* UPAGES */
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>
.text
.align 5 /* align to cache line size (32B) */
@ -54,9 +53,10 @@
*/
ENTRY(cpu_switch)
/* Save current process's context to switchframe */
mov.l _L.P_ADDR, r0
mov.l _L.SF, r0
mov.l @(r0, r4), r1
add #SF_SIZE, r1
stc.l r7_bank,@-r1
stc.l sr, @-r1
stc.l r6_bank,@-r1
sts.l pr, @-r1
@ -79,27 +79,31 @@ ENTRY(cpu_switch)
bt/s 1f
mov r0, r4 /* new proc */
/* Set curpcb */
mov.l _L.P_ADDR, r0
mov.l @(r0, r4), r1
mov.l _L.curpcb, r0
mov.l r1, @r0 /* curpcb = curproc->p_addr */
/* Setup kernel stack */
mov.l _L.SF, r0
mov.l @(r0, r4), r1 /* switch frame */
mov.l @(SF_R7_BANK, r1), r0 /* stack top */
mov.l @(SF_R6_BANK, r1), r2 /* current frame */
mov.l @(SF_R15, r1), r3 /* current stack */
/* During kernel stack switching, all interrupts are disabled. */
__EXCEPTION_BLOCK(r1, r5)
/* switch to new kernel stack */
ldc r0, r7_bank
ldc r2, r6_bank
mov r3, r15
/* Switch address space. */
mov #PCB_PAGEDIRREG, r0
mov.l @(r0, r1), r0
MOV (TTB, r1)
mov.l r0, @r1 /* TTB = curpcb->pageDirReg */
/* Wire kernel stack */
/* Wire u-area */
MOV (switch_resume, r0)
jsr @r0
nop
mov r4, r8 /* save new proc */
mov r8, r4
__EXCEPTION_UNBLOCK(r0, r1)
/* Now OK to use kernel stack. */
/* Restore new process's context from switchframe */
1: mov.l _L.curpcb, r1
mov.l @r1, r1
mov.l @r1+, r15
1: mov.l _L.SF, r0
mov.l @(r0, r4), r1
add #4, r1 /* r15 already restored */
mov.l @r1+, r14
mov.l @r1+, r13
mov.l @r1+, r12
@ -108,99 +112,45 @@ ENTRY(cpu_switch)
mov.l @r1+, r9
mov.l @r1+, r8
lds.l @r1+, pr
ldc.l @r1+, r6_bank /* current fp */
mov.l @r1+, r0
ldc.l @r1+, r7_bank /* kernel stack top */
ldc r0, sr
add #4, r1 /* r6_bank already restored */
ldc.l @r1+, sr
rts
nop
.align 2
_L.P_ADDR: .long P_ADDR
_L.SF: .long (P_MD + MD_PCB)
_L.cpu_switch_search: .long _C_LABEL(cpu_switch_search)
_L.curpcb: .long _C_LABEL(curpcb)
REG_SYMBOL(TTB)
FUNC_SYMBOL(switch_resume)
#ifdef SH3
/*
* void sh3_switch_resume(struct proc *p)
* Zero clear all TLB. and
* Set current kernel stack PTE table.
* Set the current u-area PTE array to curupte.
* No entries need to be flushed: the u-area mapping is wired, so it
* never causes a modified/reference fault; a u-area TLB fault is
* covered only by the TLB miss exception.
* If a "VPN match but not Valid" situation occurs, SH3 jumps to the
* "generic exception" handler instead of the TLB miss exception.
* NetBSD/sh3 doesn't handle that case; the result is a hard reset
* (the kernel stack can never be accessed).
*/
NENTRY(sh3_switch_resume)
mov.l _L.UPTE, r0
add r4, r0
mov.l _L.curupte, r1
mov.l r0, @r1
mov.l _L.MMUAA, r7
mov.l _L.MMUDA, r6
mov #3, r5 /* max way # */
2: mov r5, r3
shll8 r3
mov #31, r4 /* max entry # */
1: mov r4, r2
shll8 r2
shll2 r2
shll2 r2
or r3, r2 /* r2 = way|entry */
mov r7, r1
or r2, r1 /* Address array */
xor r0, r0
mov.l r0, @r1
mov r6, r1
or r2, r1 /* Data array */
mov.l r0, @r1
cmp/gt r0, r4
bt/s 1b
add #-1, r4
cmp/gt r0, r5
bt/s 2b
add #-1, r5
rts
nop
.align 2
_L.MMUCR: .long SH3_MMUCR
_L.MMUAA: .long SH3_MMUAA
_L.MMUDA: .long SH3_MMUDA
_L.curupte: .long _C_LABEL(curupte)
#endif /* SH3 */
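In C terms the SH3 resume path reduces to publishing the new process's u-area PTE array and letting the TLB miss handler load entries on demand (a sketch, not the committed code):

/* Sketch: SH3 cannot wire TLB entries, so resume only records where
 * the TLB miss handler will find the u-area PTEs. */
void
example_sh3_switch_resume(struct proc *p)
{
	curupte = p->p_md.md_upte;
}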
#ifdef SH4
/*
* void sh4_switch_resume(struct proc *p)
* Invalidate all TLB
* Wire kernel stack.
* Wire the u-area, and invalidate the kernel stack's old TLB entries
* to prevent a TLB multiple hit.
*/
NENTRY(sh4_switch_resume)
/* Invalidate ITLB */
mov.l _L.4_ITLB_AA, r0
mov #1, r2
xor r1, r1
mov.l r1, @r0
shll8 r2
add r2, r0
mov.l r1, @r0
add r2, r0
mov.l r1, @r0
add r2, r0
mov.l r1, @r0
/* Invalidate UTLB */
mov.l _L.4_MMUCR, r0
mov.l @r0, r1
mov.l _L.4_MMUCR_MASK, r2
and r2, r1
mov #4, r2 /* SH4_MMUCR_TI */
or r2, r1
mov.l r1, @r0
/*
* Wire u-area
* for (i = 0; i < UPAGES * 2; i++)
* _reg_write_4(p->p_md.md_upte[i].addr, p->p_md.md_upte[i].data);
* if u-area is P1, nothing to do.
*/
mov.l _L.UPTE,r0
add r0, r4 /* p->p_md.md_upte */
mov #UPAGES,r3
@ -208,10 +158,35 @@ NENTRY(sh4_switch_resume)
mov.l @r4, r0 /* if (p->p_md.md_upte[0].addr == 0) return; */
tst r0, r0
bt 2f
1: /* Address array */
/* Save old ASID and set ASID to zero */
xor r0, r0
mov.l _L.4_PTEH, r1
mov.l @r1, r7
mov.l r0, @r1
mov.l _L.VPN_MASK, r6
mov.l _L.4_UTLB_AA_A, r5
/* TLB address array must be accessed via P2. Setup jump address. */
mova 1f, r0
mov.l _L.P2BASE, r1
or r1, r0
jmp @r0 /* run P2 */
nop
/* Probe VPN match TLB entry and invalidate it. */
.align 2 /* mova target must be 4-byte aligned */
1: mov.l @(4, r4), r0
and r6, r0
mov.l r0, @r5 /* clear D, V */
/* Wire u-area TLB entry */
/* Address array */
mov.l @r4+, r0 /* addr */
mov.l @r4+, r1 /* data */
mov.l r1, @r0 /* *addr = data */
/* Data array */
mov.l @r4+, r0 /* addr */
mov.l @r4+, r1 /* data */
@ -219,12 +194,22 @@ NENTRY(sh4_switch_resume)
cmp/eq r2, r3
bf/s 1b
add #1, r2
2: rts
/* restore ASID */
mov.l _L.4_PTEH, r0
mov.l r7, @r0
mova 2f, r0
jmp @r0 /* run P1 */
nop
.align 2
2: rts /* mova target must be 4-byte aligned */
nop
.align 2
_L.4_PTEH: .long SH4_PTEH
_L.4_UTLB_AA_A: .long (SH4_UTLB_AA | SH4_UTLB_A)
_L.4_ITLB_AA: .long SH4_ITLB_AA
_L.4_MMUCR: .long SH4_MMUCR
_L.4_MMUCR_MASK: .long SH4_MMUCR_MASK
_L.VPN_MASK: .long 0xfffff000
_L.P2BASE: .long 0xa0000000
#endif /* SH4 */
_L.UPTE: .long (P_MD + MD_UPTE)
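The comment inside the routine above already gives the C shape of the wire loop; filled out, it looks roughly like this (a sketch; the ASID save/restore and the associative VPN invalidation done via P2 in the assembly are elided):

/* Sketch: wire the u-area by replaying the precomputed UTLB
 * address-array/data-array write pairs from md_upte[], which
 * sh4_switch_setup() in Locore.c fills in. */
void
example_sh4_switch_resume(struct proc *p)
{
	struct md_upte *upte = p->p_md.md_upte;
	int i;

	if (upte[0].addr == 0)
		return;		/* u-area is P1; nothing to wire */
	for (i = 0; i < UPAGES * 2; i++)
		_reg_write_4(upte[i].addr, upte[i].data);
}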
@ -344,7 +329,7 @@ NENTRY(_cpu_spin)
/*
* proc_trampoline:
* Call the service function with one argument, specified by r12 and r11
* respectively.
* respectively; they are set by cpu_fork().
*/
NENTRY(proc_trampoline)
jsr @r12
@ -381,6 +366,7 @@ _C_LABEL(esigcode):
*/
ENTRY(savectx)
add #SF_SIZE, r4
stc.l r7_bank,@-r4
stc.l sr, @-r4
stc.l r6_bank,@-r4
sts.l pr, @-r4

File diff suppressed because it is too large.

sh3_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: sh3_machdep.c,v 1.39 2002/04/29 09:33:30 uch Exp $ */
/* $NetBSD: sh3_machdep.c,v 1.40 2002/05/09 12:28:09 uch Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc.
@ -125,11 +125,14 @@ struct user *proc0paddr; /* init_main.c use this. */
struct pcb *curpcb;
struct md_upte *curupte; /* SH3 wired u-area hack */
#ifndef IOM_RAM_BEGIN
#if !defined(IOM_RAM_BEGIN)
#error "define IOM_RAM_BEGIN"
#elif (IOM_RAM_BEGIN & SH3_P1SEG_BASE) != 0
#error "IOM_RAM_BEGIN is physical address. not P1 address."
#endif
#define VBR (u_int8_t *)IOM_RAM_BEGIN
vaddr_t ram_start = IOM_RAM_BEGIN;
#define VBR (u_int8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
/* exception handler holder (sh3/sh3/exception_vector.S) */
extern char sh_vector_generic[], sh_vector_generic_end[];
extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
@ -185,116 +188,65 @@ sh_cpu_init(int arch, int product)
memcpy(VBR + 0x600, sh_vector_interrupt,
sh_vector_interrupt_end - sh_vector_interrupt);
sh_icache_sync_all(); /* for I/D separated cache */
if (!SH_HAS_UNIFIED_CACHE)
sh_icache_sync_all();
__asm__ __volatile__ ("ldc %0, vbr" :: "r"(VBR));
__asm__ __volatile__("ldc %0, vbr" :: "r"(VBR));
/* kernel stack setup */
__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;
/* Set page size (4KB) */
uvm_setpagesize();
}
/*
* vsize_t sh_proc0_init(vaddr_t kernend, paddr_t pstart, paddr_t pend)
*
* kernend ... P1 address.
* pstart ... physical address of RAM start address.
* pend ... physical address of the last RAM address
*
* Returns size of stolen memory.
*
* Memory map
* ....| proc0 stack | Page Dir | Page Table |
* * USPACE NBPG (1+nkpde)*NBPG
* kernend
* void sh_proc0_init(void):
* Set up the proc0 u-area.
*/
vsize_t
sh_proc0_init(vaddr_t kernend, paddr_t pstart, paddr_t pend)
void
sh_proc0_init()
{
pd_entry_t *pagedir, *pagetab, pte;
vsize_t sz;
vaddr_t p0;
int i;
struct switchframe *sf;
vaddr_t u;
/* Set default page size (4KB) */
uvm_setpagesize();
/* # of pdes maps whole physical memory area. */
nkpde = sh3_btod(((pend - pstart + 1) + PDOFSET) & ~PDOFSET);
/* Steal page dir area, process0 stack, page table area */
sz = USPACE + NBPG + (1 + nkpde) * NBPG;
p0 = round_page(kernend);
memset((void *)p0, 0, sz);
/* Build initial page tables */
pagedir = (pt_entry_t *)(p0 + USPACE);
pagetab = (pt_entry_t *)(p0 + USPACE + NBPG);
/* Construct a page table directory */
pte = (pt_entry_t)pagetab;
pte |= PG_KW | PG_V | PG_4K | PG_M | PG_N;
pagedir[(SH3_PHYS_TO_P1SEG(pstart)) >> PDSHIFT] = pte;
/* Map whole physical memory space from VM_MIN_KERNEL_ADDRESS */
pte += NBPG;
for (i = 0; i < nkpde; i++, pte += NBPG)
pagedir[(VM_MIN_KERNEL_ADDRESS >> PDSHIFT) + i] = pte;
/* Install a PDE recursively mapping page directory as a page table. */
pte = (pt_entry_t)pagedir;
pte |= PG_V | PG_4K | PG_KW | PG_M | PG_N;
pagedir[PDSLOT_PTE] = pte; /* 0xcfc00000 */
/* Set page directory base */
SH_MMU_TTB_WRITE((u_int32_t)pagedir);
/* Steal process0 u-area */
u = uvm_pageboot_alloc(USPACE);
memset((void *)u, 0, USPACE);
/* Setup proc0 */
proc0paddr = (struct user *)p0;
proc0paddr = (struct user *)u;
proc0.p_addr = proc0paddr;
curpcb = &proc0.p_addr->u_pcb;
curpcb->pageDirReg = (pt_entry_t)pagedir;
/*
* u-area map:
* |user| .... | ............... |
* |   NBPG    +   USPACE - NBPG   +
* pcb_fp(P1)                pcb_sp
* pcb_fp and pcb_sp are stored into r6_bank, r7_bank
* when context switching.
* |   NBPG    |   USPACE - NBPG   |
* frame top               stack top
* current frame ... r6_bank
* stack top     ... r7_bank
* current stack ... r15
*/
curpcb->pcb_sp = p0 + USPACE;
curpcb->pcb_fp = p0 + NBPG;
curpcb->pcb_sf.sf_r6_bank = curpcb->pcb_fp;
curpcb->pcb_sf.sf_r15 = curpcb->pcb_sp;
__asm__ __volatile__("ldc %0, r6_bank" :: "r"(curpcb->pcb_fp));
__asm__ __volatile__("ldc %0, r7_bank" :: "r"(curpcb->pcb_sp));
curpcb = proc0.p_md.md_pcb = &proc0.p_addr->u_pcb;
curupte = proc0.p_md.md_upte;
/* trap frame */
proc0.p_md.md_regs = (struct trapframe *)curpcb->pcb_fp - 1;
sf = &curpcb->pcb_sf;
sf->sf_r6_bank = u + NBPG;
sf->sf_r7_bank = sf->sf_r15 = u + USPACE;
__asm__ __volatile__("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
__asm__ __volatile__("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));
proc0.p_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
#ifdef KSTACK_DEBUG
memset((char *)(p0 + sizeof(struct user)), 0x5a,
memset((char *)(u + sizeof(struct user)), 0x5a,
NBPG - sizeof(struct user));
memset((char *)(p0 + NBPG), 0xa5, USPACE - NBPG);
#endif
/* Enable MMU */
sh_mmu_start();
/* Mask all interrupt */
_cpu_intr_suspend();
/* Enable exception for P3 access */
_cpu_exception_resume(0);
return (p0 + sz - kernend);
memset((char *)(u + NBPG), 0xa5, USPACE - NBPG);
#endif /* KSTACK_DEBUG */
}
void
sh_startup()
{
caddr_t v;
int i, sz, base, residual;
int i, base, residual;
vaddr_t minaddr, maxaddr;
vsize_t size;
char pbuf[9];
@ -304,7 +256,7 @@ sh_startup()
printf("%s", cpu_model);
#ifdef DEBUG
printf("general exception handler:\t%d byte\n",
sh_vector_generic_end - sh_vector_generic);
sh_vector_generic_end - sh_vector_generic);
printf("TLB miss exception handler:\t%d byte\n",
#if defined(SH3) && defined(SH4)
CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
@ -316,22 +268,12 @@ sh_startup()
#endif
);
printf("interrupt exception handler:\t%d byte\n",
sh_vector_interrupt_end - sh_vector_interrupt);
sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */
format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
printf("total memory = %s\n", pbuf);
/*
* Find out how much space we need, allocate it,
* and then give everything true virtual addresses.
*/
sz = (int)allocsys(NULL, NULL);
if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
if (allocsys(v, NULL) - v != sz)
panic("startup: table size inconsistency");
/*
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
@ -339,9 +281,9 @@ sh_startup()
size = MAXBSIZE * nbuf;
buffers = 0;
if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != 0)
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != 0)
panic("sh3_startup: cannot allocate VM for buffers");
minaddr = (vaddr_t)buffers;
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
@ -371,7 +313,7 @@ sh_startup()
panic("sh3_startup: not enough memory for "
"buffer cache");
pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
VM_PROT_READ|VM_PROT_WRITE);
VM_PROT_READ|VM_PROT_WRITE);
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
@ -383,13 +325,13 @@ sh_startup()
* limits the number of processes exec'ing at any time.
*/
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
/*
* Allocate a submap for physio
*/
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, 0, FALSE, NULL);
VM_PHYS_SIZE, 0, FALSE, NULL);
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf("avail memory = %s\n", pbuf);
@ -400,7 +342,6 @@ sh_startup()
* Set up buffers, so they can be used to read disk labels.
*/
bufinit();
}
/*
@ -415,23 +356,6 @@ cpu_dumpconf()
{
}
/*
* Doadump comes here after turning off memory management and
* getting on the dump stack, either when called above, or by
* the auto-restart code.
*/
#define BYTES_PER_DUMP NBPG /* must be a multiple of pagesize XXX small */
static vaddr_t dumpspace;
vaddr_t
reserve_dumppages(p)
vaddr_t p;
{
dumpspace = p;
return (p + BYTES_PER_DUMP);
}
void
dumpsys()
{
@ -465,7 +389,7 @@ sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
/* Allocate space for the signal handler context. */
if (onstack)
fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
p->p_sigctx.ps_sigstk.ss_size);
p->p_sigctx.ps_sigstk.ss_size);
else
fp = (struct sigframe *)tf->tf_r15;
fp--;
@ -496,7 +420,7 @@ sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
frame.sf_sc.sc_r2 = tf->tf_r2;
frame.sf_sc.sc_r1 = tf->tf_r1;
frame.sf_sc.sc_r0 = tf->tf_r0;
frame.sf_sc.sc_trapno = tf->tf_trapno;
frame.sf_sc.sc_expevt = tf->tf_expevt;
/* Save signal stack. */
frame.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;
@ -607,11 +531,9 @@ sys___sigreturn14(struct proc *p, void *v, register_t *retval)
void
setregs(struct proc *p, struct exec_package *pack, u_long stack)
{
register struct pcb *pcb = &p->p_addr->u_pcb;
register struct trapframe *tf;
struct trapframe *tf;
p->p_md.md_flags &= ~MDP_USEDFPU;
pcb->pcb_flags = 0;
tf = p->p_md.md_regs;
@ -619,9 +541,9 @@ setregs(struct proc *p, struct exec_package *pack, u_long stack)
tf->tf_r1 = 0;
tf->tf_r2 = 0;
tf->tf_r3 = 0;
tf->tf_r4 = *(int *)stack; /* argc */
tf->tf_r5 = stack+4; /* argv */
tf->tf_r6 = stack+4*tf->tf_r4 + 8; /* envp */
tf->tf_r4 = fuword((caddr_t)stack); /* argc */
tf->tf_r5 = stack + 4; /* argv */
tf->tf_r6 = stack + 4 * tf->tf_r4 + 8; /* envp */
tf->tf_r7 = 0;
tf->tf_r8 = 0;
tf->tf_r9 = (int)p->p_psstr;
@ -643,7 +565,7 @@ cpu_reset()
{
_cpu_exception_suspend();
_reg_write_4(SH_(EXPEVT), 0x020); /* manual reset */
_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);
goto *(u_int32_t *)0xa0000000;
/* NOTREACHED */

vm_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.32 2002/04/28 17:10:39 uch Exp $ */
/* $NetBSD: vm_machdep.c,v 1.33 2002/05/09 12:28:09 uch Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
@ -61,18 +61,11 @@
#include <uvm/uvm_extern.h>
#include <sh3/locore.h>
#include <sh3/cpu.h>
#include <sh3/reg.h>
#include <sh3/mmu.h>
#include <sh3/locore.h>
/* XXX XXX XXX */
#ifdef SH4
#define TLBFLUSH() (cacheflush(), sh_tlb_invalidate_all())
#else
#define TLBFLUSH() sh_tlb_invalidate_all()
#endif
/* XXX XXX XXX */
#include <sh3/cache.h>
/*
* Finish a fork operation, with process p2 nearly set up.
@ -100,68 +93,72 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack,
struct pcb *pcb;
struct trapframe *tf;
struct switchframe *sf;
vaddr_t spbase;
vaddr_t spbase, fptop;
#define P1ADDR(x) (SH3_PHYS_TO_P1SEG(*__pmap_kpte_lookup(x) & PG_PPN))
/* XXX vtophys doesn't return a physical address */
#define P1ADDR(x) \
((vtophys((vaddr_t)(x)) & SH3_PHYS_MASK) | SH3_P1SEG_BASE)
#ifdef DIAGNOSTIC
if (p1 != curproc && p1 != &proc0)
panic("cpu_fork: curproc");
#endif
/*
* wbinv u-area to avoid cache-aliasing, since trapframe
* top is accessed from P1 instead of P3.
*/
if (CPU_IS_SH4)
sh_dcache_wbinv_range((vaddr_t)p2->p_addr, USPACE);
p2->p_md.md_p3 = p2->p_addr;
KDASSERT(!(p1 != curproc && p1 != &proc0));
/* Copy flags */
p2->p_md.md_flags = p1->p_md.md_flags;
/* Sync the switchframe before we copy it. */
if (p1 == curproc)
savectx(curpcb);
/* Copy PCB */
pcb = &p2->p_addr->u_pcb;
*pcb = p1->p_addr->u_pcb;
/* Set page directory base to pcb */
pmap_activate(p2);
/* set up the kernel stack pointer */
spbase = (vaddr_t)p2->p_md.md_p3 + NBPG;
#ifdef P1_STACK
/* Convert to P1 from P3 */
spbase = P1ADDR(spbase);
#else /* P1_STACK */
/* Prepare kernel stack PTEs */
if (CPU_IS_SH3)
sh3_switch_setup(p2);
else
sh4_switch_setup(p2);
#endif /* P1_STACK */
pcb->pcb_sp = spbase + USPACE - NBPG;
#ifdef SH3
/*
* Convert frame pointer top to P1. Because SH3 can't make a wired
* TLB entry, accesses to the context store space must not cause an
* exception. SH4 can make wired entry, no need to convert to P1.
* exception. For SH3, pages are 4KB, so the P3/P1 conversion doesn't
* cause virtual aliasing.
*/
pcb->pcb_fp = (vaddr_t)P1ADDR(pcb) + NBPG; /* P1 */
if (CPU_IS_SH3) {
pcb = (struct pcb *)P1ADDR((vaddr_t)&p2->p_addr->u_pcb);
p2->p_md.md_pcb = pcb;
fptop = (vaddr_t)pcb + NBPG;
}
#endif /* SH3 */
#ifdef SH4
/* SH4 can make wired entry, no need to convert to P1. */
if (CPU_IS_SH4) {
pcb = &p2->p_addr->u_pcb;
p2->p_md.md_pcb = pcb;
fptop = (vaddr_t)pcb + NBPG;
}
#endif /* SH4 */
/* set up the kernel stack pointer */
spbase = (vaddr_t)p2->p_addr + NBPG;
#ifdef P1_STACK
/* Convert to P1 from P3 */
/*
* wbinv u-area to avoid cache-aliasing, since kernel stack
* is accessed from P1 instead of P3.
*/
if (SH_HAS_VIRTUAL_ALIAS)
sh_dcache_wbinv_range((vaddr_t)p2->p_addr, USPACE);
spbase = P1ADDR(spbase);
#else /* P1_STACK */
/* Prepare u-area PTEs */
#ifdef SH3
if (CPU_IS_SH3)
sh3_switch_setup(p2);
#endif
#ifdef SH4
if (CPU_IS_SH4)
sh4_switch_setup(p2);
#endif
#endif /* P1_STACK */
#ifdef KSTACK_DEBUG
memset((char *)pcb->pcb_fp - NBPG + sizeof(struct user), 0x5a,
/* Fill magic number for tracking */
memset((char *)fptop - NBPG + sizeof(struct user), 0x5a,
NBPG - sizeof(struct user));
memset((char *)spbase, 0xa5, (USPACE - NBPG));
#endif
memset(&pcb->pcb_sf, 0xb4, sizeof(struct switchframe));
#endif /* KSTACK_DEBUG */
/*
* Copy the trapframe.
* Copy the user context.
*/
p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_fp - 1;
*tf = *p1->p_md.md_regs;
p2->p_md.md_regs = tf = (struct trapframe *)fptop - 1;
memcpy(tf, p1->p_md.md_regs, sizeof(struct trapframe));
/*
* If specified, give the child a different stack.
@ -173,11 +170,16 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack,
sf = &pcb->pcb_sf;
sf->sf_r11 = (int)arg; /* proc_trampoline hook func */
sf->sf_r12 = (int)func; /* proc_trampoline hook func's arg */
sf->sf_r6_bank = (int)tf; /* frame pointer */
sf->sf_r15 = pcb->pcb_sp; /* stack pointer */
sf->sf_r15 = spbase + USPACE - NBPG; /* current stack pointer */
sf->sf_r7_bank = sf->sf_r15; /* stack top */
sf->sf_r6_bank = (vaddr_t)tf; /* current frame pointer */
/* when switch to me, jump to proc_trampoline */
sf->sf_pr = (int)proc_trampoline;
sf->sf_sr &= ~0xf0; /* SR.IMASK = 0 */
/*
* Enable interrupts when the switch frame is restored, since
* kernel threads begin to run without restoring a trapframe.
*/
sf->sf_sr = PSL_MD; /* kernel mode, interrupt enable */
}
/*
@ -189,21 +191,23 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack,
void
cpu_exit(struct proc *p)
{
struct switchframe *sf;
splsched();
uvmexp.swtch++;
/* Switch to proc0 stack */
curproc = 0;
curpcb = (struct pcb *)proc0.p_addr;
curpcb = proc0.p_md.md_pcb;
sf = &curpcb->pcb_sf;
__asm__ __volatile__(
"mov %0, r15;" /* current stack */
"ldc %1, r6_bank;" /* current frame pointer */
"ldc %2, r7_bank;" /* stack top */
::
"r"(curpcb->pcb_sf.sf_r15),
"r"(curpcb->pcb_sf.sf_r6_bank),
"r"(curpcb->pcb_sp));
"r"(sf->sf_r15),
"r"(sf->sf_r6_bank),
"r"(sf->sf_r7_bank));
/* Schedule freeing process resources */
exit2(p);
@ -260,25 +264,30 @@ cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap.
* Both addresses are assumed to reside in the pmap_kernel().
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
pt_entry_t *fpte, *tpte;
if (size % NBPG)
if ((size & PGOFSET) != 0)
panic("pagemove");
fpte = kvtopte(from);
tpte = kvtopte(to);
fpte = __pmap_kpte_lookup((vaddr_t)from);
tpte = __pmap_kpte_lookup((vaddr_t)to);
if (SH_HAS_VIRTUAL_ALIAS)
sh_dcache_wbinv_range((vaddr_t)from, size);
while (size > 0) {
*tpte++ = *fpte;
*fpte++ = 0;
sh_tlb_invalidate_addr(0, (vaddr_t)from);
sh_tlb_invalidate_addr(0, (vaddr_t)to);
from += NBPG;
to += NBPG;
size -= NBPG;
}
TLBFLUSH();
}
/*