/* $NetBSD: uvm_amap_i.h,v 1.2 1998/02/06 22:31:34 thorpej Exp $ */
/*
* XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
* >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
*/
/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor and
* Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* uvm_amap_i.h
*/
/*
* inline functions [maybe]
*/
#if defined(UVM_AMAP_INLINE) || defined(UVM_AMAP)
/*
* amap_lookup: look up a page in an amap
*
* => amap should be locked by caller.
*/
AMAP_INLINE struct vm_anon *amap_lookup(aref, offset)
struct vm_aref *aref;
vm_offset_t offset;
{
	struct vm_amap *amap = aref->ar_amap;
	struct vm_anon *anon;
	int lcv;
	UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist);

	/*
	 * Convert the byte offset into a slot index and bias it by the
	 * aref's starting slot; the result must fall inside the amap.
	 */
	AMAP_B2SLOT(lcv, offset);
	lcv += aref->ar_slotoff;

	if (lcv >= amap->am_nslot)
		panic("amap_lookup: offset out of range");

	anon = amap->am_anon[lcv];
	UVMHIST_LOG(maphist, "<- done (amap=0x%x, offset=0x%x, result=0x%x)",
	    amap, offset, anon, 0);
	return(anon);
}
/*
* amap_lookups: look up a range of pages in an amap
*
* => amap should be locked by caller.
*/
AMAP_INLINE void amap_lookups(aref, offset, anons, npages)
struct vm_aref *aref;
vm_offset_t offset;
struct vm_anon **anons;
int npages;
{
	struct vm_amap *amap = aref->ar_amap;
	int startslot;
	UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist);

	/* translate the byte offset to the first slot of the range */
	AMAP_B2SLOT(startslot, offset);
	startslot += aref->ar_slotoff;

	UVMHIST_LOG(maphist, " slot=%d, npages=%d, nslot=%d", startslot,
	    npages, amap->am_nslot, 0);

	/* the last slot of the range must still be inside the amap */
	if (startslot + (npages - 1) >= amap->am_nslot)
		panic("amap_lookups: offset out of range");

	/* copy the anon pointers for the whole range in one shot */
	bcopy(&amap->am_anon[startslot], anons,
	    npages * sizeof(struct vm_anon *));

	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
	return;
}
/*
* amap_add: add (or replace) a page to an amap
*
* => caller must lock amap.
* => if (replace) caller must lock anon because we might have to call
* pmap_page_protect on the anon's page.
* => returns an "offset" which is meaningful to amap_unadd().
*/
AMAP_INLINE vm_offset_t amap_add(aref, offset, anon, replace)
struct vm_aref *aref;
vm_offset_t offset;
struct vm_anon *anon;
int replace;
{
	struct vm_amap *amap = aref->ar_amap;
	struct vm_anon *oanon;
	int slot;
	UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist);

	/* locate the target slot for this offset */
	AMAP_B2SLOT(slot, offset);
	slot += aref->ar_slotoff;
	if (slot >= amap->am_nslot)
		panic("amap_add: offset out of range");

	if (replace) {
		/* replacing: the slot must already hold an anon */
		oanon = amap->am_anon[slot];
		if (oanon == NULL)
			panic("amap_add: replacing null anon");
		/*
		 * if the old anon has a resident page and the amap is
		 * shared, revoke all pmap mappings of that page.
		 */
		if (oanon->u.an_page != NULL &&
		    (amap->am_flags & AMAP_SHARED) != 0) {
			pmap_page_protect(PMAP_PGARG(oanon->u.an_page),
			    VM_PROT_NONE);
			/* XXX: suppose page is supposed to be wired somewhere? */
		}
	} else { /* !replace */
		/* adding: the slot must be free; link it into am_slots */
		if (amap->am_anon[slot] != NULL)
			panic("amap_add: slot in use");
		amap->am_bckptr[slot] = amap->am_nused;
		amap->am_slots[amap->am_nused] = slot;
		amap->am_nused++;
	}

	amap->am_anon[slot] = anon;
	UVMHIST_LOG(maphist, "<- done (amap=0x%x, offset=0x%x, anon=0x%x, rep=%d)",
	    amap, offset, anon, replace);
	return(slot);
}
/*
* amap_unadd: remove a page from an amap, given we know the slot #.
*
* => caller must lock amap
*/
/*
 * amap_unadd: remove the anon at "slot" from the amap.
 *
 * => caller must lock amap.
 * => "slot" is the value previously returned by amap_add().
 */
AMAP_INLINE void amap_unadd(amap, slot)
struct vm_amap *amap;
vm_offset_t slot;
{
	int ptr;
	UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist);

	if (slot >= amap->am_nslot)
		panic("amap_unadd: offset out of range");	/* was misreported as amap_add */
	if (amap->am_anon[slot] == NULL)
		panic("amap_unadd: nothing there");

	amap->am_anon[slot] = NULL;
	ptr = amap->am_bckptr[slot];

	/*
	 * am_slots[] is kept dense: if the freed entry is not the last
	 * active one, move the last active entry into its place and
	 * update that entry's back pointer.
	 */
	if (ptr != (amap->am_nused - 1)) {	/* swap to keep slots contig? */
		amap->am_slots[ptr] = amap->am_slots[amap->am_nused - 1];
		amap->am_bckptr[amap->am_slots[ptr]] = ptr;	/* back link */
	}
	amap->am_nused--;
	UVMHIST_LOG(maphist, "<- done (amap=0x%x, slot=0x%x)", amap, slot,0, 0);
}
/*
* amap_ref: gain a reference to an amap
*
* => amap must not be locked (we will lock)
* => called at fork time to gain the child's reference
*/
/*
 * amap_ref: gain a reference to an amap.
 *
 * => amap must not be locked (we will lock).
 * => called at fork time to gain the child's reference.
 */
AMAP_INLINE void amap_ref(entry, flags)
vm_map_entry_t entry;
int flags;
{
	struct vm_amap *amap = entry->aref.ar_amap;
	UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);

	simple_lock(&amap->am_l);
	amap->am_ref++;
	if (flags & AMAP_SHARED)
		amap->am_flags |= AMAP_SHARED;
#ifdef VM_AMAP_PPREF
	/*
	 * establish per-page reference counts when only part of the
	 * amap is being referenced.  BUGFIX: the size of the entry is
	 * (end - start), not (start - end) -- the old expression
	 * underflowed and made the comparison nearly always true.
	 */
	if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
	    (entry->end - entry->start) / PAGE_SIZE != amap->am_nslot)
		amap_pp_establish(amap);
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
		if (flags & AMAP_REFALL)
			amap_pp_adjref(amap, 0, amap->am_nslot * PAGE_SIZE, 1);
		else
			amap_pp_adjref(amap, entry->aref.ar_slotoff,
			    entry->end - entry->start, 1);
	}
#endif
	simple_unlock(&amap->am_l);
	UVMHIST_LOG(maphist,"<- done! amap=0x%x", amap, 0, 0, 0);
}
/*
* amap_unref: remove a reference to an amap
*
* => caller has already removed pmap mappings to amap.
* => entry is no longer a part of a map, so it can't be changed and
* doesn't need to be locked.
* => amap must be unlocked (we will lock it).
*/
/*
 * amap_unref: remove a reference to an amap.
 *
 * => caller has already removed pmap mappings to amap.
 * => entry is no longer a part of a map, so it can't be changed and
 *    doesn't need to be locked.
 * => amap must be unlocked (we will lock it).
 */
AMAP_INLINE void amap_unref(entry, all)
vm_map_entry_t entry;
int all;
{
	struct vm_amap *amap = entry->aref.ar_amap;
	UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);

	/*
	 * lock it
	 */
	simple_lock(&amap->am_l);
	UVMHIST_LOG(maphist,"(entry=0x%x) amap=0x%x refs=%d, nused=%d",
	    entry, amap, amap->am_ref, amap->am_nused);

	/*
	 * if we are the last reference, free the amap and return.
	 */
	if (amap->am_ref == 1) {
		amap_wipeout(amap);	/* drops final ref and frees */
		UVMHIST_LOG(maphist,"<- done (was last ref)!", 0, 0, 0, 0);
		return;			/* no need to unlock */
	}

	/*
	 * otherwise just drop the reference count(s)
	 */
	amap->am_ref--;
	if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
		amap->am_flags &= ~AMAP_SHARED;	/* clear shared flag */
#ifdef VM_AMAP_PPREF
	/*
	 * establish per-page reference counts when only part of the
	 * amap is being dereferenced.  BUGFIX: the size of the entry
	 * is (end - start), not (start - end) -- the old expression
	 * underflowed and made the comparison nearly always true.
	 */
	if (amap->am_ppref == NULL && all == 0 &&
	    (entry->end - entry->start) / PAGE_SIZE != amap->am_nslot)
		amap_pp_establish(amap);
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
		if (all)
			amap_pp_adjref(amap, 0, amap->am_nslot * PAGE_SIZE, -1);
		else
			amap_pp_adjref(amap, entry->aref.ar_slotoff,
			    entry->end - entry->start, -1);
	}
#endif
	simple_unlock(&amap->am_l);
	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
}
#endif /* defined(UVM_AMAP_INLINE) || defined(UVM_AMAP) */