/*	$NetBSD: uvm_amap.h,v 1.22 2003/02/01 06:23:54 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _UVM_UVM_AMAP_H_
#define _UVM_UVM_AMAP_H_

/*
 * uvm_amap.h: general amap interface and amap implementation-specific info
 */

/*
 * an amap structure contains pointers to a set of anons that are
 * mapped together in virtual memory (an anon is a single page of
 * anonymous virtual memory -- see uvm_anon.h).  in uvm we hide the
 * details of the implementation of amaps behind a general amap
 * interface.  this allows us to change the amap implementation
 * without having to touch the rest of the code.  this file is divided
 * into two parts: the definition of the uvm amap interface and the
 * amap implementation-specific definitions.
 */
#ifdef _KERNEL

/*
 * part 1: amap interface
 */

/*
 * forward definition of vm_amap structure.  only amap
 * implementation-specific code should directly access the fields of
 * this structure.
 */

struct vm_amap;

/*
 * handle inline options... we allow amap ops to be inline, but we also
 * provide a hook to turn this off.  macros can also be used.
 */

#ifdef UVM_AMAP_INLINE			/* defined/undef'd in uvm_amap.c */
#define AMAP_INLINE static __inline	/* inline enabled */
#else
#define AMAP_INLINE			/* inline disabled */
#endif /* UVM_AMAP_INLINE */

/*
 * prototypes for the amap interface
 */

AMAP_INLINE
void		amap_add	/* add an anon to an amap */
		__P((struct vm_aref *, vaddr_t,
		    struct vm_anon *, boolean_t));
struct vm_amap	*amap_alloc	/* allocate a new amap */
		__P((vaddr_t, vaddr_t, int));
void		amap_copy	/* clear amap needs-copy flag */
		__P((struct vm_map *, struct vm_map_entry *, int,
		    boolean_t, vaddr_t, vaddr_t));
void		amap_cow_now	/* resolve all COW faults now */
		__P((struct vm_map *, struct vm_map_entry *));
int		amap_extend	/* make amap larger */
		__P((struct vm_map_entry *, vsize_t, int));
int		amap_flags	/* get amap's flags */
		__P((struct vm_amap *));
void		amap_free	/* free amap */
		__P((struct vm_amap *));
void		amap_init	/* init amap module (at boot time) */
		__P((void));
void		amap_lock	/* lock amap */
		__P((struct vm_amap *));
AMAP_INLINE
struct vm_anon	*amap_lookup	/* lookup an anon @ offset in amap */
		__P((struct vm_aref *, vaddr_t));
AMAP_INLINE
void		amap_lookups	/* lookup multiple anons */
		__P((struct vm_aref *, vaddr_t,
		    struct vm_anon **, int));
AMAP_INLINE
void		amap_ref	/* add a reference to an amap */
		__P((struct vm_amap *, vaddr_t, vsize_t, int));
int		amap_refs	/* get number of references of amap */
		__P((struct vm_amap *));
void		amap_share_protect /* protect pages in a shared amap */
		__P((struct vm_map_entry *, vm_prot_t));
void		amap_splitref	/* split reference to amap into two */
		__P((struct vm_aref *, struct vm_aref *,
		    vaddr_t));
AMAP_INLINE
void		amap_unadd	/* remove an anon from an amap */
		__P((struct vm_aref *, vaddr_t));
void		amap_unlock	/* unlock amap */
		__P((struct vm_amap *));
AMAP_INLINE
void		amap_unref	/* drop reference to an amap */
		__P((struct vm_amap *, vaddr_t, vsize_t, int));
void		amap_wipeout	/* remove all anons from amap */
		__P((struct vm_amap *));

/*
 * amap flag values
 */

#define AMAP_SHARED	0x1	/* amap is shared */
#define AMAP_REFALL	0x2	/* amap_ref: reference entire amap */

/*
 * amap_extend flags
 */
#define AMAP_EXTEND_BACKWARDS	0x00	/* add "size" to start of map */
#define AMAP_EXTEND_FORWARDS	0x01	/* add "size" to end of map */
#define AMAP_EXTEND_NOWAIT	0x02	/* not allowed to sleep */

#endif /* _KERNEL */
/**********************************************************************/

/*
 * part 2: amap implementation-specific info
 */

/*
 * we currently provide an array-based amap implementation.  in this
 * implementation we provide the option of tracking split references
 * so that we don't lose track of references during partial unmaps
 * ... this is enabled with the "UVM_AMAP_PPREF" define.
 */

#define UVM_AMAP_PPREF		/* track partial references */

/*
 * here is the definition of the vm_amap structure for this implementation.
 */
struct vm_amap {
|
2001-05-26 20:32:40 +04:00
|
|
|
struct simplelock am_l; /* simple lock [locks all vm_amap fields] */
|
1999-01-25 02:53:14 +03:00
|
|
|
int am_ref; /* reference count */
|
|
|
|
int am_flags; /* flags */
|
|
|
|
int am_maxslot; /* max # of slots allocated */
|
|
|
|
int am_nslot; /* # of slots currently in map ( <= maxslot) */
|
|
|
|
int am_nused; /* # of slots currently in use */
|
|
|
|
int *am_slots; /* contig array of active slots */
|
|
|
|
int *am_bckptr; /* back pointer array to am_slots */
|
|
|
|
struct vm_anon **am_anon; /* array of anonymous pages */
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1999-01-25 02:53:14 +03:00
|
|
|
int *am_ppref; /* per page reference count (if !NULL) */
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * note that am_slots, am_bckptr, and am_anon are arrays.  this allows
 * fast lookup of pages based on their virtual address at the expense of
 * some extra memory.  in the future we should be smarter about memory
 * usage and fall back to a non-array based implementation on systems
 * that are short of memory (XXXCDC).
 *
 * the entries in the array are called slots... for example an amap that
 * covers four pages of virtual memory is said to have four slots.  here
 * is an example of the array usage for a four slot amap.  note that only
 * slots one and three have anons assigned to them.  "D/C" means that we
 * "don't care" about the value.
 *
 *            0     1      2     3
 * am_anon:   NULL, anon0, NULL, anon1		(actual pointers to anons)
 * am_bckptr: D/C,  1,     D/C,  0		(points to am_slots entry)
 *
 * am_slots:  3, 1, D/C, D/C			(says slots 3 and 1 are in use)
 *
 * note that am_bckptr is D/C if the slot in am_anon is set to NULL.
 * to find the entry in am_slots for an anon, look at am_bckptr[slot],
 * thus the entry for slot 3 in am_slots[] is at am_slots[am_bckptr[3]].
 * in general, if am_anon[X] is non-NULL, then the following must be
 * true: am_slots[am_bckptr[X]] == X
 *
 * note that am_slots is always contig-packed.
 */

/*
 * defines for handling of large sparse amaps:
 *
 * one of the problems of array-based amaps is that if you allocate a
 * large sparsely-used area of virtual memory you end up allocating
 * large arrays that, for the most part, don't get used.  this is a
 * problem for BSD in that the kernel likes to make these types of
 * allocations to "reserve" memory for possible future use.
 *
 * for example, the kernel allocates (reserves) a large chunk of user
 * VM for possible stack growth.  most of the time only a page or two
 * of this VM is actually used.  since the stack is anonymous memory
 * it makes sense for it to live in an amap, but if we allocated an
 * amap for the entire stack range we could end up wasting a large
 * amount of malloc'd KVM.
 *
 * for example, on the i386 at boot time we allocate two amaps for the stack
 * of /sbin/init:
 *  1. a 7680 slot amap at protection 0 (reserve space for stack)
 *  2. a 512 slot amap at protection 7 (top of stack)
 *
 * most of the array allocated for the amaps for this is never used.
 * the amap interface provides a way for us to avoid this problem by
 * allowing amap_copy() to break larger amaps up into smaller sized
 * chunks (controlled by the "canchunk" option).  we use this feature
 * to reduce our memory usage with the BSD stack management.  if we
 * are asked to create an amap with more than UVM_AMAP_LARGE slots in it,
 * we attempt to break it up into a UVM_AMAP_CHUNK sized amap if the
 * "canchunk" flag is set.
 *
 * so, in the i386 example, the 7680 slot area is never referenced so
 * nothing gets allocated (amap_copy is never called because the protection
 * is zero).  the 512 slot area for the top of the stack is referenced.
 * the chunking code breaks it up into 16 slot chunks (hopefully a single
 * 16 slot chunk is enough to handle the whole stack).
 */
#define UVM_AMAP_LARGE	256	/* # of slots in "large" amap */
#define UVM_AMAP_CHUNK	16	/* # of slots to chunk large amaps in */
#ifdef _KERNEL
#include <sys/mallocvar.h>
MALLOC_DECLARE(M_UVMAMAP);

/*
 * macros
 */

/* AMAP_B2SLOT: convert byte offset to slot */
#define AMAP_B2SLOT(S,B) { \
	KASSERT(((B) & (PAGE_SIZE - 1)) == 0); \
	(S) = (B) >> PAGE_SHIFT; \
}

/*
 * lock/unlock/refs/flags macros
 */

#define amap_flags(AMAP)	((AMAP)->am_flags)
#define amap_lock(AMAP)		simple_lock(&(AMAP)->am_l)
#define amap_refs(AMAP)		((AMAP)->am_ref)
#define amap_unlock(AMAP)	simple_unlock(&(AMAP)->am_l)

/*
 * if we enable PPREF, then we have a couple of extra functions that
 * we need to prototype here...
 */

#ifdef UVM_AMAP_PPREF

#define PPREF_NONE ((int *) -1)	/* not using ppref */

void		amap_pp_adjref		/* adjust references */
		__P((struct vm_amap *, int, vsize_t, int));
void		amap_pp_establish	/* establish ppref */
		__P((struct vm_amap *, vaddr_t));
void		amap_wiperange		/* wipe part of an amap */
		__P((struct vm_amap *, int, int));

#endif /* UVM_AMAP_PPREF */

#endif /* _KERNEL */

#endif /* _UVM_UVM_AMAP_H_ */