This is a preview of the uvm_hotplug(9) API code.

This commit does not actually introduce the UVM_HOTPLUG option.
However, it does provide developers a way to review, test, and try out
the API.

To do this, please go to tests/sys/uvm/ and build and run the tests
there. The tests also include a set of basic load tests that give a
measure of the performance penalty incurred by enabling the
UVM_HOTPLUG option.

In order to build the tests you need to have at least done the
following in $SRC/

cd $SRC; $NBMAKE do-distrib-dirs includes
cd $SRC/lib/csu; $NBMAKE all install || exit
cd $SRC/external/gpl3/gcc/lib/libgcc/libgcc_s; $NBMAKE all install || exit
cd $SRC/external/gpl3/gcc/lib/libgcc/libgcc; $NBMAKE all install || exit
cd $SRC/lib/libc; $NBMAKE includes all install || exit
cd $SRC/lib/libpthread; $NBMAKE all install || exit
cd $SRC/lib/libm; $NBMAKE all install || exit
cd $SRC/external/gpl3/gcc/lib/libstdc++-v3/; $NBMAKE all install || exit

Once the development environment has these userspace libraries, one
can simply build using $NBMAKE and finally test the kernel API using

atf-run | atf-report
cherry 2016-12-19 12:21:29 +00:00
parent 996a7c47cf
commit 07acf3c096
6 changed files with 5183 additions and 0 deletions

uvm_hotplug.9 (new file, 527 lines)
.\" $NetBSD: uvm_hotplug.9,v 1.1 2016/12/19 12:21:29 cherry Exp $
.\"
.\" Copyright (c) 2016 The NetBSD Foundation, Inc.
.\" All rights reserved.
.\"
.\" This code is derived from software contributed to The NetBSD Foundation
.\" by Cherry G Mathew and Santhosh N Raju.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd November 20, 2016
.Dt UVM_HOTPLUG 9
.Os
.Sh NAME
.Nm uvm_physseg_init ,
.Nm uvm_physseg_valid ,
.Nm uvm_physseg_get_start ,
.Nm uvm_physseg_get_end ,
.Nm uvm_physseg_get_avail_start ,
.Nm uvm_physseg_get_avail_end ,
.Nm uvm_physseg_get_pg ,
.Nm uvm_physseg_get_pmseg ,
.Nm uvm_physseg_get_free_list ,
.Nm uvm_physseg_get_start_hint ,
.Nm uvm_physseg_set_start_hint ,
.Nm uvm_physseg_get_next ,
.Nm uvm_physseg_get_prev ,
.Nm uvm_physseg_get_first ,
.Nm uvm_physseg_get_last ,
.Nm uvm_physseg_get_highest_frame ,
.Nm uvm_physseg_find ,
.Nm uvm_page_physload ,
.Nm uvm_page_physunload ,
.Nm uvm_page_physunload_force ,
.Nm uvm_physseg_plug ,
.Nm uvm_physseg_unplug ,
.Nm uvm_physseg_set_avail_start ,
.Nm uvm_physseg_set_avail_end
.Nd memory hotplug manager
.Sh SYNOPSIS
.In uvm/uvm_physseg.h
.Ft void
.Fn uvm_physseg_init "void"
.Ft uvm_physseg_t
.Fn uvm_page_physload "paddr_t start" "paddr_t end" "paddr_t avail_start" \
"paddr_t avail_end" "int free_list"
.Ft bool
.Fn uvm_page_physunload "uvm_physseg_t upm" "int free_list" \
"paddr_t *paddrp"
.Ft bool
.Fn uvm_page_physunload_force "uvm_physseg_t upm" "int free_list" \
"paddr_t *paddrp"
.Ft bool
.Fn uvm_physseg_plug "paddr_t pfn" "size_t npages" "uvm_physseg_t *upmp"
.Ft bool
.Fn uvm_physseg_unplug "paddr_t pfn" "size_t npages"
.Sh DESCRIPTION
These utility routines provide the ability to tell
.Xr uvm 9
about system memory segments.
When the kernel is compiled with
.Cd 'options UVM_HOTPLUG' ,
memory segments are handled in a dynamic data structure
.Pq Xr rbtree 3
instead of a static array.
This enables kernel code to add or remove information about memory
segments at any point after boot, hence
.Dq hotplug .
.Pp
.Fn uvm_page_physload ,
.Fn uvm_page_physunload ,
and
.Fn uvm_page_physunload_force
are legacy interfaces which may be removed in the future.
They must never be used after
.Xr uvm_init 9 .
.Pp
WARNING: This is an experimental feature and should not be used in
production environments.
Furthermore, attempting to
.Dq hotplug
after boot without
.Cd 'options UVM_HOTPLUG'
will almost certainly end in a
.Xr panic 9 .
.Sh USAGE
.Ss INITIALIZING HOTPLUG
The function
.Fn uvm_physseg_init
initializes the hotplug subsystem. This is expected to happen exactly
once, at boot time, and from MD code.
.Ss PLUGGING IN MEMORY
.Fn uvm_page_physload
registers a memory segment span with
.Xr uvm 9
on a specified
.Fa free_list .
It must be called at system boot time as part of setting up memory
management.
The arguments describe the starting and ending physical addresses of the
segment, and the available starting and ending addresses of pages not
already in use.
If a system has memory banks of different speeds, the slower memory should be
given a higher
.Fa free_list
value.
.Bl -tag -offset indent -width "avail_start"
.It Fa start
Starting page frame number of the physical memory segment.
.It Fa end
Ending page frame number of the physical memory segment.
.It Fa avail_start
Available starting page frame number of the physical memory segment.
.It Fa avail_end
Available ending page frame number of the physical memory segment.
.It Fa free_list
The free list to place the segment on; free list types are defined in
the machine-dependent code.
.El
.Pp
This function returns a valid
.Fa uvm_physseg_t
handle when a successful plug occurs, and
.Dv UVM_PHYSSEG_TYPE_INVALID
when the plug fails.
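.Pp
For example, boot-time MD code could register a segment as in the
following minimal sketch; the page frame numbers and the
.Dv VM_FREELIST_DEFAULT
constant are machine-dependent and purely illustrative:
.Bd -literal -offset indent
uvm_physseg_t seg;

/* start, end, avail_start, avail_end are page frame numbers. */
seg = uvm_page_physload(0x100, 0x800, 0x100, 0x800,
    VM_FREELIST_DEFAULT);
if (seg == UVM_PHYSSEG_TYPE_INVALID)
        panic("uvm_page_physload failed");
.Ed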
.Pp
.Fn uvm_physseg_plug
registers a memory segment span with
.Xr uvm 9 .
It can also be called to initiate a hotplug and register a newly
.Dq hotplugged
physical memory range with the VM system.
Unlike
.Fn uvm_page_physload ,
this function can, if
.Cd 'options UVM_HOTPLUG'
is enabled at compile time, be used after
.Xr uvm_init 9 .
The arguments describe the start page frame, the number of pages to
plug starting from the start page frame, and an optional return
variable, which points to a valid
.Fa uvm_physseg_t
handle when a successful plug occurs.
.Bl -tag -offset indent -width "npages"
.It Fa pfn
Starting page frame number of the physical memory segment.
.It Fa npages
Total number of pages from the starting page frame number to plug in.
.It Fa upmp
If
.Fa upmp
is not
.Dv NULL ,
then on a successful plug the
.Fa uvm_physseg_t
handle for the segment which was plugged is written to the location it
points to.
.El
.Pp
This function returns
.Fa true
when a successful plug occurs,
.Fa false
otherwise.
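.Pp
A minimal sketch of a post-boot hotplug follows; the names
.Va new_pfn
and
.Va new_npages
are hypothetical and stand for the first page frame and the page count
of the newly arrived memory:
.Bd -literal -offset indent
uvm_physseg_t upm;

if (!uvm_physseg_plug(new_pfn, new_npages, &upm))
        return false;   /* nothing was registered */

/* On success, upm identifies the newly plugged segment. */
.Ed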
.Ss UNPLUGGING MEMORY
The functions
.Fn uvm_page_physunload ,
.Fn uvm_page_physunload_force ,
and
.Fn uvm_physseg_unplug
make
.Xr uvm 9
forget about previously registered memory segments or portions
thereof.
.Pp
.Fn uvm_page_physunload
unloads pages from a segment (from the front or from the back),
depending on availability.
When the last page is removed, the segment handle is invalidated and
supporting metadata is freed.
.Pp
Note: this function can only be used at boot time.
Pages, once unloaded, are unregistered from uvm and are therefore
assumed to be managed by the code which called
.Fn uvm_page_physunload
(usually boot time MD code, for boot time memory allocation).
.Pp
The arguments are:
.Bl -tag -offset indent -width "free_list"
.It Fa upm
The handle identifying the segment from which to unload memory.
.It Fa free_list
The free list of the segment; free list types are defined in the
machine-dependent code.
.It Fa paddrp
A pointer through which the physical address of the unloaded page is
returned.
.El
.Pp
If the unload was successful,
.Fa true
is returned,
.Fa false
otherwise.
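.Pp
A sketch of boot-time MD code stealing a single page from the segment
.Va upm ;
the consumer of the returned address is hypothetical:
.Bd -literal -offset indent
paddr_t pa;

if (uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &pa)) {
        /* The page at pa is no longer known to uvm. */
        early_page_consumer(pa);        /* hypothetical consumer */
}
.Ed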
.Pp
.Fn uvm_page_physunload_force
unconditionally unloads pages from a segment.
When the last page is removed, the segment handle
is invalidated and supporting metadata is freed.
.Pp
Note: this function can only be used at boot time.
Pages, once unloaded, are unregistered from uvm and are therefore
assumed to be managed by the code which called
.Fn uvm_page_physunload_force
(usually boot time MD code, for boot time memory allocation).
.Pp
The arguments are:
.Bl -tag -offset indent -width "free_list"
.It Fa upm
The handle identifying the segment from which to unload memory.
.It Fa free_list
The free list of the segment; free list types are defined in the
machine-dependent code.
.It Fa paddrp
A pointer through which the physical address of the unloaded page is
returned.
.El
.Pp
If the unload was successful,
.Fa true
is returned,
.Fa false
otherwise.
.Pp
.Fn uvm_physseg_unplug
can be called to unplug an existing physical memory segment.
Unlike
.Fn uvm_page_physunload
and
.Fn uvm_page_physunload_force ,
it can be called after
.Xr uvm_init 9
if
.Cd 'options UVM_HOTPLUG'
is enabled at compile time.
.Xr uvm_hotplug 9
makes no effort to manage the state of the underlying physical memory.
It is up to the caller to ensure that it is not in use, either by
.Xr uvm 9
or by any other sub-system.
Further, any hardware quiescing that may be required is the
responsibility of MD code.
The arguments describe the start page frame and the number of pages to
unplug:
.Bl -tag -offset indent -width "npages"
.It Fa pfn
Starting page frame number of the physical memory segment.
.It Fa npages
Total number of pages from the starting page frame number to unplug.
.El
.Pp
Returns
.Fa true
on success and
.Fa false
on failure.
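.Pp
A minimal sketch of unplugging a range that the caller has already
quiesced; the names
.Va gone_pfn
and
.Va gone_npages
are hypothetical:
.Bd -literal -offset indent
if (!uvm_physseg_unplug(gone_pfn, gone_npages))
        return false;   /* the range is still registered */
.Ed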
.Sh UTILITY FUNCTIONS
.Bl -ohang
.It bool
.Fn uvm_physseg_valid "uvm_physseg_t upm";
.It paddr_t
.Fn uvm_physseg_get_start "uvm_physseg_t upm";
.It paddr_t
.Fn uvm_physseg_get_end "uvm_physseg_t upm";
.It paddr_t
.Fn uvm_physseg_get_avail_start "uvm_physseg_t upm";
.It paddr_t
.Fn uvm_physseg_get_avail_end "uvm_physseg_t upm";
.It struct vm_page *
.Fn uvm_physseg_get_pg "uvm_physseg_t upm" "paddr_t index";
.It struct pmap_physseg *
.Fn uvm_physseg_get_pmseg "uvm_physseg_t upm";
.It int
.Fn uvm_physseg_get_free_list "uvm_physseg_t upm";
.It u_int
.Fn uvm_physseg_get_start_hint "uvm_physseg_t upm";
.It bool
.Fn uvm_physseg_set_start_hint "uvm_physseg_t upm" "u_int start_hint";
.It uvm_physseg_t
.Fn uvm_physseg_get_next "uvm_physseg_t upm";
.It uvm_physseg_t
.Fn uvm_physseg_get_prev "uvm_physseg_t upm";
.It uvm_physseg_t
.Fn uvm_physseg_get_first "void";
.It uvm_physseg_t
.Fn uvm_physseg_get_last "void";
.It paddr_t
.Fn uvm_physseg_get_highest_frame "void";
.It uvm_physseg_t
.Fn uvm_physseg_find "paddr_t pframe" "psize_t *offsetp";
.It void
.Fn uvm_physseg_set_avail_start "uvm_physseg_t upm" "paddr_t avail_start";
.It void
.Fn uvm_physseg_set_avail_end "uvm_physseg_t upm" "paddr_t avail_end";
.El
.Pp
.Fn uvm_physseg_valid
validates the handle that is passed in and returns
.Fa true
if the given handle is valid,
.Fa false
otherwise.
.Pp
.Fn uvm_physseg_get_start
returns the starting physical address of the segment if a valid
.Fa uvm_physseg_t
handle is passed in.
The returned value is of type
.Fa paddr_t .
In case the handle is invalid, the returned value will match
.Fa ( paddr_t )
-1.
.Pp
.Fn uvm_physseg_get_end
returns the ending physical address of the segment if a valid
.Fa uvm_physseg_t
handle is passed in.
The returned value is of type
.Fa paddr_t .
In case the handle is invalid, the returned value will match
.Fa ( paddr_t )
-1.
.Pp
.Fn uvm_physseg_get_avail_start
returns the available starting physical address of the segment if a valid
.Fa uvm_physseg_t
handle is passed in.
The returned value is of type
.Fa paddr_t .
In case the handle is invalid, the returned value will match
.Fa ( paddr_t )
-1.
.Pp
.Fn uvm_physseg_get_avail_end
returns the available ending physical address of the segment if a valid
.Fa uvm_physseg_t
handle is passed in.
The returned value is of type
.Fa paddr_t .
In case the handle is invalid, the returned value will match
.Fa ( paddr_t )
-1.
.Pp
.Fn uvm_physseg_get_pg
returns the
.Fa struct vm_page *
object at the given index if a valid
.Fa uvm_physseg_t
handle and an index value are passed in.
.Pp
.Fn uvm_physseg_get_pmseg
returns the
.Fa struct pmap_physseg *
object of the segment if a valid
.Fa uvm_physseg_t
handle is passed in.
.Pp
.Fn uvm_physseg_get_free_list
returns the
.Fa free_list
with which the segment is associated if a valid
.Fa uvm_physseg_t
handle is passed in.
The returned value is of type
.Fa int .
.Pp
.Fn uvm_physseg_get_start_hint
returns the
.Fa start_hint
value of the segment if a valid
.Fa uvm_physseg_t
handle is passed in.
The returned value is of type
.Fa u_int .
.Pp
.Fn uvm_physseg_set_start_hint
sets the
.Fa start_hint
value on the segment if a valid handle is passed in, and returns
.Fa true
to indicate that the value was set.
In case the handle is invalid,
.Fa false
is returned.
.Pp
.Fn uvm_physseg_get_next
returns the next valid
.Fa uvm_physseg_t
handle in the sequence if a valid handle is passed in.
However, if the handle passed in is the last segment in the sequence,
the function returns
.Fa UVM_PHYSSEG_TYPE_INVALID_OVERFLOW .
Passing an invalid handle is not fatal, and returns
.Fa UVM_PHYSSEG_TYPE_INVALID .
.Pp
.Fn uvm_physseg_get_prev
returns the previous valid
.Fa uvm_physseg_t
handle in the sequence if a valid handle is passed in.
However, if the handle passed in is the first segment in the sequence,
the function returns
.Fa UVM_PHYSSEG_TYPE_INVALID_EMPTY .
Passing an invalid handle is not fatal, and returns
.Fa UVM_PHYSSEG_TYPE_INVALID .
.Pp
.Fn uvm_physseg_get_first
returns the first valid
.Fa uvm_physseg_t
handle in the sequence.
However, if there are no valid handles in the sequence yet, the
function returns
.Fa UVM_PHYSSEG_TYPE_INVALID_EMPTY .
.Pp
.Fn uvm_physseg_get_last
returns the last valid
.Fa uvm_physseg_t
handle in the sequence.
However, if there are no valid handles in the sequence yet, the
function returns
.Fa UVM_PHYSSEG_TYPE_INVALID_EMPTY .
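.Pp
The traversal functions are typically combined as in the following
sketch, which walks every registered segment; this mirrors the
initialization loop used by the tests under
.Pa tests/sys/uvm :
.Bd -literal -offset indent
uvm_physseg_t bank;

for (bank = uvm_physseg_get_first();
     uvm_physseg_valid(bank);
     bank = uvm_physseg_get_next(bank)) {
        printf("segment: %#" PRIxPADDR "..%#" PRIxPADDR "\en",
            uvm_physseg_get_start(bank), uvm_physseg_get_end(bank));
}
.Ed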
.Pp
.Fn uvm_physseg_get_highest_frame
returns the frame number of the highest registered physical page frame.
The returned value is of type
.Fa paddr_t .
XXX: Searching on an empty sequence is not yet handled by this function.
.Pp
.Fn uvm_physseg_find
searches for the segment containing the page frame
.Fa ( paddr_t )
passed in.
If a segment whose starting and ending addresses contain the page frame
is found, the corresponding
.Fa uvm_physseg_t
handle is returned; otherwise
.Fa UVM_PHYSSEG_TYPE_INVALID
is returned.
If the second parameter is not
.Dv NULL
and the search is successful, the offset of the page frame with respect
to the starting address of the segment is stored there as a
.Fa psize_t
value.
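.Pp
A sketch of looking up the
.Fa struct vm_page *
for a physical address
.Va pa ,
similar to the helper used by the tests under
.Pa tests/sys/uvm ,
where atop() is the usual macro converting a physical address to a
page frame number:
.Bd -literal -offset indent
psize_t off;
uvm_physseg_t upm;
struct vm_page *pg = NULL;

upm = uvm_physseg_find(atop(pa), &off);
if (upm != UVM_PHYSSEG_TYPE_INVALID)
        pg = uvm_physseg_get_pg(upm, off);
.Ed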
.Pp
.Fn uvm_physseg_set_avail_start
sets the available starting physical address of the segment, of type
.Fa paddr_t ,
if a valid
.Fa uvm_physseg_t
handle is passed in.
.Pp
.Fn uvm_physseg_set_avail_end
sets the available ending physical address of the segment, of type
.Fa paddr_t ,
if a valid
.Fa uvm_physseg_t
handle is passed in.
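.Pp
For example, MD code built with
.Dv PMAP_STEAL_MEMORY
that steals pages from the front of a segment could record this as in
the following sketch; the stolen page count
.Va npages_stolen
is hypothetical:
.Bd -literal -offset indent
uvm_physseg_set_avail_start(upm,
    uvm_physseg_get_avail_start(upm) + npages_stolen);
.Ed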
.Sh NOTES
.Fn uvm_physseg_plug
and
.Fn uvm_physseg_unplug
must never be used after
.Xr uvm_init 9
in a kernel build where
.Cd 'options UVM_HOTPLUG'
is not enabled.
.Sh DIAGNOSTICS
Tests for
.Xr uvm_hotplug 9
are in
.Pa tests/sys/uvm .
.Pp
Unit / functional tests are in
.Pa tests/sys/uvm/t_uvm_physseg.c .
These tests focus on the expected working of the
.Xr uvm_hotplug 9
API and its utility functions.
.Pp
Load tests can be found in
.Pa tests/sys/uvm/t_uvm_physseg_load.c .
These tests focus on stressing the
.Xr uvm_hotplug 9
implementation in order to make performance comparisons between kernel
builds with and without
.Cd 'options UVM_HOTPLUG' .
.\" .Sh RETURN VALUES
.\" .Sh EXAMPLES
.Sh CODE REFERENCES
The uvm hotplug feature is implemented in the file
.Pa sys/uvm/uvm_physseg.c .
The uvm hotplug API is exported via
.Pa sys/uvm/uvm_physseg.h .
.Sh SEE ALSO
.Xr extent 9 ,
.Xr free 9 ,
.Xr malloc 9 ,
.Xr memoryallocators 9 ,
.Xr uvm 9
.Sh HISTORY
This API emerged out of the need to insert new pages at runtime in the
Xen
.Pq Xr balloon 9
driver.
.Sh AUTHORS
.An -nosplit
.An Cherry G. Mathew
.Aq Mt cherry@NetBSD.org
designed and integrated the API.
.Pp
.An Santhosh N. Raju
.Aq Mt santhosh.raju@gmail.com
implemented the dynamic segment handling code and all tests for this API.
.Pp
.An Nick Hudson
.Aq Mt skrll@NetBSD.org
contributed bugfixes and testing on a wide range of hardware ports.

sys/uvm/uvm_physseg.c (new file, 1395 lines; diff omitted because it is too large)

sys/uvm/uvm_physseg.h (new file, 118 lines)
/* $NetBSD: uvm_physseg.h,v 1.1 2016/12/19 12:21:29 cherry Exp $ */
/*
* Consolidated API from uvm_page.c and others.
* Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
*/
#ifndef _UVM_UVM_PHYSSEG_H_
#define _UVM_UVM_PHYSSEG_H_
#if defined(_KERNEL_OPT)
#include "opt_uvm_hotplug.h"
#endif
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
/*
* No APIs are explicitly #included in uvm_physseg.c
*/
#if defined(UVM_HOTPLUG) /* rbtree implementation */
#define PRIxPHYSSEG "p"
/*
* These are specific values of invalid constants for uvm_physseg_t.
* uvm_physseg_valid() == false on any of the below constants.
*
* Specific invalid constants encapsulate specific explicit failure
* scenarios (see the comments next to them)
*/
#define UVM_PHYSSEG_TYPE_INVALID NULL /* Generic invalid value */
#define UVM_PHYSSEG_TYPE_INVALID_EMPTY NULL /* empty segment access */
#define UVM_PHYSSEG_TYPE_INVALID_OVERFLOW NULL /* ran off the end of the last segment */
typedef struct uvm_physseg * uvm_physseg_t;
#else /* UVM_HOTPLUG */
#define PRIxPHYSSEG "d"
/*
* These are specific values of invalid constants for uvm_physseg_t.
* uvm_physseg_valid() == false on any of the below constants.
*
* Specific invalid constants encapsulate specific explicit failure
* scenarios (see the comments next to them)
*/
#define UVM_PHYSSEG_TYPE_INVALID -1 /* Generic invalid value */
#define UVM_PHYSSEG_TYPE_INVALID_EMPTY -1 /* empty segment access */
#define UVM_PHYSSEG_TYPE_INVALID_OVERFLOW (uvm_physseg_get_last() + 1) /* ran off the end of the last segment */
typedef int uvm_physseg_t;
#endif /* UVM_HOTPLUG */
void uvm_physseg_init(void);
bool uvm_physseg_valid(uvm_physseg_t);
/*
* Return start/end pfn of given segment
* Returns: -1 if the segment number is invalid
*/
paddr_t uvm_physseg_get_start(uvm_physseg_t);
paddr_t uvm_physseg_get_end(uvm_physseg_t);
paddr_t uvm_physseg_get_avail_start(uvm_physseg_t);
paddr_t uvm_physseg_get_avail_end(uvm_physseg_t);
struct vm_page * uvm_physseg_get_pg(uvm_physseg_t, paddr_t);
#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg * uvm_physseg_get_pmseg(uvm_physseg_t);
#endif
int uvm_physseg_get_free_list(uvm_physseg_t);
u_int uvm_physseg_get_start_hint(uvm_physseg_t);
bool uvm_physseg_set_start_hint(uvm_physseg_t, u_int);
/*
* Functions to help walk the list of segments.
* Returns: NULL if the segment number is invalid
*/
uvm_physseg_t uvm_physseg_get_next(uvm_physseg_t);
uvm_physseg_t uvm_physseg_get_prev(uvm_physseg_t);
uvm_physseg_t uvm_physseg_get_first(void);
uvm_physseg_t uvm_physseg_get_last(void);
/* Return the frame number of the highest registered physical page frame */
paddr_t uvm_physseg_get_highest_frame(void);
/* Actually, uvm_page_physload takes PF#s which need their own type */
uvm_physseg_t uvm_page_physload(paddr_t, paddr_t, paddr_t,
    paddr_t, int);
bool uvm_page_physunload(uvm_physseg_t, int, paddr_t *);
bool uvm_page_physunload_force(uvm_physseg_t, int, paddr_t *);
uvm_physseg_t uvm_physseg_find(paddr_t, psize_t *);
bool uvm_physseg_plug(paddr_t, size_t, uvm_physseg_t *);
bool uvm_physseg_unplug(paddr_t, size_t);
#if defined(PMAP_STEAL_MEMORY)
/*
* XXX: Legacy: This needs to be upgraded to a full pa management
* layer.
*/
void uvm_physseg_set_avail_start(uvm_physseg_t, paddr_t);
void uvm_physseg_set_avail_end(uvm_physseg_t, paddr_t);
#endif /* PMAP_STEAL_MEMORY */
#endif /* _UVM_UVM_PHYSSEG_H_ */

tests/sys/uvm/Makefile (new file, 24 lines)
# $NetBSD: Makefile,v 1.1 2016/12/19 12:21:29 cherry Exp $
#
WARNS?=6
.include <bsd.own.mk>
TESTSDIR= ${TESTSBASE}/sys/uvm
CPPFLAGS+= -I${NETBSDSRCDIR}/sys -I${.CURDIR}/ -D_TEST -g
# Depend on the kernel source files too
DPSRCS= ${NETBSDSRCDIR}/sys/uvm/uvm_physseg.[ch]
.PATH: ${NETBSDSRCDIR}/sys/kern
TESTS_C+= t_uvm_physseg
SRCS.t_uvm_physseg+= t_uvm_physseg.c subr_extent.c
CPPFLAGS.t_uvm_physseg.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -DDIAGNOSTIC
CPPFLAGS.subr_extent.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -D_KERNTYPES -DDIAGNOSTIC
TESTS_C+= t_uvm_physseg_load
SRCS.t_uvm_physseg_load+= t_uvm_physseg_load.c subr_extent.c
CPPFLAGS.t_uvm_physseg_load.c= -D_EXTENT_TESTING -D__POOL_EXPOSE -DDIAGNOSTIC
.include <bsd.dep.mk>
.include <bsd.test.mk>

tests/sys/uvm/t_uvm_physseg.c (diff omitted because it is too large)

tests/sys/uvm/t_uvm_physseg_load.c (new file, 741 lines)
/* $NetBSD: t_uvm_physseg_load.c,v 1.1 2016/12/19 12:21:29 cherry Exp $ */
/*-
* Copyright (c) 2015, 2016 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Santhosh N. Raju <santhosh.raju@gmail.com> and
* by Cherry G. Mathew
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: t_uvm_physseg_load.c,v 1.1 2016/12/19 12:21:29 cherry Exp $");
/* Testing API - assumes userland */
/* Provide Kernel API equivalents */
#include <assert.h>
#include <stdbool.h>
#include <string.h> /* memset(3) et. al */
#include <stdio.h> /* printf(3) */
#include <stdlib.h> /* malloc(3) */
#include <stdarg.h>
#include <stddef.h>
#include <time.h>
#define PRIxPADDR "lx"
#define PRIxPSIZE "lx"
#define PRIuPSIZE "lu"
#define PRIxVADDR "lx"
#define PRIxVSIZE "lx"
#define PRIuVSIZE "lu"
#define UVM_HOTPLUG /* Enable hotplug with rbtree. */
#define PMAP_STEAL_MEMORY
#define DEBUG /* Enable debug functionality. */
typedef unsigned long vaddr_t;
typedef unsigned long paddr_t;
typedef unsigned long psize_t;
typedef unsigned long vsize_t;
#include <uvm/uvm_page.h>
/*
* If this line is commented out, tests related to uvm_physseg_get_pmseg()
* won't run.
*
* Have a look at machine/uvm_physseg.h for more details.
*/
#define __HAVE_PMAP_PHYSSEG
#include <uvm/uvm_physseg.h>
/*
* This is a dummy struct used for testing purposes
*
* In reality this struct would exist in the MD part of the code residing in
* machines/vmparam.h
*/
#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg {
bool dummy_variable; /* Dummy variable use for testing */
};
#endif
#ifndef DIAGNOSTIC
#define KASSERTMSG(e, msg, ...) /* NOTHING */
#define KASSERT(e) /* NOTHING */
#else
#define KASSERT(a) assert(a)
#define KASSERTMSG(exp, ...) do { if (!(exp)) { printf(__VA_ARGS__); assert((exp)); } } while (0)
#endif
#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
#define VM_NFREELIST 4
#define VM_FREELIST_DEFAULT 0
#define VM_FREELIST_FIRST16 3
#define VM_FREELIST_FIRST1G 2
#define VM_FREELIST_FIRST4G 1
/*
* Used in tests when Array implementation is tested
*/
#if !defined(VM_PHYSSEG_MAX)
#define VM_PHYSSEG_MAX 32
#endif
#define PAGE_SIZE 4096
#define PAGE_SHIFT 12
#define atop(x) (((paddr_t)(x)) >> PAGE_SHIFT)
#define mutex_enter(l)
#define mutex_exit(l)
#define _SYS_KMEM_H_ /* Disallow the real kmem API (see below) */
/* free(p) XXX: pgs management need more thought */
#define kmem_alloc(size, flags) malloc(size)
#define kmem_zalloc(size, flags) malloc(size)
#define kmem_free(p, size) free(p)
psize_t physmem;
struct uvmexp uvmexp; /* decl */
/*
* uvm structure borrowed from uvm.h
*
* Remember this is a dummy structure used within the ATF Tests and
* uses only necessary fields from the original uvm struct.
* See uvm/uvm.h for the full struct.
*/
struct uvm {
/* vm_page related parameters */
bool page_init_done; /* TRUE if uvm_page_init() finished */
} uvm;
static void
panic(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
printf("\n");
va_end(ap);
KASSERT(false);
/*NOTREACHED*/
}
static void
uvm_pagefree(struct vm_page *pg)
{
return;
}
#if defined(UVM_HOTPLUG)
static void
uvmpdpol_reinit(void)
{
return;
}
#endif /* UVM_HOTPLUG */
/* end - Provide Kernel API equivalents */
#include "uvm/uvm_physseg.c"
#include <atf-c.h>
#define ONE_MEGABYTE (1024 * 1024)
/* Sample Page Frame Numbers */
#define VALID_START_PFN_1 atop(0)
#define VALID_END_PFN_1 atop(ONE_MEGABYTE)
#define VALID_AVAIL_START_PFN_1 atop(0)
#define VALID_AVAIL_END_PFN_1 atop(ONE_MEGABYTE)
#define VALID_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_2 atop(ONE_MEGABYTE * 2)
#define VALID_AVAIL_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_2 atop(ONE_MEGABYTE * 2)
#define VALID_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_END_PFN_3 atop(ONE_MEGABYTE * 3)
#define VALID_AVAIL_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_AVAIL_END_PFN_3 atop(ONE_MEGABYTE * 3)
#define VALID_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_4 atop(ONE_MEGABYTE * 128)
#define VALID_AVAIL_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_4 atop(ONE_MEGABYTE * 128)
#define VALID_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_5 atop(ONE_MEGABYTE * 256)
#define VALID_AVAIL_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_5 atop(ONE_MEGABYTE * 256)
/*
* Total number of pages (of 4K size each) should be 256 for 1MB of memory.
*/
#define PAGE_COUNT_1M 256
/*
* The number of Page Frames to allot per segment
*/
#define PF_STEP 8
/*
* A debug function to print the content of upm.
*/
static inline void
uvm_physseg_dump_seg(uvm_physseg_t upm)
{
#if defined(DEBUG)
printf("%s: seg->start == %ld\n", __func__,
uvm_physseg_get_start(upm));
printf("%s: seg->end == %ld\n", __func__,
uvm_physseg_get_end(upm));
printf("%s: seg->avail_start == %ld\n", __func__,
uvm_physseg_get_avail_start(upm));
printf("%s: seg->avail_end == %ld\n", __func__,
uvm_physseg_get_avail_end(upm));
printf("====\n\n");
#else
return;
#endif /* DEBUG */
}
/*
* Private accessor that gets the value of vm_physmem.nentries
*/
static int
uvm_physseg_get_entries(void)
{
#if defined(UVM_HOTPLUG)
return uvm_physseg_graph.nentries;
#else
return vm_nphysmem;
#endif /* UVM_HOTPLUG */
}
/*
* Note: This function replicates verbatim what happens in
* uvm_page.c:uvm_page_init().
*
* Please track any changes that happen there.
*/
static void
uvm_page_init_fake(struct vm_page *pagearray, psize_t pagecount)
{
uvm_physseg_t bank;
size_t n;
for (bank = uvm_physseg_get_first(),
uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
uvm_physseg_valid(bank);
bank = uvm_physseg_get_next(bank)) {
n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
uvm_physseg_seg_alloc_from_slab(bank, n);
uvm_physseg_init_seg(bank, pagearray);
/* set up page array pointers */
pagearray += n;
pagecount -= n;
}
uvm.page_init_done = true;
}
/*
* PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
* back from an I/O mapping (ugh!). used in some MD code as well.
*/
static struct vm_page *
uvm_phys_to_vm_page(paddr_t pa)
{
paddr_t pf = atop(pa);
paddr_t off;
uvm_physseg_t psi;
psi = uvm_physseg_find(pf, &off);
if (psi != UVM_PHYSSEG_TYPE_INVALID)
return uvm_physseg_get_pg(psi, off);
return(NULL);
}
//static paddr_t
//uvm_vm_page_to_phys(const struct vm_page *pg)
//{
//
// return pg->phys_addr;
//}
/*
* XXX: To do, write control test cases for uvm_vm_page_to_phys().
*/
/* #define VM_PAGE_TO_PHYS(entry) uvm_vm_page_to_phys(entry) */
#define PHYS_TO_VM_PAGE(pa) uvm_phys_to_vm_page(pa)
/*
* Test Fixture SetUp().
*/
static void
setup(void)
{
/* Prerequisites for running certain calls in uvm_physseg */
uvmexp.pagesize = PAGE_SIZE;
uvmexp.npages = 0;
uvm.page_init_done = false;
uvm_physseg_init();
}
ATF_TC(uvm_physseg_100);
ATF_TC_HEAD(uvm_physseg_100, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
100 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_100, tc)
{
paddr_t pa;
setup();
for(paddr_t i = VALID_START_PFN_1;
i < VALID_END_PFN_1; i += PF_STEP) {
uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
VM_FREELIST_DEFAULT);
}
ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(int i = 0; i < 100; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_1K);
ATF_TC_HEAD(uvm_physseg_1K, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
1000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_1K, tc)
{
paddr_t pa;
setup();
for(paddr_t i = VALID_START_PFN_1;
i < VALID_END_PFN_1; i += PF_STEP) {
uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
VM_FREELIST_DEFAULT);
}
ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(int i = 0; i < 1000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_10K);
ATF_TC_HEAD(uvm_physseg_10K, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
10,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_10K, tc)
{
paddr_t pa;
setup();
for(paddr_t i = VALID_START_PFN_1;
i < VALID_END_PFN_1; i += PF_STEP) {
uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
VM_FREELIST_DEFAULT);
}
ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(int i = 0; i < 10000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_100K);
ATF_TC_HEAD(uvm_physseg_100K, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
100,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_100K, tc)
{
paddr_t pa;
setup();
for(paddr_t i = VALID_START_PFN_1;
i < VALID_END_PFN_1; i += PF_STEP) {
uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
VM_FREELIST_DEFAULT);
}
ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(int i = 0; i < 100000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_1M);
ATF_TC_HEAD(uvm_physseg_1M, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
1,000,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_1M, tc)
{
paddr_t pa;
setup();
for(paddr_t i = VALID_START_PFN_1;
i < VALID_END_PFN_1; i += PF_STEP) {
uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
VM_FREELIST_DEFAULT);
}
ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(int i = 0; i < 1000000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_10M);
ATF_TC_HEAD(uvm_physseg_10M, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
10,000,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_10M, tc)
{
paddr_t pa;
setup();
for(paddr_t i = VALID_START_PFN_1;
i < VALID_END_PFN_1; i += PF_STEP) {
uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
VM_FREELIST_DEFAULT);
}
ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(int i = 0; i < 10000000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_100M);
ATF_TC_HEAD(uvm_physseg_100M, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
100,000,000 calls, VM_PHYSSEG_MAX is 32.");
}
ATF_TC_BODY(uvm_physseg_100M, tc)
{
paddr_t pa;
setup();
for(paddr_t i = VALID_START_PFN_1;
i < VALID_END_PFN_1; i += PF_STEP) {
uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
VM_FREELIST_DEFAULT);
}
ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(int i = 0; i < 100000000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_1MB);
ATF_TC_HEAD(uvm_physseg_1MB, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
10,000,000 calls, VM_PHYSSEG_MAX is 32 on 1 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_1MB, t)
{
paddr_t pa = 0;
paddr_t pf = 0;
psize_t pf_chunk_size = 0;
psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
struct vm_page *slab = malloc(sizeof(struct vm_page) *
(npages1 + npages2));
setup();
/* We start with zero segments */
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
/* Post boot: Fake all segments and pages accounted for. */
uvm_page_init_fake(slab, npages1 + npages2);
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(pf = VALID_START_PFN_2; pf < VALID_END_PFN_2; pf += PF_STEP) {
pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
uvm_physseg_unplug(pf, pf_chunk_size);
}
for(int i = 0; i < 10000000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_2);
if(pa < ctob(VALID_START_PFN_2))
pa += ctob(VALID_START_PFN_2);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_64MB);
ATF_TC_HEAD(uvm_physseg_64MB, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
10,000,000 calls, VM_PHYSSEG_MAX is 32 on 64 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_64MB, t)
{
paddr_t pa = 0;
paddr_t pf = 0;
psize_t pf_chunk_size = 0;
psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
psize_t npages2 = (VALID_END_PFN_3 - VALID_START_PFN_3);
struct vm_page *slab = malloc(sizeof(struct vm_page) *
(npages1 + npages2));
setup();
/* We start with zero segments */
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
/* Post boot: Fake all segments and pages accounted for. */
uvm_page_init_fake(slab, npages1 + npages2);
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_3, npages2, NULL));
ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(pf = VALID_START_PFN_3; pf < VALID_END_PFN_3; pf += PF_STEP) {
pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
uvm_physseg_unplug(pf, pf_chunk_size);
}
for(int i = 0; i < 10000000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_3);
if(pa < ctob(VALID_START_PFN_3))
pa += ctob(VALID_START_PFN_3);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_128MB);
ATF_TC_HEAD(uvm_physseg_128MB, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
10,000,000 calls, VM_PHYSSEG_MAX is 32 on 128 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_128MB, t)
{
paddr_t pa = 0;
paddr_t pf = 0;
psize_t pf_chunk_size = 0;
psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
psize_t npages2 = (VALID_END_PFN_4 - VALID_START_PFN_4);
struct vm_page *slab = malloc(sizeof(struct vm_page)
* (npages1 + npages2));
setup();
/* We start with zero segments */
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
/* Post boot: Fake all segments and pages accounted for. */
uvm_page_init_fake(slab, npages1 + npages2);
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_4, npages2, NULL));
ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(pf = VALID_START_PFN_4; pf < VALID_END_PFN_4; pf += PF_STEP) {
pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
uvm_physseg_unplug(pf, pf_chunk_size);
}
for(int i = 0; i < 10000000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_4);
if(pa < ctob(VALID_START_PFN_4))
pa += ctob(VALID_START_PFN_4);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TC(uvm_physseg_256MB);
ATF_TC_HEAD(uvm_physseg_256MB, tc)
{
atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
10,000,000 calls, VM_PHYSSEG_MAX is 32 on 256 MB Segment.");
}
ATF_TC_BODY(uvm_physseg_256MB, t)
{
paddr_t pa = 0;
paddr_t pf = 0;
psize_t pf_chunk_size = 0;
psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
psize_t npages2 = (VALID_END_PFN_5 - VALID_START_PFN_5);
struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2));
setup();
/* We start with zero segments */
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
/* Post boot: Fake all segments and pages accounted for. */
uvm_page_init_fake(slab, npages1 + npages2);
ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_5, npages2, NULL));
ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
srandom((unsigned)time(NULL));
for(pf = VALID_START_PFN_5; pf < VALID_END_PFN_5; pf += PF_STEP) {
pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
uvm_physseg_unplug(pf, pf_chunk_size);
}
for(int i = 0; i < 10000000; i++) {
pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_5);
if(pa < ctob(VALID_START_PFN_5))
pa += ctob(VALID_START_PFN_5);
PHYS_TO_VM_PAGE(pa);
}
ATF_CHECK_EQ(true, true);
}
ATF_TP_ADD_TCS(tp)
{
/* Fixed memory size tests. */
ATF_TP_ADD_TC(tp, uvm_physseg_100);
ATF_TP_ADD_TC(tp, uvm_physseg_1K);
ATF_TP_ADD_TC(tp, uvm_physseg_10K);
ATF_TP_ADD_TC(tp, uvm_physseg_100K);
ATF_TP_ADD_TC(tp, uvm_physseg_1M);
ATF_TP_ADD_TC(tp, uvm_physseg_10M);
ATF_TP_ADD_TC(tp, uvm_physseg_100M);
#if defined(UVM_HOTPLUG)
/* Variable memory size tests. */
ATF_TP_ADD_TC(tp, uvm_physseg_1MB);
ATF_TP_ADD_TC(tp, uvm_physseg_64MB);
ATF_TP_ADD_TC(tp, uvm_physseg_128MB);
ATF_TP_ADD_TC(tp, uvm_physseg_256MB);
#endif /* UVM_HOTPLUG */
return atf_no_error();
}