2021-10-27 06:06:59 +03:00
|
|
|
# $NetBSD: Makefile.libkern,v 1.53 2021/10/27 03:06:59 ryo Exp $
|
2009-01-04 21:00:55 +03:00
|
|
|
|
2019-12-14 20:24:43 +03:00
|
|
|
#
|
|
|
|
# Variable definitions for libkern.
|
2009-01-04 21:00:55 +03:00
|
|
|
#
|
|
|
|
# Before including this, you _must_ set
|
|
|
|
# KERNDIR: location of sys/lib/libkern
|
|
|
|
#
|
|
|
|
# You *may* set:
|
|
|
|
# LIBKERN_ARCH: architecture subdir to be used
|
|
|
|
# KERNCPPFLAGS: see Makefile.inc
|
2010-05-12 01:50:35 +04:00
|
|
|
# KERNMISCCPPFLAGS: see Makefile.inc
|
2009-01-04 21:00:55 +03:00
|
|
|
#
|
|
|
|
|
|
|
|
.include <bsd.own.mk>
|
|
|
|
|
|
|
|
# Pick the architecture-specific subdirectory under ${KERNDIR}/arch.
# An explicitly supplied LIBKERN_ARCH wins, then MACHINE_ARCH, then
# MACHINE_CPU; each candidate is used only if its directory exists.
.if defined(LIBKERN_ARCH) && !empty(LIBKERN_ARCH) && \
|
|
|
|
exists(${KERNDIR}/arch/${LIBKERN_ARCH})
|
|
|
|
ARCHSUBDIR= ${LIBKERN_ARCH}
|
|
|
|
.elif defined(MACHINE_ARCH) && !empty(MACHINE_ARCH) && \
|
|
|
|
exists(${KERNDIR}/arch/${MACHINE_ARCH})
|
|
|
|
ARCHSUBDIR= ${MACHINE_ARCH}
|
|
|
|
.elif defined(MACHINE_CPU) && !empty(MACHINE_CPU) && \
|
|
|
|
exists(${KERNDIR}/arch/${MACHINE_CPU})
|
|
|
|
ARCHSUBDIR= ${MACHINE_CPU}
|
|
|
|
.endif
|
|
|
|
|
|
|
|
# $M: the machine-dependent libkern source directory selected above.
# NOTE(review): if none of the conditionals matched, ARCHSUBDIR is empty
# and $M degenerates to ${KERNDIR}/arch/ -- presumably no port hits that.
M= ${KERNDIR}/arch/${ARCHSUBDIR}
|
|
|
|
|
|
|
|
CPPFLAGS+= -I$M ${KERNCPPFLAGS} ${KERNMISCCPPFLAGS}
|
|
|
|
|
|
|
|
# Pull in the kernel-usable portions of the libraries shared between
# userland and the kernel (src/common/lib/*).
.include "${.PARSEDIR}/../../../common/lib/libc/Makefile.inc"
|
|
|
|
.include "${.PARSEDIR}/../../../common/lib/libutil/Makefile.inc"
|
|
|
|
.include "${.PARSEDIR}/../../../common/lib/libprop/Makefile.inc"
|
2011-08-27 01:22:07 +04:00
|
|
|
.include "${.PARSEDIR}/../../../common/lib/libppath/Makefile.inc"
|
2009-01-04 21:00:55 +03:00
|
|
|
|
|
|
|
CPPFLAGS+= -I${KERNDIR}/../../../common/include
|
Rewrite entropy subsystem.
Primary goals:
1. Use cryptography primitives designed and vetted by cryptographers.
2. Be honest about entropy estimation.
3. Propagate full entropy as soon as possible.
4. Simplify the APIs.
5. Reduce overhead of rnd_add_data and cprng_strong.
6. Reduce side channels of HWRNG data and human input sources.
7. Improve visibility of operation with sysctl and event counters.
Caveat: rngtest is no longer used generically for RND_TYPE_RNG
rndsources. Hardware RNG devices should have hardware-specific
health tests. For example, checking for two repeated 256-bit outputs
works to detect AMD's 2019 RDRAND bug. Not all hardware RNGs are
necessarily designed to produce exactly uniform output.
ENTROPY POOL
- A Keccak sponge, with test vectors, replaces the old LFSR/SHA-1
kludge as the cryptographic primitive.
- `Entropy depletion' is available for testing purposes with a sysctl
knob kern.entropy.depletion; otherwise it is disabled, and once the
system reaches full entropy it is assumed to stay there as far as
modern cryptography is concerned.
- No `entropy estimation' based on sample values. Such `entropy
estimation' is a contradiction in terms, dishonest to users, and a
potential source of side channels. It is the responsibility of the
driver author to study the entropy of the process that generates
the samples.
- Per-CPU gathering pools avoid contention on a global queue.
- Entropy is occasionally consolidated into global pool -- as soon as
it's ready, if we've never reached full entropy, and with a rate
limit afterward. Operators can force consolidation now by running
sysctl -w kern.entropy.consolidate=1.
- rndsink(9) API has been replaced by an epoch counter which changes
whenever entropy is consolidated into the global pool.
. Usage: Cache entropy_epoch() when you seed. If entropy_epoch()
has changed when you're about to use whatever you seeded, reseed.
. Epoch is never zero, so initialize cache to 0 if you want to reseed
on first use.
. Epoch is -1 iff we have never reached full entropy -- in other
words, the old rnd_initial_entropy is (entropy_epoch() != -1) --
but it is better if you check for changes rather than for -1, so
that if the system estimated its own entropy incorrectly, entropy
consolidation has the opportunity to prevent future compromise.
- Sysctls and event counters provide operator visibility into what's
happening:
. kern.entropy.needed - bits of entropy short of full entropy
. kern.entropy.pending - bits known to be pending in per-CPU pools,
can be consolidated with sysctl -w kern.entropy.consolidate=1
. kern.entropy.epoch - number of times consolidation has happened,
never 0, and -1 iff we have never reached full entropy
CPRNG_STRONG
- A cprng_strong instance is now a collection of per-CPU NIST
Hash_DRBGs. There are only two in the system: user_cprng for
/dev/urandom and sysctl kern.?random, and kern_cprng for kernel
users which may need to operate in interrupt context up to IPL_VM.
(Calling cprng_strong in interrupt context does not strike me as a
particularly good idea, so I added an event counter to see whether
anything actually does.)
- Event counters provide operator visibility into when reseeding
happens.
INTEL RDRAND/RDSEED, VIA C3 RNG (CPU_RNG)
- Unwired for now; will be rewired in a subsequent commit.
2020-04-30 06:28:18 +03:00
|
|
|
# Include path for the SHA-3 (Keccak) internals shared with common libc.
CPPFLAGS+= -I${KERNDIR}/../../../common/libc/hash/sha3
|
2009-01-04 21:00:55 +03:00
|
|
|
|
|
|
|
# Search ${KERNDIR} itself for C sources.
.PATH.c: ${KERNDIR}
|
|
|
|
# If the selected architecture ships its own Makefile.inc, search its
# directory for C and assembly sources and let it customize the build.
.if exists ($M/Makefile.inc)
|
|
|
|
.PATH.c: $M
|
|
|
|
.PATH.S: $M
|
|
|
|
.include "$M/Makefile.inc"
|
|
|
|
.endif
|
|
|
|
|
2014-03-12 04:22:53 +04:00
|
|
|
# Rump kernel builds skip the compiler-rt sources.
.if !defined(RUMPKERNEL)
|
2014-07-04 20:44:26 +04:00
|
|
|
.include "${.PARSEDIR}/Makefile.compiler-rt"
|
2014-03-12 04:22:53 +04:00
|
|
|
.endif
|
2009-01-04 21:00:55 +03:00
|
|
|
|
|
|
|
# Other stuff
|
2010-01-20 01:28:30 +03:00
|
|
|
SRCS+= kern_assert.c __main.c
|
2012-01-20 04:25:29 +04:00
|
|
|
SRCS+= cpuset.c inet_addr.c intoa.c
|
2009-12-14 15:18:14 +03:00
|
|
|
# Use the portable C bswap64 only when no MD byte_swap_8 source was
# already added by the architecture Makefile.inc above.
.if empty(SRCS:Mbyte_swap_8.*)
|
2009-08-14 23:23:53 +04:00
|
|
|
SRCS+= bswap64.c
|
2009-12-14 15:18:14 +03:00
|
|
|
.endif
|
2017-11-30 08:47:24 +03:00
|
|
|
# Message digests and hashes.
SRCS+= md4c.c md5c.c rmd160.c sha1.c sha2.c sha3.c keccak.c murmurhash.c
|
2021-10-27 06:06:59 +03:00
|
|
|
SRCS+= pmatch.c mcount.c crc32.c
|
2021-01-25 15:45:49 +03:00
|
|
|
SRCS+= strlist.c
|
2009-01-04 21:00:55 +03:00
|
|
|
|
|
2011-08-27 01:22:07 +04:00
|
|
|
SRCS+= ppath_kmem_alloc.c
|
|
|
|
|
Make copystr() a MI C function, part of libkern and shared on all
architectures.
Notes:
- On alpha and ia64 the function is kept but gets renamed locally to avoid
symbol collision. This is because on these two arches, I am not sure
whether the ASM callers do not rely on fixed registers, so I prefer to
keep the ASM body for now.
- On Vax, only the symbol is removed, because the body is used from other
functions.
- On RISC-V, this change fixes a bug: copystr() was just a wrapper around
strlcpy(), but strlcpy() makes the operation less safe (strlen on the
source beyond its size).
- The kASan, kCSan and kMSan wrappers are removed, because now that
copystr() is in C, the compiler transformations are applied to it,
without the need for manual wrappers.
Could test on amd64 only, but should be fine.
2020-06-30 19:20:00 +03:00
|
|
|
SRCS+= copystr.c
|
2009-08-13 01:18:42 +04:00
|
|
|
SRCS+= strsep.c strstr.c
|
|
|
|
SRCS+= strlcpy.c strlcat.c
|
2009-08-14 23:23:53 +04:00
|
|
|
|
|
2018-09-03 19:54:54 +03:00
|
|
|
SRCS+= imax.c imin.c lmax.c lmin.c uimax.c uimin.c ulmax.c ulmin.c
|
2013-03-10 11:31:03 +04:00
|
|
|
SRCS+= memmove.c
|
2009-08-13 01:18:42 +04:00
|
|
|
SRCS+= strchr.c strrchr.c
|
2018-07-08 20:54:42 +03:00
|
|
|
SRCS+= memcmp.c memmem.c
|
2013-03-10 11:31:03 +04:00
|
|
|
|
|
|
|
SRCS+= memcpy.c
|
2009-12-14 15:18:14 +03:00
|
|
|
# Add the portable memset.c only when no MD memset2 source is present.
.if empty(SRCS:Mmemset2.*)
|
2019-12-14 20:24:43 +03:00
|
|
|
SRCS+= memset.c
|
2009-12-14 15:18:14 +03:00
|
|
|
.endif
|
2013-03-17 04:47:13 +04:00
|
|
|
|
2009-08-13 01:18:42 +04:00
|
|
|
SRCS+= popcount32.c popcount64.c
|
2013-12-02 08:39:10 +04:00
|
|
|
# String-to-integer conversions.
SRCS+= strtoul.c strtoll.c strtoull.c strtoimax.c strtoumax.c
|
2015-01-16 21:36:31 +03:00
|
|
|
SRCS+= strtoi.c strtou.c
|
2016-05-02 22:18:29 +03:00
|
|
|
SRCS+= strnvisx.c
|
2009-08-14 23:23:53 +04:00
|
|
|
|
|
|
|
SRCS+= scanc.c skpc.c
|
|
|
|
SRCS+= random.c
|
First step of random number subsystem rework described in
<20111022023242.BA26F14A158@mail.netbsd.org>. This change includes
the following:
An initial cleanup and minor reorganization of the entropy pool
code in sys/dev/rnd.c and sys/dev/rndpool.c. Several bugs are
fixed. Some effort is made to accumulate entropy more quickly at
boot time.
A generic interface, "rndsink", is added, for stream generators to
request that they be re-keyed with good quality entropy from the pool
as soon as it is available.
The arc4random()/arc4randbytes() implementation in libkern is
adjusted to use the rndsink interface for rekeying, which helps
address the problem of low-quality keys at boot time.
An implementation of the FIPS 140-2 statistical tests for random
number generator quality is provided (libkern/rngtest.c). This
is based on Greg Rose's implementation from Qualcomm.
A new random stream generator, nist_ctr_drbg, is provided. It is
based on an implementation of the NIST SP800-90 CTR_DRBG by
Henric Jungheim. This generator uses AES in a modified counter
mode to generate a backtracking-resistant random stream.
An abstraction layer, "cprng", is provided for in-kernel consumers
of randomness. The arc4random/arc4randbytes API is deprecated for
in-kernel use. It is replaced by "cprng_strong". The current
cprng_fast implementation wraps the existing arc4random
implementation. The current cprng_strong implementation wraps the
new CTR_DRBG implementation. Both interfaces are rekeyed from
the entropy pool automatically at intervals justifiable from best
current cryptographic practice.
In some quick tests, cprng_fast() is about the same speed as
the old arc4randbytes(), and cprng_strong() is about 20% faster
than rnd_extract_data(). Performance is expected to improve.
The AES code in src/crypto/rijndael is no longer an optional
kernel component, as it is required by cprng_strong, which is
not an optional kernel component.
The entropy pool output is subjected to the rngtest tests at
startup time; if it fails, the system will reboot. There is
approximately a 3/10000 chance of a false positive from these
tests. Entropy pool _input_ from hardware random numbers is
subjected to the rngtest tests at attach time, as well as the
FIPS continuous-output test, to detect bad or stuck hardware
RNGs; if any are detected, they are detached, but the system
continues to run.
A problem with rndctl(8) is fixed -- datastructures with
pointers in arrays are no longer passed to userspace (this
was not a security problem, but rather a major issue for
compat32). A new kernel will require a new rndctl.
The sysctl kern.arandom() and kern.urandom() nodes are hooked
up to the new generators, but the /dev/*random pseudodevices
are not, yet.
Manual pages for the new kernel interfaces are forthcoming.
2011-11-20 02:51:18 +04:00
|
|
|
# FIPS 140-2 statistical RNG tests (see the rnd-rework notes above).
SRCS+= rngtest.c
|
2009-01-04 21:00:55 +03:00
|
|
|
|
|
2009-08-14 23:23:53 +04:00
|
|
|
SRCS+= memchr.c
|
2014-07-19 22:38:33 +04:00
|
|
|
SRCS+= strcat.c strcmp.c strcpy.c strcspn.c strlen.c strnlen.c
|
|
|
|
SRCS+= strncat.c strncmp.c strncpy.c strpbrk.c strspn.c
|
2009-08-14 23:23:53 +04:00
|
|
|
SRCS+= strcasecmp.c strncasecmp.c
|
2009-01-04 21:00:55 +03:00
|
|
|
|
|
|
|
SRCS+= xlat_mbr_fstype.c
|
|
|
|
|
2019-12-14 18:30:37 +03:00
|
|
|
# Sorting and tree data structures.
SRCS+= heapsort.c ptree.c radixtree.c rb.c rpst.c
|
2009-01-04 21:00:55 +03:00
|
|
|
|
2017-12-09 00:51:07 +03:00
|
|
|
SRCS+= hexdump.c
|
|
|
|
|
2012-08-30 16:16:48 +04:00
|
|
|
# for crypto
|
2013-06-24 08:21:19 +04:00
|
|
|
SRCS+= explicit_memset.c consttime_memequal.c
|
2012-08-30 16:16:48 +04:00
|
|
|
|
Rewrite entropy subsystem.
Primary goals:
1. Use cryptography primitives designed and vetted by cryptographers.
2. Be honest about entropy estimation.
3. Propagate full entropy as soon as possible.
4. Simplify the APIs.
5. Reduce overhead of rnd_add_data and cprng_strong.
6. Reduce side channels of HWRNG data and human input sources.
7. Improve visibility of operation with sysctl and event counters.
Caveat: rngtest is no longer used generically for RND_TYPE_RNG
rndsources. Hardware RNG devices should have hardware-specific
health tests. For example, checking for two repeated 256-bit outputs
works to detect AMD's 2019 RDRAND bug. Not all hardware RNGs are
necessarily designed to produce exactly uniform output.
ENTROPY POOL
- A Keccak sponge, with test vectors, replaces the old LFSR/SHA-1
kludge as the cryptographic primitive.
- `Entropy depletion' is available for testing purposes with a sysctl
knob kern.entropy.depletion; otherwise it is disabled, and once the
system reaches full entropy it is assumed to stay there as far as
modern cryptography is concerned.
- No `entropy estimation' based on sample values. Such `entropy
estimation' is a contradiction in terms, dishonest to users, and a
potential source of side channels. It is the responsibility of the
driver author to study the entropy of the process that generates
the samples.
- Per-CPU gathering pools avoid contention on a global queue.
- Entropy is occasionally consolidated into global pool -- as soon as
it's ready, if we've never reached full entropy, and with a rate
limit afterward. Operators can force consolidation now by running
sysctl -w kern.entropy.consolidate=1.
- rndsink(9) API has been replaced by an epoch counter which changes
whenever entropy is consolidated into the global pool.
. Usage: Cache entropy_epoch() when you seed. If entropy_epoch()
has changed when you're about to use whatever you seeded, reseed.
. Epoch is never zero, so initialize cache to 0 if you want to reseed
on first use.
. Epoch is -1 iff we have never reached full entropy -- in other
words, the old rnd_initial_entropy is (entropy_epoch() != -1) --
but it is better if you check for changes rather than for -1, so
that if the system estimated its own entropy incorrectly, entropy
consolidation has the opportunity to prevent future compromise.
- Sysctls and event counters provide operator visibility into what's
happening:
. kern.entropy.needed - bits of entropy short of full entropy
. kern.entropy.pending - bits known to be pending in per-CPU pools,
can be consolidated with sysctl -w kern.entropy.consolidate=1
. kern.entropy.epoch - number of times consolidation has happened,
never 0, and -1 iff we have never reached full entropy
CPRNG_STRONG
- A cprng_strong instance is now a collection of per-CPU NIST
Hash_DRBGs. There are only two in the system: user_cprng for
/dev/urandom and sysctl kern.?random, and kern_cprng for kernel
users which may need to operate in interrupt context up to IPL_VM.
(Calling cprng_strong in interrupt context does not strike me as a
particularly good idea, so I added an event counter to see whether
anything actually does.)
- Event counters provide operator visibility into when reseeding
happens.
INTEL RDRAND/RDSEED, VIA C3 RNG (CPU_RNG)
- Unwired for now; will be rewired in a subsequent commit.
2020-04-30 06:28:18 +03:00
|
|
|
# Entropy pool (Keccak sponge) for the rewritten kernel entropy subsystem.
SRCS+= entpool.c
|
|
|
|
|
move bi-endian disklabel support from the kernel and libsa into libkern.
- dkcksum() and dkcksum_sized() move from subr_disk.c and from
libsa into libkern/dkcksum.c (which is missing _sized() version),
using the version from usr.sbin/disklabel.
- swap_disklabel() moves from subr_disk_mbr.c into libkern, now called
disklabel_swap(). (the sh3 version should be updated to use this.)
- DISKLABEL_EI becomes a first-class option with opt_disklabel.h.
- add libkern.h to libsa/disklabel.c.
this enables future work for bi-endian libsa/ufs.c (relevant for ffsv1,
ffsv2, lfsv1, and lfsv2), as well as making it possible for ports not
using subr_disk_mbr.c to include bi-endian disklabel support (which,
afaict, includes any disk on mbr-supporting platforms that do not have
an mbr as well as disklabel.)
builds successfully on: alpha, i386, amd64, sun2, sun3, evbarm64,
evbarm64-eb, sparc, and sparc64. tested in anita on i386 and sparc,
testing in hardware on evbarm64*.
2021-05-17 11:50:36 +03:00
|
|
|
# Bi-endian disklabel support (moved here from the kernel/libsa).
SRCS+= dkcksum.c
|
|
|
|
SRCS+= disklabel_swap.c
|
|
|
|
|
|
2013-12-11 05:24:08 +04:00
|
|
|
# Constant-database (cdb) reader from common libc.
.PATH: ${NETBSDSRCDIR}/common/lib/libc/cdb
|
|
|
|
SRCS+= cdbr.c
|
|
|
|
SRCS+= mi_vector_hash.c
|
|
|
|
|
|
2009-01-04 21:00:55 +03:00
|
|
|
# Files to clean up
|
|
|
|
CLEANFILES+= lib${LIB}.o lib${LIB}.po
|
|
|
|
|
2009-08-13 01:18:42 +04:00
|
|
|
# Remove from SRCS the .c files for any .S files added by the MD makefiles,
|
2009-08-14 23:23:53 +04:00
|
|
|
# also remove from SRCS the .c files for the .c files in NO_SRCS.
|
|
|
|
# (Unlike libc, we don't worry about lint)
|
2009-08-13 01:18:42 +04:00
|
|
|
|
|
|
|
# The ':=' assignments evaluate immediately, so each pass of the loop
# filters the already-updated SRCS rather than the original value.
.for check_file in ${SRCS:M*.S} ${NO_SRCS}
|
|
|
|
unwanted_file := ${SRCS:M${check_file:.S=.c}}
|
|
|
|
.if "${unwanted_file}" != ""
|
|
|
|
SRCS := ${SRCS:N${unwanted_file}}
|
2009-07-21 18:55:32 +04:00
|
|
|
.endif
|
2009-08-13 01:18:42 +04:00
|
|
|
.endfor
|