diff --git a/sys/kern/subr_csan.c b/sys/kern/subr_csan.c
index 9ed3258df496..4f883ec102db 100644
--- a/sys/kern/subr_csan.c
+++ b/sys/kern/subr_csan.c
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_csan.c,v 1.7 2020/04/02 16:31:37 maxv Exp $ */
+/* $NetBSD: subr_csan.c,v 1.8 2020/04/15 17:28:26 maxv Exp $ */
 
 /*
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_csan.c,v 1.7 2020/04/02 16:31:37 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_csan.c,v 1.8 2020/04/15 17:28:26 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/device.h>
@@ -606,14 +606,10 @@ CSAN_ATOMIC_FUNC_INC(uint, unsigned int, unsigned int);
 CSAN_ATOMIC_FUNC_INC(ulong, unsigned long, unsigned long);
 CSAN_ATOMIC_FUNC_INC(ptr, void *, void);
 
-/*
- * TODO: these two functions should qualify as atomic accesses. However
- * for now we just whitelist them, to reduce the output.
- */
-
 void
 kcsan_atomic_load(const volatile void *p, void *v, int size)
 {
+	kcsan_access((uintptr_t)p, size, false, true, __RET_ADDR);
 	switch (size) {
 	case 1: *(uint8_t *)v = *(const volatile uint8_t *)p; break;
 	case 2: *(uint16_t *)v = *(const volatile uint16_t *)p; break;
@@ -625,6 +621,7 @@ kcsan_atomic_load(const volatile void *p, void *v, int size)
 void
 kcsan_atomic_store(volatile void *p, const void *v, int size)
 {
+	kcsan_access((uintptr_t)p, size, true, true, __RET_ADDR);
 	switch (size) {
 	case 1: *(volatile uint8_t *)p = *(const uint8_t *)v; break;
 	case 2: *(volatile uint16_t *)p = *(const uint16_t *)v; break;
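
The sketch below is a rough, userland illustration of the pattern this change introduces: the atomic load/store wrappers first report their access to the sanitizer's checking hook, flagged as atomic, and only then perform the real memory operation. Everything here (stub_kcsan_access, demo_atomic_load, demo_atomic_store, main) is a hypothetical stand-in written for this sketch, not NetBSD code; the meaning of the hook's parameters (address, size, write, atomic, caller PC) is only inferred from the call sites added in the diff, and __RET_ADDR is assumed to expand to something like __builtin_return_address(0).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's kcsan_access() hook. The real
 * sanitizer records the access and watches for a conflicting access from
 * another CPU; this stub only prints what would be recorded.
 */
static void
stub_kcsan_access(uintptr_t addr, size_t size, bool write, bool atomic,
    uintptr_t pc)
{
	printf("access: addr=%#lx size=%zu %s %s pc=%#lx\n",
	    (unsigned long)addr, size, write ? "write" : "read",
	    atomic ? "atomic" : "plain", (unsigned long)pc);
}

/*
 * Simplified analogues of the patched kcsan_atomic_load()/kcsan_atomic_store():
 * report the access (marked atomic) first, then perform the memory operation.
 */
static void
demo_atomic_load(const volatile void *p, void *v, int size)
{
	stub_kcsan_access((uintptr_t)p, size, false, true,
	    (uintptr_t)__builtin_return_address(0));
	switch (size) {
	case 4: *(uint32_t *)v = *(const volatile uint32_t *)p; break;
	case 8: *(uint64_t *)v = *(const volatile uint64_t *)p; break;
	}
}

static void
demo_atomic_store(volatile void *p, const void *v, int size)
{
	stub_kcsan_access((uintptr_t)p, size, true, true,
	    (uintptr_t)__builtin_return_address(0));
	switch (size) {
	case 4: *(volatile uint32_t *)p = *(const uint32_t *)v; break;
	case 8: *(volatile uint64_t *)p = *(const uint64_t *)v; break;
	}
}

int
main(void)
{
	volatile uint32_t word = 0;
	uint32_t in = 42, out = 0;

	demo_atomic_store(&word, &in, sizeof(word));
	demo_atomic_load(&word, &out, sizeof(word));
	printf("loaded %u\n", out);
	return 0;
}

The practical effect, as far as the diff shows, is that these two wrappers are no longer whitelisted (the removed TODO comment): each call is now reported to kcsan_access() with the atomic flag set, so the detector can still observe a conflict between one of these atomic accesses and a plain, unmarked access to the same location, instead of ignoring the access entirely.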