From 55154c9edc740ae66f7df5bc90c3e72de8338808 Mon Sep 17 00:00:00 2001
From: ad
Date: Sat, 26 Aug 2006 20:08:07 +0000
Subject: [PATCH] Add x86_sfence(), x86_mfence().

---
 sys/arch/amd64/include/cpufunc.h | 26 ++++++++++++++++++++------
 sys/arch/i386/include/cpufunc.h  | 24 ++++++++++++++++++++----
 2 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/sys/arch/amd64/include/cpufunc.h b/sys/arch/amd64/include/cpufunc.h
index a58d4ca6cd1b..0a9c04ccdd10 100644
--- a/sys/arch/amd64/include/cpufunc.h
+++ b/sys/arch/amd64/include/cpufunc.h
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.h,v 1.8 2006/08/19 16:04:41 dsl Exp $ */
+/* $NetBSD: cpufunc.h,v 1.9 2006/08/26 20:08:07 ad Exp $ */
 
 /*-
  * Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -55,18 +55,32 @@ x86_pause(void)
 
 	/* nothing */
 }
 
+/*
+ * XXX if lfence isn't available...
+ *
+ * memory clobber to avoid compiler reordering.
+ */
 static __inline void
 x86_lfence(void)
 {
 
-	/*
-	 * XXX if lfence isn't available...
-	 *
-	 * memory clobber to avoid compiler reordering.
-	 */
 	__asm volatile("lfence" : : : "memory");
 }
 
+static __inline void
+x86_sfence(void)
+{
+
+	__asm volatile("sfence" : : : "memory");
+}
+
+static __inline void
+x86_mfence(void)
+{
+
+	__asm volatile("mfence" : : : "memory");
+}
+
 #ifdef _KERNEL
 extern int cpu_feature;
diff --git a/sys/arch/i386/include/cpufunc.h b/sys/arch/i386/include/cpufunc.h
index 0059c95874a9..e692627dbdb6 100644
--- a/sys/arch/i386/include/cpufunc.h
+++ b/sys/arch/i386/include/cpufunc.h
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.h,v 1.32 2006/08/19 15:21:23 dsl Exp $ */
+/* $NetBSD: cpufunc.h,v 1.33 2006/08/26 20:08:07 ad Exp $ */
 
 /*-
  * Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -55,13 +55,29 @@ x86_pause(void)
 	__asm volatile("pause");
 }
 
+/*
+ * XXX it's better to use real lfence insn if available.
+ *
+ * memory clobber to avoid compiler reordering.
+ */
 static __inline void
 x86_lfence(void)
 {
-	/*
-	 * XXX it's better to use real lfence insn if available.
-	 */
 	__asm volatile("lock; addl $0, 0(%%esp)" : : : "memory");
 }
+
+static __inline void
+x86_sfence(void)
+{
+
+	__asm volatile("lock; addl $0, 0(%%esp)" : : : "memory");
+}
+
+static __inline void
+x86_mfence(void)
+{
+
+	__asm volatile("lock; addl $0, 0(%%esp)" : : : "memory");
+}
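
Usage note (illustrative only, not part of the patch): the sketch below shows one
way a caller might pair these primitives around a shared flag.  The variable names
and the producer()/consumer() functions are hypothetical; only x86_pause(),
x86_lfence() and x86_sfence() come from the header patched above, and
<machine/cpufunc.h> is assumed to be the usual install path for it.

	#include <machine/cpufunc.h>

	static volatile int data;	/* payload written before the flag */
	static volatile int ready;	/* flag the consumer spins on */

	static void
	producer(void)
	{

		data = 42;
		x86_sfence();		/* order the data store before the flag store */
		ready = 1;
	}

	static int
	consumer(void)
	{

		while (ready == 0)
			x86_pause();	/* spin-wait hint */
		x86_lfence();		/* order the flag load before the data load */
		return data;
	}

x86_mfence() would be the choice where a store must be ordered before a later
load, as in a Dekker-style handshake.  On amd64 the three wrappers emit the real
lfence/sfence/mfence instructions; on i386, where those instructions cannot be
assumed to exist, all three expand to "lock; addl $0, 0(%esp)", since a LOCKed
read-modify-write acts as a full barrier on x86 and so conservatively covers the
load-only and store-only cases as well.  In both implementations the "memory"
clobber is what keeps the compiler itself from reordering accesses across the call.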