Implement a real hotpatch feature.

Define a HOTPATCH() macro that puts a label and additional information
into the new .rodata.hotpatch kernel section. In patch.c, scan the
section and patch what needs to be patched. It is now possible to
hotpatch the contents of a macro.

SMAP is switched to use this new system, which saves a call+ret at each
kernel entry/exit point.

Many other operating systems do the same.
maxv 2018-01-07 12:42:46 +00:00
parent b36637ee94
commit 99d8611c28
11 changed files with 150 additions and 86 deletions
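To illustrate the mechanism the message describes, here is a minimal userspace sketch, not part of the commit. It assumes the record layout emitted by the amd64 HOTPATCH() macro further down (one name byte, one size byte, one pointer to the patch site); fake_section, site_a, site_b and hotpatch() are made-up stand-ins for the .rodata.hotpatch section, the patchable instruction bytes and x86_hotpatch(). At boot each SMAP site holds a 3-byte NOP (0F 1F 00); on SMAP-capable CPUs it is rewritten in place to clac or stac.

/*
 * Minimal userspace sketch of the hotpatch mechanism (illustrative only).
 * fake_section, site_a, site_b and hotpatch() are stand-ins for the real
 * .rodata.hotpatch section, the patchable code bytes and x86_hotpatch().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HP_NAME_CLAC	1
#define HP_NAME_STAC	2

struct hotpatch {
	uint8_t name;		/* HP_NAME_* identifier */
	uint8_t size;		/* number of bytes to rewrite at addr */
	void *addr;		/* address of the patchable bytes */
} __attribute__((packed));

/* Two fake patch sites, pre-filled with the 3-byte NOP (nopl (%rax)). */
static uint8_t site_a[3] = { 0x0F, 0x1F, 0x00 };
static uint8_t site_b[3] = { 0x0F, 0x1F, 0x00 };

/* Stand-in for the contents of the .rodata.hotpatch section. */
static struct hotpatch fake_section[] = {
	{ HP_NAME_CLAC, 3, site_a },
	{ HP_NAME_STAC, 3, site_b },
};

/* Same shape as x86_hotpatch() in the patch.c diff: scan, match, patch. */
static void
hotpatch(uint8_t name, const uint8_t *bytes, size_t size)
{
	struct hotpatch *hp;
	size_t n = sizeof(fake_section) / sizeof(fake_section[0]);

	for (hp = fake_section; hp < fake_section + n; hp++) {
		if (hp->name != name)
			continue;
		if (hp->size != size) {
			/* the kernel panic()s on a size mismatch */
			fprintf(stderr, "hotpatch: size mismatch\n");
			return;
		}
		memcpy(hp->addr, bytes, size);
	}
}

int
main(void)
{
	const uint8_t clac[3] = { 0x0F, 0x01, 0xCA };	/* clac */
	const uint8_t stac[3] = { 0x0F, 0x01, 0xCB };	/* stac */

	/* On CPUs with SMAP, the NOPs become clac/stac in place. */
	hotpatch(HP_NAME_CLAC, clac, sizeof(clac));
	hotpatch(HP_NAME_STAC, stac, sizeof(stac));

	printf("site_a: %02x %02x %02x\n", site_a[0], site_a[1], site_a[2]);
	printf("site_b: %02x %02x %02x\n", site_b[0], site_b[1], site_b[2]);
	return 0;
}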

View File

@ -1,4 +1,4 @@
/* $NetBSD: amd64_trap.S,v 1.15 2018/01/06 08:44:01 maxv Exp $ */
/* $NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $ */
/*
* Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
@ -66,7 +66,7 @@
#if 0
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.15 2018/01/06 08:44:01 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $");
#endif
/*
@ -122,7 +122,7 @@ IDTVEC(trap02)
subq $TF_REGSIZE,%rsp
INTR_SAVE_GPRS
cld
callq smap_enable
SMAP_ENABLE
movw %gs,TF_GS(%rsp)
movw %fs,TF_FS(%rsp)
movw %es,TF_ES(%rsp)

View File

@ -1,4 +1,4 @@
/* $NetBSD: copy.S,v 1.28 2017/11/01 09:17:28 maxv Exp $ */
/* $NetBSD: copy.S,v 1.29 2018/01/07 12:42:46 maxv Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@ -107,26 +107,6 @@ ENTRY(do_pmap_load)
ret
END(do_pmap_load)
/*
* SMAP functions. ret+int3+int3 is patched dynamically to STAC/CLAC.
*/
ENTRY(smap_enable)
.Lclacpatch:
ret
int3
int3
ret
END(smap_enable)
ENTRY(smap_disable)
.Lstacpatch:
ret
int3
int3
ret
END(smap_disable)
/*
* Copy routines from and to userland, plus a few more. See the
* section 9 manpages for info. Some cases can be optimized more.
@ -207,7 +187,7 @@ ENTRY(copyout)
cmpq %r8,%rdx
ja _C_LABEL(copy_efault) /* jump if end in kernel space */
callq smap_disable
SMAP_DISABLE
.Lcopyout_start:
movq %rax,%rcx /* length */
shrq $3,%rcx /* count of 8-byte words */
@ -218,7 +198,7 @@ ENTRY(copyout)
rep
movsb /* copy remaining bytes */
.Lcopyout_end:
callq smap_enable
SMAP_ENABLE
xorl %eax,%eax
ret
@ -237,7 +217,7 @@ ENTRY(copyin)
cmpq %r8,%rdx
ja _C_LABEL(copy_efault) /* j if end in kernel space */
callq smap_disable
SMAP_DISABLE
.Lcopyin_start:
3: /* bcopy(%rsi, %rdi, %rax); */
movq %rax,%rcx
@ -249,7 +229,7 @@ ENTRY(copyin)
rep
movsb
.Lcopyin_end:
callq smap_enable
SMAP_ENABLE
xorl %eax,%eax
ret
@ -266,7 +246,7 @@ NENTRY(kcopy_fault)
END(kcopy_fault)
NENTRY(copy_fault)
callq smap_enable
SMAP_ENABLE
ret
END(copy_fault)
@ -288,7 +268,7 @@ ENTRY(copyoutstr)
movq %rax,%r8
1: incq %rdx
callq smap_disable
SMAP_DISABLE
.Lcopyoutstr_start:
1: decq %rdx
jz 2f
@ -297,7 +277,7 @@ ENTRY(copyoutstr)
testb %al,%al
jnz 1b
.Lcopyoutstr_end:
callq smap_enable
SMAP_ENABLE
/* Success -- 0 byte reached. */
decq %rdx
@ -305,7 +285,7 @@ ENTRY(copyoutstr)
jmp copystr_return
2: /* rdx is zero -- return EFAULT or ENAMETOOLONG. */
callq smap_enable
SMAP_ENABLE
movq $VM_MAXUSER_ADDRESS,%r11
cmpq %r11,%rdi
jae _C_LABEL(copystr_efault)
@ -332,7 +312,7 @@ ENTRY(copyinstr)
movq %rax,%r8
1: incq %rdx
callq smap_disable
SMAP_DISABLE
.Lcopyinstr_start:
1: decq %rdx
jz 2f
@ -341,7 +321,7 @@ ENTRY(copyinstr)
testb %al,%al
jnz 1b
.Lcopyinstr_end:
callq smap_enable
SMAP_ENABLE
/* Success -- 0 byte reached. */
decq %rdx
@ -349,7 +329,7 @@ ENTRY(copyinstr)
jmp copystr_return
2: /* rdx is zero -- return EFAULT or ENAMETOOLONG. */
callq smap_enable
SMAP_ENABLE
movq $VM_MAXUSER_ADDRESS,%r11
cmpq %r11,%rsi
jae _C_LABEL(copystr_efault)
@ -364,7 +344,7 @@ ENTRY(copystr_efault)
END(copystr_efault)
ENTRY(copystr_fault)
callq smap_enable
SMAP_ENABLE
copystr_return:
/* Set *lencopied and return %eax. */
testq %r9,%r9
@ -414,9 +394,9 @@ ENTRY(fuswintr)
leaq _C_LABEL(fusuintrfailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
callq smap_disable
SMAP_DISABLE
movzwl (%rdi),%eax
callq smap_enable
SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
ret
@ -431,9 +411,9 @@ ENTRY(fubyte)
leaq _C_LABEL(fusufailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
callq smap_disable
SMAP_DISABLE
movzbl (%rdi),%eax
callq smap_enable
SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
ret
@ -450,9 +430,9 @@ ENTRY(suswintr)
leaq _C_LABEL(fusuintrfailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
callq smap_disable
SMAP_DISABLE
movw %si,(%rdi)
callq smap_enable
SMAP_ENABLE
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
@ -469,9 +449,9 @@ ENTRY(subyte)
leaq _C_LABEL(fusufailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
callq smap_disable
SMAP_DISABLE
movb %sil,(%rdi)
callq smap_enable
SMAP_ENABLE
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
@ -484,14 +464,14 @@ END(subyte)
* because trap.c checks for them.
*/
ENTRY(fusuintrfailure)
callq smap_enable
SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
END(fusuintrfailure)
ENTRY(fusufailure)
callq smap_enable
SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
@ -515,13 +495,13 @@ ENTRY(ucas_64)
ja _C_LABEL(ucas_efault)
movq %rsi,%rax
callq smap_disable
SMAP_DISABLE
.Lucas64_start:
/* Perform the CAS */
lock
cmpxchgq %rdx,(%rdi)
.Lucas64_end:
callq smap_enable
SMAP_ENABLE
/*
* Note: %rax is "old" value.
@ -544,13 +524,13 @@ ENTRY(ucas_32)
ja _C_LABEL(ucas_efault)
movl %esi,%eax
callq smap_disable
SMAP_DISABLE
.Lucas32_start:
/* Perform the CAS */
lock
cmpxchgl %edx,(%rdi)
.Lucas32_end:
callq smap_enable
SMAP_ENABLE
/*
* Note: %eax is "old" value.
@ -568,7 +548,7 @@ ENTRY(ucas_efault)
END(ucas_efault)
NENTRY(ucas_fault)
callq smap_enable
SMAP_ENABLE
ret
END(ucas_fault)
@ -589,18 +569,6 @@ x86_copyfunc_end: .globl x86_copyfunc_end
*/
.section ".rodata"
.globl _C_LABEL(onfault_table)
.type _C_LABEL(x86_clacpatch),@object
.type _C_LABEL(x86_stacpatch),@object
LABEL(x86_clacpatch)
.quad .Lclacpatch
.quad 0 /* terminate */
END(x86_clacpatch)
LABEL(x86_stacpatch)
.quad .Lstacpatch
.quad 0 /* terminate */
END(x86_stacpatch)
_C_LABEL(onfault_table):
.quad .Lcopyin_start

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern.ldscript,v 1.24 2017/08/18 10:28:53 maxv Exp $ */
/* $NetBSD: kern.ldscript,v 1.25 2018/01/07 12:42:46 maxv Exp $ */
#include "assym.h"
@ -30,6 +30,14 @@ SECTIONS
. = ALIGN(__LARGE_PAGE_SIZE);
__rodata_start = . ;
.rodata.hotpatch :
{
__rodata_hotpatch_start = . ;
*(.rodata.hotpatch)
__rodata_hotpatch_end = . ;
}
.rodata :
{
*(.rodata)

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern.ldscript.Xen,v 1.13 2016/08/02 14:03:34 maxv Exp $ */
/* $NetBSD: kern.ldscript.Xen,v 1.14 2018/01/07 12:42:46 maxv Exp $ */
#include "assym.h"
@ -19,6 +19,14 @@ SECTIONS
. = ALIGN(__PAGE_SIZE);
__rodata_start = . ;
.rodata.hotpatch :
{
__rodata_hotpatch_start = . ;
*(.rodata.hotpatch)
__rodata_hotpatch_end = . ;
}
.rodata :
{
*(.rodata)

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern.ldscript.kaslr,v 1.3 2017/11/14 10:15:40 maxv Exp $ */
/* $NetBSD: kern.ldscript.kaslr,v 1.4 2018/01/07 12:42:46 maxv Exp $ */
#include "assym.h"
@ -15,6 +15,14 @@ SECTIONS
PROVIDE (etext = .) ;
__rodata_start = . ;
.rodata.hotpatch :
{
__rodata_hotpatch_start = . ;
*(.rodata.hotpatch)
__rodata_hotpatch_end = . ;
}
.rodata :
{
*(.rodata)

View File

@ -1,4 +1,4 @@
/* $NetBSD: frameasm.h,v 1.23 2017/10/17 07:33:44 maxv Exp $ */
/* $NetBSD: frameasm.h,v 1.24 2018/01/07 12:42:46 maxv Exp $ */
#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H
@ -35,6 +35,25 @@
#define STI(temp_reg) sti
#endif /* XEN */
#define HP_NAME_CLAC 1
#define HP_NAME_STAC 2
#define HOTPATCH(name, size) \
123: ; \
.section .rodata.hotpatch, "a" ; \
.byte name ; \
.byte size ; \
.quad 123b ; \
.previous
#define SMAP_ENABLE \
HOTPATCH(HP_NAME_CLAC, 3) ; \
.byte 0x0F, 0x1F, 0x00 ; \
#define SMAP_DISABLE \
HOTPATCH(HP_NAME_STAC, 3) ; \
.byte 0x0F, 0x1F, 0x00 ; \
#define SWAPGS NOT_XEN(swapgs)
/*
@ -78,7 +97,7 @@
subq $TF_REGSIZE,%rsp ; \
INTR_SAVE_GPRS ; \
cld ; \
callq smap_enable ; \
SMAP_ENABLE ; \
testb $SEL_UPL,TF_CS(%rsp) ; \
je kernel_trap ; \
usertrap ; \

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern.ldscript,v 1.20 2017/08/18 10:28:53 maxv Exp $ */
/* $NetBSD: kern.ldscript,v 1.21 2018/01/07 12:42:47 maxv Exp $ */
#include "assym.h"
@ -20,6 +20,14 @@ SECTIONS
. = ALIGN(__PAGE_SIZE);
__rodata_start = . ;
.rodata.hotpatch :
{
__rodata_hotpatch_start = . ;
*(.rodata.hotpatch)
__rodata_hotpatch_end = . ;
}
.rodata :
{
*(.rodata)

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern.ldscript.4MB,v 1.18 2017/08/18 10:28:53 maxv Exp $ */
/* $NetBSD: kern.ldscript.4MB,v 1.19 2018/01/07 12:42:47 maxv Exp $ */
#include "assym.h"
@ -29,6 +29,14 @@ SECTIONS
. = ALIGN(__LARGE_PAGE_SIZE);
__rodata_start = . ;
.rodata.hotpatch :
{
__rodata_hotpatch_start = . ;
*(.rodata.hotpatch)
__rodata_hotpatch_end = . ;
}
.rodata :
{
*(.rodata)

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern.ldscript.Xen,v 1.14 2017/06/25 20:22:32 bouyer Exp $ */
/* $NetBSD: kern.ldscript.Xen,v 1.15 2018/01/07 12:42:47 maxv Exp $ */
#include "assym.h"
@ -18,6 +18,14 @@ SECTIONS
. = ALIGN(__PAGE_SIZE);
__rodata_start = . ;
.rodata.hotpatch :
{
__rodata_hotpatch_start = . ;
*(.rodata.hotpatch)
__rodata_hotpatch_end = . ;
}
.rodata :
{
*(.rodata)

View File

@ -1,4 +1,4 @@
/* $NetBSD: frameasm.h,v 1.18 2017/09/17 09:59:23 maxv Exp $ */
/* $NetBSD: frameasm.h,v 1.19 2018/01/07 12:42:47 maxv Exp $ */
#ifndef _I386_FRAMEASM_H_
#define _I386_FRAMEASM_H_
@ -27,6 +27,14 @@
testb $0xff,EVTCHN_UPCALL_PENDING(reg)
#endif
#define HOTPATCH(name, size) \
123: ; \
.section .rodata.hotpatch, "a" ; \
.byte name ; \
.byte size ; \
.long 123b ; \
.previous
/*
* These are used on interrupt or trap entry or exit.
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: patch.c,v 1.25 2018/01/07 11:24:45 maxv Exp $ */
/* $NetBSD: patch.c,v 1.26 2018/01/07 12:42:46 maxv Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.25 2018/01/07 11:24:45 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.26 2018/01/07 12:42:46 maxv Exp $");
#include "opt_lockdebug.h"
#ifdef i386
@ -47,10 +47,17 @@ __KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.25 2018/01/07 11:24:45 maxv Exp $");
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/frameasm.h>
#include <x86/cpuvar.h>
#include <x86/cputypes.h>
struct hotpatch {
uint8_t name;
uint8_t size;
void *addr;
} __packed;
void spllower(int);
void spllower_end(void);
void cx8_spllower(int);
@ -77,8 +84,6 @@ void _atomic_cas_cx8(void);
void _atomic_cas_cx8_end(void);
extern void *x86_lockpatch[];
extern void *x86_clacpatch[];
extern void *x86_stacpatch[];
extern void *x86_retpatch[];
extern void *atomic_lockpatch[];
@ -140,6 +145,27 @@ patchbytes(void *addr, const uint8_t *bytes, size_t size)
}
}
static void
x86_hotpatch(uint32_t name, const uint8_t *bytes, size_t size)
{
extern char __rodata_hotpatch_start;
extern char __rodata_hotpatch_end;
struct hotpatch *hps, *hpe, *hp;
hps = (struct hotpatch *)&__rodata_hotpatch_start;
hpe = (struct hotpatch *)&__rodata_hotpatch_end;
for (hp = hps; hp < hpe; hp++) {
if (hp->name != name) {
continue;
}
if (hp->size != size) {
panic("x86_hotpatch: incorrect size");
}
patchbytes(hp->addr, bytes, size);
}
}
void
x86_patch(bool early)
{
@ -268,16 +294,11 @@ x86_patch(bool early)
0x0F, 0x01, 0xCB /* stac */
};
for (i = 0; x86_clacpatch[i] != NULL; i++) {
/* ret,int3,int3 -> clac */
patchbytes(x86_clacpatch[i], clac_bytes,
sizeof(clac_bytes));
}
for (i = 0; x86_stacpatch[i] != NULL; i++) {
/* ret,int3,int3 -> stac */
patchbytes(x86_stacpatch[i], stac_bytes,
sizeof(stac_bytes));
}
/* nop,nop,nop -> clac */
x86_hotpatch(HP_NAME_CLAC, clac_bytes, sizeof(clac_bytes));
/* nop,nop,nop -> stac */
x86_hotpatch(HP_NAME_STAC, stac_bytes, sizeof(stac_bytes));
}
#endif
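
One detail that is easy to miss when comparing the two frameasm.h hunks above: the amd64 HOTPATCH() records the patch-site address with .quad while the i386 variant uses .long, so a record is 10 bytes on amd64 and 6 bytes on i386. The single __packed struct hotpatch in patch.c matches both because void * has the native pointer width. A standalone sketch of that invariant, not part of the commit (the static_asserts and the GCC packed attribute are illustrative substitutes for the kernel's __packed):

#include <assert.h>
#include <stdint.h>

/* Mirrors struct hotpatch from patch.c. */
struct hotpatch {
	uint8_t name;	/* HP_NAME_* */
	uint8_t size;	/* bytes to patch at addr */
	void *addr;	/* .quad on amd64, .long on i386 */
} __attribute__((packed));

#ifdef __LP64__
static_assert(sizeof(struct hotpatch) == 10, "matches .byte,.byte,.quad");
#else
static_assert(sizeof(struct hotpatch) == 6, "matches .byte,.byte,.long");
#endif

int
main(void)
{
	return 0;
}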