PT_GNU_RELRO segments are arranged such that their vaddr + memsz ends
on a linker common page size boundary.  However, if the common page size
used by the linker is smaller than the VM page size used by the kernel,
the end of the segment can fall in the middle of a VM page.  When the
region is then write-protected, objects in the neighboring .data can be
incorrectly write-protected as well, resulting in a crash.

Avoid this situation by calculating the end of the RELRO region not by
rounding memsz up to the VM page size, but rather by adding vaddr + memsz
and then truncating to the VM page size.

Fixes PR toolchain/55043.

XXX pullup-9
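
For concreteness, a small standalone sketch of the two calculations
(illustration only, not part of the commit: the 16 KiB VM page size,
the 4 KiB linker common page size, the addresses, and the local
round_down/round_up helpers are all assumed values):

#include <stdio.h>
#include <stdint.h>

/* Assumed: 16 KiB kernel VM page, 4 KiB linker common page size. */
#define VM_PAGESZ	0x4000UL
#define round_down(x)	((x) & ~(VM_PAGESZ - 1))
#define round_up(x)	round_down((x) + VM_PAGESZ - 1)

int
main(void)
{
	/* Hypothetical RELRO segment ending on a 4 KiB boundary. */
	uintptr_t vaddr = 0x20000;	/* VM-page-aligned start */
	size_t memsz = 0x3000;		/* vaddr + memsz = 0x23000 */

	/* Old: round memsz up to the VM page size. */
	uintptr_t old_end = round_down(vaddr) + round_up(memsz);
	/* New: add vaddr + memsz, then truncate to the VM page size. */
	uintptr_t new_end = round_down(vaddr + memsz);

	/* old_end = 0x24000: write-protection spills into .data beyond
	 * 0x23000.  new_end = 0x20000: nothing gets protected here, but
	 * nothing crashes either. */
	printf("old end %#lx, new end %#lx\n",
	    (unsigned long)old_end, (unsigned long)new_end);
	return 0;
}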
thorpej 2020-03-04 01:21:17 +00:00
parent 379e21a2a0
commit da570a6238
3 changed files with 27 additions and 14 deletions

libexec/ld.elf_so/headers.c

@@ -1,4 +1,4 @@
-/* $NetBSD: headers.c,v 1.67 2020/02/29 18:53:55 kamil Exp $ */
+/* $NetBSD: headers.c,v 1.68 2020/03/04 01:21:17 thorpej Exp $ */
 
 /*
  * Copyright 1996 John D. Polstra.
@@ -40,7 +40,7 @@
 #include <sys/cdefs.h>
 #ifndef lint
-__RCSID("$NetBSD: headers.c,v 1.67 2020/02/29 18:53:55 kamil Exp $");
+__RCSID("$NetBSD: headers.c,v 1.68 2020/03/04 01:21:17 thorpej Exp $");
 #endif /* not lint */
 
 #include <err.h>
@@ -516,9 +516,9 @@ _rtld_digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry)
 #ifdef GNU_RELRO
 		case PT_GNU_RELRO:
-			obj->relro_page = obj->relocbase
-			    + round_down(ph->p_vaddr);
-			obj->relro_size = round_up(ph->p_memsz);
+			/* rounding happens later. */
+			obj->relro_page = obj->relocbase + ph->p_vaddr;
+			obj->relro_size = ph->p_memsz;
 			dbg(("headers: %s %p phsize %" PRImemsz,
 			    "PT_GNU_RELRO", (void *)(uintptr_t)vaddr,
 			    ph->p_memsz));
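
The round_down()/round_up() calls removed above round to the run-time
VM page size.  A sketch of their assumed shape (modeled on ld.elf_so's
rtld.h, not the verbatim definitions; _rtld_pagesz standing in for the
VM page size learned at startup), which shows why rounding at
header-digest time bakes the VM page size into relro_page/relro_size
before _rtld_relro() can decide what is actually safe to protect:

extern size_t _rtld_pagesz;	/* VM page size (assumed global) */

#define round_down(x)	((x) & ~(_rtld_pagesz - 1))
#define round_up(x)	round_down((x) + _rtld_pagesz - 1)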

libexec/ld.elf_so/map_object.c

@@ -1,4 +1,4 @@
-/* $NetBSD: map_object.c,v 1.60 2019/01/06 19:44:54 joerg Exp $ */
+/* $NetBSD: map_object.c,v 1.61 2020/03/04 01:21:17 thorpej Exp $ */
 
 /*
  * Copyright 1996 John D. Polstra.
@@ -34,7 +34,7 @@
 #include <sys/cdefs.h>
 #ifndef lint
-__RCSID("$NetBSD: map_object.c,v 1.60 2019/01/06 19:44:54 joerg Exp $");
+__RCSID("$NetBSD: map_object.c,v 1.61 2020/03/04 01:21:17 thorpej Exp $");
 #endif /* not lint */
 
 #include <errno.h>
@@ -406,8 +406,9 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
 	obj->relocbase = mapbase - base_vaddr;
 #ifdef GNU_RELRO
-	obj->relro_page = obj->relocbase + round_down(relro_page);
-	obj->relro_size = round_up(relro_size);
+	/* rounding happens later. */
+	obj->relro_page = obj->relocbase + relro_page;
+	obj->relro_size = relro_size;
 #endif
 	if (obj->dynamic)

libexec/ld.elf_so/rtld.c

@@ -1,4 +1,4 @@
-/* $NetBSD: rtld.c,v 1.202 2020/02/29 04:23:05 kamil Exp $ */
+/* $NetBSD: rtld.c,v 1.203 2020/03/04 01:21:17 thorpej Exp $ */
 
 /*
  * Copyright 1996 John D. Polstra.
@@ -40,7 +40,7 @@
 #include <sys/cdefs.h>
 #ifndef lint
-__RCSID("$NetBSD: rtld.c,v 1.202 2020/02/29 04:23:05 kamil Exp $");
+__RCSID("$NetBSD: rtld.c,v 1.203 2020/03/04 01:21:17 thorpej Exp $");
 #endif /* not lint */
 
 #include <sys/param.h>
@@ -1773,13 +1773,25 @@ int
 _rtld_relro(const Obj_Entry *obj, bool wantmain)
 {
 #ifdef GNU_RELRO
-	if (obj->relro_size == 0)
+	/*
+	 * If our VM page size is larger than the page size used by the
+	 * linker when laying out the object, we could end up making data
+	 * read-only that is unintended.  Detect and avoid this situation.
+	 * It may mean we are unable to protect everything we'd like, but
+	 * it's better than crashing.
+	 */
+	uintptr_t relro_end = (uintptr_t)obj->relro_page + obj->relro_size;
+	uintptr_t relro_start = round_down((uintptr_t)obj->relro_page);
+	assert(relro_end >= relro_start);
+	size_t relro_size = round_down(relro_end) - relro_start;
+	if (relro_size == 0)
 		return 0;
 	if (wantmain != (obj == _rtld_objmain))
 		return 0;
 
-	dbg(("RELRO %s %p %zx\n", obj->path, obj->relro_page, obj->relro_size));
-	if (mprotect(obj->relro_page, obj->relro_size, PROT_READ) == -1) {
+	dbg(("RELRO %s %p %zx\n", obj->path, (void *)relro_start, relro_size));
+	if (mprotect((void *)relro_start, relro_size, PROT_READ) == -1) {
 		_rtld_error("%s: Cannot enforce relro " "protection: %s",
 		    obj->path, xstrerror(errno));
 		return -1;
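
A quick standalone check of the new computation in the partial case
(illustration only; the 16 KiB VM page size and the addresses are
assumed): one whole VM page is protected and the unaligned tail is left
writable, instead of dragging neighboring .data into the protection.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define VM_PAGESZ	0x4000UL
#define round_down(x)	((x) & ~(VM_PAGESZ - 1))

int
main(void)
{
	/* Hypothetical unrounded values, as stored by the headers.c and
	 * map_object.c changes above. */
	uintptr_t relro_page = 0x20000;
	size_t relro_size = 0x7000;

	uintptr_t relro_end = relro_page + relro_size;	/* 0x27000 */
	uintptr_t relro_start = round_down(relro_page);	/* 0x20000 */
	assert(relro_end >= relro_start);
	size_t prot_size = round_down(relro_end) - relro_start;

	/* [0x20000, 0x24000) becomes read-only; the 0x3000 tail stays
	 * writable rather than over-protecting into .data. */
	assert(prot_size == 0x4000);
	return 0;
}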