shmctl(SHM_LOCK) does not need to mess with mappings of the shm segment,

uvm_obj_wirepages() is sufficient. This fixes the problem reported in
https://syzkaller.appspot.com/bug?id=71f9271d761f5b6ed517a18030dc04f0135e6179
chs 2019-06-10 00:35:47 +00:00
parent 4261787c4e
commit c7c4f4753b
1 changed file with 19 additions and 54 deletions
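
As background (not part of the commit itself): shmctl(2) with SHM_LOCK/SHM_UNLOCK is the user-visible interface whose kernel path is simplified here. A minimal userland sketch of that path follows; the key, size, and permission values are arbitrary, error handling is trimmed, and on NetBSD the lock/unlock commands also require the privilege checked via kauth in shmctl1() below.

/*
 * Illustrative only: create a segment, wire its pages with SHM_LOCK,
 * release them with SHM_UNLOCK, then remove the segment.
 */
#include <sys/ipc.h>
#include <sys/shm.h>

#include <err.h>
#include <stdlib.h>

int
main(void)
{
	int shmid;

	/* IPC_PRIVATE avoids key collisions; 64 KiB is an arbitrary size. */
	shmid = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
	if (shmid == -1)
		err(1, "shmget");

	/* SHM_LOCK wires the segment's backing pages; SHM_UNLOCK unwires them. */
	if (shmctl(shmid, SHM_LOCK, NULL) == -1)
		err(1, "shmctl(SHM_LOCK)");
	if (shmctl(shmid, SHM_UNLOCK, NULL) == -1)
		err(1, "shmctl(SHM_UNLOCK)");

	if (shmctl(shmid, IPC_RMID, NULL) == -1)
		err(1, "shmctl(IPC_RMID)");
	return EXIT_SUCCESS;
}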


@@ -1,4 +1,4 @@
-/* $NetBSD: sysv_shm.c,v 1.134 2019/04/10 10:03:50 pgoyette Exp $ */
+/* $NetBSD: sysv_shm.c,v 1.135 2019/06/10 00:35:47 chs Exp $ */
 
 /*-
  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.134 2019/04/10 10:03:50 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.135 2019/06/10 00:35:47 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_sysv.h"
@@ -249,55 +249,30 @@ shmmap_getprivate(struct proc *p)
 /*
  * Lock/unlock the memory.
  *  => must be called with shm_lock held;
- *  => called from one place, thus, inline;
  */
-static inline int
-shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
+static int
+shm_memlock(struct shmid_ds *shmseg, int shmid, int cmd)
 {
-	struct proc *p = l->l_proc;
-	struct shmmap_entry *shmmap_se;
-	struct shmmap_state *shmmap_s;
 	size_t size;
 	int error;
 
 	KASSERT(mutex_owned(&shm_lock));
-	shmmap_s = shmmap_getprivate(p);
 
-	/* Find our shared memory address by shmid */
-	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
-		if (shmmap_se->shmid != shmid)
-			continue;
+	size = round_page(shmseg->shm_segsz);
 
-		size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
-		if (cmd == SHM_LOCK &&
-		    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
-			/* Wire the object and map, then tag it */
-			error = uvm_obj_wirepages(shmseg->_shm_internal,
-			    0, size, NULL);
-			if (error)
-				return EIO;
-			error = uvm_map_pageable(&p->p_vmspace->vm_map,
-			    shmmap_se->va, shmmap_se->va + size, false, 0);
-			if (error) {
-				uvm_obj_unwirepages(shmseg->_shm_internal,
-				    0, size);
-				if (error == EFAULT)
-					error = ENOMEM;
-				return error;
-			}
-			shmseg->shm_perm.mode |= SHMSEG_WIRED;
+	if (cmd == SHM_LOCK && (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
+		/* Wire the object and map, then tag it */
+		error = uvm_obj_wirepages(shmseg->_shm_internal,
+		    0, size, NULL);
+		if (error)
+			return EIO;
+		shmseg->shm_perm.mode |= SHMSEG_WIRED;
 
-		} else if (cmd == SHM_UNLOCK &&
-		    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
-			/* Unwire the object and map, then untag it */
-			uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
-			error = uvm_map_pageable(&p->p_vmspace->vm_map,
-			    shmmap_se->va, shmmap_se->va + size, true, 0);
-			if (error)
-				return EIO;
-			shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
-		}
+	} else if (cmd == SHM_UNLOCK &&
+	    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
+		/* Unwire the object, then untag it */
+		uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
+		shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
 	}
 
 	return 0;
@@ -462,16 +437,6 @@ sys_shmat(struct lwp *l, const struct sys_shmat_args *uap, register_t *retval)
 	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
 	if (error)
 		goto err_detach;
-	if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
-		error = uvm_map_pageable(&vm->vm_map, attach_va,
-		    attach_va + size, false, 0);
-		if (error) {
-			if (error == EFAULT)
-				error = ENOMEM;
-			uvm_deallocate(&vm->vm_map, attach_va, size);
-			goto err_detach;
-		}
-	}
 
 	/* Set the new address, and update the time */
 	mutex_enter(&shm_lock);
@@ -595,7 +560,7 @@ shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
 		    (cmd == SHM_LOCK) ? KAUTH_REQ_SYSTEM_SYSVIPC_SHM_LOCK :
 		    KAUTH_REQ_SYSTEM_SYSVIPC_SHM_UNLOCK, NULL, NULL, NULL)) != 0)
 			break;
-		error = shm_memlock(l, shmseg, shmid, cmd);
+		error = shm_memlock(shmseg, shmid, cmd);
 		break;
 	default:
 		error = EINVAL;
@@ -717,7 +682,7 @@ sys_shmget(struct lwp *l, const struct sys_shmget_args *uap, register_t *retval)
 		mutex_exit(&shm_lock);
 		return ENOSPC;
 	}
-	size = (size + PGOFSET) & ~PGOFSET;
+	size = round_page(size);
 	if (shm_committed + btoc(size) > shminfo.shmall) {
 		mutex_exit(&shm_lock);
 		return ENOMEM;