Fix USER_LDT syscall locking. It's a bit over the top for a big lock kernel right now; a sleep lock around the entire thing would have worked as well, but hey.

Also correct PCB_USER_LDT to PMF_USER_LDT. This was wrong, but since they had the same value, it didn't matter.
fvdl 2002-10-08 20:23:27 +00:00
parent accf8358dd
commit ab7f74bdca
1 changed file with 83 additions and 72 deletions
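
A minimal user-space sketch of the i386_get_ldt() pattern the diff below introduces: snapshot the shared table into a temporary buffer while the lock is held, drop the lock, and only then copy the data out to the caller, so the potentially blocking copy never runs under a spin lock. All names here (shared_table, table_lock, get_entries, TABLE_SIZE) are hypothetical, and a pthread mutex plus a plain memcpy stand in for the pmap simple lock and copyout(); this is an illustration of the idea, not NetBSD code.

/*
 * Editorial sketch, not NetBSD code: snapshot under the lock, copy out
 * after unlocking, mirroring the new i386_get_ldt() flow below.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define TABLE_SIZE 128

static int shared_table[TABLE_SIZE];    /* stands in for the LDT */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Copy up to num entries starting at start into dst; returns the number
 * of entries copied, or -1 on bad arguments or allocation failure.
 */
int
get_entries(int *dst, int start, int num)
{
    int *snap;
    int n;

    if (start < 0 || num < 0 || start > TABLE_SIZE)
        return -1;
    if (num == 0)
        return 0;

    snap = malloc((size_t)num * sizeof(*snap));
    if (snap == NULL)
        return -1;

    pthread_mutex_lock(&table_lock);
    n = num < TABLE_SIZE - start ? num : TABLE_SIZE - start;
    memcpy(snap, shared_table + start, (size_t)n * sizeof(*snap));
    pthread_mutex_unlock(&table_lock);

    /* The "copyout" step runs with the lock released. */
    memcpy(dst, snap, (size_t)n * sizeof(*snap));
    free(snap);
    return n;
}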


@@ -1,4 +1,4 @@
/* $NetBSD: sys_machdep.c,v 1.65 2002/10/01 12:57:00 fvdl Exp $ */
/* $NetBSD: sys_machdep.c,v 1.66 2002/10/08 20:23:27 fvdl Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.65 2002/10/01 12:57:00 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.66 2002/10/08 20:23:27 fvdl Exp $");
#include "opt_vm86.h"
#include "opt_user_ldt.h"
@@ -112,7 +112,7 @@ i386_get_ldt(p, args, retval)
int error;
pmap_t pmap = p->p_vmspace->vm_map.pmap;
int nldt, num;
union descriptor *lp;
union descriptor *lp, *cp;
struct i386_get_ldt_args ua;
if ((error = copyin(args, &ua, sizeof(ua))) != 0)
@@ -127,9 +127,11 @@ i386_get_ldt(p, args, retval)
ua.start + ua.num > 8192)
return (EINVAL);
/*
* XXX LOCKING.
*/
cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
if (cp == NULL)
return ENOMEM;
simple_lock(&pmap->pm_lock);
if (pmap->pm_flags & PMF_USER_LDT) {
nldt = pmap->pm_ldt_len;
@@ -139,8 +141,11 @@ i386_get_ldt(p, args, retval)
lp = ldt;
}
if (ua.start > nldt)
if (ua.start > nldt) {
simple_unlock(&pmap->pm_lock);
free(cp, M_TEMP);
return (EINVAL);
}
lp += ua.start;
num = min(ua.num, nldt - ua.start);
@@ -152,12 +157,15 @@ i386_get_ldt(p, args, retval)
}
#endif
error = copyout(lp, ua.desc, num * sizeof(union descriptor));
if (error)
return (error);
memcpy(cp, lp, num * sizeof(union descriptor));
simple_unlock(&pmap->pm_lock);
*retval = num;
return (0);
error = copyout(cp, ua.desc, num * sizeof(union descriptor));
if (error == 0)
*retval = num;
free(cp, M_TEMP);
return (error);
}
int
@@ -171,15 +179,12 @@ i386_set_ldt(p, args, retval)
pmap_t pmap = p->p_vmspace->vm_map.pmap;
struct i386_set_ldt_args ua;
union descriptor *descv;
size_t old_len, new_len, ldt_len;
union descriptor *old_ldt, *new_ldt;
if ((error = copyin(args, &ua, sizeof(ua))) != 0)
return (error);
#ifdef LDT_DEBUG
printf("i386_set_ldt: start=%d num=%d descs=%p\n", ua.start,
ua.num, ua.desc);
#endif
if (ua.start < 0 || ua.num < 0 || ua.start > 8192 || ua.num > 8192 ||
ua.start + ua.num > 8192)
return (EINVAL);
@@ -191,55 +196,6 @@ i386_set_ldt(p, args, retval)
if ((error = copyin(ua.desc, descv, sizeof (*descv) * ua.num)) != 0)
goto out;
/*
* XXX LOCKING
*/
/* allocate user ldt */
if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
size_t old_len, new_len;
union descriptor *old_ldt, *new_ldt;
if (pmap->pm_flags & PMF_USER_LDT) {
old_len = pmap->pm_ldt_len * sizeof(union descriptor);
old_ldt = pmap->pm_ldt;
} else {
old_len = NLDT * sizeof(union descriptor);
old_ldt = ldt;
pmap->pm_ldt_len = 512;
}
while ((ua.start + ua.num) > pmap->pm_ldt_len)
pmap->pm_ldt_len *= 2;
new_len = pmap->pm_ldt_len * sizeof(union descriptor);
new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, new_len);
memcpy(new_ldt, old_ldt, old_len);
memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);
pmap->pm_ldt = new_ldt;
if (pmap->pm_flags & PCB_USER_LDT)
ldt_free(pmap);
else
pmap->pm_flags |= PCB_USER_LDT;
ldt_alloc(pmap, new_ldt, new_len);
pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
if (pcb == curpcb)
lldt(pcb->pcb_ldt_sel);
/*
* XXX Need to notify other processors which may be
* XXX currently using this pmap that they need to
* XXX re-load the LDT.
*/
if (old_ldt != ldt)
uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len);
#ifdef LDT_DEBUG
printf("i386_set_ldt(%d): new_ldt=%p\n", p->p_pid, new_ldt);
#endif
}
error = 0;
/* Check descriptors for access violations. */
for (i = 0, n = ua.start; i < ua.num; i++, n++) {
union descriptor *desc = &descv[i];
@@ -309,17 +265,72 @@ i386_set_ldt(p, args, retval)
}
}
#ifdef LDT_DEBUG
{
int i;
for (i = 0, n = ua.start; i < ua.num; i++, n++)
i386_print_ldt(n, &descv[i].sd);
/* allocate user ldt */
simple_lock(&pmap->pm_lock);
if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
old_ldt = pmap->pm_ldt;
if (pmap->pm_flags & PMF_USER_LDT)
ldt_len = pmap->pm_ldt_len;
else
ldt_len = 512;
while ((ua.start + ua.num) > ldt_len)
ldt_len *= 2;
new_len = ldt_len * sizeof(union descriptor);
simple_unlock(&pmap->pm_lock);
new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
new_len);
simple_lock(&pmap->pm_lock);
if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
/*
* Another thread (re)allocated the LDT to
* sufficient size while we were blocked in
* uvm_km_alloc. Oh well. The new entries
* will quite probably not be right, but
* hey.. not our problem if user applications
* have race conditions like that.
*/
uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len);
goto copy;
}
old_ldt = pmap->pm_ldt;
if (old_ldt != NULL) {
old_len = pmap->pm_ldt_len * sizeof(union descriptor);
uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len);
} else {
old_len = NLDT * sizeof(union descriptor);
old_ldt = ldt;
}
memcpy(new_ldt, old_ldt, old_len);
memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);
if (old_ldt != ldt)
uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len);
pmap->pm_ldt = new_ldt;
pmap->pm_ldt_len = ldt_len;
if (pmap->pm_flags & PMF_USER_LDT)
ldt_free(pmap);
else
pmap->pm_flags |= PMF_USER_LDT;
ldt_alloc(pmap, new_ldt, new_len);
pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
if (pcb == curpcb)
lldt(pcb->pcb_ldt_sel);
}
#endif
copy:
/* Now actually replace the descriptors. */
for (i = 0, n = ua.start; i < ua.num; i++, n++)
pmap->pm_ldt[n] = descv[i];
simple_unlock(&pmap->pm_lock);
*retval = ua.start;
out:
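
The i386_set_ldt() hunks above follow the same discipline on the growth path: the simple lock cannot be held across the blocking uvm_km_alloc(), so the lock is dropped around the allocation, re-taken, and the size re-checked in case another thread grew the LDT in the meantime, as the comment in the hunk notes. Below is a minimal user-space sketch of that unlock/allocate/relock/re-check shape; grow_table, shared_table, shared_len, and table_lock are hypothetical names, with a pthread mutex and calloc standing in for the pmap simple lock and uvm_km_alloc().

/*
 * Editorial sketch, not NetBSD code: grow a shared table without holding
 * the lock across the blocking allocation, then re-check after relocking.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static int *shared_table;               /* grows on demand */
static size_t shared_len;               /* current number of entries */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Ensure the table holds at least want entries; 0 on success, -1 on error. */
int
grow_table(size_t want)
{
    size_t new_len;
    int *new_tab, *old_tab;

    pthread_mutex_lock(&table_lock);
    if (shared_table != NULL && want <= shared_len) {
        pthread_mutex_unlock(&table_lock);
        return 0;                       /* already large enough */
    }
    new_len = shared_len != 0 ? shared_len : 64;
    while (new_len < want)
        new_len *= 2;
    pthread_mutex_unlock(&table_lock);  /* don't allocate under the lock */

    new_tab = calloc(new_len, sizeof(*new_tab));
    if (new_tab == NULL)
        return -1;

    pthread_mutex_lock(&table_lock);
    if (shared_table != NULL && new_len <= shared_len) {
        /* Another thread grew it while we were allocating; discard ours. */
        pthread_mutex_unlock(&table_lock);
        free(new_tab);
        return 0;
    }
    old_tab = shared_table;
    if (old_tab != NULL)
        memcpy(new_tab, old_tab, shared_len * sizeof(*new_tab));
    shared_table = new_tab;
    shared_len = new_len;
    pthread_mutex_unlock(&table_lock);
    free(old_tab);                      /* release the old table outside the lock */
    return 0;
}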