diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 0292d6b4dc04..89e9bb6ce229 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -1123,6 +1123,8 @@ vm_map_pageable(map, start, end, new_pageable) { register vm_map_entry_t entry; vm_map_entry_t temp_entry; + register vm_offset_t failed; + int rv; vm_map_lock(map); @@ -1275,10 +1277,26 @@ vm_map_pageable(map, start, end, new_pageable) lock_write_to_read(&map->lock); } + rv = 0; entry = temp_entry; while (entry != &map->header && entry->start < end) { - if (entry->wired_count == 1) { - vm_fault_wire(map, entry->start, entry->end); + /* + * If vm_fault_wire fails for any page we need to + * undo what has been done. We decrement the wiring + * count for those pages which have not yet been + * wired (now) and unwire those that have (later). + * + * XXX this violates the locking protocol on the map, + * needs to be fixed. + */ + if (rv) + entry->wired_count--; + else if (entry->wired_count == 1) { + rv = vm_fault_wire(map, entry->start, entry->end); + if (rv) { + failed = entry->start; + entry->wired_count--; + } } entry = entry->next; } @@ -1289,6 +1307,11 @@ vm_map_pageable(map, start, end, new_pageable) else { lock_clear_recursive(&map->lock); } + if (rv) { + vm_map_unlock(map); + (void) vm_map_pageable(map, start, failed, TRUE); + return(rv); + } } vm_map_unlock(map); diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c index 28fefe8e9d1c..d7b39b81c457 100644 --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -123,6 +123,7 @@ smmap(p, uap, retval) vm_prot_t prot; caddr_t handle; int mtype, error; + int flags = uap->flags; #ifdef DEBUG if (mmapdebug & MDB_FOLLOW) @@ -133,7 +134,7 @@ smmap(p, uap, retval) /* * Make sure one of the sharing types is specified */ - mtype = uap->flags & MAP_TYPE; + mtype = flags & MAP_TYPE; switch (mtype) { case MAP_FILE: case MAP_ANON: @@ -146,7 +147,7 @@ smmap(p, uap, retval) * Size is implicitly rounded to a page boundary. 
*/ addr = (vm_offset_t) uap->addr; - if ((uap->flags & MAP_FIXED) && (addr & page_mask) || uap->len < 0) + if ((flags & MAP_FIXED) && (addr & page_mask) || uap->len < 0) return(EINVAL); size = (vm_size_t) round_page(uap->len); /* @@ -156,7 +157,7 @@ smmap(p, uap, retval) * There should really be a pmap call to determine a reasonable * location. */ - if (addr == 0 && (uap->flags & MAP_FIXED) == 0) + if (addr == 0 && (flags & MAP_FIXED) == 0) addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ); /* * Mapping file or named anonymous, get fp for validation @@ -185,9 +186,11 @@ smmap(p, uap, retval) * if mapping is shared. */ if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 || - ((uap->flags & MAP_SHARED) && + ((flags & MAP_SHARED) && (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0)) return(EACCES); + if ((flags & MAP_SHARED) && (fp->f_flag & FWRITE) == 0) + flags = (flags & ~MAP_SHARED) | MAP_PRIVATE; handle = (caddr_t)vp; } else if (uap->fd != -1) handle = (caddr_t)fp; @@ -205,7 +208,7 @@ smmap(p, uap, retval) prot |= VM_PROT_EXECUTE; error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, - uap->flags, handle, (vm_offset_t)uap->pos); + flags, handle, (vm_offset_t)uap->pos); if (error == 0) *retval = (int) addr; return(error); @@ -473,6 +476,13 @@ vm_mmap(map, addr, size, prot, flags, handle, foff) vm_object_deallocate(object); goto out; } + /* + * The object of unnamed anonymous regions was just created; + * find it for pager_cache. + */ + if (handle == NULL) + object = vm_object_lookup(pager); + /* + * Don't cache anonymous objects. + * Loses the reference gained by vm_pager_allocate.