From c5ba8f5c88dba5e68295ac152af57c2c1e6b5db2 Mon Sep 17 00:00:00 2001
From: jdolecek
Date: Tue, 21 Aug 2018 18:31:55 +0000
Subject: [PATCH] remove code handling buffers not aligned to a 512-byte
 boundary, simply return EINVAL straight away

the condition doesn't seem to be actually hit during normal boots or by tools
like fsck, which is a good thing since the removed path allocated memory
separately for each such I/O
---
 sys/arch/xen/xen/xbd_xenbus.c | 58 ++++++-----------------------------
 1 file changed, 9 insertions(+), 49 deletions(-)

diff --git a/sys/arch/xen/xen/xbd_xenbus.c b/sys/arch/xen/xen/xbd_xenbus.c
index e5c7fee9c004..9a4750bcdf70 100644
--- a/sys/arch/xen/xen/xbd_xenbus.c
+++ b/sys/arch/xen/xen/xbd_xenbus.c
@@ -1,4 +1,4 @@
-/* $NetBSD: xbd_xenbus.c,v 1.83 2018/08/21 18:11:10 jdolecek Exp $ */
+/* $NetBSD: xbd_xenbus.c,v 1.84 2018/08/21 18:31:55 jdolecek Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -50,7 +50,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.83 2018/08/21 18:11:10 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.84 2018/08/21 18:31:55 jdolecek Exp $");
 
 #include "opt_xen.h"
 
@@ -105,7 +105,6 @@ struct xbd_req {
         grant_ref_t req_gntref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         int req_nr_segments; /* number of segments in this request */
         struct buf *req_bp; /* buffer associated with this request */
-        void *req_data; /* pointer to the data buffer */
     } req_rw;
     struct {
         int s_error;
@@ -116,7 +115,6 @@ struct xbd_req {
 #define req_gntref u.req_rw.req_gntref
 #define req_nr_segments u.req_rw.req_nr_segments
 #define req_bp u.req_rw.req_bp
-#define req_data u.req_rw.req_data
 #define req_sync u.req_sync
 
 struct xbd_xenbus_softc {
@@ -171,9 +169,6 @@ static int xbd_diskstart(device_t, struct buf *);
 static void xbd_backend_changed(void *, XenbusState);
 static void xbd_connect(struct xbd_xenbus_softc *);
 
-static int xbd_map_align(struct xbd_req *);
-static void xbd_unmap_align(struct xbd_req *);
-
 static void xbdminphys(struct buf *);
 
 CFATTACH_DECL3_NEW(xbd, sizeof(struct xbd_xenbus_softc),
@@ -703,9 +698,6 @@ again:
         }
         /* b_resid was set in dk_start */
 next:
-        if (bp->b_data != xbdreq->req_data)
-            xbd_unmap_align(xbdreq);
-
         dk_done(&sc->sc_dksc, bp);
 
         SLIST_INSERT_HEAD(&sc->sc_xbdreq_head, xbdreq, req_next);
@@ -972,16 +964,14 @@ xbd_diskstart(device_t self, struct buf *bp)
         goto out;
     }
 
-    xbdreq->req_bp = bp;
-    xbdreq->req_data = bp->b_data;
     if ((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) {
-        if (__predict_false(xbd_map_align(xbdreq) != 0)) {
-            DPRINTF(("xbd_diskstart: no align\n"));
-            error = EAGAIN;
-            goto out;
-        }
+        DPRINTF(("xbd_diskstart: no align\n"));
+        error = EINVAL;
+        goto out;
     }
 
+    xbdreq->req_bp = bp;
+
     SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
     req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
     req->id = xbdreq->req_id;
@@ -990,8 +980,8 @@ xbd_diskstart(device_t self, struct buf *bp)
     req->sector_number = bp->b_rawblkno;
     req->handle = sc->sc_handle;
 
-    va = (vaddr_t)xbdreq->req_data & ~PAGE_MASK;
-    off = (vaddr_t)xbdreq->req_data & PAGE_MASK;
+    va = (vaddr_t)bp->b_data & ~PAGE_MASK;
+    off = (vaddr_t)bp->b_data & PAGE_MASK;
     bcount = bp->b_bcount;
     bp->b_resid = 0;
     for (seg = 0; bcount > 0;) {
@@ -1031,33 +1021,3 @@ out:
 err:
     return error;
 }
-
-static int
-xbd_map_align(struct xbd_req *req)
-{
-    int s = splvm(); /* XXXSMP - bogus? */
-    int rc;
-
-    rc = uvm_km_kmem_alloc(kmem_va_arena,
-        req->req_bp->b_bcount, (VM_NOSLEEP | VM_INSTANTFIT),
-        (vmem_addr_t *)&req->req_data);
-    splx(s);
-    if (__predict_false(rc != 0))
-        return ENOMEM;
-    if ((req->req_bp->b_flags & B_READ) == 0)
-        memcpy(req->req_data, req->req_bp->b_data,
-            req->req_bp->b_bcount);
-    return 0;
-}
-
-static void
-xbd_unmap_align(struct xbd_req *req)
-{
-    int s;
-    if (req->req_bp->b_flags & B_READ)
-        memcpy(req->req_bp->b_data, req->req_data,
-            req->req_bp->b_bcount);
-    s = splvm(); /* XXXSMP - bogus? */
-    uvm_km_kmem_free(kmem_va_arena, (vaddr_t)req->req_data, req->req_bp->b_bcount);
-    splx(s);
-}
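
Note (illustration only, not part of the change above): with the bounce-buffer
path gone, a caller doing raw I/O against an xbd device has to hand the driver
a buffer that starts on a 512-byte boundary, since the check on bp->b_data now
fails the request with EINVAL instead of copying it through a temporary kernel
allocation. Below is a minimal userland sketch of a well-behaved caller; the
device path, transfer size, and macro names are made up for the example, and
posix_memalign(3) is just one way to obtain a suitably aligned buffer.

#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define XBD_SECTOR_SIZE 512         /* alignment the driver now insists on */
#define IO_SIZE         (64 * 1024) /* arbitrary transfer size for the example */

int
main(void)
{
    void *buf;
    ssize_t n;
    int fd;

    /* Sector-aligned buffer; a plain malloc() pointer need not be aligned. */
    if (posix_memalign(&buf, XBD_SECTOR_SIZE, IO_SIZE) != 0)
        errx(1, "posix_memalign failed");

    /* Hypothetical raw xbd device node. */
    fd = open("/dev/rxbd0d", O_RDONLY);
    if (fd == -1)
        err(1, "open");

    /* With an unaligned buffer this request could now fail with EINVAL. */
    n = pread(fd, buf, IO_SIZE, 0);
    if (n == -1)
        err(1, "pread");

    close(fd);
    free(buf);
    return 0;
}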