Add back non-512-byte aligned buffers support (reverting 1.84).

Userland users of raw devices don't have to use sector-aligned buffers,
and e.g. disklabel doesn't.
anita tests on Xen should at last complete the install phase again.
This commit is contained in:
bouyer 2018-08-26 11:31:56 +00:00
parent 8ea42f66ff
commit 00ac29de45
1 changed file with 51 additions and 11 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: xbd_xenbus.c,v 1.86 2018/08/21 18:55:08 jdolecek Exp $ */
/* $NetBSD: xbd_xenbus.c,v 1.87 2018/08/26 11:31:56 bouyer Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.86 2018/08/21 18:55:08 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.87 2018/08/26 11:31:56 bouyer Exp $");
#include "opt_xen.h"
@ -105,6 +105,7 @@ struct xbd_req {
grant_ref_t req_gntref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int req_nr_segments; /* number of segments in this request */
struct buf *req_bp; /* buffer associated with this request */
void *req_data; /* pointer to the data buffer */
} req_rw;
struct {
int s_error;
@ -115,6 +116,7 @@ struct xbd_req {
#define req_gntref u.req_rw.req_gntref
#define req_nr_segments u.req_rw.req_nr_segments
#define req_bp u.req_rw.req_bp
#define req_data u.req_rw.req_data
#define req_sync u.req_sync
struct xbd_xenbus_softc {
@ -169,6 +171,9 @@ static int xbd_diskstart(device_t, struct buf *);
static void xbd_backend_changed(void *, XenbusState);
static void xbd_connect(struct xbd_xenbus_softc *);
static int xbd_map_align(struct xbd_req *);
static void xbd_unmap_align(struct xbd_req *);
static void xbdminphys(struct buf *);
CFATTACH_DECL3_NEW(xbd, sizeof(struct xbd_xenbus_softc),
@ -694,7 +699,6 @@ again:
bp = xbdreq->req_bp;
KASSERT(bp != NULL);
xbdreq->req_bp = NULL;
DPRINTF(("%s(%p): b_bcount = %ld\n", __func__,
bp, (long)bp->b_bcount));
@ -704,7 +708,11 @@ again:
goto next;
}
/* b_resid was set in dk_start */
if (__predict_false(
xbdreq->req_data != NULL && bp->b_data != xbdreq->req_data))
xbd_unmap_align(xbdreq);
next:
xbdreq->req_bp = NULL;
dk_done(&sc->sc_dksc, bp);
SLIST_INSERT_HEAD(&sc->sc_xbdreq_head, xbdreq, req_next);
@ -971,13 +979,15 @@ xbd_diskstart(device_t self, struct buf *bp)
goto out;
}
if ((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) {
DPRINTF(("xbd_diskstart: no align\n"));
error = EINVAL;
goto out;
}
xbdreq->req_bp = bp;
xbdreq->req_data = bp->b_data;
if (__predict_false((vaddr_t)bp->b_data & (XEN_BSIZE - 1))) {
if (__predict_false(xbd_map_align(xbdreq) != 0)) {
DPRINTF(("xbd_diskstart: no align\n"));
error = EAGAIN;
goto out;
}
}
SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
@ -987,8 +997,8 @@ xbd_diskstart(device_t self, struct buf *bp)
req->sector_number = bp->b_rawblkno;
req->handle = sc->sc_handle;
va = (vaddr_t)bp->b_data & ~PAGE_MASK;
off = (vaddr_t)bp->b_data & PAGE_MASK;
va = (vaddr_t)xbdreq->req_data & ~PAGE_MASK;
off = (vaddr_t)xbdreq->req_data & PAGE_MASK;
bcount = bp->b_bcount;
bp->b_resid = 0;
for (seg = 0; bcount > 0;) {
@ -1028,3 +1038,33 @@ out:
err:
return error;
}
static int
xbd_map_align(struct xbd_req *req)
{
int s = splvm(); /* XXXSMP - bogus? */
int rc;
rc = uvm_km_kmem_alloc(kmem_va_arena,
req->req_bp->b_bcount, (VM_NOSLEEP | VM_INSTANTFIT),
(vmem_addr_t *)&req->req_data);
splx(s);
if (__predict_false(rc != 0))
return ENOMEM;
if ((req->req_bp->b_flags & B_READ) == 0)
memcpy(req->req_data, req->req_bp->b_data,
req->req_bp->b_bcount);
return 0;
}
static void
xbd_unmap_align(struct xbd_req *req)
{
int s;
if (req->req_bp->b_flags & B_READ)
memcpy(req->req_bp->b_data, req->req_data,
req->req_bp->b_bcount);
s = splvm(); /* XXXSMP - bogus? */
uvm_km_kmem_free(kmem_va_arena, (vaddr_t)req->req_data, req->req_bp->b_bcount);
splx(s);
}