On further thought, just remove the separately allocated nvme_ns_context altogether and fold it into nvme_ccb; allocating this separately just isn't useful.
jdolecek 2016-09-19 20:33:51 +00:00
parent 2839ad01b7
commit ac7944cb93
3 changed files with 95 additions and 161 deletions
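
For orientation before the diffs: after this change a namespace consumer no longer allocates an nvme_ns_context. It passes the namespace id, data buffer, transfer geometry, flags and a completion callback directly to nvme_ns_dobio()/nvme_ns_sync(), and those values are stashed in the spare nnc_* fields of the ccb. The sketch below is illustrative only and not part of the commit: my_softc, my_start and my_done are made-up consumer names and the include paths are assumed; the nvme_* calls, the nvme_nnc_done signature and the NVME_NS_CTX_F_* flags are taken from the diff.

/*
 * Minimal sketch of a consumer of the reworked interface (not part of
 * this commit).  "my_softc", "my_start" and "my_done" are hypothetical;
 * the nvme_/NVME_ names come from nvmereg.h/nvmevar.h as changed below,
 * and the include paths are assumed.
 */
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/errno.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>

struct my_softc {
	struct nvme_softc *sc_nvme;	/* controller */
	uint16_t	   sc_nsid;	/* namespace id */
	uint32_t	   sc_secsize;	/* sector size */
};

/* Completion callback, matching the new nvme_nnc_done typedef. */
static void
my_done(void *cookie, struct buf *bp, uint16_t cmd_status)
{
	if (bp == NULL)
		return;		/* polled or flush completion */

	if (NVME_CQE_SC(cmd_status) != NVME_CQE_SC_SUCCESS) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	}
	biodone(bp);		/* the real ld(4) glue defers to lddone() */
}

static int
my_start(struct my_softc *sc, struct buf *bp)
{
	/*
	 * No per-request context allocation any more: nvme_ns_dobio()
	 * copies these arguments into the nnc_* fields of the ccb.
	 */
	return nvme_ns_dobio(sc->sc_nvme, sc->sc_nsid, sc,
	    bp, bp->b_data, bp->b_bcount,
	    sc->sc_secsize, bp->b_rawblkno,
	    BUF_ISWRITE(bp) ? 0 : NVME_NS_CTX_F_READ,
	    my_done);
}

Note that both completion paths in nvme.c return the ccb to its queue before calling nnc_done(), so the callback may submit new I/O; the sync path hands the callback a NULL buf.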

ld_nvme.c

@@ -1,4 +1,4 @@
/* $NetBSD: ld_nvme.c,v 1.5 2016/09/18 21:52:36 jdolecek Exp $ */
/* $NetBSD: ld_nvme.c,v 1.6 2016/09/19 20:33:51 jdolecek Exp $ */
/*-
* Copyright (C) 2016 NONAKA Kimihiro <nonaka@netbsd.org>
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_nvme.c,v 1.5 2016/09/18 21:52:36 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: ld_nvme.c,v 1.6 2016/09/19 20:33:51 jdolecek Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -46,7 +46,6 @@ struct ld_nvme_softc {
struct nvme_softc *sc_nvme;
uint16_t sc_nsid;
int sc_attaching;
};
static int ld_nvme_match(device_t, cfdata_t, void *);
@@ -60,11 +59,8 @@ static int ld_nvme_start(struct ld_softc *, struct buf *);
static int ld_nvme_dump(struct ld_softc *, void *, int, int);
static int ld_nvme_flush(struct ld_softc *, int);
static int ld_nvme_dobio(struct ld_nvme_softc *, void *, int, daddr_t,
int, struct buf *);
static void ld_nvme_biodone(struct nvme_ns_context *);
static void ld_nvme_syncdone(struct nvme_ns_context *);
static void ld_nvme_biodone(void *, struct buf *, uint16_t);
static void ld_nvme_syncdone(void *, struct buf *, uint16_t);
static int
ld_nvme_match(device_t parent, cfdata_t match, void *aux)
@@ -92,7 +88,6 @@ ld_nvme_attach(device_t parent, device_t self, void *aux)
ld->sc_dv = self;
sc->sc_nvme = nsc;
sc->sc_nsid = naa->naa_nsid;
sc->sc_attaching = 1;
aprint_naive("\n");
aprint_normal("\n");
@@ -117,8 +112,6 @@ ld_nvme_attach(device_t parent, device_t self, void *aux)
ld->sc_flush = ld_nvme_flush;
ld->sc_flags = LDF_ENABLED;
ldattach(ld, "fcfs");
sc->sc_attaching = 0;
}
static int
@@ -142,8 +135,11 @@ ld_nvme_start(struct ld_softc *ld, struct buf *bp)
{
struct ld_nvme_softc *sc = device_private(ld->sc_dv);
return ld_nvme_dobio(sc, bp->b_data, bp->b_bcount, bp->b_rawblkno,
BUF_ISWRITE(bp), bp);
return nvme_ns_dobio(sc->sc_nvme, sc->sc_nsid, sc,
bp, bp->b_data, bp->b_bcount,
sc->sc_ld.sc_secsize, bp->b_rawblkno,
BUF_ISWRITE(bp) ? 0 : NVME_NS_CTX_F_READ,
ld_nvme_biodone);
}
static int
@@ -151,51 +147,18 @@ ld_nvme_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
struct ld_nvme_softc *sc = device_private(ld->sc_dv);
return ld_nvme_dobio(sc, data, blkcnt * ld->sc_secsize, blkno, 1, NULL);
}
static int
ld_nvme_dobio(struct ld_nvme_softc *sc, void *data, int datasize, daddr_t blkno,
int dowrite, struct buf *bp)
{
struct nvme_ns_context *ctx;
int error;
int waitok = (bp != NULL && !cpu_softintr_p() && !cpu_intr_p()
&& !sc->sc_attaching);
ctx = nvme_ns_get_ctx(sc, waitok ? PR_WAITOK : PR_NOWAIT);
if (ctx == NULL)
return EAGAIN;
ctx->nnc_cookie = sc;
ctx->nnc_nsid = sc->sc_nsid;
ctx->nnc_done = ld_nvme_biodone;
ctx->nnc_buf = bp;
ctx->nnc_data = data;
ctx->nnc_datasize = datasize;
ctx->nnc_secsize = sc->sc_ld.sc_secsize;
ctx->nnc_blkno = blkno;
ctx->nnc_flags = dowrite ? 0 : NVME_NS_CTX_F_READ;
if (bp == NULL)
SET(ctx->nnc_flags, NVME_NS_CTX_F_POLL);
error = nvme_ns_dobio(sc->sc_nvme, ctx);
if (error)
nvme_ns_put_ctx(sc, ctx);
return error;
return nvme_ns_dobio(sc->sc_nvme, sc->sc_nsid, sc,
NULL, data, blkcnt * ld->sc_secsize,
sc->sc_ld.sc_secsize, blkno,
NVME_NS_CTX_F_POLL,
ld_nvme_biodone);
}
static void
ld_nvme_biodone(struct nvme_ns_context *ctx)
ld_nvme_biodone(void *xc, struct buf *bp, uint16_t cmd_status)
{
struct ld_nvme_softc *sc = ctx->nnc_cookie;
struct buf *bp = ctx->nnc_buf;
int status = NVME_CQE_SC(ctx->nnc_status);
/* free before processing to avoid starvation, lddone() could trigger
* another i/o request */
nvme_ns_put_ctx(sc, ctx);
struct ld_nvme_softc *sc = xc;
uint16_t status = NVME_CQE_SC(cmd_status);
if (bp != NULL) {
if (status != NVME_CQE_SC_SUCCESS) {
@@ -217,33 +180,14 @@ static int
ld_nvme_flush(struct ld_softc *ld, int flags)
{
struct ld_nvme_softc *sc = device_private(ld->sc_dv);
struct nvme_ns_context *ctx;
int error;
int waitok = (!ISSET(flags, LDFL_POLL)
&& !cpu_softintr_p() && !cpu_intr_p());
ctx = nvme_ns_get_ctx(sc, waitok ? PR_WAITOK : PR_NOWAIT);
if (ctx == NULL)
return EAGAIN;
ctx->nnc_cookie = sc;
ctx->nnc_nsid = sc->sc_nsid;
ctx->nnc_done = ld_nvme_syncdone;
ctx->nnc_flags = 0;
if (flags & LDFL_POLL)
SET(ctx->nnc_flags, NVME_NS_CTX_F_POLL);
error = nvme_ns_sync(sc->sc_nvme, ctx);
if (error)
nvme_ns_put_ctx(sc, ctx);
return error;
return nvme_ns_sync(sc->sc_nvme, sc->sc_nsid, sc,
(flags & LDFL_POLL) ? NVME_NS_CTX_F_POLL : 0,
ld_nvme_syncdone);
}
static void
ld_nvme_syncdone(struct nvme_ns_context *ctx)
ld_nvme_syncdone(void *xc, struct buf *bp, uint16_t cmd_status)
{
struct ld_nvme_softc *sc = ctx->nnc_cookie;
nvme_ns_put_ctx(sc, ctx);
/* nothing to do */
}

nvme.c

@@ -1,4 +1,4 @@
/* $NetBSD: nvme.c,v 1.10 2016/09/19 19:06:57 jdolecek Exp $ */
/* $NetBSD: nvme.c,v 1.11 2016/09/19 20:33:51 jdolecek Exp $ */
/* $OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */
/*
@@ -18,7 +18,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.10 2016/09/19 19:06:57 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.11 2016/09/19 20:33:51 jdolecek Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -417,15 +417,6 @@ nvme_attach(struct nvme_softc *sc)
if (!sc->sc_use_mq)
nvme_write4(sc, NVME_INTMC, 1);
snprintf(sc->sc_ctxpoolname, sizeof(sc->sc_ctxpoolname),
"%s_ns_ctx", device_xname(sc->sc_dev));
sc->sc_ctxpool = pool_cache_init(sizeof(struct nvme_ns_context),
0, 0, 0, sc->sc_ctxpoolname, NULL, IPL_BIO, NULL, NULL, NULL);
if (sc->sc_ctxpool == NULL) {
aprint_error_dev(sc->sc_dev, "unable to create ctx pool\n");
goto free_q;
}
/* probe subdevices */
sc->sc_namespaces = kmem_zalloc(sizeof(*sc->sc_namespaces) * sc->sc_nn,
KM_SLEEP);
@@ -484,8 +475,6 @@ nvme_detach(struct nvme_softc *sc, int flags)
return error;
/* from now on we are committed to detach, following will never fail */
pool_cache_destroy(sc->sc_ctxpool);
for (i = 0; i < sc->sc_nq; i++)
nvme_q_free(sc, sc->sc_q[i]);
kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
@@ -565,8 +554,7 @@ nvme_ns_identify(struct nvme_softc *sc, uint16_t nsid)
KASSERT(nsid > 0);
ccb = nvme_ccb_get(sc->sc_admin_q);
if (ccb == NULL)
return EAGAIN;
KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
mem = nvme_dmamem_alloc(sc, sizeof(*identify));
if (mem == NULL)
@@ -608,7 +596,9 @@ done:
}
int
nvme_ns_dobio(struct nvme_softc *sc, struct nvme_ns_context *ctx)
nvme_ns_dobio(struct nvme_softc *sc, uint16_t nsid, void *cookie,
struct buf *bp, void *data, size_t datasize,
int secsize, daddr_t blkno, int flags, nvme_nnc_done nnc_done)
{
struct nvme_queue *q = nvme_get_q(sc);
struct nvme_ccb *ccb;
@@ -620,14 +610,23 @@ nvme_ns_dobio(struct nvme_softc *sc, struct nvme_ns_context *ctx)
return EAGAIN;
ccb->ccb_done = nvme_ns_io_done;
ccb->ccb_cookie = ctx;
ccb->ccb_cookie = cookie;
/* namespace context */
ccb->nnc_nsid = nsid;
ccb->nnc_flags = flags;
ccb->nnc_buf = bp;
ccb->nnc_datasize = datasize;
ccb->nnc_secsize = secsize;
ccb->nnc_blkno = blkno;
ccb->nnc_done = nnc_done;
dmap = ccb->ccb_dmamap;
error = bus_dmamap_load(sc->sc_dmat, dmap, ctx->nnc_data,
ctx->nnc_datasize, NULL,
(ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL) ?
error = bus_dmamap_load(sc->sc_dmat, dmap, data,
datasize, NULL,
(ISSET(flags, NVME_NS_CTX_F_POLL) ?
BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
(ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
(ISSET(flags, NVME_NS_CTX_F_READ) ?
BUS_DMA_READ : BUS_DMA_WRITE));
if (error) {
nvme_ccb_put(q, ccb);
@@ -635,7 +634,7 @@ nvme_ns_dobio(struct nvme_softc *sc, struct nvme_ns_context *ctx)
}
bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
ISSET(flags, NVME_NS_CTX_F_READ) ?
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
if (dmap->dm_nsegs > 2) {
@@ -650,7 +649,7 @@ nvme_ns_dobio(struct nvme_softc *sc, struct nvme_ns_context *ctx)
BUS_DMASYNC_PREWRITE);
}
if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
if (nvme_poll(sc, q, ccb, nvme_ns_io_fill, NVME_TIMO_PT) != 0)
return EIO;
return 0;
@@ -664,12 +663,11 @@ static void
nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
struct nvme_sqe_io *sqe = slot;
struct nvme_ns_context *ctx = ccb->ccb_cookie;
bus_dmamap_t dmap = ccb->ccb_dmamap;
sqe->opcode = ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
sqe->opcode = ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
NVM_CMD_READ : NVM_CMD_WRITE;
htolem32(&sqe->nsid, ctx->nnc_nsid);
htolem32(&sqe->nsid, ccb->nnc_nsid);
htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
switch (dmap->dm_nsegs) {
@@ -684,8 +682,11 @@ nvme_ns_io_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
break;
}
htolem64(&sqe->slba, ctx->nnc_blkno);
htolem16(&sqe->nlb, (ctx->nnc_datasize / ctx->nnc_secsize) - 1);
htolem64(&sqe->slba, ccb->nnc_blkno);
/* guaranteed by upper layers, but check just in case */
KASSERT((ccb->nnc_datasize % ccb->nnc_secsize) == 0);
htolem16(&sqe->nlb, (ccb->nnc_datasize / ccb->nnc_secsize) - 1);
}
static void
@@ -693,9 +694,10 @@ nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
struct nvme_cqe *cqe)
{
struct nvme_softc *sc = q->q_sc;
struct nvme_ns_context *ctx = ccb->ccb_cookie;
bus_dmamap_t dmap = ccb->ccb_dmamap;
uint16_t flags;
void *nnc_cookie = ccb->ccb_cookie;
nvme_nnc_done nnc_done = ccb->nnc_done;
struct buf *bp = ccb->nnc_buf;
if (dmap->dm_nsegs > 2) {
bus_dmamap_sync(sc->sc_dmat,
@@ -706,20 +708,18 @@ nvme_ns_io_done(struct nvme_queue *q, struct nvme_ccb *ccb,
}
bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
ISSET(ctx->nnc_flags, NVME_NS_CTX_F_READ) ?
ISSET(ccb->nnc_flags, NVME_NS_CTX_F_READ) ?
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->sc_dmat, dmap);
nvme_ccb_put(q, ccb);
flags = lemtoh16(&cqe->flags);
ctx->nnc_status = flags;
(*ctx->nnc_done)(ctx);
nnc_done(nnc_cookie, bp, lemtoh16(&cqe->flags));
}
int
nvme_ns_sync(struct nvme_softc *sc, struct nvme_ns_context *ctx)
nvme_ns_sync(struct nvme_softc *sc, uint16_t nsid, void *cookie,
int flags, nvme_nnc_done nnc_done)
{
struct nvme_queue *q = nvme_get_q(sc);
struct nvme_ccb *ccb;
@@ -729,9 +729,14 @@ nvme_ns_sync(struct nvme_softc *sc, struct nvme_ns_context *ctx)
return EAGAIN;
ccb->ccb_done = nvme_ns_sync_done;
ccb->ccb_cookie = ctx;
ccb->ccb_cookie = cookie;
if (ISSET(ctx->nnc_flags, NVME_NS_CTX_F_POLL)) {
/* namespace context */
ccb->nnc_nsid = nsid;
ccb->nnc_flags = flags;
ccb->nnc_done = nnc_done;
if (ISSET(flags, NVME_NS_CTX_F_POLL)) {
if (nvme_poll(sc, q, ccb, nvme_ns_sync_fill, NVME_TIMO_SY) != 0)
return EIO;
return 0;
@@ -745,25 +750,21 @@ static void
nvme_ns_sync_fill(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
{
struct nvme_sqe *sqe = slot;
struct nvme_ns_context *ctx = ccb->ccb_cookie;
sqe->opcode = NVM_CMD_FLUSH;
htolem32(&sqe->nsid, ctx->nnc_nsid);
htolem32(&sqe->nsid, ccb->nnc_nsid);
}
static void
nvme_ns_sync_done(struct nvme_queue *q, struct nvme_ccb *ccb,
struct nvme_cqe *cqe)
{
struct nvme_ns_context *ctx = ccb->ccb_cookie;
uint16_t flags;
void *cookie = ccb->ccb_cookie;
nvme_nnc_done nnc_done = ccb->nnc_done;
nvme_ccb_put(q, ccb);
flags = lemtoh16(&cqe->flags);
ctx->nnc_status = flags;
(*ctx->nnc_done)(ctx);
nnc_done(cookie, NULL, lemtoh16(&cqe->flags));
}
void
@@ -1078,8 +1079,7 @@ nvme_identify(struct nvme_softc *sc, u_int mps)
int rv = 1;
ccb = nvme_ccb_get(sc->sc_admin_q);
if (ccb == NULL)
panic("%s: nvme_ccb_get returned NULL", __func__);
KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
mem = nvme_dmamem_alloc(sc, sizeof(*identify));
if (mem == NULL)

nvmevar.h

@@ -1,4 +1,4 @@
/* $NetBSD: nvmevar.h,v 1.3 2016/09/18 21:19:39 jdolecek Exp $ */
/* $NetBSD: nvmevar.h,v 1.4 2016/09/19 20:33:51 jdolecek Exp $ */
/* $OpenBSD: nvmevar.h,v 1.8 2016/04/14 11:18:32 dlg Exp $ */
/*
@@ -38,22 +38,37 @@ struct nvme_dmamem {
struct nvme_softc;
struct nvme_queue;
typedef void (*nvme_nnc_done)(void *, struct buf *, uint16_t);
struct nvme_ccb {
SIMPLEQ_ENTRY(nvme_ccb) ccb_entry;
/* DMA handles */
bus_dmamap_t ccb_dmamap;
void *ccb_cookie;
void (*ccb_done)(struct nvme_queue *,
struct nvme_ccb *, struct nvme_cqe *);
bus_addr_t ccb_prpl_off;
uint64_t ccb_prpl_dva;
uint64_t *ccb_prpl;
/* command context */
uint16_t ccb_id;
void *ccb_cookie;
void (*ccb_done)(struct nvme_queue *,
struct nvme_ccb *, struct nvme_cqe *);
/* namespace context */
void *nnc_cookie;
nvme_nnc_done nnc_done;
uint16_t nnc_nsid;
uint16_t nnc_flags;
#define NVME_NS_CTX_F_READ __BIT(0)
#define NVME_NS_CTX_F_POLL __BIT(1)
struct buf *nnc_buf;
daddr_t nnc_blkno;
size_t nnc_datasize;
int nnc_secsize;
};
SIMPLEQ_HEAD(nvme_ccb_list, nvme_ccb);
struct nvme_queue {
struct nvme_softc *q_sc;
@@ -72,7 +87,7 @@ struct nvme_queue {
kmutex_t q_ccb_mtx;
u_int q_nccbs;
struct nvme_ccb *q_ccbs;
struct nvme_ccb_list q_ccb_list;
SIMPLEQ_HEAD(, nvme_ccb) q_ccb_list;
struct nvme_dmamem *q_ccb_prpls;
};
@@ -114,9 +129,6 @@ struct nvme_softc {
struct nvme_queue *sc_admin_q;
struct nvme_queue **sc_q;
pool_cache_t sc_ctxpool;
char sc_ctxpoolname[16]; /* pool wchan */
uint32_t sc_flags;
#define NVME_F_ATTACHED __BIT(0)
#define NVME_F_OPEN __BIT(1)
@@ -160,28 +172,6 @@ nvme_ns_get(struct nvme_softc *sc, uint16_t nsid)
int nvme_ns_identify(struct nvme_softc *, uint16_t);
void nvme_ns_free(struct nvme_softc *, uint16_t);
struct nvme_ns_context {
void *nnc_cookie;
void (*nnc_done)(struct nvme_ns_context *);
uint16_t nnc_nsid;
struct buf *nnc_buf;
void *nnc_data;
int nnc_datasize;
int nnc_secsize;
daddr_t nnc_blkno;
u_int nnc_flags;
#define NVME_NS_CTX_F_READ __BIT(0)
#define NVME_NS_CTX_F_POLL __BIT(1)
int nnc_status;
};
#define nvme_ns_get_ctx(sc, flags) \
pool_cache_get((sc)->sc_nvme->sc_ctxpool, (flags))
#define nvme_ns_put_ctx(sc, ctx) \
pool_cache_put((sc)->sc_nvme->sc_ctxpool, (ctx))
int nvme_ns_dobio(struct nvme_softc *, struct nvme_ns_context *);
int nvme_ns_sync(struct nvme_softc *, struct nvme_ns_context *);
int nvme_ns_dobio(struct nvme_softc *, uint16_t, void *,
struct buf *, void *, size_t, int, daddr_t, int, nvme_nnc_done);
int nvme_ns_sync(struct nvme_softc *, uint16_t, void *, int, nvme_nnc_done);