nvme(4): Limit the number of queues to the number allocated in HW.

This commit is contained in:
nonaka 2017-02-13 11:10:45 +00:00
parent d00a6fde99
commit 5bfb6e7e1e
1 changed file with 52 additions and 4 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: nvme.c,v 1.22 2016/11/01 14:46:31 jdolecek Exp $ */
/* $NetBSD: nvme.c,v 1.23 2017/02/13 11:10:45 nonaka Exp $ */
/* $OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */
/*
@ -18,7 +18,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.22 2016/11/01 14:46:31 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.23 2017/02/13 11:10:45 nonaka Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -107,6 +107,8 @@ static void nvme_pt_done(struct nvme_queue *, struct nvme_ccb *,
static int nvme_command_passthrough(struct nvme_softc *,
struct nvme_pt_command *, uint16_t, struct lwp *, bool);
static int nvme_get_number_of_queues(struct nvme_softc *, u_int *);
#define NVME_TIMO_QOP 5 /* queue create and delete timeout */
#define NVME_TIMO_IDENT 10 /* probe identify timeout */
#define NVME_TIMO_PT -1 /* passthrough cmd timeout */
@ -333,6 +335,7 @@ nvme_attach(struct nvme_softc *sc)
uint32_t reg;
u_int dstrd;
u_int mps = PAGE_SHIFT;
u_int ioq_allocated;
uint16_t adminq_entries = nvme_adminq_size;
uint16_t ioq_entries = nvme_ioq_size;
int i;
@ -395,6 +398,17 @@ nvme_attach(struct nvme_softc *sc)
nvme_ccbs_free(sc->sc_admin_q);
nvme_ccbs_alloc(sc->sc_admin_q, sc->sc_admin_q->q_entries);
if (sc->sc_use_mq) {
/* Limit the number of queues to the number allocated in HW */
if (nvme_get_number_of_queues(sc, &ioq_allocated) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to get number of queues\n");
goto disable;
}
if (sc->sc_nq > ioq_allocated)
sc->sc_nq = ioq_allocated;
}
sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
if (sc->sc_q == NULL) {
aprint_error_dev(sc->sc_dev, "unable to allocate io queue\n");
@ -868,8 +882,8 @@ nvme_pt_done(struct nvme_queue *q, struct nvme_ccb *ccb, struct nvme_cqe *cqe)
bus_dmamap_unload(sc->sc_dmat, dmap);
}
pt->cpl.cdw0 = cqe->cdw0;
pt->cpl.flags = cqe->flags & ~NVME_CQE_PHASE;
pt->cpl.cdw0 = lemtoh32(&cqe->cdw0);
pt->cpl.flags = lemtoh16(&cqe->flags) & ~NVME_CQE_PHASE;
}
static int
@ -1270,6 +1284,40 @@ nvme_fill_identify(struct nvme_queue *q, struct nvme_ccb *ccb, void *slot)
htolem32(&sqe->cdw10, 1);
}
/*
 * Ask the controller how many I/O queue pairs it actually allocated,
 * via a Get Features (Number of Queues) admin command issued through
 * the passthrough machinery.  On success, stores the usable queue
 * count in *nqap and returns 0; on failure stores 0 and returns EIO.
 */
static int
nvme_get_number_of_queues(struct nvme_softc *sc, u_int *nqap)
{
struct nvme_pt_command pt;
struct nvme_ccb *ccb;
uint16_t ncqa, nsqa;
int rv;
/* Borrow a ccb from the admin queue to carry the command. */
ccb = nvme_ccb_get(sc->sc_admin_q);
KASSERT(ccb != NULL); /* it's a bug if we don't have spare ccb here */
memset(&pt, 0, sizeof(pt));
pt.cmd.opcode = NVM_ADMIN_GET_FEATURES;
/* Feature ID 07h = Number of Queues; magic number — a named
 * NVME_FEAT_* constant would be preferable (TODO). */
pt.cmd.cdw10 = 7 /*NVME_FEAT_NUMBER_OF_QUEUES*/;
/* nvme_pt_done copies the completion (cdw0, flags) back into pt.cpl. */
ccb->ccb_done = nvme_pt_done;
ccb->ccb_cookie = &pt;
/* Synchronously submit and wait; nvme_pt_fill builds the SQE from pt. */
rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_pt_fill, NVME_TIMO_QOP);
/* The ccb is done with regardless of outcome — return it first. */
nvme_ccb_put(sc->sc_admin_q, ccb);
if (rv != 0) {
*nqap = 0;
return EIO;
}
/*
 * Completion dword 0: high 16 bits = number of completion queues
 * allocated, low 16 bits = number of submission queues allocated.
 * Per the NVMe spec these are 0-based counts, hence the +1 below —
 * presumably; confirm against the controller's Get Features semantics.
 */
ncqa = pt.cpl.cdw0 >> 16;
nsqa = pt.cpl.cdw0 & 0xffff;
/* A usable queue pair needs both an SQ and a CQ, so take the minimum. */
*nqap = MIN(ncqa, nsqa) + 1;
return 0;
}
static int
nvme_ccbs_alloc(struct nvme_queue *q, uint16_t nccbs)
{