Merge tag 'nvme-next-pull-request' of https://gitlab.com/birkelund/qemu into staging

hw/nvme patches

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEUigzqnXi3OaiR2bATeGvMW1PDekFAmaeiz4ACgkQTeGvMW1P
# Dem5DggAkudAwZYUlKLz/FuxmOJsZ/CKL7iIu6wE3P93WTTbi4m2AL5lMFz1bOUH
# 33LtjHz51bDvOsnhAwLs2TwjfhICiMJCOXEmxF9zJnO4Yo8ih9UbeE7sEukpxsVr
# FJlAg5OXhdIHuo48ow7hu7BqMs58jnXhVA6zSvLU5rbKTSdG/369jyQKy5aoFPN0
# Rk+S6hqDmVMiN7u6E+QqPyB2tSbmNKkhPICu3O9fbHmaOoMFmrcvyxkd1wJ9JxwF
# 8MWbuEZlIpLIIL/mCN4wzDw8VKlJ26sBJJC1b+NHmWIWmPkqMeXwcmQtWhUqsrcs
# xAGUcjgJuJ3Fu6Xzt+09Y+FXO8v0oQ==
# =vCDb
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 23 Jul 2024 02:39:26 AM AEST
# gpg:                using RSA key 522833AA75E2DCE6A24766C04DE1AF316D4F0DE9
# gpg: Good signature from "Klaus Jensen <its@irrelevant.dk>" [unknown]
# gpg:                 aka "Klaus Jensen <k.jensen@samsung.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: DDCA 4D9C 9EF9 31CC 3468  4272 63D5 6FC5 E55D A838
#      Subkey fingerprint: 5228 33AA 75E2 DCE6 A247  66C0 4DE1 AF31 6D4F 0DE9

* tag 'nvme-next-pull-request' of https://gitlab.com/birkelund/qemu:
  hw/nvme: remove useless type cast
  hw/nvme: actually implement abort
  hw/nvme: add cross namespace copy support
  hw/nvme: fix memory leak in nvme_dsm

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 6af69d0270
Committer: Richard Henderson, 2024-07-23 13:55:45 +10:00
2 changed files with 323 additions and 104 deletions

hw/nvme/ctrl.c

@@ -1758,6 +1758,10 @@ static void nvme_aio_err(NvmeRequest *req, int ret)
break;
}
if (ret == -ECANCELED) {
status = NVME_CMD_ABORT_REQ;
}
trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status);
error_setg_errno(&local_err, -ret, "aio failed");
@@ -2591,6 +2595,7 @@ next:
done:
iocb->aiocb = NULL;
iocb->common.cb(iocb->common.opaque, iocb->ret);
g_free(iocb->range);
qemu_aio_unref(iocb);
}
@@ -2695,6 +2700,7 @@ typedef struct NvmeCopyAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
NvmeRequest *req;
NvmeCtrl *n;
int ret;
void *ranges;
@@ -2713,6 +2719,8 @@ typedef struct NvmeCopyAIOCB {
uint64_t slba;
NvmeZone *zone;
NvmeNamespace *sns;
uint32_t tcl;
} NvmeCopyAIOCB;
static void nvme_copy_cancel(BlockAIOCB *aiocb)
@@ -2759,13 +2767,19 @@ static void nvme_copy_done(NvmeCopyAIOCB *iocb)
static void nvme_do_copy(NvmeCopyAIOCB *iocb);
static void nvme_copy_source_range_parse_format0(void *ranges, int idx,
uint64_t *slba, uint32_t *nlb,
uint16_t *apptag,
uint16_t *appmask,
uint64_t *reftag)
static void nvme_copy_source_range_parse_format0_2(void *ranges,
int idx, uint64_t *slba,
uint32_t *nlb,
uint32_t *snsid,
uint16_t *apptag,
uint16_t *appmask,
uint64_t *reftag)
{
NvmeCopySourceRangeFormat0 *_ranges = ranges;
NvmeCopySourceRangeFormat0_2 *_ranges = ranges;
if (snsid) {
*snsid = le32_to_cpu(_ranges[idx].sparams);
}
if (slba) {
*slba = le64_to_cpu(_ranges[idx].slba);
@@ -2788,13 +2802,19 @@ static void nvme_copy_source_range_parse_format0(void *ranges, int idx,
}
}
static void nvme_copy_source_range_parse_format1(void *ranges, int idx,
uint64_t *slba, uint32_t *nlb,
uint16_t *apptag,
uint16_t *appmask,
uint64_t *reftag)
static void nvme_copy_source_range_parse_format1_3(void *ranges, int idx,
uint64_t *slba,
uint32_t *nlb,
uint32_t *snsid,
uint16_t *apptag,
uint16_t *appmask,
uint64_t *reftag)
{
NvmeCopySourceRangeFormat1 *_ranges = ranges;
NvmeCopySourceRangeFormat1_3 *_ranges = ranges;
if (snsid) {
*snsid = le32_to_cpu(_ranges[idx].sparams);
}
if (slba) {
*slba = le64_to_cpu(_ranges[idx].slba);
@@ -2826,18 +2846,20 @@ static void nvme_copy_source_range_parse_format1(void *ranges, int idx,
static void nvme_copy_source_range_parse(void *ranges, int idx, uint8_t format,
uint64_t *slba, uint32_t *nlb,
uint16_t *apptag, uint16_t *appmask,
uint64_t *reftag)
uint32_t *snsid, uint16_t *apptag,
uint16_t *appmask, uint64_t *reftag)
{
switch (format) {
case NVME_COPY_FORMAT_0:
nvme_copy_source_range_parse_format0(ranges, idx, slba, nlb, apptag,
appmask, reftag);
case NVME_COPY_FORMAT_2:
nvme_copy_source_range_parse_format0_2(ranges, idx, slba, nlb, snsid,
apptag, appmask, reftag);
break;
case NVME_COPY_FORMAT_1:
nvme_copy_source_range_parse_format1(ranges, idx, slba, nlb, apptag,
appmask, reftag);
case NVME_COPY_FORMAT_3:
nvme_copy_source_range_parse_format1_3(ranges, idx, slba, nlb, snsid,
apptag, appmask, reftag);
break;
default:
@@ -2853,10 +2875,10 @@ static inline uint16_t nvme_check_copy_mcl(NvmeNamespace *ns,
for (int idx = 0; idx < nr; idx++) {
uint32_t nlb;
nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL,
&nlb, NULL, NULL, NULL);
&nlb, NULL, NULL, NULL, NULL);
copy_len += nlb;
}
iocb->tcl = copy_len;
if (copy_len > ns->id_ns.mcl) {
return NVME_CMD_SIZE_LIMIT | NVME_DNR;
}
@@ -2868,11 +2890,11 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
NvmeNamespace *dns = req->ns;
uint32_t nlb;
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
&nlb, NULL, NULL, NULL);
&nlb, NULL, NULL, NULL, NULL);
if (ret < 0) {
iocb->ret = ret;
@@ -2881,8 +2903,8 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret)
goto out;
}
if (ns->params.zoned) {
nvme_advance_zone_wp(ns, iocb->zone, nlb);
if (dns->params.zoned) {
nvme_advance_zone_wp(dns, iocb->zone, nlb);
}
iocb->idx++;
@@ -2895,25 +2917,25 @@ static void nvme_copy_out_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
NvmeNamespace *dns = req->ns;
uint32_t nlb;
size_t mlen;
uint8_t *mbounce;
if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
if (ret < 0 || iocb->ret < 0 || !dns->lbaf.ms) {
goto out;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
&nlb, NULL, NULL, NULL);
&nlb, NULL, NULL, NULL, NULL);
mlen = nvme_m2b(ns, nlb);
mbounce = iocb->bounce + nvme_l2b(ns, nlb);
mlen = nvme_m2b(dns, nlb);
mbounce = iocb->bounce + nvme_l2b(dns, nlb);
qemu_iovec_reset(&iocb->iov);
qemu_iovec_add(&iocb->iov, mbounce, mlen);
iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba),
iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_moff(dns, iocb->slba),
&iocb->iov, 0, nvme_copy_out_completed_cb,
iocb);
@@ -2927,12 +2949,15 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
NvmeNamespace *sns = iocb->sns;
NvmeNamespace *dns = req->ns;
NvmeCopyCmd *copy = NULL;
uint8_t *mbounce = NULL;
uint32_t nlb;
uint64_t slba;
uint16_t apptag, appmask;
uint64_t reftag;
size_t len;
size_t len, mlen;
uint16_t status;
if (ret < 0) {
@@ -2943,43 +2968,51 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
&nlb, &apptag, &appmask, &reftag);
len = nvme_l2b(ns, nlb);
&nlb, NULL, &apptag, &appmask, &reftag);
trace_pci_nvme_copy_out(iocb->slba, nlb);
if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
len = nvme_l2b(sns, nlb);
if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps)) {
copy = (NvmeCopyCmd *)&req->cmd;
uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
size_t mlen = nvme_m2b(ns, nlb);
uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb);
mlen = nvme_m2b(sns, nlb);
mbounce = iocb->bounce + nvme_l2b(sns, nlb);
status = nvme_dif_mangle_mdata(ns, mbounce, mlen, slba);
status = nvme_dif_mangle_mdata(sns, mbounce, mlen, slba);
if (status) {
goto invalid;
}
status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor,
status = nvme_dif_check(sns, iocb->bounce, len, mbounce, mlen, prinfor,
slba, apptag, appmask, &reftag);
if (status) {
goto invalid;
}
}
if (NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
copy = (NvmeCopyCmd *)&req->cmd;
uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
mlen = nvme_m2b(dns, nlb);
mbounce = iocb->bounce + nvme_l2b(dns, nlb);
apptag = le16_to_cpu(copy->apptag);
appmask = le16_to_cpu(copy->appmask);
if (prinfow & NVME_PRINFO_PRACT) {
status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag);
status = nvme_check_prinfo(dns, prinfow, iocb->slba, iocb->reftag);
if (status) {
goto invalid;
}
nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen,
nvme_dif_pract_generate_dif(dns, iocb->bounce, len, mbounce, mlen,
apptag, &iocb->reftag);
} else {
status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen,
status = nvme_dif_check(dns, iocb->bounce, len, mbounce, mlen,
prinfow, iocb->slba, apptag, appmask,
&iocb->reftag);
if (status) {
@@ -2988,13 +3021,13 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
}
}
status = nvme_check_bounds(ns, iocb->slba, nlb);
status = nvme_check_bounds(dns, iocb->slba, nlb);
if (status) {
goto invalid;
}
if (ns->params.zoned) {
status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb);
if (dns->params.zoned) {
status = nvme_check_zone_write(dns, iocb->zone, iocb->slba, nlb);
if (status) {
goto invalid;
}
@@ -3007,7 +3040,10 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
qemu_iovec_reset(&iocb->iov);
qemu_iovec_add(&iocb->iov, iocb->bounce, len);
iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba),
block_acct_start(blk_get_stats(dns->blkconf.blk), &iocb->acct.write, 0,
BLOCK_ACCT_WRITE);
iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_l2b(dns, iocb->slba),
&iocb->iov, 0, nvme_copy_out_cb, iocb);
return;
@@ -3022,23 +3058,22 @@ out:
static void nvme_copy_in_cb(void *opaque, int ret)
{
NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
NvmeNamespace *sns = iocb->sns;
uint64_t slba;
uint32_t nlb;
if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
if (ret < 0 || iocb->ret < 0 || !sns->lbaf.ms) {
goto out;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
&nlb, NULL, NULL, NULL);
&nlb, NULL, NULL, NULL, NULL);
qemu_iovec_reset(&iocb->iov);
qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb),
nvme_m2b(ns, nlb));
qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(sns, nlb),
nvme_m2b(sns, nlb));
iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba),
iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_moff(sns, slba),
&iocb->iov, 0, nvme_copy_in_completed_cb,
iocb);
return;
@@ -3047,14 +3082,78 @@ out:
nvme_copy_in_completed_cb(iocb, ret);
}
static inline bool nvme_csi_supports_copy(uint8_t csi)
{
return csi == NVME_CSI_NVM || csi == NVME_CSI_ZONED;
}
static inline bool nvme_copy_ns_format_match(NvmeNamespace *sns,
NvmeNamespace *dns)
{
return sns->lbaf.ds == dns->lbaf.ds && sns->lbaf.ms == dns->lbaf.ms;
}
static bool nvme_copy_matching_ns_format(NvmeNamespace *sns, NvmeNamespace *dns,
bool pi_enable)
{
if (!nvme_csi_supports_copy(sns->csi) ||
!nvme_csi_supports_copy(dns->csi)) {
return false;
}
if (!pi_enable && !nvme_copy_ns_format_match(sns, dns)) {
return false;
}
if (pi_enable && (!nvme_copy_ns_format_match(sns, dns) ||
sns->id_ns.dps != dns->id_ns.dps)) {
return false;
}
return true;
}
static inline bool nvme_copy_corresp_pi_match(NvmeNamespace *sns,
NvmeNamespace *dns)
{
return sns->lbaf.ms == 0 &&
((dns->lbaf.ms == 8 && dns->pif == 0) ||
(dns->lbaf.ms == 16 && dns->pif == 1));
}
static bool nvme_copy_corresp_pi_format(NvmeNamespace *sns, NvmeNamespace *dns,
bool sns_pi_en)
{
if (!nvme_csi_supports_copy(sns->csi) ||
!nvme_csi_supports_copy(dns->csi)) {
return false;
}
if (!sns_pi_en && !nvme_copy_corresp_pi_match(sns, dns)) {
return false;
}
if (sns_pi_en && !nvme_copy_corresp_pi_match(dns, sns)) {
return false;
}
return true;
}
static void nvme_do_copy(NvmeCopyAIOCB *iocb)
{
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
NvmeNamespace *sns;
NvmeNamespace *dns = req->ns;
NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
uint64_t slba;
uint32_t nlb;
size_t len;
uint16_t status;
uint32_t dnsid = le32_to_cpu(req->cmd.nsid);
uint32_t snsid = dnsid;
if (iocb->ret < 0) {
goto done;
@@ -3064,40 +3163,124 @@ static void nvme_do_copy(NvmeCopyAIOCB *iocb)
goto done;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
&nlb, NULL, NULL, NULL);
len = nvme_l2b(ns, nlb);
if (iocb->format == 2 || iocb->format == 3) {
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format,
&slba, &nlb, &snsid, NULL, NULL, NULL);
if (snsid != dnsid) {
if (snsid == NVME_NSID_BROADCAST ||
!nvme_nsid_valid(iocb->n, snsid)) {
status = NVME_INVALID_NSID | NVME_DNR;
goto invalid;
}
iocb->sns = nvme_ns(iocb->n, snsid);
if (unlikely(!iocb->sns)) {
status = NVME_INVALID_FIELD | NVME_DNR;
goto invalid;
}
} else {
if (((slba + nlb) > iocb->slba) &&
((slba + nlb) < (iocb->slba + iocb->tcl))) {
status = NVME_CMD_OVERLAP_IO_RANGE | NVME_DNR;
goto invalid;
}
}
} else {
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format,
&slba, &nlb, NULL, NULL, NULL, NULL);
}
sns = iocb->sns;
if ((snsid == dnsid) && NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
status = NVME_INVALID_FIELD | NVME_DNR;
goto invalid;
} else if (snsid != dnsid) {
if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
!NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
if (!nvme_copy_matching_ns_format(sns, dns, false)) {
status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
goto invalid;
}
}
if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
if ((prinfor & NVME_PRINFO_PRACT) !=
(prinfow & NVME_PRINFO_PRACT)) {
status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
goto invalid;
} else {
if (!nvme_copy_matching_ns_format(sns, dns, true)) {
status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
goto invalid;
}
}
}
if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
if (!(prinfow & NVME_PRINFO_PRACT)) {
status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
goto invalid;
} else {
if (!nvme_copy_corresp_pi_format(sns, dns, false)) {
status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
goto invalid;
}
}
}
if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) &&
!NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) {
if (!(prinfor & NVME_PRINFO_PRACT)) {
status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
goto invalid;
} else {
if (!nvme_copy_corresp_pi_format(sns, dns, true)) {
status = NVME_CMD_INCOMP_NS_OR_FMT | NVME_DNR;
goto invalid;
}
}
}
}
len = nvme_l2b(sns, nlb);
trace_pci_nvme_copy_source_range(slba, nlb);
if (nlb > le16_to_cpu(ns->id_ns.mssrl)) {
if (nlb > le16_to_cpu(sns->id_ns.mssrl)) {
status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
goto invalid;
}
status = nvme_check_bounds(ns, slba, nlb);
status = nvme_check_bounds(sns, slba, nlb);
if (status) {
goto invalid;
}
if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
status = nvme_check_dulbe(ns, slba, nlb);
if (NVME_ERR_REC_DULBE(sns->features.err_rec)) {
status = nvme_check_dulbe(sns, slba, nlb);
if (status) {
goto invalid;
}
}
if (ns->params.zoned) {
status = nvme_check_zone_read(ns, slba, nlb);
if (sns->params.zoned) {
status = nvme_check_zone_read(sns, slba, nlb);
if (status) {
goto invalid;
}
}
g_free(iocb->bounce);
iocb->bounce = g_malloc_n(le16_to_cpu(sns->id_ns.mssrl),
sns->lbasz + sns->lbaf.ms);
qemu_iovec_reset(&iocb->iov);
qemu_iovec_add(&iocb->iov, iocb->bounce, len);
iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba),
block_acct_start(blk_get_stats(sns->blkconf.blk), &iocb->acct.read, 0,
BLOCK_ACCT_READ);
iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_l2b(sns, slba),
&iocb->iov, 0, nvme_copy_in_cb, iocb);
return;
@@ -3116,9 +3299,7 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
nvme_misc_cb, req);
uint16_t nr = copy->nr + 1;
uint8_t format = copy->control[0] & 0xf;
uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);
size_t len = sizeof(NvmeCopySourceRangeFormat0);
size_t len = sizeof(NvmeCopySourceRangeFormat0_2);
uint16_t status;
@@ -3127,13 +3308,9 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
iocb->ranges = NULL;
iocb->zone = NULL;
if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
status = NVME_INVALID_FIELD | NVME_DNR;
goto invalid;
}
if (!(n->id_ctrl.ocfs & (1 << format))) {
if (!(n->id_ctrl.ocfs & (1 << format)) ||
((format == 2 || format == 3) &&
!(n->features.hbs.cdfe & (1 << format)))) {
trace_pci_nvme_err_copy_invalid_format(format);
status = NVME_INVALID_FIELD | NVME_DNR;
goto invalid;
@@ -3144,14 +3321,14 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
goto invalid;
}
if ((ns->pif == 0x0 && format != 0x0) ||
(ns->pif != 0x0 && format != 0x1)) {
if ((ns->pif == 0x0 && (format != 0x0 && format != 0x2)) ||
(ns->pif != 0x0 && (format != 0x1 && format != 0x3))) {
status = NVME_INVALID_FORMAT | NVME_DNR;
goto invalid;
}
if (ns->pif) {
len = sizeof(NvmeCopySourceRangeFormat1);
len = sizeof(NvmeCopySourceRangeFormat1_3);
}
iocb->format = format;
@@ -3187,17 +3364,13 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
iocb->idx = 0;
iocb->reftag = le32_to_cpu(copy->reftag);
iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32;
iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl),
ns->lbasz + ns->lbaf.ms);
qemu_iovec_init(&iocb->iov, 1);
block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0,
BLOCK_ACCT_READ);
block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0,
BLOCK_ACCT_WRITE);
req->aiocb = &iocb->common;
iocb->sns = req->ns;
iocb->n = n;
iocb->bounce = NULL;
nvme_do_copy(iocb);
return NVME_NO_COMPLETE;
@@ -4167,7 +4340,7 @@ static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
{
NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
NvmeCmd *cmd = &req->cmd;
NvmeNamespace *ns = req->ns;
/* cdw12 is zero-based number of dwords to return. Convert to bytes */
uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2;
@@ -4406,10 +4579,6 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));
if (!nvme_nsid_valid(n, nsid)) {
return NVME_INVALID_NSID | NVME_DNR;
}
/*
* In the base NVM command set, Flush may apply to all namespaces
* (indicated by NSID being set to FFFFFFFFh). But if that feature is used
@@ -4429,10 +4598,15 @@
* device only supports namespace types that includes the NVM Flush command
* (NVM and Zoned), so always do an NVM Flush.
*/
if (req->cmd.opcode == NVME_CMD_FLUSH) {
return nvme_flush(n, req);
}
if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
return NVME_INVALID_NSID | NVME_DNR;
}
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
return NVME_INVALID_FIELD | NVME_DNR;
@@ -5780,12 +5954,40 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
uint16_t cid = (le32_to_cpu(req->cmd.cdw10) >> 16) & 0xffff;
NvmeSQueue *sq = n->sq[sqid];
NvmeRequest *r, *next;
int i;
req->cqe.result = 1;
if (nvme_check_sqid(n, sqid)) {
return NVME_INVALID_FIELD | NVME_DNR;
}
if (sqid == 0) {
for (i = 0; i < n->outstanding_aers; i++) {
NvmeRequest *re = n->aer_reqs[i];
if (re->cqe.cid == cid) {
memmove(n->aer_reqs + i, n->aer_reqs + i + 1,
(n->outstanding_aers - i - 1) * sizeof(NvmeRequest *));
n->outstanding_aers--;
re->status = NVME_CMD_ABORT_REQ;
req->cqe.result = 0;
nvme_enqueue_req_completion(&n->admin_cq, re);
return NVME_SUCCESS;
}
}
}
QTAILQ_FOREACH_SAFE(r, &sq->out_req_list, entry, next) {
if (r->cqe.cid == cid) {
if (r->aiocb) {
blk_aio_cancel_async(r->aiocb);
}
break;
}
}
return NVME_SUCCESS;
}
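
For reference, the Abort handler above unpacks both identifiers from a single
command dword. A minimal host-side sketch of that bit layout (the helper name
is hypothetical and not part of this patch):

    #include <stdint.h>

    /* Pack Abort's CDW10 exactly as nvme_abort() consumes it above: the
     * target submission queue ID in bits 15:0 and the CID of the command
     * to abort in bits 31:16. */
    static inline uint32_t nvme_abort_cdw10(uint16_t sqid, uint16_t cid)
    {
        return (uint32_t)sqid | ((uint32_t)cid << 16);
    }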
@@ -8287,7 +8489,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->nn = cpu_to_le32(NVME_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
NVME_ONCS_FEATURES | NVME_ONCS_DSM |
NVME_ONCS_COMPARE | NVME_ONCS_COPY);
NVME_ONCS_COMPARE | NVME_ONCS_COPY |
NVME_ONCS_NVMCSA | NVME_ONCS_NVMAFC);
/*
* NOTE: If this device ever supports a command set that does NOT use 0x0
@@ -8298,7 +8501,8 @@
*/
id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;
id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1);
id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1 |
NVME_OCFS_COPY_FORMAT_2 | NVME_OCFS_COPY_FORMAT_3);
id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN);
nvme_init_subnqn(n);
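
With the ONCS and OCFS updates above, the controller now advertises
cross-namespace copy. A hedged host-side sketch of the matching capability
check, assuming id points at a previously fetched Identify Controller
structure (the fetch itself is outside this patch):

    /* Cross-namespace copy needs the NVMCSA capability bit plus at least
     * one of the new descriptor formats; constants are from the nvme.h
     * changes below. */
    bool can_copy_across_ns =
        (le16_to_cpu(id->oncs) & NVME_ONCS_NVMCSA) &&
        (le16_to_cpu(id->ocfs) & (NVME_OCFS_COPY_FORMAT_2 |
                                  NVME_OCFS_COPY_FORMAT_3));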

include/block/nvme.h

@@ -799,6 +799,8 @@ typedef struct QEMU_PACKED NvmeDsmRange {
enum {
NVME_COPY_FORMAT_0 = 0x0,
NVME_COPY_FORMAT_1 = 0x1,
NVME_COPY_FORMAT_2 = 0x2,
NVME_COPY_FORMAT_3 = 0x3,
};
typedef struct QEMU_PACKED NvmeCopyCmd {
@@ -820,25 +822,30 @@ typedef struct QEMU_PACKED NvmeCopyCmd {
uint16_t appmask;
} NvmeCopyCmd;
typedef struct QEMU_PACKED NvmeCopySourceRangeFormat0 {
uint8_t rsvd0[8];
typedef struct QEMU_PACKED NvmeCopySourceRangeFormat0_2 {
uint32_t sparams;
uint8_t rsvd4[4];
uint64_t slba;
uint16_t nlb;
uint8_t rsvd18[6];
uint8_t rsvd18[4];
uint16_t sopt;
uint32_t reftag;
uint16_t apptag;
uint16_t appmask;
} NvmeCopySourceRangeFormat0;
} NvmeCopySourceRangeFormat0_2;
typedef struct QEMU_PACKED NvmeCopySourceRangeFormat1 {
uint8_t rsvd0[8];
typedef struct QEMU_PACKED NvmeCopySourceRangeFormat1_3 {
uint32_t sparams;
uint8_t rsvd4[4];
uint64_t slba;
uint16_t nlb;
uint8_t rsvd18[8];
uint8_t rsvd18[4];
uint16_t sopt;
uint8_t rsvd24[2];
uint8_t sr[10];
uint16_t apptag;
uint16_t appmask;
} NvmeCopySourceRangeFormat1;
} NvmeCopySourceRangeFormat1_3;
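
In both new layouts the low dword of sparams carries the source namespace
identifier, which nvme_copy_source_range_parse_format0_2() and
nvme_copy_source_range_parse_format1_3() read back out in ctrl.c above. A
sketch of filling one format-2 descriptor, assuming QEMU's cpu_to_le*
helpers and the spec's zero-based block count:

    /* Copy nlb logical blocks starting at slba from namespace snsid. */
    NvmeCopySourceRangeFormat0_2 range = {
        .sparams = cpu_to_le32(snsid),      /* source NSID in the low dword */
        .slba    = cpu_to_le64(slba),
        .nlb     = cpu_to_le16(nlb - 1),    /* 0's-based count per spec */
    };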
enum NvmeAsyncEventRequest {
NVME_AER_TYPE_ERROR = 0,
@@ -937,6 +944,8 @@ enum NvmeStatusCodes {
NVME_INVALID_PROT_INFO = 0x0181,
NVME_WRITE_TO_RO = 0x0182,
NVME_CMD_SIZE_LIMIT = 0x0183,
NVME_CMD_INCOMP_NS_OR_FMT = 0x0185,
NVME_CMD_OVERLAP_IO_RANGE = 0x0187,
NVME_INVALID_ZONE_OP = 0x01b6,
NVME_NOZRWA = 0x01b7,
NVME_ZONE_BOUNDARY_ERROR = 0x01b8,
@@ -1195,11 +1204,15 @@ enum NvmeIdCtrlOncs {
NVME_ONCS_TIMESTAMP = 1 << 6,
NVME_ONCS_VERIFY = 1 << 7,
NVME_ONCS_COPY = 1 << 8,
NVME_ONCS_NVMCSA = 1 << 9,
NVME_ONCS_NVMAFC = 1 << 10,
};
enum NvmeIdCtrlOcfs {
NVME_OCFS_COPY_FORMAT_0 = 1 << NVME_COPY_FORMAT_0,
NVME_OCFS_COPY_FORMAT_1 = 1 << NVME_COPY_FORMAT_1,
NVME_OCFS_COPY_FORMAT_2 = 1 << NVME_COPY_FORMAT_2,
NVME_OCFS_COPY_FORMAT_3 = 1 << NVME_COPY_FORMAT_3,
};
enum NvmeIdctrlVwc {
@@ -1333,7 +1346,9 @@ typedef struct NvmeHostBehaviorSupport {
uint8_t acre;
uint8_t etdas;
uint8_t lbafee;
uint8_t rsvd3[509];
uint8_t rsvd3;
uint16_t cdfe;
uint8_t rsvd6[506];
} NvmeHostBehaviorSupport;
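
Copy formats 2 and 3 must additionally be enabled by the host through the new
cdfe (Copy Descriptor Formats Enable) field, which nvme_copy() checks via
n->features.hbs.cdfe in ctrl.c. A sketch of the feature payload only, under
the assumption that this structure is passed as the data buffer of a Set
Features (Host Behavior Support) command:

    NvmeHostBehaviorSupport hbs = { 0 };

    /* Enable copy descriptor formats 2 and 3; the bit position matches
     * the format number. */
    hbs.cdfe = cpu_to_le16((1 << NVME_COPY_FORMAT_2) |
                           (1 << NVME_COPY_FORMAT_3));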
typedef struct QEMU_PACKED NvmeLBAF {
@@ -1833,8 +1848,8 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeZonedResult) != 8);
QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16);
QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat0) != 32);
QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat1) != 40);
QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat0_2) != 32);
QEMU_BUILD_BUG_ON(sizeof(NvmeCopySourceRangeFormat1_3) != 40);
QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64);