Revert "hw/block/nvme: add support for sgl bit bucket descriptor"

This reverts commit d97eee64fe.

The emulated controller correctly excludes bit buckets from the
controller-to-host data transfer; however, it does not correctly account
for the resulting holes when computing the on-disk data offsets.

Reported-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Klaus Jensen, 2022-05-02 07:55:54 +02:00
parent cc9bcee265
commit b9147a3aa1
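
For context, a minimal standalone sketch of the accounting that bit bucket
support has to get right on the read path (this is not part of the commit
and not QEMU code; the desc struct, the DESC_* constants and map_read are
made-up names for illustration). A Bit Bucket descriptor transfers nothing
to the host, but the hole it describes still consumes logical data, so the
on-disk offset must keep advancing past it:

/*
 * Sketch only: per-descriptor offset accounting for a controller-to-host
 * (read) transfer.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum desc_type { DESC_DATA_BLOCK, DESC_BIT_BUCKET };

struct desc {
    enum desc_type type;
    uint64_t len; /* bytes of logical data covered by this descriptor */
};

/* Print, for each descriptor, which range of the on-disk data it covers. */
static void map_read(const struct desc *sgl, size_t n)
{
    uint64_t disk_off = 0;

    for (size_t i = 0; i < n; i++) {
        printf("%-8s disk_off=%" PRIu64 " len=%" PRIu64 "\n",
               sgl[i].type == DESC_BIT_BUCKET ? "discard" : "transfer",
               disk_off, sgl[i].len);

        /* A bit bucket transfers nothing to the host, but the hole still
         * consumes logical data: the on-disk offset advances either way. */
        disk_off += sgl[i].len;
    }
}

int main(void)
{
    /* 1 KiB data block, 2 KiB bit bucket, 1 KiB data block */
    struct desc sgl[] = {
        { DESC_DATA_BLOCK, 1024 },
        { DESC_BIT_BUCKET, 2048 },
        { DESC_DATA_BLOCK, 1024 },
    };

    map_read(sgl, sizeof(sgl) / sizeof(sgl[0]));
    return 0;
}

With this layout the second data block must be filled from on-disk offset
3072; dropping the bit bucket without advancing the offset would hand it
the data at offset 1024 instead, which is the kind of mis-accounting the
commit message describes.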

@@ -850,10 +850,6 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
         uint8_t type = NVME_SGL_TYPE(segment[i].type);
 
         switch (type) {
-        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
-            if (cmd->opcode == NVME_CMD_WRITE) {
-                continue;
-            }
         case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
             break;
         case NVME_SGL_DESCR_TYPE_SEGMENT:
@@ -886,10 +882,6 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
 
         trans_len = MIN(*len, dlen);
 
-        if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
-            goto next;
-        }
-
         addr = le64_to_cpu(segment[i].addr);
 
         if (UINT64_MAX - addr < dlen) {
@@ -901,7 +893,6 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
             return status;
         }
 
-next:
         *len -= trans_len;
     }
 
@@ -959,8 +950,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
         seg_len = le32_to_cpu(sgld->len);
 
         /* check the length of the (Last) Segment descriptor */
-        if ((!seg_len || seg_len & 0xf) &&
-            (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
+        if (!seg_len || seg_len & 0xf) {
             return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
         }
 
@@ -998,26 +988,20 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
         last_sgld = &segment[nsgld - 1];
 
         /*
-         * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
-         * then we are done.
+         * If the segment ends with a Data Block, then we are done.
          */
-        switch (NVME_SGL_TYPE(last_sgld->type)) {
-        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
-        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
+        if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
             status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd);
             if (status) {
                 goto unmap;
             }
 
             goto out;
-        default:
-            break;
         }
 
         /*
-         * If the last descriptor was not a Data Block or Bit Bucket, then the
-         * current segment must not be a Last Segment.
+         * If the last descriptor was not a Data Block, then the current
+         * segment must not be a Last Segment.
          */
         if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
             status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
@@ -7286,8 +7270,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
     id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;
     id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1);
-    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
-                           NVME_CTRL_SGLS_BITBUCKET);
+    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN);
 
     nvme_init_subnqn(n);
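
The last hunk also stops advertising the capability in the Identify
Controller data structure, so a compliant host will not attempt to send
bit bucket descriptors to this controller. A minimal host-side sketch of
the corresponding capability check (hypothetical helper; it assumes the
capability bit is bit 16 of the SGLS field, the "SGL Bit Bucket Descriptor
supported" bit, which is what QEMU's NVME_CTRL_SGLS_BITBUCKET encodes):

#include <stdbool.h>
#include <stdint.h>

/* Identify Controller SGLS field, bit 16: SGL Bit Bucket Descriptor supported */
#define SGLS_BIT_BUCKET_SUPPORTED (1u << 16)

static bool host_may_use_bit_buckets(uint32_t sgls)
{
    /* sgls: the SGLS field, already converted from little endian */
    return (sgls & SGLS_BIT_BUCKET_SUPPORTED) != 0;
}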