hw/block/nvme: refactor dma read/write

Refactor the nvme_dma_{read,write}_prp functions into a common function
taking a DMADirection parameter.
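
Call sites that used nvme_dma_read_prp (data flowing from the controller
to host memory, e.g. log pages and Identify data) now pass
DMA_DIRECTION_FROM_DEVICE; the nvme_dma_write_prp call site (the Set
Features timestamp) passes DMA_DIRECTION_TO_DEVICE. As a rough sketch of
the new call shape, with buf and len standing in for each call's buffer
and length arguments:

    nvme_dma_read_prp(n, buf, len, prp1, prp2);
    nvme_dma_write_prp(n, buf, len, prp1, prp2);

becomes

    nvme_dma_prp(n, buf, len, prp1, prp2, DMA_DIRECTION_FROM_DEVICE);
    nvme_dma_prp(n, buf, len, prp1, prp2, DMA_DIRECTION_TO_DEVICE);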

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Author: Klaus Jensen <k.jensen@samsung.com>
Date:   2020-02-23 16:03:34 +01:00
Commit: 794ef1b5a7
Parent: f06a6aa964

@@ -378,55 +378,53 @@ unmap:
     return status;
 }
-static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
-                                   uint64_t prp1, uint64_t prp2)
+static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+                             uint64_t prp1, uint64_t prp2, DMADirection dir)
 {
     QEMUSGList qsg;
     QEMUIOVector iov;
     uint16_t status = NVME_SUCCESS;
-    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
-        return NVME_INVALID_FIELD | NVME_DNR;
+    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
+    if (status) {
+        return status;
     }
+    /* assert that only one of qsg and iov carries data */
+    assert((qsg.nsg > 0) != (iov.niov > 0));
     if (qsg.nsg > 0) {
-        if (dma_buf_write(ptr, len, &qsg)) {
-            status = NVME_INVALID_FIELD | NVME_DNR;
+        uint64_t residual;
+        if (dir == DMA_DIRECTION_TO_DEVICE) {
+            residual = dma_buf_write(ptr, len, &qsg);
+        } else {
+            residual = dma_buf_read(ptr, len, &qsg);
         }
-        qemu_sglist_destroy(&qsg);
-    } else {
-        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
-            status = NVME_INVALID_FIELD | NVME_DNR;
-        }
-        qemu_iovec_destroy(&iov);
-    }
-    return status;
-}
-static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
-                                  uint64_t prp1, uint64_t prp2)
-{
-    QEMUSGList qsg;
-    QEMUIOVector iov;
-    uint16_t status = NVME_SUCCESS;
-    trace_pci_nvme_dma_read(prp1, prp2);
-    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
-        return NVME_INVALID_FIELD | NVME_DNR;
-    }
-    if (qsg.nsg > 0) {
-        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
+        if (unlikely(residual)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
         qemu_sglist_destroy(&qsg);
     } else {
-        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
+        size_t bytes;
+        if (dir == DMA_DIRECTION_TO_DEVICE) {
+            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
+        } else {
+            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
+        }
+        if (unlikely(bytes != len)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
         qemu_iovec_destroy(&iov);
     }
     return status;
 }
@@ -858,8 +856,8 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
         nvme_clear_events(n, NVME_AER_TYPE_SMART);
     }
-    return nvme_dma_read_prp(n, (uint8_t *) &smart + off, trans_len, prp1,
-                             prp2);
+    return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
+                        DMA_DIRECTION_FROM_DEVICE);
 }
 static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
@@ -880,8 +878,8 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
     trans_len = MIN(sizeof(fw_log) - off, buf_len);
-    return nvme_dma_read_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1,
-                             prp2);
+    return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
+                        DMA_DIRECTION_FROM_DEVICE);
 }
 static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
@@ -905,7 +903,8 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
     trans_len = MIN(sizeof(errlog) - off, buf_len);
-    return nvme_dma_read_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2);
+    return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
+                        DMA_DIRECTION_FROM_DEVICE);
 }
 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
@@ -1060,8 +1059,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
     trace_pci_nvme_identify_ctrl();
-    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
-                             prp1, prp2);
+    return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
+                        prp2, DMA_DIRECTION_FROM_DEVICE);
 }
 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
@@ -1080,8 +1079,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
     ns = &n->namespaces[nsid - 1];
-    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
-                             prp1, prp2);
+    return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
+                        prp2, DMA_DIRECTION_FROM_DEVICE);
 }
 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
@@ -1116,7 +1115,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
             break;
         }
     }
-    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
+    ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
+                       DMA_DIRECTION_FROM_DEVICE);
     g_free(list);
     return ret;
 }
@@ -1157,7 +1157,8 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeIdentify *c)
     ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
     stl_be_p(&ns_descrs->uuid.v, nsid);
-    return nvme_dma_read_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2);
+    return nvme_dma_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2,
+                        DMA_DIRECTION_FROM_DEVICE);
 }
 static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
@@ -1238,8 +1239,8 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
     uint64_t timestamp = nvme_get_timestamp(n);
-    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
-                             sizeof(timestamp), prp1, prp2);
+    return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
+                        prp2, DMA_DIRECTION_FROM_DEVICE);
 }
 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
@@ -1370,8 +1371,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
     uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
-    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
-                             sizeof(timestamp), prp1, prp2);
+    ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
+                       prp2, DMA_DIRECTION_TO_DEVICE);
     if (ret != NVME_SUCCESS) {
         return ret;
     }