Xen 2016/11/28
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJYPId9AAoJEIlPj0hw4a6QHrkQAOKcJBMIu+qnnYbGWaEMJ9a7
yk9L/5Qz13ahiuihzVRbwBQD4QjWjU67R/C/6iVdmirJDWTt7EcRWn4ewleoFuOR
jkCozxiRHThuEf+WdquoNxLqHIYpSRoVAhXQCEQLhgDyuq/wK4260V2yb5dVfs1u
p7mAXXBWEd+JkaDukZI1lwDPxh5vRXZmn8IyoM2YFxbcRH5Dqynonvr8xkxavKHK
f0BfnMhQ3DstmX0IaC+9NpW+75r7uRbZNjZKtSNXk7+IJddNqjlleUxE491JI1Y7
fkBqurvbbtreDn88A7lJc2LJEYP1fHIcHYXu52QDs96bQVTi4mvsgkTH0VSVJjlI
sGJtluQFtZJJ2Y5pwRu0UmMekPyJ5/Mmgt+ZJtvOOeZv9G+c+oqlWTjrszusjuMw
0n0wLd8MX2E34Po4Z56ey9E6ciQg+Fr9J6iwW+GVEiySl5J8EQsti74oZuT6YORN
qY6jSvyAFGaEad1cEVW5ibPpgdRFzlS5eY2uImGacOStNeoSV5hUVtdXX3CnD8oo
9WzUMuXiQL8fSw6wR0UOgVyCsvlotTW8mBuKeqOSBxcd+zWb5qNsugZmxVnYEGHC
FnmkZJZ79xIwGKYbYIrYRcXFJv6Smeiej0wG+2PAg0HK1yTJlQAu0CDFzScfLYot
hgUKIWZppgg6rN3yWcMn
=3ryE
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'sstabellini/tags/xen-20161128-tag' into staging

Xen 2016/11/28

# gpg: Signature made Mon 28 Nov 2016 07:37:33 PM GMT
# gpg:                using RSA key 0x894F8F4870E1AE90
# gpg: Good signature from "Stefano Stabellini <sstabellini@kernel.org>"
# gpg:                 aka "Stefano Stabellini <stefano.stabellini@eu.citrix.com>"
# Primary key fingerprint: D04E 33AB A51F 67BA 07D3 0AEA 894F 8F48 70E1 AE90

* sstabellini/tags/xen-20161128-tag:
  xen: ignore direction in bufioreq handling
  xen: slightly simplify bufioreq handling
  xen: fix quad word bufioreq handling
  xen_disk: split discard input to match internal representation

Message-id: alpine.DEB.2.10.1611281137590.21858@sstabellini-ThinkPad-X260
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 5a55760226
@@ -660,6 +660,38 @@ static void qemu_aio_complete(void *opaque, int ret)
     qemu_bh_schedule(ioreq->blkdev->bh);
 }
 
+static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
+                              uint64_t nr_sectors)
+{
+    struct XenBlkDev *blkdev = ioreq->blkdev;
+    int64_t byte_offset;
+    int byte_chunk;
+    uint64_t byte_remaining, limit;
+    uint64_t sec_start = sector_number;
+    uint64_t sec_count = nr_sectors;
+
+    /* Wrap around, or overflowing byte limit? */
+    if (sec_start + sec_count < sec_count ||
+        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
+        return false;
+    }
+
+    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
+    byte_offset = sec_start << BDRV_SECTOR_BITS;
+    byte_remaining = sec_count << BDRV_SECTOR_BITS;
+
+    do {
+        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
+        ioreq->aio_inflight++;
+        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
+                         qemu_aio_complete, ioreq);
+        byte_remaining -= byte_chunk;
+        byte_offset += byte_chunk;
+    } while (byte_remaining > 0);
+
+    return true;
+}
+
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
@@ -708,12 +740,10 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         break;
     case BLKIF_OP_DISCARD:
     {
-        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
-        ioreq->aio_inflight++;
-        blk_aio_pdiscard(blkdev->blk,
-                         discard_req->sector_number << BDRV_SECTOR_BITS,
-                         discard_req->nr_sectors << BDRV_SECTOR_BITS,
-                         qemu_aio_complete, ioreq);
+        struct blkif_request_discard *req = (void *)&ioreq->req;
+        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
+            goto err;
+        }
         break;
     }
     default:
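
The xen_disk change above replaces a single blk_aio_pdiscard() call with blk_split_discard(), because a guest may submit a discard covering more sectors than the block layer accepts in one request, so the byte range has to be validated and then walked in bounded chunks. Below is a minimal standalone sketch of that chunking arithmetic, not QEMU code: SECTOR_BITS, MAX_DISCARD_BYTES, split_discard() and the printf are stand-ins for BDRV_SECTOR_BITS, the BDRV_REQUEST_MAX_SECTORS byte limit, blk_split_discard() and the actual blk_aio_pdiscard() call.

/* Standalone sketch of splitting a guest discard into bounded chunks.
 * Constants and names are simplified stand-ins, not QEMU's. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_BITS        9                       /* 512-byte sectors */
#define MAX_DISCARD_BYTES  (0x7fffffffULL & ~511)  /* per-request byte limit */

static bool split_discard(uint64_t sector, uint64_t nr_sectors)
{
    /* Reject ranges that wrap around or exceed the signed 64-bit byte space,
     * mirroring the check at the top of blk_split_discard(). */
    if (sector + nr_sectors < nr_sectors ||
        sector + nr_sectors > (uint64_t)INT64_MAX >> SECTOR_BITS) {
        return false;
    }

    uint64_t offset = sector << SECTOR_BITS;
    uint64_t remaining = nr_sectors << SECTOR_BITS;

    while (remaining > 0) {
        uint64_t chunk = remaining > MAX_DISCARD_BYTES ? MAX_DISCARD_BYTES
                                                       : remaining;
        /* The real code issues blk_aio_pdiscard() here. */
        printf("discard offset=%" PRIu64 " bytes=%" PRIu64 "\n", offset, chunk);
        offset += chunk;
        remaining -= chunk;
    }
    return true;
}

int main(void)
{
    split_discard(0, 8 * 1024 * 1024);   /* 4 GiB range: split into chunks */
    split_discard(UINT64_MAX - 7, 16);   /* wrapping range: rejected */
    return 0;
}

Compiled on its own, the first call is issued as three pieces under these stand-in constants, while the wrapping range is caught by the overflow check and refused, which in the real driver makes the request fail instead of discarding the wrong bytes.
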
xen-hvm.c (22 changed lines)
@@ -995,6 +995,9 @@ static int handle_buffered_iopage(XenIOState *state)
     }
 
     memset(&req, 0x00, sizeof(req));
+    req.state = STATE_IOREQ_READY;
+    req.count = 1;
+    req.dir = IOREQ_WRITE;
 
     for (;;) {
         uint32_t rdptr = buf_page->read_pointer, wrptr;
@@ -1009,18 +1012,16 @@ static int handle_buffered_iopage(XenIOState *state)
             break;
         }
         buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
-        req.size = 1UL << buf_req->size;
-        req.count = 1;
+        req.size = 1U << buf_req->size;
         req.addr = buf_req->addr;
         req.data = buf_req->data;
-        req.state = STATE_IOREQ_READY;
-        req.dir = buf_req->dir;
-        req.df = 1;
         req.type = buf_req->type;
-        req.data_is_ptr = 0;
         xen_rmb();
         qw = (req.size == 8);
         if (qw) {
+            if (rdptr + 1 == wrptr) {
+                hw_error("Incomplete quad word buffered ioreq");
+            }
             buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                            IOREQ_BUFFER_SLOT_NUM];
             req.data |= ((uint64_t)buf_req->data) << 32;
@@ -1029,6 +1030,15 @@ static int handle_buffered_iopage(XenIOState *state)
 
         handle_ioreq(state, &req);
 
+        /* Only req.data may get updated by handle_ioreq(), albeit even that
+         * should not happen as such data would never make it to the guest (we
+         * can only usefully see writes here after all).
+         */
+        assert(req.state == STATE_IOREQ_READY);
+        assert(req.count == 1);
+        assert(req.dir == IOREQ_WRITE);
+        assert(!req.data_is_ptr);
+
         atomic_add(&buf_page->read_pointer, qw + 1);
     }
 
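
The bufioreq fixes above hinge on how an 8-byte ("quad word") request is stored in the buffered-ioreq ring: it spans two consecutive 32-bit slots, so the consumer must see both halves before acting and must advance its read pointer by two slots instead of one. The following standalone sketch, not QEMU code, models just that reassembly; struct slot, SLOT_COUNT and read_buffered() are simplified stand-ins for Xen's buf_ioreq_t, IOREQ_BUFFER_SLOT_NUM and the loop body of handle_buffered_iopage().

/* Standalone sketch of quad-word buffered ioreq reassembly. */
#include <stdint.h>
#include <stdio.h>

#define SLOT_COUNT 511   /* stand-in for IOREQ_BUFFER_SLOT_NUM */

struct slot {
    uint8_t  size;       /* log2 of the access size: 3 means 8 bytes */
    uint32_t data;       /* low half, or high half in the follow-on slot */
};

static uint64_t read_buffered(const struct slot *ring,
                              uint32_t *rdptr, uint32_t wrptr)
{
    const struct slot *req = &ring[*rdptr % SLOT_COUNT];
    uint64_t data = req->data;
    int qw = (1u << req->size) == 8;

    if (qw) {
        /* An 8-byte request needs its second half already published;
         * otherwise the ring is corrupt (QEMU calls hw_error() here). */
        if (*rdptr + 1 == wrptr) {
            fprintf(stderr, "incomplete quad word buffered ioreq\n");
            return 0;
        }
        data |= (uint64_t)ring[(*rdptr + 1) % SLOT_COUNT].data << 32;
    }

    *rdptr += qw + 1;    /* consume one slot, or two for a quad word */
    return data;
}

int main(void)
{
    struct slot ring[SLOT_COUNT] = {
        [0] = { .size = 3, .data = 0xdeadbeef },   /* low 32 bits  */
        [1] = { .size = 0, .data = 0x01234567 },   /* high 32 bits */
    };
    uint32_t rdptr = 0;
    uint64_t data = read_buffered(ring, &rdptr, 2);

    printf("data = 0x%llx, rdptr = %u\n", (unsigned long long)data, rdptr);
    return 0;
}

With the sample ring in main(), the sketch prints data = 0x1234567deadbeef and leaves the read pointer at 2, mirroring the atomic_add(&buf_page->read_pointer, qw + 1) step in the patched loop.
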