/*
 * xen paravirt network card backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/wait.h>

#include "hw/hw.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/util.h"
#include "hw/xen/xen-legacy-backend.h"

#include <xen/io/netif.h>

/* ------------------------------------------------------------- */

struct XenNetDev {
    struct XenLegacyDevice xendev;  /* must be first */
    char                  *mac;
    int                   tx_work;
    int                   tx_ring_ref;
    int                   rx_ring_ref;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    netif_tx_back_ring_t  tx_ring;
    netif_rx_back_ring_t  rx_ring;
    NICConf               conf;
    NICState              *nic;
};

/* ------------------------------------------------------------- */

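/*
 * Queue a tx response on the shared ring and notify the frontend if
 * needed.  If the response index catches up with req_cons, check once
 * more for requests so net_tx_packets() does another pass.
 */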
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}
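
/* Report a failed tx request back to the frontend. */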
static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end) {
            break;
        }
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}
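
/*
 * Main transmit loop: consume tx requests from the shared ring, map the
 * granted page, hand the payload to the qemu net layer (using a bounce
 * buffer to fill in the checksum when the frontend left it blank) and
 * post a response for each request.
 */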
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp)) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) {
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
                                        PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xen_be_unmap_grant_ref(&netdev->xendev, page);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}

/* ------------------------------------------------------------- */

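/*
 * Queue an rx response on the shared ring and notify the frontend if
 * needed.
 */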
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = req->id;
    resp->status = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_pv_printf(&netdev->xendev, 3,
                  "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }
}

#define NET_IP_ALIGN  2

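/*
 * Receive path: called by the qemu net layer with a packet for the guest.
 * Copy it into the page granted by the next pending rx request and post a
 * response.  Returns 0 (try again later) when no rx request is available.
 */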
static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        return 0;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_pv_printf(&netdev->xendev, 0,
                      "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xen_be_unmap_grant_ref(&netdev->xendev, page);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};
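
/* Read the xenstore config (mac address) and create the qemu NIC. */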
static int net_init(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    /* read xenstore entries */
    if (netdev->mac == NULL) {
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
    }

    /* do we have all we need? */
    if (netdev->mac == NULL) {
        return -1;
    }

    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
        return -1;
    }

    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
                               "xen", NULL, netdev);

    snprintf(qemu_get_queue(netdev->nic)->info_str,
             sizeof(qemu_get_queue(netdev->nic)->info_str),
             "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}
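
/*
 * Connect to the frontend: read ring refs and event channel from
 * xenstore, map both shared rings and bind the event channel.
 */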
static int net_connect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1) {
        return 1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1) {
        return -1;
    }

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
        rx_copy = 0;
    }
    if (rx_copy == 0) {
        xen_pv_printf(&netdev->xendev, 0,
                      "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->tx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->txs) {
        return -1;
    }
    netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->rx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
        return -1;
    }
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}
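
/* Tear down the event channel binding and unmap the shared rings. */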
static void net_disconnect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_pv_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
        netdev->rxs = NULL;
    }
}
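
/* Event channel notification: process pending tx requests and flush rx. */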
static void net_event(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
    qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
}
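
/* Release the qemu NIC and per-device allocations. */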
static int net_free(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    if (netdev->nic) {
        qemu_del_nic(netdev->nic);
        netdev->nic = NULL;
    }
    g_free(netdev->mac);
    netdev->mac = NULL;
    return 0;
}

/* ------------------------------------------------------------- */

struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .initialise = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};