2015-09-07 12:39:30 +03:00
|
|
|
/*
|
|
|
|
* i.MX Fast Ethernet Controller emulation.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
|
|
|
|
*
|
|
|
|
* Based on Coldfire Fast Ethernet Controller emulation.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2007 CodeSourcery.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with this program; if not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
2016-01-26 21:17:05 +03:00
|
|
|
#include "qemu/osdep.h"
|
2019-08-12 08:23:42 +03:00
|
|
|
#include "hw/irq.h"
|
2015-09-07 12:39:30 +03:00
|
|
|
#include "hw/net/imx_fec.h"
|
2019-08-12 08:23:51 +03:00
|
|
|
#include "hw/qdev-properties.h"
|
2019-08-12 08:23:45 +03:00
|
|
|
#include "migration/vmstate.h"
|
2015-09-07 12:39:30 +03:00
|
|
|
#include "sysemu/dma.h"
|
2015-12-15 15:16:16 +03:00
|
|
|
#include "qemu/log.h"
|
2019-05-23 17:35:07 +03:00
|
|
|
#include "qemu/module.h"
|
2016-05-30 20:26:10 +03:00
|
|
|
#include "net/checksum.h"
|
|
|
|
#include "net/eth.h"
|
2020-06-16 12:32:29 +03:00
|
|
|
#include "trace.h"
|
2015-09-07 12:39:30 +03:00
|
|
|
|
|
|
|
/* For crc32 */
|
|
|
|
#include <zlib.h>
|
|
|
|
|
2017-02-02 13:46:24 +03:00
|
|
|
#define IMX_MAX_DESC 1024
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * Fallback register name used by the tracing helpers when the index does
 * not match a known register: formats the raw index into a static scratch
 * buffer.  NOTE: the returned pointer refers to static storage, so it is
 * only valid until the next call (sufficient for trace formatting).
 */
static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    /*
     * Use snprintf so the bound is explicit, and PRIu32 to match the
     * uint32_t argument ("%d" expects a signed int).
     */
    snprintf(tmp, sizeof(tmp), "index %" PRIu32, index);
    return tmp;
}
|
|
|
|
|
|
|
|
/* Symbolic name for a FEC-only register index, for trace output. */
static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    const char *name;

    switch (index) {
    case ENET_FRBR:
        name = "FRBR";
        break;
    case ENET_FRSR:
        name = "FRSR";
        break;
    case ENET_MIIGSK_CFGR:
        name = "MIIGSK_CFGR";
        break;
    case ENET_MIIGSK_ENR:
        name = "MIIGSK_ENR";
        break;
    default:
        name = imx_default_reg_name(s, index);
        break;
    }

    return name;
}
|
|
|
|
|
|
|
|
/* Symbolic name for an ENET-only register index, for trace output. */
static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    const char *name;

    switch (index) {
    case ENET_RSFL:
        name = "RSFL";
        break;
    case ENET_RSEM:
        name = "RSEM";
        break;
    case ENET_RAEM:
        name = "RAEM";
        break;
    case ENET_RAFL:
        name = "RAFL";
        break;
    case ENET_TSEM:
        name = "TSEM";
        break;
    case ENET_TAEM:
        name = "TAEM";
        break;
    case ENET_TAFL:
        name = "TAFL";
        break;
    case ENET_TIPG:
        name = "TIPG";
        break;
    case ENET_FTRL:
        name = "FTRL";
        break;
    case ENET_TACC:
        name = "TACC";
        break;
    case ENET_RACC:
        name = "RACC";
        break;
    case ENET_ATCR:
        name = "ATCR";
        break;
    case ENET_ATVR:
        name = "ATVR";
        break;
    case ENET_ATOFF:
        name = "ATOFF";
        break;
    case ENET_ATPER:
        name = "ATPER";
        break;
    case ENET_ATCOR:
        name = "ATCOR";
        break;
    case ENET_ATINC:
        name = "ATINC";
        break;
    case ENET_ATSTMP:
        name = "ATSTMP";
        break;
    case ENET_TGSR:
        name = "TGSR";
        break;
    case ENET_TCSR0:
        name = "TCSR0";
        break;
    case ENET_TCCR0:
        name = "TCCR0";
        break;
    case ENET_TCSR1:
        name = "TCSR1";
        break;
    case ENET_TCCR1:
        name = "TCCR1";
        break;
    case ENET_TCSR2:
        name = "TCSR2";
        break;
    case ENET_TCCR2:
        name = "TCCR2";
        break;
    case ENET_TCSR3:
        name = "TCSR3";
        break;
    case ENET_TCCR3:
        name = "TCCR3";
        break;
    default:
        name = imx_default_reg_name(s, index);
        break;
    }

    return name;
}
|
2016-05-30 20:26:05 +03:00
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * Symbolic name for a register index, for trace output.  Registers shared
 * by FEC and ENET are named here; anything else is delegated to the
 * model-specific helper.
 */
static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    const char *name;

    switch (index) {
    case ENET_EIR:
        name = "EIR";
        break;
    case ENET_EIMR:
        name = "EIMR";
        break;
    case ENET_RDAR:
        name = "RDAR";
        break;
    case ENET_TDAR:
        name = "TDAR";
        break;
    case ENET_ECR:
        name = "ECR";
        break;
    case ENET_MMFR:
        name = "MMFR";
        break;
    case ENET_MSCR:
        name = "MSCR";
        break;
    case ENET_MIBC:
        name = "MIBC";
        break;
    case ENET_RCR:
        name = "RCR";
        break;
    case ENET_TCR:
        name = "TCR";
        break;
    case ENET_PALR:
        name = "PALR";
        break;
    case ENET_PAUR:
        name = "PAUR";
        break;
    case ENET_OPD:
        name = "OPD";
        break;
    case ENET_IAUR:
        name = "IAUR";
        break;
    case ENET_IALR:
        name = "IALR";
        break;
    case ENET_GAUR:
        name = "GAUR";
        break;
    case ENET_GALR:
        name = "GALR";
        break;
    case ENET_TFWR:
        name = "TFWR";
        break;
    case ENET_RDSR:
        name = "RDSR";
        break;
    case ENET_TDSR:
        name = "TDSR";
        break;
    case ENET_MRBR:
        name = "MRBR";
        break;
    default:
        name = s->is_fec ? imx_fec_reg_name(s, index)
                         : imx_enet_reg_name(s, index);
        break;
    }

    return name;
}
|
|
|
|
|
2018-01-11 16:25:37 +03:00
|
|
|
/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    /* True when the device instance was configured with >1 TX ring. */
    return s->tx_ring_num > 1;
}
|
|
|
|
|
|
|
|
/* Migration subsection holding the extra TX descriptors (rings 1 and 2). */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    /* Only sent when the device actually has multiple TX rings. */
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * Main migration description: MMIO registers, ring pointers and the
 * emulated PHY state.  TX descriptors for rings 1/2 live in a subsection
 * so streams stay compatible with single-ring versions of the device.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
|
|
|
|
|
|
|
|
/*
 * Bits of the emulated PHY interrupt source (phy_int) and interrupt mask
 * (phy_int_mask) registers, exposed via MII registers 29 and 30.
 */
#define PHY_INT_ENERGYON (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
#define PHY_INT_FAULT (1 << 5)
#define PHY_INT_DOWN (1 << 4)
#define PHY_INT_AUTONEG_LP (1 << 3)
#define PHY_INT_PARFAULT (1 << 2)
#define PHY_INT_AUTONEG_PAGE (1 << 1)
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
static void imx_eth_update(IMXFECState *s);
|
2015-09-07 12:39:30 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The MII phy could raise a GPIO to the processor which in turn
|
|
|
|
 * could be handled as an interrupt by the OS.
|
|
|
|
* For now we don't handle any GPIO/interrupt line, so the OS will
|
|
|
|
* have to poll for the PHY status.
|
|
|
|
*/
|
2020-06-16 12:32:29 +03:00
|
|
|
static void imx_phy_update_irq(IMXFECState *s)
{
    /* PHY interrupt state is folded into the overall MAC IRQ update. */
    imx_eth_update(s);
}
|
|
|
|
|
2020-06-16 12:32:29 +03:00
|
|
|
/*
 * Mirror the net client's link state into the emulated PHY status and
 * interrupt bits, then re-evaluate the interrupt lines.
 */
static void imx_phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    bool link_down = qemu_get_queue(s->nic)->link_down;

    if (link_down) {
        trace_imx_phy_update_link("down");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        trace_imx_phy_update_link("up");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON | PHY_INT_AUTONEG_COMPLETE;
    }

    imx_phy_update_irq(s);
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/* Net-client link-status callback: propagate into the PHY emulation. */
static void imx_eth_set_link(NetClientState *nc)
{
    imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}
|
|
|
|
|
2020-06-16 12:32:29 +03:00
|
|
|
/*
 * Reset the emulated PHY to its power-on register values, then re-derive
 * the link-dependent status/interrupt bits from the current link state.
 */
static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    /* Power-on defaults reported back by imx_phy_read(). */
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}
|
|
|
|
|
2020-06-16 12:32:29 +03:00
|
|
|
/*
 * Read one MII management register of the emulated PHY.
 *
 * @reg encodes both the PHY address (reg / 32) and the register number
 * (reg % 32).  Reads addressed to any PHY other than s->phy_num log a
 * guest error and return 0.
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (phy != s->phy_num) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad phy num %u\n",
                      TYPE_IMX_FEC, __func__, phy);
        return 0;
    }

    reg %= 32;

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        val = s->phy_int;
        /* Reading the interrupt source register clears it. */
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}
|
|
|
|
|
2020-06-16 12:32:29 +03:00
|
|
|
/*
 * Write one MII management register of the emulated PHY.
 *
 * @reg encodes both the PHY address (reg / 32) and the register number
 * (reg % 32).  Writes addressed to any PHY other than s->phy_num log a
 * guest error and are ignored.
 */
static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    uint32_t phy = reg / 32;

    if (phy != s->phy_num) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad phy num %u\n",
                      TYPE_IMX_FEC, __func__, phy);
        return;
    }

    reg %= 32;

    trace_imx_phy_write(val, phy, reg);

    switch (reg) {
    case 0: /* Basic Control */
        if (val & 0x8000) {
            /* Software reset bit: reset the whole PHY. */
            imx_phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4: /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30: /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        imx_phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        /* Fixed log-message typo: "[%s.phy)" -> "[%s.phy]". */
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}
|
|
|
|
|
|
|
|
/* DMA-read one FEC buffer descriptor from guest memory at @addr. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
|
|
|
|
|
|
|
|
/* DMA-write one FEC buffer descriptor back to guest memory at @addr. */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/* DMA-read one (larger) ENET buffer descriptor from guest memory. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
|
|
|
|
|
|
|
|
/* DMA-write one ENET buffer descriptor back to guest memory at @addr. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}
|
|
|
|
|
|
|
|
/* Recompute both interrupt output lines from pending-and-enabled events. */
static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    uint32_t active = s->regs[ENET_EIR] & s->regs[ENET_EIMR];

    qemu_set_irq(s->irq[1],
                 (active & (ENET_INT_MAC | ENET_INT_TS_TIMER)) ? 1 : 0);
    qemu_set_irq(s->irq[0], (active & ENET_INT_MAC) ? 1 : 0);
}
|
|
|
|
|
|
|
|
/*
 * Walk the FEC TX descriptor ring, gather buffers into s->frame and send
 * each complete frame out of the NIC.  Processing stops at the first
 * descriptor not owned by the device (ENET_BD_R clear) or after
 * IMX_MAX_DESC descriptors (guards against a looped ring).
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        /* Clamp over-long frames and flag a babbling-transmit error. */
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        /* Return descriptor ownership to the guest. */
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the ring base. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
|
|
|
|
|
2018-01-11 16:25:37 +03:00
|
|
|
/*
 * Walk one ENET TX descriptor ring (selected by the TDAR register @index
 * that was written) and transmit complete frames, applying the optional
 * protocol/IP checksum insertion requested per descriptor.
 *
 * @index must be ENET_TDAR, ENET_TDAR1 or ENET_TDAR2; anything else is a
 * programming error in the caller and aborts.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    /* Select per-ring interrupt bits and ring-base register. */
    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */

            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        /* Clamp over-long frames and flag a babbling-transmit error. */
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Protocol (TCP/UDP) checksum insertion, IPv4 only. */
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            /* IP header checksum insertion. */
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        /* Return descriptor ownership to the guest. */
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the ring base. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
|
|
|
|
|
2018-01-11 16:25:37 +03:00
|
|
|
/*
 * Dispatch a TX kick to the right descriptor-format handler: the enhanced
 * ENET path is used only when this is not a FEC and 1588 mode is enabled.
 */
static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    bool enhanced = !s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588);

    if (enhanced) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}
|
|
|
|
|
imx_fec: Change queue flushing heuristics
In current implementation, packet queue flushing logic seem to suffer
from a deadlock like scenario if a packet is received by the interface
before before Rx ring is initialized by Guest's driver. Consider the
following sequence of events:
1. A QEMU instance is started against a TAP device on Linux
host, running Linux guest, e. g., something to the effect
of:
qemu-system-arm \
-net nic,model=imx.fec,netdev=lan0 \
netdev tap,id=lan0,ifname=tap0,script=no,downscript=no \
... rest of the arguments ...
2. Once QEMU starts, but before guest reaches the point where
FEC deriver is done initializing the HW, Guest, via TAP
interface, receives a number of multicast MDNS packets from
Host (not necessarily true for every OS, but it happens at
least on Fedora 25)
3. Recieving a packet in such a state results in
imx_eth_can_receive() returning '0', which in turn causes
tap_send() to disable corresponding event (tap.c:203)
4. Once Guest's driver reaches the point where it is ready to
recieve packets it prepares Rx ring descriptors and writes
ENET_RDAR_RDAR to ENET_RDAR register to indicate to HW that
more descriptors are ready. And at this points emulation
layer does this:
s->regs[index] = ENET_RDAR_RDAR;
imx_eth_enable_rx(s);
which, combined with:
if (!s->regs[ENET_RDAR]) {
qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
results in Rx queue never being flushed and corresponding
I/O event beign disabled.
To prevent the problem, change the code to always flush packet queue
when ENET_RDAR transitions 0 -> ENET_RDAR_RDAR.
Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Philippe Mathieu-Daudé <f4bug@amsat.org>
Cc: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
Cc: yurovsky@gmail.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Andrey Smirnov <andrew.smirnov@gmail.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2018-01-11 16:25:35 +03:00
|
|
|
/*
 * Recompute RDAR from the current RX descriptor's ownership bit and,
 * when @flush is set and a descriptor is available, flush packets queued
 * while reception was blocked.  Always flushing on the 0 -> RDAR
 * transition avoids a stall where the backend's I/O event was disabled
 * before the guest set up its RX ring.
 */
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    /* RDAR reads as "active" only while an empty descriptor is available. */
    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * DeviceState reset handler: restore the register file to its reset
 * values (model-specific for FEC vs ENET), seed the MAC-address
 * registers from the configured MAC, and reset the PHY.
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    /* PALR/PAUR hold the station MAC address; PAUR's low half is 0x8808. */
    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    /* Model-specific reset values (NOTE: magic constants from the ref
     * manuals; see i.MX25 FEC / i.MX6 ENET documentation). */
    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/* Default read handler: unknown registers log a guest error and read 0. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    uint32_t offset = index * 4;

    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, offset);
    return 0;
}
|
|
|
|
|
|
|
|
/* Read handler for registers that exist only on the FEC variant. */
static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    uint32_t value;

    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        value = s->regs[index];
        break;
    default:
        value = imx_default_read(s, index);
        break;
    }

    return value;
}
|
|
|
|
|
|
|
|
/* Read handler for registers that exist only on the ENET variant. */
static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    uint32_t value;

    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        value = s->regs[index];
        break;
    default:
        value = imx_default_read(s, index);
        break;
    }

    return value;
}
|
|
|
|
|
|
|
|
/*
 * MMIO read dispatcher.  Registers common to FEC and ENET are read here;
 * model-specific indices are delegated to imx_fec_read()/imx_enet_read().
 */
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    /* Registers are 32 bits wide; convert byte offset to index. */
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * Fallback handler for writes to unknown/unimplemented register offsets.
 * The write is discarded and logged as a guest error.
 *
 * @index: 32-bit word index of the register (offset / 4); the message
 *         reports the byte offset, hence the "* 4".
 */
static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
}
|
|
|
|
|
|
|
|
/*
 * Write handler for registers that exist only on the older FEC variant.
 * Unknown offsets fall through to imx_default_write() (logged, discarded).
 */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        /* Writable bits 2..9 only; bit 10 reads back as always set. */
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        /* MII gasket configuration: keep only the implemented bits. */
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Enabling the gasket (bit 1) also reports it as ready (bit 2). */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
|
|
|
|
|
|
|
|
/*
 * Write handler for registers that exist only on the newer ENET variant
 * (FIFO thresholds, accelerator config and the IEEE 1588 timer block).
 * Unknown offsets fall through to imx_default_write() (logged, discarded).
 */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        /* RX/TX FIFO watermark registers: 9-bit thresholds. */
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        /* Transmit inter-packet gap: 5-bit field. */
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        /* Frame truncation length: 14-bit field. */
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        /* Transmit accelerator enables (implemented bits only). */
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        /* Receive accelerator enables, including SHIFT16. */
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        /* 1588 timer control: keep implemented control bits. */
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        /* Timer value/offset/period: fully writable. */
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        /* Timer correction counter: 31-bit field. */
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        /* Timer increment values: two 7-bit fields. */
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        /* Timer channel control/status: TF flag is write-1-to-clear,
         * the remaining implemented bits are plain read/write. */
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        /* Timer channel compare/capture: fully writable. */
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
|
|
|
|
|
|
|
|
/*
 * MMIO write dispatcher for the FEC/ENET register bank.
 *
 * Handles the registers common to both controller flavours, including the
 * side effects they trigger (MDIO transactions, TX kick, soft reset, MAC
 * address mirroring).  Variant-only registers are routed to
 * imx_fec_write()/imx_enet_write().  Every path that stores to a register
 * ends by re-evaluating the interrupt lines via imx_eth_update(), except
 * the variant fallthrough which returns directly.
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    /* TDAR1/TDAR2/TDSR1/TDSR2 only exist when multiple TX rings are
     * configured ("tx-ring-num" property). */
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt event register: write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* RX descriptor ring "kick": only meaningful while the MAC is
         * enabled; the 0 -> RDAR transition (re)enables reception and
         * flushes any queued packets. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        /* TX kick: we transmit synchronously, so TDAR is set for the
         * duration of imx_eth_do_tx() and cleared again afterwards. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            /* Soft reset: device reset handles everything, skip update. */
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* Disabling the MAC rewinds both descriptor rings. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        /* MDIO management frame: bit 29 distinguishes read from write,
         * bits 18..27 carry the PHY/register address, bits 0..15 the
         * data.  The transaction completes immediately. */
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB.  */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately.  */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* MAC address low word: mirror into the NIC configuration. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        /* MAC address high half + pause frame type field. */
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        /* FEC keeps 2 watermark bits, ENET has more plus STRFWD. */
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Descriptor ring bases: FEC descriptors are 8 bytes (align 4),
         * ENET descriptors are larger (align 8). */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        /* Maximum receive buffer size, 16-byte granular. */
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        /* Variant-specific register; those helpers do not need the
         * interrupt re-evaluation below, so return directly. */
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
|
|
|
|
|
2020-03-05 20:56:49 +03:00
|
|
|
/*
 * Flow-control callback for the net layer: packets can be delivered
 * only while the guest has armed the RX descriptor ring (ENET_RDAR
 * non-zero).  When this returns false the net core queues the packet;
 * the queue is flushed again from the ENET_RDAR write path.
 */
static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return s->regs[ENET_RDAR] != 0;
}
|
|
|
|
|
|
|
|
/*
 * Receive path for the FEC variant: append the Ethernet FCS, then copy
 * the frame into the guest RX descriptor ring, chunked by the buffer
 * size programmed in ENET_MRBR, raising RXB/RXF events as buffers and
 * the final descriptor are filled.
 *
 * Returns @len (the frame is always consumed), or 0 if reception is not
 * armed and the packet must be dropped.
 */
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC.  */
    size += 4;
    /*
     * Compute the FCS over the frame data only (len bytes).  Computing
     * it over 'size' would read 4 bytes past the end of 'buf'.
     */
    crc = cpu_to_be32(crc32(~0, buf, len));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated.  */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out.  */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC.  */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            /* Write out the (tail of the) CRC we appended. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: go back to the start of the ring. */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
|
|
|
|
|
|
|
|
/*
 * Receive path for the ENET variant.  Same structure as
 * imx_fec_receive(), with two additions: the SHIFT16 receive
 * accelerator (two zero bytes prepended so the IP payload is 4-byte
 * aligned) and per-descriptor interrupt enables (ENET_BD_RX_INT).
 *
 * Returns @len (the frame is always consumed), or 0 if reception is not
 * armed and the packet must be dropped.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC.  */
    size += 4;
    /*
     * Compute the FCS over the frame data only (len bytes).  Computing
     * it over 'size' would read 4 bytes past the end of 'buf'.
     */
    crc = cpu_to_be32(crc32(~0, buf, len));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        /* Account for the two alignment bytes prepended below. */
        size += 2;
    }

    /* Huge frames are truncated.  */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out.  */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC.  */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            /* Write out the (tail of the) CRC we appended. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: go back to the start of the ring. */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
|
|
|
|
size_t len)
|
|
|
|
{
|
|
|
|
IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
|
|
|
|
|
|
|
|
if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
|
|
|
|
return imx_enet_receive(nc, buf, len);
|
|
|
|
} else {
|
|
|
|
return imx_fec_receive(nc, buf, len);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* MMIO access table: all registers are 32 bits wide and must be
 * accessed with aligned 32-bit loads/stores. */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * Net-layer teardown callback: the NIC backend is going away, so drop
 * our back-pointer to it.
 */
static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *state = IMX_FEC(qemu_get_nic_opaque(nc));

    state->nic = NULL;
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/* Callbacks registered with the QEMU net core for this NIC model. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
|
|
|
|
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * Realize: map the register bank, export the two IRQ lines and create
 * the backing NIC with the configured (or default) MAC address.
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    /* irq[0]: MAC events; irq[1]: second line used by some SoCs
     * (NOTE(review): exact split is defined by imx_eth_update — confirm). */
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/* User-configurable properties:
 *  - standard NIC properties (MAC address, netdev backend, ...)
 *  - "tx-ring-num": number of TX descriptor rings (1 by default;
 *    values > 1 enable the TDAR1/TDAR2/TDSR1/TDSR2 registers)
 *  - "phy-num": MDIO address of the attached PHY */
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_END_OF_LIST(),
};
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/*
 * Common class initialization shared by the FEC and ENET types:
 * wire up realize/reset, migration state and properties.
 */
static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "i.MX FEC/ENET Ethernet Controller";
    dc->realize = imx_eth_realize;
    dc->reset = imx_eth_reset;
    dc->vmsd = &vmstate_imx_eth;
    device_class_set_props(dc, imx_eth_properties);
}
|
|
|
|
|
|
|
|
/* Instance init for the FEC flavour: mark the device as legacy FEC. */
static void imx_fec_init(Object *obj)
{
    IMX_FEC(obj)->is_fec = true;
}
|
|
|
|
|
|
|
|
/* Instance init for the ENET flavour: mark the device as ENET. */
static void imx_enet_init(Object *obj)
{
    IMX_FEC(obj)->is_fec = false;
}
|
|
|
|
|
|
|
|
/* QOM type for the legacy FEC controller. */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
|
|
|
|
|
|
|
|
/* QOM type for the ENET controller; subclasses the FEC type and only
 * overrides instance init (class_init and state are inherited). */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
/* Register both controller flavours with the QOM type system. */
static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}
|
|
|
|
|
2016-05-30 20:26:10 +03:00
|
|
|
type_init(imx_eth_register_types)
|