summaryrefslogtreecommitdiffstats
path: root/drivers/virtio
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/virtio')
-rw-r--r--drivers/virtio/Kconfig62
-rw-r--r--drivers/virtio/Makefile11
-rw-r--r--drivers/virtio/virtio-uclass.c369
-rw-r--r--drivers/virtio/virtio_blk.c137
-rw-r--r--drivers/virtio/virtio_blk.h129
-rw-r--r--drivers/virtio/virtio_mmio.c413
-rw-r--r--drivers/virtio/virtio_mmio.h129
-rw-r--r--drivers/virtio/virtio_net.c239
-rw-r--r--drivers/virtio/virtio_net.h268
-rw-r--r--drivers/virtio/virtio_pci.h173
-rw-r--r--drivers/virtio/virtio_pci_legacy.c421
-rw-r--r--drivers/virtio/virtio_pci_modern.c609
-rw-r--r--drivers/virtio/virtio_ring.c358
-rw-r--r--drivers/virtio/virtio_sandbox.c233
14 files changed, 3551 insertions, 0 deletions
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
new file mode 100644
index 0000000000..a9d5fd07b7
--- /dev/null
+++ b/drivers/virtio/Kconfig
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+# Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+#
+# VirtIO is a virtualization standard for network and disk device drivers
+# where just the guest's device driver "knows" it is running in a virtual
+# environment, and cooperates with the hypervisor. This enables guests to
+# get high performance network and disk operations, and gives most of the
+# performance benefits of paravirtualization. In the U-Boot case, the guest
+# is U-Boot itself, while the virtual environments are normally QEMU targets
+# like ARM, RISC-V and x86.
+#
+# See http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.pdf for
+# the VirtIO specification v1.0.
+
+menu "VirtIO Drivers"
+
+config VIRTIO
+ bool
+ help
+ This option is selected by any driver which implements the virtio
+ transport, such as CONFIG_VIRTIO_MMIO or CONFIG_VIRTIO_PCI.
+
+config VIRTIO_MMIO
+ bool "Platform bus driver for memory mapped virtio devices"
+ select VIRTIO
+ help
+ This driver provides support for memory mapped virtio
+ platform device driver.
+
+config VIRTIO_PCI
+ bool "PCI driver for virtio devices"
+ depends on DM_PCI
+ select VIRTIO
+ help
+ This driver provides support for virtio based paravirtual device
+ drivers over PCI.
+
+config VIRTIO_SANDBOX
+ bool "Sandbox driver for virtio devices"
+ depends on SANDBOX
+ select VIRTIO
+ help
+ This driver provides support for Sandbox implementation of virtio
+ transport driver which is used for testing purpose only.
+
+config VIRTIO_NET
+ bool "virtio net driver"
+ depends on VIRTIO
+ help
+ This is the virtual net driver for virtio. It can be used with
+ QEMU based targets.
+
+config VIRTIO_BLK
+ bool "virtio block driver"
+ depends on VIRTIO
+ help
+ This is the virtual block driver for virtio. It can be used with
+ QEMU based targets.
+
+endmenu
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
new file mode 100644
index 0000000000..4579044ae3
--- /dev/null
+++ b/drivers/virtio/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+# Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+
+obj-y += virtio-uclass.o virtio_ring.o
+obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
+obj-$(CONFIG_VIRTIO_PCI) += virtio_pci_legacy.o virtio_pci_modern.o
+obj-$(CONFIG_VIRTIO_SANDBOX) += virtio_sandbox.o
+obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
diff --git a/drivers/virtio/virtio-uclass.c b/drivers/virtio/virtio-uclass.c
new file mode 100644
index 0000000000..34397d7dbb
--- /dev/null
+++ b/drivers/virtio/virtio-uclass.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * VirtIO is a virtualization standard for network and disk device drivers
+ * where just the guest's device driver "knows" it is running in a virtual
+ * environment, and cooperates with the hypervisor. This enables guests to
+ * get high performance network and disk operations, and gives most of the
+ * performance benefits of paravirtualization. In the U-Boot case, the guest
+ * is U-Boot itself, while the virtual environments are normally QEMU targets
+ * like ARM, RISC-V and x86.
+ *
+ * See http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.pdf for
+ * the VirtIO specification v1.0.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <dm/lists.h>
+
+static const char *const virtio_drv_name[VIRTIO_ID_MAX_NUM] = {
+ [VIRTIO_ID_NET] = VIRTIO_NET_DRV_NAME,
+ [VIRTIO_ID_BLOCK] = VIRTIO_BLK_DRV_NAME,
+};
+
+int virtio_get_config(struct udevice *vdev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->get_config(vdev->parent, offset, buf, len);
+}
+
+int virtio_set_config(struct udevice *vdev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->set_config(vdev->parent, offset, buf, len);
+}
+
+int virtio_generation(struct udevice *vdev, u32 *counter)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+ if (!ops->generation)
+ return -ENOSYS;
+
+ return ops->generation(vdev->parent, counter);
+}
+
+int virtio_get_status(struct udevice *vdev, u8 *status)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->get_status(vdev->parent, status);
+}
+
+int virtio_set_status(struct udevice *vdev, u8 status)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->set_status(vdev->parent, status);
+}
+
+int virtio_reset(struct udevice *vdev)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->reset(vdev->parent);
+}
+
+int virtio_get_features(struct udevice *vdev, u64 *features)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->get_features(vdev->parent, features);
+}
+
+int virtio_set_features(struct udevice *vdev)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->set_features(vdev->parent);
+}
+
+int virtio_find_vqs(struct udevice *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[])
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->find_vqs(vdev->parent, nvqs, vqs);
+}
+
+int virtio_del_vqs(struct udevice *vdev)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->del_vqs(vdev->parent);
+}
+
+int virtio_notify(struct udevice *vdev, struct virtqueue *vq)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = virtio_get_ops(vdev->parent);
+
+ return ops->notify(vdev->parent, vq);
+}
+
+void virtio_add_status(struct udevice *vdev, u8 status)
+{
+ u8 old;
+
+ if (!virtio_get_status(vdev, &old))
+ virtio_set_status(vdev, old | status);
+}
+
+int virtio_finalize_features(struct udevice *vdev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(vdev->parent);
+ u8 status;
+ int ret;
+
+ ret = virtio_set_features(vdev);
+ if (ret)
+ return ret;
+
+ if (uc_priv->legacy)
+ return 0;
+
+ virtio_add_status(vdev, VIRTIO_CONFIG_S_FEATURES_OK);
+ ret = virtio_get_status(vdev, &status);
+ if (ret)
+ return ret;
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ debug("(%s): device refuses features %x\n", vdev->name, status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void virtio_driver_features_init(struct virtio_dev_priv *priv,
+ const u32 *feature,
+ u32 feature_size,
+ const u32 *feature_legacy,
+ u32 feature_legacy_size)
+{
+ priv->feature_table = feature;
+ priv->feature_table_size = feature_size;
+ priv->feature_table_legacy = feature_legacy;
+ priv->feature_table_size_legacy = feature_legacy_size;
+}
+
+int virtio_init(void)
+{
+ struct udevice *bus;
+ int ret;
+
+ /* Enumerate all known virtio devices */
+ ret = uclass_first_device(UCLASS_VIRTIO, &bus);
+ if (ret)
+ return ret;
+
+ while (bus) {
+ ret = uclass_next_device(&bus);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int virtio_uclass_pre_probe(struct udevice *udev)
+{
+ struct dm_virtio_ops *ops;
+
+ ops = (struct dm_virtio_ops *)(udev->driver->ops);
+
+ /*
+ * Check virtio transport driver ops here so that we don't need
+ * check these ops each time when the virtio_xxx APIs are called.
+ *
+ * Only generation op is optional. All other ops are must-have.
+ */
+ if (!ops->get_config || !ops->set_config ||
+ !ops->get_status || !ops->set_status ||
+ !ops->get_features || !ops->set_features ||
+ !ops->find_vqs || !ops->del_vqs ||
+ !ops->reset || !ops->notify)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int virtio_uclass_post_probe(struct udevice *udev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+ char dev_name[30], *str;
+ struct udevice *vdev;
+ int ret;
+
+	if (uc_priv->device >= VIRTIO_ID_MAX_NUM) {
+ debug("(%s): virtio device ID %d exceeds maximum num\n",
+ udev->name, uc_priv->device);
+ return 0;
+ }
+
+ if (!virtio_drv_name[uc_priv->device]) {
+ debug("(%s): underlying virtio device driver unavailable\n",
+ udev->name);
+ return 0;
+ }
+
+ snprintf(dev_name, sizeof(dev_name), "%s#%d",
+ virtio_drv_name[uc_priv->device], udev->seq);
+ str = strdup(dev_name);
+ if (!str)
+ return -ENOMEM;
+
+ ret = device_bind_driver(udev, virtio_drv_name[uc_priv->device],
+ str, &vdev);
+ if (ret == -ENOENT) {
+ debug("(%s): no driver configured\n", udev->name);
+ return 0;
+ }
+ if (ret) {
+ free(str);
+ return ret;
+ }
+ device_set_name_alloced(vdev);
+
+ INIT_LIST_HEAD(&uc_priv->vqs);
+
+ return 0;
+}
+
+static int virtio_uclass_child_post_bind(struct udevice *vdev)
+{
+ /* Acknowledge that we've seen the device */
+ virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+
+ return 0;
+}
+
+static int virtio_uclass_child_pre_probe(struct udevice *vdev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(vdev->parent);
+ u64 device_features;
+ u64 driver_features;
+ u64 driver_features_legacy;
+ int i;
+ int ret;
+
+ /*
+ * Save the real virtio device (eg: virtio-net, virtio-blk) to
+ * the transport (parent) device's uclass priv for future use.
+ */
+ uc_priv->vdev = vdev;
+
+ /*
+ * We always start by resetting the device, in case a previous driver
+ * messed it up. This also tests that code path a little.
+ */
+ ret = virtio_reset(vdev);
+ if (ret)
+ goto err;
+
+ /* We have a driver! */
+ virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER);
+
+ /* Figure out what features the device supports */
+ virtio_get_features(vdev, &device_features);
+ debug("(%s) plain device features supported %016llx\n",
+ vdev->name, device_features);
+ if (!(device_features & (1ULL << VIRTIO_F_VERSION_1)))
+ uc_priv->legacy = true;
+
+ /* Figure out what features the driver supports */
+ driver_features = 0;
+ for (i = 0; i < uc_priv->feature_table_size; i++) {
+ unsigned int f = uc_priv->feature_table[i];
+
+ WARN_ON(f >= 64);
+ driver_features |= (1ULL << f);
+ }
+
+ /* Some drivers have a separate feature table for virtio v1.0 */
+ if (uc_priv->feature_table_legacy) {
+ driver_features_legacy = 0;
+ for (i = 0; i < uc_priv->feature_table_size_legacy; i++) {
+ unsigned int f = uc_priv->feature_table_legacy[i];
+
+ WARN_ON(f >= 64);
+ driver_features_legacy |= (1ULL << f);
+ }
+ } else {
+ driver_features_legacy = driver_features;
+ }
+
+ if (uc_priv->legacy) {
+ debug("(%s): legacy virtio device\n", vdev->name);
+ uc_priv->features = driver_features_legacy & device_features;
+ } else {
+		debug("(%s): v1.0 compliant virtio device\n", vdev->name);
+ uc_priv->features = driver_features & device_features;
+ }
+
+ /* Transport features always preserved to pass to finalize_features */
+ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
+ if ((device_features & (1ULL << i)) &&
+ (i == VIRTIO_F_VERSION_1))
+ __virtio_set_bit(vdev->parent, i);
+
+ debug("(%s) final negotiated features supported %016llx\n",
+ vdev->name, uc_priv->features);
+ ret = virtio_finalize_features(vdev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ virtio_add_status(vdev, VIRTIO_CONFIG_S_FAILED);
+ return ret;
+}
+
+static int virtio_uclass_child_post_probe(struct udevice *vdev)
+{
+ /* Indicates that the driver is set up and ready to drive the device */
+ virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER_OK);
+
+ return 0;
+}
+
+UCLASS_DRIVER(virtio) = {
+ .name = "virtio",
+ .id = UCLASS_VIRTIO,
+ .flags = DM_UC_FLAG_SEQ_ALIAS,
+ .pre_probe = virtio_uclass_pre_probe,
+ .post_probe = virtio_uclass_post_probe,
+ .child_post_bind = virtio_uclass_child_post_bind,
+ .child_pre_probe = virtio_uclass_child_pre_probe,
+ .child_post_probe = virtio_uclass_child_post_probe,
+ .per_device_auto_alloc_size = sizeof(struct virtio_dev_priv),
+};
diff --git a/drivers/virtio/virtio_blk.c b/drivers/virtio/virtio_blk.c
new file mode 100644
index 0000000000..e793e34e83
--- /dev/null
+++ b/drivers/virtio/virtio_blk.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ */
+
+#include <common.h>
+#include <blk.h>
+#include <dm.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <virtio_ring.h>
+#include "virtio_blk.h"
+
+struct virtio_blk_priv {
+ struct virtqueue *vq;
+};
+
+static ulong virtio_blk_do_req(struct udevice *dev, u64 sector,
+ lbaint_t blkcnt, void *buffer, u32 type)
+{
+ struct virtio_blk_priv *priv = dev_get_priv(dev);
+ unsigned int num_out = 0, num_in = 0;
+ struct virtio_sg *sgs[3];
+ u8 status;
+ int ret;
+
+ struct virtio_blk_outhdr out_hdr = {
+ .type = cpu_to_virtio32(dev, type),
+ .sector = cpu_to_virtio64(dev, sector),
+ };
+ struct virtio_sg hdr_sg = { &out_hdr, sizeof(out_hdr) };
+ struct virtio_sg data_sg = { buffer, blkcnt * 512 };
+ struct virtio_sg status_sg = { &status, sizeof(status) };
+
+ sgs[num_out++] = &hdr_sg;
+
+ if (type & VIRTIO_BLK_T_OUT)
+ sgs[num_out++] = &data_sg;
+ else
+ sgs[num_out + num_in++] = &data_sg;
+
+ sgs[num_out + num_in++] = &status_sg;
+
+ ret = virtqueue_add(priv->vq, sgs, num_out, num_in);
+ if (ret)
+ return ret;
+
+ virtqueue_kick(priv->vq);
+
+ while (!virtqueue_get_buf(priv->vq, NULL))
+ ;
+
+ return status == VIRTIO_BLK_S_OK ? blkcnt : -EIO;
+}
+
+static ulong virtio_blk_read(struct udevice *dev, lbaint_t start,
+ lbaint_t blkcnt, void *buffer)
+{
+ return virtio_blk_do_req(dev, start, blkcnt, buffer,
+ VIRTIO_BLK_T_IN);
+}
+
+static ulong virtio_blk_write(struct udevice *dev, lbaint_t start,
+ lbaint_t blkcnt, const void *buffer)
+{
+ return virtio_blk_do_req(dev, start, blkcnt, (void *)buffer,
+ VIRTIO_BLK_T_OUT);
+}
+
+static int virtio_blk_bind(struct udevice *dev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(dev->parent);
+ struct blk_desc *desc = dev_get_uclass_platdata(dev);
+ int devnum;
+
+ desc->if_type = IF_TYPE_VIRTIO;
+ /*
+ * Initialize the devnum to -ENODEV. This is to make sure that
+ * blk_next_free_devnum() works as expected, since the default
+ * value 0 is a valid devnum.
+ */
+ desc->devnum = -ENODEV;
+ devnum = blk_next_free_devnum(IF_TYPE_VIRTIO);
+ if (devnum < 0)
+ return devnum;
+ desc->devnum = devnum;
+ desc->part_type = PART_TYPE_UNKNOWN;
+ /*
+ * virtio mmio transport supplies string identification for us,
+	 * while pci transport uses a 2-byte subvendor value.
+ */
+ if (uc_priv->vendor >> 16)
+ sprintf(desc->vendor, "%s", (char *)&uc_priv->vendor);
+ else
+ sprintf(desc->vendor, "%04x", uc_priv->vendor);
+ desc->bdev = dev;
+
+ /* Indicate what driver features we support */
+ virtio_driver_features_init(uc_priv, NULL, 0, NULL, 0);
+
+ return 0;
+}
+
+static int virtio_blk_probe(struct udevice *dev)
+{
+ struct virtio_blk_priv *priv = dev_get_priv(dev);
+ struct blk_desc *desc = dev_get_uclass_platdata(dev);
+ u64 cap;
+ int ret;
+
+ ret = virtio_find_vqs(dev, 1, &priv->vq);
+ if (ret)
+ return ret;
+
+ desc->blksz = 512;
+ virtio_cread(dev, struct virtio_blk_config, capacity, &cap);
+ desc->lba = cap;
+
+ return 0;
+}
+
+static const struct blk_ops virtio_blk_ops = {
+ .read = virtio_blk_read,
+ .write = virtio_blk_write,
+};
+
+U_BOOT_DRIVER(virtio_blk) = {
+ .name = VIRTIO_BLK_DRV_NAME,
+ .id = UCLASS_BLK,
+ .ops = &virtio_blk_ops,
+ .bind = virtio_blk_bind,
+ .probe = virtio_blk_probe,
+ .remove = virtio_reset,
+ .priv_auto_alloc_size = sizeof(struct virtio_blk_priv),
+ .flags = DM_FLAG_ACTIVE_DMA,
+};
diff --git a/drivers/virtio/virtio_blk.h b/drivers/virtio/virtio_blk.h
new file mode 100644
index 0000000000..8d8e02fa2e
--- /dev/null
+++ b/drivers/virtio/virtio_blk.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * From Linux kernel include/uapi/linux/virtio_blk.h
+ */
+
+#ifndef _LINUX_VIRTIO_BLK_H
+#define _LINUX_VIRTIO_BLK_H
+
+/* Feature bits */
+#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */
+#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */
+#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
+#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
+#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available */
+#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
+#define VIRTIO_BLK_F_MQ 12 /* Support more than one vq */
+
+/* Legacy feature bits */
+#ifndef VIRTIO_BLK_NO_LEGACY
+#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
+#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
+#define VIRTIO_BLK_F_FLUSH 9 /* Flush command supported */
+#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
+#ifndef __KERNEL__
+/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH */
+#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
+#endif
+#endif /* !VIRTIO_BLK_NO_LEGACY */
+
+#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
+
+struct __packed virtio_blk_config {
+ /* The capacity (in 512-byte sectors) */
+ __u64 capacity;
+ /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
+ __u32 size_max;
+ /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
+ __u32 seg_max;
+ /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
+ struct virtio_blk_geometry {
+ __u16 cylinders;
+ __u8 heads;
+ __u8 sectors;
+ } geometry;
+
+ /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
+ __u32 blk_size;
+
+ /* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
+ /* exponent for physical block per logical block */
+ __u8 physical_block_exp;
+ /* alignment offset in logical blocks */
+ __u8 alignment_offset;
+ /* minimum I/O size without performance penalty in logical blocks */
+ __u16 min_io_size;
+ /* optimal sustained I/O size in logical blocks */
+ __u32 opt_io_size;
+
+ /* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
+ __u8 wce;
+ __u8 unused;
+
+ /* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
+ __u16 num_queues;
+};
+
+/*
+ * Command types
+ *
+ * Usage is a bit tricky as some bits are used as flags and some are not.
+ *
+ * Rules:
+ * VIRTIO_BLK_T_OUT may be combined with VIRTIO_BLK_T_SCSI_CMD or
+ * VIRTIO_BLK_T_BARRIER. VIRTIO_BLK_T_FLUSH is a command of its own
+ * and may not be combined with any of the other flags.
+ */
+
+/* These two define direction */
+#define VIRTIO_BLK_T_IN 0
+#define VIRTIO_BLK_T_OUT 1
+
+#ifndef VIRTIO_BLK_NO_LEGACY
+/* This bit says it's a scsi command, not an actual read or write */
+#define VIRTIO_BLK_T_SCSI_CMD 2
+#endif /* VIRTIO_BLK_NO_LEGACY */
+
+/* Cache flush command */
+#define VIRTIO_BLK_T_FLUSH 4
+
+/* Get device ID command */
+#define VIRTIO_BLK_T_GET_ID 8
+
+#ifndef VIRTIO_BLK_NO_LEGACY
+/* Barrier before this op */
+#define VIRTIO_BLK_T_BARRIER 0x80000000
+#endif /* !VIRTIO_BLK_NO_LEGACY */
+
+/*
+ * This comes first in the read scatter-gather list.
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
+ * this is the first element of the read scatter-gather list.
+ */
+struct virtio_blk_outhdr {
+ /* VIRTIO_BLK_T* */
+ __virtio32 type;
+ /* io priority */
+ __virtio32 ioprio;
+ /* Sector (ie. 512 byte offset) */
+ __virtio64 sector;
+};
+
+#ifndef VIRTIO_BLK_NO_LEGACY
+struct virtio_scsi_inhdr {
+ __virtio32 errors;
+ __virtio32 data_len;
+ __virtio32 sense_len;
+ __virtio32 residual;
+};
+#endif /* !VIRTIO_BLK_NO_LEGACY */
+
+/* And this is the final byte of the write scatter-gather list */
+#define VIRTIO_BLK_S_OK 0
+#define VIRTIO_BLK_S_IOERR 1
+#define VIRTIO_BLK_S_UNSUPP 2
+
+#endif /* _LINUX_VIRTIO_BLK_H */
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
new file mode 100644
index 0000000000..7b738703b8
--- /dev/null
+++ b/drivers/virtio/virtio_mmio.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * VirtIO memory-mapped I/O transport driver
+ * Ported from Linux drivers/virtio/virtio_mmio.c
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <virtio_ring.h>
+#include <linux/compat.h>
+#include <linux/io.h>
+#include "virtio_mmio.h"
+
+static int virtio_mmio_get_config(struct udevice *udev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+ void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
+ u8 b;
+ __le16 w;
+ __le32 l;
+
+ if (priv->version == 1) {
+ u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ ptr[i] = readb(base + offset + i);
+
+ return 0;
+ }
+
+ switch (len) {
+ case 1:
+ b = readb(base + offset);
+ memcpy(buf, &b, sizeof(b));
+ break;
+ case 2:
+ w = cpu_to_le16(readw(base + offset));
+ memcpy(buf, &w, sizeof(w));
+ break;
+ case 4:
+ l = cpu_to_le32(readl(base + offset));
+ memcpy(buf, &l, sizeof(l));
+ break;
+ case 8:
+ l = cpu_to_le32(readl(base + offset));
+ memcpy(buf, &l, sizeof(l));
+ l = cpu_to_le32(readl(base + offset + sizeof(l)));
+ memcpy(buf + sizeof(l), &l, sizeof(l));
+ break;
+ default:
+ WARN_ON(true);
+ }
+
+ return 0;
+}
+
+static int virtio_mmio_set_config(struct udevice *udev, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+ void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
+ u8 b;
+ __le16 w;
+ __le32 l;
+
+ if (priv->version == 1) {
+ const u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb(ptr[i], base + offset + i);
+
+ return 0;
+ }
+
+ switch (len) {
+ case 1:
+ memcpy(&b, buf, sizeof(b));
+ writeb(b, base + offset);
+ break;
+ case 2:
+ memcpy(&w, buf, sizeof(w));
+ writew(le16_to_cpu(w), base + offset);
+ break;
+ case 4:
+ memcpy(&l, buf, sizeof(l));
+ writel(le32_to_cpu(l), base + offset);
+ break;
+ case 8:
+ memcpy(&l, buf, sizeof(l));
+ writel(le32_to_cpu(l), base + offset);
+ memcpy(&l, buf + sizeof(l), sizeof(l));
+ writel(le32_to_cpu(l), base + offset + sizeof(l));
+ break;
+ default:
+ WARN_ON(true);
+ }
+
+ return 0;
+}
+
+static int virtio_mmio_generation(struct udevice *udev, u32 *counter)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+
+ if (priv->version == 1)
+ *counter = 0;
+ else
+ *counter = readl(priv->base + VIRTIO_MMIO_CONFIG_GENERATION);
+
+ return 0;
+}
+
+static int virtio_mmio_get_status(struct udevice *udev, u8 *status)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+
+ *status = readl(priv->base + VIRTIO_MMIO_STATUS) & 0xff;
+
+ return 0;
+}
+
+static int virtio_mmio_set_status(struct udevice *udev, u8 status)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+
+ /* We should never be setting status to 0 */
+ WARN_ON(status == 0);
+
+ writel(status, priv->base + VIRTIO_MMIO_STATUS);
+
+ return 0;
+}
+
+static int virtio_mmio_reset(struct udevice *udev)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+
+ /* 0 status means a reset */
+ writel(0, priv->base + VIRTIO_MMIO_STATUS);
+
+ return 0;
+}
+
+static int virtio_mmio_get_features(struct udevice *udev, u64 *features)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+
+ writel(1, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+ *features = readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
+ *features <<= 32;
+
+ writel(0, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+ *features |= readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
+
+ return 0;
+}
+
+static int virtio_mmio_set_features(struct udevice *udev)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+
+	/* Make sure there are no mixed devices */
+ if (priv->version == 2 && uc_priv->legacy) {
+ debug("New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
+ return -EINVAL;
+ }
+
+ writel(1, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+ writel((u32)(uc_priv->features >> 32),
+ priv->base + VIRTIO_MMIO_DRIVER_FEATURES);
+
+ writel(0, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+ writel((u32)uc_priv->features,
+ priv->base + VIRTIO_MMIO_DRIVER_FEATURES);
+
+ return 0;
+}
+
+static struct virtqueue *virtio_mmio_setup_vq(struct udevice *udev,
+ unsigned int index)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+ struct virtqueue *vq;
+ unsigned int num;
+ int err;
+
+ /* Select the queue we're interested in */
+ writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
+
+ /* Queue shouldn't already be set up */
+ if (readl(priv->base + (priv->version == 1 ?
+ VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
+ err = -ENOENT;
+ goto error_available;
+ }
+
+ num = readl(priv->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+ if (num == 0) {
+ err = -ENOENT;
+ goto error_new_virtqueue;
+ }
+
+ /* Create the vring */
+ vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, udev);
+ if (!vq) {
+ err = -ENOMEM;
+ goto error_new_virtqueue;
+ }
+
+ /* Activate the queue */
+ writel(virtqueue_get_vring_size(vq),
+ priv->base + VIRTIO_MMIO_QUEUE_NUM);
+ if (priv->version == 1) {
+ u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;
+
+ /*
+ * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
+ * that doesn't fit in 32bit, fail the setup rather than
+ * pretending to be successful.
+ */
+ if (q_pfn >> 32) {
+ debug("platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+ 0x1ULL << (32 + PAGE_SHIFT - 30));
+ err = -E2BIG;
+ goto error_bad_pfn;
+ }
+
+ writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_QUEUE_ALIGN);
+ writel(q_pfn, priv->base + VIRTIO_MMIO_QUEUE_PFN);
+ } else {
+ u64 addr;
+
+ addr = virtqueue_get_desc_addr(vq);
+ writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
+ writel((u32)(addr >> 32),
+ priv->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
+
+ addr = virtqueue_get_avail_addr(vq);
+ writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
+ writel((u32)(addr >> 32),
+ priv->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
+
+ addr = virtqueue_get_used_addr(vq);
+ writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_USED_LOW);
+ writel((u32)(addr >> 32),
+ priv->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
+
+ writel(1, priv->base + VIRTIO_MMIO_QUEUE_READY);
+ }
+
+ return vq;
+
+error_bad_pfn:
+ vring_del_virtqueue(vq);
+
+error_new_virtqueue:
+ if (priv->version == 1) {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
+ } else {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
+ WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
+ }
+
+error_available:
+ return ERR_PTR(err);
+}
+
+static void virtio_mmio_del_vq(struct virtqueue *vq)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(vq->vdev);
+ unsigned int index = vq->index;
+
+ /* Select and deactivate the queue */
+ writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
+ if (priv->version == 1) {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
+ } else {
+ writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
+ WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
+ }
+
+ vring_del_virtqueue(vq);
+}
+
+static int virtio_mmio_del_vqs(struct udevice *udev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
+ virtio_mmio_del_vq(vq);
+
+ return 0;
+}
+
+static int virtio_mmio_find_vqs(struct udevice *udev, unsigned int nvqs,
+ struct virtqueue *vqs[])
+{
+ int i;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = virtio_mmio_setup_vq(udev, i);
+ if (IS_ERR(vqs[i])) {
+ virtio_mmio_del_vqs(udev);
+ return PTR_ERR(vqs[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int virtio_mmio_notify(struct udevice *udev, struct virtqueue *vq)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+
+ /*
+ * We write the queue's selector into the notification register
+ * to signal the other end
+ */
+ writel(vq->index, priv->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+
+ return 0;
+}
+
+static int virtio_mmio_ofdata_to_platdata(struct udevice *udev)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+
+ priv->base = (void __iomem *)(ulong)dev_read_addr(udev);
+ if (priv->base == (void __iomem *)FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int virtio_mmio_probe(struct udevice *udev)
+{
+ struct virtio_mmio_priv *priv = dev_get_priv(udev);
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+ u32 magic;
+
+ /* Check magic value */
+ magic = readl(priv->base + VIRTIO_MMIO_MAGIC_VALUE);
+ if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
+ debug("(%s): wrong magic value 0x%08x!\n", udev->name, magic);
+ return 0;
+ }
+
+ /* Check device version */
+ priv->version = readl(priv->base + VIRTIO_MMIO_VERSION);
+ if (priv->version < 1 || priv->version > 2) {
+ debug("(%s): version %d not supported!\n",
+ udev->name, priv->version);
+ return 0;
+ }
+
+	/* Check device ID */
+ uc_priv->device = readl(priv->base + VIRTIO_MMIO_DEVICE_ID);
+ if (uc_priv->device == 0) {
+ /*
+ * virtio-mmio device with an ID 0 is a (dummy) placeholder
+ * with no function. End probing now with no error reported.
+ */
+ return 0;
+ }
+ uc_priv->vendor = readl(priv->base + VIRTIO_MMIO_VENDOR_ID);
+
+ if (priv->version == 1)
+ writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+ debug("(%s): device (%d) vendor (%08x) version (%d)\n", udev->name,
+ uc_priv->device, uc_priv->vendor, priv->version);
+
+ return 0;
+}
+
+static const struct dm_virtio_ops virtio_mmio_ops = {
+ .get_config = virtio_mmio_get_config,
+ .set_config = virtio_mmio_set_config,
+ .generation = virtio_mmio_generation,
+ .get_status = virtio_mmio_get_status,
+ .set_status = virtio_mmio_set_status,
+ .reset = virtio_mmio_reset,
+ .get_features = virtio_mmio_get_features,
+ .set_features = virtio_mmio_set_features,
+ .find_vqs = virtio_mmio_find_vqs,
+ .del_vqs = virtio_mmio_del_vqs,
+ .notify = virtio_mmio_notify,
+};
+
+static const struct udevice_id virtio_mmio_ids[] = {
+ { .compatible = "virtio,mmio" },
+ { }
+};
+
+U_BOOT_DRIVER(virtio_mmio) = {
+ .name = "virtio-mmio",
+ .id = UCLASS_VIRTIO,
+ .of_match = virtio_mmio_ids,
+ .ops = &virtio_mmio_ops,
+ .probe = virtio_mmio_probe,
+ .ofdata_to_platdata = virtio_mmio_ofdata_to_platdata,
+ .priv_auto_alloc_size = sizeof(struct virtio_mmio_priv),
+};
diff --git a/drivers/virtio/virtio_mmio.h b/drivers/virtio/virtio_mmio.h
new file mode 100644
index 0000000000..b3408828a5
--- /dev/null
+++ b/drivers/virtio/virtio_mmio.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * From Linux kernel include/uapi/linux/virtio_mmio.h
+ */
+
+#ifndef _LINUX_VIRTIO_MMIO_H
+#define _LINUX_VIRTIO_MMIO_H
+
+/* Control registers */
+
+/* Magic value ("virt" string) - Read Only */
+#define VIRTIO_MMIO_MAGIC_VALUE 0x000
+
+/* Virtio device version - Read Only */
+#define VIRTIO_MMIO_VERSION 0x004
+
+/* Virtio device ID - Read Only */
+#define VIRTIO_MMIO_DEVICE_ID 0x008
+
+/* Virtio vendor ID - Read Only */
+#define VIRTIO_MMIO_VENDOR_ID 0x00c
+
+/*
+ * Bitmask of the features supported by the device (host)
+ * (32 bits per set) - Read Only
+ */
+#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
+
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
+
+/*
+ * Bitmask of features activated by the driver (guest)
+ * (32 bits per set) - Write Only
+ */
+#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
+
+/* Activated features set selector - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Guest's memory page size in bytes - Write Only */
+#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
+
+#endif
+
+/* Queue selector - Write Only */
+#define VIRTIO_MMIO_QUEUE_SEL 0x030
+
+/* Maximum size of the currently selected queue - Read Only */
+#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034
+
+/* Queue size for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_NUM 0x038
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Used Ring alignment for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
+
+/* Guest's PFN for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_PFN 0x040
+
+#endif
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY 0x044
+
+/* Queue notifier - Write Only */
+#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
+
+/* Interrupt status - Read Only */
+#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060
+
+/* Interrupt acknowledge - Write Only */
+#define VIRTIO_MMIO_INTERRUPT_ACK 0x064
+
+/* Device status register - Read Write */
+#define VIRTIO_MMIO_STATUS 0x070
+
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
+
+/*
+ * The config space is defined by each driver as
+ * the per-driver configuration space - Read Write
+ */
+#define VIRTIO_MMIO_CONFIG 0x100
+
+/* Interrupt flags (re: interrupt status & acknowledge registers) */
+
+#define VIRTIO_MMIO_INT_VRING BIT(0)
+#define VIRTIO_MMIO_INT_CONFIG BIT(1)
+
+/*
+ * The alignment to use between consumer and producer parts of vring.
+ * Currently hardcoded to the page size.
+ */
+#define PAGE_SHIFT 12
+#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
+
+/**
+ * virtio mmio transport driver private data
+ *
+ * @base: mmio transport device register base
+ * @version: mmio transport device version
+ */
+struct virtio_mmio_priv {
+ void __iomem *base;
+ u32 version;
+};
+
+#endif /* _LINUX_VIRTIO_MMIO_H */
diff --git a/drivers/virtio/virtio_net.c b/drivers/virtio/virtio_net.c
new file mode 100644
index 0000000000..0dbbd78023
--- /dev/null
+++ b/drivers/virtio/virtio_net.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <net.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <virtio_ring.h>
+#include "virtio_net.h"
+
+/* Amount of buffers to keep in the RX virtqueue */
+#define VIRTIO_NET_NUM_RX_BUFS 32
+
+/*
+ * This value comes from the VirtIO spec: 1500 for maximum packet size,
+ * 14 for the Ethernet header, 12 for virtio_net_hdr. In total 1526 bytes.
+ */
+#define VIRTIO_NET_RX_BUF_SIZE 1526
+
+struct virtio_net_priv {
+ union {
+ struct virtqueue *vqs[2];
+ struct {
+ struct virtqueue *rx_vq;
+ struct virtqueue *tx_vq;
+ };
+ };
+
+ char rx_buff[VIRTIO_NET_NUM_RX_BUFS][VIRTIO_NET_RX_BUF_SIZE];
+ bool rx_running;
+ int net_hdr_len;
+};
+
+/*
+ * For simplicity, the driver only negotiates the VIRTIO_NET_F_MAC feature.
+ * For the VIRTIO_NET_F_STATUS feature, we don't negotiate it, hence per spec
+ * we should assume the link is always active.
+ */
+static const u32 feature[] = {
+ VIRTIO_NET_F_MAC
+};
+
+static const u32 feature_legacy[] = {
+ VIRTIO_NET_F_MAC
+};
+
+static int virtio_net_start(struct udevice *dev)
+{
+ struct virtio_net_priv *priv = dev_get_priv(dev);
+ struct virtio_sg sg;
+ struct virtio_sg *sgs[] = { &sg };
+ int i;
+
+ if (!priv->rx_running) {
+ /* receive buffer length is always 1526 */
+ sg.length = VIRTIO_NET_RX_BUF_SIZE;
+
+ /* setup the receive buffer address */
+ for (i = 0; i < VIRTIO_NET_NUM_RX_BUFS; i++) {
+ sg.addr = priv->rx_buff[i];
+ virtqueue_add(priv->rx_vq, sgs, 0, 1);
+ }
+
+ virtqueue_kick(priv->rx_vq);
+
+ /* setup the receive queue only once */
+ priv->rx_running = true;
+ }
+
+ return 0;
+}
+
+static int virtio_net_send(struct udevice *dev, void *packet, int length)
+{
+ struct virtio_net_priv *priv = dev_get_priv(dev);
+ struct virtio_net_hdr hdr;
+ struct virtio_net_hdr_v1 hdr_v1;
+ struct virtio_sg hdr_sg;
+ struct virtio_sg data_sg = { packet, length };
+ struct virtio_sg *sgs[] = { &hdr_sg, &data_sg };
+ int ret;
+
+ if (priv->net_hdr_len == sizeof(struct virtio_net_hdr))
+ hdr_sg.addr = &hdr;
+ else
+ hdr_sg.addr = &hdr_v1;
+ hdr_sg.length = priv->net_hdr_len;
+
+ memset(hdr_sg.addr, 0, priv->net_hdr_len);
+
+ ret = virtqueue_add(priv->tx_vq, sgs, 2, 0);
+ if (ret)
+ return ret;
+
+ virtqueue_kick(priv->tx_vq);
+
+ while (1) {
+ if (virtqueue_get_buf(priv->tx_vq, NULL))
+ break;
+ }
+
+ return 0;
+}
+
+static int virtio_net_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct virtio_net_priv *priv = dev_get_priv(dev);
+ unsigned int len;
+ void *buf;
+
+ buf = virtqueue_get_buf(priv->rx_vq, &len);
+ if (!buf)
+ return -EAGAIN;
+
+ *packetp = buf + priv->net_hdr_len;
+ return len - priv->net_hdr_len;
+}
+
+static int virtio_net_free_pkt(struct udevice *dev, uchar *packet, int length)
+{
+ struct virtio_net_priv *priv = dev_get_priv(dev);
+ void *buf = packet - priv->net_hdr_len;
+ struct virtio_sg sg = { buf, VIRTIO_NET_RX_BUF_SIZE };
+ struct virtio_sg *sgs[] = { &sg };
+
+ /* Put the buffer back to the rx ring */
+ virtqueue_add(priv->rx_vq, sgs, 0, 1);
+
+ return 0;
+}
+
+static void virtio_net_stop(struct udevice *dev)
+{
+ /*
+ * There is no way to stop the queue from running, unless we issue
+ * a reset to the virtio device, and re-do the queue initialization
+ * from the beginning.
+ */
+}
+
+static int virtio_net_write_hwaddr(struct udevice *dev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(dev->parent);
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+ int i;
+
+ /*
+ * v1.0 compliant device's MAC address is set through control channel,
+ * which we don't support for now.
+ */
+ if (!uc_priv->legacy)
+ return -ENOSYS;
+
+ for (i = 0; i < sizeof(pdata->enetaddr); i++) {
+ virtio_cwrite8(dev,
+ offsetof(struct virtio_net_config, mac) + i,
+ pdata->enetaddr[i]);
+ }
+
+ return 0;
+}
+
+static int virtio_net_read_rom_hwaddr(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_platdata(dev);
+
+ if (!pdata)
+ return -ENOSYS;
+
+ if (virtio_has_feature(dev, VIRTIO_NET_F_MAC)) {
+ virtio_cread_bytes(dev,
+ offsetof(struct virtio_net_config, mac),
+ pdata->enetaddr, sizeof(pdata->enetaddr));
+ }
+
+ return 0;
+}
+
+static int virtio_net_bind(struct udevice *dev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(dev->parent);
+
+ /* Indicate what driver features we support */
+ virtio_driver_features_init(uc_priv, feature, ARRAY_SIZE(feature),
+ feature_legacy, ARRAY_SIZE(feature_legacy));
+
+ return 0;
+}
+
+static int virtio_net_probe(struct udevice *dev)
+{
+ struct virtio_net_priv *priv = dev_get_priv(dev);
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(dev->parent);
+ int ret;
+
+ ret = virtio_find_vqs(dev, 2, priv->vqs);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * For v1.0 compliant device, it always assumes the member
+ * 'num_buffers' exists in the struct virtio_net_hdr while
+ * the legacy driver only presented 'num_buffers' when
+ * VIRTIO_NET_F_MRG_RXBUF was negotiated. Without that feature
+ * the structure was 2 bytes shorter.
+ */
+ if (uc_priv->legacy)
+ priv->net_hdr_len = sizeof(struct virtio_net_hdr);
+ else
+ priv->net_hdr_len = sizeof(struct virtio_net_hdr_v1);
+
+ return 0;
+}
+
+static const struct eth_ops virtio_net_ops = {
+ .start = virtio_net_start,
+ .send = virtio_net_send,
+ .recv = virtio_net_recv,
+ .free_pkt = virtio_net_free_pkt,
+ .stop = virtio_net_stop,
+ .write_hwaddr = virtio_net_write_hwaddr,
+ .read_rom_hwaddr = virtio_net_read_rom_hwaddr,
+};
+
+U_BOOT_DRIVER(virtio_net) = {
+ .name = VIRTIO_NET_DRV_NAME,
+ .id = UCLASS_ETH,
+ .bind = virtio_net_bind,
+ .probe = virtio_net_probe,
+ .remove = virtio_reset,
+ .ops = &virtio_net_ops,
+ .priv_auto_alloc_size = sizeof(struct virtio_net_priv),
+ .platdata_auto_alloc_size = sizeof(struct eth_pdata),
+ .flags = DM_FLAG_ACTIVE_DMA,
+};
diff --git a/drivers/virtio/virtio_net.h b/drivers/virtio/virtio_net.h
new file mode 100644
index 0000000000..c92bae5269
--- /dev/null
+++ b/drivers/virtio/virtio_net.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * From Linux kernel include/uapi/linux/virtio_net.h
+ */
+
+#ifndef _LINUX_VIRTIO_NET_H
+#define _LINUX_VIRTIO_NET_H
+
+/* TODO: needs to be removed! */
+#define ETH_ALEN 6
+
+/* The feature bitmap for virtio net */
+
+/* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_CSUM 0
+/* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 1
+/* Dynamic offload configuration */
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2
+/* Initial MTU advice */
+#define VIRTIO_NET_F_MTU 3
+/* Host has given MAC address */
+#define VIRTIO_NET_F_MAC 5
+/* Guest can handle TSOv4 in */
+#define VIRTIO_NET_F_GUEST_TSO4 7
+/* Guest can handle TSOv6 in */
+#define VIRTIO_NET_F_GUEST_TSO6 8
+/* Guest can handle TSO[6] w/ ECN in */
+#define VIRTIO_NET_F_GUEST_ECN 9
+/* Guest can handle UFO in */
+#define VIRTIO_NET_F_GUEST_UFO 10
+/* Host can handle TSOv4 in */
+#define VIRTIO_NET_F_HOST_TSO4 11
+/* Host can handle TSOv6 in */
+#define VIRTIO_NET_F_HOST_TSO6 12
+/* Host can handle TSO[6] w/ ECN in */
+#define VIRTIO_NET_F_HOST_ECN 13
+/* Host can handle UFO in */
+#define VIRTIO_NET_F_HOST_UFO 14
+/* Host can merge receive buffers */
+#define VIRTIO_NET_F_MRG_RXBUF 15
+/* virtio_net_config.status available */
+#define VIRTIO_NET_F_STATUS 16
+/* Control channel available */
+#define VIRTIO_NET_F_CTRL_VQ 17
+/* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_RX 18
+/* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_VLAN 19
+/* Extra RX mode control support */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 20
+/* Guest can announce device on the network */
+#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
+/* Device supports receive flow steering */
+#define VIRTIO_NET_F_MQ 22
+/* Set MAC address */
+#define VIRTIO_NET_F_CTRL_MAC_ADDR 23
+/* Device set linkspeed and duplex */
+#define VIRTIO_NET_F_SPEED_DUPLEX 63
+
+#ifndef VIRTIO_NET_NO_LEGACY
+/* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GSO 6
+#endif /* VIRTIO_NET_NO_LEGACY */
+
+#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
+#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
+
+struct __packed virtio_net_config {
+ /* The config defining mac address (if VIRTIO_NET_F_MAC) */
+ __u8 mac[ETH_ALEN];
+ /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
+ __u16 status;
+ /*
+ * Maximum number of each of transmit and receive queues;
+ * see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
+ * Legal values are between 1 and 0x8000
+ */
+ __u16 max_virtqueue_pairs;
+ /* Default maximum transmit unit advice */
+ __u16 mtu;
+ /*
+ * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
+ * Any other value stands for unknown.
+ */
+ __u32 speed;
+ /*
+ * 0x00 - half duplex
+ * 0x01 - full duplex
+ * Any other value stands for unknown.
+ */
+ __u8 duplex;
+};
+
+/*
+ * This header comes first in the scatter-gather list. If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ *
+ * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
+ * only flattened.
+ */
+struct virtio_net_hdr_v1 {
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 0x01 /* Use csum_start, csum_offset */
+#define VIRTIO_NET_HDR_F_DATA_VALID 0x02 /* Csum is valid */
+ __u8 flags;
+#define VIRTIO_NET_HDR_GSO_NONE 0x00 /* Not a GSO frame */
+#define VIRTIO_NET_HDR_GSO_TCPV4 0x01 /* GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP 0x03 /* GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6 0x04 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
+ __u8 gso_type;
+ __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
+ __virtio16 gso_size; /* Bytes to append to hdr_len per frame */
+ __virtio16 csum_start; /* Position to start checksumming from */
+ __virtio16 csum_offset; /* Offset after that to place checksum */
+ __virtio16 num_buffers; /* Number of merged rx buffers */
+};
+
+#ifndef VIRTIO_NET_NO_LEGACY
+/*
+ * This header comes first in the scatter-gather list.
+ *
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
+ * be the first element of the scatter-gather list. If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ */
+struct virtio_net_hdr {
+ /* See VIRTIO_NET_HDR_F_* */
+ __u8 flags;
+ /* See VIRTIO_NET_HDR_GSO_* */
+ __u8 gso_type;
+ __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
+ __virtio16 gso_size; /* Bytes to append to hdr_len per frame */
+ __virtio16 csum_start; /* Position to start checksumming from */
+ __virtio16 csum_offset; /* Offset after that to place checksum */
+};
+
+/*
+ * This is the version of the header to use when the MRG_RXBUF
+ * feature has been negotiated.
+ */
+struct virtio_net_hdr_mrg_rxbuf {
+ struct virtio_net_hdr hdr;
+ __virtio16 num_buffers; /* Number of merged rx buffers */
+};
+#endif /* ...VIRTIO_NET_NO_LEGACY */
+
+/*
+ * Control virtqueue data structures
+ *
+ * The control virtqueue expects a header in the first sg entry
+ * and an ack/status response in the last entry. Data for the
+ * command goes in between.
+ */
+struct __packed virtio_net_ctrl_hdr {
+ __u8 class;
+ __u8 cmd;
+};
+
+typedef __u8 virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK 0
+#define VIRTIO_NET_ERR 1
+
+/*
+ * Control the RX mode, i.e. promiscuous, allmulti, etc...
+ *
+ * All commands require an "out" sg entry containing a 1 byte state value,
+ * zero = disable, non-zero = enable.
+ *
+ * Commands 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
+ */
+#define VIRTIO_NET_CTRL_RX 0
+#define VIRTIO_NET_CTRL_RX_PROMISC 0
+#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+#define VIRTIO_NET_CTRL_RX_ALLUNI 2
+#define VIRTIO_NET_CTRL_RX_NOMULTI 3
+#define VIRTIO_NET_CTRL_RX_NOUNI 4
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
+
+/*
+ * Control the MAC
+ *
+ * The MAC filter table is managed by the hypervisor, the guest should assume
+ * the size is infinite. Filtering should be considered non-perfect, ie. based
+ * on hypervisor resources, the guest may receive packets from sources not
+ * specified in the filter list.
+ *
+ * In addition to the class/cmd header, the TABLE_SET command requires two
+ * out scatterlists. Each contains a 4 byte count of entries followed by a
+ * concatenated byte stream of the ETH_ALEN MAC addresses. The first sg list
+ * contains unicast addresses, the second is for multicast. This functionality
+ * is present if the VIRTIO_NET_F_CTRL_RX feature is available.
+ *
+ * The ADDR_SET command requires one out scatterlist; it contains a 6-byte MAC
+ * address. This functionality is present if the VIRTIO_NET_F_CTRL_MAC_ADDR
+ * feature is available.
+ */
+struct __packed virtio_net_ctrl_mac {
+ __virtio32 entries;
+ __u8 macs[][ETH_ALEN];
+};
+
+#define VIRTIO_NET_CTRL_MAC 1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
+
+/*
+ * Control VLAN filtering
+ *
+ * The VLAN filter table is controlled via a simple ADD/DEL interface. VLAN IDs
+ * not added may be filtered by the hypervisor. Del is the opposite of add. Both
+ * commands expect an out entry containing a 2 byte VLAN ID. VLAN filtering is
+ * available with the VIRTIO_NET_F_CTRL_VLAN feature bit.
+ */
+#define VIRTIO_NET_CTRL_VLAN 2
+#define VIRTIO_NET_CTRL_VLAN_ADD 0
+#define VIRTIO_NET_CTRL_VLAN_DEL 1
+
+/*
+ * Control link announce acknowledgment
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that driver has
+ * received the notification; device would clear the VIRTIO_NET_S_ANNOUNCE bit
+ * in the status field after it receives this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE 3
+#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
+
+/*
+ * Control receive flow steering
+ *
+ * The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET enables receive flow steering,
+ * specifying the number of the transmit and receive queues that will be used.
+ * After the command is consumed and acked by the device, the device will not
+ * steer new packets on receive virtqueues other than specified nor read from
+ * transmit virtqueues other than specified. Accordingly, driver should not
+ * transmit new packets on virtqueues other than specified.
+ */
+struct virtio_net_ctrl_mq {
+ __virtio16 virtqueue_pairs;
+};
+
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+
+/*
+ * Control network offloads
+ *
+ * Reconfigures the network offloads that guest can handle.
+ *
+ * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
+ *
+ * Command data format matches the feature bit mask exactly.
+ *
+ * See VIRTIO_NET_F_GUEST_* for the list of offloads
+ * that can be enabled/disabled.
+ */
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+
+#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/drivers/virtio/virtio_pci.h b/drivers/virtio/virtio_pci.h
new file mode 100644
index 0000000000..cc753ed7b3
--- /dev/null
+++ b/drivers/virtio/virtio_pci.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * From Linux kernel include/uapi/linux/virtio_pci.h
+ */
+
+#ifndef _LINUX_VIRTIO_PCI_H
+#define _LINUX_VIRTIO_PCI_H
+
+#ifndef VIRTIO_PCI_NO_LEGACY
+
+/* A 32-bit r/o bitmask of the features supported by the host */
+#define VIRTIO_PCI_HOST_FEATURES 0
+
+/* A 32-bit r/w bitmask of features activated by the guest */
+#define VIRTIO_PCI_GUEST_FEATURES 4
+
+/* A 32-bit r/w PFN for the currently selected queue */
+#define VIRTIO_PCI_QUEUE_PFN 8
+
+/* A 16-bit r/o queue size for the currently selected queue */
+#define VIRTIO_PCI_QUEUE_NUM 12
+
+/* A 16-bit r/w queue selector */
+#define VIRTIO_PCI_QUEUE_SEL 14
+
+/* A 16-bit r/w queue notifier */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16
+
+/* An 8-bit device status register */
+#define VIRTIO_PCI_STATUS 18
+
+/*
+ * An 8-bit r/o interrupt status register. Reading the value will return the
+ * current contents of the ISR and will also clear it. This is effectively
+ * a read-and-acknowledge.
+ */
+#define VIRTIO_PCI_ISR 19
+
+/* MSI-X registers: only enabled if MSI-X is enabled */
+
+/* A 16-bit vector for configuration changes */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* A 16-bit vector for selected queue notifications */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space
+ */
+#define VIRTIO_PCI_CONFIG_OFF(msix) ((msix) ? 24 : 20)
+
+/* Virtio ABI version, this must match exactly */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/*
+ * The alignment to use between consumer and producer parts of vring.
+ * x86 pagesize again.
+ */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+#endif /* VIRTIO_PCI_NO_LEGACY */
+
+/* The bit of the ISR which indicates a device configuration change */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue */
+#define VIRTIO_MSI_NO_VECTOR 0xffff
+
+#ifndef VIRTIO_PCI_NO_MODERN
+
+/* IDs for different capabilities. Must all exist. */
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR access */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ __u8 cap_next; /* Generic PCI field: next ptr */
+ __u8 cap_len; /* Generic PCI field: capability length */
+ __u8 cfg_type; /* Identifies the structure */
+ __u8 bar; /* Where to find it */
+ __u8 padding[3]; /* Pad to full dword */
+ __le32 offset; /* Offset within bar */
+ __le32 length; /* Length of the structure, in bytes */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ __le32 notify_off_multiplier; /* Multiplier for queue_notify_off */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device */
+ __le32 device_feature_select; /* read-write */
+ __le32 device_feature; /* read-only */
+ __le32 guest_feature_select; /* read-write */
+ __le32 guest_feature; /* read-write */
+ __le16 msix_config; /* read-write */
+ __le16 num_queues; /* read-only */
+ __u8 device_status; /* read-write */
+ __u8 config_generation; /* read-only */
+
+ /* About a specific virtqueue */
+ __le16 queue_select; /* read-write */
+ __le16 queue_size; /* read-write, power of 2 */
+ __le16 queue_msix_vector; /* read-write */
+ __le16 queue_enable; /* read-write */
+ __le16 queue_notify_off; /* read-only */
+ __le32 queue_desc_lo; /* read-write */
+ __le32 queue_desc_hi; /* read-write */
+ __le32 queue_avail_lo; /* read-write */
+ __le32 queue_avail_hi; /* read-write */
+ __le32 queue_used_lo; /* read-write */
+ __le32 queue_used_hi; /* read-write */
+};
+
+/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
+struct virtio_pci_cfg_cap {
+ struct virtio_pci_cap cap;
+ __u8 pci_cfg_data[4]; /* Data for BAR access */
+};
+
+/* Macro versions of offsets for the Old Timers! */
+#define VIRTIO_PCI_CAP_VNDR 0
+#define VIRTIO_PCI_CAP_NEXT 1
+#define VIRTIO_PCI_CAP_LEN 2
+#define VIRTIO_PCI_CAP_CFG_TYPE 3
+#define VIRTIO_PCI_CAP_BAR 4
+#define VIRTIO_PCI_CAP_OFFSET 8
+#define VIRTIO_PCI_CAP_LENGTH 12
+
+#define VIRTIO_PCI_NOTIFY_CAP_MULT 16
+
+#define VIRTIO_PCI_COMMON_DFSELECT 0
+#define VIRTIO_PCI_COMMON_DF 4
+#define VIRTIO_PCI_COMMON_GFSELECT 8
+#define VIRTIO_PCI_COMMON_GF 12
+#define VIRTIO_PCI_COMMON_MSIX 16
+#define VIRTIO_PCI_COMMON_NUMQ 18
+#define VIRTIO_PCI_COMMON_STATUS 20
+#define VIRTIO_PCI_COMMON_CFGGENERATION 21
+#define VIRTIO_PCI_COMMON_Q_SELECT 22
+#define VIRTIO_PCI_COMMON_Q_SIZE 24
+#define VIRTIO_PCI_COMMON_Q_MSIX 26
+#define VIRTIO_PCI_COMMON_Q_ENABLE 28
+#define VIRTIO_PCI_COMMON_Q_NOFF 30
+#define VIRTIO_PCI_COMMON_Q_DESCLO 32
+#define VIRTIO_PCI_COMMON_Q_DESCHI 36
+#define VIRTIO_PCI_COMMON_Q_AVAILLO 40
+#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
+#define VIRTIO_PCI_COMMON_Q_USEDLO 48
+#define VIRTIO_PCI_COMMON_Q_USEDHI 52
+
+#endif /* VIRTIO_PCI_NO_MODERN */
+
+#endif /* _LINUX_VIRTIO_PCI_H */
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
new file mode 100644
index 0000000000..08764ee6f2
--- /dev/null
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * VirtIO PCI bus transport driver
+ * Ported from Linux drivers/virtio/virtio_pci*.c
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <virtio_ring.h>
+#include <dm/device.h>
+#include <linux/compat.h>
+#include <linux/io.h>
+#include "virtio_pci.h"
+
+#define VIRTIO_PCI_DRV_NAME "virtio-pci.l"
+
+/* PCI device ID in the range 0x1000 to 0x103f */
+#define VIRTIO_PCI_VENDOR_ID 0x1af4
+#define VIRTIO_PCI_DEVICE_ID00 0x1000
+#define VIRTIO_PCI_DEVICE_ID01 0x1001
+#define VIRTIO_PCI_DEVICE_ID02 0x1002
+#define VIRTIO_PCI_DEVICE_ID03 0x1003
+#define VIRTIO_PCI_DEVICE_ID04 0x1004
+#define VIRTIO_PCI_DEVICE_ID05 0x1005
+#define VIRTIO_PCI_DEVICE_ID06 0x1006
+#define VIRTIO_PCI_DEVICE_ID07 0x1007
+#define VIRTIO_PCI_DEVICE_ID08 0x1008
+#define VIRTIO_PCI_DEVICE_ID09 0x1009
+#define VIRTIO_PCI_DEVICE_ID0A 0x100a
+#define VIRTIO_PCI_DEVICE_ID0B 0x100b
+#define VIRTIO_PCI_DEVICE_ID0C 0x100c
+#define VIRTIO_PCI_DEVICE_ID0D 0x100d
+#define VIRTIO_PCI_DEVICE_ID0E 0x100e
+#define VIRTIO_PCI_DEVICE_ID0F 0x100f
+#define VIRTIO_PCI_DEVICE_ID10 0x1010
+#define VIRTIO_PCI_DEVICE_ID11 0x1011
+#define VIRTIO_PCI_DEVICE_ID12 0x1012
+#define VIRTIO_PCI_DEVICE_ID13 0x1013
+#define VIRTIO_PCI_DEVICE_ID14 0x1014
+#define VIRTIO_PCI_DEVICE_ID15 0x1015
+#define VIRTIO_PCI_DEVICE_ID16 0x1016
+#define VIRTIO_PCI_DEVICE_ID17 0x1017
+#define VIRTIO_PCI_DEVICE_ID18 0x1018
+#define VIRTIO_PCI_DEVICE_ID19 0x1019
+#define VIRTIO_PCI_DEVICE_ID1A 0x101a
+#define VIRTIO_PCI_DEVICE_ID1B 0x101b
+#define VIRTIO_PCI_DEVICE_ID1C 0x101c
+#define VIRTIO_PCI_DEVICE_ID1D 0x101d
+#define VIRTIO_PCI_DEVICE_ID1E 0x101e
+#define VIRTIO_PCI_DEVICE_ID1F 0x101f
+#define VIRTIO_PCI_DEVICE_ID20 0x1020
+#define VIRTIO_PCI_DEVICE_ID21 0x1021
+#define VIRTIO_PCI_DEVICE_ID22 0x1022
+#define VIRTIO_PCI_DEVICE_ID23 0x1023
+#define VIRTIO_PCI_DEVICE_ID24 0x1024
+#define VIRTIO_PCI_DEVICE_ID25 0x1025
+#define VIRTIO_PCI_DEVICE_ID26 0x1026
+#define VIRTIO_PCI_DEVICE_ID27 0x1027
+#define VIRTIO_PCI_DEVICE_ID28 0x1028
+#define VIRTIO_PCI_DEVICE_ID29 0x1029
+#define VIRTIO_PCI_DEVICE_ID2A 0x102a
+#define VIRTIO_PCI_DEVICE_ID2B 0x102b
+#define VIRTIO_PCI_DEVICE_ID2C 0x102c
+#define VIRTIO_PCI_DEVICE_ID2D 0x102d
+#define VIRTIO_PCI_DEVICE_ID2E 0x102e
+#define VIRTIO_PCI_DEVICE_ID2F 0x102f
+#define VIRTIO_PCI_DEVICE_ID30 0x1030
+#define VIRTIO_PCI_DEVICE_ID31 0x1031
+#define VIRTIO_PCI_DEVICE_ID32 0x1032
+#define VIRTIO_PCI_DEVICE_ID33 0x1033
+#define VIRTIO_PCI_DEVICE_ID34 0x1034
+#define VIRTIO_PCI_DEVICE_ID35 0x1035
+#define VIRTIO_PCI_DEVICE_ID36 0x1036
+#define VIRTIO_PCI_DEVICE_ID37 0x1037
+#define VIRTIO_PCI_DEVICE_ID38 0x1038
+#define VIRTIO_PCI_DEVICE_ID39 0x1039
+#define VIRTIO_PCI_DEVICE_ID3A 0x103a
+#define VIRTIO_PCI_DEVICE_ID3B 0x103b
+#define VIRTIO_PCI_DEVICE_ID3C 0x103c
+#define VIRTIO_PCI_DEVICE_ID3D 0x103d
+#define VIRTIO_PCI_DEVICE_ID3E 0x103e
+#define VIRTIO_PCI_DEVICE_ID3F 0x103f
+
+/**
+ * virtio pci transport driver private data
+ *
+ * @ioaddr: pci transport device register base
+ * (the legacy transport carries no version field; its ABI version is always 0)
+ */
+struct virtio_pci_priv {
+ void __iomem *ioaddr;
+};
+
+static int virtio_pci_get_config(struct udevice *udev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+ void __iomem *ioaddr = priv->ioaddr + VIRTIO_PCI_CONFIG_OFF(false);
+ u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ ptr[i] = ioread8(ioaddr + i);
+
+ return 0;
+}
+
+static int virtio_pci_set_config(struct udevice *udev, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+ void __iomem *ioaddr = priv->ioaddr + VIRTIO_PCI_CONFIG_OFF(false);
+ const u8 *ptr = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ iowrite8(ptr[i], ioaddr + i);
+
+ return 0;
+}
+
+static int virtio_pci_get_status(struct udevice *udev, u8 *status)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ *status = ioread8(priv->ioaddr + VIRTIO_PCI_STATUS);
+
+ return 0;
+}
+
+static int virtio_pci_set_status(struct udevice *udev, u8 status)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ /* We should never be setting status to 0 */
+ WARN_ON(status == 0);
+
+ iowrite8(status, priv->ioaddr + VIRTIO_PCI_STATUS);
+
+ return 0;
+}
+
+static int virtio_pci_reset(struct udevice *udev)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ /* 0 status means a reset */
+ iowrite8(0, priv->ioaddr + VIRTIO_PCI_STATUS);
+
+ /*
+ * Flush out the status write, and flush in device writes,
+ * including MSI-X interrupts, if any.
+ */
+ ioread8(priv->ioaddr + VIRTIO_PCI_STATUS);
+
+ return 0;
+}
+
+static int virtio_pci_get_features(struct udevice *udev, u64 *features)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ /*
+ * When someone needs more than 32 feature bits, we'll need to
+ * steal a bit to indicate that the rest are somewhere else.
+ */
+ *features = ioread32(priv->ioaddr + VIRTIO_PCI_HOST_FEATURES);
+
+ return 0;
+}
+
+static int virtio_pci_set_features(struct udevice *udev)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+
+ /* Make sure we don't have any features > 32 bits! */
+ WARN_ON((u32)uc_priv->features != uc_priv->features);
+
+ /* We only support 32 feature bits */
+ iowrite32(uc_priv->features, priv->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
+
+ return 0;
+}
+
+static struct virtqueue *virtio_pci_setup_vq(struct udevice *udev,
+ unsigned int index)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+ struct virtqueue *vq;
+ unsigned int num;
+ int err;
+
+ /* Select the queue we're interested in */
+ iowrite16(index, priv->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+ /* Check if queue is either not available or already active */
+ num = ioread16(priv->ioaddr + VIRTIO_PCI_QUEUE_NUM);
+ if (!num || ioread32(priv->ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
+ err = -ENOENT;
+ goto error_available;
+ }
+
+ /* Create the vring */
+ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, udev);
+ if (!vq) {
+ err = -ENOMEM;
+ goto error_available;
+ }
+
+ /* Activate the queue */
+ iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+ priv->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+ return vq;
+
+error_available:
+ return ERR_PTR(err);
+}
+
+static void virtio_pci_del_vq(struct virtqueue *vq)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(vq->vdev);
+ unsigned int index = vq->index;
+
+ iowrite16(index, priv->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+
+ /* Select and deactivate the queue */
+ iowrite32(0, priv->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+ vring_del_virtqueue(vq);
+}
+
+static int virtio_pci_del_vqs(struct udevice *udev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
+ virtio_pci_del_vq(vq);
+
+ return 0;
+}
+
/**
 * virtio_pci_find_vqs() - create all requested virtqueues
 *
 * @udev:	the legacy virtio PCI transport device
 * @nvqs:	number of virtqueues to create
 * @vqs:	output array receiving the created virtqueues
 *
 * On failure every queue set up by earlier iterations is torn down again.
 *
 * Return: 0 on success, a negative error code otherwise
 */
static int virtio_pci_find_vqs(struct udevice *udev, unsigned int nvqs,
			       struct virtqueue *vqs[])
{
	/* unsigned to match @nvqs and avoid a signed/unsigned comparison */
	unsigned int i;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_pci_setup_vq(udev, i);
		if (IS_ERR(vqs[i])) {
			/* Undo the queues already created */
			virtio_pci_del_vqs(udev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}
+
+static int virtio_pci_notify(struct udevice *udev, struct virtqueue *vq)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ /*
+ * We write the queue's selector into the notification register
+ * to signal the other end
+ */
+ iowrite16(vq->index, priv->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+
+ return 0;
+}
+
+static int virtio_pci_bind(struct udevice *udev)
+{
+ static int num_devs;
+ char name[20];
+
+ /* Create a unique device name for PCI type devices */
+ sprintf(name, "%s#%u", VIRTIO_PCI_DRV_NAME, num_devs++);
+ device_set_name(udev, name);
+
+ return 0;
+}
+
/*
 * Probe the legacy (transitional) virtio PCI transport.
 *
 * Validates the ABI revision, derives the virtio device/vendor IDs from
 * the PCI subsystem IDs (as the legacy interface mandates), and maps the
 * I/O BAR used for all subsequent register accesses.
 */
static int virtio_pci_probe(struct udevice *udev)
{
	struct pci_child_platdata *pplat = dev_get_parent_platdata(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct virtio_pci_priv *priv = dev_get_priv(udev);
	u16 subvendor, subdevice;
	u8 revision;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pplat->device < 0x1000 || pplat->device > 0x103f)
		return -ENODEV;

	/* Transitional devices must have a PCI revision ID of 0 */
	dm_pci_read_config8(udev, PCI_REVISION_ID, &revision);
	if (revision != VIRTIO_PCI_ABI_VERSION) {
		printf("(%s): virtio_pci expected ABI version %d, got %d\n",
		       udev->name, VIRTIO_PCI_ABI_VERSION, revision);
		return -ENODEV;
	}

	/*
	 * Transitional devices must have the PCI subsystem device ID matching
	 * the virtio device ID
	 */
	dm_pci_read_config16(udev, PCI_SUBSYSTEM_ID, &subdevice);
	dm_pci_read_config16(udev, PCI_SUBSYSTEM_VENDOR_ID, &subvendor);
	uc_priv->device = subdevice;
	uc_priv->vendor = subvendor;

	/* Legacy registers live in the I/O-space BAR0 */
	priv->ioaddr = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0, PCI_REGION_IO);
	if (!priv->ioaddr)
		return -ENXIO;
	debug("(%s): virtio legacy device reg base %04lx\n",
	      udev->name, (ulong)priv->ioaddr);

	debug("(%s): device (%d) vendor (%08x) version (%d)\n", udev->name,
	      uc_priv->device, uc_priv->vendor, revision);

	return 0;
}
+
/* Transport operations exported to the virtio uclass */
static const struct dm_virtio_ops virtio_pci_ops = {
	.get_config = virtio_pci_get_config,
	.set_config = virtio_pci_set_config,
	.get_status = virtio_pci_get_status,
	.set_status = virtio_pci_set_status,
	.reset = virtio_pci_reset,
	.get_features = virtio_pci_get_features,
	.set_features = virtio_pci_set_features,
	.find_vqs = virtio_pci_find_vqs,
	.del_vqs = virtio_pci_del_vqs,
	.notify = virtio_pci_notify,
};

U_BOOT_DRIVER(virtio_pci_legacy) = {
	.name	= VIRTIO_PCI_DRV_NAME,
	.id	= UCLASS_VIRTIO,
	.ops	= &virtio_pci_ops,
	.bind	= virtio_pci_bind,
	.probe	= virtio_pci_probe,
	.priv_auto_alloc_size = sizeof(struct virtio_pci_priv),
};

/*
 * Transitional devices occupy PCI device IDs 0x1000..0x103f
 * (vendor 0x1af4); list them all so any virtio device binds here.
 */
static struct pci_device_id virtio_pci_supported[] = {
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID00) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID01) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID02) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID03) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID04) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID05) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID06) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID07) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID08) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID09) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0F) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID10) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID11) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID12) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID13) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID14) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID15) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID16) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID17) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID18) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID19) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1F) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID20) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID21) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID22) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID23) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID24) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID25) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID26) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID27) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID28) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID29) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2F) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID30) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID31) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID32) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID33) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID34) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID35) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID36) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID37) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID38) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID39) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3F) },
	{},
};

U_BOOT_PCI_DEVICE(virtio_pci_legacy, virtio_pci_supported);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
new file mode 100644
index 0000000000..da76aea8d1
--- /dev/null
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * VirtIO PCI bus transport driver
+ * Ported from Linux drivers/virtio/virtio_pci*.c
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <virtio_ring.h>
+#include <dm/device.h>
+#include <linux/compat.h>
+#include <linux/io.h>
+#include "virtio_pci.h"
+
+#define VIRTIO_PCI_DRV_NAME "virtio-pci.m"
+
+/* PCI device ID in the range 0x1040 to 0x107f */
+#define VIRTIO_PCI_VENDOR_ID 0x1af4
+#define VIRTIO_PCI_DEVICE_ID00 0x1040
+#define VIRTIO_PCI_DEVICE_ID01 0x1041
+#define VIRTIO_PCI_DEVICE_ID02 0x1042
+#define VIRTIO_PCI_DEVICE_ID03 0x1043
+#define VIRTIO_PCI_DEVICE_ID04 0x1044
+#define VIRTIO_PCI_DEVICE_ID05 0x1045
+#define VIRTIO_PCI_DEVICE_ID06 0x1046
+#define VIRTIO_PCI_DEVICE_ID07 0x1047
+#define VIRTIO_PCI_DEVICE_ID08 0x1048
+#define VIRTIO_PCI_DEVICE_ID09 0x1049
+#define VIRTIO_PCI_DEVICE_ID0A 0x104a
+#define VIRTIO_PCI_DEVICE_ID0B 0x104b
+#define VIRTIO_PCI_DEVICE_ID0C 0x104c
+#define VIRTIO_PCI_DEVICE_ID0D 0x104d
+#define VIRTIO_PCI_DEVICE_ID0E 0x104e
+#define VIRTIO_PCI_DEVICE_ID0F 0x104f
+#define VIRTIO_PCI_DEVICE_ID10 0x1050
+#define VIRTIO_PCI_DEVICE_ID11 0x1051
+#define VIRTIO_PCI_DEVICE_ID12 0x1052
+#define VIRTIO_PCI_DEVICE_ID13 0x1053
+#define VIRTIO_PCI_DEVICE_ID14 0x1054
+#define VIRTIO_PCI_DEVICE_ID15 0x1055
+#define VIRTIO_PCI_DEVICE_ID16 0x1056
+#define VIRTIO_PCI_DEVICE_ID17 0x1057
+#define VIRTIO_PCI_DEVICE_ID18 0x1058
+#define VIRTIO_PCI_DEVICE_ID19 0x1059
+#define VIRTIO_PCI_DEVICE_ID1A 0x105a
+#define VIRTIO_PCI_DEVICE_ID1B 0x105b
+#define VIRTIO_PCI_DEVICE_ID1C 0x105c
+#define VIRTIO_PCI_DEVICE_ID1D 0x105d
+#define VIRTIO_PCI_DEVICE_ID1E 0x105e
+#define VIRTIO_PCI_DEVICE_ID1F 0x105f
+#define VIRTIO_PCI_DEVICE_ID20 0x1060
+#define VIRTIO_PCI_DEVICE_ID21 0x1061
+#define VIRTIO_PCI_DEVICE_ID22 0x1062
+#define VIRTIO_PCI_DEVICE_ID23 0x1063
+#define VIRTIO_PCI_DEVICE_ID24 0x1064
+#define VIRTIO_PCI_DEVICE_ID25 0x1065
+#define VIRTIO_PCI_DEVICE_ID26 0x1066
+#define VIRTIO_PCI_DEVICE_ID27 0x1067
+#define VIRTIO_PCI_DEVICE_ID28 0x1068
+#define VIRTIO_PCI_DEVICE_ID29 0x1069
+#define VIRTIO_PCI_DEVICE_ID2A 0x106a
+#define VIRTIO_PCI_DEVICE_ID2B 0x106b
+#define VIRTIO_PCI_DEVICE_ID2C 0x106c
+#define VIRTIO_PCI_DEVICE_ID2D 0x106d
+#define VIRTIO_PCI_DEVICE_ID2E 0x106e
+#define VIRTIO_PCI_DEVICE_ID2F 0x106f
+#define VIRTIO_PCI_DEVICE_ID30 0x1070
+#define VIRTIO_PCI_DEVICE_ID31 0x1071
+#define VIRTIO_PCI_DEVICE_ID32 0x1072
+#define VIRTIO_PCI_DEVICE_ID33 0x1073
+#define VIRTIO_PCI_DEVICE_ID34 0x1074
+#define VIRTIO_PCI_DEVICE_ID35 0x1075
+#define VIRTIO_PCI_DEVICE_ID36 0x1076
+#define VIRTIO_PCI_DEVICE_ID37 0x1077
+#define VIRTIO_PCI_DEVICE_ID38 0x1078
+#define VIRTIO_PCI_DEVICE_ID39 0x1079
+#define VIRTIO_PCI_DEVICE_ID3A 0x107a
+#define VIRTIO_PCI_DEVICE_ID3B 0x107b
+#define VIRTIO_PCI_DEVICE_ID3C 0x107c
+#define VIRTIO_PCI_DEVICE_ID3D 0x107d
+#define VIRTIO_PCI_DEVICE_ID3E 0x107e
+#define VIRTIO_PCI_DEVICE_ID3F 0x107f
+
/**
 * struct virtio_pci_priv - virtio pci transport driver private data
 *
 * @common: pci transport device common register block base
 * @notify_base: pci transport device notify register block base
 * @device: pci transport device device-specific register block base
 * @device_len: pci transport device device-specific register block length
 * @notify_offset_multiplier: multiply a queue's queue_notify_off by this
 *                            value to get its doorbell offset from
 *                            @notify_base
 */
struct virtio_pci_priv {
	struct virtio_pci_common_cfg __iomem *common;
	void __iomem *notify_base;
	void __iomem *device;
	u32 device_len;
	u32 notify_offset_multiplier;
};
+
/*
 * Read @len bytes from the device-specific config space at @offset into
 * @buf, using the naturally-sized access the device guarantees to be
 * atomic; a 64-bit field is read as two 32-bit halves.
 *
 * NOTE(review): multi-access reads are not guarded by the config
 * generation counter here; callers needing a consistent snapshot must use
 * the generation op themselves.
 */
static int virtio_pci_get_config(struct udevice *udev, unsigned int offset,
				 void *buf, unsigned int len)
{
	struct virtio_pci_priv *priv = dev_get_priv(udev);
	u8 b;
	__le16 w;
	__le32 l;

	WARN_ON(offset + len > priv->device_len);

	switch (len) {
	case 1:
		b = ioread8(priv->device + offset);
		memcpy(buf, &b, sizeof(b));
		break;
	case 2:
		/* config space is little-endian per the virtio spec */
		w = cpu_to_le16(ioread16(priv->device + offset));
		memcpy(buf, &w, sizeof(w));
		break;
	case 4:
		l = cpu_to_le32(ioread32(priv->device + offset));
		memcpy(buf, &l, sizeof(l));
		break;
	case 8:
		/* 64-bit fields: low dword first, then high dword */
		l = cpu_to_le32(ioread32(priv->device + offset));
		memcpy(buf, &l, sizeof(l));
		l = cpu_to_le32(ioread32(priv->device + offset + sizeof(l)));
		memcpy(buf + sizeof(l), &l, sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}
+
/*
 * Write @len bytes from @buf into the device-specific config space at
 * @offset, mirroring virtio_pci_get_config(): a single naturally-sized
 * access for 1/2/4 bytes, two 32-bit writes for 8 bytes.
 */
static int virtio_pci_set_config(struct udevice *udev, unsigned int offset,
				 const void *buf, unsigned int len)
{
	struct virtio_pci_priv *priv = dev_get_priv(udev);
	u8 b;
	__le16 w;
	__le32 l;

	WARN_ON(offset + len > priv->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof(b));
		iowrite8(b, priv->device + offset);
		break;
	case 2:
		/* config space is little-endian per the virtio spec */
		memcpy(&w, buf, sizeof(w));
		iowrite16(le16_to_cpu(w), priv->device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof(l));
		iowrite32(le32_to_cpu(l), priv->device + offset);
		break;
	case 8:
		/* 64-bit fields: low dword first, then high dword */
		memcpy(&l, buf, sizeof(l));
		iowrite32(le32_to_cpu(l), priv->device + offset);
		memcpy(&l, buf + sizeof(l), sizeof(l));
		iowrite32(le32_to_cpu(l), priv->device + offset + sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}
+
+static int virtio_pci_generation(struct udevice *udev, u32 *counter)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ *counter = ioread8(&priv->common->config_generation);
+
+ return 0;
+}
+
+static int virtio_pci_get_status(struct udevice *udev, u8 *status)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ *status = ioread8(&priv->common->device_status);
+
+ return 0;
+}
+
+static int virtio_pci_set_status(struct udevice *udev, u8 status)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+
+ /* We should never be setting status to 0 */
+ WARN_ON(status == 0);
+
+ iowrite8(status, &priv->common->device_status);
+
+ return 0;
+}
+
/* Reset the device by writing 0 to device_status and waiting for the ack */
static int virtio_pci_reset(struct udevice *udev)
{
	struct virtio_pci_priv *priv = dev_get_priv(udev);

	/* 0 status means a reset */
	iowrite8(0, &priv->common->device_status);

	/*
	 * After writing 0 to device_status, the driver MUST wait for a read
	 * of device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 *
	 * NOTE(review): this loop has no timeout, so a broken device could
	 * hang here forever — confirm whether a bounded wait is wanted.
	 */
	while (ioread8(&priv->common->device_status))
		udelay(1000);

	return 0;
}
+
/*
 * Read the 64-bit device feature mask. Features are exposed through a
 * 32-bit window: select 0 yields bits 0-31, select 1 yields bits 32-63.
 */
static int virtio_pci_get_features(struct udevice *udev, u64 *features)
{
	struct virtio_pci_priv *priv = dev_get_priv(udev);

	iowrite32(0, &priv->common->device_feature_select);
	*features = ioread32(&priv->common->device_feature);
	iowrite32(1, &priv->common->device_feature_select);
	*features |= ((u64)ioread32(&priv->common->device_feature) << 32);

	return 0;
}
+
/*
 * Write the negotiated driver feature bits back to the device, through
 * the same 32-bit select/window scheme used for reading them.
 */
static int virtio_pci_set_features(struct udevice *udev)
{
	struct virtio_pci_priv *priv = dev_get_priv(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);

	/* The modern interface is only valid with VIRTIO_F_VERSION_1 */
	if (!__virtio_test_bit(udev, VIRTIO_F_VERSION_1)) {
		debug("virtio: device uses modern interface but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	iowrite32(0, &priv->common->guest_feature_select);
	iowrite32((u32)uc_priv->features, &priv->common->guest_feature);
	iowrite32(1, &priv->common->guest_feature_select);
	iowrite32(uc_priv->features >> 32, &priv->common->guest_feature);

	return 0;
}
+
+static struct virtqueue *virtio_pci_setup_vq(struct udevice *udev,
+ unsigned int index)
+{
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+ struct virtio_pci_common_cfg __iomem *cfg = priv->common;
+ struct virtqueue *vq;
+ u16 num;
+ u64 addr;
+ int err;
+
+ if (index >= ioread16(&cfg->num_queues))
+ return ERR_PTR(-ENOENT);
+
+ /* Select the queue we're interested in */
+ iowrite16(index, &cfg->queue_select);
+
+ /* Check if queue is either not available or already active */
+ num = ioread16(&cfg->queue_size);
+ if (!num || ioread16(&cfg->queue_enable))
+ return ERR_PTR(-ENOENT);
+
+ if (num & (num - 1)) {
+ printf("(%s): bad queue size %u", udev->name, num);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Create the vring */
+ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, udev);
+ if (!vq) {
+ err = -ENOMEM;
+ goto error_available;
+ }
+
+ /* Activate the queue */
+ iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
+
+ addr = virtqueue_get_desc_addr(vq);
+ iowrite32((u32)addr, &cfg->queue_desc_lo);
+ iowrite32(addr >> 32, &cfg->queue_desc_hi);
+
+ addr = virtqueue_get_avail_addr(vq);
+ iowrite32((u32)addr, &cfg->queue_avail_lo);
+ iowrite32(addr >> 32, &cfg->queue_avail_hi);
+
+ addr = virtqueue_get_used_addr(vq);
+ iowrite32((u32)addr, &cfg->queue_used_lo);
+ iowrite32(addr >> 32, &cfg->queue_used_hi);
+
+ iowrite16(1, &cfg->queue_enable);
+
+ return vq;
+
+error_available:
+ return ERR_PTR(err);
+}
+
/*
 * Disable one virtqueue on the device and free its ring.
 *
 * NOTE(review): as in the legacy driver, confirm whether vq->vdev here is
 * the transport device or the virtio child device; if the latter,
 * dev_get_priv(vq->vdev->parent) would be the correct lookup.
 */
static void virtio_pci_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_priv *priv = dev_get_priv(vq->vdev);
	unsigned int index = vq->index;

	/* Select the queue */
	iowrite16(index, &priv->common->queue_select);

	/* Deactivate it */
	iowrite16(0, &priv->common->queue_enable);

	vring_del_virtqueue(vq);
}
+
+static int virtio_pci_del_vqs(struct udevice *udev)
+{
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
+ virtio_pci_del_vq(vq);
+
+ return 0;
+}
+
/**
 * virtio_pci_find_vqs() - create all requested virtqueues
 *
 * @udev:	the modern virtio PCI transport device
 * @nvqs:	number of virtqueues to create
 * @vqs:	output array receiving the created virtqueues
 *
 * On failure every queue set up by earlier iterations is torn down again.
 *
 * Return: 0 on success, a negative error code otherwise
 */
static int virtio_pci_find_vqs(struct udevice *udev, unsigned int nvqs,
			       struct virtqueue *vqs[])
{
	/* unsigned to match @nvqs and avoid a signed/unsigned comparison */
	unsigned int i;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_pci_setup_vq(udev, i);
		if (IS_ERR(vqs[i])) {
			/* Undo the queues already created */
			virtio_pci_del_vqs(udev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}
+
/* Kick the device: tell it new buffers are available on @vq */
static int virtio_pci_notify(struct udevice *udev, struct virtqueue *vq)
{
	struct virtio_pci_priv *priv = dev_get_priv(udev);
	u16 off;

	/* Select the queue we're interested in */
	iowrite16(vq->index, &priv->common->queue_select);

	/* get offset of notification word for this vq */
	off = ioread16(&priv->common->queue_notify_off);

	/*
	 * We write the queue's selector into the notification register
	 * to signal the other end. The per-queue doorbell address is
	 * notify_base + queue_notify_off * notify_off_multiplier.
	 */
	iowrite16(vq->index,
		  priv->notify_base + off * priv->notify_offset_multiplier);

	return 0;
}
+
/**
 * virtio_pci_find_capability - walk capabilities to find device info
 *
 * Scans the PCI vendor-specific capability list for a virtio capability
 * of the requested type whose BAR field is valid (0..5).
 *
 * @udev: the transport device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 *
 * @return config-space offset of the capability structure, or 0 if the
 * requested type was not found
 */
static int virtio_pci_find_capability(struct udevice *udev, u8 cfg_type)
{
	int pos;
	int offset;
	u8 type, bar;

	for (pos = dm_pci_find_capability(udev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = dm_pci_find_next_capability(udev, pos, PCI_CAP_ID_VNDR)) {
		offset = pos + offsetof(struct virtio_pci_cap, cfg_type);
		dm_pci_read_config8(udev, offset, &type);
		offset = pos + offsetof(struct virtio_pci_cap, bar);
		dm_pci_read_config8(udev, offset, &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type)
			return pos;
	}

	return 0;
}
+
/**
 * virtio_pci_map_capability - map base address of the capability
 *
 * Resolves a virtio capability to a CPU pointer: BAR base address plus
 * the offset stored in the capability structure.
 *
 * @udev: the transport device
 * @off: config-space offset of the capability structure (0 = none)
 *
 * @return base address of the capability, or NULL if @off is 0
 */
static void __iomem *virtio_pci_map_capability(struct udevice *udev, int off)
{
	u8 bar;
	u32 offset;
	ulong base;
	void __iomem *p;

	if (!off)
		return NULL;

	offset = off + offsetof(struct virtio_pci_cap, bar);
	dm_pci_read_config8(udev, offset, &bar);
	/* 'offset' is reused: first as a config address, then as the value read */
	offset = off + offsetof(struct virtio_pci_cap, offset);
	dm_pci_read_config32(udev, offset, &offset);

	/*
	 * TODO: adding 64-bit BAR support
	 *
	 * Per spec, the BAR is permitted to be either 32-bit or 64-bit.
	 * For simplicity, only read the BAR address as 32-bit.
	 */
	base = dm_pci_read_bar32(udev, bar);
	p = (void __iomem *)base + offset;

	return p;
}
+
+static int virtio_pci_bind(struct udevice *udev)
+{
+ static int num_devs;
+ char name[20];
+
+ /* Create a unique device name */
+ sprintf(name, "%s#%u", VIRTIO_PCI_DRV_NAME, num_devs++);
+ device_set_name(udev, name);
+
+ return 0;
+}
+
+static int virtio_pci_probe(struct udevice *udev)
+{
+ struct pci_child_platdata *pplat = dev_get_parent_platdata(udev);
+ struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+ struct virtio_pci_priv *priv = dev_get_priv(udev);
+ u16 subvendor;
+ u8 revision;
+ int common, notify, device;
+ int offset;
+
+ /* We only own devices >= 0x1040 and <= 0x107f: leave the rest. */
+ if (pplat->device < 0x1040 || pplat->device > 0x107f)
+ return -ENODEV;
+
+ /* Transitional devices must not have a PCI revision ID of 0 */
+ dm_pci_read_config8(udev, PCI_REVISION_ID, &revision);
+
+ /* Modern devices: simply use PCI device id, but start from 0x1040. */
+ uc_priv->device = pplat->device - 0x1040;
+ dm_pci_read_config16(udev, PCI_SUBSYSTEM_VENDOR_ID, &subvendor);
+ uc_priv->vendor = subvendor;
+
+ /* Check for a common config: if not, use legacy mode (bar 0) */
+ common = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_COMMON_CFG);
+ if (!common) {
+ printf("(%s): leaving for legacy driver\n", udev->name);
+ return -ENODEV;
+ }
+
+ /* If common is there, notify should be too */
+ notify = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_NOTIFY_CFG);
+ if (!notify) {
+ printf("(%s): missing capabilities %i/%i\n", udev->name,
+ common, notify);
+ return -EINVAL;
+ }
+
+ /*
+ * Device capability is only mandatory for devices that have
+ * device-specific configuration.
+ */
+ device = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_DEVICE_CFG);
+ if (device) {
+ offset = notify + offsetof(struct virtio_pci_cap, length);
+ dm_pci_read_config32(udev, offset, &priv->device_len);
+ }
+
+ /* Map configuration structures */
+ priv->common = virtio_pci_map_capability(udev, common);
+ priv->notify_base = virtio_pci_map_capability(udev, notify);
+ priv->device = virtio_pci_map_capability(udev, device);
+ debug("(%p): common @ %p, notify base @ %p, device @ %p\n",
+ udev, priv->common, priv->notify_base, priv->device);
+
+ /* Read notify_off_multiplier from config space */
+ offset = notify + offsetof(struct virtio_pci_notify_cap,
+ notify_off_multiplier);
+ dm_pci_read_config32(udev, offset, &priv->notify_offset_multiplier);
+
+ debug("(%s): device (%d) vendor (%08x) version (%d)\n", udev->name,
+ uc_priv->device, uc_priv->vendor, revision);
+
+ return 0;
+}
+
/* Transport operations exported to the virtio uclass */
static const struct dm_virtio_ops virtio_pci_ops = {
	.get_config = virtio_pci_get_config,
	.set_config = virtio_pci_set_config,
	.generation = virtio_pci_generation,
	.get_status = virtio_pci_get_status,
	.set_status = virtio_pci_set_status,
	.reset = virtio_pci_reset,
	.get_features = virtio_pci_get_features,
	.set_features = virtio_pci_set_features,
	.find_vqs = virtio_pci_find_vqs,
	.del_vqs = virtio_pci_del_vqs,
	.notify = virtio_pci_notify,
};

U_BOOT_DRIVER(virtio_pci_modern) = {
	.name	= VIRTIO_PCI_DRV_NAME,
	.id	= UCLASS_VIRTIO,
	.ops	= &virtio_pci_ops,
	.bind	= virtio_pci_bind,
	.probe	= virtio_pci_probe,
	.priv_auto_alloc_size = sizeof(struct virtio_pci_priv),
};

/*
 * Non-transitional (modern) devices occupy PCI device IDs 0x1040..0x107f
 * (vendor 0x1af4); list them all so any virtio device binds here.
 */
static struct pci_device_id virtio_pci_supported[] = {
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID00) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID01) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID02) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID03) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID04) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID05) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID06) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID07) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID08) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID09) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID0F) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID10) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID11) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID12) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID13) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID14) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID15) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID16) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID17) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID18) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID19) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID1F) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID20) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID21) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID22) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID23) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID24) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID25) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID26) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID27) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID28) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID29) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID2F) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID30) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID31) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID32) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID33) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID34) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID35) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID36) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID37) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID38) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID39) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3A) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3B) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3C) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3D) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3E) },
	{ PCI_DEVICE(VIRTIO_PCI_VENDOR_ID, VIRTIO_PCI_DEVICE_ID3F) },
	{},
};

U_BOOT_PCI_DEVICE(virtio_pci_modern, virtio_pci_supported);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
new file mode 100644
index 0000000000..0eeb3501c2
--- /dev/null
+++ b/drivers/virtio/virtio_ring.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * virtio ring implementation
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <malloc.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <virtio_ring.h>
+
+int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs)
+{
+ struct vring_desc *desc;
+ unsigned int total_sg = out_sgs + in_sgs;
+ unsigned int i, n, avail, descs_used, uninitialized_var(prev);
+ int head;
+
+ WARN_ON(total_sg == 0);
+
+ head = vq->free_head;
+
+ desc = vq->vring.desc;
+ i = head;
+ descs_used = total_sg;
+
+ if (vq->num_free < descs_used) {
+ debug("Can't add buf len %i - avail = %i\n",
+ descs_used, vq->num_free);
+ /*
+ * FIXME: for historical reasons, we force a notify here if
+ * there are outgoing parts to the buffer. Presumably the
+ * host should service the ring ASAP.
+ */
+ if (out_sgs)
+ virtio_notify(vq->vdev, vq);
+ return -ENOSPC;
+ }
+
+ for (n = 0; n < out_sgs; n++) {
+ struct virtio_sg *sg = sgs[n];
+
+ desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
+ desc[i].addr = cpu_to_virtio64(vq->vdev, (u64)(size_t)sg->addr);
+ desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);
+
+ prev = i;
+ i = virtio16_to_cpu(vq->vdev, desc[i].next);
+ }
+ for (; n < (out_sgs + in_sgs); n++) {
+ struct virtio_sg *sg = sgs[n];
+
+ desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT |
+ VRING_DESC_F_WRITE);
+ desc[i].addr = cpu_to_virtio64(vq->vdev,
+ (u64)(uintptr_t)sg->addr);
+ desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);
+
+ prev = i;
+ i = virtio16_to_cpu(vq->vdev, desc[i].next);
+ }
+ /* Last one doesn't continue */
+ desc[prev].flags &= cpu_to_virtio16(vq->vdev, ~VRING_DESC_F_NEXT);
+
+ /* We're using some buffers from the free list. */
+ vq->num_free -= descs_used;
+
+ /* Update free pointer */
+ vq->free_head = i;
+
+ /*
+ * Put entry in available array (but don't update avail->idx
+ * until they do sync).
+ */
+ avail = vq->avail_idx_shadow & (vq->vring.num - 1);
+ vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);
+
+ /*
+ * Descriptors and available array need to be set before we expose the
+ * new available array entries.
+ */
+ virtio_wmb();
+ vq->avail_idx_shadow++;
+ vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
+ vq->num_added++;
+
+ /*
+ * This is very unlikely, but theoretically possible.
+ * Kick just in case.
+ */
+ if (unlikely(vq->num_added == (1 << 16) - 1))
+ virtqueue_kick(vq);
+
+ return 0;
+}
+
/*
 * Decide whether the host must be notified after publishing the entries
 * added since the previous kick.
 */
static bool virtqueue_kick_prepare(struct virtqueue *vq)
{
	u16 new, old;
	bool needs_kick;

	/*
	 * We need to expose available array entries before checking
	 * avail event.
	 */
	virtio_mb();

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

	if (vq->event) {
		/* Event-idx: kick only if we crossed the host's avail event */
		needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
				vring_avail_event(&vq->vring)), new, old);
	} else {
		/* Legacy: kick unless the host suppressed notifications */
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
				VRING_USED_F_NO_NOTIFY));
	}

	return needs_kick;
}
+
+void virtqueue_kick(struct virtqueue *vq)
+{
+ if (virtqueue_kick_prepare(vq))
+ virtio_notify(vq->vdev, vq);
+}
+
/*
 * Return the descriptor chain starting at @head to the free list and
 * credit the freed descriptors back to vq->num_free.
 */
static void detach_buf(struct virtqueue *vq, unsigned int head)
{
	unsigned int i;
	__virtio16 nextflag = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		i = virtio16_to_cpu(vq->vdev, vq->vring.desc[i].next);
		vq->num_free++;
	}

	/* Splice the chain onto the head of the free list */
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->num_free++;
}
+
+static inline bool more_used(const struct virtqueue *vq)
+{
+ return vq->last_used_idx != virtio16_to_cpu(vq->vdev,
+ vq->vring.used->idx);
+}
+
/**
 * virtqueue_get_buf() - get the next used buffer
 *
 * @vq:		the virtqueue to collect from
 * @len:	optional out parameter receiving the number of bytes the
 *		device wrote into the buffer
 *
 * Return: the buffer address originally passed in via virtqueue_add(),
 * or NULL if no used buffer is pending (or the used id is corrupt).
 */
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
	unsigned int i;
	u16 last_used;

	if (!more_used(vq)) {
		debug("(%s.%d): No more buffers in queue\n",
		      vq->vdev->name, vq->index);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host */
	virtio_rmb();

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
	if (len) {
		*len = virtio32_to_cpu(vq->vdev,
				       vq->vring.used->ring[last_used].len);
		debug("(%s.%d): last used idx %u with len %u\n",
		      vq->vdev->name, vq->index, i, *len);
	}

	/* Sanity-check the device-supplied descriptor id */
	if (unlikely(i >= vq->vring.num)) {
		printf("(%s.%d): id %u out of range\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	detach_buf(vq, i);
	vq->last_used_idx++;
	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(&vring_used_event(&vq->vring),
				cpu_to_virtio16(vq->vdev, vq->last_used_idx));

	return (void *)(uintptr_t)virtio64_to_cpu(vq->vdev,
						  vq->vring.desc[i].addr);
}
+
/*
 * Wrap an already-initialized vring in a struct virtqueue and register it
 * on the transport's queue list.
 *
 * NOTE(review): vq->vdev is taken from uc_priv->vdev — presumably the
 * virtio child device; confirm against virtio-uclass.c, since the
 * transport drivers' del_vq() look up their priv through this pointer.
 *
 * Return: the new virtqueue, or NULL on allocation failure.
 */
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring vring,
					       struct udevice *udev)
{
	unsigned int i;
	struct virtqueue *vq;
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct udevice *vdev = uc_priv->vdev;

	vq = malloc(sizeof(*vq));
	if (!vq)
		return NULL;

	vq->vdev = vdev;
	vq->index = index;
	vq->num_free = vring.num;
	vq->vring = vring;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->list, &uc_priv->vqs);

	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* Tell other side not to bother us */
	vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
	if (!vq->event)
		vq->vring.avail->flags = cpu_to_virtio16(vdev,
				vq->avail_flags_shadow);

	/* Put everything in free lists */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);

	return vq;
}
+
+/*
+ * vring_create_virtqueue() - allocate ring memory and create a virtqueue
+ *
+ * @index:       queue number within the device
+ * @num:         requested queue length (must be a power of 2)
+ * @vring_align: alignment required between the ring sections
+ * @udev:        the virtio transport udevice
+ *
+ * Tries progressively smaller ring sizes until an allocation larger
+ * than a page succeeds, then falls back to a sub-page allocation.
+ * Returns the new virtqueue, or NULL on bad arguments or out of memory.
+ */
+struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
+					 unsigned int vring_align,
+					 struct udevice *udev)
+{
+	struct virtqueue *vq;
+	void *queue = NULL;
+	struct vring vring;
+
+	/* We assume num is a power of 2 */
+	if (num & (num - 1)) {
+		printf("Bad virtqueue length %u\n", num);
+		return NULL;
+	}
+
+	/* TODO: allocate each queue chunk individually */
+	/* Halve num on each failed multi-page allocation attempt */
+	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
+		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
+		if (queue)
+			break;
+	}
+
+	if (!num)
+		return NULL;
+
+	if (!queue) {
+		/* Try to get a single page. You are my only hope! */
+		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
+	}
+	if (!queue)
+		return NULL;
+
+	memset(queue, 0, vring_size(num, vring_align));
+	vring_init(&vring, num, queue, vring_align);
+
+	vq = __vring_new_virtqueue(index, vring, udev);
+	if (!vq) {
+		free(queue);
+		return NULL;
+	}
+	debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
+	      queue, vq, num);
+
+	return vq;
+}
+
+/* Tear down a virtqueue: unlink it from the device's queue list and
+ * release both the ring pages (desc is the start of the allocation)
+ * and the bookkeeping structure itself.
+ */
+void vring_del_virtqueue(struct virtqueue *vq)
+{
+	list_del(&vq->list);
+	free(vq->vring.desc);
+	free(vq);
+}
+
+/* Report the queue length fixed at vring_create_virtqueue() time */
+unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
+{
+	unsigned int num = vq->vring.num;
+
+	return num;
+}
+
+/* The descriptor table sits at the very start of the ring allocation */
+ulong virtqueue_get_desc_addr(struct virtqueue *vq)
+{
+	return (ulong)(uintptr_t)vq->vring.desc;
+}
+
+/*
+ * Address of the avail ring; it lives inside the same contiguous
+ * allocation as the descriptor table, so the pointer itself is the
+ * base address plus the avail offset.
+ */
+ulong virtqueue_get_avail_addr(struct virtqueue *vq)
+{
+	return (ulong)(uintptr_t)vq->vring.avail;
+}
+
+/*
+ * Address of the used ring; like avail it is carved out of the single
+ * contiguous ring allocation, so the pointer equals base plus offset.
+ */
+ulong virtqueue_get_used_addr(struct virtqueue *vq)
+{
+	return (ulong)(uintptr_t)vq->vring.used;
+}
+
+/*
+ * virtqueue_poll() - check whether the device advanced the used ring
+ *
+ * @vq:            virtqueue to check
+ * @last_used_idx: snapshot of the used index taken by the caller
+ *
+ * A full barrier orders this read against prior ring accesses before
+ * comparing the device's current used->idx with the caller's snapshot.
+ */
+bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx)
+{
+	virtio_mb();
+
+	return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
+}
+
+/* Debug helper: print the virtqueue state plus full descriptor, avail
+ * and used ring contents.
+ */
+void virtqueue_dump(struct virtqueue *vq)
+{
+	struct vring *vr = &vq->vring;
+	unsigned int i;
+
+	printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name);
+	printf("\tindex %u, phys addr %p num %u\n",
+	       vq->index, vr->desc, vr->num);
+	printf("\tfree_head %u, num_added %u, num_free %u\n",
+	       vq->free_head, vq->num_added, vq->num_free);
+	printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n",
+	       vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);
+
+	printf("Descriptor dump:\n");
+	for (i = 0; i < vr->num; i++) {
+		printf("\tdesc[%u] = { 0x%llx, len %u, flags %u, next %u }\n",
+		       i, vr->desc[i].addr, vr->desc[i].len,
+		       vr->desc[i].flags, vr->desc[i].next);
+	}
+
+	printf("Avail ring dump:\n");
+	printf("\tflags %u, idx %u\n",
+	       vr->avail->flags, vr->avail->idx);
+	for (i = 0; i < vr->num; i++) {
+		printf("\tavail[%u] = %u\n",
+		       i, vr->avail->ring[i]);
+	}
+
+	printf("Used ring dump:\n");
+	printf("\tflags %u, idx %u\n",
+	       vr->used->flags, vr->used->idx);
+	for (i = 0; i < vr->num; i++) {
+		printf("\tused[%u] = { %u, %u }\n", i,
+		       vr->used->ring[i].id, vr->used->ring[i].len);
+	}
+}
diff --git a/drivers/virtio/virtio_sandbox.c b/drivers/virtio/virtio_sandbox.c
new file mode 100644
index 0000000000..2addb1ebc5
--- /dev/null
+++ b/drivers/virtio/virtio_sandbox.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
+ *
+ * VirtIO Sandbox transport driver, for testing purpose only
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <virtio_types.h>
+#include <virtio.h>
+#include <virtio_ring.h>
+#include <linux/compat.h>
+#include <linux/io.h>
+
+/* Per-device state for the fake sandbox transport */
+struct virtio_sandbox_priv {
+	u8 id;				/* device id (unused by the ops below) */
+	u8 status;			/* last status byte the driver wrote */
+	u64 device_features;		/* features the fake device offers */
+	u64 driver_features;		/* features the driver negotiated */
+	ulong queue_desc;		/* descriptor table address of the vq */
+	ulong queue_available;		/* avail ring address of the vq */
+	ulong queue_used;		/* used ring address of the vq */
+};
+
+/* No device config space is modelled; config reads are a no-op */
+static int virtio_sandbox_get_config(struct udevice *udev, unsigned int offset,
+				     void *buf, unsigned int len)
+{
+	return 0;
+}
+
+/* No device config space is modelled; config writes are a no-op */
+static int virtio_sandbox_set_config(struct udevice *udev, unsigned int offset,
+				     const void *buf, unsigned int len)
+{
+	return 0;
+}
+
+/* Return the status byte last stored by set_status() or reset() */
+static int virtio_sandbox_get_status(struct udevice *udev, u8 *status)
+{
+	struct virtio_sandbox_priv *priv = dev_get_priv(udev);
+
+	*status = priv->status;
+
+	return 0;
+}
+
+/*
+ * Cache the driver-written status byte. Writing 0 must go through the
+ * reset op instead, hence the warning below.
+ */
+static int virtio_sandbox_set_status(struct udevice *udev, u8 status)
+{
+	struct virtio_sandbox_priv *priv = dev_get_priv(udev);
+
+	/* We should never be setting status to 0 */
+	WARN_ON(status == 0);
+
+	priv->status = status;
+
+	return 0;
+}
+
+/* Reset the fake device by clearing its cached status byte */
+static int virtio_sandbox_reset(struct udevice *udev)
+{
+	struct virtio_sandbox_priv *priv = dev_get_priv(udev);
+
+	/* 0 status means a reset */
+	priv->status = 0;
+
+	return 0;
+}
+
+/* Report the feature bits faked up in virtio_sandbox_probe() */
+static int virtio_sandbox_get_features(struct udevice *udev, u64 *features)
+{
+	struct virtio_sandbox_priv *priv = dev_get_priv(udev);
+
+	*features = priv->device_features;
+
+	return 0;
+}
+
+/* Record the features the uclass negotiated on behalf of the driver */
+static int virtio_sandbox_set_features(struct udevice *udev)
+{
+	struct virtio_sandbox_priv *priv = dev_get_priv(udev);
+	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+
+	priv->driver_features = uc_priv->features;
+
+	return 0;
+}
+
+/*
+ * Create one virtqueue for the fake device and record its ring
+ * addresses the way a real transport would program them into hardware.
+ * Returns the new queue or ERR_PTR(-ENOMEM) on allocation failure.
+ */
+static struct virtqueue *virtio_sandbox_setup_vq(struct udevice *udev,
+						 unsigned int index)
+{
+	struct virtio_sandbox_priv *priv = dev_get_priv(udev);
+	struct virtqueue *vq;
+
+	/* A tiny 4-entry, page-aligned ring is plenty for testing */
+	vq = vring_create_virtqueue(index, 4, 4096, udev);
+	if (!vq)
+		return ERR_PTR(-ENOMEM);
+
+	priv->queue_desc = virtqueue_get_desc_addr(vq);
+	priv->queue_available = virtqueue_get_avail_addr(vq);
+	priv->queue_used = virtqueue_get_used_addr(vq);
+
+	return vq;
+}
+
+/* Tear down a single virtqueue; no transport-side state to undo here */
+static void virtio_sandbox_del_vq(struct virtqueue *vq)
+{
+	vring_del_virtqueue(vq);
+}
+
+/* Delete every virtqueue on the device's uclass-maintained queue list */
+static int virtio_sandbox_del_vqs(struct udevice *udev)
+{
+	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+	struct virtqueue *vq, *n;
+
+	/* _safe variant: virtio_sandbox_del_vq() unlinks vq from the list */
+	list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
+		virtio_sandbox_del_vq(vq);
+
+	return 0;
+}
+
+/*
+ * virtio_sandbox_find_vqs() - allocate @nvqs virtqueues for the device
+ *
+ * @udev: the virtio transport udevice
+ * @nvqs: number of queues requested
+ * @vqs:  output array, filled with the created queues
+ *
+ * On failure every queue created so far is torn down and the error code
+ * from the failing allocation is returned; 0 on success.
+ */
+static int virtio_sandbox_find_vqs(struct udevice *udev, unsigned int nvqs,
+				   struct virtqueue *vqs[])
+{
+	unsigned int i;
+
+	/* unsigned index to match @nvqs and avoid signed/unsigned compare */
+	for (i = 0; i < nvqs; ++i) {
+		vqs[i] = virtio_sandbox_setup_vq(udev, i);
+		if (IS_ERR(vqs[i])) {
+			/* Roll back the queues created before the failure */
+			virtio_sandbox_del_vqs(udev);
+			return PTR_ERR(vqs[i]);
+		}
+	}
+
+	return 0;
+}
+
+/* Nothing to kick in the fake transport; notifications are a no-op */
+static int virtio_sandbox_notify(struct udevice *udev, struct virtqueue *vq)
+{
+	return 0;
+}
+
+/* Probe: fake up a VIRTIO 1.0 block device identity for the uclass */
+static int virtio_sandbox_probe(struct udevice *udev)
+{
+	struct virtio_sandbox_priv *priv = dev_get_priv(udev);
+	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
+
+	/* fake some information for testing */
+	priv->device_features = VIRTIO_F_VERSION_1;
+	uc_priv->device = VIRTIO_ID_BLOCK;
+	/* Arbitrary vendor id spelling "ubot" */
+	uc_priv->vendor = ('u' << 24) | ('b' << 16) | ('o' << 8) | 't';
+
+	return 0;
+}
+
+/*
+ * check virtio device driver's remove routine was called to reset the
+ * device: after remove, the status byte must read back as 0.
+ *
+ * Fix: the original ignored virtio_get_status()'s return value and
+ * would then test an uninitialized local on failure; initialize it and
+ * check the call.
+ */
+static int virtio_sandbox_child_post_remove(struct udevice *vdev)
+{
+	u8 status = 0;
+	int ret;
+
+	ret = virtio_get_status(vdev, &status);
+	if (ret)
+		panic("virtio device status could not be read\n");
+	if (status)
+		panic("virtio device was not reset\n");
+
+	return 0;
+}
+
+/* Full transport ops set, including the optional notify hook */
+static const struct dm_virtio_ops virtio_sandbox1_ops = {
+	.get_config	= virtio_sandbox_get_config,
+	.set_config	= virtio_sandbox_set_config,
+	.get_status	= virtio_sandbox_get_status,
+	.set_status	= virtio_sandbox_set_status,
+	.reset		= virtio_sandbox_reset,
+	.get_features	= virtio_sandbox_get_features,
+	.set_features	= virtio_sandbox_set_features,
+	.find_vqs	= virtio_sandbox_find_vqs,
+	.del_vqs	= virtio_sandbox_del_vqs,
+	.notify		= virtio_sandbox_notify,
+};
+
+static const struct udevice_id virtio_sandbox1_ids[] = {
+	{ .compatible = "sandbox,virtio1" },
+	{ }
+};
+
+/* First fake transport: exercises the complete ops interface */
+U_BOOT_DRIVER(virtio_sandbox1) = {
+	.name	= "virtio-sandbox1",
+	.id	= UCLASS_VIRTIO,
+	.of_match = virtio_sandbox1_ids,
+	.ops	= &virtio_sandbox1_ops,
+	.probe	= virtio_sandbox_probe,
+	.child_post_remove = virtio_sandbox_child_post_remove,
+	.priv_auto_alloc_size = sizeof(struct virtio_sandbox_priv),
+};
+
+/* this one without notify op */
+static const struct dm_virtio_ops virtio_sandbox2_ops = {
+	.get_config	= virtio_sandbox_get_config,
+	.set_config	= virtio_sandbox_set_config,
+	.get_status	= virtio_sandbox_get_status,
+	.set_status	= virtio_sandbox_set_status,
+	.reset		= virtio_sandbox_reset,
+	.get_features	= virtio_sandbox_get_features,
+	.set_features	= virtio_sandbox_set_features,
+	.find_vqs	= virtio_sandbox_find_vqs,
+	.del_vqs	= virtio_sandbox_del_vqs,
+};
+
+static const struct udevice_id virtio_sandbox2_ids[] = {
+	{ .compatible = "sandbox,virtio2" },
+	{ }
+};
+
+/* Second fake transport: tests uclass behavior with .notify absent */
+U_BOOT_DRIVER(virtio_sandbox2) = {
+	.name	= "virtio-sandbox2",
+	.id	= UCLASS_VIRTIO,
+	.of_match = virtio_sandbox2_ids,
+	.ops	= &virtio_sandbox2_ops,
+	.probe	= virtio_sandbox_probe,
+	.priv_auto_alloc_size = sizeof(struct virtio_sandbox_priv),
+};