Adding multiple TX/RX queues to loopback device driver

Now that we have added an IOCTL mechanism to the loopback driver (article here), we will update the loopback device driver to support multiple device driver queues for transmit and receive.

The device driver still processes each packet in one go. No locking mechanisms are in place yet to protect the queue data. The queues are initialized within the driver: the alloc_netdev API is used for network device creation and private-structure allocation, while skb_queue_head_init initializes each of the transmit and receive queues.

Presently, the alloc_netdev_mqs API call is seen in certain device drivers to allocate the different transmit and receive queues for the network device. The queues set up by alloc_netdev_mqs are kernel-level queues owned by the networking core and are not meant to be directly manipulated by the driver.

In later articles, we will discuss the usage of alloc_netdev_mqs and how it differs from an internal initialization of transmit and receive queues, such as the one presently performed in this network device driver.

The code for multi-queue support is provided below:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/sockios.h>

#define DRV_NAME "vivek_net"

MODULE_AUTHOR("Vivekananda Uppunda");
MODULE_DESCRIPTION("Virtual netdev with multi-queue buffer management");
MODULE_LICENSE("GPL");

#define VNET_IOCTL_RESET_STATS   (SIOCDEVPRIVATE + 0)

/* Number of TX and RX queues */
#define VNET_TX_QUEUES 4
#define VNET_RX_QUEUES 2

/* -------------------------------------------------- */
/* Private data */
/* -------------------------------------------------- */
/*
 * Per-device private state, allocated alongside the net_device by
 * alloc_netdev().  Holds the driver's own software packet buffers;
 * these are distinct from the kernel-level queues the networking
 * core manages for the device.
 */
struct vnet_priv {
    struct sk_buff_head tx_queue[VNET_TX_QUEUES];  /* software TX buffers */
    struct sk_buff_head rx_queue[VNET_RX_QUEUES];  /* software RX buffers */
};

/* -------------------------------------------------- */
/* Software RX processing (simple loopback) */
/* -------------------------------------------------- */
/*
 * Drain every software TX queue, looping each packet back as received
 * traffic, then deliver everything in the software RX queues to the
 * network stack.  No locking beyond what skb_queue_* provides.
 */
static void vnet_process_tx_queues(struct net_device *dev)
{
    struct vnet_priv *priv = netdev_priv(dev);
    struct sk_buff *skb;
    int qidx;

    /*
     * Loop every queued TX packet back into software RX queue 0
     * (a single RX queue keeps this demonstration simple).
     */
    for (qidx = 0; qidx < VNET_TX_QUEUES; qidx++) {
        for (;;) {
            skb = skb_dequeue(&priv->tx_queue[qidx]);
            if (!skb)
                break;

            skb->dev = dev;
            skb->protocol = eth_type_trans(skb, dev);
            skb->ip_summed = CHECKSUM_UNNECESSARY;

            dev->stats.rx_packets++;
            dev->stats.rx_bytes += skb->len;

            skb_queue_tail(&priv->rx_queue[0], skb);
        }
    }

    /* Hand everything sitting in the software RX queues up to the stack. */
    for (qidx = 0; qidx < VNET_RX_QUEUES; qidx++) {
        for (;;) {
            skb = skb_dequeue(&priv->rx_queue[qidx]);
            if (!skb)
                break;
            netif_rx(skb);
        }
    }
}

/* -------------------------------------------------- */
/* IOCTL function */
/* -------------------------------------------------- */
/*
 * Handler for SIOCDEVPRIVATE-range ioctls.  Only
 * VNET_IOCTL_RESET_STATS is supported; everything else is rejected
 * with -EOPNOTSUPP.
 */
static int vnet_siocdevprivate(struct net_device *dev,
                               struct ifreq *ifr,
                               void __user *data,
                               int cmd)
{
    pr_info("vnet_siocdevprivate\n");

    if (cmd != VNET_IOCTL_RESET_STATS) {
        pr_info("%s: default not supported\n", dev->name);
        return -EOPNOTSUPP;
    }

    /* Zero the entire statistics block in one shot. */
    memset(&dev->stats, 0, sizeof(dev->stats));
    pr_info("%s: stats reset via ioctl\n", dev->name);

    return 0;
}

/* -------------------------------------------------- */
/* Transmit function */
/* -------------------------------------------------- */
/*
 * .ndo_start_xmit handler: account the packet, buffer it in the
 * software TX queue selected by the skb's queue mapping, then drain
 * all queues immediately (simple synchronous model).
 */
static netdev_tx_t vnet_start_xmit(struct sk_buff *skb,
                                   struct net_device *dev)
{
    struct vnet_priv *priv = netdev_priv(dev);
    unsigned int queue = skb_get_queue_mapping(skb);

    /* Guard against a mapping beyond our software TX queues. */
    if (queue >= VNET_TX_QUEUES)
        queue = 0;

    dev->stats.tx_packets++;
    dev->stats.tx_bytes += skb->len;

    /* Buffer the packet in the selected software TX queue... */
    skb_queue_tail(&priv->tx_queue[queue], skb);

    /* ...and loop everything back right away. */
    vnet_process_tx_queues(dev);

    return NETDEV_TX_OK;
}

/* -------------------------------------------------- */
/* Open / Close */
/* -------------------------------------------------- */
/*
 * vnet_open() - bring the interface up.
 *
 * Initializes the software TX/RX skb queues, then enables
 * transmission.  netif_tx_start_all_queues() is used rather than
 * netif_start_queue(): the latter only starts kernel TX queue 0,
 * which would leave the remaining TX queues of a multi-queue device
 * permanently stopped and unusable by the stack.
 *
 * Returns 0 on success.
 */
static int vnet_open(struct net_device *dev)
{
    struct vnet_priv *priv = netdev_priv(dev);
    int i;

    for (i = 0; i < VNET_TX_QUEUES; i++)
        skb_queue_head_init(&priv->tx_queue[i]);
    for (i = 0; i < VNET_RX_QUEUES; i++)
        skb_queue_head_init(&priv->rx_queue[i]);

    /* Start every kernel-level TX queue, not just queue 0. */
    netif_tx_start_all_queues(dev);

    pr_info("%s: device opened\n", dev->name);
    return 0;
}

/*
 * vnet_stop() - bring the interface down.
 *
 * Stops all kernel-level TX queues (netif_tx_stop_all_queues()
 * rather than netif_stop_queue(), which only stops queue 0 of a
 * multi-queue device), then frees any packets still sitting in the
 * software TX/RX queues.
 *
 * Returns 0 on success.
 */
static int vnet_stop(struct net_device *dev)
{
    struct vnet_priv *priv = netdev_priv(dev);
    struct sk_buff *skb;
    int i;

    /* Stop every kernel-level TX queue, not just queue 0. */
    netif_tx_stop_all_queues(dev);

    /* Flush software queues */
    for (i = 0; i < VNET_TX_QUEUES; i++) {
        while ((skb = skb_dequeue(&priv->tx_queue[i])) != NULL)
            dev_kfree_skb(skb);
    }
    for (i = 0; i < VNET_RX_QUEUES; i++) {
        while ((skb = skb_dequeue(&priv->rx_queue[i])) != NULL)
            dev_kfree_skb(skb);
    }

    pr_info("%s: device closed\n", dev->name);
    return 0;
}

/* -------------------------------------------------- */
/* net_device operations */
/* -------------------------------------------------- */
/* Driver entry points installed on the net_device in vnet_setup(). */
static const struct net_device_ops vnet_netdev_ops = {
    .ndo_open             = vnet_open,             /* ifup */
    .ndo_stop             = vnet_stop,             /* ifdown */
    .ndo_start_xmit       = vnet_start_xmit,       /* packet transmit */
    .ndo_siocdevprivate   = vnet_siocdevprivate,   /* SIOCDEVPRIVATE ioctls */
};

/* -------------------------------------------------- */
/* Device setup */
/* -------------------------------------------------- */
/*
 * vnet_setup() - one-time configuration callback passed to the
 * netdev allocator.
 *
 * Note: dev->num_tx_queues and dev->num_rx_queues must NOT be written
 * here.  They are fixed at allocation time, when the core sizes its
 * netdev_queue arrays from them; overwriting them afterwards makes
 * the qdisc layer index past the arrays that were actually allocated.
 * The queue counts belong in the alloc_netdev_mqs() call instead.
 */
static void vnet_setup(struct net_device *dev)
{
    ether_setup(dev);                   /* Ethernet defaults (MTU, header ops) */

    dev->netdev_ops = &vnet_netdev_ops;
    dev->flags |= IFF_NOARP;            /* virtual device: no ARP needed */
    dev->features |= NETIF_F_HW_CSUM;

    eth_hw_addr_random(dev);            /* random locally-administered MAC */
}

/* -------------------------------------------------- */
/* Module init / exit */
/* -------------------------------------------------- */
/* The single virtual device instance this module registers. */
static struct net_device *vnet_dev;

/*
 * vnet_init() - module entry point.
 *
 * Allocates and registers the virtual device.  alloc_netdev_mqs() is
 * used so the kernel-level TX/RX queue arrays are sized to
 * VNET_TX_QUEUES/VNET_RX_QUEUES at allocation time; plain
 * alloc_netdev() allocates exactly one of each, and patching the
 * queue counts afterwards would make the core index past the
 * netdev_queue arrays it allocated.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init vnet_init(void)
{
    int ret;

    vnet_dev = alloc_netdev_mqs(sizeof(struct vnet_priv),
                                DRV_NAME "%d",
                                NET_NAME_UNKNOWN,
                                vnet_setup,
                                VNET_TX_QUEUES,
                                VNET_RX_QUEUES);
    if (!vnet_dev)
        return -ENOMEM;

    ret = register_netdev(vnet_dev);
    if (ret) {
        free_netdev(vnet_dev);
        return ret;
    }

    pr_info(DRV_NAME ": virtual network device registered\n");
    return 0;
}

/*
 * vnet_exit() - module exit point.
 *
 * Unregisters the device (which invokes vnet_stop() if the interface
 * is up) and then releases the net_device memory.
 */
static void __exit vnet_exit(void)
{
    unregister_netdev(vnet_dev);
    free_netdev(vnet_dev);  /* must follow unregister_netdev() */
    pr_info(DRV_NAME ": module unloaded\n");
}

module_init(vnet_init);
module_exit(vnet_exit);

The same application code provided earlier for the loopback driver will also work with the current multi-queue loopback virtual network driver.

A brief understanding of alloc_netdev_mqs

Leave a Reply

Your email address will not be published. Required fields are marked *