[PATCH v1] virtio-net: enable configurable tx queue size

[PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
This patch makes the virtio-net tx queue size configurable by the user,
in the range of 256 to 1024. The queue size specified by the user must
be a power of 2. If "tx_queue_size" is not specified, the default queue
size of 1024 is used.
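
For example, the property could be set on the command line roughly as
below (an illustrative invocation only; the netdev id and backend
options are placeholders, not part of this patch):

    qemu-system-x86_64 ... \
        -netdev tap,id=hostnet0,vhost=off \
        -device virtio-net-pci,netdev=hostnet0,tx_queue_size=512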

For the traditional QEMU backend, setting the tx queue size to 1024
requires the guest virtio driver to support the VIRTIO_F_MAX_CHAIN_SIZE
feature. This feature restricts the guest driver from chaining 1024
vring descriptors, which could otherwise cause the device side
implementation to pass more than 1024 iovs to writev().

VIRTIO_F_MAX_CHAIN_SIZE is a common transport feature added for all
virtio devices. However, each device has the flexibility to set its own
max chain size, which limits how many vring descriptors its driver may
chain. Currently, the max chain size of the virtio-net device is set to
1023.

If the tx queue size is set to 1024 but the VIRTIO_F_MAX_CHAIN_SIZE
feature is not supported by the guest driver, the tx queue size is
reconfigured to 512.

Signed-off-by: Wei Wang <[hidden email]>

RFC to v1 changes:
1) change VIRTIO_F_MAX_CHAIN_SIZE to be a common virtio feature (was
virtio-net specific);
2) change the default tx queue size to be 1024 (was 256);
3) for the vhost backend case, setting the tx queue size to 1024 doesn't
require VIRTIO_F_MAX_CHAIN_SIZE feature support.
---
 hw/net/virtio-net.c                            | 69 ++++++++++++++++++++++++--
 include/hw/virtio/virtio-net.h                 |  1 +
 include/hw/virtio/virtio.h                     |  2 +
 include/standard-headers/linux/virtio_config.h |  3 ++
 include/standard-headers/linux/virtio_net.h    |  2 +
 5 files changed, 73 insertions(+), 4 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 7d091c9..5c82f54 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -33,8 +33,13 @@
 
 /* previously fixed value */
 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 1024
+
 /* for now, only allow larger queues; with virtio-1, guest can downsize */
 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+#define VIRTIO_NET_TX_QUEUE_MIN_SIZE 256
+
+#define VIRTIO_NET_MAX_CHAIN_SIZE 1023
 
 /*
  * Calculate the number of bytes up to and including the given 'field' of
@@ -57,6 +62,8 @@ static VirtIOFeature feature_sizes[] = {
      .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
     {.flags = 1 << VIRTIO_NET_F_MTU,
      .end = endof(struct virtio_net_config, mtu)},
+    {.flags = 1 << VIRTIO_F_MAX_CHAIN_SIZE,
+     .end = endof(struct virtio_net_config, max_chain_size)},
     {}
 };
 
@@ -84,6 +91,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
     virtio_stw_p(vdev, &netcfg.status, n->status);
     virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
     virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
+    virtio_stw_p(vdev, &netcfg.max_chain_size, VIRTIO_NET_MAX_CHAIN_SIZE);
     memcpy(netcfg.mac, n->mac, ETH_ALEN);
     memcpy(config, &netcfg, n->config_size);
 }
@@ -635,9 +643,33 @@ static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
     return virtio_net_guest_offloads_by_features(vdev->guest_features);
 }
 
+static bool is_tx(int queue_index)
+{
+    return queue_index % 2 == 1;
+}
+
+static void virtio_net_reconfig_tx_queue_size(VirtIONet *n)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int i, num_queues = virtio_get_num_queues(vdev);
+
+    /* Return when the queue size is already less than the 1024 */
+    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE) {
+        return;
+    }
+
+    for (i = 0; i < num_queues; i++) {
+        if (is_tx(i)) {
+            n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE / 2;
+            virtio_queue_set_num(vdev, i, n->net_conf.tx_queue_size);
+        }
+    }
+}
+
 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
+    NetClientState *nc = qemu_get_queue(n->nic);
     int i;
 
     virtio_net_set_multiqueue(n,
@@ -649,6 +681,16 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
                                virtio_has_feature(features,
                                                   VIRTIO_F_VERSION_1));
 
+    /*
+     * When the traditional QEMU backend is used, using 1024 tx queue size
+     * requires the driver to support the VIRTIO_F_MAX_CHAIN_SIZE feature. If
+     * the feature is not supported, reconfigure the tx queue size to 512.
+     */
+    if (!get_vhost_net(nc->peer) &&
+        !virtio_has_feature(features, VIRTIO_F_MAX_CHAIN_SIZE)) {
+        virtio_net_reconfig_tx_queue_size(n);
+    }
+
     if (n->has_vnet_hdr) {
         n->curr_guest_offloads =
             virtio_net_guest_offloads_by_features(features);
@@ -1297,8 +1339,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
 
         out_num = elem->out_num;
         out_sg = elem->out_sg;
-        if (out_num < 1) {
-            virtio_error(vdev, "virtio-net header not in first element");
+        if (out_num < 1 || out_num > VIRTIO_NET_MAX_CHAIN_SIZE) {
+            virtio_error(vdev, "no packet or too large vring desc chain");
             virtqueue_detach_element(q->tx_vq, elem, 0);
             g_free(elem);
             return -EINVAL;
@@ -1496,13 +1538,15 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
                                            virtio_net_handle_rx);
     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+                             virtio_net_handle_tx_timer);
         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                               virtio_net_tx_timer,
                                               &n->vqs[index]);
     } else {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+                             virtio_net_handle_tx_bh);
         n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
     }
 
@@ -1891,6 +1935,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
         n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
     }
 
+    if (virtio_host_has_feature(vdev, VIRTIO_F_MAX_CHAIN_SIZE)) {
+        n->host_features |= (0x1 << VIRTIO_F_MAX_CHAIN_SIZE);
+    }
+
     virtio_net_set_config_size(n, n->host_features);
     virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
 
@@ -1910,6 +1958,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
+        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
+        (n->net_conf.tx_queue_size & (n->net_conf.tx_queue_size - 1))) {
+        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
+                   "must be a power of 2 between %d and %d.",
+                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
+                   VIRTQUEUE_MAX_SIZE);
+        virtio_cleanup(vdev);
+        return;
+    }
+
     n->max_queues = MAX(n->nic_conf.peers.queues, 1);
     if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
@@ -2089,6 +2148,8 @@ static Property virtio_net_properties[] = {
     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
+    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
+                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
     DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
     DEFINE_PROP_END_OF_LIST(),
 };
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 1eec9a2..fd944ba 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -36,6 +36,7 @@ typedef struct virtio_net_conf
     int32_t txburst;
     char *tx;
     uint16_t rx_queue_size;
+    uint16_t tx_queue_size;
     uint16_t mtu;
 } virtio_net_conf;
 
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 7b6edba..8e85e41 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -260,6 +260,8 @@ typedef struct VirtIORNGConf VirtIORNGConf;
                       VIRTIO_F_NOTIFY_ON_EMPTY, true), \
     DEFINE_PROP_BIT64("any_layout", _state, _field, \
                       VIRTIO_F_ANY_LAYOUT, true), \
+    DEFINE_PROP_BIT64("max_chain_size", _state, _field, \
+                      VIRTIO_F_MAX_CHAIN_SIZE, true), \
     DEFINE_PROP_BIT64("iommu_platform", _state, _field, \
                       VIRTIO_F_IOMMU_PLATFORM, false)
 
diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h
index b777069..b70cbfe 100644
--- a/include/standard-headers/linux/virtio_config.h
+++ b/include/standard-headers/linux/virtio_config.h
@@ -60,6 +60,9 @@
 #define VIRTIO_F_ANY_LAYOUT 27
 #endif /* VIRTIO_CONFIG_NO_LEGACY */
 
+/* Guest chains vring descriptors within a limit */
+#define VIRTIO_F_MAX_CHAIN_SIZE 31
+
 /* v1.0 compliant. */
 #define VIRTIO_F_VERSION_1 32
 
diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h
index 30ff249..729aaa8 100644
--- a/include/standard-headers/linux/virtio_net.h
+++ b/include/standard-headers/linux/virtio_net.h
@@ -76,6 +76,8 @@ struct virtio_net_config {
  uint16_t max_virtqueue_pairs;
  /* Default maximum transmit unit advice */
  uint16_t mtu;
+        /* Maximum number of vring descriptors that can be chained */
+ uint16_t max_chain_size;
 } QEMU_PACKED;
 
 /*
--
2.7.4



Re: [PATCH v1] virtio-net: enable configurable tx queue size

Michael S. Tsirkin-4
On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:

> This patch enables the virtio-net tx queue size to be configurable
> between 256 and 1024 by the user. The queue size specified by the
> user should be power of 2. If "tx_queue_size" is not offered by the
> user, the default queue size, 1024, will be used.
>
> For the traditional QEMU backend, setting the tx queue size to be 1024
> requires the guest virtio driver to support the VIRTIO_F_MAX_CHAIN_SIZE
> feature. This feature restricts the guest driver from chaining 1024
> vring descriptors, which may cause the device side implementation to
> send more than 1024 iov to writev.
>
> VIRTIO_F_MAX_CHAIN_SIZE is a common transport feature added for all
> virtio devices. However, each device has the flexibility to set the max
> chain size to limit its driver to chain vring descriptors. Currently,
> the max chain size of the virtio-net device is set to 1023.
>
> In the case that the tx queue size is set to 1024 and the
> VIRTIO_F_MAX_CHAIN_SIZE feature is not supported by the guest driver,
> the tx queue size will be reconfigured to be 512.

I'd like to see the reverse. Start with the current default.
If VIRTIO_F_MAX_CHAIN_SIZE is negotiated, increase the queue size.

> Signed-off-by: Wei Wang <[hidden email]>

The changelog below should come after the --- marker.

> RFC to v1 changes:
> 1) change VIRTIO_F_MAX_CHAIN_SIZE to be a common virtio feature (was
> virtio-net specific);
> 2) change the default tx queue size to be 1024 (was 256);
> 3) For the vhost backend case, setting tx queue size to be 1024 dosen't
> require the VIRTIO_F_MAX_CHAIN_SIZE feature support.
> ---
>  hw/net/virtio-net.c                            | 69 ++++++++++++++++++++++++--
>  include/hw/virtio/virtio-net.h                 |  1 +
>  include/hw/virtio/virtio.h                     |  2 +
>  include/standard-headers/linux/virtio_config.h |  3 ++
>  include/standard-headers/linux/virtio_net.h    |  2 +
>  5 files changed, 73 insertions(+), 4 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 7d091c9..5c82f54 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -33,8 +33,13 @@
>  
>  /* previously fixed value */
>  #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 1024
> +
>  /* for now, only allow larger queues; with virtio-1, guest can downsize */
>  #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE 256
> +
> +#define VIRTIO_NET_MAX_CHAIN_SIZE 1023
>  
>  /*
>   * Calculate the number of bytes up to and including the given 'field' of
> @@ -57,6 +62,8 @@ static VirtIOFeature feature_sizes[] = {
>       .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
>      {.flags = 1 << VIRTIO_NET_F_MTU,
>       .end = endof(struct virtio_net_config, mtu)},
> +    {.flags = 1 << VIRTIO_F_MAX_CHAIN_SIZE,
> +     .end = endof(struct virtio_net_config, max_chain_size)},
>      {}
>  };
>

Since VIRTIO_F_MAX_CHAIN_SIZE is a generic flag, this should go into the
generic virtio config section, not virtio_net_config.
 

> @@ -84,6 +91,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
>      virtio_stw_p(vdev, &netcfg.status, n->status);
>      virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
>      virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
> +    virtio_stw_p(vdev, &netcfg.max_chain_size, VIRTIO_NET_MAX_CHAIN_SIZE);
>      memcpy(netcfg.mac, n->mac, ETH_ALEN);
>      memcpy(config, &netcfg, n->config_size);
>  }
> @@ -635,9 +643,33 @@ static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
>      return virtio_net_guest_offloads_by_features(vdev->guest_features);
>  }
>  
> +static bool is_tx(int queue_index)
> +{
> +    return queue_index % 2 == 1;
> +}
> +
> +static void virtio_net_reconfig_tx_queue_size(VirtIONet *n)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    int i, num_queues = virtio_get_num_queues(vdev);
> +
> +    /* Return when the queue size is already less than the 1024 */
> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE) {
> +        return;
> +    }
> +
> +    for (i = 0; i < num_queues; i++) {
> +        if (is_tx(i)) {
> +            n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE / 2;
> +            virtio_queue_set_num(vdev, i, n->net_conf.tx_queue_size);
> +        }
> +    }
> +}
> +
>  static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
>  {
>      VirtIONet *n = VIRTIO_NET(vdev);
> +    NetClientState *nc = qemu_get_queue(n->nic);
>      int i;
>  
>      virtio_net_set_multiqueue(n,
> @@ -649,6 +681,16 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
>                                 virtio_has_feature(features,
>                                                    VIRTIO_F_VERSION_1));
>  
> +    /*
> +     * When the traditional QEMU backend is used, using 1024 tx queue size
> +     * requires the driver to support the VIRTIO_F_MAX_CHAIN_SIZE feature. If
> +     * the feature is not supported, reconfigure the tx queue size to 512.
> +     */
> +    if (!get_vhost_net(nc->peer) &&
> +        !virtio_has_feature(features, VIRTIO_F_MAX_CHAIN_SIZE)) {
> +        virtio_net_reconfig_tx_queue_size(n);
> +    }
> +
>      if (n->has_vnet_hdr) {
>          n->curr_guest_offloads =
>              virtio_net_guest_offloads_by_features(features);
> @@ -1297,8 +1339,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>  
>          out_num = elem->out_num;
>          out_sg = elem->out_sg;
> -        if (out_num < 1) {
> -            virtio_error(vdev, "virtio-net header not in first element");
> +        if (out_num < 1 || out_num > VIRTIO_NET_MAX_CHAIN_SIZE) {
> +            virtio_error(vdev, "no packet or too large vring desc chain");
>              virtqueue_detach_element(q->tx_vq, elem, 0);
>              g_free(elem);
>              return -EINVAL;

what about rx vq? we need to limit that as well, do we not?


> @@ -1496,13 +1538,15 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
>                                             virtio_net_handle_rx);
>      if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> +            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> +                             virtio_net_handle_tx_timer);
>          n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>                                                virtio_net_tx_timer,
>                                                &n->vqs[index]);
>      } else {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> +            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> +                             virtio_net_handle_tx_bh);
>          n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
>      }
>  
> @@ -1891,6 +1935,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
>      }
>  
> +    if (virtio_host_has_feature(vdev, VIRTIO_F_MAX_CHAIN_SIZE)) {
> +        n->host_features |= (0x1 << VIRTIO_F_MAX_CHAIN_SIZE);
> +    }
> +
>      virtio_net_set_config_size(n, n->host_features);
>      virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
>  
> @@ -1910,6 +1958,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          return;
>      }
>  
> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
> +        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
> +        (n->net_conf.tx_queue_size & (n->net_conf.tx_queue_size - 1))) {

Pls use is_power_of_2.
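
For reference, the check the reviewer has in mind would read roughly as
follows (a sketch using is_power_of_2() from "qemu/host-utils.h", not
the actual revised patch):

    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.tx_queue_size)) {
        /* report the error and fail realize, as in the hunk below */
    }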

> +        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
> +                   "must be a power of 2 between %d and %d.",
> +                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
> +                   VIRTQUEUE_MAX_SIZE);

I think management will need a way to discover the limits for
this value. Not sure how. Cc QAPI maintainers.


> +        virtio_cleanup(vdev);
> +        return;
> +    }
> +
>      n->max_queues = MAX(n->nic_conf.peers.queues, 1);
>      if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
>          error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
> @@ -2089,6 +2148,8 @@ static Property virtio_net_properties[] = {
>      DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
>      DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
>                         VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
> +    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
> +                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
>      DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
>      DEFINE_PROP_END_OF_LIST(),
>  };
> diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> index 1eec9a2..fd944ba 100644
> --- a/include/hw/virtio/virtio-net.h
> +++ b/include/hw/virtio/virtio-net.h
> @@ -36,6 +36,7 @@ typedef struct virtio_net_conf
>      int32_t txburst;
>      char *tx;
>      uint16_t rx_queue_size;
> +    uint16_t tx_queue_size;
>      uint16_t mtu;
>  } virtio_net_conf;
>  
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index 7b6edba..8e85e41 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -260,6 +260,8 @@ typedef struct VirtIORNGConf VirtIORNGConf;
>                        VIRTIO_F_NOTIFY_ON_EMPTY, true), \
>      DEFINE_PROP_BIT64("any_layout", _state, _field, \
>                        VIRTIO_F_ANY_LAYOUT, true), \
> +    DEFINE_PROP_BIT64("max_chain_size", _state, _field, \
> +                      VIRTIO_F_MAX_CHAIN_SIZE, true), \
>      DEFINE_PROP_BIT64("iommu_platform", _state, _field, \
>                        VIRTIO_F_IOMMU_PLATFORM, false)
>  
> diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h
> index b777069..b70cbfe 100644
> --- a/include/standard-headers/linux/virtio_config.h
> +++ b/include/standard-headers/linux/virtio_config.h
> @@ -60,6 +60,9 @@
>  #define VIRTIO_F_ANY_LAYOUT 27
>  #endif /* VIRTIO_CONFIG_NO_LEGACY */
>  
> +/* Guest chains vring descriptors within a limit */
> +#define VIRTIO_F_MAX_CHAIN_SIZE 31
> +

Pls use a flag in the high 32 bits.

>  /* v1.0 compliant. */
>  #define VIRTIO_F_VERSION_1 32
>  
> diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h
> index 30ff249..729aaa8 100644
> --- a/include/standard-headers/linux/virtio_net.h
> +++ b/include/standard-headers/linux/virtio_net.h
> @@ -76,6 +76,8 @@ struct virtio_net_config {
>   uint16_t max_virtqueue_pairs;
>   /* Default maximum transmit unit advice */
>   uint16_t mtu;
> +        /* Maximum number of vring descriptors that can be chained */
> + uint16_t max_chain_size;
>  } QEMU_PACKED;
>  
>  /*
> --
> 2.7.4


Re: [PATCH v1] virtio-net: enable configurable tx queue size

Eric Blake
On 06/05/2017 10:38 AM, Michael S. Tsirkin wrote:
> On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:
>> This patch enables the virtio-net tx queue size to be configurable
>> between 256 and 1024 by the user. The queue size specified by the
>> user should be power of 2. If "tx_queue_size" is not offered by the
>> user, the default queue size, 1024, will be used.
>>

>> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
>> +        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
>> +        (n->net_conf.tx_queue_size & (n->net_conf.tx_queue_size - 1))) {
>
> Pls use is_power_of_2.
>
>> +        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
>> +                   "must be a power of 2 between %d and %d.",

No trailing '.' in error_setg() messages, please.

>> +                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
>> +                   VIRTQUEUE_MAX_SIZE);
>
> I think management will need a way to discover the limits for
> this value. Not sure how. Cc QAPI maintainers.

Is this something that can be documented? Or is it a runtime-variable
limit, where you will need to query the limit on a per-machine basis?
Given that these look like compile-time constants, I'm leaning towards
documentation being sufficient.

--
Eric Blake, Principal Software Engineer
Red Hat, Inc.           +1-919-301-3266
Virtualization:  qemu.org | libvirt.org



Re: [PATCH v1] virtio-net: enable configurable tx queue size

Michael S. Tsirkin-4
On Mon, Jun 05, 2017 at 10:41:56AM -0500, Eric Blake wrote:

> On 06/05/2017 10:38 AM, Michael S. Tsirkin wrote:
> > On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:
> >> This patch enables the virtio-net tx queue size to be configurable
> >> between 256 and 1024 by the user. The queue size specified by the
> >> user should be power of 2. If "tx_queue_size" is not offered by the
> >> user, the default queue size, 1024, will be used.
> >>
>
> >> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
> >> +        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
> >> +        (n->net_conf.tx_queue_size & (n->net_conf.tx_queue_size - 1))) {
> >
> > Pls use is_power_of_2.
> >
> >> +        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
> >> +                   "must be a power of 2 between %d and %d.",
>
> No trailing '.' in error_setg() messages, please.
>
> >> +                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
> >> +                   VIRTQUEUE_MAX_SIZE);
> >
> > I think management will need a way to discover the limits for
> > this value. Not sure how. Cc QAPI maintainers.
>
> Is this something that can be documented? Or is it a runtime-variable
> limit, where you will need to query the limit on a per-machine basis?
> Given that it looks like compile-time documentation, I'm leaning towards
> documentation being sufficient.

We are likely to change the limits with time. E.g.
VIRTIO_NET_TX_QUEUE_MIN_SIZE is arbitrary.

> --
> Eric Blake, Principal Software Engineer
> Red Hat, Inc.           +1-919-301-3266
> Virtualization:  qemu.org | libvirt.org
>





Re: [PATCH v1] virtio-net: enable configurable tx queue size

Michael S. Tsirkin-4
In reply to this post by Wei Wang
On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:

> @@ -1910,6 +1958,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          return;
>      }
>  
> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
> +        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
> +        (n->net_conf.tx_queue_size & (n->net_conf.tx_queue_size - 1))) {
> +        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
> +                   "must be a power of 2 between %d and %d.",
> +                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
> +                   VIRTQUEUE_MAX_SIZE);
> +        virtio_cleanup(vdev);
> +        return;
> +    }
> +
>      n->max_queues = MAX(n->nic_conf.peers.queues, 1);
>      if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
>          error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "

Given that some configurations (e.g. legacy guest) ignore the value,
I'm inclined to say just force the value to be within a reasonable
limit.

--
MST


Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
In reply to this post by Michael S. Tsirkin-4
On 06/05/2017 11:38 PM, Michael S. Tsirkin wrote:

> On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:
>   /*
>    * Calculate the number of bytes up to and including the given 'field' of
> @@ -57,6 +62,8 @@ static VirtIOFeature feature_sizes[] = {
>        .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
>       {.flags = 1 << VIRTIO_NET_F_MTU,
>        .end = endof(struct virtio_net_config, mtu)},
> +    {.flags = 1 << VIRTIO_F_MAX_CHAIN_SIZE,
> +     .end = endof(struct virtio_net_config, max_chain_size)},
>       {}
>   };
>
> Using a generic VIRTIO_F_MAX_CHAIN_SIZE flag, so this should go into the
> generic virtio section, not virtio_net_config.
>    
Do you mean VIRTIO_PCI_xx under virtio_pci.h?

The reason the config field wasn't added there is that I couldn't find
space to fit the new 16-bit field into the existing layout. The last
register is "#define VIRTIO_MSI_QUEUE_VECTOR 22", and the per-driver
configuration space directly follows that last register, per
"#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24:20)".

Changing the macro to something like
VIRTIO_PCI_CONFIG_OFF(max_chain_size_enabled) would be difficult.

Do you see a good way to add the new field here?
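
For reference, the tail of the legacy register layout in
linux/virtio_pci.h looks like this (the two defines quoted above plus
the one before them), which is why a new 16-bit common register does
not slot in cleanly:

    /* existing legacy layout: the last common register occupies bytes 22-23 */
    #define VIRTIO_MSI_CONFIG_VECTOR        20
    #define VIRTIO_MSI_QUEUE_VECTOR         22
    /* the device-specific config space starts right after it */
    #define VIRTIO_PCI_CONFIG_OFF(msix_enabled)  ((msix_enabled) ? 24 : 20)

Any new common register would land exactly where the per-device config
space begins, so the macro itself would have to change.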


>> @@ -84,6 +91,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
>>       virtio_stw_p(vdev, &netcfg.status, n->status);
>>       virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
>>       virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
>> +    virtio_stw_p(vdev, &netcfg.max_chain_size, VIRTIO_NET_MAX_CHAIN_SIZE);
>>       memcpy(netcfg.mac, n->mac, ETH_ALEN);
>>       memcpy(config, &netcfg, n->config_size);
>>   }
>> @@ -635,9 +643,33 @@ static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
>>       return virtio_net_guest_offloads_by_features(vdev->guest_features);
>>   }
>>  
>> +static bool is_tx(int queue_index)
>> +{
>> +    return queue_index % 2 == 1;
>> +}
>> +
>> +static void virtio_net_reconfig_tx_queue_size(VirtIONet *n)
>> +{
>> +    VirtIODevice *vdev = VIRTIO_DEVICE(n);
>> +    int i, num_queues = virtio_get_num_queues(vdev);
>> +
>> +    /* Return when the queue size is already less than the 1024 */
>> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE) {
>> +        return;
>> +    }
>> +
>> +    for (i = 0; i < num_queues; i++) {
>> +        if (is_tx(i)) {
>> +            n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE / 2;
>> +            virtio_queue_set_num(vdev, i, n->net_conf.tx_queue_size);
>> +        }
>> +    }
>> +}
>> +
>>   static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
>>   {
>>       VirtIONet *n = VIRTIO_NET(vdev);
>> +    NetClientState *nc = qemu_get_queue(n->nic);
>>       int i;
>>  
>>       virtio_net_set_multiqueue(n,
>> @@ -649,6 +681,16 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
>>                                  virtio_has_feature(features,
>>                                                     VIRTIO_F_VERSION_1));
>>  
>> +    /*
>> +     * When the traditional QEMU backend is used, using 1024 tx queue size
>> +     * requires the driver to support the VIRTIO_F_MAX_CHAIN_SIZE feature. If
>> +     * the feature is not supported, reconfigure the tx queue size to 512.
>> +     */
>> +    if (!get_vhost_net(nc->peer) &&
>> +        !virtio_has_feature(features, VIRTIO_F_MAX_CHAIN_SIZE)) {
>> +        virtio_net_reconfig_tx_queue_size(n);
>> +    }
>> +
>>       if (n->has_vnet_hdr) {
>>           n->curr_guest_offloads =
>>               virtio_net_guest_offloads_by_features(features);
>> @@ -1297,8 +1339,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>>  
>>           out_num = elem->out_num;
>>           out_sg = elem->out_sg;
>> -        if (out_num < 1) {
>> -            virtio_error(vdev, "virtio-net header not in first element");
>> +        if (out_num < 1 || out_num > VIRTIO_NET_MAX_CHAIN_SIZE) {
>> +            virtio_error(vdev, "no packet or too large vring desc chain");
>>               virtqueue_detach_element(q->tx_vq, elem, 0);
>>               g_free(elem);
>>               return -EINVAL;
> what about rx vq? we need to limit that as well, do we not?
I think the rx vq doesn't need the limit, because there is no
off-by-one issue there like the one with writev() on the tx side.

>  
> diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h
> index b777069..b70cbfe 100644
> --- a/include/standard-headers/linux/virtio_config.h
> +++ b/include/standard-headers/linux/virtio_config.h
> @@ -60,6 +60,9 @@
>   #define VIRTIO_F_ANY_LAYOUT 27
>   #endif /* VIRTIO_CONFIG_NO_LEGACY */
>  
> +/* Guest chains vring descriptors within a limit */
> +#define VIRTIO_F_MAX_CHAIN_SIZE 31
> +
> Pls use a flag in the high 32 bit.

I think this should be considered a transport feature. How about
changing VIRTIO_TRANSPORT_F_END to 35 (was 34), and using 34 for
VIRTIO_F_MAX_CHAIN_SIZE?
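
In header terms, the suggestion would look something like this (only a
sketch of the proposal above; the bit values are the ones suggested
here, not an agreed spec assignment):

    /* proposed: treat VIRTIO_F_MAX_CHAIN_SIZE as a transport feature */
    #define VIRTIO_F_MAX_CHAIN_SIZE    34
    #define VIRTIO_TRANSPORT_F_END     35   /* was 34 */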

Best,
Wei


Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
In reply to this post by Michael S. Tsirkin-4
On 06/05/2017 11:38 PM, Michael S. Tsirkin wrote:

> On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:
>> This patch enables the virtio-net tx queue size to be configurable
>> between 256 and 1024 by the user. The queue size specified by the
>> user should be power of 2. If "tx_queue_size" is not offered by the
>> user, the default queue size, 1024, will be used.
>>
>> For the traditional QEMU backend, setting the tx queue size to be 1024
>> requires the guest virtio driver to support the VIRTIO_F_MAX_CHAIN_SIZE
>> feature. This feature restricts the guest driver from chaining 1024
>> vring descriptors, which may cause the device side implementation to
>> send more than 1024 iov to writev.
>>
>> VIRTIO_F_MAX_CHAIN_SIZE is a common transport feature added for all
>> virtio devices. However, each device has the flexibility to set the max
>> chain size to limit its driver to chain vring descriptors. Currently,
>> the max chain size of the virtio-net device is set to 1023.
>>
>> In the case that the tx queue size is set to 1024 and the
>> VIRTIO_F_MAX_CHAIN_SIZE feature is not supported by the guest driver,
>> the tx queue size will be reconfigured to be 512.
> I'd like to see the reverse. Start with the current default.
> If VIRTIO_F_MAX_CHAIN_SIZE is negotiated, increase the queue size.
>

OK, we can let the queue size start at 256. How about increasing it to
1024 in the following two cases:
1) VIRTIO_F_MAX_CHAIN_SIZE is negotiated; or
2) the backend is vhost.
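
As a rough sketch of that policy (reusing names already used in the
patch; this is not code from the series):

    /* keep the 256 default unless it is known that 1024 is safe to use */
    if (virtio_has_feature(features, VIRTIO_F_MAX_CHAIN_SIZE) ||
        get_vhost_net(nc->peer)) {
        n->net_conf.tx_queue_size = VIRTQUEUE_MAX_SIZE;  /* i.e. 1024 */
        /* then resize the tx vqs with virtio_queue_set_num(), as the
         * existing virtio_net_reconfig_tx_queue_size() does */
    }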

Best,
Wei


Re: [PATCH v1] virtio-net: enable configurable tx queue size

Michael S. Tsirkin-4
On Wed, Jun 07, 2017 at 09:04:29AM +0800, Wei Wang wrote:

> On 06/05/2017 11:38 PM, Michael S. Tsirkin wrote:
> > On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:
> > > This patch enables the virtio-net tx queue size to be configurable
> > > between 256 and 1024 by the user. The queue size specified by the
> > > user should be power of 2. If "tx_queue_size" is not offered by the
> > > user, the default queue size, 1024, will be used.
> > >
> > > For the traditional QEMU backend, setting the tx queue size to be 1024
> > > requires the guest virtio driver to support the VIRTIO_F_MAX_CHAIN_SIZE
> > > feature. This feature restricts the guest driver from chaining 1024
> > > vring descriptors, which may cause the device side implementation to
> > > send more than 1024 iov to writev.
> > >
> > > VIRTIO_F_MAX_CHAIN_SIZE is a common transport feature added for all
> > > virtio devices. However, each device has the flexibility to set the max
> > > chain size to limit its driver to chain vring descriptors. Currently,
> > > the max chain size of the virtio-net device is set to 1023.
> > >
> > > In the case that the tx queue size is set to 1024 and the
> > > VIRTIO_F_MAX_CHAIN_SIZE feature is not supported by the guest driver,
> > > the tx queue size will be reconfigured to be 512.
> > I'd like to see the reverse. Start with the current default.
> > If VIRTIO_F_MAX_CHAIN_SIZE is negotiated, increase the queue size.
> >
>
> OK, we can let the queue size start with 256, and how about
> increasing it to 1024 in the following two cases:

I think it should be
1) VIRTIO_F_MAX_CHAIN_SIZE is negotiated
and
2) user requested large size

> 1) VIRTIO_F_MAX_CHAIN_SIZE is negotiated; or
> 2) the backend is vhost.

For vhost we also need the vhost backend to support
VIRTIO_F_MAX_CHAIN_SIZE. We also need to send the max chain size to the
backend.

> Best,
> Wei


Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
On 06/09/2017 03:01 AM, Michael S. Tsirkin wrote:

> On Wed, Jun 07, 2017 at 09:04:29AM +0800, Wei Wang wrote:
>> On 06/05/2017 11:38 PM, Michael S. Tsirkin wrote:
>>> On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:
>>>> This patch enables the virtio-net tx queue size to be configurable
>>>> between 256 and 1024 by the user. The queue size specified by the
>>>> user should be power of 2. If "tx_queue_size" is not offered by the
>>>> user, the default queue size, 1024, will be used.
>>>>
>>>> For the traditional QEMU backend, setting the tx queue size to be 1024
>>>> requires the guest virtio driver to support the VIRTIO_F_MAX_CHAIN_SIZE
>>>> feature. This feature restricts the guest driver from chaining 1024
>>>> vring descriptors, which may cause the device side implementation to
>>>> send more than 1024 iov to writev.
>>>>
>>>> VIRTIO_F_MAX_CHAIN_SIZE is a common transport feature added for all
>>>> virtio devices. However, each device has the flexibility to set the max
>>>> chain size to limit its driver to chain vring descriptors. Currently,
>>>> the max chain size of the virtio-net device is set to 1023.
>>>>
>>>> In the case that the tx queue size is set to 1024 and the
>>>> VIRTIO_F_MAX_CHAIN_SIZE feature is not supported by the guest driver,
>>>> the tx queue size will be reconfigured to be 512.
>>> I'd like to see the reverse. Start with the current default.
>>> If VIRTIO_F_MAX_CHAIN_SIZE is negotiated, increase the queue size.
>>>
>> OK, we can let the queue size start with 256, and how about
>> increasing it to 1024 in the following two cases:
> I think it should be
> 1) VIRTIO_F_MAX_CHAIN_SIZE is negotiated
> and
> 2) user requested large size
>
>> 1) VIRTIO_F_MAX_CHAIN_SIZE is negotiated; or
>> 2) the backend is vhost.
> For vhost we also need vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
> We also need to send the max chain size to backend.
>
I think the limitation we are dealing with is that the virtio-net
backend implementation in QEMU may pass more than 1024 iovs to
writev(). In this case, the QEMU backend uses the "max_chain_size"
register to tell the driver the max size of the vring_desc chain. So I
think it should be the device (backend) sending the max size to the
driver, rather than the other way around.
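
On the guest side, the driver would then simply read the field after
feature negotiation, along these lines (a Linux-driver sketch, not part
of this series; the config member is the one this patch adds):

    u16 max_chain_size;

    /* only meaningful when the feature bit was offered and accepted */
    if (virtio_has_feature(vdev, VIRTIO_F_MAX_CHAIN_SIZE))
        virtio_cread(vdev, struct virtio_net_config, max_chain_size,
                     &max_chain_size);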

For the vhost-user and vhost-net backend cases, they don't have such a
limitation as the QEMU backend, right? If there is no such limitation,
I think that even without negotiating VIRTIO_F_MAX_CHAIN_SIZE, the
device should be safe to use a 1024 tx queue size when the backend is
vhost.

Best,
Wei







Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
Ping for comments, thanks.

On 06/09/2017 11:00 AM, Wei Wang wrote:

> On 06/09/2017 03:01 AM, Michael S. Tsirkin wrote:
>> On Wed, Jun 07, 2017 at 09:04:29AM +0800, Wei Wang wrote:
>>> On 06/05/2017 11:38 PM, Michael S. Tsirkin wrote:
>>>> On Mon, Jun 05, 2017 at 04:57:29PM +0800, Wei Wang wrote:
>>>>> This patch enables the virtio-net tx queue size to be configurable
>>>>> between 256 and 1024 by the user. The queue size specified by the
>>>>> user should be power of 2. If "tx_queue_size" is not offered by the
>>>>> user, the default queue size, 1024, will be used.
>>>>>
>>>>> For the traditional QEMU backend, setting the tx queue size to be
>>>>> 1024
>>>>> requires the guest virtio driver to support the
>>>>> VIRTIO_F_MAX_CHAIN_SIZE
>>>>> feature. This feature restricts the guest driver from chaining 1024
>>>>> vring descriptors, which may cause the device side implementation to
>>>>> send more than 1024 iov to writev.
>>>>>
>>>>> VIRTIO_F_MAX_CHAIN_SIZE is a common transport feature added for all
>>>>> virtio devices. However, each device has the flexibility to set
>>>>> the max
>>>>> chain size to limit its driver to chain vring descriptors. Currently,
>>>>> the max chain size of the virtio-net device is set to 1023.
>>>>>
>>>>> In the case that the tx queue size is set to 1024 and the
>>>>> VIRTIO_F_MAX_CHAIN_SIZE feature is not supported by the guest driver,
>>>>> the tx queue size will be reconfigured to be 512.
>>>> I'd like to see the reverse. Start with the current default.
>>>> If VIRTIO_F_MAX_CHAIN_SIZE is negotiated, increase the queue size.
>>>>
>>> OK, we can let the queue size start with 256, and how about
>>> increasing it to 1024 in the following two cases:
>> I think it should be
>> 1) VIRTIO_F_MAX_CHAIN_SIZE is negotiated
>> and
>> 2) user requested large size
>>
>>> 1) VIRTIO_F_MAX_CHAIN_SIZE is negotiated; or
>>> 2) the backend is vhost.
>> For vhost we also need vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
>> We also need to send the max chain size to backend.
>>
> I think the limitation that we are dealing with is that the virtio-net
> backend implementation in QEMU is possible to pass more than
> 1024 iov to writev. In this case, the QEMU backend uses the
> "max_chain_size" register to tell the driver the max size of the
> vring_desc chain. So, I think it should be the device (backend)
> sending the max size to the driver, rather than the other way
> around.
>
> For the vhost-user and vhost-net backend cases, they don't have
> such limitation as the QEMU backend, right?
> If no such limitation, I think without the negotiation of
> VIRTIO_F_MAX_CHAIN_SIZE, the device should be safe to use 1024
> tx queue size if it is the vhost backend.
>
> Best,
> Wei
>
>
>
>
>
>
>



Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Michael S. Tsirkin-4
On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
> Ping for comments, thanks.

This was only posted a week ago, might be a bit too short for some
people.  A couple of weeks is more reasonable before you ping.  Also, I
sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
address these.

--
MST


Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
On 06/13/2017 04:43 AM, Michael S. Tsirkin wrote:
> On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
>> Ping for comments, thanks.
> This was only posted a week ago, might be a bit too short for some
> people.
OK, sorry for the push.
> A couple of weeks is more reasonable before you ping.  Also, I
> sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
> address these.
>
I responded to the comments. The main question is that I'm not sure why
we need the vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE. IMHO,
that should be a feature proposed to solve the possible issue caused by
the QEMU-implemented backend.

Best,
Wei


Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Jason Wang-8


On 06/13/2017 11:10, Wei Wang wrote:

> On 06/13/2017 04:43 AM, Michael S. Tsirkin wrote:
>> On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
>>> Ping for comments, thanks.
>> This was only posted a week ago, might be a bit too short for some
>> people.
> OK, sorry for the push.
>> A couple of weeks is more reasonable before you ping.  Also, I
>> sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
>> address these.
>>
> I responded to the comments. The main question is that I'm not sure
> why we need the vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
> IMHO, that should be a feature proposed to solve the possible issue
> caused by the QEMU implemented backend.

The issue is what if there's a mismatch of max #sgs between qemu and vhost?

Thanks

>
> Best,
> Wei
>
>



Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
On 06/13/2017 11:19 AM, Jason Wang wrote:

>
>
> On 2017年06月13日 11:10, Wei Wang wrote:
>> On 06/13/2017 04:43 AM, Michael S. Tsirkin wrote:
>>> On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
>>>> Ping for comments, thanks.
>>> This was only posted a week ago, might be a bit too short for some
>>> people.
>> OK, sorry for the push.
>>> A couple of weeks is more reasonable before you ping.  Also, I
>>> sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
>>> address these.
>>>
>> I responded to the comments. The main question is that I'm not sure
>> why we need the vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
>> IMHO, that should be a feature proposed to solve the possible issue
>> caused by the QEMU implemented backend.
>
> The issue is what if there's a mismatch of max #sgs between qemu and
> vhost?
>

When the vhost backend is used, QEMU is not involved in the data path.
The vhost backend
directly gets what is offered by the guest from the vq. Why would there
be a mismatch of
max #sgs between QEMU and vhost, and what is the QEMU side max #sgs used
for? Thanks.

Best,
Wei



Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Jason Wang-8


On 06/13/2017 11:51, Wei Wang wrote:

> On 06/13/2017 11:19 AM, Jason Wang wrote:
>>
>>
>> On 2017年06月13日 11:10, Wei Wang wrote:
>>> On 06/13/2017 04:43 AM, Michael S. Tsirkin wrote:
>>>> On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
>>>>> Ping for comments, thanks.
>>>> This was only posted a week ago, might be a bit too short for some
>>>> people.
>>> OK, sorry for the push.
>>>> A couple of weeks is more reasonable before you ping.  Also, I
>>>> sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
>>>> address these.
>>>>
>>> I responded to the comments. The main question is that I'm not sure
>>> why we need the vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
>>> IMHO, that should be a feature proposed to solve the possible issue
>>> caused by the QEMU implemented backend.
>>
>> The issue is what if there's a mismatch of max #sgs between qemu and
>> vhost?
>>
>
> When the vhost backend is used, QEMU is not involved in the data path.
> The vhost backend
> directly gets what is offered by the guest from the vq. Why would
> there be a mismatch of
> max #sgs between QEMU and vhost, and what is the QEMU side max #sgs
> used for? Thanks.

You need to query the backend max #sgs in this case at least, no? If
not, how do you know the value is supported by the backend?

Thanks

>
> Best,
> Wei
>
>



Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Jason Wang-8


On 06/13/2017 11:55, Jason Wang wrote:

>
>
> On 2017年06月13日 11:51, Wei Wang wrote:
>> On 06/13/2017 11:19 AM, Jason Wang wrote:
>>>
>>>
>>> On 2017年06月13日 11:10, Wei Wang wrote:
>>>> On 06/13/2017 04:43 AM, Michael S. Tsirkin wrote:
>>>>> On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
>>>>>> Ping for comments, thanks.
>>>>> This was only posted a week ago, might be a bit too short for some
>>>>> people.
>>>> OK, sorry for the push.
>>>>> A couple of weeks is more reasonable before you ping.  Also, I
>>>>> sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
>>>>> address these.
>>>>>
>>>> I responded to the comments. The main question is that I'm not sure
>>>> why we need the vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
>>>> IMHO, that should be a feature proposed to solve the possible issue
>>>> caused by the QEMU implemented backend.
>>>
>>> The issue is what if there's a mismatch of max #sgs between qemu and
>>> vhost?
>>>
>>
>> When the vhost backend is used, QEMU is not involved in the data
>> path. The vhost backend
>> directly gets what is offered by the guest from the vq.

FYI, qemu will try to fall back to userspace if there's something wrong
with vhost-kernel (e.g. the IOMMU support). This doesn't work for
vhost-user actually, but it works for vhost-kernel.

Thanks

>> Why would there be a mismatch of
>> max #sgs between QEMU and vhost, and what is the QEMU side max #sgs
>> used for? Thanks.
>
> You need query the backend max #sgs in this case at least. no? If not
> how do you know the value is supported by the backend?
>
> Thanks
>
>>
>> Best,
>> Wei
>>
>>
>
>



Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
In reply to this post by Jason Wang-8
On 06/13/2017 11:55 AM, Jason Wang wrote:

>
> On 2017年06月13日 11:51, Wei Wang wrote:
>> On 06/13/2017 11:19 AM, Jason Wang wrote:
>>>
>>> On 2017年06月13日 11:10, Wei Wang wrote:
>>>> On 06/13/2017 04:43 AM, Michael S. Tsirkin wrote:
>>>>> On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
>>>>>> Ping for comments, thanks.
>>>>> This was only posted a week ago, might be a bit too short for some
>>>>> people.
>>>> OK, sorry for the push.
>>>>> A couple of weeks is more reasonable before you ping.  Also, I
>>>>> sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
>>>>> address these.
>>>>>
>>>> I responded to the comments. The main question is that I'm not sure
>>>> why we need the vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
>>>> IMHO, that should be a feature proposed to solve the possible issue
>>>> caused by the QEMU implemented backend.
>>> The issue is what if there's a mismatch of max #sgs between qemu and
>>> vhost?
>>>
>> When the vhost backend is used, QEMU is not involved in the data path.
>> The vhost backend
>> directly gets what is offered by the guest from the vq. Why would
>> there be a mismatch of
>> max #sgs between QEMU and vhost, and what is the QEMU side max #sgs
>> used for? Thanks.
> You need query the backend max #sgs in this case at least. no? If not
> how do you know the value is supported by the backend?
>
> Thanks
>
Here is my thought: the vhost backend has already been supporting 1024
sgs, so I think it might not be necessary to query the max sgs that the
vhost backend supports. In the setup phase, when QEMU detects that the
backend is vhost, it assumes 1024 max sgs are supported, instead of
making an extra call to query.

The advantage is that people who are using the vhost backend can
upgrade to the 1024 tx queue size by only applying the QEMU patches.
Adding an extra call to query the size would require patching their
vhost backend (like vhost-user), which is difficult for them.


Best,
Wei









Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Wei Wang
In reply to this post by Jason Wang-8
On 06/13/2017 11:59 AM, Jason Wang wrote:

>
> On 2017年06月13日 11:55, Jason Wang wrote:
>> The issue is what if there's a mismatch of max #sgs between qemu and
>> vhost?
>>
>>> When the vhost backend is used, QEMU is not involved in the data
>>> path. The vhost backend
>>> directly gets what is offered by the guest from the vq.
> FYI, qemu will try to fallback to userspace if there's something wrong
> with vhost-kernel (e.g the IOMMU support). This doesn't work for
> vhost-user actually, but it works for vhost-kernel.
>
> Thanks
>

That wouldn't be a problem. When it falls back to the QEMU backend, the
"max_chain_size" will be set according to the QEMU backend (e.g. 1023).
The guest will read the max_chain_size register.

Best,
Wei


Re: [virtio-dev] Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Jason Wang-8
In reply to this post by Wei Wang


On 06/13/2017 14:08, Wei Wang wrote:

> On 06/13/2017 11:55 AM, Jason Wang wrote:
>>
>> On 2017年06月13日 11:51, Wei Wang wrote:
>>> On 06/13/2017 11:19 AM, Jason Wang wrote:
>>>>
>>>> On 2017年06月13日 11:10, Wei Wang wrote:
>>>>> On 06/13/2017 04:43 AM, Michael S. Tsirkin wrote:
>>>>>> On Mon, Jun 12, 2017 at 05:30:46PM +0800, Wei Wang wrote:
>>>>>>> Ping for comments, thanks.
>>>>>> This was only posted a week ago, might be a bit too short for some
>>>>>> people.
>>>>> OK, sorry for the push.
>>>>>> A couple of weeks is more reasonable before you ping.  Also, I
>>>>>> sent a bunch of comments on Thu, 8 Jun 2017.  You should probably
>>>>>> address these.
>>>>>>
>>>>> I responded to the comments. The main question is that I'm not sure
>>>>> why we need the vhost backend to support VIRTIO_F_MAX_CHAIN_SIZE.
>>>>> IMHO, that should be a feature proposed to solve the possible issue
>>>>> caused by the QEMU implemented backend.
>>>> The issue is what if there's a mismatch of max #sgs between qemu and
>>>> vhost?
>>>>
>>> When the vhost backend is used, QEMU is not involved in the data path.
>>> The vhost backend
>>> directly gets what is offered by the guest from the vq. Why would
>>> there be a mismatch of
>>> max #sgs between QEMU and vhost, and what is the QEMU side max #sgs
>>> used for? Thanks.
>> You need query the backend max #sgs in this case at least. no? If not
>> how do you know the value is supported by the backend?
>>
>> Thanks
>>
> Here is my thought: vhost backend has already been supporting 1024 sgs,
> so I think it might not be necessary to query the max sgs that the vhost
> backend supports. In the setup phase, when QEMU detects the backend is
> vhost, it assumes 1024 max sgs is supported, instead of giving an extra
> call to query.

We can probably assume that vhost-kernel supports up to 1024 sgs. But
what about other vhost-user backends?

And what you said here makes me ask one of my questions from the past:

Do we plan to extend 1024 to a larger value, or does 1024 look good for
the coming years? If we only care about 1024, there's no need for a new
config field; a feature flag is more than enough. If we want to extend
it to e.g. 2048, we definitely need to query the vhost backend's limit
(even for vhost-kernel).

Thanks

>
> The advantage is that people who is using the vhost backend can upgrade
> to use 1024 tx queue size by only applying the QEMU patches. Adding an
> extra
> call to query the size would need to patch their vhost backend (like
> vhost-user),
> which is difficult for them.
>
>
> Best,
> Wei
>
>
>
>
>
>
>
>
>



Re: [virtio-dev] Re: [PATCH v1] virtio-net: enable configurable tx queue size

Jason Wang-8
In reply to this post by Wei Wang


On 06/13/2017 14:13, Wei Wang wrote:

> On 06/13/2017 11:59 AM, Jason Wang wrote:
>>
>> On 2017年06月13日 11:55, Jason Wang wrote:
>>> The issue is what if there's a mismatch of max #sgs between qemu and
>>> vhost?
>>>
>>>> When the vhost backend is used, QEMU is not involved in the data
>>>> path. The vhost backend
>>>> directly gets what is offered by the guest from the vq.
>> FYI, qemu will try to fallback to userspace if there's something wrong
>> with vhost-kernel (e.g the IOMMU support). This doesn't work for
>> vhost-user actually, but it works for vhost-kernel.
>>
>> Thanks
>>
>
> That wouldn't be a problem. When it falls back to the QEMU backend,
> the "max_chain_size" will be set according to the QEMU backend
> (e.g. 1023). Guest will read the max_chain_size register.

What if there's a backend that supports less than 1023? Or what if, in
the future, we increase the limit to e.g. 2048?

Thanks

>
> Best,
> Wei
>

