Skip to content

Commit

Permalink
virtio_net: Add a lock for per queue RX coalesce
Browse files Browse the repository at this point in the history
Once the RTNL locking around the control buffer is removed, there can be
contention on the per-queue RX interrupt coalescing data. Use a mutex
per queue. A mutex is required because virtnet_send_command can sleep.

Signed-off-by: Daniel Jurgens <[email protected]>
Reviewed-by: Heng Qi <[email protected]>
Tested-by: Heng Qi <[email protected]>
Acked-by: Jason Wang <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
  • Loading branch information
Daniel Jurgens authored and Paolo Abeni committed May 7, 2024
1 parent 650d77c commit 4d4ac2e
Showing 1 changed file with 41 additions and 12 deletions.
53 changes: 41 additions & 12 deletions drivers/net/virtio_net.c
Original file line number Diff line number Diff line change
Expand Up @@ -312,6 +312,9 @@ struct receive_queue {
/* Is dynamic interrupt moderation enabled? */
bool dim_enabled;

/* Used to protect dim_enabled and inter_coal */
struct mutex dim_lock;

/* Dynamic Interrupt Moderation */
struct dim dim;

Expand Down Expand Up @@ -2365,6 +2368,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */
if (received < budget) {
napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
/* Intentionally not taking dim_lock here. This may result in a
* spurious net_dim call. But if that happens virtnet_rx_dim_work
* will not act on the scheduled work.
*/
if (napi_complete && rq->dim_enabled)
virtnet_rx_dim_update(vi, rq);
}
Expand Down Expand Up @@ -3247,9 +3254,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err;

/* The reason is same as the transmit virtqueue reset */
mutex_lock(&vi->rq[i].dim_lock);
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
vi->intr_coal_rx.max_usecs,
vi->intr_coal_rx.max_packets);
mutex_unlock(&vi->rq[i].dim_lock);
if (err)
return err;
}
Expand Down Expand Up @@ -4255,6 +4264,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
int ret = 0;
int i;

if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
Expand All @@ -4264,16 +4274,22 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
return -EINVAL;

/* Acquire all queues dim_locks */
for (i = 0; i < vi->max_queue_pairs; i++)
mutex_lock(&vi->rq[i].dim_lock);

if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true;
for (i = 0; i < vi->max_queue_pairs; i++)
vi->rq[i].dim_enabled = true;
return 0;
goto unlock;
}

coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
if (!coal_rx)
return -ENOMEM;
if (!coal_rx) {
ret = -ENOMEM;
goto unlock;
}

if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
Expand All @@ -4291,17 +4307,22 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
&sgs_rx))
return -EINVAL;
&sgs_rx)) {
ret = -EINVAL;
goto unlock;
}

vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
}
unlock:
for (i = vi->max_queue_pairs - 1; i >= 0; i--)
mutex_unlock(&vi->rq[i].dim_lock);

return 0;
return ret;
}

static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
Expand All @@ -4325,19 +4346,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
u16 queue)
{
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
bool cur_rx_dim = vi->rq[queue].dim_enabled;
u32 max_usecs, max_packets;
bool cur_rx_dim;
int err;

mutex_lock(&vi->rq[queue].dim_lock);
cur_rx_dim = vi->rq[queue].dim_enabled;
max_usecs = vi->rq[queue].intr_coal.max_usecs;
max_packets = vi->rq[queue].intr_coal.max_packets;

if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
ec->rx_max_coalesced_frames != max_packets))
ec->rx_max_coalesced_frames != max_packets)) {
mutex_unlock(&vi->rq[queue].dim_lock);
return -EINVAL;
}

if (rx_ctrl_dim_on && !cur_rx_dim) {
vi->rq[queue].dim_enabled = true;
mutex_unlock(&vi->rq[queue].dim_lock);
return 0;
}

Expand All @@ -4350,10 +4376,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
ec->rx_coalesce_usecs,
ec->rx_max_coalesced_frames);
if (err)
return err;

return 0;
mutex_unlock(&vi->rq[queue].dim_lock);
return err;
}

static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
Expand Down Expand Up @@ -4390,6 +4414,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)

qnum = rq - vi->rq;

mutex_lock(&rq->dim_lock);
if (!rq->dim_enabled)
goto out;

Expand All @@ -4405,6 +4430,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
dim->state = DIM_START_MEASURE;
}
out:
mutex_unlock(&rq->dim_lock);
rtnl_unlock();
}

Expand Down Expand Up @@ -4543,11 +4569,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
return -EINVAL;

if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
mutex_lock(&vi->rq[queue].dim_lock);
ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
mutex_unlock(&vi->rq[queue].dim_lock);
} else {
ec->rx_max_coalesced_frames = 1;

Expand Down Expand Up @@ -5377,6 +5405,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)

u64_stats_init(&vi->rq[i].stats.syncp);
u64_stats_init(&vi->sq[i].stats.syncp);
mutex_init(&vi->rq[i].dim_lock);
}

return 0;
Expand Down

0 comments on commit 4d4ac2e

Please sign in to comment.