Skip to content

Commit

Permalink
vhost: add logging mechanism for reconnection
Browse files Browse the repository at this point in the history
This patch introduces a way for the backend to keep track
of the information needed to be able to reconnect without
frontend cooperation.

It will be used for VDUSE, which does not provide an interface
for the backend to save and later recover the local virtqueue
metadata needed to reconnect.

Vhost-user support could also be added for improved packed
ring reconnection support.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbox@nvidia.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
  • Loading branch information
mcoquelin committed Sep 24, 2024
1 parent 94d8b04 commit 65e07ad
Show file tree
Hide file tree
Showing 5 changed files with 54 additions and 3 deletions.
2 changes: 2 additions & 0 deletions lib/vhost/vhost.c
Original file line number Diff line number Diff line change
Expand Up @@ -1712,9 +1712,11 @@ rte_vhost_set_vring_base(int vid, uint16_t queue_id,
vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
vq->last_used_idx = last_used_idx & 0x7fff;
vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
vhost_virtqueue_reconnect_log_packed(vq);
} else {
vq->last_avail_idx = last_avail_idx;
vq->last_used_idx = last_used_idx;
vhost_virtqueue_reconnect_log_split(vq);
}

return 0;
Expand Down
41 changes: 38 additions & 3 deletions lib/vhost/vhost.h
Original file line number Diff line number Diff line change
Expand Up @@ -269,6 +269,24 @@ struct vhost_async {
};
};

/* Version of the reconnect memory layout; bump on any incompatible change. */
#define VHOST_RECONNECT_VERSION 0x0
#define VHOST_MAX_VRING 0x100
#define VHOST_MAX_QUEUE_PAIRS 0x80

/*
 * Per-virtqueue state logged so the backend can reconnect without frontend
 * cooperation. Cache-aligned, presumably so concurrent updates to different
 * vrings do not share a cache line — TODO confirm.
 */
struct __rte_cache_aligned vhost_reconnect_vring {
	/* Next available-ring index the backend will consume. */
	uint16_t last_avail_idx;
	/* Wrap counter for the avail ring; only meaningful for packed rings. */
	bool avail_wrap_counter;
};

/*
 * Device-level reconnection log. NOTE(review): this layout looks like a
 * persisted/shared-memory ABI (it carries VHOST_RECONNECT_VERSION), so field
 * order and sizes must not change without bumping the version — confirm with
 * the consumer of this structure.
 */
struct vhost_reconnect_data {
	/* Layout version, VHOST_RECONNECT_VERSION at creation time. */
	uint32_t version;
	/* Negotiated feature bits at the time of logging. */
	uint64_t features;
	/* Virtio device status byte. */
	uint8_t status;
	/* Device config space snapshot. */
	struct virtio_net_config config;
	/* Number of entries actually in use in vring[]. */
	uint32_t nr_vrings;
	struct vhost_reconnect_vring vring[VHOST_MAX_VRING];
};

/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
Expand Down Expand Up @@ -351,6 +369,7 @@ struct __rte_cache_aligned vhost_virtqueue {
struct virtqueue_stats stats;

RTE_ATOMIC(bool) irq_pending;
struct vhost_reconnect_vring *reconnect_log;
};

/* Virtio device status as per Virtio specification */
Expand All @@ -362,9 +381,6 @@ struct __rte_cache_aligned vhost_virtqueue {
#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET 0x40
#define VIRTIO_DEVICE_STATUS_FAILED 0x80

#define VHOST_MAX_VRING 0x100
#define VHOST_MAX_QUEUE_PAIRS 0x80

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

Expand Down Expand Up @@ -538,8 +554,26 @@ struct __rte_cache_aligned virtio_net {
struct rte_vhost_user_extern_ops extern_ops;

struct vhost_backend_ops *backend_ops;

struct vhost_reconnect_data *reconnect_log;
};

static __rte_always_inline void
vhost_virtqueue_reconnect_log_split(struct vhost_virtqueue *vq)
{
if (vq->reconnect_log != NULL)
vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
}

static __rte_always_inline void
vhost_virtqueue_reconnect_log_packed(struct vhost_virtqueue *vq)
{
if (vq->reconnect_log != NULL) {
vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
vq->reconnect_log->avail_wrap_counter = vq->avail_wrap_counter;
}
}

static inline void
vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func)
__rte_assert_exclusive_lock(&vq->access_lock)
Expand Down Expand Up @@ -584,6 +618,7 @@ vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
vq->avail_wrap_counter ^= 1;
vq->last_avail_idx -= vq->size;
}
vhost_virtqueue_reconnect_log_packed(vq);
}

void __vhost_log_cache_write(struct virtio_net *dev,
Expand Down
4 changes: 4 additions & 0 deletions lib/vhost/vhost_user.c
Original file line number Diff line number Diff line change
Expand Up @@ -954,6 +954,7 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
vq->last_used_idx, vq->used->idx);
vq->last_used_idx = vq->used->idx;
vq->last_avail_idx = vq->used->idx;
vhost_virtqueue_reconnect_log_split(vq);
VHOST_CONFIG_LOG(dev->ifname, WARNING,
"some packets maybe resent for Tx and dropped for Rx");
}
Expand Down Expand Up @@ -1039,9 +1040,11 @@ vhost_user_set_vring_base(struct virtio_net **pdev,
*/
vq->last_used_idx = vq->last_avail_idx;
vq->used_wrap_counter = vq->avail_wrap_counter;
vhost_virtqueue_reconnect_log_packed(vq);
} else {
vq->last_used_idx = ctx->msg.payload.state.num;
vq->last_avail_idx = ctx->msg.payload.state.num;
vhost_virtqueue_reconnect_log_split(vq);
}

VHOST_CONFIG_LOG(dev->ifname, INFO,
Expand Down Expand Up @@ -1997,6 +2000,7 @@ vhost_check_queue_inflights_split(struct virtio_net *dev,
}

vq->last_avail_idx += resubmit_num;
vhost_virtqueue_reconnect_log_split(vq);

if (resubmit_num) {
resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
Expand Down
8 changes: 8 additions & 0 deletions lib/vhost/virtio_net.c
Original file line number Diff line number Diff line change
Expand Up @@ -1433,6 +1433,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
}

vq->last_avail_idx += num_buffers;
vhost_virtqueue_reconnect_log_split(vq);
}

do_data_copy_enqueue(dev, vq);
Expand Down Expand Up @@ -1845,6 +1846,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
pkts_info[slot_idx].mbuf = pkts[pkt_idx];

vq->last_avail_idx += num_buffers;
vhost_virtqueue_reconnect_log_split(vq);
}

if (unlikely(pkt_idx == 0))
Expand Down Expand Up @@ -1873,6 +1875,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
/* recover shadow used ring and available ring */
vq->shadow_used_idx -= num_descs;
vq->last_avail_idx -= num_descs;
vhost_virtqueue_reconnect_log_split(vq);
}

/* keep used descriptors */
Expand Down Expand Up @@ -2088,6 +2091,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
vq->avail_wrap_counter ^= 1;
}
vhost_virtqueue_reconnect_log_packed(vq);

if (async->buffer_idx_packed >= buffers_err)
async->buffer_idx_packed -= buffers_err;
Expand Down Expand Up @@ -3170,6 +3174,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,

if (likely(vq->shadow_used_idx)) {
vq->last_avail_idx += vq->shadow_used_idx;
vhost_virtqueue_reconnect_log_split(vq);
do_data_copy_dequeue(vq);
flush_shadow_used_ring_split(dev, vq);
vhost_vring_call_split(dev, vq);
Expand Down Expand Up @@ -3842,6 +3847,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
async->desc_idx_split++;

vq->last_avail_idx++;
vhost_virtqueue_reconnect_log_split(vq);
}

if (unlikely(dropped))
Expand All @@ -3860,6 +3866,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
pkt_idx = n_xfer;
/* recover available ring */
vq->last_avail_idx -= pkt_err;
vhost_virtqueue_reconnect_log_split(vq);

/**
* recover async channel copy related structures and free pktmbufs
Expand Down Expand Up @@ -4141,6 +4148,7 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
vq->last_avail_idx += vq->size - descs_err;
vq->avail_wrap_counter ^= 1;
}
vhost_virtqueue_reconnect_log_packed(vq);
}

async->pkts_idx += pkt_idx;
Expand Down
2 changes: 2 additions & 0 deletions lib/vhost/virtio_net_ctrl.c
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
cvq->last_avail_idx++;
if (cvq->last_avail_idx >= cvq->size)
cvq->last_avail_idx -= cvq->size;
vhost_virtqueue_reconnect_log_split(cvq);

if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
vhost_avail_event(cvq) = cvq->last_avail_idx;
Expand All @@ -181,6 +182,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
cvq->last_avail_idx++;
if (cvq->last_avail_idx >= cvq->size)
cvq->last_avail_idx -= cvq->size;
vhost_virtqueue_reconnect_log_split(cvq);

if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
vhost_avail_event(cvq) = cvq->last_avail_idx;
Expand Down

0 comments on commit 65e07ad

Please sign in to comment.