Skip to content

Commit

Permalink
event/cnxk: remove single event enqueue and dequeue
Browse files Browse the repository at this point in the history
Provide only burst enqueue and dequeue.

Signed-off-by: Pavan Nikhilesh <[email protected]>
  • Loading branch information
PavanNikhilesh authored and jerinjacobk committed Oct 22, 2024
1 parent 8b565b3 commit a83fc0f
Show file tree
Hide file tree
Showing 105 changed files with 187 additions and 2,203 deletions.
79 changes: 5 additions & 74 deletions drivers/event/cnxk/cn10k_eventdev.c
Original file line number Diff line number Diff line change
Expand Up @@ -311,98 +311,49 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
{
#if !defined(CNXK_DIS_TMPLT_FUNC)
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,

NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_t sso_hws_reas_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_reas_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_t sso_hws_reas_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_reas_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_t sso_hws_reas_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_seg_##name,

NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_reas_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_seg_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_t sso_hws_reas_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_seg_##name,
NIX_RX_FASTPATH_MODES
#undef R
};

const event_dequeue_burst_t sso_hws_reas_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_seg_burst_##name,
NIX_RX_FASTPATH_MODES
Expand All @@ -424,48 +375,33 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)

if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
if (dev->rx_offloads & NIX_RX_REAS_F) {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_reas_deq_seg);
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_reas_deq_seg_burst);
if (dev->is_timeout_deq) {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
sso_hws_reas_deq_tmo_seg);
if (dev->is_timeout_deq)
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_reas_deq_tmo_seg_burst);
}
} else {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_deq_seg_burst);

if (dev->is_timeout_deq) {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
sso_hws_deq_tmo_seg);
if (dev->is_timeout_deq)
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_deq_tmo_seg_burst);
}
}
} else {
if (dev->rx_offloads & NIX_RX_REAS_F) {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_reas_deq);
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_reas_deq_burst);

if (dev->is_timeout_deq) {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
sso_hws_reas_deq_tmo);
if (dev->is_timeout_deq)
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_reas_deq_tmo_burst);
}
} else {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_burst);

if (dev->is_timeout_deq) {
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_tmo);
if (dev->is_timeout_deq)
CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_deq_tmo_burst);
}
}
}

Expand All @@ -486,12 +422,9 @@ cn10k_sso_fp_blk_fns_set(struct rte_eventdev *event_dev)
#if defined(CNXK_DIS_TMPLT_FUNC)
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

event_dev->dequeue = cn10k_sso_hws_deq_all_offload;
event_dev->dequeue_burst = cn10k_sso_hws_deq_burst_all_offload;
if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) {
event_dev->dequeue = cn10k_sso_hws_deq_all_offload_tst;
if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)
event_dev->dequeue_burst = cn10k_sso_hws_deq_burst_all_offload_tst;
}
event_dev->txa_enqueue = cn10k_sso_hws_tx_adptr_enq_seg_all_offload;
event_dev->txa_enqueue_same_dest = cn10k_sso_hws_tx_adptr_enq_seg_all_offload;
if (dev->tx_offloads & (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | NIX_TX_OFFLOAD_VLAN_QINQ_F |
Expand All @@ -514,7 +447,6 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
cn10k_sso_fp_blk_fns_set(event_dev);
cn10k_sso_fp_tmplt_fns_set(event_dev);

event_dev->enqueue = cn10k_sso_hws_enq;
event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
Expand Down Expand Up @@ -848,7 +780,6 @@ eventdev_fops_update(struct rte_eventdev *event_dev)
struct rte_event_fp_ops *fp_op =
rte_event_fp_ops + event_dev->data->dev_id;

fp_op->dequeue = event_dev->dequeue;
fp_op->dequeue_burst = event_dev->dequeue_burst;
}

Expand Down
49 changes: 22 additions & 27 deletions drivers/event/cnxk/cn10k_worker.c
Original file line number Diff line number Diff line change
Expand Up @@ -107,32 +107,6 @@ sso_lmt_aw_wait_fc(struct cn10k_sso_hws *ws, int64_t req)
}
}

/*
 * Enqueue a single event on a CN10K SSO hardware work slot (HWS).
 *
 * port: pointer to the per-lcore struct cn10k_sso_hws.
 * ev:   event whose op field selects NEW / FORWARD / RELEASE handling.
 *
 * Returns the number of events enqueued: 1 on success (NEW success,
 * FORWARD, RELEASE), 0 when the NEW enqueue fails or the op is unknown.
 */
uint16_t __rte_hot
cn10k_sso_hws_enq(void *port, const struct rte_event *ev)
{
	struct cn10k_sso_hws *hws = port;

	if (ev->op == RTE_EVENT_OP_NEW)
		return cn10k_sso_hws_new_event(hws, ev);

	if (ev->op == RTE_EVENT_OP_FORWARD) {
		cn10k_sso_hws_forward_event(hws, ev);
		return 1;
	}

	if (ev->op == RTE_EVENT_OP_RELEASE) {
		if (hws->swtag_req) {
			/* A software-tag switch is pending: complete it as a
			 * deschedule instead of flushing the tag.
			 */
			cnxk_sso_hws_desched(ev->u64, hws->base);
			hws->swtag_req = 0;
		} else {
			cnxk_sso_hws_swtag_flush(hws->base);
		}
		return 1;
	}

	/* Unrecognized op: nothing was enqueued. */
	return 0;
}

#define VECTOR_SIZE_BITS 0xFFFFFFFFFFF80000ULL
#define VECTOR_GET_LINE_OFFSET(line) (19 + (3 * line))

Expand Down Expand Up @@ -384,8 +358,29 @@ uint16_t __rte_hot
/*
 * Burst enqueue entry point for a CN10K SSO HWS.
 *
 * The SSO hardware accepts one event per doorbell, so this burst wrapper
 * consumes only ev[0]; nb_events exists to satisfy the burst API signature
 * and is deliberately ignored.
 *
 * Returns the number of events enqueued: 1 on success, 0 when a NEW-event
 * enqueue fails (backpressure) or the op is unknown.
 *
 * NOTE(review): the diff residue contained a stale pre-patch line
 * "return cn10k_sso_hws_enq(port, ev);" before the switch, which made the
 * inlined dispatch below unreachable and referenced a function this commit
 * removes; that line is dropped here.
 */
cn10k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
			uint16_t nb_events)
{
	struct cn10k_sso_hws *ws = port;

	RTE_SET_USED(nb_events);

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		return cn10k_sso_hws_new_event(ws, ev);
	case RTE_EVENT_OP_FORWARD:
		cn10k_sso_hws_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		if (ws->swtag_req) {
			/* Pending software-tag switch: deschedule rather
			 * than flush the tag.
			 */
			cnxk_sso_hws_desched(ev->u64, ws->base);
			ws->swtag_req = 0;
			break;
		}
		cnxk_sso_hws_swtag_flush(ws->base);
		break;
	default:
		return 0;
	}

	return 1;
}

uint16_t __rte_hot
Expand Down
Loading

0 comments on commit a83fc0f

Please sign in to comment.