mldev: drop input and output size queries
Drop support and use of ML input and output size get functions,
rte_ml_io_input_size_get and rte_ml_io_output_size_get.

These functions are no longer required, as the model buffer sizes can
be computed from the fields of the updated rte_ml_io_info structure.

Signed-off-by: Srikanth Yalavarthi <[email protected]>
Acked-by: Anup Prabhu <[email protected]>
Acked-by: Shivah Shankar S <[email protected]>
syalavarthi authored and tmonjalo committed Oct 11, 2023
1 parent 2436429 commit 30b85ef
Showing 6 changed files with 2 additions and 204 deletions.
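To illustrate the replacement path the commit message describes, here is a minimal sketch of how an application can derive input buffer sizes from rte_ml_model_info / rte_ml_io_info instead of calling the removed getters. The field names used (nb_inputs, input_info, nb_elements, size) follow the updated 23.11 io info layout as understood here, and the float32 dequantized element width and the helper name app_model_input_sizes are illustrative assumptions, not part of the DPDK API.

/*
 * Sketch only: size ML input buffers from model/io info instead of the
 * removed rte_ml_io_input_size_get(). Field names follow the updated
 * rte_ml_io_info as understood here; the float32 dequantized width and
 * this helper itself are illustrative assumptions, not DPDK API.
 */
#include <stdint.h>
#include <rte_mldev.h>

static int
app_model_input_sizes(int16_t dev_id, uint16_t model_id,
		      uint64_t *qsize, uint64_t *dsize)
{
	struct rte_ml_model_info info;
	uint32_t i;
	int ret;

	ret = rte_ml_model_info_get(dev_id, model_id, &info);
	if (ret != 0)
		return ret;

	*qsize = 0;
	*dsize = 0;
	for (i = 0; i < info.nb_inputs; i++) {
		/* Quantized size in bytes, as reported by the driver. */
		*qsize += info.input_info[i].size;
		/* Dequantized size: element count times the assumed float32 width. */
		*dsize += info.input_info[i].nb_elements * sizeof(float);
	}

	return 0;
}

The same loop over nb_outputs / output_info would replace the removed rte_ml_io_output_size_get.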
2 changes: 2 additions & 0 deletions doc/guides/rel_notes/release_23_11.rst
@@ -208,6 +208,8 @@ Removed Items
* security: Removed deprecated field ``reserved_opts``
from struct ``rte_security_ipsec_sa_options``.

* mldev: Removed functions ``rte_ml_io_input_size_get`` and ``rte_ml_io_output_size_get``.


API Changes
-----------
50 changes: 0 additions & 50 deletions drivers/ml/cnxk/cn10k_ml_ops.c
@@ -2110,54 +2110,6 @@ cn10k_ml_model_params_update(struct rte_ml_dev *dev, uint16_t model_id, void *bu
return 0;
}

static int
cn10k_ml_io_input_size_get(struct rte_ml_dev *dev, uint16_t model_id, uint32_t nb_batches,
uint64_t *input_qsize, uint64_t *input_dsize)
{
struct cn10k_ml_model *model;

model = dev->data->models[model_id];

if (model == NULL) {
plt_err("Invalid model_id = %u", model_id);
return -EINVAL;
}

if (input_qsize != NULL)
*input_qsize = PLT_U64_CAST(model->addr.total_input_sz_q *
PLT_DIV_CEIL(nb_batches, model->batch_size));

if (input_dsize != NULL)
*input_dsize = PLT_U64_CAST(model->addr.total_input_sz_d *
PLT_DIV_CEIL(nb_batches, model->batch_size));

return 0;
}

static int
cn10k_ml_io_output_size_get(struct rte_ml_dev *dev, uint16_t model_id, uint32_t nb_batches,
uint64_t *output_qsize, uint64_t *output_dsize)
{
struct cn10k_ml_model *model;

model = dev->data->models[model_id];

if (model == NULL) {
plt_err("Invalid model_id = %u", model_id);
return -EINVAL;
}

if (output_qsize != NULL)
*output_qsize = PLT_U64_CAST(model->addr.total_output_sz_q *
PLT_DIV_CEIL(nb_batches, model->batch_size));

if (output_dsize != NULL)
*output_dsize = PLT_U64_CAST(model->addr.total_output_sz_d *
PLT_DIV_CEIL(nb_batches, model->batch_size));

return 0;
}

static int
cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buff_seg **dbuffer,
struct rte_ml_buff_seg **qbuffer)
@@ -2636,8 +2588,6 @@ struct rte_ml_dev_ops cn10k_ml_ops = {
.model_params_update = cn10k_ml_model_params_update,

/* I/O ops */
.io_input_size_get = cn10k_ml_io_input_size_get,
.io_output_size_get = cn10k_ml_io_output_size_get,
.io_quantize = cn10k_ml_io_quantize,
.io_dequantize = cn10k_ml_io_dequantize,
};
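The removed cn10k helpers above scale the per-batch sizes by PLT_DIV_CEIL(nb_batches, model->batch_size), where PLT_DIV_CEIL is a cnxk platform macro. A plain-C sketch of that rounding, for applications that now size buffers themselves, follows; the helper name is illustrative.

#include <stdint.h>

/*
 * Sketch of the batch scaling done by the removed driver helpers: the total
 * buffer size is the per-batch size times the number of model-sized batch
 * groups, rounded up so a partial final group still gets a full slot.
 */
static inline uint64_t
app_scaled_buffer_size(uint64_t per_batch_size, uint32_t nb_batches,
		       uint32_t model_batch_size)
{
	uint64_t groups = (nb_batches + model_batch_size - 1) / model_batch_size;

	return per_batch_size * groups;
}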
38 changes: 0 additions & 38 deletions lib/mldev/rte_mldev.c
@@ -691,44 +691,6 @@ rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer)
return (*dev->dev_ops->model_params_update)(dev, model_id, buffer);
}

int
rte_ml_io_input_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
uint64_t *input_qsize, uint64_t *input_dsize)
{
struct rte_ml_dev *dev;

if (!rte_ml_dev_is_valid_dev(dev_id)) {
RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
return -EINVAL;
}

dev = rte_ml_dev_pmd_get_dev(dev_id);
if (*dev->dev_ops->io_input_size_get == NULL)
return -ENOTSUP;

return (*dev->dev_ops->io_input_size_get)(dev, model_id, nb_batches, input_qsize,
input_dsize);
}

int
rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
uint64_t *output_qsize, uint64_t *output_dsize)
{
struct rte_ml_dev *dev;

if (!rte_ml_dev_is_valid_dev(dev_id)) {
RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
return -EINVAL;
}

dev = rte_ml_dev_pmd_get_dev(dev_id);
if (*dev->dev_ops->io_output_size_get == NULL)
return -ENOTSUP;

return (*dev->dev_ops->io_output_size_get)(dev, model_id, nb_batches, output_qsize,
output_dsize);
}

int
rte_ml_io_quantize(int16_t dev_id, uint16_t model_id, struct rte_ml_buff_seg **dbuffer,
struct rte_ml_buff_seg **qbuffer)
60 changes: 0 additions & 60 deletions lib/mldev/rte_mldev.h
@@ -1008,66 +1008,6 @@ rte_ml_model_params_update(int16_t dev_id, uint16_t model_id, void *buffer);

/* IO operations */

/**
* Get size of quantized and dequantized input buffers.
*
* Calculate the size of buffers required for quantized and dequantized input data.
* This API would return the buffer sizes for the number of batches provided and would
* consider the alignment requirements as per the PMD. Input sizes computed by this API can
* be used by the application to allocate buffers.
*
* @param[in] dev_id
* The identifier of the device.
* @param[in] model_id
* Identifier for the model created
* @param[in] nb_batches
* Number of batches of input to be processed in a single inference job
* @param[out] input_qsize
* Quantized input size pointer.
* NULL value is allowed, in which case input_qsize is not calculated by the driver.
* @param[out] input_dsize
* Dequantized input size pointer.
* NULL value is allowed, in which case input_dsize is not calculated by the driver.
*
* @return
* - Returns 0 on success
* - Returns negative value on failure
*/
__rte_experimental
int
rte_ml_io_input_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
uint64_t *input_qsize, uint64_t *input_dsize);

/**
* Get size of quantized and dequantized output buffers.
*
* Calculate the size of buffers required for quantized and dequantized output data.
* This API would return the buffer sizes for the number of batches provided and would consider
* the alignment requirements as per the PMD. Output sizes computed by this API can be used by the
* application to allocate buffers.
*
* @param[in] dev_id
* The identifier of the device.
* @param[in] model_id
* Identifier for the model created
* @param[in] nb_batches
* Number of batches of input to be processed in a single inference job
* @param[out] output_qsize
* Quantized output size pointer.
* NULL value is allowed, in which case output_qsize is not calculated by the driver.
* @param[out] output_dsize
* Dequantized output size pointer.
* NULL value is allowed, in which case output_dsize is not calculated by the driver.
*
* @return
* - Returns 0 on success
* - Returns negative value on failure
*/
__rte_experimental
int
rte_ml_io_output_size_get(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
uint64_t *output_qsize, uint64_t *output_dsize);

/**
* Quantize input data.
*
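For callers migrating off the removed prototypes documented above, a hedged sketch of the replacement allocation flow follows. app_model_input_sizes() is the hypothetical helper sketched earlier on this page, not a DPDK API, and the 64-byte alignment and error handling are illustrative choices rather than mldev requirements.

#include <errno.h>
#include <stdint.h>
#include <rte_malloc.h>

/* Hypothetical helper sketched earlier on this page; not a DPDK API. */
extern int app_model_input_sizes(int16_t dev_id, uint16_t model_id,
				 uint64_t *qsize, uint64_t *dsize);

static int
app_alloc_input_buffers(int16_t dev_id, uint16_t model_id, uint32_t nb_batches,
			void **qbuf, void **dbuf)
{
	uint64_t qsize, dsize;
	int ret;

	/* Previously: rte_ml_io_input_size_get(dev_id, model_id, nb_batches,
	 * &qsize, &dsize); that call is removed in 23.11.
	 */
	ret = app_model_input_sizes(dev_id, model_id, &qsize, &dsize);
	if (ret != 0)
		return ret;

	/* Scale the per-batch sizes by the requested number of batches. */
	*qbuf = rte_zmalloc("ml_q_in", qsize * nb_batches, 64);
	*dbuf = rte_zmalloc("ml_d_in", dsize * nb_batches, 64);
	if (*qbuf == NULL || *dbuf == NULL) {
		rte_free(*qbuf);
		rte_free(*dbuf);
		return -ENOMEM;
	}

	return 0;
}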
54 changes: 0 additions & 54 deletions lib/mldev/rte_mldev_core.h
@@ -466,54 +466,6 @@ typedef int (*mldev_model_info_get_t)(struct rte_ml_dev *dev, uint16_t model_id,
*/
typedef int (*mldev_model_params_update_t)(struct rte_ml_dev *dev, uint16_t model_id, void *buffer);

/**
* @internal
*
* Get size of input buffers.
*
* @param dev
* ML device pointer.
* @param model_id
* Model ID to use.
* @param nb_batches
* Number of batches.
* @param input_qsize
* Size of quantized input.
* @param input_dsize
* Size of dequantized input.
*
* @return
* - 0 on success.
* - <0, error on failure.
*/
typedef int (*mldev_io_input_size_get_t)(struct rte_ml_dev *dev, uint16_t model_id,
uint32_t nb_batches, uint64_t *input_qsize,
uint64_t *input_dsize);

/**
* @internal
*
* Get size of output buffers.
*
* @param dev
* ML device pointer.
* @param model_id
* Model ID to use.
* @param nb_batches
* Number of batches.
* @param output_qsize
* Size of quantized output.
* @param output_dsize
* Size of dequantized output.
*
* @return
* - 0 on success.
* - <0, error on failure.
*/
typedef int (*mldev_io_output_size_get_t)(struct rte_ml_dev *dev, uint16_t model_id,
uint32_t nb_batches, uint64_t *output_qsize,
uint64_t *output_dsize);

/**
* @internal
*
@@ -627,12 +579,6 @@ struct rte_ml_dev_ops {
/** Update model params. */
mldev_model_params_update_t model_params_update;

/** Get input buffer size. */
mldev_io_input_size_get_t io_input_size_get;

/** Get output buffer size. */
mldev_io_output_size_get_t io_output_size_get;

/** Quantize data */
mldev_io_quantize_t io_quantize;

2 changes: 0 additions & 2 deletions lib/mldev/version.map
@@ -23,8 +23,6 @@ EXPERIMENTAL {
rte_ml_dev_xstats_reset;
rte_ml_enqueue_burst;
rte_ml_io_dequantize;
rte_ml_io_input_size_get;
rte_ml_io_output_size_get;
rte_ml_io_quantize;
rte_ml_model_info_get;
rte_ml_model_load;
