net/mlx5e: Alloc xsk channel param out of mlx5e_open_xsk()

Currently, the xsk channel parameters are allocated and filled
in mlx5e_open_xsk().

Move this responsibility out of mlx5e_open_xsk() and have
the function take an already filled mlx5e_channel_param.

mlx5e_open_channel() already allocates channel parameters.
The only precaution that is needed is to call
mlx5e_build_xsk_channel_param() before mlx5e_open_xsk().

mlx5e_xsk_enable_locked() now allocates and fills the xsk parameters.

For simplicity, link the xsk parameters in struct mlx5e_channel_param
so that channel params can be passed around.

This patch has no functional changes.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20260223204155.1783580-6-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
Dragos Tatulea
2026-02-23 22:41:45 +02:00
committed by Paolo Abeni
parent ba4f39c256
commit 8a96b9144f
6 changed files with 26 additions and 18 deletions
@@ -1278,6 +1278,7 @@ void mlx5e_build_xsk_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
struct mlx5e_channel_param *cparam)
{
cparam->xsk = xsk;
mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}
@@ -42,6 +42,7 @@ struct mlx5e_channel_param {
struct mlx5e_sq_param xdp_sq;
struct mlx5e_sq_param icosq;
struct mlx5e_sq_param async_icosq;
struct mlx5e_xsk_param *xsk;
};
struct mlx5e_create_sq_param {
@@ -79,6 +79,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
struct xsk_buff_pool *pool, u16 ix)
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_channel_param *cparam;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
int err;
@@ -89,15 +90,20 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
cparam = kvzalloc_obj(*cparam, GFP_KERNEL);
if (!cparam)
return -ENOMEM;
err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool);
if (unlikely(err))
return err;
goto err_free_cparam;
err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
if (unlikely(err))
goto err_unmap_pool;
mlx5e_build_xsk_param(pool, &xsk);
mlx5e_build_xsk_channel_param(priv->mdev, params, &xsk, cparam);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
@@ -122,7 +128,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
c = priv->channels.c[ix];
err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
err = mlx5e_open_xsk(priv, params, cparam, pool, c);
if (unlikely(err))
goto err_remove_pool;
@@ -138,6 +144,8 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
mlx5e_deactivate_rq(&c->rq);
mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);
kvfree(cparam);
return 0;
err_remove_pool:
@@ -146,6 +154,9 @@ err_remove_pool:
err_unmap_pool:
mlx5e_xsk_unmap_pool(priv, pool);
err_free_cparam:
kvfree(cparam);
return err;
validate_closed:
@@ -157,6 +168,8 @@ validate_closed:
goto err_remove_pool;
}
kvfree(cparam);
return 0;
}
@@ -105,10 +105,11 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c,
}
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel_param *cparam,
struct xsk_buff_pool *pool,
struct mlx5e_channel *c)
{
struct mlx5e_channel_param *cparam;
struct mlx5e_xsk_param *xsk = cparam->xsk;
struct mlx5e_create_cq_param ccp;
int err;
@@ -117,16 +118,10 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
return -EINVAL;
cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
if (!cparam)
return -ENOMEM;
mlx5e_build_xsk_channel_param(priv->mdev, params, xsk, cparam);
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;
return err;
err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
if (unlikely(err))
@@ -147,8 +142,6 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_tx_cq;
kvfree(cparam);
set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
return 0;
@@ -162,9 +155,6 @@ err_close_rq:
err_close_rx_cq:
mlx5e_close_cq(&c->xskrq.cq);
err_free_cparam:
kvfree(cparam);
return err;
}
@@ -11,8 +11,10 @@ struct mlx5e_xsk_param;
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev);
struct mlx5e_channel_param;
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel_param *cparam,
struct xsk_buff_pool *pool,
struct mlx5e_channel *c);
void mlx5e_close_xsk(struct mlx5e_channel *c);
void mlx5e_activate_xsk(struct mlx5e_channel *c);
@@ -2829,7 +2829,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (xsk_pool) {
mlx5e_build_xsk_param(xsk_pool, &xsk);
err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
mlx5e_build_xsk_channel_param(priv->mdev, params, &xsk, cparam);
err = mlx5e_open_xsk(priv, params, cparam, xsk_pool, c);
if (unlikely(err))
goto err_close_queues;
}