Commit 029e88fd authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mlx5: Move all WR logic from qp.c to separate file

Split qp.c by moving all WR (work request) logic into a separate file.

Link: https://lore.kernel.org/r/20200506065513.4668-4-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 6671cde8
@@ -16,7 +16,8 @@ mlx5_ib-y := ah.o \
 	qpc.o \
 	restrack.o \
 	srq.o \
-	srq_cmd.o
+	srq_cmd.o \
+	wr.o
 
 mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
 mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
@@ -61,6 +61,7 @@
 #include "cmd.h"
 #include "srq.h"
 #include "qp.h"
+#include "wr.h"
 #include <linux/mlx5/fs_helpers.h>
 #include <linux/mlx5/accel.h>
 #include <rdma/uverbs_std_types.h>
@@ -6657,8 +6658,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	.modify_qp = mlx5_ib_modify_qp,
 	.modify_srq = mlx5_ib_modify_srq,
 	.poll_cq = mlx5_ib_poll_cq,
-	.post_recv = mlx5_ib_post_recv,
-	.post_send = mlx5_ib_post_send,
+	.post_recv = mlx5_ib_post_recv_nodrain,
+	.post_send = mlx5_ib_post_send_nodrain,
 	.post_srq_recv = mlx5_ib_post_srq_recv,
 	.process_mad = mlx5_ib_process_mad,
 	.query_ah = mlx5_ib_query_ah,
@@ -1178,10 +1178,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 void mlx5_ib_drain_sq(struct ib_qp *qp);
 void mlx5_ib_drain_rq(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
-		      const struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
-		      const struct ib_recv_wr **bad_wr);
 int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
 			size_t buflen, size_t *bc);
 int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
The two large diffs (the WR code removed from qp.c and the new wr.c that receives it) are collapsed and not shown here. The new wr.h header follows in full.
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};
/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first following address after the end
 * of the fragment or the SQ. Accordingly, during the WQE construction
 * which repetitively increases the pointer to write the next data, it
 * simply should check if it gets to an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}
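/*
 * Illustrative sketch only, not part of wr.h: how a WQE builder is expected
 * to use get_sq_edge().  Segments are written one after another, and the
 * write cursor is compared against the current edge; when it reaches the
 * edge, writing continues at the fragment that holds stride index @idx and
 * a new edge is computed for that fragment.  The helper name and signature
 * below are assumptions for illustration, not code from this commit.
 */
static inline void example_handle_sq_edge(struct mlx5_ib_wq *sq, u32 idx,
					  void **seg, void **cur_edge)
{
	if (*seg != *cur_edge)
		return;

	/* Wrap: the next bytes of the WQE live in the next fragment. */
	*cur_edge = get_sq_edge(sq, idx);
	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}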
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
					    const struct ib_send_wr *wr,
					    const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
					  const struct ib_send_wr *wr,
					  const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
					    const struct ib_recv_wr *wr,
					    const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
					  const struct ib_recv_wr *wr,
					  const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}

#endif /* _MLX5_IB_WR_H */
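The ib_device_ops table in main.c now points at the _nodrain wrappers, so ordinary verbs callers never set the drain flag; the _drain variants are there for the QP drain helpers (mlx5_ib_drain_sq()/mlx5_ib_drain_rq(), declared in mlx5_ib.h), which need to post one final marker WR even after the QP has been moved to the error state. Below is a minimal sketch of that pattern; the function name is hypothetical, the QP is assumed to already be in IB_QPS_ERR, and waiting for the marker completion is omitted, so this is not the driver's actual drain code.

static void example_drain_sq(struct ib_qp *qp)
{
	struct ib_rdma_wr marker = {
		.wr = {
			.opcode	    = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
		},
	};
	const struct ib_send_wr *bad_wr;
	int ret;

	/* drain = true lets the marker WR be accepted on an error-state QP. */
	ret = mlx5_ib_post_send_drain(qp, &marker.wr, &bad_wr);
	if (ret)
		pr_err("failed to post drain marker WR: %d\n", ret);

	/* ...the caller would then wait for the marker's completion. */
}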