Skip to content

Commit 5565b90

Browse files
author
Jakub Ramaseuski
committed
RDMA/ionic: Create device queues to support admin operations
JIRA: https://issues.redhat.com/browse/RHEL-121486

commit f3bdbd4
Author: Abhijit Gangurde <abhijit.gangurde@amd.com>
Date:   Wed Sep 3 11:46:01 2025 +0530

    RDMA/ionic: Create device queues to support admin operations

    Setup RDMA admin queues using the device command exposed over the
    auxiliary device, and manage these queues using an IDA.

    Co-developed-by: Andrew Boyer <andrew.boyer@amd.com>
    Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
    Co-developed-by: Allen Hubbe <allen.hubbe@amd.com>
    Signed-off-by: Allen Hubbe <allen.hubbe@amd.com>
    Signed-off-by: Abhijit Gangurde <abhijit.gangurde@amd.com>
    Link: https://patch.msgid.link/20250903061606.4139957-10-abhijit.gangurde@amd.com
    Signed-off-by: Leon Romanovsky <leon@kernel.org>

Signed-off-by: Jakub Ramaseuski <jramaseu@redhat.com>
1 parent 915d3a7 commit 5565b90

File tree

9 files changed

+2300
-0
lines changed

9 files changed

+2300
-0
lines changed

drivers/infiniband/hw/ionic/ionic_admin.c

Lines changed: 1124 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 181 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,181 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3+
4+
#include "ionic_ibdev.h"
5+
6+
static int ionic_validate_qdesc(struct ionic_qdesc *q)
7+
{
8+
if (!q->addr || !q->size || !q->mask ||
9+
!q->depth_log2 || !q->stride_log2)
10+
return -EINVAL;
11+
12+
if (q->addr & (PAGE_SIZE - 1))
13+
return -EINVAL;
14+
15+
if (q->mask != BIT(q->depth_log2) - 1)
16+
return -EINVAL;
17+
18+
if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
19+
return -EINVAL;
20+
21+
return 0;
22+
}
23+
24+
/* Map a completion vector and udma index to an event queue id.
 * EQs are laid out one per vector per udma; the first EQ group is
 * reserved for async events, the rest serve completions.
 */
static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
{
        u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1;
        u32 vec;

        /* skip group 0 (async events), wrap within the completion vectors */
        vec = comp_vector % comp_vec_count + 1;

        return vec * dev->lif_cfg.udma_count + udma_idx;
}
33+
34+
/* Allocate a completion queue id from the udma's partition of the
 * shared cqid space.  On success stores the device-visible cqid in
 * *cqid and returns 0; otherwise returns the negative error from the
 * resource allocator.
 */
static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
{
        unsigned int per_udma = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
        unsigned int first = per_udma * udma_idx;
        int bitid;

        bitid = ionic_resid_get_shared(&dev->inuse_cqid, first,
                                       first + per_udma);
        if (bitid < 0)
                return bitid;

        /* cq_base is zero or a multiple of two queue groups */
        *cqid = dev->lif_cfg.cq_base +
                ionic_bitid_to_qid(bitid, dev->lif_cfg.udma_qgrp_shift,
                                   dev->half_cqid_udma_shift);

        return 0;
}
55+
56+
/* Return a completion queue id to the shared cqid allocator.
 * Inverse of ionic_get_cqid().
 */
static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
{
        u32 qid = cqid - dev->lif_cfg.cq_base;

        ionic_resid_put(&dev->inuse_cqid,
                        ionic_qid_to_bitid(qid, dev->lif_cfg.udma_qgrp_shift,
                                           dev->half_cqid_udma_shift));
}
64+
65+
/**
 * ionic_create_cq_common() - Set up one hardware CQ backing a verbs CQ.
 * @vcq:       Verbs CQ container; cq[udma_idx] is initialized here.
 * @buf:       Page table buffer, filled via ionic_pgtbl_init() for the ring.
 * @attr:      Verbs CQ attributes; cqe depth and comp_vector are used.
 * @ctx:       User context; unused in this function as written.
 * @udata:     Non-NULL for a userspace CQ, NULL for a kernel CQ.
 * @req_cq:    Userspace ring description (validated when udata is set).
 * @resp_cqid: Out: cqid reported back to userspace (udata path only).
 * @udma_idx:  Which udma half this CQ belongs to.
 *
 * Userspace path: validates req_cq, pins the user ring with ib_umem_get(),
 * and copies the ring geometry into cq->q (cq->q.ptr stays NULL).
 * Kernel path: allocates a DMA ring with ionic_queue_init() and primes the
 * doorbell, color, and credit state.
 * In both cases the CQ is published in dev->cq_tbl under its cqid.
 *
 * NOTE(review): the kernel path reads cq->umem (in ionic_pgtbl_init() and
 * the error unwind) without assigning it here — this relies on the vcq
 * allocation being zero-initialized; confirm at the call site.
 *
 * Return: 0 on success, negative errno on failure.  On failure all
 * partially acquired resources are released and cq->vcq is left NULL.
 */
int ionic_create_cq_common(struct ionic_vcq *vcq,
                           struct ionic_tbl_buf *buf,
                           const struct ib_cq_init_attr *attr,
                           struct ionic_ctx *ctx,
                           struct ib_udata *udata,
                           struct ionic_qdesc *req_cq,
                           __u32 *resp_cqid,
                           int udma_idx)
{
        struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
        struct ionic_cq *cq = &vcq->cq[udma_idx];
        void *entry;
        int rc;

        cq->vcq = vcq;

        /* depth plus grace slots must fit in a 16-bit queue index */
        if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
                rc = -EINVAL;
                goto err_args;
        }

        rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
        if (rc)
                goto err_args;

        cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);

        spin_lock_init(&cq->lock);
        INIT_LIST_HEAD(&cq->poll_sq);
        INIT_LIST_HEAD(&cq->flush_sq);
        INIT_LIST_HEAD(&cq->flush_rq);

        if (udata) {
                rc = ionic_validate_qdesc(req_cq);
                if (rc)
                        goto err_qdesc;

                /* pin the userspace ring; device writes CQEs into it */
                cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
                                       IB_ACCESS_LOCAL_WRITE);
                if (IS_ERR(cq->umem)) {
                        rc = PTR_ERR(cq->umem);
                        goto err_qdesc;
                }

                /* no kernel mapping; geometry mirrors the user descriptor */
                cq->q.ptr = NULL;
                cq->q.size = req_cq->size;
                cq->q.mask = req_cq->mask;
                cq->q.depth_log2 = req_cq->depth_log2;
                cq->q.stride_log2 = req_cq->stride_log2;

                *resp_cqid = cq->cqid;
        } else {
                rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
                                      attr->cqe + IONIC_CQ_GRACE,
                                      sizeof(struct ionic_v1_cqe));
                if (rc)
                        goto err_q_init;

                ionic_queue_dbell_init(&cq->q, cq->cqid);
                cq->color = true;
                cq->credit = cq->q.mask;
        }

        rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
        if (rc)
                goto err_pgtbl_init;

        init_completion(&cq->cq_rel_comp);
        kref_init(&cq->cq_kref);

        /* publish for event dispatch; slot must have been empty */
        entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
        if (entry) {
                if (!xa_is_err(entry))
                        rc = -EINVAL;
                else
                        rc = xa_err(entry);

                goto err_xa;
        }

        return 0;

        /* unwind in reverse order of acquisition; labels fall through */
err_xa:
        ionic_pgtbl_unbuf(dev, buf);
err_pgtbl_init:
        if (!udata)
                ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
err_q_init:
        if (cq->umem)
                ib_umem_release(cq->umem);
err_qdesc:
        ionic_put_cqid(dev, cq->cqid);
err_args:
        cq->vcq = NULL;

        return rc;
}
162+
163+
void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
164+
{
165+
if (!cq->vcq)
166+
return;
167+
168+
xa_erase_irq(&dev->cq_tbl, cq->cqid);
169+
170+
kref_put(&cq->cq_kref, ionic_cq_complete);
171+
wait_for_completion(&cq->cq_rel_comp);
172+
173+
if (cq->umem)
174+
ib_umem_release(cq->umem);
175+
else
176+
ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
177+
178+
ionic_put_cqid(dev, cq->cqid);
179+
180+
cq->vcq = NULL;
181+
}
Lines changed: 164 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,164 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3+
4+
#ifndef _IONIC_FW_H_
5+
#define _IONIC_FW_H_
6+
7+
#include <linux/kernel.h>
8+
9+
/* completion queue v1 cqe
 *
 * The union is discriminated by the type encoded in qid_type_flags
 * (see enum ionic_v1_cqe_qtf_bits).  Mixed endianness per field as
 * declared; status_length and qid_type_flags are big-endian.
 */
struct ionic_v1_cqe {
        union {
                /* admin command completion */
                struct {
                        __be16 cmd_idx;
                        __u8 cmd_op;
                        __u8 rsvd[17];
                        __le16 old_sq_cindex;
                        __le16 old_rq_cq_cindex;
                } admin;
                /* receive completion */
                struct {
                        __u64 wqe_id;
                        __be32 src_qpn_op;
                        __u8 src_mac[6];
                        __be16 vlan_tag;
                        __be32 imm_data_rkey;
                } recv;
                /* send completion */
                struct {
                        __u8 rsvd[4];
                        __be32 msg_msn;
                        __u8 rsvd2[8];
                        __u64 npg_wqe_id;
                } send;
        };
        __be32 status_length;
        /* color, error, type, and qid packed per ionic_v1_cqe_qtf_bits */
        __be32 qid_type_flags;
};
36+
37+
/* bits for cqe qid_type_flags */
enum ionic_v1_cqe_qtf_bits {
        IONIC_V1_CQE_COLOR = BIT(0),        /* producer phase (color) bit */
        IONIC_V1_CQE_ERROR = BIT(1),        /* completion reports an error */
        IONIC_V1_CQE_TYPE_SHIFT = 5,
        IONIC_V1_CQE_TYPE_MASK = 0x7,
        IONIC_V1_CQE_QID_SHIFT = 8,

        /* values extracted with TYPE_SHIFT/TYPE_MASK */
        IONIC_V1_CQE_TYPE_ADMIN = 0,
        IONIC_V1_CQE_TYPE_RECV = 1,
        IONIC_V1_CQE_TYPE_SEND_MSN = 2,
        IONIC_V1_CQE_TYPE_SEND_NPG = 3,
};
50+
51+
static inline bool ionic_v1_cqe_color(struct ionic_v1_cqe *cqe)
52+
{
53+
return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_COLOR);
54+
}
55+
56+
static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe)
57+
{
58+
return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR);
59+
}
60+
61+
static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe)
62+
{
63+
cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT);
64+
}
65+
66+
static inline u32 ionic_v1_cqe_qtf(struct ionic_v1_cqe *cqe)
67+
{
68+
return be32_to_cpu(cqe->qid_type_flags);
69+
}
70+
71+
static inline u8 ionic_v1_cqe_qtf_type(u32 qtf)
72+
{
73+
return (qtf >> IONIC_V1_CQE_TYPE_SHIFT) & IONIC_V1_CQE_TYPE_MASK;
74+
}
75+
76+
static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf)
77+
{
78+
return qtf >> IONIC_V1_CQE_QID_SHIFT;
79+
}
80+
81+
#define ADMIN_WQE_STRIDE 64	/* bytes per admin queue wqe slot */
#define ADMIN_WQE_HDR_LEN 4	/* bytes of header (op, rsvd, len) before cmd */

/* admin queue v1 wqe */
struct ionic_v1_admin_wqe {
        __u8 op;	/* admin command opcode */
        __u8 rsvd;
        __le16 len;	/* length of the cmd payload */

        /* command payload — empty as declared here.
         * NOTE(review): an empty union is a GNU C extension; presumably
         * command-specific members are added elsewhere — confirm.
         */
        union {
        } cmd;
};
93+
94+
/* admin queue v1 cqe status
 *
 * Sequential values starting at 0 (IONIC_V1_ASTS_OK == success).
 */
enum ionic_v1_admin_status {
        IONIC_V1_ASTS_OK,
        IONIC_V1_ASTS_BAD_CMD,
        IONIC_V1_ASTS_BAD_INDEX,
        IONIC_V1_ASTS_BAD_STATE,
        IONIC_V1_ASTS_BAD_TYPE,
        IONIC_V1_ASTS_BAD_ATTR,
        IONIC_V1_ASTS_MSG_TOO_BIG,
};
104+
105+
/* event queue v1 eqe
 *
 * A single big-endian word packing color, type, code, and qid
 * (see enum ionic_v1_eqe_evt_bits).
 */
struct ionic_v1_eqe {
        __be32 evt;
};
109+
110+
/* bits for eqe evt word (comment previously misnamed it cqe queue_type_flags) */
enum ionic_v1_eqe_evt_bits {
        IONIC_V1_EQE_COLOR = BIT(0),	/* producer phase (color) bit */
        IONIC_V1_EQE_TYPE_SHIFT = 1,
        IONIC_V1_EQE_TYPE_MASK = 0x7,
        IONIC_V1_EQE_CODE_SHIFT = 4,
        IONIC_V1_EQE_CODE_MASK = 0xf,
        IONIC_V1_EQE_QID_SHIFT = 8,

        /* cq events */
        IONIC_V1_EQE_TYPE_CQ = 0,
        /* cq normal events */
        IONIC_V1_EQE_CQ_NOTIFY = 0,
        /* cq error events */
        IONIC_V1_EQE_CQ_ERR = 8,

        /* qp and srq events */
        IONIC_V1_EQE_TYPE_QP = 1,
        /* qp normal events */
        IONIC_V1_EQE_SRQ_LEVEL = 0,
        IONIC_V1_EQE_SQ_DRAIN = 1,
        IONIC_V1_EQE_QP_COMM_EST = 2,
        IONIC_V1_EQE_QP_LAST_WQE = 3,
        /* qp error events (codes >= 8 are errors) */
        IONIC_V1_EQE_QP_ERR = 8,
        IONIC_V1_EQE_QP_ERR_REQUEST = 9,
        IONIC_V1_EQE_QP_ERR_ACCESS = 10,
};
138+
139+
static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe)
140+
{
141+
return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR);
142+
}
143+
144+
static inline u32 ionic_v1_eqe_evt(struct ionic_v1_eqe *eqe)
145+
{
146+
return be32_to_cpu(eqe->evt);
147+
}
148+
149+
static inline u8 ionic_v1_eqe_evt_type(u32 evt)
150+
{
151+
return (evt >> IONIC_V1_EQE_TYPE_SHIFT) & IONIC_V1_EQE_TYPE_MASK;
152+
}
153+
154+
static inline u8 ionic_v1_eqe_evt_code(u32 evt)
155+
{
156+
return (evt >> IONIC_V1_EQE_CODE_SHIFT) & IONIC_V1_EQE_CODE_MASK;
157+
}
158+
159+
static inline u32 ionic_v1_eqe_evt_qid(u32 evt)
160+
{
161+
return evt >> IONIC_V1_EQE_QID_SHIFT;
162+
}
163+
164+
#endif /* _IONIC_FW_H_ */

0 commit comments

Comments
 (0)