GNU Linux-libre 5.10.215-gnu1
[releases.git] / drivers / infiniband / hw / mlx5 / cmd.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
4  */
5
6 #include "cmd.h"
7
8 int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
9 {
10         u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
11         u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
12         int err;
13
14         MLX5_SET(query_special_contexts_in, in, opcode,
15                  MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
16         err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
17         if (!err)
18                 *mkey = MLX5_GET(query_special_contexts_out, out,
19                                  dump_fill_mkey);
20         return err;
21 }
22
23 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
24 {
25         u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
26         u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
27         int err;
28
29         MLX5_SET(query_special_contexts_in, in, opcode,
30                  MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
31         err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
32         if (!err)
33                 *null_mkey = MLX5_GET(query_special_contexts_out, out,
34                                       null_mkey);
35         return err;
36 }
37
38 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
39                                void *out)
40 {
41         u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};
42
43         MLX5_SET(query_cong_params_in, in, opcode,
44                  MLX5_CMD_OP_QUERY_CONG_PARAMS);
45         MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
46
47         return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
48 }
49
/*
 * mlx5_cmd_alloc_memic() - allocate a range of device memory (MEMIC).
 * @dm:        per-device MEMIC bookkeeping (page bitmap + lock)
 * @addr:      out: address of the allocation, offset from dev->bar_addr
 * @length:    requested size in bytes; must be non-zero and must have no
 *             bits set in MLX5_MEMIC_ALLOC_SIZE_MASK
 * @alignment: requested alignment, normalized against the device's base
 *             alignment (MLX5_MEMIC_BASE_ALIGN) before being sent to FW
 *
 * Scans the driver-side page bitmap for a free run of pages, tentatively
 * reserves it, and asks firmware to allocate from that range. A firmware
 * failure rolls the reservation back; -EAGAIN retries from the next page.
 *
 * Returns 0 and sets *addr on success, -EINVAL for a bad length/alignment,
 * -ENOMEM when no suitable range remains, or the firmware error code.
 */
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
			 u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* mlx5 device sets alignment as 64*2^driver_value
	 * so normalizing is needed.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		/* Reserve the run while still holding the lock so a
		 * concurrent allocator cannot pick the same pages while
		 * firmware is being consulted below.
		 */
		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
		if (ret) {
			/* Firmware refused this range: undo the tentative
			 * bitmap reservation before retrying or bailing out.
			 */
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			/* -EAGAIN means only this range was unusable;
			 * resume the bitmap scan from the next page.
			 */
			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		/* Hand back the address rebased onto the device BAR, the
		 * inverse of the adjustment mlx5_cmd_dealloc_memic() does.
		 */
		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}
124
125 void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
126 {
127         struct mlx5_core_dev *dev = dm->dev;
128         u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
129         u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
130         u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
131         u64 start_page_idx;
132         int err;
133
134         addr -= dev->bar_addr;
135         start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
136
137         MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
138         MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
139         MLX5_SET(dealloc_memic_in, in, memic_size, length);
140
141         err =  mlx5_cmd_exec_in(dev, dealloc_memic, in);
142         if (err)
143                 return;
144
145         spin_lock(&dm->lock);
146         bitmap_clear(dm->memic_alloc_pages,
147                      start_page_idx, num_pages);
148         spin_unlock(&dm->lock);
149 }
150
151 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
152 {
153         u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
154
155         MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
156         MLX5_SET(destroy_tir_in, in, tirn, tirn);
157         MLX5_SET(destroy_tir_in, in, uid, uid);
158         mlx5_cmd_exec_in(dev, destroy_tir, in);
159 }
160
161 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
162 {
163         u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
164
165         MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
166         MLX5_SET(destroy_tis_in, in, tisn, tisn);
167         MLX5_SET(destroy_tis_in, in, uid, uid);
168         mlx5_cmd_exec_in(dev, destroy_tis, in);
169 }
170
171 int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
172 {
173         u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
174
175         MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
176         MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
177         MLX5_SET(destroy_rqt_in, in, uid, uid);
178         return mlx5_cmd_exec_in(dev, destroy_rqt, in);
179 }
180
181 int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
182                                     u16 uid)
183 {
184         u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
185         u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
186         int err;
187
188         MLX5_SET(alloc_transport_domain_in, in, opcode,
189                  MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
190         MLX5_SET(alloc_transport_domain_in, in, uid, uid);
191
192         err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
193         if (!err)
194                 *tdn = MLX5_GET(alloc_transport_domain_out, out,
195                                 transport_domain);
196
197         return err;
198 }
199
200 void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
201                                        u16 uid)
202 {
203         u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
204
205         MLX5_SET(dealloc_transport_domain_in, in, opcode,
206                  MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
207         MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
208         MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
209         mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
210 }
211
212 int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
213 {
214         u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
215
216         MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
217         MLX5_SET(dealloc_pd_in, in, pd, pdn);
218         MLX5_SET(dealloc_pd_in, in, uid, uid);
219         return mlx5_cmd_exec_in(dev, dealloc_pd, in);
220 }
221
222 int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
223                         u32 qpn, u16 uid)
224 {
225         u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
226         void *gid;
227
228         MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
229         MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
230         MLX5_SET(attach_to_mcg_in, in, uid, uid);
231         gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
232         memcpy(gid, mgid, sizeof(*mgid));
233         return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
234 }
235
236 int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
237                         u32 qpn, u16 uid)
238 {
239         u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
240         void *gid;
241
242         MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
243         MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
244         MLX5_SET(detach_from_mcg_in, in, uid, uid);
245         gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
246         memcpy(gid, mgid, sizeof(*mgid));
247         return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
248 }
249
250 int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
251 {
252         u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
253         u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
254         int err;
255
256         MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
257         MLX5_SET(alloc_xrcd_in, in, uid, uid);
258         err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
259         if (!err)
260                 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
261         return err;
262 }
263
264 int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
265 {
266         u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
267
268         MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
269         MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
270         MLX5_SET(dealloc_xrcd_in, in, uid, uid);
271         return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
272 }
273
274 int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
275                      u16 opmod, u8 port)
276 {
277         int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
278         int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
279         int err = -ENOMEM;
280         void *data;
281         void *resp;
282         u32 *out;
283         u32 *in;
284
285         in = kzalloc(inlen, GFP_KERNEL);
286         out = kzalloc(outlen, GFP_KERNEL);
287         if (!in || !out)
288                 goto out;
289
290         MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
291         MLX5_SET(mad_ifc_in, in, op_mod, opmod);
292         MLX5_SET(mad_ifc_in, in, port, port);
293
294         data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
295         memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
296
297         err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
298         if (err)
299                 goto out;
300
301         resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
302         memcpy(outb, resp,
303                MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
304
305 out:
306         kfree(out);
307         kfree(in);
308         return err;
309 }