/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
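
/*
 * MLX4_GET/MLX4_PUT copy a single field between a big-endian command
 * mailbox and a CPU-order variable.  The byte-order conversion is
 * picked by the size of the variable, so the destination/source type
 * determines how many bytes are moved; any size other than 1, 2, 4 or
 * 8 bytes references an undefined symbol and fails at link time.
 */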
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
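
/* Decode the DEV_CAP flag bits returned by QUERY_DEV_CAP and log the
 * names of the capabilities the device reports.
 */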
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[10] = "VMM",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device manage flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "TCP/IP offloads/flow-steering for VXLAN support"
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags2:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
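
/* MOD_STAT_CFG overrides items of the device's static configuration;
 * here it is used to set the firmware log page size fields.
 */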
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
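
/* PF-side handler for a QUERY_FUNC_CAP command issued by a slave.
 * op_modifier == 0 fills in the general per-function capabilities and
 * resource quotas; op_modifier == 1 fills in the per-port capabilities
 * for the port given in the input modifier.
 */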
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field;
	u32 size;
	int err = 0;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80

	if (vhcr->op_modifier == 1) {
		/* Set nic_info bit to mark new fields support */
		field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		field = vhcr->in_modifier; /* phys-port = logical-port */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = dev->caps.num_ports;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		size = dev->caps.num_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);

		size = dev->caps.reserved_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
	} else
		err = -EINVAL;

	return err;
}
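
/* Slave-side QUERY_FUNC_CAP: issued as a wrapped command, so the
 * answer is produced by mlx4_QUERY_FUNC_CAP_wrapper() on the PF.
 * gen_or_port == 0 asks for the general function capabilities; a port
 * number asks for that port's capabilities.
 */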
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size;
	int err = 0, quotas = 0;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		goto out;
	}
	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
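
/* Read the device capabilities and limits from firmware.  Each value
 * lives at a fixed offset in the QUERY_DEV_CAP output mailbox; most
 * log-encoded fields are expanded to their linear value here.
 */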
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e

	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1<<6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1<<3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
			dev_cap->max_vl[i] = field >> 4;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
			dev_cap->ib_mtu[i] = field >> 4;
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
			dev_cap->max_gids[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
		}
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
			dev_cap->supported_port_types[i] = field & 3;
			dev_cap->suggested_type[i] = (field >> 3) & 1;
			dev_cap->default_sense[i] = (field >> 4) & 1;
			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->ib_mtu[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i] = 1 << (field >> 4);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
			dev_cap->log_max_macs[i] = field & 0xf;
			dev_cap->log_max_vlans[i] = field >> 4;
			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
			dev_cap->trans_type[i] = field32 >> 24;
			dev_cap->vendor_oui[i] = field32 & 0xffffff;
			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
		}
	}

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
				    dev_cap->reserved_eqs);

	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
		 dev_cap->max_port_width[1]);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
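
/* PF-side QUERY_DEV_CAP handler: run the real command, then mask out
 * capabilities (timestamping, VXLAN offloads, BlueFlame, type-2 memory
 * windows, device-managed flow steering) that are not exposed to
 * slaves.
 */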
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64 flags;
	int err = 0;
	u8 field;
	u32 bmme_flags;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	return 0;
}
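
/* PF-side QUERY_PORT handler: report the VF's administered MAC and
 * link state instead of the physical values, and clamp the GID/P_Key
 * table sizes the slave sees.
 */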
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
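
/* Build the page-list mailboxes for MAP_FA/MAP_ICM/MAP_ICM_AUX.  Each
 * 16-byte entry carries a virtual address (when the mapping is not
 * one-to-one) and a physical address tagged with the log2 page size;
 * a full mailbox is flushed to firmware every MLX4_MAILBOX_SIZE / 16
 * entries.
 */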
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B,
					       MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
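
/* QUERY_FW reports the firmware version, the command interface
 * revision, and the BAR locations of the catastrophic-error buffer,
 * clear-interrupt register, comm channel and internal clock.
 */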
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE		0x100
#define QUERY_FW_VER_OFFSET		0x00
#define QUERY_FW_PPF_ID			0x09
#define QUERY_FW_CMD_IF_REV_OFFSET	0x0a
#define QUERY_FW_MAX_CMD_OFFSET		0x0f
#define QUERY_FW_ERR_START_OFFSET	0x30
#define QUERY_FW_ERR_SIZE_OFFSET	0x38
#define QUERY_FW_ERR_BAR_OFFSET		0x3c

#define QUERY_FW_SIZE_OFFSET		0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET	0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET	0x28

#define QUERY_FW_COMM_BASE_OFFSET	0x40
#define QUERY_FW_COMM_BAR_OFFSET	0x48

#define QUERY_FW_CLOCK_OFFSET		0x50
#define QUERY_FW_CLOCK_BAR		0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE		0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET	0x10
#define QUERY_ADAPTER_VSD_OFFSET	0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
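
/* INIT_HCA hands the ICM layout the driver computed (queue context
 * tables, MTTs, multicast tables, UAR parameters) to the firmware.
 * The mailbox buffer is zeroed when allocated, so only non-zero
 * fields need to be filled in.
 */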
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		0x200
#define INIT_HCA_VERSION_OFFSET		0x000
#define INIT_HCA_VERSION		2
#define INIT_HCA_VXLAN_OFFSET		0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET	0x0e
#define INIT_HCA_FLAGS_OFFSET		0x014
#define INIT_HCA_QPC_OFFSET		0x020
#define INIT_HCA_QPC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET		(INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_EQE_CQE_OFFSETS	(INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET		(INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_RDMARC_BASE_OFFSET	(INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET		(INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		0x0c0
#define INIT_HCA_MC_BASE_OFFSET		(INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_UC_STEERING_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	(INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define INIT_HCA_FS_PARAM_OFFSET	0x1d0
#define INIT_HCA_FS_BASE_OFFSET		(INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define INIT_HCA_FS_IB_BITS_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET	(INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_TPT_MW_OFFSET		(INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_LOG_MPT_SZ_OFFSET	(INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET	(INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET	(INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET	(INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		(ilog2(cache_line_size()) - 4) << 5;

#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;

		MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
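
/* QUERY_HCA reads back the parameters the HCA was initialized with;
 * the INIT_HCA_* offsets are reused because the output layout matches
 * the INIT_HCA input mailbox.
 */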
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	int err;
	u8 byte_field;

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}

	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz, outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* TPT attributes */

	MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to be operated */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}
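
/* INIT_PORT is reference counted across functions: the physical port
 * is initialized on the first claim, and each slave's claim is
 * recorded in its init_port_mask.  For IB ports the INIT_PORT command
 * is only sent once paravirtualized QP0 is ready.
 */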
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE		256
#define INIT_PORT_FLAGS_OFFSET		0x00
#define INIT_PORT_FLAG_SIG		(1 << 18)
#define INIT_PORT_FLAG_NG		(1 << 17)
#define INIT_PORT_FLAG_G0		(1 << 16)
#define INIT_PORT_VL_SHIFT		4
#define INIT_PORT_PORT_WIDTH_SHIFT	8
#define INIT_PORT_MTU_OFFSET		0x04
#define INIT_PORT_MAX_GID_OFFSET	0x06
#define INIT_PORT_MAX_PKEY_OFFSET	0x0a
#define INIT_PORT_GUID0_OFFSET		0x10
#define INIT_PORT_NODE_GUID_OFFSET	0x18
#define INIT_PORT_SI_GUID_OFFSET	0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
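
/* Mirror image of mlx4_INIT_PORT_wrapper(): drop this slave's claim
 * on the port and only issue CLOSE_PORT when the last reference goes
 * away (or, for IB, when QP0 is no longer active).
 */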
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	      (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}

int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}

int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
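
/* Query the uplink GUID of each port via MOD_STAT_CFG (opcode
 * modifier 0x2) and cache it as the physical port ID.
 */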
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err = 0;

#define MOD_STAT_CFG_PORT_OFFSET	8
#define MOD_STAT_CFG_GUID_H		0X14
#define MOD_STAT_CFG_GUID_L		0X1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Fail to get port %d uplink guid\n",
				 port);
			goto out;
		}
		MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
		MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
		dev->caps.phys_port_id[port] = (u64)guid_lo |
					       (u64)guid_hi << 32;
	}
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
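
/* Firmware operation requests: the FW raises an event when it needs
 * the driver to perform an operation on its behalf (currently only
 * multicast attach/detach).  This work handler fetches each pending
 * request with GET_OP_REQ, executes it, and acknowledges it back to
 * the firmware together with the resulting status.
 */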
enum {
	ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	int rem_mcg;
	int prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			return;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type &= 0xfff;

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}