/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
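
/*
 * Local constants: the supported command-interface revision, the two
 * completion modes, and the sizes of the two cached message pools.
 * LONG_LIST_SIZE appears to be sized so that one message can carry page
 * pointers (8 bytes each) for 2GB worth of pages, plus the 16 inline
 * bytes and one data block; MED_LIST_SIZE covers common small commands.
 */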
enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS,
};

enum {
	NUM_LONG_LISTS	  = 16,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}
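
/* Tokens cycle through 1..255 (zero is skipped), presumably so that a
 * stale or corrupted completion can be caught by the signature/token
 * checks performed when the command finishes. */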
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	token = cmd->token++ % 255 + 1;
	spin_unlock(&cmd->token_lock);

	return token;
}
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}
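
/* Signature scheme: each signed region stores the complement of the XOR
 * of its bytes, so XOR-ing the whole region (stored signature included)
 * yields 0xff when the region is intact. */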
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}
static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}
static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";
	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";
	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";
	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";
	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";
	case MLX5_CMD_OP_ENABLE_HCA:
		return "ENABLE_HCA";
	case MLX5_CMD_OP_DISABLE_HCA:
		return "DISABLE_HCA";
	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";
	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";
	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";
	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";
	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";
	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";
	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";
	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";
	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";
	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";
	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";
	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";
	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";
	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";
	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";
	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";
	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";
	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";
	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";
	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";
	case MLX5_CMD_OP_RTS2SQD_QP:
		return "RTS2SQD_QP";
	case MLX5_CMD_OP_SQD2RTS_QP:
		return "SQD2RTS_QP";
	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";
	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";
	case MLX5_CMD_OP_CONF_SQP:
		return "CONF_SQP";
	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";
	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";
	case MLX5_CMD_OP_SUSPEND_QP:
		return "SUSPEND_QP";
	case MLX5_CMD_OP_UNSUSPEND_QP:
		return "UNSUSPEND_QP";
	case MLX5_CMD_OP_SQD2SQD_QP:
		return "SQD2SQD_QP";
	case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
		return "ALLOC_QP_COUNTER_SET";
	case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
		return "DEALLOC_QP_COUNTER_SET";
	case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
		return "QUERY_QP_COUNTER_SET";
	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";
	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";
	case MLX5_CMD_OP_QUERY_PSV:
		return "QUERY_PSV";
	case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
		return "QUERY_SIG_RULE_TABLE";
	case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
		return "QUERY_BLOCK_SIZE_TABLE";
	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";
	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";
	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";
	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";
	case MLX5_CMD_OP_RESIZE_SRQ:
		return "RESIZE_SRQ";
	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";
	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";
	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";
	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";
	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";
	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";
	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";
	case MLX5_CMD_OP_ACCESS_REG:
		return "ACCESS_REG";
	default: return "unknown command opcode";
	}
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	int offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
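
/* Runs on the command workqueue (or synchronously for the page queue):
 * reserves a command-queue slot, builds the hardware descriptor, hands
 * ownership to the device and rings the doorbell. */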
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ktime_get_ts(&ent->ts1);

	/* ring doorbell after the descriptor is valid */
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = ent->ret;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
		      deliv_status_to_str(ent->status), ent->status);

	return err;
}
/*  Notes:
 *	1. Callback functions may not sleep
 *	2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	ktime_t t1, t2, delta;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		t1 = timespec_to_ktime(ent->ts1);
		t2 = timespec_to_ktime(ent->ts2);
		delta = ktime_sub(t2, t1);
		ds = ktime_to_ns(delta);
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}
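
/* debugfs "run" trigger: writing the string "go" executes the command
 * that was previously staged through the "in" and "out_len" files
 * created below. */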
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
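
/* Command messages carry their first 16 bytes inline in msg->first.data;
 * anything longer spills into the chain of mailbox blocks built by
 * mlx5_alloc_cmd_msg(). The two copy helpers below walk that layout. */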
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
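
/* Mode switching: take every unit of both semaphores so no command can
 * be in flight, flush the workqueue, flip cmd->mode, then release. */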
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
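
/* Completion path: invoked from the command EQ in event mode, or
 * directly by cmd_work_handler() after polling; 'vector' has one bit
 * set per completed command slot. */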
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	ktime_t t1, t2, delta;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ktime_get_ts(&ent->ts2);
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				t1 = timespec_to_ktime(ent->ts1);
				t2 = timespec_to_ktime(ent->ts2);
				delta = ktime_sub(t2, t1);
				ds = ktime_to_ns(delta);
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}
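
/* MANAGE_PAGES is routed through the dedicated page-queue slot (and its
 * own semaphore), apparently so firmware page requests can still be
 * served when all regular command slots are busy. */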
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
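
/*
 * Minimal usage sketch (hypothetical command layout; the real inbox and
 * outbox structs for each opcode live in the mlx5 headers):
 *
 *	struct { struct mlx5_inbox_hdr hdr; u8 rsvd[8]; } in;
 *	struct { struct mlx5_outbox_hdr hdr; u8 rsvd[8]; } out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */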
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
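
/* Driver/firmware handshake: the command queue lives in one DMA-mapped
 * page whose address is programmed into the init segment; firmware
 * reports the queue depth and stride back in cmdq_addr_l_sz. */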
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
	if (!cmd->cmd_buf) {
		err = -ENOMEM;
		goto err_free_pool;
	}
	cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
		err = -ENOMEM;
		goto err_free;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_map;
	}

	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_map;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_map;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_map;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_map;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_map:
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
err_free:
	free_pages((unsigned long)cmd->cmd_buf, 0);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cmd->cmd_buf, 0);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	switch (hdr->status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}