2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
36 #include <linux/dma-mapping.h>
43 MODULE_LICENSE("Dual BSD/GPL");
44 MODULE_DESCRIPTION("kernel IB MAD API");
45 MODULE_AUTHOR("Hal Rosenstock");
46 MODULE_AUTHOR("Sean Hefty");
49 kmem_cache_t *ib_mad_cache;
51 static struct list_head ib_mad_port_list;
52 static u32 ib_mad_client_id = 0;
55 static spinlock_t ib_mad_port_list_lock;
58 /* Forward declarations */
59 static int method_in_use(struct ib_mad_mgmt_method_table **method,
60 struct ib_mad_reg_req *mad_reg_req);
61 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
62 static struct ib_mad_agent_private *find_mad_agent(
63 struct ib_mad_port_private *port_priv,
65 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
66 struct ib_mad_private *mad);
67 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
68 static void timeout_sends(void *data);
69 static void local_completions(void *data);
70 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71 struct ib_mad_agent_private *agent_priv,
73 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
74 struct ib_mad_agent_private *agent_priv);
77 * Returns an ib_mad_port_private structure or NULL for a device/port
78 * Assumes ib_mad_port_list_lock is held
80 static inline struct ib_mad_port_private *
81 __ib_get_mad_port(struct ib_device *device, int port_num)
83 struct ib_mad_port_private *entry;
85 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
86 if (entry->device == device && entry->port_num == port_num)
93 * Wrapper function to return an ib_mad_port_private structure or NULL
96 static inline struct ib_mad_port_private *
97 ib_get_mad_port(struct ib_device *device, int port_num)
99 struct ib_mad_port_private *entry;
102 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
103 entry = __ib_get_mad_port(device, port_num);
104 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
109 static inline u8 convert_mgmt_class(u8 mgmt_class)
111 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
112 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
116 static int get_spl_qp_index(enum ib_qp_type qp_type)
129 static int vendor_class_index(u8 mgmt_class)
131 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
134 static int is_vendor_class(u8 mgmt_class)
136 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
137 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
142 static int is_vendor_oui(char *oui)
144 if (oui[0] || oui[1] || oui[2])
149 static int is_vendor_method_in_use(
150 struct ib_mad_mgmt_vendor_class *vendor_class,
151 struct ib_mad_reg_req *mad_reg_req)
153 struct ib_mad_mgmt_method_table *method;
156 for (i = 0; i < MAX_MGMT_OUI; i++) {
157 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
158 method = vendor_class->method_table[i];
160 if (method_in_use(&method, mad_reg_req))
171 * ib_register_mad_agent - Register to send/receive MADs
173 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
175 enum ib_qp_type qp_type,
176 struct ib_mad_reg_req *mad_reg_req,
178 ib_mad_send_handler send_handler,
179 ib_mad_recv_handler recv_handler,
182 struct ib_mad_port_private *port_priv;
183 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
184 struct ib_mad_agent_private *mad_agent_priv;
185 struct ib_mad_reg_req *reg_req = NULL;
186 struct ib_mad_mgmt_class_table *class;
187 struct ib_mad_mgmt_vendor_class_table *vendor;
188 struct ib_mad_mgmt_vendor_class *vendor_class;
189 struct ib_mad_mgmt_method_table *method;
192 u8 mgmt_class, vclass;
194 /* Validate parameters */
195 qpn = get_spl_qp_index(qp_type);
199 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
202 /* Validate MAD registration request if supplied */
204 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
208 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
210 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
211 * one in this range currently allowed
213 if (mad_reg_req->mgmt_class !=
214 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
216 } else if (mad_reg_req->mgmt_class == 0) {
218 * Class 0 is reserved in IBA and is used for
219 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
222 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
224 * If class is in "new" vendor range,
225 * ensure supplied OUI is not zero
227 if (!is_vendor_oui(mad_reg_req->oui))
230 /* Make sure class supplied is consistent with QP type */
231 if (qp_type == IB_QPT_SMI) {
232 if ((mad_reg_req->mgmt_class !=
233 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
234 (mad_reg_req->mgmt_class !=
235 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
238 if ((mad_reg_req->mgmt_class ==
239 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
240 (mad_reg_req->mgmt_class ==
241 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
245 /* No registration request supplied */
250 /* Validate device and port */
251 port_priv = ib_get_mad_port(device, port_num);
253 ret = ERR_PTR(-ENODEV);
257 /* Allocate structures */
258 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
259 if (!mad_agent_priv) {
260 ret = ERR_PTR(-ENOMEM);
264 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
265 IB_ACCESS_LOCAL_WRITE);
266 if (IS_ERR(mad_agent_priv->agent.mr)) {
267 ret = ERR_PTR(-ENOMEM);
272 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
274 ret = ERR_PTR(-ENOMEM);
277 /* Make a copy of the MAD registration request */
278 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
281 /* Now, fill in the various structures */
282 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
283 mad_agent_priv->reg_req = reg_req;
284 mad_agent_priv->agent.rmpp_version = rmpp_version;
285 mad_agent_priv->agent.device = device;
286 mad_agent_priv->agent.recv_handler = recv_handler;
287 mad_agent_priv->agent.send_handler = send_handler;
288 mad_agent_priv->agent.context = context;
289 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
290 mad_agent_priv->agent.port_num = port_num;
292 spin_lock_irqsave(&port_priv->reg_lock, flags);
293 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
296 * Make sure MAD registration (if supplied)
297 * does not overlap with any existing ones
300 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
301 if (!is_vendor_class(mgmt_class)) {
302 class = port_priv->version[mad_reg_req->
303 mgmt_class_version].class;
305 method = class->method_table[mgmt_class];
307 if (method_in_use(&method,
312 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
315 /* "New" vendor class range */
316 vendor = port_priv->version[mad_reg_req->
317 mgmt_class_version].vendor;
319 vclass = vendor_class_index(mgmt_class);
320 vendor_class = vendor->vendor_class[vclass];
322 if (is_vendor_method_in_use(
328 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
336 /* Add mad agent into port's agent list */
337 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
338 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
340 spin_lock_init(&mad_agent_priv->lock);
341 INIT_LIST_HEAD(&mad_agent_priv->send_list);
342 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
343 INIT_LIST_HEAD(&mad_agent_priv->done_list);
344 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
345 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
346 INIT_LIST_HEAD(&mad_agent_priv->local_list);
347 INIT_WORK(&mad_agent_priv->local_work, local_completions,
349 atomic_set(&mad_agent_priv->refcount, 1);
350 init_waitqueue_head(&mad_agent_priv->wait);
352 return &mad_agent_priv->agent;
355 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
358 ib_dereg_mr(mad_agent_priv->agent.mr);
360 kfree(mad_agent_priv);
364 EXPORT_SYMBOL(ib_register_mad_agent);
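/*
 * Example (added for illustration, not part of the original source): a
 * minimal sketch of how a client might register a GSI MAD agent for
 * Performance Management GET methods.  The handler names and "pm_context"
 * are placeholders supplied by the caller.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class		= IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version	= 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, pm_send_handler,
 *				      pm_recv_handler, pm_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */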
366 static inline int is_snooping_sends(int mad_snoop_flags)
368 return (mad_snoop_flags &
369 (/*IB_MAD_SNOOP_POSTED_SENDS |
370 IB_MAD_SNOOP_RMPP_SENDS |*/
371 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
372 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
375 static inline int is_snooping_recvs(int mad_snoop_flags)
377 return (mad_snoop_flags &
378 (IB_MAD_SNOOP_RECVS /*|
379 IB_MAD_SNOOP_RMPP_RECVS*/));
382 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
383 struct ib_mad_snoop_private *mad_snoop_priv)
385 struct ib_mad_snoop_private **new_snoop_table;
389 spin_lock_irqsave(&qp_info->snoop_lock, flags);
390 /* Check for empty slot in array. */
391 for (i = 0; i < qp_info->snoop_table_size; i++)
392 if (!qp_info->snoop_table[i])
395 if (i == qp_info->snoop_table_size) {
397 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
398 (qp_info->snoop_table_size + 1),
400 if (!new_snoop_table) {
404 if (qp_info->snoop_table) {
405 memcpy(new_snoop_table, qp_info->snoop_table,
406 sizeof mad_snoop_priv *
407 qp_info->snoop_table_size);
408 kfree(qp_info->snoop_table);
410 qp_info->snoop_table = new_snoop_table;
411 qp_info->snoop_table_size++;
413 qp_info->snoop_table[i] = mad_snoop_priv;
414 atomic_inc(&qp_info->snoop_count);
416 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
420 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
422 enum ib_qp_type qp_type,
424 ib_mad_snoop_handler snoop_handler,
425 ib_mad_recv_handler recv_handler,
428 struct ib_mad_port_private *port_priv;
429 struct ib_mad_agent *ret;
430 struct ib_mad_snoop_private *mad_snoop_priv;
433 /* Validate parameters */
434 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
435 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
436 ret = ERR_PTR(-EINVAL);
439 qpn = get_spl_qp_index(qp_type);
441 ret = ERR_PTR(-EINVAL);
444 port_priv = ib_get_mad_port(device, port_num);
446 ret = ERR_PTR(-ENODEV);
449 /* Allocate structures */
450 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
451 if (!mad_snoop_priv) {
452 ret = ERR_PTR(-ENOMEM);
456 /* Now, fill in the various structures */
457 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
458 mad_snoop_priv->agent.device = device;
459 mad_snoop_priv->agent.recv_handler = recv_handler;
460 mad_snoop_priv->agent.snoop_handler = snoop_handler;
461 mad_snoop_priv->agent.context = context;
462 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
463 mad_snoop_priv->agent.port_num = port_num;
464 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
465 init_waitqueue_head(&mad_snoop_priv->wait);
466 mad_snoop_priv->snoop_index = register_snoop_agent(
467 &port_priv->qp_info[qpn],
469 if (mad_snoop_priv->snoop_index < 0) {
470 ret = ERR_PTR(mad_snoop_priv->snoop_index);
474 atomic_set(&mad_snoop_priv->refcount, 1);
475 return &mad_snoop_priv->agent;
478 kfree(mad_snoop_priv);
482 EXPORT_SYMBOL(ib_register_mad_snoop);
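/*
 * Example (added for illustration): a rough sketch of registering a snoop
 * agent that observes received MADs and send completions on QP1.  Only
 * IB_MAD_SNOOP_RECVS and IB_MAD_SNOOP_SEND_COMPLETIONS are honored above;
 * the RMPP snoop flags remain commented out.  Handler names and
 * "my_context" are placeholders.
 *
 *	struct ib_mad_agent *snoop;
 *
 *	snoop = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *				      IB_MAD_SNOOP_RECVS |
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS,
 *				      my_snoop_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(snoop))
 *		return PTR_ERR(snoop);
 */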
484 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
486 struct ib_mad_port_private *port_priv;
489 /* Note that we could still be handling received MADs */
492 * Canceling all sends results in dropping received response
493 * MADs, preventing us from queuing additional work
495 cancel_mads(mad_agent_priv);
496 port_priv = mad_agent_priv->qp_info->port_priv;
497 cancel_delayed_work(&mad_agent_priv->timed_work);
499 spin_lock_irqsave(&port_priv->reg_lock, flags);
500 remove_mad_reg_req(mad_agent_priv);
501 list_del(&mad_agent_priv->agent_list);
502 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
504 flush_workqueue(port_priv->wq);
505 ib_cancel_rmpp_recvs(mad_agent_priv);
507 atomic_dec(&mad_agent_priv->refcount);
508 wait_event(mad_agent_priv->wait,
509 !atomic_read(&mad_agent_priv->refcount));
511 kfree(mad_agent_priv->reg_req);
512 ib_dereg_mr(mad_agent_priv->agent.mr);
513 kfree(mad_agent_priv);
516 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
518 struct ib_mad_qp_info *qp_info;
521 qp_info = mad_snoop_priv->qp_info;
522 spin_lock_irqsave(&qp_info->snoop_lock, flags);
523 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
524 atomic_dec(&qp_info->snoop_count);
525 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
527 atomic_dec(&mad_snoop_priv->refcount);
528 wait_event(mad_snoop_priv->wait,
529 !atomic_read(&mad_snoop_priv->refcount));
531 kfree(mad_snoop_priv);
535 * ib_unregister_mad_agent - Unregisters a client from using MAD services
537 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
539 struct ib_mad_agent_private *mad_agent_priv;
540 struct ib_mad_snoop_private *mad_snoop_priv;
542 /* If the TID is zero, the agent can only snoop. */
543 if (mad_agent->hi_tid) {
544 mad_agent_priv = container_of(mad_agent,
545 struct ib_mad_agent_private,
547 unregister_mad_agent(mad_agent_priv);
549 mad_snoop_priv = container_of(mad_agent,
550 struct ib_mad_snoop_private,
552 unregister_mad_snoop(mad_snoop_priv);
556 EXPORT_SYMBOL(ib_unregister_mad_agent);
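/*
 * Note (added): unregistration is synchronous.  The call above cancels any
 * outstanding sends, flushes the port work queue, and then sleeps until the
 * agent's reference count drops to zero, so it should not be invoked from
 * the agent's own completion handlers.
 */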
558 static inline int response_mad(struct ib_mad *mad)
560 /* Trap represses are responses, although the response bit is not set */
561 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
562 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
565 static void dequeue_mad(struct ib_mad_list_head *mad_list)
567 struct ib_mad_queue *mad_queue;
570 BUG_ON(!mad_list->mad_queue);
571 mad_queue = mad_list->mad_queue;
572 spin_lock_irqsave(&mad_queue->lock, flags);
573 list_del(&mad_list->list);
575 spin_unlock_irqrestore(&mad_queue->lock, flags);
578 static void snoop_send(struct ib_mad_qp_info *qp_info,
579 struct ib_mad_send_buf *send_buf,
580 struct ib_mad_send_wc *mad_send_wc,
583 struct ib_mad_snoop_private *mad_snoop_priv;
587 spin_lock_irqsave(&qp_info->snoop_lock, flags);
588 for (i = 0; i < qp_info->snoop_table_size; i++) {
589 mad_snoop_priv = qp_info->snoop_table[i];
590 if (!mad_snoop_priv ||
591 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
594 atomic_inc(&mad_snoop_priv->refcount);
595 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
596 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
597 send_buf, mad_send_wc);
598 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
599 wake_up(&mad_snoop_priv->wait);
600 spin_lock_irqsave(&qp_info->snoop_lock, flags);
602 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
605 static void snoop_recv(struct ib_mad_qp_info *qp_info,
606 struct ib_mad_recv_wc *mad_recv_wc,
609 struct ib_mad_snoop_private *mad_snoop_priv;
613 spin_lock_irqsave(&qp_info->snoop_lock, flags);
614 for (i = 0; i < qp_info->snoop_table_size; i++) {
615 mad_snoop_priv = qp_info->snoop_table[i];
616 if (!mad_snoop_priv ||
617 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
620 atomic_inc(&mad_snoop_priv->refcount);
621 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
622 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
624 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
625 wake_up(&mad_snoop_priv->wait);
626 spin_lock_irqsave(&qp_info->snoop_lock, flags);
628 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
631 static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
634 memset(wc, 0, sizeof *wc);
636 wc->status = IB_WC_SUCCESS;
637 wc->opcode = IB_WC_RECV;
638 wc->pkey_index = pkey_index;
639 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
644 wc->dlid_path_bits = 0;
645 wc->port_num = port_num;
649 * Return 0 if SMP is to be sent
650 * Return 1 if SMP was consumed locally (whether or not solicited)
651 * Return < 0 if error
653 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
654 struct ib_mad_send_wr_private *mad_send_wr)
657 struct ib_smp *smp = mad_send_wr->send_buf.mad;
659 struct ib_mad_local_private *local;
660 struct ib_mad_private *mad_priv;
661 struct ib_mad_port_private *port_priv;
662 struct ib_mad_agent_private *recv_mad_agent = NULL;
663 struct ib_device *device = mad_agent_priv->agent.device;
664 u8 port_num = mad_agent_priv->agent.port_num;
666 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
669 * Directed route handling starts if the initial LID routed part of
670 * a request or the ending LID routed part of a response is empty.
671 * If we are at the start of the LID routed part, don't update the
672 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
674 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
676 !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
678 printk(KERN_ERR PFX "Invalid directed route\n");
681 /* Check to post send on QP or process locally */
682 ret = smi_check_local_smp(smp, device);
686 local = kmalloc(sizeof *local, GFP_ATOMIC);
689 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
692 local->mad_priv = NULL;
693 local->recv_mad_agent = NULL;
694 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
697 printk(KERN_ERR PFX "No memory for local response MAD\n");
702 build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
703 send_wr->wr.ud.pkey_index,
704 send_wr->wr.ud.port_num, &mad_wc);
706 /* No GRH for DR SMP */
707 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
708 (struct ib_mad *)smp,
709 (struct ib_mad *)&mad_priv->mad);
712 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
713 if (response_mad(&mad_priv->mad.mad) &&
714 mad_agent_priv->agent.recv_handler) {
715 local->mad_priv = mad_priv;
716 local->recv_mad_agent = mad_agent_priv;
718 * Reference MAD agent until receive
719 * side of local completion handled
721 atomic_inc(&mad_agent_priv->refcount);
723 kmem_cache_free(ib_mad_cache, mad_priv);
725 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
726 kmem_cache_free(ib_mad_cache, mad_priv);
728 case IB_MAD_RESULT_SUCCESS:
729 /* Treat like an incoming receive MAD */
730 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
731 mad_agent_priv->agent.port_num);
733 mad_priv->mad.mad.mad_hdr.tid =
734 ((struct ib_mad *)smp)->mad_hdr.tid;
735 recv_mad_agent = find_mad_agent(port_priv,
738 if (!port_priv || !recv_mad_agent) {
739 kmem_cache_free(ib_mad_cache, mad_priv);
744 local->mad_priv = mad_priv;
745 local->recv_mad_agent = recv_mad_agent;
748 kmem_cache_free(ib_mad_cache, mad_priv);
754 local->mad_send_wr = mad_send_wr;
755 /* Reference MAD agent until send side of local completion handled */
756 atomic_inc(&mad_agent_priv->refcount);
757 /* Queue local completion to local list */
758 spin_lock_irqsave(&mad_agent_priv->lock, flags);
759 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
760 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
761 queue_work(mad_agent_priv->qp_info->port_priv->wq,
762 &mad_agent_priv->local_work);
768 static int get_pad_size(int hdr_len, int data_len)
772 seg_size = sizeof(struct ib_mad) - hdr_len;
773 if (data_len && seg_size) {
774 pad = seg_size - data_len % seg_size;
775 return pad == seg_size ? 0 : pad;
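/*
 * Worked example (added for clarity): with sizeof(struct ib_mad) == 256
 * and a 56-byte header (e.g. an SA MAD), each RMPP data segment carries
 * seg_size = 256 - 56 = 200 bytes.  A 500-byte payload therefore needs
 * three segments and pad = 200 - (500 % 200) = 100 bytes of zeroed
 * padding in the last segment.
 */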
780 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
782 struct ib_rmpp_segment *s, *t;
784 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
790 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
793 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
794 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
795 struct ib_rmpp_segment *seg = NULL;
796 int left, seg_size, pad;
798 send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
799 seg_size = send_buf->seg_size;
802 /* Allocate data segments. */
803 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
804 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
806 printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
807 "alloc failed for len %zd, gfp %#x\n",
808 sizeof (*seg) + seg_size, gfp_mask);
809 free_send_rmpp_list(send_wr);
812 seg->num = ++send_buf->seg_count;
813 list_add_tail(&seg->list, &send_wr->rmpp_list);
816 /* Zero any padding */
818 memset(seg->data + seg_size - pad, 0, pad);
820 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
822 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
823 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
825 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
826 struct ib_rmpp_segment, list);
827 send_wr->last_ack_seg = send_wr->cur_seg;
831 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
832 u32 remote_qpn, u16 pkey_index,
834 int hdr_len, int data_len,
837 struct ib_mad_agent_private *mad_agent_priv;
838 struct ib_mad_send_wr_private *mad_send_wr;
839 int pad, message_size, ret, size;
842 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
844 pad = get_pad_size(hdr_len, data_len);
845 message_size = hdr_len + data_len + pad;
847 if ((!mad_agent->rmpp_version &&
848 (rmpp_active || message_size > sizeof(struct ib_mad))) ||
849 (!rmpp_active && message_size > sizeof(struct ib_mad)))
850 return ERR_PTR(-EINVAL);
852 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
853 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
855 return ERR_PTR(-ENOMEM);
857 mad_send_wr = buf + size;
858 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
859 mad_send_wr->send_buf.mad = buf;
860 mad_send_wr->send_buf.hdr_len = hdr_len;
861 mad_send_wr->send_buf.data_len = data_len;
862 mad_send_wr->pad = pad;
864 mad_send_wr->mad_agent_priv = mad_agent_priv;
865 mad_send_wr->sg_list[0].length = hdr_len;
866 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
867 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
868 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
870 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
871 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
872 mad_send_wr->send_wr.num_sge = 2;
873 mad_send_wr->send_wr.opcode = IB_WR_SEND;
874 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
875 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
876 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
877 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
880 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
887 mad_send_wr->send_buf.mad_agent = mad_agent;
888 atomic_inc(&mad_agent_priv->refcount);
889 return &mad_send_wr->send_buf;
891 EXPORT_SYMBOL(ib_create_send_mad);
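/*
 * Example (added for illustration, not part of the original source): a
 * hedged sketch of building and posting a single non-RMPP MAD with the
 * interfaces above.  "agent", "ah", "remote_qpn" and "pkey_index" are
 * assumed to come from the caller.
 *
 *	struct ib_mad_send_buf *msg;
 *	int ret;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	(fill in the MAD header and payload through msg->mad here)
 *
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */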
893 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
895 struct ib_mad_send_wr_private *mad_send_wr;
896 struct list_head *list;
898 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
900 list = &mad_send_wr->cur_seg->list;
902 if (mad_send_wr->cur_seg->num < seg_num) {
903 list_for_each_entry(mad_send_wr->cur_seg, list, list)
904 if (mad_send_wr->cur_seg->num == seg_num)
906 } else if (mad_send_wr->cur_seg->num > seg_num) {
907 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
908 if (mad_send_wr->cur_seg->num == seg_num)
911 return mad_send_wr->cur_seg->data;
913 EXPORT_SYMBOL(ib_get_rmpp_segment);
915 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
917 if (mad_send_wr->send_buf.seg_count)
918 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
919 mad_send_wr->seg_num);
921 return mad_send_wr->send_buf.mad +
922 mad_send_wr->send_buf.hdr_len;
925 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
927 struct ib_mad_agent_private *mad_agent_priv;
928 struct ib_mad_send_wr_private *mad_send_wr;
930 mad_agent_priv = container_of(send_buf->mad_agent,
931 struct ib_mad_agent_private, agent);
932 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
935 free_send_rmpp_list(mad_send_wr);
936 kfree(send_buf->mad);
937 if (atomic_dec_and_test(&mad_agent_priv->refcount))
938 wake_up(&mad_agent_priv->wait);
940 EXPORT_SYMBOL(ib_free_send_mad);
942 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
944 struct ib_mad_qp_info *qp_info;
945 struct list_head *list;
946 struct ib_send_wr *bad_send_wr;
947 struct ib_mad_agent *mad_agent;
952 /* Set WR ID to find mad_send_wr upon completion */
953 qp_info = mad_send_wr->mad_agent_priv->qp_info;
954 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
955 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
957 mad_agent = mad_send_wr->send_buf.mad_agent;
958 sge = mad_send_wr->sg_list;
959 sge[0].addr = dma_map_single(mad_agent->device->dma_device,
960 mad_send_wr->send_buf.mad,
963 pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
965 sge[1].addr = dma_map_single(mad_agent->device->dma_device,
966 ib_get_payload(mad_send_wr),
969 pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
971 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
972 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
973 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
975 list = &qp_info->send_queue.list;
978 list = &qp_info->overflow_list;
982 qp_info->send_queue.count++;
983 list_add_tail(&mad_send_wr->mad_list.list, list);
985 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
987 dma_unmap_single(mad_agent->device->dma_device,
988 pci_unmap_addr(mad_send_wr, header_mapping),
989 sge[0].length, DMA_TO_DEVICE);
990 dma_unmap_single(mad_agent->device->dma_device,
991 pci_unmap_addr(mad_send_wr, payload_mapping),
992 sge[1].length, DMA_TO_DEVICE);
998 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
999 * with the registered client
1001 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1002 struct ib_mad_send_buf **bad_send_buf)
1004 struct ib_mad_agent_private *mad_agent_priv;
1005 struct ib_mad_send_buf *next_send_buf;
1006 struct ib_mad_send_wr_private *mad_send_wr;
1007 unsigned long flags;
1010 /* Walk list of send WRs and post each on send list */
1011 for (; send_buf; send_buf = next_send_buf) {
1013 mad_send_wr = container_of(send_buf,
1014 struct ib_mad_send_wr_private,
1016 mad_agent_priv = mad_send_wr->mad_agent_priv;
1018 if (!send_buf->mad_agent->send_handler ||
1019 (send_buf->timeout_ms &&
1020 !send_buf->mad_agent->recv_handler)) {
1026 * Save pointer to next work request to post in case the
1027 * current one completes, and the user modifies the work
1028 * request associated with the completion
1030 next_send_buf = send_buf->next;
1031 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1033 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1034 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1035 ret = handle_outgoing_dr_smp(mad_agent_priv,
1037 if (ret < 0) /* error */
1039 else if (ret == 1) /* locally consumed */
1043 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1044 /* Timeout will be updated after send completes */
1045 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1046 mad_send_wr->retries = send_buf->retries;
1047 /* Reference for work request to QP + response */
1048 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1049 mad_send_wr->status = IB_WC_SUCCESS;
1051 /* Reference MAD agent until send completes */
1052 atomic_inc(&mad_agent_priv->refcount);
1053 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1054 list_add_tail(&mad_send_wr->agent_list,
1055 &mad_agent_priv->send_list);
1056 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1058 if (mad_agent_priv->agent.rmpp_version) {
1059 ret = ib_send_rmpp_mad(mad_send_wr);
1060 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1061 ret = ib_send_mad(mad_send_wr);
1063 ret = ib_send_mad(mad_send_wr);
1065 /* Fail send request */
1066 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1067 list_del(&mad_send_wr->agent_list);
1068 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1069 atomic_dec(&mad_agent_priv->refcount);
1076 *bad_send_buf = send_buf;
1079 EXPORT_SYMBOL(ib_post_send_mad);
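/*
 * Note (added): several send buffers may be chained through send_buf->next
 * and posted with one call; the loop above posts them in order.  On failure
 * the buffer that could not be posted (and, through its next pointer, the
 * rest of the unposted chain) is reported via *bad_send_buf when the caller
 * supplies one, so those buffers can be freed or retried.
 */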
1082 * ib_free_recv_mad - Returns the data buffers used to receive
1083 * a MAD back to the access layer
1085 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1087 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1088 struct ib_mad_private_header *mad_priv_hdr;
1089 struct ib_mad_private *priv;
1090 struct list_head free_list;
1092 INIT_LIST_HEAD(&free_list);
1093 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1095 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1097 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1099 mad_priv_hdr = container_of(mad_recv_wc,
1100 struct ib_mad_private_header,
1102 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1104 kmem_cache_free(ib_mad_cache, priv);
1107 EXPORT_SYMBOL(ib_free_recv_mad);
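/*
 * Note (added): the receive handler owns the ib_mad_recv_wc passed to it
 * and is expected to call ib_free_recv_mad() when it is finished with the
 * data.  For RMPP transfers the rmpp_list chains every segment buffer, so
 * the single call above returns all of them to the MAD cache.
 */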
1109 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1111 ib_mad_send_handler send_handler,
1112 ib_mad_recv_handler recv_handler,
1115 return ERR_PTR(-EINVAL); /* XXX: for now */
1117 EXPORT_SYMBOL(ib_redirect_mad_qp);
1119 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1122 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1125 EXPORT_SYMBOL(ib_process_mad_wc);
1127 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1128 struct ib_mad_reg_req *mad_reg_req)
1132 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1133 i < IB_MGMT_MAX_METHODS;
1134 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1136 if ((*method)->agent[i]) {
1137 printk(KERN_ERR PFX "Method %d already in use\n", i);
1144 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1146 /* Allocate management method table */
1147 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1149 printk(KERN_ERR PFX "No memory for "
1150 "ib_mad_mgmt_method_table\n");
1158 * Check to see if there are any methods still in use
1160 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1164 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1165 if (method->agent[i])
1171 * Check to see if there are any method tables for this class still in use
1173 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1177 for (i = 0; i < MAX_MGMT_CLASS; i++)
1178 if (class->method_table[i])
1183 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1187 for (i = 0; i < MAX_MGMT_OUI; i++)
1188 if (vendor_class->method_table[i])
1193 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1198 for (i = 0; i < MAX_MGMT_OUI; i++)
1199 /* Is there a matching OUI for this vendor class? */
1200 if (!memcmp(vendor_class->oui[i], oui, 3))
1206 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1210 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1211 if (vendor->vendor_class[i])
1217 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1218 struct ib_mad_agent_private *agent)
1222 /* Remove any methods for this mad agent */
1223 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1224 if (method->agent[i] == agent) {
1225 method->agent[i] = NULL;
1230 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1231 struct ib_mad_agent_private *agent_priv,
1234 struct ib_mad_port_private *port_priv;
1235 struct ib_mad_mgmt_class_table **class;
1236 struct ib_mad_mgmt_method_table **method;
1239 port_priv = agent_priv->qp_info->port_priv;
1240 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1242 /* Allocate management class table for "new" class version */
1243 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1245 printk(KERN_ERR PFX "No memory for "
1246 "ib_mad_mgmt_class_table\n");
1251 /* Allocate method table for this management class */
1252 method = &(*class)->method_table[mgmt_class];
1253 if ((ret = allocate_method_table(method)))
1256 method = &(*class)->method_table[mgmt_class];
1258 /* Allocate method table for this management class */
1259 if ((ret = allocate_method_table(method)))
1264 /* Now, make sure methods are not already in use */
1265 if (method_in_use(method, mad_reg_req))
1268 /* Finally, add in methods being registered */
1269 for (i = find_first_bit(mad_reg_req->method_mask,
1270 IB_MGMT_MAX_METHODS);
1271 i < IB_MGMT_MAX_METHODS;
1272 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1274 (*method)->agent[i] = agent_priv;
1279 /* Remove any methods for this mad agent */
1280 remove_methods_mad_agent(*method, agent_priv);
1281 /* Now, check to see if there are any methods in use */
1282 if (!check_method_table(*method)) {
1283 /* If not, release management method table */
1296 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1297 struct ib_mad_agent_private *agent_priv)
1299 struct ib_mad_port_private *port_priv;
1300 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1301 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1302 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1303 struct ib_mad_mgmt_method_table **method;
1304 int i, ret = -ENOMEM;
1307 /* "New" vendor (with OUI) class */
1308 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1309 port_priv = agent_priv->qp_info->port_priv;
1310 vendor_table = &port_priv->version[
1311 mad_reg_req->mgmt_class_version].vendor;
1312 if (!*vendor_table) {
1313 /* Allocate mgmt vendor class table for "new" class version */
1314 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1316 printk(KERN_ERR PFX "No memory for "
1317 "ib_mad_mgmt_vendor_class_table\n");
1321 *vendor_table = vendor;
1323 if (!(*vendor_table)->vendor_class[vclass]) {
1324 /* Allocate table for this management vendor class */
1325 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1326 if (!vendor_class) {
1327 printk(KERN_ERR PFX "No memory for "
1328 "ib_mad_mgmt_vendor_class\n");
1332 (*vendor_table)->vendor_class[vclass] = vendor_class;
1334 for (i = 0; i < MAX_MGMT_OUI; i++) {
1335 /* Is there a matching OUI for this vendor class? */
1336 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1337 mad_reg_req->oui, 3)) {
1338 method = &(*vendor_table)->vendor_class[
1339 vclass]->method_table[i];
1344 for (i = 0; i < MAX_MGMT_OUI; i++) {
1345 /* OUI slot available? */
1346 if (!is_vendor_oui((*vendor_table)->vendor_class[
1348 method = &(*vendor_table)->vendor_class[
1349 vclass]->method_table[i];
1351 /* Allocate method table for this OUI */
1352 if ((ret = allocate_method_table(method)))
1354 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1355 mad_reg_req->oui, 3);
1359 printk(KERN_ERR PFX "All OUI slots in use\n");
1363 /* Now, make sure methods are not already in use */
1364 if (method_in_use(method, mad_reg_req))
1367 /* Finally, add in methods being registered */
1368 for (i = find_first_bit(mad_reg_req->method_mask,
1369 IB_MGMT_MAX_METHODS);
1370 i < IB_MGMT_MAX_METHODS;
1371 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1373 (*method)->agent[i] = agent_priv;
1378 /* Remove any methods for this mad agent */
1379 remove_methods_mad_agent(*method, agent_priv);
1380 /* Now, check to see if there are any methods in use */
1381 if (!check_method_table(*method)) {
1382 /* If not, release management method table */
1389 (*vendor_table)->vendor_class[vclass] = NULL;
1390 kfree(vendor_class);
1394 *vendor_table = NULL;
1401 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1403 struct ib_mad_port_private *port_priv;
1404 struct ib_mad_mgmt_class_table *class;
1405 struct ib_mad_mgmt_method_table *method;
1406 struct ib_mad_mgmt_vendor_class_table *vendor;
1407 struct ib_mad_mgmt_vendor_class *vendor_class;
1412 * Was a MAD registration request supplied
1413 * with the original registration?
1415 if (!agent_priv->reg_req) {
1419 port_priv = agent_priv->qp_info->port_priv;
1420 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1421 class = port_priv->version[
1422 agent_priv->reg_req->mgmt_class_version].class;
1426 method = class->method_table[mgmt_class];
1428 /* Remove any methods for this mad agent */
1429 remove_methods_mad_agent(method, agent_priv);
1430 /* Now, check to see if there are any methods still in use */
1431 if (!check_method_table(method)) {
1432 /* If not, release management method table */
1434 class->method_table[mgmt_class] = NULL;
1435 /* Any management classes left? */
1436 if (!check_class_table(class)) {
1437 /* If not, release management class table */
1440 agent_priv->reg_req->
1441 mgmt_class_version].class = NULL;
1447 if (!is_vendor_class(mgmt_class))
1450 /* normalize mgmt_class to vendor range 2 */
1451 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1452 vendor = port_priv->version[
1453 agent_priv->reg_req->mgmt_class_version].vendor;
1458 vendor_class = vendor->vendor_class[mgmt_class];
1460 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1463 method = vendor_class->method_table[index];
1465 /* Remove any methods for this mad agent */
1466 remove_methods_mad_agent(method, agent_priv);
1468 * Now, check to see if there are
1469 * any methods still in use
1471 if (!check_method_table(method)) {
1472 /* If not, release management method table */
1474 vendor_class->method_table[index] = NULL;
1475 memset(vendor_class->oui[index], 0, 3);
1476 /* Any OUIs left? */
1477 if (!check_vendor_class(vendor_class)) {
1478 /* If not, release vendor class table */
1479 kfree(vendor_class);
1480 vendor->vendor_class[mgmt_class] = NULL;
1481 /* Any other vendor classes left? */
1482 if (!check_vendor_table(vendor)) {
1485 agent_priv->reg_req->
1486 mgmt_class_version].
1498 static struct ib_mad_agent_private *
1499 find_mad_agent(struct ib_mad_port_private *port_priv,
1502 struct ib_mad_agent_private *mad_agent = NULL;
1503 unsigned long flags;
1505 spin_lock_irqsave(&port_priv->reg_lock, flags);
1506 if (response_mad(mad)) {
1508 struct ib_mad_agent_private *entry;
1511 * Routing is based on high 32 bits of transaction ID
1514 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1515 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1516 if (entry->agent.hi_tid == hi_tid) {
1522 struct ib_mad_mgmt_class_table *class;
1523 struct ib_mad_mgmt_method_table *method;
1524 struct ib_mad_mgmt_vendor_class_table *vendor;
1525 struct ib_mad_mgmt_vendor_class *vendor_class;
1526 struct ib_vendor_mad *vendor_mad;
1530 * Routing is based on version, class, and method
1531 * For "newer" vendor MADs, also based on OUI
1533 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1535 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1536 class = port_priv->version[
1537 mad->mad_hdr.class_version].class;
1540 method = class->method_table[convert_mgmt_class(
1541 mad->mad_hdr.mgmt_class)];
1543 mad_agent = method->agent[mad->mad_hdr.method &
1544 ~IB_MGMT_METHOD_RESP];
1546 vendor = port_priv->version[
1547 mad->mad_hdr.class_version].vendor;
1550 vendor_class = vendor->vendor_class[vendor_class_index(
1551 mad->mad_hdr.mgmt_class)];
1554 /* Find matching OUI */
1555 vendor_mad = (struct ib_vendor_mad *)mad;
1556 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1559 method = vendor_class->method_table[index];
1561 mad_agent = method->agent[mad->mad_hdr.method &
1562 ~IB_MGMT_METHOD_RESP];
1568 if (mad_agent->agent.recv_handler)
1569 atomic_inc(&mad_agent->refcount);
1571 printk(KERN_NOTICE PFX "No receive handler for client "
1573 &mad_agent->agent, port_priv->port_num);
1578 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1583 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1587 /* Make sure MAD base version is understood */
1588 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1589 printk(KERN_ERR PFX "MAD received with unsupported base "
1590 "version %d\n", mad->mad_hdr.base_version);
1594 /* Filter SMI packets sent to other than QP0 */
1595 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1596 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1600 /* Filter GSI packets sent to QP0 */
1609 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1610 struct ib_mad_hdr *mad_hdr)
1612 struct ib_rmpp_mad *rmpp_mad;
1614 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1615 return !mad_agent_priv->agent.rmpp_version ||
1616 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1617 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1618 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1621 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1622 struct ib_mad_recv_wc *rwc)
1624 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1625 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1628 static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr,
1629 struct ib_mad_recv_wc *rwc)
1631 struct ib_ah_attr attr;
1632 u8 send_resp, rcv_resp;
1634 send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
1635 mad_hdr.method & IB_MGMT_METHOD_RESP;
1636 rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
1638 if (!send_resp && rcv_resp)
1639 /* is request/response. GID/LIDs are both local (same). */
1642 if (send_resp == rcv_resp)
1643 /* both requests, or both responses. GIDs different */
1646 if (ib_query_ah(wr->send_buf.ah, &attr))
1647 /* Assume not equal, to avoid false positives. */
1650 if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH))
1651 return attr.dlid == rwc->wc->slid;
1652 else if ((attr.ah_flags & IB_AH_GRH) &&
1653 (rwc->wc->wc_flags & IB_WC_GRH))
1654 return memcmp(attr.grh.dgid.raw,
1655 rwc->recv_buf.grh->sgid.raw, 16) == 0;
1657 /* one has GID, other does not. Assume different */
1660 struct ib_mad_send_wr_private*
1661 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1662 struct ib_mad_recv_wc *mad_recv_wc)
1664 struct ib_mad_send_wr_private *mad_send_wr;
1667 mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad;
1669 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1671 if ((mad_send_wr->tid == mad->mad_hdr.tid) &&
1672 rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
1673 rcv_has_same_gid(mad_send_wr, mad_recv_wc))
1678 * It's possible to receive the response before we've
1679 * been notified that the send has completed
1681 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1683 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
1684 mad_send_wr->tid == mad->mad_hdr.tid &&
1685 mad_send_wr->timeout &&
1686 rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
1687 rcv_has_same_gid(mad_send_wr, mad_recv_wc)) {
1688 /* Verify request has not been canceled */
1689 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1696 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1698 mad_send_wr->timeout = 0;
1699 if (mad_send_wr->refcount == 1) {
1700 list_del(&mad_send_wr->agent_list);
1701 list_add_tail(&mad_send_wr->agent_list,
1702 &mad_send_wr->mad_agent_priv->done_list);
1706 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1707 struct ib_mad_recv_wc *mad_recv_wc)
1709 struct ib_mad_send_wr_private *mad_send_wr;
1710 struct ib_mad_send_wc mad_send_wc;
1711 unsigned long flags;
1713 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1714 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1715 if (mad_agent_priv->agent.rmpp_version) {
1716 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1719 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1720 wake_up(&mad_agent_priv->wait);
1725 /* Complete corresponding request */
1726 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1727 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1728 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1730 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1731 ib_free_recv_mad(mad_recv_wc);
1732 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1733 wake_up(&mad_agent_priv->wait);
1736 ib_mark_mad_done(mad_send_wr);
1737 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1739 /* Defined behavior is to complete response before request */
1740 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1741 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1743 atomic_dec(&mad_agent_priv->refcount);
1745 mad_send_wc.status = IB_WC_SUCCESS;
1746 mad_send_wc.vendor_err = 0;
1747 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1748 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1750 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1752 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1753 wake_up(&mad_agent_priv->wait);
1757 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1760 struct ib_mad_qp_info *qp_info;
1761 struct ib_mad_private_header *mad_priv_hdr;
1762 struct ib_mad_private *recv, *response;
1763 struct ib_mad_list_head *mad_list;
1764 struct ib_mad_agent_private *mad_agent;
1766 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1768 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1769 "for response buffer\n");
1771 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1772 qp_info = mad_list->mad_queue->qp_info;
1773 dequeue_mad(mad_list);
1775 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1777 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1778 dma_unmap_single(port_priv->device->dma_device,
1779 pci_unmap_addr(&recv->header, mapping),
1780 sizeof(struct ib_mad_private) -
1781 sizeof(struct ib_mad_private_header),
1784 /* Setup MAD receive work completion from "normal" work completion */
1785 recv->header.wc = *wc;
1786 recv->header.recv_wc.wc = &recv->header.wc;
1787 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1788 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1789 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1791 if (atomic_read(&qp_info->snoop_count))
1792 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1795 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1798 if (recv->mad.mad.mad_hdr.mgmt_class ==
1799 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1800 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1801 port_priv->device->node_type,
1802 port_priv->port_num,
1803 port_priv->device->phys_port_cnt))
1805 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1807 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1808 port_priv->device->node_type,
1809 port_priv->port_num))
1811 if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
1816 /* Give driver "right of first refusal" on incoming MAD */
1817 if (port_priv->device->process_mad) {
1821 printk(KERN_ERR PFX "No memory for response MAD\n");
1823 * Is it better to assume that
1824 * it wouldn't be processed?
1829 ret = port_priv->device->process_mad(port_priv->device, 0,
1830 port_priv->port_num,
1833 &response->mad.mad);
1834 if (ret & IB_MAD_RESULT_SUCCESS) {
1835 if (ret & IB_MAD_RESULT_CONSUMED)
1837 if (ret & IB_MAD_RESULT_REPLY) {
1838 agent_send_response(&response->mad.mad,
1841 port_priv->port_num,
1842 qp_info->qp->qp_num);
1848 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1850 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1852 * recv is freed either in the error paths of ib_mad_complete_recv()
1853 * or by the client's recv_handler via ib_free_recv_mad()
1859 /* Post another receive request for this QP */
1861 ib_mad_post_receive_mads(qp_info, response);
1863 kmem_cache_free(ib_mad_cache, recv);
1865 ib_mad_post_receive_mads(qp_info, recv);
1868 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1870 struct ib_mad_send_wr_private *mad_send_wr;
1871 unsigned long delay;
1873 if (list_empty(&mad_agent_priv->wait_list)) {
1874 cancel_delayed_work(&mad_agent_priv->timed_work);
1876 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1877 struct ib_mad_send_wr_private,
1880 if (time_after(mad_agent_priv->timeout,
1881 mad_send_wr->timeout)) {
1882 mad_agent_priv->timeout = mad_send_wr->timeout;
1883 cancel_delayed_work(&mad_agent_priv->timed_work);
1884 delay = mad_send_wr->timeout - jiffies;
1885 if ((long)delay <= 0)
1887 queue_delayed_work(mad_agent_priv->qp_info->
1889 &mad_agent_priv->timed_work, delay);
1894 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1896 struct ib_mad_agent_private *mad_agent_priv;
1897 struct ib_mad_send_wr_private *temp_mad_send_wr;
1898 struct list_head *list_item;
1899 unsigned long delay;
1901 mad_agent_priv = mad_send_wr->mad_agent_priv;
1902 list_del(&mad_send_wr->agent_list);
1904 delay = mad_send_wr->timeout;
1905 mad_send_wr->timeout += jiffies;
1908 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1909 temp_mad_send_wr = list_entry(list_item,
1910 struct ib_mad_send_wr_private,
1912 if (time_after(mad_send_wr->timeout,
1913 temp_mad_send_wr->timeout))
1918 list_item = &mad_agent_priv->wait_list;
1919 list_add(&mad_send_wr->agent_list, list_item);
1921 /* Reschedule a work item if we have a shorter timeout */
1922 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1923 cancel_delayed_work(&mad_agent_priv->timed_work);
1924 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
1925 &mad_agent_priv->timed_work, delay);
1929 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
1932 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
1933 wait_for_response(mad_send_wr);
1937 * Process a send work completion
1939 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1940 struct ib_mad_send_wc *mad_send_wc)
1942 struct ib_mad_agent_private *mad_agent_priv;
1943 unsigned long flags;
1946 mad_agent_priv = mad_send_wr->mad_agent_priv;
1947 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1948 if (mad_agent_priv->agent.rmpp_version) {
1949 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
1950 if (ret == IB_RMPP_RESULT_CONSUMED)
1953 ret = IB_RMPP_RESULT_UNHANDLED;
1955 if (mad_send_wc->status != IB_WC_SUCCESS &&
1956 mad_send_wr->status == IB_WC_SUCCESS) {
1957 mad_send_wr->status = mad_send_wc->status;
1958 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1961 if (--mad_send_wr->refcount > 0) {
1962 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1963 mad_send_wr->status == IB_WC_SUCCESS) {
1964 wait_for_response(mad_send_wr);
1969 /* Remove send from MAD agent and notify client of completion */
1970 list_del(&mad_send_wr->agent_list);
1971 adjust_timeout(mad_agent_priv);
1972 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1974 if (mad_send_wr->status != IB_WC_SUCCESS)
1975 mad_send_wc->status = mad_send_wr->status;
1976 if (ret == IB_RMPP_RESULT_INTERNAL)
1977 ib_rmpp_send_handler(mad_send_wc);
1979 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1982 /* Release reference on agent taken when sending */
1983 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1984 wake_up(&mad_agent_priv->wait);
1987 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1990 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
1993 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
1994 struct ib_mad_list_head *mad_list;
1995 struct ib_mad_qp_info *qp_info;
1996 struct ib_mad_queue *send_queue;
1997 struct ib_send_wr *bad_send_wr;
1998 struct ib_mad_send_wc mad_send_wc;
1999 unsigned long flags;
2002 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2003 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2005 send_queue = mad_list->mad_queue;
2006 qp_info = send_queue->qp_info;
2009 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
2010 pci_unmap_addr(mad_send_wr, header_mapping),
2011 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2012 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
2013 pci_unmap_addr(mad_send_wr, payload_mapping),
2014 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2015 queued_send_wr = NULL;
2016 spin_lock_irqsave(&send_queue->lock, flags);
2017 list_del(&mad_list->list);
2019 /* Move queued send to the send queue */
2020 if (send_queue->count-- > send_queue->max_active) {
2021 mad_list = container_of(qp_info->overflow_list.next,
2022 struct ib_mad_list_head, list);
2023 queued_send_wr = container_of(mad_list,
2024 struct ib_mad_send_wr_private,
2026 list_del(&mad_list->list);
2027 list_add_tail(&mad_list->list, &send_queue->list);
2029 spin_unlock_irqrestore(&send_queue->lock, flags);
2031 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2032 mad_send_wc.status = wc->status;
2033 mad_send_wc.vendor_err = wc->vendor_err;
2034 if (atomic_read(&qp_info->snoop_count))
2035 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2036 IB_MAD_SNOOP_SEND_COMPLETIONS);
2037 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2039 if (queued_send_wr) {
2040 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2043 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2044 mad_send_wr = queued_send_wr;
2045 wc->status = IB_WC_LOC_QP_OP_ERR;
2051 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2053 struct ib_mad_send_wr_private *mad_send_wr;
2054 struct ib_mad_list_head *mad_list;
2055 unsigned long flags;
2057 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2058 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2059 mad_send_wr = container_of(mad_list,
2060 struct ib_mad_send_wr_private,
2062 mad_send_wr->retry = 1;
2064 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2067 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2070 struct ib_mad_list_head *mad_list;
2071 struct ib_mad_qp_info *qp_info;
2072 struct ib_mad_send_wr_private *mad_send_wr;
2075 /* Determine if failure was a send or receive */
2076 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2077 qp_info = mad_list->mad_queue->qp_info;
2078 if (mad_list->mad_queue == &qp_info->recv_queue)
2080 * Receive errors indicate that the QP has entered the error
2081 * state - error handling/shutdown code will clean up
2086 * Send errors will transition the QP to SQE - move
2087 * QP to RTS and repost flushed work requests
2089 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2091 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2092 if (mad_send_wr->retry) {
2094 struct ib_send_wr *bad_send_wr;
2096 mad_send_wr->retry = 0;
2097 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2100 ib_mad_send_done_handler(port_priv, wc);
2102 ib_mad_send_done_handler(port_priv, wc);
2104 struct ib_qp_attr *attr;
2106 /* Transition QP to RTS and fail offending send */
2107 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2109 attr->qp_state = IB_QPS_RTS;
2110 attr->cur_qp_state = IB_QPS_SQE;
2111 ret = ib_modify_qp(qp_info->qp, attr,
2112 IB_QP_STATE | IB_QP_CUR_STATE);
2115 printk(KERN_ERR PFX "mad_error_handler - "
2116 "ib_modify_qp to RTS : %d\n", ret);
2118 mark_sends_for_retry(qp_info);
2120 ib_mad_send_done_handler(port_priv, wc);
2125 * IB MAD completion callback
2127 static void ib_mad_completion_handler(void *data)
2129 struct ib_mad_port_private *port_priv;
2132 port_priv = (struct ib_mad_port_private *)data;
2133 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2135 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2136 if (wc.status == IB_WC_SUCCESS) {
2137 switch (wc.opcode) {
2139 ib_mad_send_done_handler(port_priv, &wc);
2142 ib_mad_recv_done_handler(port_priv, &wc);
2149 mad_error_handler(port_priv, &wc);
2153 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2155 unsigned long flags;
2156 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2157 struct ib_mad_send_wc mad_send_wc;
2158 struct list_head cancel_list;
2160 INIT_LIST_HEAD(&cancel_list);
2162 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2163 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2164 &mad_agent_priv->send_list, agent_list) {
2165 if (mad_send_wr->status == IB_WC_SUCCESS) {
2166 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2167 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2171 /* Empty wait list to prevent receives from finding a request */
2172 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2173 /* Empty local completion list as well */
2174 list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2175 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2177 /* Report all cancelled requests */
2178 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2179 mad_send_wc.vendor_err = 0;
2181 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2182 &cancel_list, agent_list) {
2183 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2184 list_del(&mad_send_wr->agent_list);
2185 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2186 &mad_send_wc);
2187 atomic_dec(&mad_agent_priv->refcount);
2188 }
2189 }
2191 static struct ib_mad_send_wr_private*
2192 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2193 struct ib_mad_send_buf *send_buf)
2195 struct ib_mad_send_wr_private *mad_send_wr;
2197 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2198 agent_list) {
2199 if (&mad_send_wr->send_buf == send_buf)
2200 return mad_send_wr;
2201 }
2203 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2204 agent_list) {
2205 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2206 &mad_send_wr->send_buf == send_buf)
2207 return mad_send_wr;
2208 }
2209 return NULL;
2210 }
2212 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2213 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2214 {
2215 struct ib_mad_agent_private *mad_agent_priv;
2216 struct ib_mad_send_wr_private *mad_send_wr;
2217 unsigned long flags;
2218 int active;
2220 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2221 agent);
2222 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2223 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2224 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2225 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2226 return -EINVAL;
2227 }
2229 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2230 if (!timeout_ms) {
2231 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2232 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2233 }
2235 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2236 if (active)
2237 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2238 else
2239 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2241 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2242 return 0;
2243 }
2244 EXPORT_SYMBOL(ib_modify_mad);
2246 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2247 struct ib_mad_send_buf *send_buf)
2249 ib_modify_mad(mad_agent, send_buf, 0);
2251 EXPORT_SYMBOL(ib_cancel_mad);
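/*
 * Example (editorial sketch, not part of mad.c): a client that has posted
 * a request can change its mind about the response.  Per the code above,
 * ib_modify_mad() with a non-zero timeout resets the response timer, while
 * a zero timeout - or the ib_cancel_mad() wrapper - forces the send to
 * complete through the agent's send_handler with IB_WC_WR_FLUSH_ERR.
 * "agent" and "send_buf" are assumed to come from the caller.
 */
static void example_abandon_request(struct ib_mad_agent *agent,
				    struct ib_mad_send_buf *send_buf)
{
	/*
	 * Stop waiting for the response; the send completes through the
	 * agent's send_handler with status IB_WC_WR_FLUSH_ERR.
	 */
	ib_cancel_mad(agent, send_buf);
}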
2253 static void local_completions(void *data)
2255 struct ib_mad_agent_private *mad_agent_priv;
2256 struct ib_mad_local_private *local;
2257 struct ib_mad_agent_private *recv_mad_agent;
2258 unsigned long flags;
2261 struct ib_mad_send_wc mad_send_wc;
2263 mad_agent_priv = (struct ib_mad_agent_private *)data;
2265 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2266 while (!list_empty(&mad_agent_priv->local_list)) {
2267 local = list_entry(mad_agent_priv->local_list.next,
2268 struct ib_mad_local_private,
2270 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2271 if (local->mad_priv) {
2272 recv_mad_agent = local->recv_mad_agent;
2273 if (!recv_mad_agent) {
2274 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2275 goto local_send_completion;
2280 * Defined behavior is to complete response
2283 build_smp_wc((unsigned long) local->mad_send_wr,
2284 be16_to_cpu(IB_LID_PERMISSIVE),
2285 0, recv_mad_agent->agent.port_num, &wc);
2287 local->mad_priv->header.recv_wc.wc = &wc;
2288 local->mad_priv->header.recv_wc.mad_len =
2289 sizeof(struct ib_mad);
2290 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2291 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2292 &local->mad_priv->header.recv_wc.rmpp_list);
2293 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2294 local->mad_priv->header.recv_wc.recv_buf.mad =
2295 &local->mad_priv->mad.mad;
2296 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2297 snoop_recv(recv_mad_agent->qp_info,
2298 &local->mad_priv->header.recv_wc,
2299 IB_MAD_SNOOP_RECVS);
2300 recv_mad_agent->agent.recv_handler(
2301 &recv_mad_agent->agent,
2302 &local->mad_priv->header.recv_wc);
2303 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2304 atomic_dec(&recv_mad_agent->refcount);
2305 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2308 local_send_completion:
2310 mad_send_wc.status = IB_WC_SUCCESS;
2311 mad_send_wc.vendor_err = 0;
2312 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2313 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2314 snoop_send(mad_agent_priv->qp_info,
2315 &local->mad_send_wr->send_buf,
2316 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2317 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2318 &mad_send_wc);
2320 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2321 list_del(&local->completion_list);
2322 atomic_dec(&mad_agent_priv->refcount);
2324 kmem_cache_free(ib_mad_cache, local->mad_priv);
2327 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2330 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2331 {
2332 int ret;
2334 if (!mad_send_wr->retries--)
2335 return -ETIMEDOUT;
2337 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2339 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2340 ret = ib_retry_rmpp(mad_send_wr);
2341 switch (ret) {
2342 case IB_RMPP_RESULT_UNHANDLED:
2343 ret = ib_send_mad(mad_send_wr);
2344 break;
2345 case IB_RMPP_RESULT_CONSUMED:
2346 ret = 0;
2347 break;
2348 default:
2349 ret = -ECOMM;
2350 break;
2351 }
2352 } else
2353 ret = ib_send_mad(mad_send_wr);
2355 if (!ret) {
2356 mad_send_wr->refcount++;
2357 list_add_tail(&mad_send_wr->agent_list,
2358 &mad_send_wr->mad_agent_priv->send_list);
2359 }
2360 return ret;
2361 }
2363 static void timeout_sends(void *data)
2365 struct ib_mad_agent_private *mad_agent_priv;
2366 struct ib_mad_send_wr_private *mad_send_wr;
2367 struct ib_mad_send_wc mad_send_wc;
2368 unsigned long flags, delay;
2370 mad_agent_priv = (struct ib_mad_agent_private *)data;
2371 mad_send_wc.vendor_err = 0;
2373 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2374 while (!list_empty(&mad_agent_priv->wait_list)) {
2375 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2376 struct ib_mad_send_wr_private,
2377 agent_list);
2379 if (time_after(mad_send_wr->timeout, jiffies)) {
2380 delay = mad_send_wr->timeout - jiffies;
2381 if ((long)delay <= 0)
2382 delay = 1;
2383 queue_delayed_work(mad_agent_priv->qp_info->
2384 port_priv->wq,
2385 &mad_agent_priv->timed_work, delay);
2386 break;
2387 }
2389 list_del(&mad_send_wr->agent_list);
2390 if (mad_send_wr->status == IB_WC_SUCCESS &&
2391 !retry_send(mad_send_wr))
2392 continue;
2394 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2396 if (mad_send_wr->status == IB_WC_SUCCESS)
2397 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2398 else
2399 mad_send_wc.status = mad_send_wr->status;
2400 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2401 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2402 &mad_send_wc);
2404 atomic_dec(&mad_agent_priv->refcount);
2405 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2406 }
2407 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2408 }
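/*
 * Example (editorial sketch, not part of mad.c): the timeout/retry
 * machinery above only applies to sends that requested a response.  A
 * caller opts in by setting timeout_ms and retries on the send buffer
 * before posting it.  The agent "agent" and address handle "ah" are
 * assumed to exist; the IB_MGMT_* sizing constants and the
 * ib_create_send_mad()/ib_post_send_mad() calls are taken from
 * <rdma/ib_mad.h> of this era.
 */
static int example_post_query(struct ib_mad_agent *agent, struct ib_ah *ah,
			      u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *send_buf;

	send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_KERNEL);
	if (IS_ERR(send_buf))
		return PTR_ERR(send_buf);

	/* Fill in send_buf->mad (class header and payload) here */
	send_buf->ah = ah;
	send_buf->timeout_ms = 1000;	/* wait up to 1s for the response */
	send_buf->retries = 3;		/* retry_send() resends up to 3 times */

	return ib_post_send_mad(send_buf, NULL);
}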
2410 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2412 struct ib_mad_port_private *port_priv = cq->cq_context;
2413 unsigned long flags;
2415 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2416 if (!list_empty(&port_priv->port_list))
2417 queue_work(port_priv->wq, &port_priv->work);
2418 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
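/*
 * The handler above is the completion callback passed to ib_create_cq() in
 * ib_mad_port_open() below.  It is typically invoked from the low-level
 * driver's interrupt path, so it only queues port_priv->work; the actual CQ
 * draining happens in ib_mad_completion_handler() on the per-port workqueue.
 */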
2421 /*
2422 * Allocate receive MADs and post receive WRs for them
2423 */
2424 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2425 struct ib_mad_private *mad)
2427 unsigned long flags;
2428 int post, ret;
2429 struct ib_mad_private *mad_priv;
2430 struct ib_sge sg_list;
2431 struct ib_recv_wr recv_wr, *bad_recv_wr;
2432 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2434 /* Initialize common scatter list fields */
2435 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2436 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2438 /* Initialize common receive WR fields */
2439 recv_wr.next = NULL;
2440 recv_wr.sg_list = &sg_list;
2441 recv_wr.num_sge = 1;
2444 /* Allocate and map receive buffer */
2449 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2450 if (!mad_priv) {
2451 printk(KERN_ERR PFX "No memory for receive buffer\n");
2452 ret = -ENOMEM;
2453 break;
2454 }
2456 sg_list.addr = dma_map_single(qp_info->port_priv->
2457 device->dma_device,
2458 &mad_priv->grh,
2459 sizeof *mad_priv -
2460 sizeof mad_priv->header,
2461 DMA_FROM_DEVICE);
2462 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2463 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2464 mad_priv->header.mad_list.mad_queue = recv_queue;
2466 /* Post receive WR */
2467 spin_lock_irqsave(&recv_queue->lock, flags);
2468 post = (++recv_queue->count < recv_queue->max_active);
2469 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2470 spin_unlock_irqrestore(&recv_queue->lock, flags);
2471 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2472 if (ret) {
2473 spin_lock_irqsave(&recv_queue->lock, flags);
2474 list_del(&mad_priv->header.mad_list.list);
2475 recv_queue->count--;
2476 spin_unlock_irqrestore(&recv_queue->lock, flags);
2477 dma_unmap_single(qp_info->port_priv->device->dma_device,
2478 pci_unmap_addr(&mad_priv->header,
2479 mapping),
2480 sizeof *mad_priv -
2481 sizeof mad_priv->header,
2482 DMA_FROM_DEVICE);
2483 kmem_cache_free(ib_mad_cache, mad_priv);
2484 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2492 /*
2493 * Return all the posted receive MADs
2494 */
2495 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2497 struct ib_mad_private_header *mad_priv_hdr;
2498 struct ib_mad_private *recv;
2499 struct ib_mad_list_head *mad_list;
2501 while (!list_empty(&qp_info->recv_queue.list)) {
2503 mad_list = list_entry(qp_info->recv_queue.list.next,
2504 struct ib_mad_list_head, list);
2505 mad_priv_hdr = container_of(mad_list,
2506 struct ib_mad_private_header,
2508 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2511 /* Remove from posted receive MAD list */
2512 list_del(&mad_list->list);
2514 dma_unmap_single(qp_info->port_priv->device->dma_device,
2515 pci_unmap_addr(&recv->header, mapping),
2516 sizeof(struct ib_mad_private) -
2517 sizeof(struct ib_mad_private_header),
2518 DMA_FROM_DEVICE);
2519 kmem_cache_free(ib_mad_cache, recv);
2522 qp_info->recv_queue.count = 0;
2528 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2529 {
2530 int ret, i;
2531 struct ib_qp_attr *attr;
2532 struct ib_qp *qp;
2534 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2535 if (!attr) {
2536 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2537 return -ENOMEM;
2538 }
2540 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2541 qp = port_priv->qp_info[i].qp;
2542 /*
2543 * PKey index for QP1 is irrelevant but
2544 * one is needed for the Reset to Init transition
2545 */
2546 attr->qp_state = IB_QPS_INIT;
2547 attr->pkey_index = 0;
2548 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2549 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2550 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2552 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2553 "INIT: %d\n", i, ret);
2557 attr->qp_state = IB_QPS_RTR;
2558 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2560 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2561 "RTR: %d\n", i, ret);
2565 attr->qp_state = IB_QPS_RTS;
2566 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2567 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2569 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2570 "RTS: %d\n", i, ret);
2575 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2577 printk(KERN_ERR PFX "Failed to request completion "
2578 "notification: %d\n", ret);
2582 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2583 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2585 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2594 static void qp_event_handler(struct ib_event *event, void *qp_context)
2596 struct ib_mad_qp_info *qp_info = qp_context;
2598 /* It's worse than that! He's dead, Jim! */
2599 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2600 event->event, qp_info->qp->qp_num);
2603 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2604 struct ib_mad_queue *mad_queue)
2606 mad_queue->qp_info = qp_info;
2607 mad_queue->count = 0;
2608 spin_lock_init(&mad_queue->lock);
2609 INIT_LIST_HEAD(&mad_queue->list);
2612 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2613 struct ib_mad_qp_info *qp_info)
2615 qp_info->port_priv = port_priv;
2616 init_mad_queue(qp_info, &qp_info->send_queue);
2617 init_mad_queue(qp_info, &qp_info->recv_queue);
2618 INIT_LIST_HEAD(&qp_info->overflow_list);
2619 spin_lock_init(&qp_info->snoop_lock);
2620 qp_info->snoop_table = NULL;
2621 qp_info->snoop_table_size = 0;
2622 atomic_set(&qp_info->snoop_count, 0);
2625 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2626 enum ib_qp_type qp_type)
2628 struct ib_qp_init_attr qp_init_attr;
2629 int ret;
2631 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2632 qp_init_attr.send_cq = qp_info->port_priv->cq;
2633 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2634 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2635 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2636 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2637 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2638 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2639 qp_init_attr.qp_type = qp_type;
2640 qp_init_attr.port_num = qp_info->port_priv->port_num;
2641 qp_init_attr.qp_context = qp_info;
2642 qp_init_attr.event_handler = qp_event_handler;
2643 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2644 if (IS_ERR(qp_info->qp)) {
2645 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2646 get_spl_qp_index(qp_type));
2647 ret = PTR_ERR(qp_info->qp);
2648 goto error;
2649 }
2650 /* Use minimum queue sizes unless the CQ is resized */
2651 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2652 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2653 return 0;
2655 error:
2656 return ret;
2657 }
2659 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2661 ib_destroy_qp(qp_info->qp);
2662 kfree(qp_info->snoop_table);
2663 }
2665 /*
2666 * Open the port
2667 * Create the QP, PD, MR, and CQ if needed
2668 */
2669 static int ib_mad_port_open(struct ib_device *device,
2670 int port_num)
2671 {
2672 int ret, cq_size;
2673 struct ib_mad_port_private *port_priv;
2674 unsigned long flags;
2675 char name[sizeof "ib_mad123"];
2677 /* Create new device info */
2678 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2679 if (!port_priv) {
2680 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2681 return -ENOMEM;
2682 }
2684 port_priv->device = device;
2685 port_priv->port_num = port_num;
2686 spin_lock_init(&port_priv->reg_lock);
2687 INIT_LIST_HEAD(&port_priv->agent_list);
2688 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2689 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2691 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2692 port_priv->cq = ib_create_cq(port_priv->device,
2693 ib_mad_thread_completion_handler,
2694 NULL, port_priv, cq_size);
2695 if (IS_ERR(port_priv->cq)) {
2696 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2697 ret = PTR_ERR(port_priv->cq);
2701 port_priv->pd = ib_alloc_pd(device);
2702 if (IS_ERR(port_priv->pd)) {
2703 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2704 ret = PTR_ERR(port_priv->pd);
2708 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2709 if (IS_ERR(port_priv->mr)) {
2710 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2711 ret = PTR_ERR(port_priv->mr);
2715 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2718 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2722 snprintf(name, sizeof name, "ib_mad%d", port_num);
2723 port_priv->wq = create_singlethread_workqueue(name);
2724 if (!port_priv->wq) {
2728 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2730 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2731 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2732 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2734 ret = ib_mad_port_start(port_priv);
2736 printk(KERN_ERR PFX "Couldn't start port\n");
2743 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2744 list_del_init(&port_priv->port_list);
2745 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2747 destroy_workqueue(port_priv->wq);
2749 destroy_mad_qp(&port_priv->qp_info[1]);
2751 destroy_mad_qp(&port_priv->qp_info[0]);
2753 ib_dereg_mr(port_priv->mr);
2755 ib_dealloc_pd(port_priv->pd);
2757 ib_destroy_cq(port_priv->cq);
2758 cleanup_recv_queue(&port_priv->qp_info[1]);
2759 cleanup_recv_queue(&port_priv->qp_info[0]);
2767 /*
2768 * If there are no classes using the port, free the port
2769 * resources (CQ, MR, PD, QP) and remove the port's info structure
2770 */
2771 static int ib_mad_port_close(struct ib_device *device, int port_num)
2773 struct ib_mad_port_private *port_priv;
2774 unsigned long flags;
2776 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2777 port_priv = __ib_get_mad_port(device, port_num);
2778 if (port_priv == NULL) {
2779 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2780 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2783 list_del_init(&port_priv->port_list);
2784 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2786 destroy_workqueue(port_priv->wq);
2787 destroy_mad_qp(&port_priv->qp_info[1]);
2788 destroy_mad_qp(&port_priv->qp_info[0]);
2789 ib_dereg_mr(port_priv->mr);
2790 ib_dealloc_pd(port_priv->pd);
2791 ib_destroy_cq(port_priv->cq);
2792 cleanup_recv_queue(&port_priv->qp_info[1]);
2793 cleanup_recv_queue(&port_priv->qp_info[0]);
2794 /* XXX: Handle deallocation of MAD registration tables */
2796 kfree(port_priv);
2798 return 0;
2799 }
2801 static void ib_mad_init_device(struct ib_device *device)
2802 {
2803 int start, end, i;
2805 if (device->node_type == IB_NODE_SWITCH) {
2806 start = 0;
2807 end = 0;
2808 } else {
2809 start = 1;
2810 end = device->phys_port_cnt;
2811 }
2813 for (i = start; i <= end; i++) {
2814 if (ib_mad_port_open(device, i)) {
2815 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2819 if (ib_agent_port_open(device, i)) {
2820 printk(KERN_ERR PFX "Couldn't open %s port %d "
2829 if (ib_mad_port_close(device, i))
2830 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2836 while (i >= start) {
2837 if (ib_agent_port_close(device, i))
2838 printk(KERN_ERR PFX "Couldn't close %s port %d "
2841 if (ib_mad_port_close(device, i))
2842 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2848 static void ib_mad_remove_device(struct ib_device *device)
2850 int i, num_ports, cur_port;
2852 if (device->node_type == IB_NODE_SWITCH) {
2853 num_ports = 1;
2854 cur_port = 0;
2855 } else {
2856 num_ports = device->phys_port_cnt;
2857 cur_port = 1;
2858 }
2859 for (i = 0; i < num_ports; i++, cur_port++) {
2860 if (ib_agent_port_close(device, cur_port))
2861 printk(KERN_ERR PFX "Couldn't close %s port %d "
2863 device->name, cur_port);
2864 if (ib_mad_port_close(device, cur_port))
2865 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2866 device->name, cur_port);
2870 static struct ib_client mad_client = {
2871 .name   = "mad",
2872 .add = ib_mad_init_device,
2873 .remove = ib_mad_remove_device
2876 static int __init ib_mad_init_module(void)
2877 {
2878 int ret;
2880 spin_lock_init(&ib_mad_port_list_lock);
2882 ib_mad_cache = kmem_cache_create("ib_mad",
2883 sizeof(struct ib_mad_private),
2884 0,
2885 SLAB_HWCACHE_ALIGN,
2886 NULL,
2887 NULL);
2888 if (!ib_mad_cache) {
2889 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2894 INIT_LIST_HEAD(&ib_mad_port_list);
2896 if (ib_register_client(&mad_client)) {
2897 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2905 kmem_cache_destroy(ib_mad_cache);
2910 static void __exit ib_mad_cleanup_module(void)
2912 ib_unregister_client(&mad_client);
2914 if (kmem_cache_destroy(ib_mad_cache)) {
2915 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2919 module_init(ib_mad_init_module);
2920 module_exit(ib_mad_cleanup_module);