4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Implements upper edge functions for Bridge message module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 #include <linux/types.h>
20 /* ----------------------------------- DSP/BIOS Bridge */
21 #include <dspbridge/dbdefs.h>
23 /* ----------------------------------- OS Adaptation Layer */
24 #include <dspbridge/sync.h>
26 /* ----------------------------------- Platform Manager */
27 #include <dspbridge/dev.h>
29 /* ----------------------------------- Others */
30 #include <dspbridge/io_sm.h>
32 /* ----------------------------------- This */
34 #include <dspbridge/dspmsg.h>
36 /* ----------------------------------- Function Prototypes */
37 static int add_new_msg(struct list_head *msg_list);
38 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
39 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
40 static void free_msg_list(struct list_head *msg_list);
43 * ======== bridge_msg_create ========
44 * Create an object to manage message queues. Only one of these objects
45 * can exist per device object.
/*
 * bridge_msg_create() - allocate and initialise the per-device message
 * manager.
 *
 * @msg_man:      out parameter; receives the new struct msg_mgr on success.
 * @hdev_obj:     device object whose I/O manager is looked up and cached.
 * @msg_callback: stored in ->on_exit for use by higher layers.
 *
 * NOTE(review): this excerpt elides several lines (error returns, braces);
 * the comments below describe only the statements visible here.
 */
47 int bridge_msg_create(struct msg_mgr **msg_man,
48 struct dev_object *hdev_obj,
49 msg_onexit msg_callback)
51 struct msg_mgr *msg_mgr_obj;
52 struct io_mgr *hio_mgr;
/* Reject a NULL out-pointer, callback, or device object up front. */
55 if (!msg_man || !msg_callback || !hdev_obj)
/* Fetch the I/O manager owned by the device; cached in ->iomgr below. */
58 dev_get_io_mgr(hdev_obj, &hio_mgr);
63 /* Allocate msg_ctrl manager object */
64 msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL)
68 msg_mgr_obj->on_exit = msg_callback;
69 msg_mgr_obj->iomgr = hio_mgr;
70 /* List of MSG_QUEUEs */
71 INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
73 * Queues of message frames for messages to the DSP. Message
74 * frames will only be added to the free queue when a
75 * msg_queue object is created.
77 INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
78 INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
/* Protects the three lists above; taken with spin_lock_bh() elsewhere. */
79 spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
82 * Create an event to be used by bridge_msg_put() in waiting
83 * for an available free frame from the message manager.
85 msg_mgr_obj->sync_event =
86 kzalloc(sizeof(struct sync_object), GFP_KERNEL);
/* Allocation-failure branch; cleanup body is elided from this excerpt. */
87 if (!msg_mgr_obj->sync_event) {
91 sync_init_event(msg_mgr_obj->sync_event);
/* Publish the fully initialised manager to the caller. */
93 *msg_man = msg_mgr_obj;
99 * ======== bridge_msg_create_queue ========
100 * Create a msg_queue for sending/receiving messages to/from a node
/*
 * bridge_msg_create_queue() - allocate a per-node message queue.
 *
 * @hmsg_mgr: owning message manager.
 * @msgq:     out parameter; receives the new struct msg_queue.
 * @msgq_id:  node environment id (patched later via bridge_msg_set_queue_id).
 * @max_msgs: number of message frames to preallocate; the same count is
 *            added to the manager's free list (to-DSP direction) and to
 *            the queue's own free list (from-DSP direction).
 * @arg:      opaque node handle stored in ->arg.
 *
 * NOTE(review): error returns and some braces are elided in this excerpt;
 * comments describe only the visible statements.
 */
103 int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
104 u32 msgq_id, u32 max_msgs, void *arg)
/* num_allocated tracks frames added to the manager list for error unwind. */
107 u32 num_allocated = 0;
108 struct msg_queue *msg_q;
111 if (!hmsg_mgr || msgq == NULL)
115 /* Allocate msg_queue object */
116 msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
120 msg_q->max_msgs = max_msgs;
121 msg_q->msg_mgr = hmsg_mgr;
122 msg_q->arg = arg; /* Node handle */
123 msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
124 /* Queues of Message frames for messages from the DSP */
125 INIT_LIST_HEAD(&msg_q->msg_free_list);
126 INIT_LIST_HEAD(&msg_q->msg_used_list);
128 /* Create event that will be signalled when a message from
129 * the DSP is available. */
130 msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
131 if (!msg_q->sync_event) {
136 sync_init_event(msg_q->sync_event);
138 /* Create a notification list for message ready notification. */
139 msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
140 if (!msg_q->ntfy_obj) {
144 ntfy_init(msg_q->ntfy_obj);
146 /* Create events that will be used to synchronize cleanup
147 * when the object is deleted. sync_done will be set to
148 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
149 * will be set by the unblocked thread to signal that it
150 * is unblocked and will no longer reference the object. */
151 msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
152 if (!msg_q->sync_done) {
156 sync_init_event(msg_q->sync_done);
158 msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
159 if (!msg_q->sync_done_ack) {
163 sync_init_event(msg_q->sync_done_ack);
165 /* Enter critical section */
166 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
167 /* Initialize message frames and put in appropriate queues */
/* add_new_msg() uses GFP_ATOMIC because the manager lock is held here. */
168 for (i = 0; i < max_msgs && !status; i++) {
169 status = add_new_msg(&hmsg_mgr->msg_free_list);
172 status = add_new_msg(&msg_q->msg_free_list);
/* Failure path: drop the lock before tearing the queue down below. */
176 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Success: make the queue visible on the manager's queue list. */
180 list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
182 /* Signal that free frames are now available */
183 if (!list_empty(&hmsg_mgr->msg_free_list))
184 sync_set_event(hmsg_mgr->sync_event);
186 /* Exit critical section */
187 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Error unwind: frees num_allocated manager frames plus the queue itself. */
191 delete_msg_queue(msg_q, num_allocated);
/*
 * ======== bridge_msg_delete ========
 *  Delete a msg_ctrl manager allocated in bridge_msg_create().
 */
void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
{
	if (!hmsg_mgr)
		return;

	delete_msg_mgr(hmsg_mgr);
}
206 * ======== bridge_msg_delete_queue ========
207 * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
/*
 * bridge_msg_delete_queue() - tear down a per-node message queue.
 *
 * Marks the queue done, wakes every thread blocked in bridge_msg_get()/
 * bridge_msg_put() and waits for each to acknowledge via sync_done_ack,
 * then unlinks the queue and frees it under the manager lock.
 *
 * NOTE(review): io_msg_pend is declared on a line elided from this
 * excerpt; the loop below reads it unlocked — presumably safe because
 * blocked threads only decrement it, TODO confirm against full source.
 */
209 void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
211 struct msg_mgr *hmsg_mgr;
214 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
217 hmsg_mgr = msg_queue_obj->msg_mgr;
/* Mark the queue dying so blocked readers/writers give up. */
218 msg_queue_obj->done = true;
219 /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
220 io_msg_pend = msg_queue_obj->io_msg_pend;
221 while (io_msg_pend) {
223 sync_set_event(msg_queue_obj->sync_done);
224 /* Wait for acknowledgement */
225 sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
226 io_msg_pend = msg_queue_obj->io_msg_pend;
228 /* Remove message queue from hmsg_mgr->queue_list */
229 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
230 list_del(&msg_queue_obj->list_elem);
231 /* Free the message queue object */
232 delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
/* Reclaiming this queue's frames may have emptied the manager free list. */
233 if (list_empty(&hmsg_mgr->msg_free_list))
234 sync_reset_event(hmsg_mgr->sync_event);
235 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
239 * ======== bridge_msg_get ========
240 * Get a message from a msg_ctrl queue.
/*
 * bridge_msg_get() - dequeue one message sent by the DSP.
 *
 * @msg_queue_obj: queue to read from.
 * @pmsg:          out: receives a copy of the message payload.
 * @utimeout:      timeout passed to sync_wait_on_multiple_events().
 *
 * Fast path: an already-queued frame is copied out under the manager
 * lock. Slow path: register as a pending reader (io_msg_pend), drop the
 * lock, then block on sync_event (message arrived) or sync_done (queue
 * teardown). The teardown path acks via sync_done_ack so the deleter in
 * bridge_msg_delete_queue() knows this thread no longer touches the queue.
 *
 * NOTE(review): returns and some braces are elided in this excerpt;
 * comments describe only the visible statements.
 */
242 int bridge_msg_get(struct msg_queue *msg_queue_obj,
243 struct dsp_msg *pmsg, u32 utimeout)
245 struct msg_frame *msg_frame_obj;
246 struct msg_mgr *hmsg_mgr;
247 struct sync_object *syncs[2];
251 if (!msg_queue_obj || pmsg == NULL)
254 hmsg_mgr = msg_queue_obj->msg_mgr;
256 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
257 /* If a message is already there, get it */
258 if (!list_empty(&msg_queue_obj->msg_used_list)) {
259 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
260 struct msg_frame, list_elem);
261 list_del(&msg_frame_obj->list_elem);
/* Copy payload out, then recycle the frame onto the queue's free list. */
262 *pmsg = msg_frame_obj->msg_data.msg;
263 list_add_tail(&msg_frame_obj->list_elem,
264 &msg_queue_obj->msg_free_list);
265 if (list_empty(&msg_queue_obj->msg_used_list))
266 sync_reset_event(msg_queue_obj->sync_event);
267 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Queue is being deleted: bail out without blocking. */
271 if (msg_queue_obj->done) {
272 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Count ourselves as a blocked reader before releasing the lock. */
275 msg_queue_obj->io_msg_pend++;
276 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
279 * Wait til message is available, timeout, or done. We don't
280 * have to schedule the DPC, since the DSP will send messages
281 * when they are available.
283 syncs[0] = msg_queue_obj->sync_event;
284 syncs[1] = msg_queue_obj->sync_done;
285 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
287 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* Woken by teardown: undo the pend count and ack so delete can finish. */
288 if (msg_queue_obj->done) {
289 msg_queue_obj->io_msg_pend--;
290 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
292 * Signal that we're not going to access msg_queue_obj
293 * anymore, so it can be deleted.
295 sync_set_event(msg_queue_obj->sync_done_ack);
298 if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
299 /* Get msg from used list */
300 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
301 struct msg_frame, list_elem);
302 list_del(&msg_frame_obj->list_elem);
303 /* Copy message into pmsg and put frame on the free list */
304 *pmsg = msg_frame_obj->msg_data.msg;
305 list_add_tail(&msg_frame_obj->list_elem,
306 &msg_queue_obj->msg_free_list);
308 msg_queue_obj->io_msg_pend--;
309 /* Reset the event if there are still queued messages */
/* NOTE(review): comment says "Reset" but the code SETS sync_event, so
 * other blocked readers re-wake while messages remain — confirm intent. */
310 if (!list_empty(&msg_queue_obj->msg_used_list))
311 sync_set_event(msg_queue_obj->sync_event);
313 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
319 * ======== bridge_msg_put ========
320 * Put a message onto a msg_ctrl queue.
/*
 * bridge_msg_put() - queue one message for delivery to the DSP.
 *
 * @msg_queue_obj: queue identifying the destination node (msgq_id).
 * @pmsg:          message payload to copy into a free frame.
 * @utimeout:      timeout passed to sync_wait_on_multiple_events().
 *
 * Fast path: grab a free frame from the manager, fill it, move it to the
 * used list and kick the I/O DPC. Slow path: register as a pending
 * writer (io_msg_pend), drop the lock, and block on the manager's
 * sync_event (frame freed) or the queue's sync_done (teardown).
 *
 * NOTE(review): returns and some braces are elided in this excerpt;
 * comments describe only the visible statements.
 */
322 int bridge_msg_put(struct msg_queue *msg_queue_obj,
323 const struct dsp_msg *pmsg, u32 utimeout)
325 struct msg_frame *msg_frame_obj;
326 struct msg_mgr *hmsg_mgr;
327 struct sync_object *syncs[2];
331 if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
334 hmsg_mgr = msg_queue_obj->msg_mgr;
336 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
338 /* If a message frame is available, use it */
339 if (!list_empty(&hmsg_mgr->msg_free_list)) {
340 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
341 struct msg_frame, list_elem);
342 list_del(&msg_frame_obj->list_elem);
/* Stamp the frame with the payload and the destination queue id. */
343 msg_frame_obj->msg_data.msg = *pmsg;
344 msg_frame_obj->msg_data.msgq_id =
345 msg_queue_obj->msgq_id;
346 list_add_tail(&msg_frame_obj->list_elem,
347 &hmsg_mgr->msg_used_list);
348 hmsg_mgr->msgs_pending++;
/* Last free frame consumed: make future writers block. */
350 if (list_empty(&hmsg_mgr->msg_free_list))
351 sync_reset_event(hmsg_mgr->sync_event);
353 /* Release critical section before scheduling DPC */
354 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
355 /* Schedule a DPC, to do the actual data transfer: */
356 iosm_schedule(hmsg_mgr->iomgr);
/* Queue is being deleted: bail out without blocking. */
360 if (msg_queue_obj->done) {
361 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Count ourselves as a blocked writer before releasing the lock. */
364 msg_queue_obj->io_msg_pend++;
366 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
368 /* Wait til a free message frame is available, timeout, or done */
369 syncs[0] = hmsg_mgr->sync_event;
370 syncs[1] = msg_queue_obj->sync_done;
371 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
375 /* Enter critical section */
376 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* Woken by teardown: undo the pend count and ack so delete can finish. */
377 if (msg_queue_obj->done) {
378 msg_queue_obj->io_msg_pend--;
379 /* Exit critical section */
380 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
382 * Signal that we're not going to access msg_queue_obj
383 * anymore, so it can be deleted.
385 sync_set_event(msg_queue_obj->sync_done_ack);
/* Spurious wakeup / race: still no free frame, give up under the lock. */
389 if (list_empty(&hmsg_mgr->msg_free_list)) {
390 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
394 /* Get msg from free list */
395 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
396 struct msg_frame, list_elem);
398 * Copy message into pmsg and put frame on the
401 list_del(&msg_frame_obj->list_elem);
402 msg_frame_obj->msg_data.msg = *pmsg;
403 msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
404 list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
405 hmsg_mgr->msgs_pending++;
407 * Schedule a DPC, to do the actual
/* NOTE(review): iosm_schedule() is called with msg_mgr_lock still held
 * here, unlike the fast path above which drops it first — confirm that
 * this is intentional in the full source. */
410 iosm_schedule(hmsg_mgr->iomgr);
412 msg_queue_obj->io_msg_pend--;
413 /* Reset event if there are still frames available */
/* NOTE(review): as in bridge_msg_get(), the code SETS (not resets) the
 * event when frames remain, re-waking other blocked writers. */
414 if (!list_empty(&hmsg_mgr->msg_free_list))
415 sync_set_event(hmsg_mgr->sync_event);
417 /* Exit critical section */
418 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
424 * ======== bridge_msg_register_notify ========
/*
 * bridge_msg_register_notify() - (un)register message-ready notification.
 *
 * @msg_queue_obj:  queue whose ntfy_obj list is updated.
 * @event_mask:     DSP_NODEMESSAGEREADY to register, 0 to unregister;
 *                  any other mask is rejected (visible check below).
 * @notify_type:    must be DSP_SIGNALEVENT.
 * @hnotification:  caller-provided notification object.
 *
 * NOTE(review): the status assignments inside the rejection branches are
 * elided from this excerpt — presumably error codes; confirm against the
 * full source.
 */
426 int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
427 u32 event_mask, u32 notify_type,
428 struct dsp_notification *hnotification)
432 if (!msg_queue_obj || !hnotification) {
/* Only "message ready" (register) or 0 (unregister) are accepted. */
437 if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
442 if (notify_type != DSP_SIGNALEVENT) {
448 status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
449 event_mask, notify_type);
/* event_mask == 0: remove an earlier registration instead. */
451 status = ntfy_unregister(msg_queue_obj->ntfy_obj,
/* -EINVAL from unregister is tolerated (see comment below); presumably
 * status is cleared on the elided line — confirm. */
454 if (status == -EINVAL) {
455 /* Not registered. Ok, since we couldn't have known. Node
456 * notifications are split between node state change handled
457 * by NODE, and message ready handled by msg_ctrl. */
465 * ======== bridge_msg_set_queue_id ========
467 void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
470 * A message queue must be created when a node is allocated,
471 * so that node_register_notify() can be called before the node
472 * is created. Since we don't know the node environment until the
473 * node is created, we need this function to set msg_queue_obj->msgq_id
474 * to the node environment, after the node is created.
477 msg_queue_obj->msgq_id = msgq_id;
481 * ======== add_new_msg ========
482 * Must be called in message manager critical section.
484 static int add_new_msg(struct list_head *msg_list)
486 struct msg_frame *pmsg;
488 pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
492 list_add_tail(&pmsg->list_elem, msg_list);
498 * ======== delete_msg_mgr ========
500 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
505 /* FIXME: free elements from queue_list? */
506 free_msg_list(&hmsg_mgr->msg_free_list);
507 free_msg_list(&hmsg_mgr->msg_used_list);
508 kfree(hmsg_mgr->sync_event);
513 * ======== delete_msg_queue ========
/*
 * delete_msg_queue() - free a message queue and its frame allocations.
 *
 * @msg_queue_obj: queue to destroy.
 * @num_to_dsp:    number of frames this queue contributed to the
 *                 manager's to-DSP free list, to be reclaimed here.
 *
 * NOTE(review): the loop index declaration, the kfree() of each popped
 * manager frame, and the break statement are on lines elided from this
 * excerpt. Given the visible `i++ >= num_to_dsp` post-check, the loop
 * appears to process num_to_dsp + 1 frames before stopping — possible
 * off-by-one; confirm against the full source.
 */
515 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
517 struct msg_mgr *hmsg_mgr;
518 struct msg_frame *pmsg, *tmp;
521 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
524 hmsg_mgr = msg_queue_obj->msg_mgr;
526 /* Pull off num_to_dsp message frames from Msg manager and free */
528 list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
530 list_del(&pmsg->list_elem);
532 if (i++ >= num_to_dsp)
/* Release the queue's own from-DSP frames, pending or free. */
536 free_msg_list(&msg_queue_obj->msg_free_list);
537 free_msg_list(&msg_queue_obj->msg_used_list);
539 if (msg_queue_obj->ntfy_obj) {
540 ntfy_delete(msg_queue_obj->ntfy_obj);
541 kfree(msg_queue_obj->ntfy_obj);
/* Free the three sync objects allocated in bridge_msg_create_queue(). */
544 kfree(msg_queue_obj->sync_event);
545 kfree(msg_queue_obj->sync_done);
546 kfree(msg_queue_obj->sync_done_ack);
548 kfree(msg_queue_obj);
552 * ======== free_msg_list ========
554 static void free_msg_list(struct list_head *msg_list)
556 struct msg_frame *pmsg, *tmp;
561 list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
562 list_del(&pmsg->list_elem);