4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Implements upper edge functions for Bridge driver channel module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 * The lower edge functions must be implemented by the Bridge driver
21 * writer, and are declared in chnl_sm.h.
23 * Care is taken in this code to prevent simultaneous access to channel
26 * 2. io_dpc(), scheduled from the io_isr() as an event.
28 * This is done primarily by:
30 * - state flags in the channel object; and
31 * - ensuring the IO_Dispatch() routine, which is called from both
32 * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered.
35 * There is an important invariant condition which must be maintained per
36 * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
37 * which may cause timeouts and/or failure of function sync_wait_on_event.
38 * This invariant condition is:
40 * list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset
42 * !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set.
45 #include <linux/types.h>
47 /* ----------------------------------- OS */
48 #include <dspbridge/host_os.h>
50 /* ----------------------------------- DSP/BIOS Bridge */
51 #include <dspbridge/dbdefs.h>
53 /* ----------------------------------- OS Adaptation Layer */
54 #include <dspbridge/sync.h>
56 /* ----------------------------------- Bridge Driver */
57 #include <dspbridge/dspdefs.h>
58 #include <dspbridge/dspchnl.h>
61 /* ----------------------------------- Platform Manager */
62 #include <dspbridge/dev.h>
64 /* ----------------------------------- Others */
65 #include <dspbridge/io_sm.h>
67 /* ----------------------------------- Define for This */
68 #define USERMODE_ADDR PAGE_OFFSET
70 #define MAILBOX_IRQ INT_MAIL_MPU_IRQ
72 /* ----------------------------------- Function Prototypes */
73 static int create_chirp_list(struct list_head *list, u32 chirps);
75 static void free_chirp_list(struct list_head *list);
77 static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
81 * ======== bridge_chnl_add_io_req ========
82 * Enqueue an I/O request for data transfer on a channel to the DSP.
83 * The direction (mode) is specified in the channel object. Note the DSP
84 * address is specified for channels opened in direct I/O mode.
86 int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
87 u32 byte_size, u32 buf_size,
88 u32 dw_dsp_addr, u32 dw_arg)
91 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
92 struct chnl_irp *chnl_packet_obj = NULL;
93 struct bridge_dev_context *dev_ctxt;
94 struct dev_object *dev_obj;
97 struct chnl_mgr *chnl_mgr_obj;
98 u8 *host_sys_buf = NULL;
99 bool sched_dpc = false;
102 is_eos = (byte_size == 0);
105 if (!host_buf || !pchnl)
108 if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode))
112 * Check the channel state: only queue chirp if channel state
115 dw_state = pchnl->state;
116 if (dw_state != CHNL_STATEREADY) {
117 if (dw_state & CHNL_STATECANCEL)
119 if ((dw_state & CHNL_STATEEOS) &&
120 CHNL_IS_OUTPUT(pchnl->chnl_mode))
122 /* No other possible states left */
125 dev_obj = dev_get_first();
126 dev_get_bridge_context(dev_obj, &dev_ctxt);
130 if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
131 if (!(host_buf < (void *)USERMODE_ADDR)) {
132 host_sys_buf = host_buf;
135 /* if addr in user mode, then copy to kernel space */
136 host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
137 if (host_sys_buf == NULL)
140 if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
141 status = copy_from_user(host_sys_buf, host_buf,
151 /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
152 * channels. DPCCS is held to avoid race conditions with PCPY channels.
153 * If DPC is scheduled in process context (iosm_schedule) and any
154 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
155 * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
156 chnl_mgr_obj = pchnl->chnl_mgr_obj;
157 spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
158 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
159 if (pchnl->chnl_type == CHNL_PCPY) {
160 /* This is a processor-copy channel. */
161 if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
162 /* Check buffer size on output channels for fit. */
163 if (byte_size > io_buf_size(
164 pchnl->chnl_mgr_obj->iomgr)) {
171 /* Get a free chirp: */
172 if (list_empty(&pchnl->free_packets_list)) {
176 chnl_packet_obj = list_first_entry(&pchnl->free_packets_list,
177 struct chnl_irp, link);
178 list_del(&chnl_packet_obj->link);
180 /* Enqueue the chirp on the chnl's IORequest queue: */
181 chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
183 if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
184 chnl_packet_obj->host_sys_buf = host_sys_buf;
187 * Note: for dma chans dw_dsp_addr contains dsp address
191 chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
192 chnl_packet_obj->byte_size = byte_size;
193 chnl_packet_obj->buf_size = buf_size;
194 /* Only valid for output channel */
195 chnl_packet_obj->arg = dw_arg;
196 chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
197 CHNL_IOCSTATCOMPLETE);
198 list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
201 * If end of stream, update the channel state to prevent
205 pchnl->state |= CHNL_STATEEOS;
207 /* Request IO from the DSP */
208 io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
209 (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
210 IO_OUTPUT), &mb_val);
213 omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
214 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
216 sm_interrupt_dsp(dev_ctxt, mb_val);
218 /* Schedule a DPC, to do the actual data transfer */
220 iosm_schedule(chnl_mgr_obj->iomgr);
226 * ======== bridge_chnl_cancel_io ========
227 * Return all I/O requests to the client which have not yet been
228 * transferred. The channel's I/O completion object is
229 * signalled, and all the I/O requests are queued as IOC's, with the
230 * status field set to CHNL_IOCSTATCANCEL.
231 * This call is typically used in abort situations, and is a prelude to
234 int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
236 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
239 struct chnl_irp *chirp, *tmp;
240 struct chnl_mgr *chnl_mgr_obj = NULL;
/* Validate: need both the channel and its owning manager. */
243 if (!pchnl || !pchnl->chnl_mgr_obj)
246 chnl_id = pchnl->chnl_id;
247 chnl_mode = pchnl->chnl_mode;
248 chnl_mgr_obj = pchnl->chnl_mgr_obj;
250 /* Mark this channel as cancelled, to prevent further IORequests
251 * or dispatching. */
252 spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
254 pchnl->state |= CHNL_STATECANCEL;
/* Nothing queued: nothing to cancel; drop the lock and bail out. */
256 if (list_empty(&pchnl->io_requests)) {
257 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
261 if (pchnl->chnl_type == CHNL_PCPY) {
262 /* Indicate we have no more buffers available for transfer: */
263 if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
264 io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id);
266 /* Record that we no longer have output buffers
268 chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
271 /* Move all IOR's to IOC queue: */
272 list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) {
273 list_del(&chirp->link);
/* Zero the transfer count and flag the packet as cancelled. */
274 chirp->byte_size = 0;
275 chirp->status |= CHNL_IOCSTATCANCEL;
276 list_add_tail(&chirp->link, &pchnl->io_completions);
281 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
287 * ======== bridge_chnl_close ========
289 * Ensures all pending I/O on this channel is cancelled, discards all
290 * queued I/O completion notifications, then frees the resources allocated
291 * for this channel, and makes the corresponding logical channel id
292 * available for subsequent use.
294 int bridge_chnl_close(struct chnl_object *chnl_obj)
297 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
302 /* Cancel IO: this ensures no further IO requests or notifications */
303 status = bridge_chnl_cancel_io(chnl_obj);
306 /* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
307 /* Free the slot in the channel manager: */
308 pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
/* Open-channel count is shared manager state; update under the lock. */
309 spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
310 pchnl->chnl_mgr_obj->open_channels -= 1;
311 spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
/* Tear down the notification object, if one was allocated at open. */
312 if (pchnl->ntfy_obj) {
313 ntfy_delete(pchnl->ntfy_obj);
314 kfree(pchnl->ntfy_obj);
315 pchnl->ntfy_obj = NULL;
317 /* Reset channel event: (NOTE: user_event freed in user context) */
318 if (pchnl->sync_event) {
319 sync_reset_event(pchnl->sync_event);
320 kfree(pchnl->sync_event);
321 pchnl->sync_event = NULL;
323 /* Free I/O request and I/O completion queues: */
324 free_chirp_list(&pchnl->io_completions);
327 free_chirp_list(&pchnl->io_requests);
330 free_chirp_list(&pchnl->free_packets_list);
332 /* Release channel object. */
339 * ======== bridge_chnl_create ========
340 * Create a channel manager object, responsible for opening new channels
341 * and closing old ones for a given board.
343 int bridge_chnl_create(struct chnl_mgr **channel_mgr,
344 struct dev_object *hdev_obj,
345 const struct chnl_mgrattrs *mgr_attrts)
348 struct chnl_mgr *chnl_mgr_obj = NULL;
351 /* Allocate channel manager object */
352 chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
355 * The max_channels attr must equal the # of supported chnls for
356 * each transport(# chnls for PCPY = DDMA = ZCPY): i.e.
357 * mgr_attrts->max_channels = CHNL_MAXCHANNELS =
358 * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
360 max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
361 /* Create array of channels */
/* NOTE(review): kcalloc() would add overflow checking for this
 * pointer-array allocation — consider on next functional change. */
362 chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
363 * max_channels, GFP_KERNEL);
364 if (chnl_mgr_obj->channels) {
365 /* Initialize chnl_mgr object */
366 chnl_mgr_obj->type = CHNL_TYPESM;
367 chnl_mgr_obj->word_size = mgr_attrts->word_size;
368 /* Total # chnls supported */
369 chnl_mgr_obj->max_channels = max_channels;
370 chnl_mgr_obj->open_channels = 0;
371 chnl_mgr_obj->output_mask = 0;
372 chnl_mgr_obj->last_output = 0;
373 chnl_mgr_obj->dev_obj = hdev_obj;
374 spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
/* On failure, undo the partial construction via the destroy path. */
383 bridge_chnl_destroy(chnl_mgr_obj);
386 /* Return channel manager object to caller... */
387 *channel_mgr = chnl_mgr_obj;
393 * ======== bridge_chnl_destroy ========
395 * Close all open channels, and destroy the channel manager.
397 int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
400 struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
404 /* Close all open channels: */
405 for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
408 bridge_chnl_close(chnl_mgr_obj->channels
/* Closing a channel is best-effort here; failures are only logged. */
411 dev_dbg(bridge, "%s: Error status 0x%x\n",
415 /* Free channel manager object: */
416 kfree(chnl_mgr_obj->channels);
418 /* Set hchnl_mgr to NULL in device object. */
419 dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
420 /* Free this Chnl Mgr object: */
429 * ======== bridge_chnl_flush_io ========
431 * Flushes all the outstanding data requests on a channel.
433 int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
436 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
438 struct chnl_mgr *chnl_mgr_obj;
439 struct chnl_ioc chnl_ioc_obj;
/* A no-wait flush of an output channel cannot drain anything. */
442 if ((timeout == CHNL_IOCNOWAIT)
443 && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
446 chnl_mode = pchnl->chnl_mode;
447 chnl_mgr_obj = pchnl->chnl_mgr_obj;
453 /* Note: Currently, if another thread continues to add IO
454 * requests to this channel, this function will continue to
455 * flush all such queued IO requests. */
456 if (CHNL_IS_OUTPUT(chnl_mode)
457 && (pchnl->chnl_type == CHNL_PCPY)) {
458 /* Wait for IO completions, up to the specified
460 while (!list_empty(&pchnl->io_requests) && !status) {
461 status = bridge_chnl_get_ioc(chnl_obj,
462 timeout, &chnl_ioc_obj);
/* A timed-out completion record aborts the drain loop. */
466 if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
/* Input (or non-PCPY) channels are simply cancelled instead. */
471 status = bridge_chnl_cancel_io(chnl_obj);
472 /* Now, leave the channel in the ready state: */
473 pchnl->state &= ~CHNL_STATECANCEL;
480 * ======== bridge_chnl_get_info ========
482 * Retrieve information related to a channel.
484 int bridge_chnl_get_info(struct chnl_object *chnl_obj,
485 struct chnl_info *channel_info)
488 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
/* Copy a snapshot of the channel's state into the caller's struct;
 * no locking is taken here, so values may be slightly stale. */
489 if (channel_info != NULL) {
491 /* Return the requested information: */
492 channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
493 channel_info->event_obj = pchnl->user_event;
494 channel_info->cnhl_id = pchnl->chnl_id;
495 channel_info->mode = pchnl->chnl_mode;
496 channel_info->bytes_tx = pchnl->bytes_moved;
497 channel_info->process = pchnl->process;
498 channel_info->sync_event = pchnl->sync_event;
499 channel_info->cio_cs = pchnl->cio_cs;
500 channel_info->cio_reqs = pchnl->cio_reqs;
501 channel_info->state = pchnl->state;
512 * ======== bridge_chnl_get_ioc ========
513 * Optionally wait for I/O completion on a channel. Dequeue an I/O
514 * completion record, which contains information about the completed
516 * Note: Ensures Channel Invariant (see notes above).
518 int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
519 struct chnl_ioc *chan_ioc)
522 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
523 struct chnl_irp *chnl_packet_obj;
525 bool dequeue_ioc = true;
526 struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
527 u8 *host_sys_buf = NULL;
528 struct bridge_dev_context *dev_ctxt;
529 struct dev_object *dev_obj;
/* Validate arguments; with CHNL_IOCNOWAIT an empty completion list
 * means there is nothing to return. */
532 if (!chan_ioc || !pchnl) {
534 } else if (timeout == CHNL_IOCNOWAIT) {
535 if (list_empty(&pchnl->io_completions))
540 dev_obj = dev_get_first();
541 dev_get_bridge_context(dev_obj, &dev_ctxt);
548 ioc.status = CHNL_IOCSTATCOMPLETE;
/* If allowed to wait and no completion is queued yet, block on the
 * channel's sync event (see the invariant described at file top). */
550 CHNL_IOCNOWAIT && list_empty(&pchnl->io_completions)) {
551 if (timeout == CHNL_IOCINFINITE)
552 timeout = SYNC_INFINITE;
554 stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
555 if (stat_sync == -ETIME) {
556 /* No response from DSP */
557 ioc.status |= CHNL_IOCSTATTIMEOUT;
559 } else if (stat_sync == -EPERM) {
560 /* This can occur when the user mode thread is
561 * aborted (^C), or when _VWIN32_WaitSingleObject()
562 * fails due to unknown causes. */
563 /* Even though Wait failed, there may be something in
565 if (list_empty(&pchnl->io_completions)) {
566 ioc.status |= CHNL_IOCSTATCANCEL;
571 /* See comment in AddIOReq */
572 spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
573 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
575 /* Dequeue IOC and set chan_ioc; */
576 chnl_packet_obj = list_first_entry(&pchnl->io_completions,
577 struct chnl_irp, link);
578 list_del(&chnl_packet_obj->link);
579 /* Update chan_ioc from channel state and chirp: */
582 * If this is a zero-copy channel, then set IOC's pbuf
583 * to the DSP's address. This DSP address will get
584 * translated to user's virtual addr later.
586 host_sys_buf = chnl_packet_obj->host_sys_buf;
587 ioc.buf = chnl_packet_obj->host_user_buf;
588 ioc.byte_size = chnl_packet_obj->byte_size;
589 ioc.buf_size = chnl_packet_obj->buf_size;
590 ioc.arg = chnl_packet_obj->arg;
591 ioc.status |= chnl_packet_obj->status;
592 /* Place the used chirp on the free list: */
593 list_add_tail(&chnl_packet_obj->link,
594 &pchnl->free_packets_list);
601 /* Ensure invariant: If any IOC's are queued for this channel... */
602 if (!list_empty(&pchnl->io_completions)) {
603 /* Since DSPStream_Reclaim() does not take a timeout
604 * parameter, we pass the stream's timeout value to
605 * bridge_chnl_get_ioc. We cannot determine whether or not
606 * we have waited in user mode. Since the stream's timeout
607 * value may be non-zero, we still have to set the event.
608 * Therefore, this optimization is taken out.
610 * if (timeout == CHNL_IOCNOWAIT) {
611 * ... ensure event is set..
612 * sync_set_event(pchnl->sync_event);
614 sync_set_event(pchnl->sync_event);
616 /* else, if list is empty, ensure event is reset. */
617 sync_reset_event(pchnl->sync_event);
619 omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
620 spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
/* Bounce-buffered PCPY channels: copy input data back to user space. */
622 && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
623 if (!(ioc.buf < (void *)USERMODE_ADDR))
626 /* If the addr is in user mode, then copy it */
627 if (!host_sys_buf || !ioc.buf) {
631 if (!CHNL_IS_INPUT(pchnl->chnl_mode))
635 status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size);
/* Skip the copy-back if the task is already exiting. */
637 if (current->flags & PF_EXITING)
646 /* Update User's IOC block: */
653 * ======== bridge_chnl_get_mgr_info ========
654 * Retrieve information related to the channel manager.
656 int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
657 struct chnl_mgrinfo *mgr_info)
659 struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
661 if (!mgr_info || !hchnl_mgr)
/* NOTE(review): `>` permits ch_id == CHNL_MAXCHANNELS; if channels are
 * indexed 0..max-1 this looks like an off-by-one — confirm against the
 * channels[] sizing before changing. */
664 if (ch_id > CHNL_MAXCHANNELS)
667 /* Return the requested information: */
668 mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id];
669 mgr_info->open_channels = chnl_mgr_obj->open_channels;
670 mgr_info->type = chnl_mgr_obj->type;
671 /* total # of chnls */
672 mgr_info->max_channels = chnl_mgr_obj->max_channels;
678 * ======== bridge_chnl_idle ========
679 * Idles a particular channel.
681 int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
685 struct chnl_mgr *chnl_mgr_obj;
688 chnl_mode = chnl_obj->chnl_mode;
689 chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
/* Output channels drain pending data unless the caller asked to
 * discard it; otherwise outstanding requests are cancelled. */
691 if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
692 /* Wait for IO completions, up to the specified timeout: */
693 status = bridge_chnl_flush_io(chnl_obj, timeout);
695 status = bridge_chnl_cancel_io(chnl_obj);
697 /* Reset the byte count and put channel back in ready state. */
698 chnl_obj->bytes_moved = 0;
699 chnl_obj->state &= ~CHNL_STATECANCEL;
706 * ======== bridge_chnl_open ========
707 * Open a new half-duplex channel to the DSP board.
709 int bridge_chnl_open(struct chnl_object **chnl,
710 struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
711 u32 ch_id, const struct chnl_attr *pattrs)
714 struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
715 struct chnl_object *pchnl = NULL;
716 struct sync_object *sync_event = NULL;
/* A channel with zero IO requests is useless; reject the attrs. */
721 if (!pattrs->uio_reqs)
/* Either validate the caller's requested slot, or pick a free one. */
727 if (ch_id != CHNL_PICKFREE) {
728 if (ch_id >= chnl_mgr_obj->max_channels)
730 if (chnl_mgr_obj->channels[ch_id] != NULL)
733 /* Check for free channel */
734 status = search_free_channel(chnl_mgr_obj, &ch_id);
740 /* Create channel object: */
741 pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
745 /* Protect queues from io_dpc: */
746 pchnl->state = CHNL_STATECANCEL;
748 /* Allocate initial IOR and IOC queues: */
749 status = create_chirp_list(&pchnl->free_packets_list,
754 INIT_LIST_HEAD(&pchnl->io_requests);
755 INIT_LIST_HEAD(&pchnl->io_completions);
757 pchnl->chnl_packets = pattrs->uio_reqs;
/* Allocate and initialize the channel's completion sync event. */
761 sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
766 sync_init_event(sync_event);
768 pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
769 if (!pchnl->ntfy_obj) {
773 ntfy_init(pchnl->ntfy_obj);
775 /* Initialize CHNL object fields: */
776 pchnl->chnl_mgr_obj = chnl_mgr_obj;
777 pchnl->chnl_id = ch_id;
778 pchnl->chnl_mode = chnl_mode;
779 pchnl->user_event = sync_event;
780 pchnl->sync_event = sync_event;
781 /* Get the process handle */
782 pchnl->process = current->tgid;
784 pchnl->bytes_moved = 0;
785 /* Default to proc-copy */
786 pchnl->chnl_type = CHNL_PCPY;
788 /* Insert channel object in channel manager: */
789 chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl;
790 spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
791 chnl_mgr_obj->open_channels++;
792 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
793 /* Return result... */
/* Channel fully constructed: make it READY so IO can be queued. */
794 pchnl->state = CHNL_STATEREADY;
/* Error unwind: release queues and notification object allocated
 * above (user_event/sync_event handling follows the same path). */
801 free_chirp_list(&pchnl->io_completions);
802 free_chirp_list(&pchnl->io_requests);
803 free_chirp_list(&pchnl->free_packets_list);
807 if (pchnl->ntfy_obj) {
808 ntfy_delete(pchnl->ntfy_obj);
809 kfree(pchnl->ntfy_obj);
810 pchnl->ntfy_obj = NULL;
818 * ======== bridge_chnl_register_notify ========
819 * Registers for events on a particular channel.
821 int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
822 u32 event_mask, u32 notify_type,
823 struct dsp_notification *hnotification)
/* A non-zero event_mask registers for events; zero unregisters
 * the notification (branch structure elided in this view). */
829 status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
830 event_mask, notify_type);
832 status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);
838 * ======== create_chirp_list ========
840 * Initialize a queue of channel I/O Request/Completion packets.
842 * list: Pointer to a list_head
843 * chirps: Number of Chirps to allocate.
845 * 0 if successful, error code otherwise.
849 static int create_chirp_list(struct list_head *list, u32 chirps)
851 struct chnl_irp *chirp;
854 INIT_LIST_HEAD(list);
856 /* Make N chirps and place on queue. */
857 for (i = 0; i < chirps; i++) {
858 chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
861 list_add_tail(&chirp->link, list);
864 /* If we couldn't allocate all chirps, free those allocated: */
866 free_chirp_list(list);
874 * ======== free_chirp_list ========
876 * Free the queue of Chirps.
878 static void free_chirp_list(struct list_head *chirp_list)
880 struct chnl_irp *chirp, *tmp;
/* Safe-iteration variant is required because entries are unlinked
 * (and freed) while walking the list. */
882 list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
883 list_del(&chirp->link);
889 * ======== search_free_channel ========
890 * Search for a free channel slot in the array of channel pointers.
892 static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
898 for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
899 if (chnl_mgr_obj->channels[i] == NULL) {