/*
 *  sep_main.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 *  2011.01.21	Move to sep_main.c to allow for sep_crypto.c
 *  2011.02.22	Enable kernel crypto operation
 *
 *  Please note that this driver is based on information in the Discretix
 *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 *  Overview and Integration Guide.
 */
/* #define SEP_PERF_DEBUG */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"
/*
 * Let's not spend cycles iterating over message
 * area contents if debugging is not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep) _sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif
/**
 * Currently there is only one SEP device per platform;
 * should future platforms have more than one SEP device,
 * this will become a linked list
 */
struct sep_device *sep_dev;
/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to the queue element to remove
 *
 * This function removes information about the transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
                             struct sep_queue_info **queue_elem)
{
        unsigned long lck_flags;

        dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
                current->pid);

        if (!queue_elem || !(*queue_elem)) {
                dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
                        current->pid, __func__);
                return;
        }

        spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
        list_del(&(*queue_elem)->list);
        sep->sep_queue_num--;
        spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

        kfree(*queue_elem);
        *queue_elem = NULL;

        dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
                current->pid);
}
/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a transaction that has started
 * to the status queue.
 */
struct sep_queue_info *sep_queue_status_add(
                                        struct sep_device *sep,
                                        u32 opcode,
                                        u32 size,
                                        u32 pid,
                                        u8 *name, size_t name_len)
{
        unsigned long lck_flags;
        struct sep_queue_info *my_elem = NULL;

        my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
        if (!my_elem)
                return NULL;

        dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

        my_elem->data.opcode = opcode;
        my_elem->data.size = size;
        my_elem->data.pid = pid;

        /* Clamp the name to the fixed-size buffer in the element */
        if (name_len > TASK_COMM_LEN)
                name_len = TASK_COMM_LEN;

        memcpy(&my_elem->data.name, name, name_len);

        spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

        list_add_tail(&my_elem->list, &sep->sep_queue_status);
        sep->sep_queue_num++;

        spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

        return my_elem;
}
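
/*
 * Illustrative sketch (not part of the driver): how a transaction would
 * typically pair sep_queue_status_add() with sep_queue_status_remove().
 * The opcode value below is made up.
 */
#if 0
static void sep_queue_usage_example(struct sep_device *sep)
{
        struct sep_queue_info *elem;

        elem = sep_queue_status_add(sep, /* opcode */ 0x1, /* size */ 512,
                                    current->pid, (u8 *)current->comm,
                                    strlen(current->comm));
        if (!elem)
                return;

        /* ... perform the transaction ... */

        /* Removing also frees the element and NULLs the pointer */
        sep_queue_status_remove(sep, &elem);
}
#endif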
/**
 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
 * @sep: SEP device
 * @dmatables_region: Destination pointer for the buffer
 * @dma_ctx: DMA context for the transaction
 * @table_count: Number of MLLI/DMA tables to create
 * The buffer created will not work as-is for DMA operations;
 * it needs to be copied over to the appropriate place in the
 * shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
                                         void **dmatables_region,
                                         struct sep_dma_context *dma_ctx,
                                         const u32 table_count)
{
        const size_t new_len =
                SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

        void *tmp_region = NULL;

        dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
                current->pid, dma_ctx);
        dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
                current->pid, dmatables_region);

        if (!dma_ctx || !dmatables_region) {
                dev_warn(&sep->pdev->dev,
                         "[PID%d] dma context/region uninitialized\n",
                         current->pid);
                return -EINVAL;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
                current->pid, new_len);
        dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
                dma_ctx->dmatables_len);

        tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
        if (!tmp_region)
                return -ENOMEM;

        /* Were there any previous tables that need to be preserved ? */
        if (*dmatables_region) {
                memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
                kfree(*dmatables_region);
                *dmatables_region = NULL;
        }

        *dmatables_region = tmp_region;

        dma_ctx->dmatables_len += new_len;

        return 0;
}
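
/*
 * Growth pattern sketch (illustrative numbers): each call appends another
 * (SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1) bytes and copies the old
 * contents forward, so after N calls dmatables_len is N * new_len.
 * For example, assuming new_len were 0x4000 bytes, the first call yields
 * a 0x4000-byte region and a second call reallocates to 0x8000 bytes
 * with the first 0x4000 bytes preserved.
 */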
/**
 * sep_wait_transaction - Used for synchronizing transactions
 * @sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
        int error = 0;
        DEFINE_WAIT(wait);

        if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
                                  &sep->in_use_flags)) {
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] no transactions, returning\n",
                        current->pid);
                goto end_function_setpid;
        }

        /*
         * Looping is needed even for exclusive waitq entries
         * due to process wakeup latencies: the previous process
         * might have already created another transaction.
         */
        for (;;) {
                /*
                 * Exclusive waitq entry, so that only one process is
                 * woken up from the queue at a time.
                 */
                prepare_to_wait_exclusive(&sep->event_transactions,
                                          &wait,
                                          TASK_INTERRUPTIBLE);
                if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
                                          &sep->in_use_flags)) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] no transactions, breaking\n",
                                current->pid);
                        break;
                }
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] transactions ongoing, sleeping\n",
                        current->pid);
                schedule();
                dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

                if (signal_pending(current)) {
                        dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
                                current->pid);
                        error = -EINTR;
                        goto end_function;
                }
        }

end_function_setpid:
        /*
         * The pid_doing_transaction indicates that this process
         * now owns the facilities to perform a transaction with
         * the SEP. While this process is performing a transaction,
         * no other process that has the SEP device open can perform
         * any transactions. This method allows more than one process
         * to have the device open at any given time, which provides
         * finer granularity for device utilization by multiple
         * processes.
         */
        /* Only one process is able to progress here at a time */
        sep->pid_doing_transaction = current->pid;

end_function:
        finish_wait(&sep->event_transactions, &wait);

        return error;
}
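
/*
 * Lifecycle sketch (illustrative only): a caller that wants exclusive use
 * of the SEP brackets its work between sep_wait_transaction() and the
 * transaction-ending path:
 */
#if 0
static int sep_transaction_example(struct sep_device *sep)
{
        int error;

        error = sep_wait_transaction(sep);  /* may sleep; -EINTR on signal */
        if (error)
                return error;

        /* ... build DMA tables, send the command, poll for the reply ... */

        /*
         * Ownership is released by the end-transaction path, which clears
         * SEP_TRANSACTION_STARTED_LOCK_BIT and wakes the next waiter
         * (see sep_end_transaction_handler below).
         */
        return 0;
}
#endif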
/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
        dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
                current->pid,
                sep->pid_doing_transaction);

        if ((sep->pid_doing_transaction == 0) ||
            (current->pid != sep->pid_doing_transaction))
                return -EACCES;

        /* We own the transaction */
        return 0;
}
/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This will print a dump only if DEBUG is set; it also
 * follows the kernel debug print enabling
 */
static void _sep_dump_message(struct sep_device *sep)
{
        int count;

        u32 *p = sep->shared_addr;

        for (count = 0; count < 10 * 4; count += 4)
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] Word %d of the message is %x\n",
                        current->pid, count/4, *p++);
}
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 *
 * The size of the block is taken from sep->shared_size.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
        sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
                                              sep->shared_size,
                                              &sep->shared_bus, GFP_KERNEL);

        if (!sep->shared_addr) {
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] shared memory dma_alloc_coherent failed\n",
                        current->pid);
                return -ENOMEM;
        }
        dev_dbg(&sep->pdev->dev,
                "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
                current->pid,
                sep->shared_size, sep->shared_addr,
                (unsigned long long)sep->shared_bus);
        return 0;
}
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
        dma_free_coherent(&sep->pdev->dev, sep->shared_size,
                          sep->shared_addr, sep->shared_bus);
}
/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
                                    dma_addr_t bus_address)
{
        return sep->shared_addr + (bus_address - sep->shared_bus);
}
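
/*
 * Address arithmetic example (illustrative numbers only): if the shared
 * area were allocated at shared_bus = 0x20000000 with kernel address V,
 * then sep_shared_bus_to_virt(sep, 0x20000040) returns V + 0x40, and
 * sep_shared_area_virt_to_bus() (further below) inverts the mapping:
 * V + 0x40 maps back to 0x20000040.
 */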
/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
        struct sep_device *sep;
        struct sep_private_data *priv;

        dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

        if (filp->f_flags & O_NONBLOCK)
                return -ENOTSUPP;

        /*
         * Get the SEP device structure and use it for the
         * private_data field in filp for other methods
         */
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        sep = sep_dev;
        priv->device = sep;
        filp->private_data = priv;

        dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
                current->pid, priv);

        /* Anyone can open; locking takes place at transaction level */
        return 0;
}
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
                                    struct sep_dma_context **dma_ctx)
{
        int count;
        int dcb_counter;
        /* Pointer to the current dma_resource struct */
        struct sep_dma_resource *dma;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_free_dma_table_data_handler\n",
                current->pid);

        if (!dma_ctx || !(*dma_ctx)) {
                /* No context or context already freed */
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] no DMA context or context already freed\n",
                        current->pid);

                return 0;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
                current->pid,
                (*dma_ctx)->nr_dcb_creat);

        for (dcb_counter = 0;
             dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
                dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

                /* Unmap and free input map array */
                if (dma->in_map_array) {
                        for (count = 0; count < dma->in_num_pages; count++) {
                                dma_unmap_page(&sep->pdev->dev,
                                        dma->in_map_array[count].dma_addr,
                                        dma->in_map_array[count].size,
                                        DMA_TO_DEVICE);
                        }
                        kfree(dma->in_map_array);
                }

                /*
                 * Output is handled differently. If
                 * this was a secure dma into restricted memory,
                 * then we skip this step altogether, as restricted
                 * memory is not available to the o/s at all.
                 */
                if (!(*dma_ctx)->secure_dma && dma->out_map_array) {

                        for (count = 0; count < dma->out_num_pages; count++) {
                                dma_unmap_page(&sep->pdev->dev,
                                        dma->out_map_array[count].dma_addr,
                                        dma->out_map_array[count].size,
                                        DMA_FROM_DEVICE);
                        }
                        kfree(dma->out_map_array);
                }

                /* Free page cache for input */
                if (dma->in_page_array) {
                        for (count = 0; count < dma->in_num_pages; count++) {
                                flush_dcache_page(dma->in_page_array[count]);
                                page_cache_release(dma->in_page_array[count]);
                        }
                        kfree(dma->in_page_array);
                }

                /* Again, we do this only for non secure dma */
                if (!(*dma_ctx)->secure_dma && dma->out_page_array) {

                        for (count = 0; count < dma->out_num_pages; count++) {
                                if (!PageReserved(dma->out_page_array[count]))

                                        SetPageDirty(dma->
                                        out_page_array[count]);

                                flush_dcache_page(dma->out_page_array[count]);
                                page_cache_release(dma->out_page_array[count]);
                        }
                        kfree(dma->out_page_array);
                }

                /*
                 * Note that here we use in_map_num_entries because we
                 * don't have a page array; the page array is generated
                 * only in lock_user_pages, which is not called
                 * for kernel crypto, which is what the sg (scatter gather)
                 * is used for exclusively
                 */
                if (dma->src_sg) {
                        dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
                                dma->in_map_num_entries, DMA_TO_DEVICE);
                        dma->src_sg = NULL;
                }

                if (dma->dst_sg) {
                        dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
                                dma->in_map_num_entries, DMA_FROM_DEVICE);
                        dma->dst_sg = NULL;
                }

                /* Reset all the values */
                dma->in_page_array = NULL;
                dma->out_page_array = NULL;
                dma->in_num_pages = 0;
                dma->out_num_pages = 0;
                dma->in_map_array = NULL;
                dma->out_map_array = NULL;
                dma->in_map_num_entries = 0;
                dma->out_map_num_entries = 0;
        }

        (*dma_ctx)->nr_dcb_creat = 0;
        (*dma_ctx)->num_lli_tables_created = 0;

        kfree(*dma_ctx);
        *dma_ctx = NULL;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_free_dma_table_data_handler end\n",
                current->pid);

        return 0;
}
/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: pointer to status queue element
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
                                       struct sep_dma_context **dma_ctx,
                                       struct sep_call_status *call_status,
                                       struct sep_queue_info **my_queue_elem)
{
        dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

        /*
         * Extraneous transaction clearing would mess up PM
         * device usage counters and SEP would get suspended
         * just before we send a command to SEP in the next
         * transaction
         */
        if (sep_check_transaction_owner(sep)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
                        current->pid);
                return 0;
        }

        /* Update queue status */
        sep_queue_status_remove(sep, my_queue_elem);

        /* Check that all the DMA resources were freed */
        if (dma_ctx)
                sep_free_dma_table_data_handler(sep, dma_ctx);

        /* Reset call status for next transaction */
        if (call_status)
                call_status->status = 0;

        /* Clear the message area to avoid next transaction reading
         * sensitive results from previous transaction */
        memset(sep->shared_addr, 0,
               SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

        /* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
        if (sep->in_use) {
                sep->in_use = 0;
                pm_runtime_mark_last_busy(&sep->pdev->dev);
                pm_runtime_put_autosuspend(&sep->pdev->dev);
        }
#endif
        clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
        sep->pid_doing_transaction = 0;

        /* Now it's safe for next process to proceed */
        dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
                current->pid);
        clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
        wake_up(&sep->event_transactions);

        return 0;
}
/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
        struct sep_private_data * const private_data = filp->private_data;
        struct sep_call_status *call_status = &private_data->call_status;
        struct sep_device *sep = private_data->device;
        struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
        struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

        dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

        sep_end_transaction_handler(sep, dma_ctx, call_status,
                                    my_queue_elem);

        kfree(filp->private_data);

        return 0;
}
/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct sep_private_data * const private_data = filp->private_data;
        struct sep_call_status *call_status = &private_data->call_status;
        struct sep_device *sep = private_data->device;
        struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
        dma_addr_t bus_addr;
        unsigned long error = 0;

        dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

        /* Set the transaction busy (own the device) */
        /*
         * Problem for multithreaded applications is that here we're
         * possibly going to sleep while holding a write lock on
         * current->mm->mmap_sem, which will cause deadlock for ongoing
         * transaction trying to create DMA tables
         */
        error = sep_wait_transaction(sep);
        if (error)
                /* Interrupted by signal, don't clear transaction */
                goto end_function;

        /* Clear the message area to avoid next transaction reading
         * sensitive results from previous transaction */
        memset(sep->shared_addr, 0,
               SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

        /*
         * Check that the size of the mapped range matches the size of the
         * message shared area
         */
        if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
                error = -EINVAL;
                goto end_function_with_error;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
                current->pid, sep->shared_addr);

        /* Get bus address */
        bus_addr = sep->shared_bus;

        if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
                vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
                        current->pid);
                error = -EAGAIN;
                goto end_function_with_error;
        }

        /* Update call status */
        set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

        goto end_function;

end_function_with_error:
        /* Clear our transaction */
        sep_end_transaction_handler(sep, NULL, call_status,
                my_queue_elem);

end_function:
        return error;
}
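
/*
 * Userspace view (hedged sketch): the mmap handler above is what backs an
 * application doing roughly the following; the device node name is
 * hypothetical and the length is assumed to be the message area size:
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *msg = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * after which msg aliases the message area at the start of the SEP
 * shared region.
 */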
/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * the SEP device
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
        struct sep_private_data * const private_data = filp->private_data;
        struct sep_call_status *call_status = &private_data->call_status;
        struct sep_device *sep = private_data->device;
        u32 mask = 0;
        u32 retval = 0;
        u32 retval2 = 0;
        unsigned long lock_irq_flag;

        /* Am I the process that owns the transaction? */
        if (sep_check_transaction_owner(sep)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
                        current->pid);
                mask = POLLERR;
                goto end_function;
        }

        /* Check if send command or send_reply were activated previously */
        if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
                          &call_status->status)) {
                dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
                         current->pid);
                mask = POLLERR;
                goto end_function;
        }

        /* Add the event to the polling wait table */
        dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
                current->pid);

        poll_wait(filp, &sep->event_interrupt, wait);

        dev_dbg(&sep->pdev->dev,
                "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
                current->pid, sep->send_ct, sep->reply_ct);

        /* Check if error occurred during poll */
        retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
        if ((retval2 != 0x0) && (retval2 != 0x8)) {
                dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
                        current->pid, retval2);
                mask |= POLLERR;
                goto end_function;
        }

        spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

        if (sep->send_ct == sep->reply_ct) {
                spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
                retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] poll: data ready check (GPR2) %x\n",
                        current->pid, retval);

                /* Check if printf request */
                if ((retval >> 30) & 0x1) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: SEP printf request\n",
                                current->pid);
                        goto end_function;
                }

                /* Check if this is a SEP reply or request */
                if (retval >> 31) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: SEP request\n",
                                current->pid);
                } else {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll: normal return\n",
                                current->pid);
                        sep_dump_message(sep);
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
                                current->pid);
                        mask |= POLLIN | POLLRDNORM;
                        set_bit(SEP_LEGACY_POLL_DONE_OFFSET,
                                &call_status->status);
                }
        } else {
                spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] poll; no reply; returning mask of 0\n",
                        current->pid);
                mask = 0;
        }

end_function:
        return mask;
}
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
        return sep->shared_addr +
                SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
        struct timeval time;
        u32 *time_addr; /* Address of time as seen by the kernel */

        do_gettimeofday(&time);

        /* Set value in the SYSTEM MEMORY offset */
        time_addr = sep_time_address(sep);

        time_addr[0] = SEP_TIME_VAL_TOKEN;
        time_addr[1] = time.tv_sec;

        dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
                current->pid, time.tv_sec);
        dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
                current->pid, time_addr);
        dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
                current->pid, sep->shared_addr);

        return time.tv_sec;
}
/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises interrupt to SEP that signals that it has a new
 * command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
        unsigned long lock_irq_flag;
        u32 *msg_pool;
        int error = 0;

        /* Basic sanity check; set msg pool to start of shared area */
        msg_pool = (u32 *)sep->shared_addr;
        msg_pool += 2;

        /* Look for start msg token */
        if (*msg_pool != SEP_START_MSG_TOKEN) {
                dev_warn(&sep->pdev->dev, "start message token not present\n");
                error = -EPROTO;
                goto end_function;
        }

        /* Do we have a reasonable size? */
        msg_pool += 1;
        if ((*msg_pool < 2) ||
            (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

                dev_warn(&sep->pdev->dev, "invalid message size\n");
                error = -EPROTO;
                goto end_function;
        }

        /* Does the command look reasonable? */
        msg_pool += 1;
        if (*msg_pool < 2) {
                dev_warn(&sep->pdev->dev, "invalid message opcode\n");
                error = -EPROTO;
                goto end_function;
        }

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
        dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
                current->pid,
                sep->pdev->dev.power.runtime_status);
        sep->in_use = 1; /* device is about to be used */
        pm_runtime_get_sync(&sep->pdev->dev);
#endif

        if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
                error = -EPROTO;
                goto end_function;
        }
        sep->in_use = 1; /* device is about to be used */

        sep_set_time(sep);

        sep_dump_message(sep);

        /* Update counter */
        spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
        sep->send_ct++;
        spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

        dev_dbg(&sep->pdev->dev,
                "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
                current->pid, sep->send_ct, sep->reply_ct);

        /* Send interrupt to SEP */
        sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
        return error;
}
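
/*
 * Shared-area message header implied by the checks above (offsets in
 * 32-bit words; reconstructed from this function, not from the Discretix
 * specification):
 *
 *	word 2: SEP_START_MSG_TOKEN
 *	word 3: total message size in bytes
 *	        (2 <= size <= SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)
 *	word 4: opcode (values below 2 are rejected)
 */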
/**
 * sep_crypto_dma - map a scatterlist for DMA
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @direction: DMA direction
 * @returns number of dma maps on success; negative on error
 *
 * This creates the dma table from the scatterlist
 * It is used only for kernel crypto as it works with scatterlist
 * representation of data buffers
 */
static int sep_crypto_dma(
        struct sep_device *sep,
        struct scatterlist *sg,
        struct sep_dma_map **dma_maps,
        enum dma_data_direction direction)
{
        struct scatterlist *temp_sg;

        u32 count_segment;
        u32 count_mapped;
        struct sep_dma_map *sep_dma;
        int ct1;

        /* Count the segments */
        temp_sg = sg;
        count_segment = 0;
        while (temp_sg) {
                count_segment += 1;
                temp_sg = scatterwalk_sg_next(temp_sg);
        }
        dev_dbg(&sep->pdev->dev,
                "There are (hex) %x segments in sg\n", count_segment);

        /* DMA map segments */
        count_mapped = dma_map_sg(&sep->pdev->dev, sg,
                count_segment, direction);

        dev_dbg(&sep->pdev->dev,
                "There are (hex) %x maps in sg\n", count_mapped);

        if (count_mapped == 0) {
                dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
                return -ENOMEM;
        }

        sep_dma = kmalloc_array(count_mapped, sizeof(struct sep_dma_map),
                                GFP_ATOMIC);

        if (sep_dma == NULL) {
                dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
                return -ENOMEM;
        }

        for_each_sg(sg, temp_sg, count_mapped, ct1) {
                sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
                sep_dma[ct1].size = sg_dma_len(temp_sg);
                dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
                        ct1, (unsigned long)sep_dma[ct1].dma_addr,
                        (unsigned long)sep_dma[ct1].size);
        }

        *dma_maps = sep_dma;
        return count_mapped;
}
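
/*
 * Call sketch (illustrative only): mapping a single linear buffer for
 * input. The helper name and the buffer/len parameters are hypothetical.
 */
#if 0
static int sep_crypto_dma_example(struct sep_device *sep,
                                  void *buffer, size_t len)
{
        struct scatterlist sg;
        struct sep_dma_map *maps;
        int nents;

        sg_init_one(&sg, buffer, len);
        nents = sep_crypto_dma(sep, &sg, &maps, DMA_TO_DEVICE);
        if (nents < 0)
                return nents;   /* dma_map_sg or allocation failed */

        /* maps[0..nents-1] now hold bus addresses/sizes for the SEP */
        kfree(maps);
        return 0;
}
#endif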
/**
 * sep_crypto_lli - build an LLI array from a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @data_size: total data size
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @lli_maps: pointer to place a pointer to array of lli maps
 *	This is filled in; anything previous there will be lost
 *	The structure for lli maps is sep_lli_entry
 * @returns number of dma maps on success; negative on error
 *
 * This creates the LLI table from the scatterlist
 * It is only used for kernel crypto as it works exclusively
 * with scatterlist (struct scatterlist) representation of
 * data buffers
 */
static int sep_crypto_lli(
        struct sep_device *sep,
        struct scatterlist *sg,
        struct sep_dma_map **maps,
        struct sep_lli_entry **llis,
        u32 data_size,
        enum dma_data_direction direction)
{
        int ct1;
        int nbr_ents;
        struct sep_lli_entry *sep_lli;
        struct sep_dma_map *sep_map;

        nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
        if (nbr_ents <= 0) {
                dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
                        nbr_ents);
                return nbr_ents;
        }

        sep_map = *maps;

        sep_lli = kmalloc_array(nbr_ents, sizeof(struct sep_lli_entry),
                                GFP_ATOMIC);

        if (sep_lli == NULL) {
                dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

                kfree(*maps);
                *maps = NULL;
                return -ENOMEM;
        }

        for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
                sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

                /* Maximum for page is total data size */
                if (sep_map[ct1].size > data_size)
                        sep_map[ct1].size = data_size;

                sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
        }

        *llis = sep_lli;
        return nbr_ents;
}
/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that the application data holds in this page
 * This function is used only during kernel crypto module calls from within
 * the kernel (when ioctl is not used)
 *
 * This is used only for kernel crypto. Kernel pages
 * are handled differently as they are done via
 * scatter gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
        unsigned long kernel_virt_addr,
        u32 data_size,
        struct sep_lli_entry **lli_array_ptr,
        int in_out_flag,
        struct sep_dma_context *dma_ctx)

{
        int num_pages;
        struct scatterlist *sg;

        /* Array of lli */
        struct sep_lli_entry *lli_array;
        /* Map array */
        struct sep_dma_map *map_array;

        enum dma_data_direction direction;

        lli_array = NULL;
        map_array = NULL;

        if (in_out_flag == SEP_DRIVER_IN_FLAG) {
                direction = DMA_TO_DEVICE;
                sg = dma_ctx->src_sg;
        } else {
                direction = DMA_FROM_DEVICE;
                sg = dma_ctx->dst_sg;
        }

        num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
                data_size, direction);

        if (num_pages <= 0) {
                dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
                        num_pages);
                return -ENOMEM;
        }

        /* Put mapped kernel sg into kernel resource array */

        /* Set output params according to the in_out flag */
        if (in_out_flag == SEP_DRIVER_IN_FLAG) {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
                                                                NULL;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
                        dma_ctx->src_sg;
        } else {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
                                                                NULL;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
                        out_map_num_entries = num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
                        dma_ctx->dst_sg;
        }

        return 0;
}
/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that the application
 * data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
        u32 app_virt_addr,
        u32 data_size,
        struct sep_lli_entry **lli_array_ptr,
        int in_out_flag,
        struct sep_dma_context *dma_ctx)

{
        int error = 0;
        u32 count;
        int result;
        /* The page of the end address of the user space buffer */
        u32 end_page;
        /* The page of the start address of the user space buffer */
        u32 start_page;
        /* The range in pages */
        u32 num_pages;
        /* Array of pointers to page */
        struct page **page_array;
        /* Array of lli */
        struct sep_lli_entry *lli_array;
        /* Map array */
        struct sep_dma_map *map_array;

        /* Set start and end pages and num pages */
        end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
        start_page = app_virt_addr >> PAGE_SHIFT;
        num_pages = end_page - start_page + 1;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] lock user pages app_virt_addr is %x\n",
                current->pid, app_virt_addr);

        dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
                current->pid, data_size);
        dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
                current->pid, start_page);
        dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
                current->pid, end_page);
        dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
                current->pid, num_pages);

        /* Allocate array of pages structure pointers */
        page_array = kmalloc_array(num_pages, sizeof(struct page *),
                                   GFP_KERNEL);
        if (!page_array) {
                error = -ENOMEM;
                goto end_function;
        }

        map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
                                  GFP_KERNEL);
        if (!map_array) {
                error = -ENOMEM;
                goto end_function_with_error1;
        }

        lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
                                  GFP_KERNEL);
        if (!lli_array) {
                error = -ENOMEM;
                goto end_function_with_error2;
        }

        /* Convert the application virtual address into a set of physical */
        result = get_user_pages_fast(app_virt_addr, num_pages,
                ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);

        /* Check the number of pages locked - if not all then exit with error */
        if (result != num_pages) {
                dev_warn(&sep->pdev->dev,
                         "[PID%d] not all pages locked by get_user_pages, "
                         "result 0x%X, num_pages 0x%X\n",
                         current->pid, result, num_pages);
                error = -ENOMEM;
                goto end_function_with_error3;
        }

        dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
                current->pid);

        /*
         * Fill the array using page array data and
         * map the pages - this action will also flush the cache as needed
         */
        for (count = 0; count < num_pages; count++) {
                /* Fill the map array */
                map_array[count].dma_addr =
                        dma_map_page(&sep->pdev->dev, page_array[count],
                                     0, PAGE_SIZE, DMA_BIDIRECTIONAL);

                map_array[count].size = PAGE_SIZE;

                /* Fill the lli array entry */
                lli_array[count].bus_address = (u32)map_array[count].dma_addr;
                lli_array[count].block_size = PAGE_SIZE;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_array[%x].bus_address is %08lx, "
                        "lli_array[%x].block_size is (hex) %x\n", current->pid,
                        count, (unsigned long)lli_array[count].bus_address,
                        count, lli_array[count].block_size);
        }

        /* Check the offset for the first page */
        lli_array[0].bus_address =
                lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

        /* Check that not all the data is in the first page only */
        if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
                lli_array[0].block_size = data_size;
        else
                lli_array[0].block_size =
                        PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

        dev_dbg(&sep->pdev->dev,
                "[PID%d] After check if page 0 has all data\n",
                current->pid);
        dev_dbg(&sep->pdev->dev,
                "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
                "lli_array[0].block_size is (hex) %x\n",
                current->pid,
                (unsigned long)lli_array[0].bus_address,
                lli_array[0].block_size);

        /* Check the size of the last page */
        if (num_pages > 1) {
                lli_array[num_pages - 1].block_size =
                        (app_virt_addr + data_size) & (~PAGE_MASK);
                if (lli_array[num_pages - 1].block_size == 0)
                        lli_array[num_pages - 1].block_size = PAGE_SIZE;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] After last page size adjustment\n",
                        current->pid);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
                        "lli_array[%x].block_size is (hex) %x\n",
                        current->pid,
                        num_pages - 1,
                        (unsigned long)lli_array[num_pages - 1].bus_address,
                        num_pages - 1,
                        lli_array[num_pages - 1].block_size);
        }
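
        /*
         * Worked example with made-up numbers: app_virt_addr = 0x1003,
         * data_size = 0x2000, PAGE_SIZE = 0x1000. Then start_page = 1,
         * end_page = 3, num_pages = 3; lli_array[0] starts at offset 3
         * into its page with block_size 0xFFD, lli_array[1] covers a full
         * 0x1000 page, and lli_array[2] is trimmed to 0x003, so the block
         * sizes sum back to data_size (0xFFD + 0x1000 + 0x003 = 0x2000).
         */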
        /* Set output params according to the in_out flag */
        if (in_out_flag == SEP_DRIVER_IN_FLAG) {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
                                                                page_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
        } else {
                *lli_array_ptr = lli_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
                                                                num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
                                                                page_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
                                                                map_array;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
                        out_map_num_entries = num_pages;
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
        }
        goto end_function;

end_function_with_error3:
        /* Free lli array */
        kfree(lli_array);

end_function_with_error2:
        kfree(map_array);

end_function_with_error1:
        /* Free page array */
        kfree(page_array);

end_function:
        return error;
}
/**
 * sep_lli_table_secure_dma - get lli array for IMR addresses
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: not used
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function creates lli tables for outputting data to
 * IMR memory, which is memory that cannot be accessed by the
 * x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
        u32 app_virt_addr,
        u32 data_size,
        struct sep_lli_entry **lli_array_ptr,
        int in_out_flag,
        struct sep_dma_context *dma_ctx)

{
        int error = 0;
        u32 count;
        /* The page of the end address of the user space buffer */
        u32 end_page;
        /* The page of the start address of the user space buffer */
        u32 start_page;
        /* The range in pages */
        u32 num_pages;
        /* Array of lli */
        struct sep_lli_entry *lli_array;

        /* Set start and end pages and num pages */
        end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
        start_page = app_virt_addr >> PAGE_SHIFT;
        num_pages = end_page - start_page + 1;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] lock user pages app_virt_addr is %x\n",
                current->pid, app_virt_addr);

        dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
                current->pid, data_size);
        dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
                current->pid, start_page);
        dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
                current->pid, end_page);
        dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
                current->pid, num_pages);

        lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
                                  GFP_KERNEL);
        if (!lli_array)
                return -ENOMEM;

        /*
         * Fill the lli_array
         */
        start_page = start_page << PAGE_SHIFT;
        for (count = 0; count < num_pages; count++) {
                /* Fill the lli array entry */
                lli_array[count].bus_address = start_page;
                lli_array[count].block_size = PAGE_SIZE;

                start_page += PAGE_SIZE;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_array[%x].bus_address is %08lx, "
                        "lli_array[%x].block_size is (hex) %x\n",
                        current->pid,
                        count, (unsigned long)lli_array[count].bus_address,
                        count, lli_array[count].block_size);
        }

        /* Check the offset for the first page */
        lli_array[0].bus_address =
                lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

        /* Check that not all the data is in the first page only */
        if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
                lli_array[0].block_size = data_size;
        else
                lli_array[0].block_size =
                        PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

        dev_dbg(&sep->pdev->dev,
                "[PID%d] After check if page 0 has all data\n"
                "lli_array[0].bus_address is (hex) %08lx, "
                "lli_array[0].block_size is (hex) %x\n",
                current->pid,
                (unsigned long)lli_array[0].bus_address,
                lli_array[0].block_size);

        /* Check the size of the last page */
        if (num_pages > 1) {
                lli_array[num_pages - 1].block_size =
                        (app_virt_addr + data_size) & (~PAGE_MASK);
                if (lli_array[num_pages - 1].block_size == 0)
                        lli_array[num_pages - 1].block_size = PAGE_SIZE;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] After last page size adjustment\n"
                        "lli_array[%x].bus_address is (hex) %08lx, "
                        "lli_array[%x].block_size is (hex) %x\n",
                        current->pid, num_pages - 1,
                        (unsigned long)lli_array[num_pages - 1].bus_address,
                        num_pages - 1,
                        lli_array[num_pages - 1].block_size);
        }
        *lli_array_ptr = lli_array;
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

        return error;
}
/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr
 * @num_array_entries
 * @last_table_flag
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
        struct sep_lli_entry *lli_in_array_ptr,
        u32 num_array_entries,
        u32 *last_table_flag)
{
        u32 counter;
        /* Table data size */
        u32 table_data_size = 0;
        /* Data size for the next table */
        u32 next_table_data_size;

        *last_table_flag = 0;

        /*
         * Calculate the data in the out lli table till we fill the whole
         * table or till the data has ended
         */
        for (counter = 0;
             (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
             (counter < num_array_entries); counter++)
                table_data_size += lli_in_array_ptr[counter].block_size;

        /*
         * Check if we reached the last entry,
         * meaning this is the last table to build,
         * and no need to check the block alignment
         */
        if (counter == num_array_entries) {
                /* Set the last table flag */
                *last_table_flag = 1;
                goto end_function;
        }

        /*
         * Calculate the data size of the next table.
         * Stop if no entries left or if data size is more than the DMA
         * restriction
         */
        next_table_data_size = 0;
        for (; counter < num_array_entries; counter++) {
                next_table_data_size += lli_in_array_ptr[counter].block_size;
                if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
                        break;
        }

        /*
         * Check if the next table data size is less than the DMA restriction.
         * If it is - recalculate the current table size, so that the next
         * table data size will be adequate for DMA
         */
        if (next_table_data_size &&
            next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

                table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
                        next_table_data_size);

end_function:
        return table_data_size;
}
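
/*
 * Worked example (made-up numbers): assuming
 * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP were 8 (7 data entries plus the
 * info entry) and the lli array held ten 0x1000-byte blocks, the first
 * pass sums 7 entries, giving table_data_size = 0x7000, and since
 * counter != num_array_entries, last_table_flag stays 0. The remaining
 * three entries (0x3000 bytes) are then summed against
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE; only if they fall short of that
 * minimum is the current table shrunk so the next table has enough data
 * for DMA.
 */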
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
        struct sep_lli_entry *lli_array_ptr,
        struct sep_lli_entry *lli_table_ptr,
        u32 *num_processed_entries_ptr,
        u32 *num_table_entries_ptr,
        u32 table_data_size)
{
        /* Current table data size */
        u32 curr_table_data_size;
        /* Counter of lli array entry */
        u32 array_counter;

        /* Init current table data size and lli array entry counter */
        curr_table_data_size = 0;
        array_counter = 0;
        *num_table_entries_ptr = 1;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] build lli table table_data_size: (hex) %x\n",
                current->pid, table_data_size);

        /* Fill the table till table size reaches the needed amount */
        while (curr_table_data_size < table_data_size) {
                /* Update the number of entries in table */
                (*num_table_entries_ptr)++;

                lli_table_ptr->bus_address =
                        cpu_to_le32(lli_array_ptr[array_counter].bus_address);

                lli_table_ptr->block_size =
                        cpu_to_le32(lli_array_ptr[array_counter].block_size);

                curr_table_data_size += lli_array_ptr[array_counter].block_size;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_table_ptr is %p\n",
                        current->pid, lli_table_ptr);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_table_ptr->bus_address: %08lx\n",
                        current->pid,
                        (unsigned long)lli_table_ptr->bus_address);

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
                        current->pid, lli_table_ptr->block_size);

                /* Check for overflow of the table data */
                if (curr_table_data_size > table_data_size) {
                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] curr_table_data_size too large\n",
                                current->pid);

                        /* Update the size of block in the table */
                        lli_table_ptr->block_size =
                                cpu_to_le32(lli_table_ptr->block_size) -
                                (curr_table_data_size - table_data_size);

                        /* Update the physical address in the lli array */
                        lli_array_ptr[array_counter].bus_address +=
                                cpu_to_le32(lli_table_ptr->block_size);

                        /* Update the block size left in the lli array */
                        lli_array_ptr[array_counter].block_size =
                                (curr_table_data_size - table_data_size);
                } else {
                        /* Advance to the next entry in the lli_array */
                        array_counter++;
                }

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_table_ptr->bus_address is %08lx\n",
                        current->pid,
                        (unsigned long)lli_table_ptr->bus_address);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
                        current->pid,
                        lli_table_ptr->block_size);

                /* Move to the next entry in table */
                lli_table_ptr++;
        }

        /* Set the info entry to default */
        lli_table_ptr->bus_address = 0xffffffff;
        lli_table_ptr->block_size = 0;

        /* Set the output parameter */
        *num_processed_entries_ptr += array_counter;
}
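
/*
 * Resulting layout sketch (numbers illustrative only): for an lli array
 * of three blocks sized 0x1000/0x1000/0x0200 and table_data_size =
 * 0x2200, the built table is
 *
 *	entry 0: bus of block 0, block_size 0x1000
 *	entry 1: bus of block 1, block_size 0x1000
 *	entry 2: bus of block 2, block_size 0x0200
 *	entry 3: bus 0xffffffff, block_size 0 (info entry placeholder)
 *
 * and *num_processed_entries_ptr advances by 3. The caller later
 * overwrites the info entry to chain to the next table (see
 * sep_prepare_input_dma_table below).
 */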
/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
                                              void *virt_address)
{
        dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
                current->pid, virt_address);
        dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
                current->pid,
                (unsigned long)
                (sep->shared_bus + (virt_address - sep->shared_addr)));

        return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
                                         dma_addr_t bus_address)
{
        dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
                current->pid,
                (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
                (size_t)(bus_address - sep->shared_bus)));

        return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
        struct sep_lli_entry *lli_table_ptr,
        unsigned long num_table_entries,
        unsigned long table_data_size)
{
#ifdef DEBUG
        unsigned long table_count = 1;
        unsigned long entries_count = 0;

        dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
                current->pid);
        if (num_table_entries == 0) {
                dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
                        current->pid);
                return;
        }

        while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] lli table %08lx, "
                        "table_data_size is (hex) %lx\n",
                        current->pid, table_count, table_data_size);
                dev_dbg(&sep->pdev->dev,
                        "[PID%d] num_table_entries is (hex) %lx\n",
                        current->pid, num_table_entries);

                /* Print entries of the table (without info entry) */
                for (entries_count = 0; entries_count < num_table_entries;
                     entries_count++, lli_table_ptr++) {

                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] lli_table_ptr address is %08lx\n",
                                current->pid,
                                (unsigned long) lli_table_ptr);

                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] phys address is %08lx "
                                "block size is (hex) %x\n", current->pid,
                                (unsigned long)lli_table_ptr->bus_address,
                                lli_table_ptr->block_size);
                }

                /* Point to the info entry */
                lli_table_ptr--;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] phys lli_table_ptr->block_size "
                        "is (hex) %x\n",
                        current->pid,
                        lli_table_ptr->block_size);

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] phys lli_table_ptr->physical_address "
                        "is %08lx\n",
                        current->pid,
                        (unsigned long)lli_table_ptr->bus_address);

                table_data_size = lli_table_ptr->block_size & 0xffffff;
                num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] phys table_data_size is "
                        "(hex) %lx num_table_entries is"
                        " %lx bus_address is %lx\n",
                        current->pid,
                        table_data_size,
                        num_table_entries,
                        (unsigned long)lli_table_ptr->bus_address);

                if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
                        lli_table_ptr = (struct sep_lli_entry *)
                                sep_shared_bus_to_virt(sep,
                                (unsigned long)lli_table_ptr->bus_address);

                table_count++;
        }
        dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
                current->pid);
#endif
}
/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 * @dmatables_region: Optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
                dma_addr_t *lli_table_addr_ptr,
                u32 *num_entries_ptr,
                u32 *table_data_size_ptr,
                void **dmatables_region,
                struct sep_dma_context *dma_ctx)
{
        struct sep_lli_entry *lli_table_ptr;

        /* Find the area for new table */
        lli_table_ptr =
                (struct sep_lli_entry *)(sep->shared_addr +
                SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
                dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
                SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

        if (dmatables_region && *dmatables_region)
                lli_table_ptr = *dmatables_region;

        lli_table_ptr->bus_address = 0;
        lli_table_ptr->block_size = 0;

        lli_table_ptr++;
        lli_table_ptr->bus_address = 0xFFFFFFFF;
        lli_table_ptr->block_size = 0;

        /* Set the output parameter value */
        *lli_table_addr_ptr = sep->shared_bus +
                SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
                dma_ctx->num_lli_tables_created *
                sizeof(struct sep_lli_entry) *
                SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

        /* Set the num of entries and table data size for empty table */
        *num_entries_ptr = 2;
        *table_data_size_ptr = 0;

        /* Update the number of created tables */
        dma_ctx->num_lli_tables_created++;
}
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: virtual address of the input buffer
 * @data_size: size of data
 * @block_size: block size of the symmetric operation
 * @lli_table_ptr: returned bus address of the first input lli table
 * @num_entries_ptr: returned number of entries in the first table
 * @table_data_size_ptr: returned data size of the first table
 * @is_kva: set for kernel data (kernel crypto call)
 * @dmatables_region: optional buffer for DMA tables
 * @dma_ctx: DMA context for the transaction
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
        unsigned long app_virt_addr,
        u32 data_size,
        u32 block_size,
        dma_addr_t *lli_table_ptr,
        u32 *num_entries_ptr,
        u32 *table_data_size_ptr,
        bool is_kva,
        void **dmatables_region,
        struct sep_dma_context *dma_ctx)
{
        int error = 0;
        /* Pointer to the info entry of the table - the last entry */
        struct sep_lli_entry *info_entry_ptr;
        /* Array of pointers to page */
        struct sep_lli_entry *lli_array_ptr;
        /* Points to the first entry to be processed in the lli_in_array */
        u32 current_entry = 0;
        /* Num entries in the virtual buffer */
        u32 sep_lli_entries = 0;
        /* Lli table pointer */
        struct sep_lli_entry *in_lli_table_ptr;
        /* The total data in one table */
        u32 table_data_size = 0;
        /* Flag for last table */
        u32 last_table_flag = 0;
        /* Number of entries in lli table */
        u32 num_entries_in_table = 0;
        /* Next table address */
        void *lli_table_alloc_addr = NULL;
        void *dma_lli_table_alloc_addr = NULL;
        void *dma_in_lli_table_ptr = NULL;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] prepare input dma tbl data size: (hex) %x\n",
                current->pid, data_size);

        dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
                current->pid, block_size);

        /* Initialize the pages pointers */
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

        /* Set the kernel address for first table to be allocated */
        lli_table_alloc_addr = (void *)(sep->shared_addr +
                SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
                dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
                SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
        if (data_size == 0) {
                if (dmatables_region) {
                        error = sep_allocate_dmatables_region(sep,
                                                              dmatables_region,
                                                              dma_ctx, 1);
                        if (error)
                                return error;
                }
                /* Special case - create empty table - 2 entries, zero data */
                sep_prepare_empty_lli_table(sep, lli_table_ptr,
                                num_entries_ptr, table_data_size_ptr,
                                dmatables_region, dma_ctx);
                goto update_dcb_counter;
        }

        /* Check if the pages are in Kernel Virtual Address layout */
        if (is_kva == true)
                error = sep_lock_kernel_pages(sep, app_virt_addr,
                        data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
                        dma_ctx);
        else
                /*
                 * Lock the pages of the user buffer
                 * and translate them to pages
                 */
                error = sep_lock_user_pages(sep, app_virt_addr,
                        data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
                        dma_ctx);

        if (error)
                goto end_function;

        dev_dbg(&sep->pdev->dev,
                "[PID%d] output sep_in_num_pages is (hex) %x\n",
                current->pid,
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

        current_entry = 0;
        info_entry_ptr = NULL;

        sep_lli_entries =
                dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

        dma_lli_table_alloc_addr = lli_table_alloc_addr;
        if (dmatables_region) {
                error = sep_allocate_dmatables_region(sep,
                                                      dmatables_region,
                                                      dma_ctx,
                                                      sep_lli_entries);
                if (error)
                        goto end_function_error;
                lli_table_alloc_addr = *dmatables_region;
        }

        /* Loop until all the entries in the input array are processed */
        while (current_entry < sep_lli_entries) {

                /* Set the new input and output tables */
                in_lli_table_ptr =
                        (struct sep_lli_entry *)lli_table_alloc_addr;
                dma_in_lli_table_ptr =
                        (struct sep_lli_entry *)dma_lli_table_alloc_addr;

                lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
                        SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
                dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
                        SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

                if (dma_lli_table_alloc_addr >
                        ((void *)sep->shared_addr +
                        SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
                        SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

                        error = -ENOMEM;
                        goto end_function_error;

                }

                /* Update the number of created tables */
                dma_ctx->num_lli_tables_created++;

                /* Calculate the maximum size of data for input table */
                table_data_size = sep_calculate_lli_table_max_size(sep,
                        &lli_array_ptr[current_entry],
                        (sep_lli_entries - current_entry),
                        &last_table_flag);

                /*
                 * If this is not the last table -
                 * then align it to the block size
                 */
                if (!last_table_flag)
                        table_data_size =
                                (table_data_size / block_size) * block_size;

                dev_dbg(&sep->pdev->dev,
                        "[PID%d] output table_data_size is (hex) %x\n",
                        current->pid,
                        table_data_size);

                /* Construct input lli table */
                sep_build_lli_table(sep, &lli_array_ptr[current_entry],
                        in_lli_table_ptr,
                        &current_entry, &num_entries_in_table, table_data_size);

                if (info_entry_ptr == NULL) {

                        /* Set the output parameters to physical addresses */
                        *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
                                dma_in_lli_table_ptr);
                        *num_entries_ptr = num_entries_in_table;
                        *table_data_size_ptr = table_data_size;

                        dev_dbg(&sep->pdev->dev,
                                "[PID%d] output lli_table_in_ptr is %08lx\n",
                                current->pid,
                                (unsigned long)*lli_table_ptr);

                } else {
                        /* Update the info entry of the previous in table */
                        info_entry_ptr->bus_address =
                                sep_shared_area_virt_to_bus(sep,
                                        dma_in_lli_table_ptr);
                        info_entry_ptr->block_size =
                                ((num_entries_in_table) << 24) |
                                (table_data_size);
                }
                /* Save the pointer to the info entry of the current tables */
                info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
        }
        /* Print input tables */
        if (!dmatables_region) {
                sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
                        sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
                        *num_entries_ptr, *table_data_size_ptr);
        }

        /* The array of the pages */
        kfree(lli_array_ptr);

update_dcb_counter:
        /* Update DCB counter */
        dma_ctx->nr_dcb_creat++;
        goto end_function;

end_function_error:
        /* Free all the allocated resources */
        kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
        kfree(lli_array_ptr);
        kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
        dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
        return error;
}
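
/*
 * Table chaining scheme used above (reconstructed from this file, with
 * illustrative numbers): the last ("info") entry of each lli table points
 * at the next table, packing the entry count and data size into one word:
 *
 *	info->bus_address = bus address of the next table;
 *	info->block_size  = (num_entries << 24) | table_data_size;
 *
 * e.g. a next table with 8 entries covering 0x4000 bytes is encoded as
 * 0x08004000. sep_debug_print_lli_tables() unpacks the same fields with
 * ">> 24" and "& 0xffffff".
 */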
/**
 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
 * @sep: pointer to struct sep_device
 * @lli_in_array: lli array of the input buffer
 * @sep_in_lli_entries: number of entries in lli_in_array
 * @lli_out_array: lli array of the output buffer
 * @sep_out_lli_entries: number of entries in lli_out_array
 * @block_size: block size of the symmetric operation
 * @lli_table_in_ptr: returned bus address of the first input table
 * @lli_table_out_ptr: returned bus address of the first output table
 * @in_num_entries_ptr: returned number of input table entries
 * @out_num_entries_ptr: returned number of output table entries
 * @table_data_size_ptr: returned data size of the first table pair
 *
 * This function creates the input and output DMA tables for
 * symmetric operations (AES/DES) according to the block
 * size from LLI arrays
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_construct_dma_tables_from_lli(
        struct sep_device *sep,
        struct sep_lli_entry *lli_in_array,
        u32 sep_in_lli_entries,
        struct sep_lli_entry *lli_out_array,
        u32 sep_out_lli_entries,
        u32 block_size,
        dma_addr_t *lli_table_in_ptr,
        dma_addr_t *lli_table_out_ptr,
        u32 *in_num_entries_ptr,
        u32 *out_num_entries_ptr,
        u32 *table_data_size_ptr,
        void **dmatables_region,
        struct sep_dma_context *dma_ctx)
2125 /* Points to the area where next lli table can be allocated */
2126 void *lli_table_alloc_addr = NULL;
2128 * Points to the area in shared region where next lli table
2131 void *dma_lli_table_alloc_addr = NULL;
2132 /* Input lli table in dmatables_region or shared region */
2133 struct sep_lli_entry *in_lli_table_ptr = NULL;
2134 /* Input lli table location in the shared region */
2135 struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2136 /* Output lli table in dmatables_region or shared region */
2137 struct sep_lli_entry *out_lli_table_ptr = NULL;
2138 /* Output lli table location in the shared region */
2139 struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2140 /* Pointer to the info entry of the table - the last entry */
2141 struct sep_lli_entry *info_in_entry_ptr = NULL;
2142 /* Pointer to the info entry of the table - the last entry */
2143 struct sep_lli_entry *info_out_entry_ptr = NULL;
2144 /* Points to the first entry to be processed in the lli_in_array */
2145 u32 current_in_entry = 0;
2146 /* Points to the first entry to be processed in the lli_out_array */
2147 u32 current_out_entry = 0;
2148 /* Max size of the input table */
2149 u32 in_table_data_size = 0;
2150 /* Max size of the output table */
2151 u32 out_table_data_size = 0;
2152 /* Flag that signifies whether this is the last table to build */
2153 u32 last_table_flag = 0;
2154 /* The data size that should be in table */
2155 u32 table_data_size = 0;
2156 /* Number of entries in the input table */
2157 u32 num_entries_in_table = 0;
2158 /* Number of entries in the output table */
2159 u32 num_entries_out_table = 0;
2162 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2166 /* Initialize to point past the message area */
2167 lli_table_alloc_addr = (void *)(sep->shared_addr +
2168 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2169 (dma_ctx->num_lli_tables_created *
2170 (sizeof(struct sep_lli_entry) *
2171 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2172 dma_lli_table_alloc_addr = lli_table_alloc_addr;
2174 if (dmatables_region) {
2175 /* 2 for both in+out table */
2176 if (sep_allocate_dmatables_region(sep,
2179 2*sep_in_lli_entries))
2181 lli_table_alloc_addr = *dmatables_region;
2184 /* Loop until all entries in the input array are processed */
2185 while (current_in_entry < sep_in_lli_entries) {
2186 /* Set the new input and output tables */
2188 (struct sep_lli_entry *)lli_table_alloc_addr;
2189 dma_in_lli_table_ptr =
2190 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2192 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2193 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2194 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2195 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2197 /* Set the first output tables */
2199 (struct sep_lli_entry *)lli_table_alloc_addr;
2200 dma_out_lli_table_ptr =
2201 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2203 /* Check if the DMA table area limit was overrun */
2204 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2205 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2206 ((void *)sep->shared_addr +
2207 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2208 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2210 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2214 /* Update the number of the lli tables created */
2215 dma_ctx->num_lli_tables_created += 2;
2217 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2218 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2219 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2220 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2222 /* Calculate the maximum size of data for input table */
2223 in_table_data_size =
2224 sep_calculate_lli_table_max_size(sep,
2225 &lli_in_array[current_in_entry],
2226 (sep_in_lli_entries - current_in_entry),
2229 /* Calculate the maximum size of data for output table */
2230 out_table_data_size =
2231 sep_calculate_lli_table_max_size(sep,
2232 &lli_out_array[current_out_entry],
2233 (sep_out_lli_entries - current_out_entry),
2236 if (!last_table_flag) {
2237 in_table_data_size = (in_table_data_size /
2238 block_size) * block_size;
2239 out_table_data_size = (out_table_data_size /
2240 block_size) * block_size;
2243 table_data_size = in_table_data_size;
2244 if (table_data_size > out_table_data_size)
2245 table_data_size = out_table_data_size;
2247 dev_dbg(&sep->pdev->dev,
2248 "[PID%d] construct tables from lli"
2249 " in_table_data_size is (hex) %x\n", current->pid,
2250 in_table_data_size);
2252 dev_dbg(&sep->pdev->dev,
2253 "[PID%d] construct tables from lli"
2254 "out_table_data_size is (hex) %x\n", current->pid,
2255 out_table_data_size);
2257 /* Construct input lli table */
2258 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2261 &num_entries_in_table,
2264 /* Construct output lli table */
2265 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2268 &num_entries_out_table,
2271 /* If the info entries are null - these are the first tables built */
2272 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2273 /* Set the output parameters to physical addresses */
2275 sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2277 *in_num_entries_ptr = num_entries_in_table;
2279 *lli_table_out_ptr =
2280 sep_shared_area_virt_to_bus(sep,
2281 dma_out_lli_table_ptr);
2283 *out_num_entries_ptr = num_entries_out_table;
2284 *table_data_size_ptr = table_data_size;
2286 dev_dbg(&sep->pdev->dev,
2287 "[PID%d] output lli_table_in_ptr is %08lx\n",
2289 (unsigned long)*lli_table_in_ptr);
2290 dev_dbg(&sep->pdev->dev,
2291 "[PID%d] output lli_table_out_ptr is %08lx\n",
2293 (unsigned long)*lli_table_out_ptr);
2295 /* Update the info entry of the previous input table */
2296 info_in_entry_ptr->bus_address =
2297 sep_shared_area_virt_to_bus(sep,
2298 dma_in_lli_table_ptr);
2300 info_in_entry_ptr->block_size =
2301 ((num_entries_in_table) << 24) |
2304 /* Update the info entry of the previous output table */
2305 info_out_entry_ptr->bus_address =
2306 sep_shared_area_virt_to_bus(sep,
2307 dma_out_lli_table_ptr);
2309 info_out_entry_ptr->block_size =
2310 ((num_entries_out_table) << 24) |
2313 dev_dbg(&sep->pdev->dev,
2314 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2316 (unsigned long)info_in_entry_ptr->bus_address,
2317 info_in_entry_ptr->block_size);
2319 dev_dbg(&sep->pdev->dev,
2320 "[PID%d] output lli_table_out_ptr:"
2323 (unsigned long)info_out_entry_ptr->bus_address,
2324 info_out_entry_ptr->block_size);
2327 /* Save the pointer to the info entry of the current tables */
2328 info_in_entry_ptr = in_lli_table_ptr +
2329 num_entries_in_table - 1;
2330 info_out_entry_ptr = out_lli_table_ptr +
2331 num_entries_out_table - 1;
2333 dev_dbg(&sep->pdev->dev,
2334 "[PID%d] output num_entries_out_table is %x\n",
2336 (u32)num_entries_out_table);
2337 dev_dbg(&sep->pdev->dev,
2338 "[PID%d] output info_in_entry_ptr is %lx\n",
2340 (unsigned long)info_in_entry_ptr);
2341 dev_dbg(&sep->pdev->dev,
2342 "[PID%d] output info_out_entry_ptr is %lx\n",
2344 (unsigned long)info_out_entry_ptr);
2347 /* Print input tables */
2348 if (!dmatables_region) {
2349 sep_debug_print_lli_tables(
2351 (struct sep_lli_entry *)
2352 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2353 *in_num_entries_ptr,
2354 *table_data_size_ptr);
2357 /* Print output tables */
2358 if (!dmatables_region) {
2359 sep_debug_print_lli_tables(
2361 (struct sep_lli_entry *)
2362 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2363 *out_num_entries_ptr,
2364 *table_data_size_ptr);
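/*
 * Input and output tables are built in lockstep: each pass processes
 * the smaller of the two per-table maxima, rounded down to a whole
 * block except for the last table. A sketch of that computation,
 * under a hypothetical helper name:
 */
static inline u32 sep_paired_table_data_size(u32 in_max, u32 out_max,
					     u32 block_size,
					     u32 last_table_flag)
{
	u32 size = in_max < out_max ? in_max : out_max;

	if (!last_table_flag)
		size -= size % block_size;	/* whole blocks only */
	return size;
}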
2371 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2372 * @app_virt_in_addr: virtual address of the input buffer
2373 * @app_virt_out_addr: virtual address of the output buffer
2376 * @lli_table_in_ptr: returned bus address of the first input LLI table
2377 * @lli_table_out_ptr: returned bus address of the first output LLI table
2378 * @in_num_entries_ptr: returned number of entries in the first input table
2379 * @out_num_entries_ptr: returned number of entries in the first output table
2380 * @table_data_size_ptr: returned data size of the first table
2381 * @is_kva: set for kernel data; used only for kernel crypto module
2383 * This function builds input and output DMA tables for synchronic
2384 * symmetric operations (AES, DES, HASH). It also checks that each
2385 * table's data size is a multiple of the block size
2386 * Note that all bus addresses that are passed to the SEP
2387 * are in 32 bit format; the SEP is a 32 bit device
2389 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2390 unsigned long app_virt_in_addr,
2391 unsigned long app_virt_out_addr,
2394 dma_addr_t *lli_table_in_ptr,
2395 dma_addr_t *lli_table_out_ptr,
2396 u32 *in_num_entries_ptr,
2397 u32 *out_num_entries_ptr,
2398 u32 *table_data_size_ptr,
2400 void **dmatables_region,
2401 struct sep_dma_context *dma_ctx)
2405 /* Array of pointers of page */
2406 struct sep_lli_entry *lli_in_array;
2407 /* Array of pointers of page */
2408 struct sep_lli_entry *lli_out_array;
2415 if (data_size == 0) {
2416 /* Prepare empty table for input and output */
2417 if (dmatables_region) {
2418 error = sep_allocate_dmatables_region(
2426 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2427 in_num_entries_ptr, table_data_size_ptr,
2428 dmatables_region, dma_ctx);
2430 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2431 out_num_entries_ptr, table_data_size_ptr,
2432 dmatables_region, dma_ctx);
2434 goto update_dcb_counter;
2437 /* Initialize the pages pointers */
2438 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2439 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2441 /* Lock the pages of the buffer and translate them to pages */
2443 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2445 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2446 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2449 dev_warn(&sep->pdev->dev,
2450 "[PID%d] sep_lock_kernel_pages for input "
2451 "virtual buffer failed\n", current->pid);
2456 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2458 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2459 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2463 dev_warn(&sep->pdev->dev,
2464 "[PID%d] sep_lock_kernel_pages for output "
2465 "virtual buffer failed\n", current->pid);
2467 goto end_function_free_lli_in;
2473 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2475 error = sep_lock_user_pages(sep, app_virt_in_addr,
2476 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2479 dev_warn(&sep->pdev->dev,
2480 "[PID%d] sep_lock_user_pages for input "
2481 "virtual buffer failed\n", current->pid);
2486 if (dma_ctx->secure_dma) {
2487 /* secure_dma requires use of non accessible memory */
2488 dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2490 error = sep_lli_table_secure_dma(sep,
2491 app_virt_out_addr, data_size, &lli_out_array,
2492 SEP_DRIVER_OUT_FLAG, dma_ctx);
2494 dev_warn(&sep->pdev->dev,
2495 "[PID%d] secure dma table setup "
2496 " for output virtual buffer failed\n",
2499 goto end_function_free_lli_in;
2502 /* For normal, non-secure dma */
2503 dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2506 dev_dbg(&sep->pdev->dev,
2507 "[PID%d] Locking user output pages\n",
2510 error = sep_lock_user_pages(sep, app_virt_out_addr,
2511 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2515 dev_warn(&sep->pdev->dev,
2516 "[PID%d] sep_lock_user_pages"
2517 " for output virtual buffer failed\n",
2520 goto end_function_free_lli_in;
2525 dev_dbg(&sep->pdev->dev,
2526 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2528 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2530 dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2532 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2534 dev_dbg(&sep->pdev->dev,
2535 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2536 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2538 /* Call the function that creates table from the lli arrays */
2539 dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2541 error = sep_construct_dma_tables_from_lli(
2543 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2546 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2548 block_size, lli_table_in_ptr, lli_table_out_ptr,
2549 in_num_entries_ptr, out_num_entries_ptr,
2550 table_data_size_ptr, dmatables_region, dma_ctx);
2553 dev_warn(&sep->pdev->dev,
2554 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2556 goto end_function_with_error;
2559 kfree(lli_out_array);
2560 kfree(lli_in_array);
2563 /* Update DCB counter */
2564 dma_ctx->nr_dcb_creat++;
2568 end_function_with_error:
2569 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2570 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2571 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2572 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2573 kfree(lli_out_array);
2576 end_function_free_lli_in:
2577 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2578 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2579 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2580 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2581 kfree(lli_in_array);
2590 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2591 * @app_in_address: unsigned long; for data buffer in (user space)
2592 * @app_out_address: unsigned long; for data buffer out (user space)
2593 * @data_in_size: u32; for size of data
2594 * @block_size: u32; for block size
2595 * @tail_block_size: u32; for size of tail block
2596 * @isapplet: bool; to indicate external app
2597 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2598 * @secure_dma: indicates whether this is secure_dma using IMR
2600 * This function prepares the linked DMA tables and puts the
2601 * address for the linked list of tables into a DCB (data control
2602 * block), the address of which is known by the SEP hardware
2603 * Note that all bus addresses that are passed to the SEP
2604 * are in 32 bit format; the SEP is a 32 bit device
2606 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2607 unsigned long app_in_address,
2608 unsigned long app_out_address,
2611 u32 tail_block_size,
2615 struct sep_dcblock *dcb_region,
2616 void **dmatables_region,
2617 struct sep_dma_context **dma_ctx,
2618 struct scatterlist *src_sg,
2619 struct scatterlist *dst_sg)
2624 /* Address of the created DCB table */
2625 struct sep_dcblock *dcb_table_ptr = NULL;
2626 /* The physical address of the first input DMA table */
2627 dma_addr_t in_first_mlli_address = 0;
2628 /* Number of entries in the first input DMA table */
2629 u32 in_first_num_entries = 0;
2630 /* The physical address of the first output DMA table */
2631 dma_addr_t out_first_mlli_address = 0;
2632 /* Number of entries in the first output DMA table */
2633 u32 out_first_num_entries = 0;
2634 /* Data in the first input/output table */
2635 u32 first_data_size = 0;
2637 dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2638 current->pid, app_in_address);
2640 dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2641 current->pid, app_out_address);
2643 dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2644 current->pid, data_in_size);
2646 dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2647 current->pid, block_size);
2649 dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2650 current->pid, tail_block_size);
2652 dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2653 current->pid, isapplet);
2655 dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2656 current->pid, is_kva);
2658 dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2659 current->pid, src_sg);
2661 dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2662 current->pid, dst_sg);
2665 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2672 /* In case there are multiple DCBs for this transaction */
2673 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2676 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2678 dev_dbg(&sep->pdev->dev,
2679 "[PID%d] Not enough memory for DMA context\n",
2684 dev_dbg(&sep->pdev->dev,
2685 "[PID%d] Created DMA context addr at 0x%p\n",
2686 current->pid, *dma_ctx);
2689 (*dma_ctx)->secure_dma = secure_dma;
2691 /* these are for kernel crypto only */
2692 (*dma_ctx)->src_sg = src_sg;
2693 (*dma_ctx)->dst_sg = dst_sg;
2695 if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2696 /* No more DCBs to allocate */
2697 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2700 goto end_function_error;
2703 /* Allocate new DCB */
2705 dcb_table_ptr = dcb_region;
2707 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2708 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2709 ((*dma_ctx)->nr_dcb_creat *
2710 sizeof(struct sep_dcblock)));
2713 /* Set the default values in the DCB */
2714 dcb_table_ptr->input_mlli_address = 0;
2715 dcb_table_ptr->input_mlli_num_entries = 0;
2716 dcb_table_ptr->input_mlli_data_size = 0;
2717 dcb_table_ptr->output_mlli_address = 0;
2718 dcb_table_ptr->output_mlli_num_entries = 0;
2719 dcb_table_ptr->output_mlli_data_size = 0;
2720 dcb_table_ptr->tail_data_size = 0;
2721 dcb_table_ptr->out_vr_tail_pt = 0;
2725 /* Check if there is enough data for DMA operation */
2726 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2729 goto end_function_error;
2731 if (copy_from_user(dcb_table_ptr->tail_data,
2732 (void __user *)app_in_address,
2735 goto end_function_error;
2739 dcb_table_ptr->tail_data_size = data_in_size;
2741 /* Set the output user-space address for mem2mem op */
2742 if (app_out_address)
2743 dcb_table_ptr->out_vr_tail_pt =
2744 (aligned_u64)app_out_address;
2747 * Update both data length parameters in order to avoid
2748 * second data copy and allow building of empty mlli
2755 if (!app_out_address) {
2756 tail_size = data_in_size % block_size;
2758 if (tail_block_size == block_size)
2759 tail_size = block_size;
2766 if (tail_size > sizeof(dcb_table_ptr->tail_data))
2770 goto end_function_error;
2772 /* We have tail data - copy it to DCB */
2773 if (copy_from_user(dcb_table_ptr->tail_data,
2774 (void __user *)(app_in_address +
2775 data_in_size - tail_size), tail_size)) {
2777 goto end_function_error;
2780 if (app_out_address)
2782 * Calculate the output address
2783 * according to tail data size
2785 dcb_table_ptr->out_vr_tail_pt =
2786 (aligned_u64)app_out_address +
2787 data_in_size - tail_size;
2789 /* Save the real tail data size */
2790 dcb_table_ptr->tail_data_size = tail_size;
2792 * Update the data size without the tail
2793 * data size AKA data for the dma
2795 data_in_size = (data_in_size - tail_size);
2798 /* Check if we need to build only input table or input/output */
2799 if (app_out_address) {
2800 /* Prepare input/output tables */
2801 error = sep_prepare_input_output_dma_table(sep,
2806 &in_first_mlli_address,
2807 &out_first_mlli_address,
2808 &in_first_num_entries,
2809 &out_first_num_entries,
2815 /* Prepare input tables */
2816 error = sep_prepare_input_dma_table(sep,
2820 &in_first_mlli_address,
2821 &in_first_num_entries,
2829 dev_warn(&sep->pdev->dev,
2830 "prepare DMA table call failed "
2831 "from prepare DCB call\n");
2832 goto end_function_error;
2835 /* Set the DCB values */
2836 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2837 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2838 dcb_table_ptr->input_mlli_data_size = first_data_size;
2839 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2840 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2841 dcb_table_ptr->output_mlli_data_size = first_data_size;
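/*
 * The tail logic above peels off the non-block-aligned remainder of
 * the input (or one full trailing block when tail_block_size equals
 * block_size) so it travels through the DCB tail_data instead of DMA.
 * A sketch of the size computation, as a hypothetical helper (the
 * excerpt elides part of the original control flow):
 */
static inline u32 sep_tail_size(u32 data_in_size, u32 block_size,
				u32 tail_block_size)
{
	u32 tail = data_in_size % block_size;

	/* an aligned buffer may still owe one whole tail block */
	if (!tail && tail_block_size == block_size)
		tail = block_size;
	return tail;
}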
2856 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2857 * @sep: pointer to struct sep_device
2858 * @isapplet: indicates external application (used for kernel access)
2859 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2861 * This function frees the DMA tables and DCB
2863 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2864 bool is_kva, struct sep_dma_context **dma_ctx)
2866 struct sep_dcblock *dcb_table_ptr;
2867 unsigned long pt_hold;
2874 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2876 if (!dma_ctx || !*dma_ctx) /* nothing to be done here */
2879 if (!(*dma_ctx)->secure_dma && isapplet) {
2880 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2883 /* Tail stuff is only for non-secure_dma */
2884 /* Set pointer to first DCB table */
2885 dcb_table_ptr = (struct sep_dcblock *)
2887 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2890 * Go over each DCB and see if
2891 * tail pointer must be updated
2893 for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2894 if (dcb_table_ptr->out_vr_tail_pt) {
2895 pt_hold = (unsigned long)dcb_table_ptr->
2897 tail_pt = (void *)pt_hold;
2902 error_temp = copy_to_user(
2903 (void __user *)tail_pt,
2904 dcb_table_ptr->tail_data,
2905 dcb_table_ptr->tail_data_size);
2908 /* Release the DMA resource */
2916 /* Free the output pages, if any */
2917 sep_free_dma_table_data_handler(sep, dma_ctx);
2919 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2926 * sep_prepare_dcb_handler - prepare a control block
2927 * @sep: pointer to struct sep_device
2928 * @arg: pointer to user parameters
2929 * @secure_dma: indicate whether we are using secure_dma on IMR
2931 * This function will retrieve the RAR buffer physical addresses, type
2932 * & size corresponding to the RAR handles provided in the buffers vector.
2934 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2936 struct sep_dma_context **dma_ctx)
2939 /* Command arguments */
2940 static struct build_dcb_struct command_args;
2942 /* Get the command arguments */
2943 if (copy_from_user(&command_args, (void __user *)arg,
2944 sizeof(struct build_dcb_struct))) {
2949 dev_dbg(&sep->pdev->dev,
2950 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2951 current->pid, command_args.app_in_address);
2952 dev_dbg(&sep->pdev->dev,
2953 "[PID%d] app_out_address is %08llx\n",
2954 current->pid, command_args.app_out_address);
2955 dev_dbg(&sep->pdev->dev,
2956 "[PID%d] data_size is %x\n",
2957 current->pid, command_args.data_in_size);
2958 dev_dbg(&sep->pdev->dev,
2959 "[PID%d] block_size is %x\n",
2960 current->pid, command_args.block_size);
2961 dev_dbg(&sep->pdev->dev,
2962 "[PID%d] tail block_size is %x\n",
2963 current->pid, command_args.tail_block_size);
2964 dev_dbg(&sep->pdev->dev,
2965 "[PID%d] is_applet is %x\n",
2966 current->pid, command_args.is_applet);
2968 if (!command_args.app_in_address) {
2969 dev_warn(&sep->pdev->dev,
2970 "[PID%d] null app_in_address\n", current->pid);
2975 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2976 (unsigned long)command_args.app_in_address,
2977 (unsigned long)command_args.app_out_address,
2978 command_args.data_in_size, command_args.block_size,
2979 command_args.tail_block_size,
2980 command_args.is_applet, false,
2981 secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2989 * sep_free_dcb_handler - free control block resources
2990 * @sep: pointer to struct sep_device
2992 * This function frees the DCB resources and updates the needed
2993 * user-space buffers.
2995 static int sep_free_dcb_handler(struct sep_device *sep,
2996 struct sep_dma_context **dma_ctx)
2998 if (!dma_ctx || !(*dma_ctx)) {
2999 dev_dbg(&sep->pdev->dev,
3000 "[PID%d] no dma context defined, nothing to free\n",
3005 dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3007 (*dma_ctx)->nr_dcb_creat);
3009 return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3013 * sep_ioctl - ioctl handler for sep device
3014 * @filp: pointer to struct file
3016 * @arg: pointer to argument structure
3018 * Implement the ioctl methods available on the SEP device.
3020 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3022 struct sep_private_data * const private_data = filp->private_data;
3023 struct sep_call_status *call_status = &private_data->call_status;
3024 struct sep_device *sep = private_data->device;
3025 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3026 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3029 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3031 dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3032 current->pid, *dma_ctx);
3034 /* Make sure we own this device */
3035 error = sep_check_transaction_owner(sep);
3037 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3042 /* Check that sep_mmap has been called before */
3043 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3044 &call_status->status)) {
3045 dev_dbg(&sep->pdev->dev,
3046 "[PID%d] mmap not called\n", current->pid);
3051 /* Check that the command is for SEP device */
3052 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3058 case SEP_IOCSENDSEPCOMMAND:
3059 dev_dbg(&sep->pdev->dev,
3060 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3062 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3063 &call_status->status)) {
3064 dev_warn(&sep->pdev->dev,
3065 "[PID%d] send msg already done\n",
3070 /* Send command to SEP */
3071 error = sep_send_command_handler(sep);
3073 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3074 &call_status->status);
3075 dev_dbg(&sep->pdev->dev,
3076 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3079 case SEP_IOCENDTRANSACTION:
3080 dev_dbg(&sep->pdev->dev,
3081 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3083 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3085 dev_dbg(&sep->pdev->dev,
3086 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3089 case SEP_IOCPREPAREDCB:
3090 dev_dbg(&sep->pdev->dev,
3091 "[PID%d] SEP_IOCPREPAREDCB start\n",
3093 case SEP_IOCPREPAREDCB_SECURE_DMA:
3094 dev_dbg(&sep->pdev->dev,
3095 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3097 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3098 &call_status->status)) {
3099 dev_dbg(&sep->pdev->dev,
3100 "[PID%d] dcb prep needed before send msg\n",
3107 dev_dbg(&sep->pdev->dev,
3108 "[PID%d] dcb null arg\n", current->pid);
3113 if (cmd == SEP_IOCPREPAREDCB) {
3115 dev_dbg(&sep->pdev->dev,
3116 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3119 error = sep_prepare_dcb_handler(sep, arg, false,
3123 dev_dbg(&sep->pdev->dev,
3124 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3127 error = sep_prepare_dcb_handler(sep, arg, true,
3130 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3133 case SEP_IOCFREEDCB:
3134 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3136 case SEP_IOCFREEDCB_SECURE_DMA:
3137 dev_dbg(&sep->pdev->dev,
3138 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3140 error = sep_free_dcb_handler(sep, dma_ctx);
3141 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3146 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3152 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
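/*
 * Hypothetical user-space sketch of the legacy ioctl sequence the
 * handler above enforces (mmap before any ioctl, DCB preparation
 * before sending the message); names, paths and ordering are
 * illustrative only:
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *msg = mmap(NULL, msg_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct build_dcb_struct dcb = { ... };
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb);	// optional, DMA setup
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);	// kick the SEP
 *	...					// wait for completion
 *	ioctl(fd, SEP_IOCFREEDCB, 0);
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
 */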
3158 * sep_inthandler - interrupt handler for sep device
3160 * @dev_id: device id
3162 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3164 unsigned long lock_irq_flag;
3165 u32 reg_val, reg_val2 = 0;
3166 struct sep_device *sep = dev_id;
3167 irqreturn_t int_error = IRQ_HANDLED;
3169 /* Are we in power save? */
3170 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3171 if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3172 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3177 if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3178 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3182 /* Read the IRR register to check if this is SEP interrupt */
3183 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3185 dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3187 if (reg_val & (0x1 << 13)) {
3189 /* Lock and update the counter of reply messages */
3190 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3192 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3194 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3195 sep->send_ct, sep->reply_ct);
3197 /* Is this a kernel client request */
3198 if (sep->in_kernel) {
3199 tasklet_schedule(&sep->finish_tasklet);
3200 goto finished_interrupt;
3203 /* Is this printf or daemon request? */
3204 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3205 dev_dbg(&sep->pdev->dev,
3206 "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3208 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3210 if ((reg_val2 >> 30) & 0x1) {
3211 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3212 } else if (reg_val2 >> 31) {
3213 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3215 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3216 wake_up(&sep->event_interrupt);
3219 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3220 int_error = IRQ_NONE;
3225 if (int_error == IRQ_HANDLED)
3226 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
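/*
 * GPR2 decoding as performed above: bit 30 flags a printf request,
 * bit 31 a daemon request, anything else is a regular SEP reply that
 * wakes the waiters. A sketch with hypothetical helper names:
 */
static inline bool sep_gpr2_is_printf(u32 gpr2)
{
	return (gpr2 >> 30) & 0x1;	/* printf request */
}

static inline bool sep_gpr2_is_daemon(u32 gpr2)
{
	return gpr2 >> 31;		/* daemon request */
}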
3232 * sep_reconfig_shared_area - reconfigure shared area
3233 * @sep: pointer to struct sep_device
3235 * Reconfig the shared area between HOST and SEP - needed in case
3236 * the DX_CC_Init function was called before OS loading.
3238 static int sep_reconfig_shared_area(struct sep_device *sep)
3242 /* used to limit waiting for SEP */
3243 unsigned long end_time;
3245 /* Send the new SHARED MESSAGE AREA to the SEP */
3246 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3247 (unsigned long long)sep->shared_bus);
3249 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3251 /* Poll for SEP response */
3252 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3254 end_time = jiffies + (WAIT_TIME * HZ);
3256 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3257 (ret_val != sep->shared_bus))
3258 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3260 /* Check the return value (register) */
3261 if (ret_val != sep->shared_bus) {
3262 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3263 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3268 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3274 * sep_activate_dcb_dmatables_context - Takes DCB & DMA table contexts into use
3277 * @dcb_region: DCB region copy
3278 * @dmatables_region: MLLI/DMA tables copy
3279 * @dma_ctx: DMA context for current transaction
3281 ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3282 struct sep_dcblock **dcb_region,
3283 void **dmatables_region,
3284 struct sep_dma_context *dma_ctx)
3286 void *dmaregion_free_start = NULL;
3287 void *dmaregion_free_end = NULL;
3288 void *dcbregion_free_start = NULL;
3289 void *dcbregion_free_end = NULL;
3292 dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3295 if (1 > dma_ctx->nr_dcb_creat) {
3296 dev_warn(&sep->pdev->dev,
3297 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3298 current->pid, dma_ctx->nr_dcb_creat);
3303 dmaregion_free_start = sep->shared_addr
3304 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3305 dmaregion_free_end = dmaregion_free_start
3306 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3308 if (dmaregion_free_start
3309 + dma_ctx->dmatables_len > dmaregion_free_end) {
3313 memcpy(dmaregion_free_start,
3315 dma_ctx->dmatables_len);
3316 /* Free MLLI table copy */
3317 kfree(*dmatables_region);
3318 *dmatables_region = NULL;
3320 /* Copy thread's DCB table copy to DCB table region */
3321 dcbregion_free_start = sep->shared_addr +
3322 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3323 dcbregion_free_end = dcbregion_free_start +
3324 (SEP_MAX_NUM_SYNC_DMA_OPS *
3325 sizeof(struct sep_dcblock)) - 1;
3327 if (dcbregion_free_start
3328 + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3329 > dcbregion_free_end) {
3334 memcpy(dcbregion_free_start,
3336 dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3338 /* Print the tables */
3339 dev_dbg(&sep->pdev->dev, "activate: input table\n");
3340 sep_debug_print_lli_tables(sep,
3341 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3342 (*dcb_region)->input_mlli_address),
3343 (*dcb_region)->input_mlli_num_entries,
3344 (*dcb_region)->input_mlli_data_size);
3346 dev_dbg(&sep->pdev->dev, "activate: output table\n");
3347 sep_debug_print_lli_tables(sep,
3348 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3349 (*dcb_region)->output_mlli_address),
3350 (*dcb_region)->output_mlli_num_entries,
3351 (*dcb_region)->output_mlli_data_size);
3353 dev_dbg(&sep->pdev->dev,
3354 "[PID%d] printing activated tables\n", current->pid);
3357 kfree(*dmatables_region);
3358 *dmatables_region = NULL;
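/*
 * Both memcpy calls above are guarded by end-of-region checks, where
 * the *_free_end pointers reference the last usable byte of the
 * region. A sketch of the bounds test, as a hypothetical helper that
 * mirrors the checks above:
 */
static inline bool sep_region_fits(void *free_start, size_t len,
				   void *free_end)
{
	/* free_end is inclusive (last usable byte) */
	return free_start + len <= free_end;
}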
3367 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3369 * @dcb_region: DCB region buf to create for current transaction
3370 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3371 * @dma_ctx: DMA context buf to create for current transaction
3372 * @user_dcb_args: User arguments for DCB/MLLI creation
3373 * @num_dcbs: Number of DCBs to create
3374 * @secure_dma: Indicate use of IMR restricted memory secure dma
3376 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3377 struct sep_dcblock **dcb_region,
3378 void **dmatables_region,
3379 struct sep_dma_context **dma_ctx,
3380 const struct build_dcb_struct __user *user_dcb_args,
3381 const u32 num_dcbs, bool secure_dma)
3385 struct build_dcb_struct *dcb_args = NULL;
3387 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3390 if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3395 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3396 dev_warn(&sep->pdev->dev,
3397 "[PID%d] invalid number of dcbs 0x%08X\n",
3398 current->pid, num_dcbs);
3403 dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3410 if (copy_from_user(dcb_args,
3412 num_dcbs * sizeof(struct build_dcb_struct))) {
3417 /* Allocate thread-specific memory for DCB */
3418 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3420 if (!(*dcb_region)) {
3425 /* Prepare DCB and MLLI table into the allocated regions */
3426 for (i = 0; i < num_dcbs; i++) {
3427 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3428 (unsigned long)dcb_args[i].app_in_address,
3429 (unsigned long)dcb_args[i].app_out_address,
3430 dcb_args[i].data_in_size,
3431 dcb_args[i].block_size,
3432 dcb_args[i].tail_block_size,
3433 dcb_args[i].is_applet,
3435 *dcb_region, dmatables_region,
3440 dev_warn(&sep->pdev->dev,
3441 "[PID%d] dma table creation failed\n",
3446 if (dcb_args[i].app_in_address != 0)
3447 (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3457 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3460 * @dcb_region: DCB region buf to create for current transaction
3461 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3462 * @dma_ctx: DMA context buf to create for current transaction
3463 * @dcb_data: Kernel-space DCB arguments (struct build_dcb_struct_kernel)
3464 * @num_dcbs: Number of DCBs to create
3465 * This does the same thing as sep_create_dcb_dmatables_context
3466 * except that it is used only for the kernel crypto operation. It is
3467 * separate because there is no user data involved; the dcb data structure
3468 * is specific for kernel crypto (build_dcb_struct_kernel)
3470 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3471 struct sep_dcblock **dcb_region,
3472 void **dmatables_region,
3473 struct sep_dma_context **dma_ctx,
3474 const struct build_dcb_struct_kernel *dcb_data,
3480 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3483 if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3488 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3489 dev_warn(&sep->pdev->dev,
3490 "[PID%d] invalid number of dcbs 0x%08X\n",
3491 current->pid, num_dcbs);
3496 dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3497 current->pid, num_dcbs);
3499 /* Allocate thread-specific memory for DCB */
3500 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3502 if (!(*dcb_region)) {
3507 /* Prepare DCB and MLLI table into the allocated regions */
3508 for (i = 0; i < num_dcbs; i++) {
3509 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3510 (unsigned long)dcb_data->app_in_address,
3511 (unsigned long)dcb_data->app_out_address,
3512 dcb_data->data_in_size,
3513 dcb_data->block_size,
3514 dcb_data->tail_block_size,
3515 dcb_data->is_applet,
3518 *dcb_region, dmatables_region,
3523 dev_warn(&sep->pdev->dev,
3524 "[PID%d] dma table creation failed\n",
3536 * sep_activate_msgarea_context - Takes the message area context into use
3538 * @msg_region: Message area context buf
3539 * @msg_len: Message area context buffer size
3541 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3543 const size_t msg_len)
3545 dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3548 if (!msg_region || !(*msg_region) ||
3549 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3550 dev_warn(&sep->pdev->dev,
3551 "[PID%d] invalid act msgarea len 0x%08zX\n",
3552 current->pid, msg_len);
3556 memcpy(sep->shared_addr, *msg_region, msg_len);
3562 * sep_create_msgarea_context - Creates message area context
3564 * @msg_region: Msg area region buf to create for current transaction
3565 * @msg_user: Content for msg area region from user
3566 * @msg_len: Message area size
3568 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3570 const void __user *msg_user,
3571 const size_t msg_len)
3575 dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3580 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3581 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3582 dev_warn(&sep->pdev->dev,
3583 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3584 current->pid, msg_len);
3589 /* Allocate thread-specific memory for message buffer */
3590 *msg_region = kzalloc(msg_len, GFP_KERNEL);
3591 if (!(*msg_region)) {
3596 /* Copy input data to write() to allocated message buffer */
3597 if (copy_from_user(*msg_region, msg_user, msg_len)) {
3603 if (error && msg_region) {
3613 * sep_read - Returns results of an operation for fastcall interface
3614 * @filp: File pointer
3615 * @buf_user: User buffer for storing results
3616 * @count_user: User buffer size
3617 * @offset: File offset, not supported
3619 * The implementation does not support reading in chunks; all data must be
3620 * consumed during a single read system call.
3622 static ssize_t sep_read(struct file *filp,
3623 char __user *buf_user, size_t count_user,
3626 struct sep_private_data * const private_data = filp->private_data;
3627 struct sep_call_status *call_status = &private_data->call_status;
3628 struct sep_device *sep = private_data->device;
3629 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3630 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3631 ssize_t error = 0, error_tmp = 0;
3633 /* Am I the process that owns the transaction? */
3634 error = sep_check_transaction_owner(sep);
3636 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3641 /* Check that the user has called the necessary APIs */
3642 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3643 &call_status->status)) {
3644 dev_warn(&sep->pdev->dev,
3645 "[PID%d] fastcall write not called\n",
3648 goto end_function_error;
3652 dev_warn(&sep->pdev->dev,
3653 "[PID%d] null user buffer\n",
3656 goto end_function_error;
3660 /* Wait for SEP to finish */
3661 wait_event(sep->event_interrupt,
3662 test_bit(SEP_WORKING_LOCK_BIT,
3663 &sep->in_use_flags) == 0);
3665 sep_dump_message(sep);
3667 dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3668 current->pid, count_user);
3670 /* In case the user has allocated a bigger buffer */
3671 if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3672 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3674 if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3676 goto end_function_error;
3679 dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3683 /* Copy possible tail data to user and free DCB and MLLIs */
3684 error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3686 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3689 /* End the transaction, wakeup pending ones */
3690 error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3693 dev_warn(&sep->pdev->dev,
3694 "[PID%d] ending transaction failed\n",
3702 * sep_fastcall_args_get - Gets fastcall params from user
3704 * @args: Parameters buffer
3705 * @buf_user: User buffer for operation parameters
3706 * @count_user: User buffer size
3708 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3709 struct sep_fastcall_hdr *args,
3710 const char __user *buf_user,
3711 const size_t count_user)
3714 size_t actual_count = 0;
3717 dev_warn(&sep->pdev->dev,
3718 "[PID%d] null user buffer\n",
3724 if (count_user < sizeof(struct sep_fastcall_hdr)) {
3725 dev_warn(&sep->pdev->dev,
3726 "[PID%d] too small message size 0x%08zX\n",
3727 current->pid, count_user);
3733 if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3738 if (SEP_FC_MAGIC != args->magic) {
3739 dev_warn(&sep->pdev->dev,
3740 "[PID%d] invalid fastcall magic 0x%08X\n",
3741 current->pid, args->magic);
3746 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3747 current->pid, args->num_dcbs);
3748 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3749 current->pid, args->msg_len);
3751 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3752 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3753 dev_warn(&sep->pdev->dev,
3754 "[PID%d] invalid message length\n",
3760 actual_count = sizeof(struct sep_fastcall_hdr)
3762 + (args->num_dcbs * sizeof(struct build_dcb_struct));
3764 if (actual_count != count_user) {
3765 dev_warn(&sep->pdev->dev,
3766 "[PID%d] inconsistent message "
3767 "sizes 0x%08zX vs 0x%08zX\n",
3768 current->pid, actual_count, count_user);
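/*
 * A fastcall write buffer is laid out as the header followed by
 * num_dcbs DCB argument blocks and then the message payload. A sketch
 * of the fixed-layout (pre-message) portion of the size check above,
 * under a hypothetical helper name; the excerpt elides part of the
 * full computation:
 */
static inline size_t sep_fastcall_fixed_size(const struct sep_fastcall_hdr *hdr)
{
	return sizeof(struct sep_fastcall_hdr)
		+ hdr->num_dcbs * sizeof(struct build_dcb_struct);
}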
3778 * sep_write - Starts an operation for fastcall interface
3779 * @filp: File pointer
3780 * @buf_user: User buffer for operation parameters
3781 * @count_user: User buffer size
3782 * @offset: File offset, not supported
3784 * The implementation does not support writing in chunks;
3785 * all data must be given during a single write system call.
3787 static ssize_t sep_write(struct file *filp,
3788 const char __user *buf_user, size_t count_user,
3791 struct sep_private_data * const private_data = filp->private_data;
3792 struct sep_call_status *call_status = &private_data->call_status;
3793 struct sep_device *sep = private_data->device;
3794 struct sep_dma_context *dma_ctx = NULL;
3795 struct sep_fastcall_hdr call_hdr = {0};
3796 void *msg_region = NULL;
3797 void *dmatables_region = NULL;
3798 struct sep_dcblock *dcb_region = NULL;
3800 struct sep_queue_info *my_queue_elem = NULL;
3801 bool my_secure_dma; /* are we using secure_dma (IMR)? */
3803 dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3805 dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3806 current->pid, private_data);
3808 error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3812 buf_user += sizeof(struct sep_fastcall_hdr);
3814 if (call_hdr.secure_dma == 0)
3815 my_secure_dma = false;
3817 my_secure_dma = true;
3820 * Control driver memory usage by limiting the number of
3821 * buffers created; only SEP_DOUBLEBUF_USERS_LIMIT threads
3822 * can progress further at a time
3824 dev_dbg(&sep->pdev->dev,
3825 "[PID%d] waiting for double buffering region access\n",
3827 error = down_interruptible(&sep->sep_doublebuf);
3828 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3831 /* Signal received */
3832 goto end_function_error;
3837 * Prepare contents of the shared area regions for
3838 * the operation into temporary buffers
3840 if (0 < call_hdr.num_dcbs) {
3841 error = sep_create_dcb_dmatables_context(sep,
3845 (const struct build_dcb_struct __user *)
3847 call_hdr.num_dcbs, my_secure_dma);
3849 goto end_function_error_doublebuf;
3851 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3854 error = sep_create_msgarea_context(sep,
3859 goto end_function_error_doublebuf;
3861 dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3863 my_queue_elem = sep_queue_status_add(sep,
3864 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3865 (dma_ctx) ? dma_ctx->input_data_len : 0,
3867 current->comm, sizeof(current->comm));
3869 if (!my_queue_elem) {
3870 dev_dbg(&sep->pdev->dev,
3871 "[PID%d] updating queue status error\n", current->pid);
3873 goto end_function_error_doublebuf;
3876 /* Wait until current process gets the transaction */
3877 error = sep_wait_transaction(sep);
3880 /* Interrupted by signal, don't clear transaction */
3881 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3883 sep_queue_status_remove(sep, &my_queue_elem);
3884 goto end_function_error_doublebuf;
3887 dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3889 private_data->my_queue_elem = my_queue_elem;
3891 /* Activate shared area regions for the transaction */
3892 error = sep_activate_msgarea_context(sep, &msg_region,
3895 goto end_function_error_clear_transact;
3897 sep_dump_message(sep);
3899 if (0 < call_hdr.num_dcbs) {
3900 error = sep_activate_dcb_dmatables_context(sep,
3905 goto end_function_error_clear_transact;
3908 /* Send command to SEP */
3909 error = sep_send_command_handler(sep);
3911 goto end_function_error_clear_transact;
3913 /* Store DMA context for the transaction */
3914 private_data->dma_ctx = dma_ctx;
3915 /* Update call status */
3916 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3919 up(&sep->sep_doublebuf);
3920 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3925 end_function_error_clear_transact:
3926 sep_end_transaction_handler(sep, &dma_ctx, call_status,
3927 &private_data->my_queue_elem);
3929 end_function_error_doublebuf:
3930 up(&sep->sep_doublebuf);
3931 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3936 sep_free_dma_table_data_handler(sep, &dma_ctx);
3940 kfree(dmatables_region);
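/*
 * Hypothetical user-space sketch of one fastcall transaction driven by
 * the write()/read() pair implemented above; buffer handling is
 * illustrative only:
 *
 *	struct sep_fastcall_hdr hdr = {
 *		.magic      = SEP_FC_MAGIC,
 *		.secure_dma = 0,
 *		.num_dcbs   = 1,
 *		.msg_len    = msg_len,
 *	};
 *	// layout: [hdr][build_dcb_struct x num_dcbs][message]
 *	write(fd, buf, total_len);	// starts the operation
 *	read(fd, reply, reply_len);	// blocks until SEP finishes
 */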
3946 * sep_seek - Handler for seek system call
3947 * @filp: File pointer
3948 * @offset: File offset
3949 * @origin: Options for offset
3951 * The fastcall interface does not support seeking; all reads
3952 * and writes are from/to offset zero
3954 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3962 * sep_file_operations - file operation on sep device
3963 * @sep_ioctl: ioctl handler from user space call
3964 * @sep_poll: poll handler
3965 * @sep_open: handles sep device open request
3966 * @sep_release:handles sep device release request
3967 * @sep_mmap: handles memory mapping requests
3968 * @sep_read: handles read request on sep device
3969 * @sep_write: handles write request on sep device
3970 * @sep_seek: handles seek request on sep device
3972 static const struct file_operations sep_file_operations = {
3973 .owner = THIS_MODULE,
3974 .unlocked_ioctl = sep_ioctl,
3977 .release = sep_release,
3985 * sep_sysfs_read - read sysfs entry per given arguments
3986 * @filp: file pointer
3987 * @kobj: kobject pointer
3988 * @attr: binary file attributes
3989 * @buf: read to this buffer
3990 * @pos: offset to read
3991 * @count: amount of data to read
3993 * This function reads sysfs entries for the sep driver per the given arguments.
3996 sep_sysfs_read(struct file *filp, struct kobject *kobj,
3997 struct bin_attribute *attr,
3998 char *buf, loff_t pos, size_t count)
4000 unsigned long lck_flags;
4001 size_t nleft = count;
4002 struct sep_device *sep = sep_dev;
4003 struct sep_queue_info *queue_elem = NULL;
4007 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4009 queue_num = sep->sep_queue_num;
4010 if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4011 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4014 if (count < sizeof(queue_num)
4015 + (queue_num * sizeof(struct sep_queue_data))) {
4016 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4020 memcpy(buf, &queue_num, sizeof(queue_num));
4021 buf += sizeof(queue_num);
4022 nleft -= sizeof(queue_num);
4024 list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4025 if (i++ > queue_num)
4028 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4029 nleft -= sizeof(queue_elem->data);
4030 buf += sizeof(queue_elem->data);
4032 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4034 return count - nleft;
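/*
 * The queue_status blob produced above is a count followed by that
 * many sep_queue_data records. A hypothetical user-space sketch of
 * parsing it (the count is assumed to be a u32-sized field):
 *
 *	u32 queue_num;
 *	memcpy(&queue_num, buf, sizeof(queue_num));
 *	buf += sizeof(queue_num);
 *	for (i = 0; i < queue_num; i++) {
 *		struct sep_queue_data qd;
 *		memcpy(&qd, buf, sizeof(qd));
 *		buf += sizeof(qd);
 *	}
 */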
4038 * bin_attributes - defines attributes for queue_status
4039 * @attr: attributes (name & permissions)
4040 * @read: function pointer to read this file
4041 * @size: maximum size of binary attribute
4043 static const struct bin_attribute queue_status = {
4044 .attr = {.name = "queue_status", .mode = 0444},
4045 .read = sep_sysfs_read,
4047 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4051 * sep_register_driver_with_fs - register misc devices
4052 * @sep: pointer to struct sep_device
4054 * This function registers the driver with the file system
4056 static int sep_register_driver_with_fs(struct sep_device *sep)
4060 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4061 sep->miscdev_sep.name = SEP_DEV_NAME;
4062 sep->miscdev_sep.fops = &sep_file_operations;
4064 ret_val = misc_register(&sep->miscdev_sep);
4066 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4071 ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4074 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4076 misc_deregister(&sep->miscdev_sep);
4085 * sep_probe - probe a matching PCI device
4087 * @ent: pci_device_id
4089 * Attempt to set up and configure a SEP device that has been
4090 * discovered by the PCI layer. Allocates all required resources.
4092 static int sep_probe(struct pci_dev *pdev,
4093 const struct pci_device_id *ent)
4096 struct sep_device *sep = NULL;
4098 if (sep_dev != NULL) {
4099 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4103 /* Enable the device */
4104 error = pci_enable_device(pdev);
4106 dev_warn(&pdev->dev, "error enabling pci device\n");
4110 /* Allocate the sep_device structure for this device */
4111 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4112 if (sep_dev == NULL) {
4114 goto end_function_disable_device;
4118 * We're going to use another variable for actually
4119 * working with the device; this way, if we have
4120 * multiple devices in the future, it would be easier
4121 * to make appropriate changes
4125 sep->pdev = pci_dev_get(pdev);
4127 init_waitqueue_head(&sep->event_transactions);
4128 init_waitqueue_head(&sep->event_interrupt);
4129 spin_lock_init(&sep->snd_rply_lck);
4130 spin_lock_init(&sep->sep_queue_lock);
4131 sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4133 INIT_LIST_HEAD(&sep->sep_queue_status);
4135 dev_dbg(&sep->pdev->dev,
4136 "sep probe: PCI obtained, device being prepared\n");
4138 /* Set up our register area */
4139 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4140 if (!sep->reg_physical_addr) {
4141 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4143 goto end_function_free_sep_dev;
4146 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4147 if (!sep->reg_physical_end) {
4148 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4150 goto end_function_free_sep_dev;
4153 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4154 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4155 if (!sep->reg_addr) {
4156 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4158 goto end_function_free_sep_dev;
4161 dev_dbg(&sep->pdev->dev,
4162 "Register area start %llx end %llx virtual %p\n",
4163 (unsigned long long)sep->reg_physical_addr,
4164 (unsigned long long)sep->reg_physical_end,
4167 /* Allocate the shared area */
4168 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4169 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4170 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4171 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4172 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4174 if (sep_map_and_alloc_shared_area(sep)) {
4176 /* Allocation failed */
4177 goto end_function_error;
4180 /* Clear ICR register */
4181 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4183 /* Set the IMR register - open only GPR 2 */
4184 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4186 /* Read send/receive counters from SEP */
4187 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4188 sep->reply_ct &= 0x3FFFFFFF;
4189 sep->send_ct = sep->reply_ct;
4191 /* Get the interrupt line */
4192 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4196 goto end_function_deallocate_sep_shared_area;
4198 /* The new chip requires a shared area reconfigure */
4199 error = sep_reconfig_shared_area(sep);
4201 goto end_function_free_irq;
4205 /* Finally magic up the device nodes */
4206 /* Register driver with the fs */
4207 error = sep_register_driver_with_fs(sep);
4210 dev_err(&sep->pdev->dev, "error registering dev file\n");
4211 goto end_function_free_irq;
4214 sep->in_use = 0; /* not in use until a process touches the device */
4215 #ifdef SEP_ENABLE_RUNTIME_PM
4216 pm_runtime_put_noidle(&sep->pdev->dev);
4217 pm_runtime_allow(&sep->pdev->dev);
4218 pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4220 pm_runtime_use_autosuspend(&sep->pdev->dev);
4221 pm_runtime_mark_last_busy(&sep->pdev->dev);
4222 sep->power_save_setup = 1;
4224 /* register kernel crypto driver */
4225 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4226 error = sep_crypto_setup();
4228 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4229 goto end_function_free_irq;
4234 end_function_free_irq:
4235 free_irq(pdev->irq, sep);
4237 end_function_deallocate_sep_shared_area:
4238 /* De-allocate shared area */
4239 sep_unmap_and_free_shared_area(sep);
4242 iounmap(sep->reg_addr);
4244 end_function_free_sep_dev:
4245 pci_dev_put(sep_dev->pdev);
4249 end_function_disable_device:
4250 pci_disable_device(pdev);
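/*
 * The ICR clear plus IMR mask pair recurs at probe time and in every
 * resume path: all pending causes are acked and only the host GPR2
 * cause (bit 13) is left unmasked. A sketch consolidating the pattern
 * under a hypothetical helper name:
 */
static inline void sep_open_gpr2_irq_only(struct sep_device *sep)
{
	/* ack everything, then unmask only GPR2 */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, ~(0x1 << 13));
}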
4257 * sep_remove - handles removing device from pci subsystem
4258 * @pdev: pointer to pci device
4260 * This function handles removing our sep device from the pci subsystem on
4261 * exit or when unloading this module. It frees all used resources and
4262 * unmaps any mapped memory regions.
4264 static void sep_remove(struct pci_dev *pdev)
4266 struct sep_device *sep = sep_dev;
4268 /* Unregister from fs */
4269 misc_deregister(&sep->miscdev_sep);
4271 /* Unregister from kernel crypto */
4272 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4273 sep_crypto_takedown();
4276 free_irq(sep->pdev->irq, sep);
4278 /* Free the shared area */
4279 sep_unmap_and_free_shared_area(sep_dev);
4280 iounmap(sep_dev->reg_addr);
4282 #ifdef SEP_ENABLE_RUNTIME_PM
4285 pm_runtime_forbid(&sep->pdev->dev);
4286 pm_runtime_get_noresume(&sep->pdev->dev);
4289 pci_dev_put(sep_dev->pdev);
4294 /* Initialize struct pci_device_id for our driver */
4295 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4296 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4297 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4301 /* Export our pci_device_id structure to user space */
4302 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4304 #ifdef SEP_ENABLE_RUNTIME_PM
4307 * sep_pci_resume - resume routine while waking up from S3 state
4308 * @dev: pointer to sep device
4310 * This function wakes up the sep driver as the system resumes from the S3
4311 * (suspend-to-RAM) state, in which RAM contents remain intact.
4312 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4314 static int sep_pci_resume(struct device *dev)
4316 struct sep_device *sep = sep_dev;
4318 dev_dbg(&sep->pdev->dev, "pci resume called\n");
4320 if (sep->power_state == SEP_DRIVER_POWERON)
4323 /* Clear ICR register */
4324 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4326 /* Set the IMR register - open only GPR 2 */
4327 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4329 /* Read send/receive counters from SEP */
4330 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4331 sep->reply_ct &= 0x3FFFFFFF;
4332 sep->send_ct = sep->reply_ct;
4334 sep->power_state = SEP_DRIVER_POWERON;
4340 * sep_pm_suspend - suspend routine while going to S3 state
4341 * @dev: pointer to sep device
4343 * This function suspends the sep driver as the system enters the S3
4344 * (suspend-to-RAM) state, in which RAM remains powered and intact.
4345 * Notes - revisit with more understanding of pm, ICR/IMR
4347 static int sep_pci_suspend(struct device *dev)
4349 struct sep_device *sep = sep_dev;
4351 dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4352 if (sep->in_use == 1)
4355 sep->power_state = SEP_DRIVER_POWEROFF;
4357 /* Clear ICR register */
4358 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4360 /* Set the IMR to block all */
4361 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4367 * sep_pm_runtime_resume - runtime resume routine
4368 * @dev: pointer to sep device
4370 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4372 static int sep_pm_runtime_resume(struct device *dev)
4377 struct sep_device *sep = sep_dev;
4379 dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4382 * Wait until the SCU boot is ready
4383 * This is done by iterating SCU_DELAY_ITERATION (10
4384 * microseconds each) up to SCU_DELAY_MAX (50) times.
4385 * This bit can become set at any time within 500
4386 * microseconds after each power resume
4390 while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4391 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4392 retval2 &= 0x00000008;
4394 udelay(SCU_DELAY_ITERATION);
4400 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4404 /* Clear ICR register */
4405 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4407 /* Set the IMR register - open only GPR 2 */
4408 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4410 /* Read send/receive counters from SEP */
4411 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4412 sep->reply_ct &= 0x3FFFFFFF;
4413 sep->send_ct = sep->reply_ct;
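/*
 * The SCU readiness wait above polls bit 3 of GPR3 up to SCU_DELAY_MAX
 * times, sleeping SCU_DELAY_ITERATION microseconds between reads. A
 * sketch of the loop as a hypothetical helper:
 */
static inline bool sep_scu_boot_ready(struct sep_device *sep)
{
	u32 delay_count = 0;

	while (delay_count++ < SCU_DELAY_MAX) {
		if (sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR)
				& 0x00000008)
			return true;	/* boot-ready bit set */
		udelay(SCU_DELAY_ITERATION);
	}
	return false;
}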
4419 * sep_pm_runtime_suspend - runtime suspend routine
4420 * @dev: pointer to sep device
4422 * Notes - revisit with more understanding of pm
4424 static int sep_pm_runtime_suspend(struct device *dev)
4426 struct sep_device *sep = sep_dev;
4428 dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4430 /* Clear ICR register */
4431 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4436 * sep_pm - power management for sep driver
4437 * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
4438 * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
4439 * @sep_pci_suspend: suspend - main memory is still ON
4440 * @sep_pci_resume: resume - main memory is still ON
4442 static const struct dev_pm_ops sep_pm = {
4443 .runtime_resume = sep_pm_runtime_resume,
4444 .runtime_suspend = sep_pm_runtime_suspend,
4445 .resume = sep_pci_resume,
4446 .suspend = sep_pci_suspend,
4448 #endif /* SEP_ENABLE_RUNTIME_PM */
4451 * sep_pci_driver - registers this device with pci subsystem
4452 * @name: name identifier for this driver
4453 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4454 * @sep_probe: pointer to probe function in PCI driver
4455 * @sep_remove: pointer to remove function in PCI driver
4457 static struct pci_driver sep_pci_driver = {
4458 #ifdef SEP_ENABLE_RUNTIME_PM
4463 .name = "sep_sec_driver",
4464 .id_table = sep_pci_id_tbl,
4466 .remove = sep_remove
4469 module_pci_driver(sep_pci_driver);
4470 MODULE_LICENSE("GPL");