3 * sep_main.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2011 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
31 * 2011.02.22 Enable kernel crypto operation
33 * Please note that this driver is based on information in the Discretix
34 * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37 * Overview and Integration Guide.
40 /* #define SEP_PERF_DEBUG */
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/miscdevice.h>
47 #include <linux/cdev.h>
48 #include <linux/kdev_t.h>
49 #include <linux/mutex.h>
50 #include <linux/sched.h>
52 #include <linux/poll.h>
53 #include <linux/wait.h>
54 #include <linux/pci.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/slab.h>
57 #include <linux/ioctl.h>
58 #include <asm/current.h>
59 #include <linux/ioport.h>
61 #include <linux/interrupt.h>
62 #include <linux/pagemap.h>
63 #include <asm/cacheflush.h>
64 #include <linux/delay.h>
65 #include <linux/jiffies.h>
66 #include <linux/async.h>
67 #include <linux/crypto.h>
68 #include <crypto/internal/hash.h>
69 #include <crypto/scatterwalk.h>
70 #include <crypto/sha.h>
71 #include <crypto/md5.h>
72 #include <crypto/aes.h>
73 #include <crypto/des.h>
74 #include <crypto/hash.h>
76 #include "sep_driver_hw_defs.h"
77 #include "sep_driver_config.h"
78 #include "sep_driver_api.h"
80 #include "sep_crypto.h"
82 #define CREATE_TRACE_POINTS
83 #include "sep_trace_events.h"
86 * Let's not spend cycles iterating over message
87 * area contents if debugging is not enabled
90 #define sep_dump_message(sep) _sep_dump_message(sep)
92 #define sep_dump_message(sep)
96 * Currently, there is only one SEP device per platform;
97 * in the event that future platforms have more than one SEP
98 * device, this will become a linked list
101 struct sep_device *sep_dev;
104 * sep_queue_status_remove - Removes transaction from status queue
106 * @queue_elem: pointer to the queue element of the transaction to remove
108 * This function removes information about a transaction from the queue.
110 void sep_queue_status_remove(struct sep_device *sep,
111 struct sep_queue_info **queue_elem)
113 unsigned long lck_flags;
115 dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
118 if (!queue_elem || !(*queue_elem)) {
119 dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
120 current->pid, __func__);
124 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
125 list_del(&(*queue_elem)->list);
126 sep->sep_queue_num--;
127 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
132 dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
138 * sep_queue_status_add - Adds transaction to status queue
140 * @opcode: transaction opcode
141 * @size: input data size
142 * @pid: pid of current process
143 * @name: current process name
144 * @name_len: length of name (current process)
146 * This function adds information about a started transaction to the status
149 struct sep_queue_info *sep_queue_status_add(
150 struct sep_device *sep,
154 u8 *name, size_t name_len)
156 unsigned long lck_flags;
157 struct sep_queue_info *my_elem = NULL;
159 my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
164 dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
166 my_elem->data.opcode = opcode;
167 my_elem->data.size = size;
168 my_elem->data.pid = pid;
170 if (name_len > TASK_COMM_LEN)
171 name_len = TASK_COMM_LEN;
173 memcpy(&my_elem->data.name, name, name_len);
175 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
177 list_add_tail(&my_elem->list, &sep->sep_queue_status);
178 sep->sep_queue_num++;
180 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
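/*
 * Illustrative sketch (not part of the driver): a transaction path is
 * expected to pair sep_queue_status_add() with sep_queue_status_remove()
 * around the actual work. The opcode and data_size used below are
 * hypothetical placeholders, shown only to illustrate the call pattern.
 *
 *	struct sep_queue_info *elem;
 *
 *	elem = sep_queue_status_add(sep, opcode, data_size, current->pid,
 *				    (u8 *)current->comm,
 *				    strlen(current->comm));
 *	if (!elem)
 *		return -ENOMEM;
 *	... perform the transaction ...
 *	sep_queue_status_remove(sep, &elem);
 */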
186 * sep_allocate_dmatables_region - Allocates a buffer for the MLLI/DMA tables
188 * @dmatables_region: Destination pointer for the buffer
189 * @dma_ctx: DMA context for the transaction
190 * @table_count: Number of MLLI/DMA tables to create
191 * The buffer created will not work as-is for DMA operations,
192 * it needs to be copied over to the appropriate place in the
195 static int sep_allocate_dmatables_region(struct sep_device *sep,
196 void **dmatables_region,
197 struct sep_dma_context *dma_ctx,
198 const u32 table_count)
200 const size_t new_len =
201 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
203 void *tmp_region = NULL;
205 dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
206 current->pid, dma_ctx);
207 dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
208 current->pid, dmatables_region);
210 if (!dma_ctx || !dmatables_region) {
211 dev_warn(&sep->pdev->dev,
212 "[PID%d] dma context/region uninitialized\n",
217 dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
218 current->pid, new_len);
219 dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
220 dma_ctx->dmatables_len);
221 tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
225 /* Were there any previous tables that need to be preserved ? */
226 if (*dmatables_region) {
227 memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
228 kfree(*dmatables_region);
229 *dmatables_region = NULL;
232 *dmatables_region = tmp_region;
234 dma_ctx->dmatables_len += new_len;
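/*
 * Illustrative sketch (an assumption about usage, not driver code): the
 * region allocated above is only a staging buffer for the MLLI/DMA
 * tables; before the SEP can use them, the tables are expected to be
 * copied into the shared area, e.g.:
 *
 *	void *dmatables = NULL;
 *	int error;
 *
 *	error = sep_allocate_dmatables_region(sep, &dmatables, dma_ctx,
 *					      table_count);
 *	if (error)
 *		return error;
 *	... build the tables in dmatables ...
 *	memcpy(sep->shared_addr + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES,
 *	       dmatables, dma_ctx->dmatables_len);
 *
 * where table_count is the number of tables needed for the transaction
 * and the destination offset mirrors the one used when tables are built
 * directly in the shared area (see sep_prepare_input_dma_table() below).
 */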
240 * sep_wait_transaction - Used for synchronizing transactions
243 int sep_wait_transaction(struct sep_device *sep)
248 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
249 &sep->in_use_flags)) {
250 dev_dbg(&sep->pdev->dev,
251 "[PID%d] no transactions, returning\n",
253 goto end_function_setpid;
257 * Looping is needed even for exclusive waitq entries
258 * due to process wakeup latencies: a previously woken
259 * process might have already created another transaction.
263 * Exclusive waitq entry, so that only one process is
264 * woken up from the queue at a time.
266 prepare_to_wait_exclusive(&sep->event_transactions,
269 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
270 &sep->in_use_flags)) {
271 dev_dbg(&sep->pdev->dev,
272 "[PID%d] no transactions, breaking\n",
276 dev_dbg(&sep->pdev->dev,
277 "[PID%d] transactions ongoing, sleeping\n",
280 dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
282 if (signal_pending(current)) {
283 dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
291 * The pid_doing_transaction indicates that this process
292 * now owns the facilities to perform a transaction with
293 * the SEP. While this process is performing a transaction,
294 * no other process that has the SEP device open can perform
295 * any transactions. This method allows more than one process
296 * to have the device open at any given time, which provides
297 * finer granularity for device utilization by multiple
300 /* Only one process is able to progress here at a time */
301 sep->pid_doing_transaction = current->pid;
304 finish_wait(&sep->event_transactions, &wait);
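/*
 * Illustrative sketch (not part of the driver): callers are expected to
 * bracket a transaction with sep_wait_transaction() and
 * sep_end_transaction_handler() (defined later in this file):
 *
 *	error = sep_wait_transaction(sep);
 *	if (error)
 *		return error;	interrupted by a signal while waiting
 *	... build message, send command, poll for the reply ...
 *	sep_end_transaction_handler(sep, &dma_ctx, call_status,
 *				    &my_queue_elem);
 */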
310 * sep_check_transaction_owner - Checks if current process owns transaction
313 static inline int sep_check_transaction_owner(struct sep_device *sep)
315 dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
317 sep->pid_doing_transaction);
319 if ((sep->pid_doing_transaction == 0) ||
320 (current->pid != sep->pid_doing_transaction)) {
324 /* We own the transaction */
331 * sep_dump_message - dump the message that is pending
333 * This will print the dump only if DEBUG is set; it
334 * follows the kernel debug print enabling
336 static void _sep_dump_message(struct sep_device *sep)
340 u32 *p = sep->shared_addr;
342 for (count = 0; count < 10 * 4; count += 4)
343 dev_dbg(&sep->pdev->dev,
344 "[PID%d] Word %d of the message is %x\n",
345 current->pid, count/4, *p++);
351 * sep_map_and_alloc_shared_area - allocate shared block
352 * @sep: security processor
353 * @size: size of shared area
355 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
357 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
359 &sep->shared_bus, GFP_KERNEL);
361 if (!sep->shared_addr) {
362 dev_dbg(&sep->pdev->dev,
363 "[PID%d] shared memory dma_alloc_coherent failed\n",
367 dev_dbg(&sep->pdev->dev,
368 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
370 sep->shared_size, sep->shared_addr,
371 (unsigned long long)sep->shared_bus);
376 * sep_unmap_and_free_shared_area - free shared block
377 * @sep: security processor
379 static void sep_unmap_and_free_shared_area(struct sep_device *sep)
381 dma_free_coherent(&sep->pdev->dev, sep->shared_size,
382 sep->shared_addr, sep->shared_bus);
388 * sep_shared_bus_to_virt - convert bus/virt addresses
389 * @sep: pointer to struct sep_device
390 * @bus_address: address to convert
392 * Returns virtual address inside the shared area according
393 * to the bus address.
395 static void *sep_shared_bus_to_virt(struct sep_device *sep,
396 dma_addr_t bus_address)
398 return sep->shared_addr + (bus_address - sep->shared_bus);
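/*
 * Illustrative example (addresses are hypothetical): if shared_bus is
 * 0xdf000000 and a table lives at bus address 0xdf000040, the kernel
 * can reach it at sep->shared_addr + 0x40. The inverse conversion is
 * performed by sep_shared_area_virt_to_bus() further below.
 */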
404 * sep_open - device open method
405 * @inode: inode of SEP device
406 * @filp: file handle to SEP device
408 * Open method for the SEP device. Called when userspace opens
409 * the SEP device node.
411 * Returns zero on success otherwise an error code.
413 static int sep_open(struct inode *inode, struct file *filp)
415 struct sep_device *sep;
416 struct sep_private_data *priv;
418 dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
420 if (filp->f_flags & O_NONBLOCK)
424 * Get the SEP device structure and use it for the
425 * private_data field in filp for other methods
428 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
434 filp->private_data = priv;
436 dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
439 /* Anyone can open; locking takes place at transaction level */
444 * sep_free_dma_table_data_handler - free DMA table
445 * @sep: pointer to struct sep_device
446 * @dma_ctx: dma context
448 * Handles the request to free DMA table for synchronic actions
450 int sep_free_dma_table_data_handler(struct sep_device *sep,
451 struct sep_dma_context **dma_ctx)
455 /* Pointer to the current dma_resource struct */
456 struct sep_dma_resource *dma;
458 dev_dbg(&sep->pdev->dev,
459 "[PID%d] sep_free_dma_table_data_handler\n",
462 if (!dma_ctx || !(*dma_ctx)) {
463 /* No context or context already freed */
464 dev_dbg(&sep->pdev->dev,
465 "[PID%d] no DMA context or context already freed\n",
471 dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
473 (*dma_ctx)->nr_dcb_creat);
475 for (dcb_counter = 0;
476 dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
477 dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
479 /* Unmap and free input map array */
480 if (dma->in_map_array) {
481 for (count = 0; count < dma->in_num_pages; count++) {
482 dma_unmap_page(&sep->pdev->dev,
483 dma->in_map_array[count].dma_addr,
484 dma->in_map_array[count].size,
487 kfree(dma->in_map_array);
491 * Output is handled differently. If
492 * this was a secure dma into restricted memory,
493 * then we skip this step altogether as restricted
494 * memory is not available to the o/s at all.
496 if (((*dma_ctx)->secure_dma == false) &&
497 (dma->out_map_array)) {
499 for (count = 0; count < dma->out_num_pages; count++) {
500 dma_unmap_page(&sep->pdev->dev,
501 dma->out_map_array[count].dma_addr,
502 dma->out_map_array[count].size,
505 kfree(dma->out_map_array);
508 /* Free page cache for input */
509 if (dma->in_page_array) {
510 for (count = 0; count < dma->in_num_pages; count++) {
511 flush_dcache_page(dma->in_page_array[count]);
512 page_cache_release(dma->in_page_array[count]);
514 kfree(dma->in_page_array);
517 /* Again, we do this only for non secure dma */
518 if (((*dma_ctx)->secure_dma == false) &&
519 (dma->out_page_array)) {
521 for (count = 0; count < dma->out_num_pages; count++) {
522 if (!PageReserved(dma->out_page_array[count]))
525 out_page_array[count]);
527 flush_dcache_page(dma->out_page_array[count]);
528 page_cache_release(dma->out_page_array[count]);
530 kfree(dma->out_page_array);
534 * Note that here we use in_map_num_entries because we
535 * don't have a page array; the page array is generated
536 * only in the lock_user_pages, which is not called
537 * for kernel crypto, which is what the sg (scatter gather
538 * list) is used for exclusively
541 dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
542 dma->in_map_num_entries, DMA_TO_DEVICE);
547 dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
548 dma->in_map_num_entries, DMA_FROM_DEVICE);
552 /* Reset all the values */
553 dma->in_page_array = NULL;
554 dma->out_page_array = NULL;
555 dma->in_num_pages = 0;
556 dma->out_num_pages = 0;
557 dma->in_map_array = NULL;
558 dma->out_map_array = NULL;
559 dma->in_map_num_entries = 0;
560 dma->out_map_num_entries = 0;
563 (*dma_ctx)->nr_dcb_creat = 0;
564 (*dma_ctx)->num_lli_tables_created = 0;
569 dev_dbg(&sep->pdev->dev,
570 "[PID%d] sep_free_dma_table_data_handler end\n",
577 * sep_end_transaction_handler - end transaction
578 * @sep: pointer to struct sep_device
579 * @dma_ctx: DMA context
580 * @call_status: Call status
582 * This API handles the end transaction request.
584 static int sep_end_transaction_handler(struct sep_device *sep,
585 struct sep_dma_context **dma_ctx,
586 struct sep_call_status *call_status,
587 struct sep_queue_info **my_queue_elem)
589 dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
592 * Extraneous transaction clearing would mess up PM
593 * device usage counters and SEP would get suspended
594 * just before we send a command to SEP in the next
597 if (sep_check_transaction_owner(sep)) {
598 dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
603 /* Update queue status */
604 sep_queue_status_remove(sep, my_queue_elem);
606 /* Check that all the DMA resources were freed */
608 sep_free_dma_table_data_handler(sep, dma_ctx);
610 /* Reset call status for next transaction */
612 call_status->status = 0;
614 /* Clear the message area to avoid next transaction reading
615 * sensitive results from previous transaction */
616 memset(sep->shared_addr, 0,
617 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
619 /* start suspend delay */
620 #ifdef SEP_ENABLE_RUNTIME_PM
623 pm_runtime_mark_last_busy(&sep->pdev->dev);
624 pm_runtime_put_autosuspend(&sep->pdev->dev);
628 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
629 sep->pid_doing_transaction = 0;
631 /* Now it's safe for next process to proceed */
632 dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
634 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
635 wake_up(&sep->event_transactions);
642 * sep_release - close a SEP device
643 * @inode: inode of SEP device
644 * @filp: file handle being closed
646 * Called on the final close of a SEP device.
648 static int sep_release(struct inode *inode, struct file *filp)
650 struct sep_private_data * const private_data = filp->private_data;
651 struct sep_call_status *call_status = &private_data->call_status;
652 struct sep_device *sep = private_data->device;
653 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
654 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
656 dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
658 sep_end_transaction_handler(sep, dma_ctx, call_status,
661 kfree(filp->private_data);
667 * sep_mmap - maps the shared area to user space
668 * @filp: pointer to struct file
669 * @vma: pointer to vm_area_struct
671 * Called on an mmap of our space via the normal SEP device
673 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
675 struct sep_private_data * const private_data = filp->private_data;
676 struct sep_call_status *call_status = &private_data->call_status;
677 struct sep_device *sep = private_data->device;
678 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
680 unsigned long error = 0;
682 dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
684 /* Set the transaction busy (own the device) */
686 * The problem for multithreaded applications is that here we're
687 * possibly going to sleep while holding a write lock on
688 * current->mm->mmap_sem, which will cause a deadlock for an ongoing
689 * transaction trying to create DMA tables
691 error = sep_wait_transaction(sep);
693 /* Interrupted by signal, don't clear transaction */
696 /* Clear the message area to avoid next transaction reading
697 * sensitive results from previous transaction */
698 memset(sep->shared_addr, 0,
699 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
702 * Check that the size of the mapped range does not exceed the size of the message
705 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
707 goto end_function_with_error;
710 dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
711 current->pid, sep->shared_addr);
713 /* Get bus address */
714 bus_addr = sep->shared_bus;
716 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
717 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
718 dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
721 goto end_function_with_error;
724 /* Update call status */
725 set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
729 end_function_with_error:
730 /* Clear our transaction */
731 sep_end_transaction_handler(sep, NULL, call_status,
739 * sep_poll - poll handler
740 * @filp: pointer to struct file
741 * @wait: pointer to poll_table
743 * Called by the OS when the kernel is asked to do a poll on
746 static unsigned int sep_poll(struct file *filp, poll_table *wait)
748 struct sep_private_data * const private_data = filp->private_data;
749 struct sep_call_status *call_status = &private_data->call_status;
750 struct sep_device *sep = private_data->device;
754 unsigned long lock_irq_flag;
756 /* Am I the process that owns the transaction? */
757 if (sep_check_transaction_owner(sep)) {
758 dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
764 /* Check if send command or send_reply were activated previously */
765 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
766 &call_status->status)) {
767 dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
774 /* Add the event to the polling wait table */
775 dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
778 poll_wait(filp, &sep->event_interrupt, wait);
780 dev_dbg(&sep->pdev->dev,
781 "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
782 current->pid, sep->send_ct, sep->reply_ct);
784 /* Check if error occurred during poll */
785 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
786 if ((retval2 != 0x0) && (retval2 != 0x8)) {
787 dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
788 current->pid, retval2);
793 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
795 if (sep->send_ct == sep->reply_ct) {
796 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
797 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
798 dev_dbg(&sep->pdev->dev,
799 "[PID%d] poll: data ready check (GPR2) %x\n",
800 current->pid, retval);
802 /* Check if printf request */
803 if ((retval >> 30) & 0x1) {
804 dev_dbg(&sep->pdev->dev,
805 "[PID%d] poll: SEP printf request\n",
810 /* Check if this is a SEP reply or a request */
812 dev_dbg(&sep->pdev->dev,
813 "[PID%d] poll: SEP request\n",
816 dev_dbg(&sep->pdev->dev,
817 "[PID%d] poll: normal return\n",
819 sep_dump_message(sep);
820 dev_dbg(&sep->pdev->dev,
821 "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
823 mask |= POLLIN | POLLRDNORM;
825 set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
827 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
828 dev_dbg(&sep->pdev->dev,
829 "[PID%d] poll; no reply; returning mask of 0\n",
839 * sep_time_address - address in SEP memory of time
840 * @sep: SEP device we want the address from
842 * Return the address of the two dwords in memory used for time
845 static u32 *sep_time_address(struct sep_device *sep)
847 return sep->shared_addr +
848 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
852 * sep_set_time - set the SEP time
853 * @sep: the SEP we are setting the time for
855 * Calculates time and sets it at the predefined address.
856 * Called with the SEP mutex held.
858 static unsigned long sep_set_time(struct sep_device *sep)
861 u32 *time_addr; /* Address of time as seen by the kernel */
864 do_gettimeofday(&time);
866 /* Set value in the SYSTEM MEMORY offset */
867 time_addr = sep_time_address(sep);
869 time_addr[0] = SEP_TIME_VAL_TOKEN;
870 time_addr[1] = time.tv_sec;
872 dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
873 current->pid, time.tv_sec);
874 dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
875 current->pid, time_addr);
876 dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
877 current->pid, sep->shared_addr);
883 * sep_send_command_handler - kick off a command
884 * @sep: SEP being signalled
886 * This function raises an interrupt to the SEP to signal that there is a new
887 * command from the host
889 * Note that this function does fall under the ioctl lock
891 int sep_send_command_handler(struct sep_device *sep)
893 unsigned long lock_irq_flag;
897 /* Basic sanity check; set msg pool to start of shared area */
898 msg_pool = (u32 *)sep->shared_addr;
901 /* Look for start msg token */
902 if (*msg_pool != SEP_START_MSG_TOKEN) {
903 dev_warn(&sep->pdev->dev, "start message token not present\n");
908 /* Do we have a reasonable size? */
910 if ((*msg_pool < 2) ||
911 (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
913 dev_warn(&sep->pdev->dev, "invalid message size\n");
918 /* Does the command look reasonable? */
921 dev_warn(&sep->pdev->dev, "invalid message opcode\n");
926 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
927 dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
929 sep->pdev->dev.power.runtime_status);
930 sep->in_use = 1; /* device is about to be used */
931 pm_runtime_get_sync(&sep->pdev->dev);
934 if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
938 sep->in_use = 1; /* device is about to be used */
941 sep_dump_message(sep);
944 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
946 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
948 dev_dbg(&sep->pdev->dev,
949 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
950 current->pid, sep->send_ct, sep->reply_ct);
952 /* Send interrupt to SEP */
953 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
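/*
 * Illustrative sketch (an assumption about the calling sequence, not
 * driver code): a caller that has staged a message in the shared area -
 * one starting with SEP_START_MSG_TOKEN, with a size no larger than
 * SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES and a valid opcode, matching the
 * checks above - kicks the SEP roughly like this:
 *
 *	error = sep_wait_transaction(sep);	own the transaction
 *	if (error)
 *		return error;
 *	error = sep_send_command_handler(sep);	raise the GPR0 interrupt
 *	... wait for the reply, e.g. via poll() on the device node ...
 */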
961 * @sep: pointer to struct sep_device
962 * @sg: pointer to struct scatterlist
964 * @dma_maps: pointer to place a pointer to array of dma maps
965 * This is filled in; anything previous there will be lost
966 * The structure for dma maps is sep_dma_map
967 * @returns number of dma maps on success; negative on error
969 * This creates the dma table from the scatterlist
970 * It is used only for kernel crypto as it works with the scatterlist
971 * representation of data buffers
974 static int sep_crypto_dma(
975 struct sep_device *sep,
976 struct scatterlist *sg,
977 struct sep_dma_map **dma_maps,
978 enum dma_data_direction direction)
980 struct scatterlist *temp_sg;
984 struct sep_dma_map *sep_dma;
990 /* Count the segments */
995 temp_sg = scatterwalk_sg_next(temp_sg);
997 dev_dbg(&sep->pdev->dev,
998 "There are (hex) %x segments in sg\n", count_segment);
1000 /* DMA map segments */
1001 count_mapped = dma_map_sg(&sep->pdev->dev, sg,
1002 count_segment, direction);
1004 dev_dbg(&sep->pdev->dev,
1005 "There are (hex) %x maps in sg\n", count_mapped);
1007 if (count_mapped == 0) {
1008 dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
1012 sep_dma = kmalloc(sizeof(struct sep_dma_map) *
1013 count_mapped, GFP_ATOMIC);
1015 if (sep_dma == NULL) {
1016 dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
1020 for_each_sg(sg, temp_sg, count_mapped, ct1) {
1021 sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
1022 sep_dma[ct1].size = sg_dma_len(temp_sg);
1023 dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
1024 ct1, (unsigned long)sep_dma[ct1].dma_addr,
1025 (unsigned long)sep_dma[ct1].size);
1028 *dma_maps = sep_dma;
1029 return count_mapped;
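/*
 * Illustrative sketch (src_sg and the direction are placeholders): a
 * kernel-crypto path might map a scatterlist with sep_crypto_dma() and
 * later release the mapping the same way
 * sep_free_dma_table_data_handler() does above:
 *
 *	struct sep_dma_map *maps;
 *	int nents;
 *
 *	nents = sep_crypto_dma(sep, src_sg, &maps, DMA_TO_DEVICE);
 *	if (nents <= 0)
 *		return -ENOMEM;
 *	... maps[0..nents-1] now hold the bus addresses and sizes ...
 *	dma_unmap_sg(&sep->pdev->dev, src_sg, nents, DMA_TO_DEVICE);
 *	kfree(maps);
 */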
1035 * @sep: pointer to struct sep_device
1036 * @sg: pointer to struct scatterlist
1037 * @data_size: total data size
1039 * @dma_maps: pointer to place a pointer to array of dma maps
1040 * This is filled in; anything previous there will be lost
1041 * The structure for dma maps is sep_dma_map
1042 * @lli_maps: pointer to place a pointer to array of lli maps
1043 * This is filled in; anything previous there will be lost
1044 * The structure for lli maps is sep_lli_entry
1045 * @returns number of dma maps on success; negative on error
1047 * This creates the LLI table from the scatterlist
1048 * It is only used for kernel crypto as it works exclusively
1049 * with scatterlists (struct scatterlist) representation of
1052 static int sep_crypto_lli(
1053 struct sep_device *sep,
1054 struct scatterlist *sg,
1055 struct sep_dma_map **maps,
1056 struct sep_lli_entry **llis,
1058 enum dma_data_direction direction)
1062 struct sep_lli_entry *sep_lli;
1063 struct sep_dma_map *sep_map;
1067 nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
1068 if (nbr_ents <= 0) {
1069 dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
1076 sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
1078 if (sep_lli == NULL) {
1079 dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
1086 for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
1087 sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
1089 /* Maximum for page is total data size */
1090 if (sep_map[ct1].size > data_size)
1091 sep_map[ct1].size = data_size;
1093 sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
1101 * sep_lock_kernel_pages - map kernel pages for DMA
1102 * @sep: pointer to struct sep_device
1103 * @kernel_virt_addr: address of data buffer in kernel
1104 * @data_size: size of data
1105 * @lli_array_ptr: lli array
1106 * @in_out_flag: input into device or output from device
1108 * This function locks all the physical pages of the kernel virtual buffer
1109 * and constructs a basic lli array, where each entry holds the physical
1110 * page address and the size that application data holds in this page
1111 * This function is used only during kernel crypto module calls from within
1112 * the kernel (when ioctl is not used)
1114 * This is used only for kernel crypto. Kernel pages
1115 * are handled differently as they are done via
1116 * scatter gather lists (struct scatterlist)
1118 static int sep_lock_kernel_pages(struct sep_device *sep,
1119 unsigned long kernel_virt_addr,
1121 struct sep_lli_entry **lli_array_ptr,
1123 struct sep_dma_context *dma_ctx)
1127 struct scatterlist *sg;
1130 struct sep_lli_entry *lli_array;
1132 struct sep_dma_map *map_array;
1134 enum dma_data_direction direction;
1139 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1140 direction = DMA_TO_DEVICE;
1141 sg = dma_ctx->src_sg;
1143 direction = DMA_FROM_DEVICE;
1144 sg = dma_ctx->dst_sg;
1147 num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
1148 data_size, direction);
1150 if (num_pages <= 0) {
1151 dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
1156 /* Put mapped kernel sg into kernel resource array */
1158 /* Set output params according to the in_out flag */
1159 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1160 *lli_array_ptr = lli_array;
1161 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1163 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1165 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1167 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1169 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
1172 *lli_array_ptr = lli_array;
1173 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1175 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1177 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1179 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1180 out_map_num_entries = num_pages;
1181 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
1189 * sep_lock_user_pages - lock and map user pages for DMA
1190 * @sep: pointer to struct sep_device
1191 * @app_virt_addr: user memory data buffer
1192 * @data_size: size of data buffer
1193 * @lli_array_ptr: lli array
1194 * @in_out_flag: input or output to device
1196 * This function locks all the physical pages of the application
1197 * virtual buffer and constructs a basic lli array, where each entry
1198 * holds the physical page address and the size that application
1199 * data holds in that physical page
1201 static int sep_lock_user_pages(struct sep_device *sep,
1204 struct sep_lli_entry **lli_array_ptr,
1206 struct sep_dma_context *dma_ctx)
1212 /* The page of the end address of the user space buffer */
1214 /* The page of the start address of the user space buffer */
1216 /* The range in pages */
1218 /* Array of pointers to page */
1219 struct page **page_array;
1221 struct sep_lli_entry *lli_array;
1223 struct sep_dma_map *map_array;
1225 /* Set start and end pages and num pages */
1226 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1227 start_page = app_virt_addr >> PAGE_SHIFT;
1228 num_pages = end_page - start_page + 1;
1230 dev_dbg(&sep->pdev->dev,
1231 "[PID%d] lock user pages app_virt_addr is %x\n",
1232 current->pid, app_virt_addr);
1234 dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1235 current->pid, data_size);
1236 dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1237 current->pid, start_page);
1238 dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1239 current->pid, end_page);
1240 dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1241 current->pid, num_pages);
1243 /* Allocate array of pages structure pointers */
1244 page_array = kmalloc_array(num_pages, sizeof(struct page *),
1251 map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
1255 goto end_function_with_error1;
1258 lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1262 goto end_function_with_error2;
1265 /* Convert the application virtual address into a set of physical pages */
1266 result = get_user_pages_fast(app_virt_addr, num_pages,
1267 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);
1269 /* Check the number of pages locked - if not all then exit with error */
1270 if (result != num_pages) {
1271 dev_warn(&sep->pdev->dev,
1272 "[PID%d] not all pages locked by get_user_pages, "
1273 "result 0x%X, num_pages 0x%X\n",
1274 current->pid, result, num_pages);
1276 goto end_function_with_error3;
1279 dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
1283 * Fill the array using page array data and
1284 * map the pages - this action will also flush the cache as needed
1286 for (count = 0; count < num_pages; count++) {
1287 /* Fill the map array */
1288 map_array[count].dma_addr =
1289 dma_map_page(&sep->pdev->dev, page_array[count],
1290 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1292 map_array[count].size = PAGE_SIZE;
1294 /* Fill the lli array entry */
1295 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1296 lli_array[count].block_size = PAGE_SIZE;
1298 dev_dbg(&sep->pdev->dev,
1299 "[PID%d] lli_array[%x].bus_address is %08lx, "
1300 "lli_array[%x].block_size is (hex) %x\n", current->pid,
1301 count, (unsigned long)lli_array[count].bus_address,
1302 count, lli_array[count].block_size);
1305 /* Check the offset for the first page */
1306 lli_array[0].bus_address =
1307 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1309 /* Check whether all the data fits in the first page */
1310 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1311 lli_array[0].block_size = data_size;
1313 lli_array[0].block_size =
1314 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1316 dev_dbg(&sep->pdev->dev,
1317 "[PID%d] After check if page 0 has all data\n",
1319 dev_dbg(&sep->pdev->dev,
1320 "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1321 "lli_array[0].block_size is (hex) %x\n",
1323 (unsigned long)lli_array[0].bus_address,
1324 lli_array[0].block_size);
1327 /* Check the size of the last page */
1328 if (num_pages > 1) {
1329 lli_array[num_pages - 1].block_size =
1330 (app_virt_addr + data_size) & (~PAGE_MASK);
1331 if (lli_array[num_pages - 1].block_size == 0)
1332 lli_array[num_pages - 1].block_size = PAGE_SIZE;
1334 dev_dbg(&sep->pdev->dev,
1335 "[PID%d] After last page size adjustment\n",
1337 dev_dbg(&sep->pdev->dev,
1338 "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1339 "lli_array[%x].block_size is (hex) %x\n",
1342 (unsigned long)lli_array[num_pages - 1].bus_address,
1344 lli_array[num_pages - 1].block_size);
1347 /* Set output params according to the in_out flag */
1348 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1349 *lli_array_ptr = lli_array;
1350 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1352 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1354 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1356 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1358 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
1360 *lli_array_ptr = lli_array;
1361 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1363 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1365 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1367 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1368 out_map_num_entries = num_pages;
1369 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
1373 end_function_with_error3:
1374 /* Free lli array */
1377 end_function_with_error2:
1380 end_function_with_error1:
1381 /* Free page array */
1389 * sep_lli_table_secure_dma - get lli array for IMR addresses
1390 * @sep: pointer to struct sep_device
1391 * @app_virt_addr: user memory data buffer
1392 * @data_size: size of data buffer
1393 * @lli_array_ptr: lli array
1394 * @in_out_flag: not used
1395 * @dma_ctx: pointer to struct sep_dma_context
1397 * This function creates lli tables for outputting data to
1398 * IMR memory, which is memory that cannot be accessed by the
1399 * x86 processor.
1401 static int sep_lli_table_secure_dma(struct sep_device *sep,
1404 struct sep_lli_entry **lli_array_ptr,
1406 struct sep_dma_context *dma_ctx)
1411 /* The page of the end address of the user space buffer */
1413 /* The page of the start address of the user space buffer */
1415 /* The range in pages */
1418 struct sep_lli_entry *lli_array;
1420 /* Set start and end pages and num pages */
1421 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1422 start_page = app_virt_addr >> PAGE_SHIFT;
1423 num_pages = end_page - start_page + 1;
1425 dev_dbg(&sep->pdev->dev,
1426 "[PID%d] lock user pages app_virt_addr is %x\n",
1427 current->pid, app_virt_addr);
1429 dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1430 current->pid, data_size);
1431 dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1432 current->pid, start_page);
1433 dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1434 current->pid, end_page);
1435 dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1436 current->pid, num_pages);
1438 lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1444 * Fill the lli_array
1446 start_page = start_page << PAGE_SHIFT;
1447 for (count = 0; count < num_pages; count++) {
1448 /* Fill the lli array entry */
1449 lli_array[count].bus_address = start_page;
1450 lli_array[count].block_size = PAGE_SIZE;
1452 start_page += PAGE_SIZE;
1454 dev_dbg(&sep->pdev->dev,
1455 "[PID%d] lli_array[%x].bus_address is %08lx, "
1456 "lli_array[%x].block_size is (hex) %x\n",
1458 count, (unsigned long)lli_array[count].bus_address,
1459 count, lli_array[count].block_size);
1462 /* Check the offset for the first page */
1463 lli_array[0].bus_address =
1464 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1466 /* Check whether all the data fits in the first page */
1467 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1468 lli_array[0].block_size = data_size;
1470 lli_array[0].block_size =
1471 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1473 dev_dbg(&sep->pdev->dev,
1474 "[PID%d] After check if page 0 has all data\n"
1475 "lli_array[0].bus_address is (hex) %08lx, "
1476 "lli_array[0].block_size is (hex) %x\n",
1478 (unsigned long)lli_array[0].bus_address,
1479 lli_array[0].block_size);
1481 /* Check the size of the last page */
1482 if (num_pages > 1) {
1483 lli_array[num_pages - 1].block_size =
1484 (app_virt_addr + data_size) & (~PAGE_MASK);
1485 if (lli_array[num_pages - 1].block_size == 0)
1486 lli_array[num_pages - 1].block_size = PAGE_SIZE;
1488 dev_dbg(&sep->pdev->dev,
1489 "[PID%d] After last page size adjustment\n"
1490 "lli_array[%x].bus_address is (hex) %08lx, "
1491 "lli_array[%x].block_size is (hex) %x\n",
1492 current->pid, num_pages - 1,
1493 (unsigned long)lli_array[num_pages - 1].bus_address,
1495 lli_array[num_pages - 1].block_size);
1497 *lli_array_ptr = lli_array;
1498 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1499 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1500 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1501 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1507 * sep_calculate_lli_table_max_size - size the LLI table
1508 * @sep: pointer to struct sep_device
1510 * @num_array_entries
1513 * This function calculates the size of data that can be inserted into
1514 * the lli table from this array, such that either the table is full
1515 * (all entries are entered), or there are no more entries in the
1518 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1519 struct sep_lli_entry *lli_in_array_ptr,
1520 u32 num_array_entries,
1521 u32 *last_table_flag)
1524 /* Table data size */
1525 u32 table_data_size = 0;
1526 /* Data size for the next table */
1527 u32 next_table_data_size;
1529 *last_table_flag = 0;
1532 * Calculate the data in the out lli table till we fill the whole
1533 * table or till the data has ended
1536 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1537 (counter < num_array_entries); counter++)
1538 table_data_size += lli_in_array_ptr[counter].block_size;
1541 * Check if we reached the last entry,
1542 * meaning this is the last table to build,
1543 * and no need to check the block alignment
1545 if (counter == num_array_entries) {
1546 /* Set the last table flag */
1547 *last_table_flag = 1;
1552 * Calculate the data size of the next table.
1553 * Stop if no entries left or if data size is more than the DMA restriction
1555 next_table_data_size = 0;
1556 for (; counter < num_array_entries; counter++) {
1557 next_table_data_size += lli_in_array_ptr[counter].block_size;
1558 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1563 * Check if the next table data size is less than the DMA restriction.
1564 * If it is, recalculate the current table size so that the next
1565 * table data size will be adequate for DMA
1567 if (next_table_data_size &&
1568 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1570 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1571 next_table_data_size);
1574 return table_data_size;
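/*
 * Worked example (numbers are illustrative; assume
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE is 4 KB): suppose the remaining
 * lli entries sum to 21 KB and this table absorbs entries totalling
 * 20 KB, leaving a single 1 KB entry for the next table. Since 1 KB is
 * below the DMA restriction, the current table is trimmed by
 * 4 KB - 1 KB = 3 KB down to 17 KB, so that the next table carries at
 * least 4 KB. If instead all remaining entries fit in this table,
 * *last_table_flag is set and no such adjustment is made.
 */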
1578 * sep_build_lli_table - build an lli array for the given table
1579 * @sep: pointer to struct sep_device
1580 * @lli_array_ptr: pointer to lli array
1581 * @lli_table_ptr: pointer to lli table
1582 * @num_processed_entries_ptr: pointer to number of lli array entries processed
1583 * @num_table_entries_ptr: pointer to number of entries in the table
1584 * @table_data_size: total data size
1586 * Builds an lli table from the lli_array according to
1587 * the given size of data
1589 static void sep_build_lli_table(struct sep_device *sep,
1590 struct sep_lli_entry *lli_array_ptr,
1591 struct sep_lli_entry *lli_table_ptr,
1592 u32 *num_processed_entries_ptr,
1593 u32 *num_table_entries_ptr,
1594 u32 table_data_size)
1596 /* Current table data size */
1597 u32 curr_table_data_size;
1598 /* Counter of lli array entry */
1601 /* Init current table data size and lli array entry counter */
1602 curr_table_data_size = 0;
1604 *num_table_entries_ptr = 1;
1606 dev_dbg(&sep->pdev->dev,
1607 "[PID%d] build lli table table_data_size: (hex) %x\n",
1608 current->pid, table_data_size);
1610 /* Fill the table till table size reaches the needed amount */
1611 while (curr_table_data_size < table_data_size) {
1612 /* Update the number of entries in table */
1613 (*num_table_entries_ptr)++;
1615 lli_table_ptr->bus_address =
1616 cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1618 lli_table_ptr->block_size =
1619 cpu_to_le32(lli_array_ptr[array_counter].block_size);
1621 curr_table_data_size += lli_array_ptr[array_counter].block_size;
1623 dev_dbg(&sep->pdev->dev,
1624 "[PID%d] lli_table_ptr is %p\n",
1625 current->pid, lli_table_ptr);
1626 dev_dbg(&sep->pdev->dev,
1627 "[PID%d] lli_table_ptr->bus_address: %08lx\n",
1629 (unsigned long)lli_table_ptr->bus_address);
1631 dev_dbg(&sep->pdev->dev,
1632 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1633 current->pid, lli_table_ptr->block_size);
1635 /* Check for overflow of the table data */
1636 if (curr_table_data_size > table_data_size) {
1637 dev_dbg(&sep->pdev->dev,
1638 "[PID%d] curr_table_data_size too large\n",
1641 /* Update the size of block in the table */
1642 lli_table_ptr->block_size =
1643 cpu_to_le32(lli_table_ptr->block_size) -
1644 (curr_table_data_size - table_data_size);
1646 /* Update the physical address in the lli array */
1647 lli_array_ptr[array_counter].bus_address +=
1648 cpu_to_le32(lli_table_ptr->block_size);
1650 /* Update the block size left in the lli array */
1651 lli_array_ptr[array_counter].block_size =
1652 (curr_table_data_size - table_data_size);
1654 /* Advance to the next entry in the lli_array */
1657 dev_dbg(&sep->pdev->dev,
1658 "[PID%d] lli_table_ptr->bus_address is %08lx\n",
1660 (unsigned long)lli_table_ptr->bus_address);
1661 dev_dbg(&sep->pdev->dev,
1662 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1664 lli_table_ptr->block_size);
1666 /* Move to the next entry in table */
1670 /* Set the info entry to default */
1671 lli_table_ptr->bus_address = 0xffffffff;
1672 lli_table_ptr->block_size = 0;
1674 /* Set the output parameter */
1675 *num_processed_entries_ptr += array_counter;
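/*
 * Illustrative note on the info entry encoding (values hypothetical):
 * when tables are chained (see sep_prepare_input_dma_table() below),
 * the info entry of the previous table is overwritten so that
 *
 *	bus_address = bus address of the next table
 *	block_size  = (num_entries_in_table << 24) | table_data_size
 *
 * e.g. a next table with 5 entries carrying 0x3000 bytes of data is
 * encoded as block_size 0x05003000. The terminating info entry written
 * above keeps the sentinel bus_address 0xffffffff instead.
 */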
1680 * sep_shared_area_virt_to_bus - map shared area to bus address
1681 * @sep: pointer to struct sep_device
1682 * @virt_address: virtual address to convert
1684 * This function returns the physical address inside shared area according
1685 * to the virtual address. It can be either on the external RAM device
1686 * (ioremapped), or on the system RAM
1687 * This implementation is for the external RAM
1689 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1692 dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1693 current->pid, virt_address);
1694 dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1697 sep->shared_bus + (virt_address - sep->shared_addr));
1699 return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1703 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1704 * @sep: pointer to struct sep_device
1705 * @bus_address: bus address to convert
1707 * This function returns the virtual address inside shared area
1708 * according to the physical address. It can be either on the
1709 * external RAM device (ioremapped), or on the system RAM
1710 * This implementation is for the external RAM
1712 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1713 dma_addr_t bus_address)
1715 dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1717 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1718 (size_t)(bus_address - sep->shared_bus)));
1720 return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1724 * sep_debug_print_lli_tables - dump LLI table
1725 * @sep: pointer to struct sep_device
1726 * @lli_table_ptr: pointer to sep_lli_entry
1727 * @num_table_entries: number of entries
1728 * @table_data_size: total data size
1730 * Walk the list of created tables and print all the data
1732 static void sep_debug_print_lli_tables(struct sep_device *sep,
1733 struct sep_lli_entry *lli_table_ptr,
1734 unsigned long num_table_entries,
1735 unsigned long table_data_size)
1738 unsigned long table_count = 1;
1739 unsigned long entries_count = 0;
1741 dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
1743 if (num_table_entries == 0) {
1744 dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
1749 while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1750 dev_dbg(&sep->pdev->dev,
1751 "[PID%d] lli table %08lx, "
1752 "table_data_size is (hex) %lx\n",
1753 current->pid, table_count, table_data_size);
1754 dev_dbg(&sep->pdev->dev,
1755 "[PID%d] num_table_entries is (hex) %lx\n",
1756 current->pid, num_table_entries);
1758 /* Print entries of the table (without info entry) */
1759 for (entries_count = 0; entries_count < num_table_entries;
1760 entries_count++, lli_table_ptr++) {
1762 dev_dbg(&sep->pdev->dev,
1763 "[PID%d] lli_table_ptr address is %08lx\n",
1765 (unsigned long) lli_table_ptr);
1767 dev_dbg(&sep->pdev->dev,
1768 "[PID%d] phys address is %08lx "
1769 "block size is (hex) %x\n", current->pid,
1770 (unsigned long)lli_table_ptr->bus_address,
1771 lli_table_ptr->block_size);
1774 /* Point to the info entry */
1777 dev_dbg(&sep->pdev->dev,
1778 "[PID%d] phys lli_table_ptr->block_size "
1781 lli_table_ptr->block_size);
1783 dev_dbg(&sep->pdev->dev,
1784 "[PID%d] phys lli_table_ptr->physical_address "
1787 (unsigned long)lli_table_ptr->bus_address);
1790 table_data_size = lli_table_ptr->block_size & 0xffffff;
1791 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1793 dev_dbg(&sep->pdev->dev,
1794 "[PID%d] phys table_data_size is "
1795 "(hex) %lx num_table_entries is"
1796 " %lx bus_address is%lx\n",
1800 (unsigned long)lli_table_ptr->bus_address);
1802 if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1803 lli_table_ptr = (struct sep_lli_entry *)
1804 sep_shared_bus_to_virt(sep,
1805 (unsigned long)lli_table_ptr->bus_address);
1809 dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
1816 * sep_prepare_empty_lli_table - create a blank LLI table
1817 * @sep: pointer to struct sep_device
1818 * @lli_table_addr_ptr: pointer to lli table
1819 * @num_entries_ptr: pointer to number of entries
1820 * @table_data_size_ptr: point to table data size
1821 * @dmatables_region: Optional buffer for DMA tables
1822 * @dma_ctx: DMA context
1824 * This function creates empty lli tables when there is no data
1826 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1827 dma_addr_t *lli_table_addr_ptr,
1828 u32 *num_entries_ptr,
1829 u32 *table_data_size_ptr,
1830 void **dmatables_region,
1831 struct sep_dma_context *dma_ctx)
1833 struct sep_lli_entry *lli_table_ptr;
1835 /* Find the area for new table */
1837 (struct sep_lli_entry *)(sep->shared_addr +
1838 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1839 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1840 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1842 if (dmatables_region && *dmatables_region)
1843 lli_table_ptr = *dmatables_region;
1845 lli_table_ptr->bus_address = 0;
1846 lli_table_ptr->block_size = 0;
1849 lli_table_ptr->bus_address = 0xFFFFFFFF;
1850 lli_table_ptr->block_size = 0;
1852 /* Set the output parameter value */
1853 *lli_table_addr_ptr = sep->shared_bus +
1854 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1855 dma_ctx->num_lli_tables_created *
1856 sizeof(struct sep_lli_entry) *
1857 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1859 /* Set the num of entries and table data size for empty table */
1860 *num_entries_ptr = 2;
1861 *table_data_size_ptr = 0;
1863 /* Update the number of created tables */
1864 dma_ctx->num_lli_tables_created++;
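/*
 * Illustrative layout of the empty table created above (two entries,
 * zero data):
 *
 *	entry 0: bus_address = 0x00000000, block_size = 0
 *	entry 1: bus_address = 0xffffffff, block_size = 0	(info entry)
 */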
1868 * sep_prepare_input_dma_table - prepare input DMA mappings
1869 * @sep: pointer to struct sep_device
1874 * @table_data_size_ptr:
1875 * @is_kva: set for kernel data (kernel crypto call)
1877 * This function prepares only input DMA table for synchronic symmetric
1879 * Note that all bus addresses that are passed to the SEP
1880 * are in 32 bit format; the SEP is a 32 bit device
1882 static int sep_prepare_input_dma_table(struct sep_device *sep,
1883 unsigned long app_virt_addr,
1886 dma_addr_t *lli_table_ptr,
1887 u32 *num_entries_ptr,
1888 u32 *table_data_size_ptr,
1890 void **dmatables_region,
1891 struct sep_dma_context *dma_ctx
1895 /* Pointer to the info entry of the table - the last entry */
1896 struct sep_lli_entry *info_entry_ptr;
1897 /* Array of pointers to page */
1898 struct sep_lli_entry *lli_array_ptr;
1899 /* Points to the first entry to be processed in the lli_in_array */
1900 u32 current_entry = 0;
1901 /* Num entries in the virtual buffer */
1902 u32 sep_lli_entries = 0;
1903 /* Lli table pointer */
1904 struct sep_lli_entry *in_lli_table_ptr;
1905 /* The total data in one table */
1906 u32 table_data_size = 0;
1907 /* Flag for last table */
1908 u32 last_table_flag = 0;
1909 /* Number of entries in lli table */
1910 u32 num_entries_in_table = 0;
1911 /* Next table address */
1912 void *lli_table_alloc_addr = NULL;
1913 void *dma_lli_table_alloc_addr = NULL;
1914 void *dma_in_lli_table_ptr = NULL;
1916 dev_dbg(&sep->pdev->dev,
1917 "[PID%d] prepare intput dma tbl data size: (hex) %x\n",
1918 current->pid, data_size);
1920 dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1921 current->pid, block_size);
1923 /* Initialize the pages pointers */
1924 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1925 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1927 /* Set the kernel address for first table to be allocated */
1928 lli_table_alloc_addr = (void *)(sep->shared_addr +
1929 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1930 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1931 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1933 if (data_size == 0) {
1934 if (dmatables_region) {
1935 error = sep_allocate_dmatables_region(sep,
1942 /* Special case - create empty table - 2 entries, zero data */
1943 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1944 num_entries_ptr, table_data_size_ptr,
1945 dmatables_region, dma_ctx);
1946 goto update_dcb_counter;
1949 /* Check if the pages are in Kernel Virtual Address layout */
1951 error = sep_lock_kernel_pages(sep, app_virt_addr,
1952 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1956 * Lock the pages of the user buffer
1957 * and translate them to pages
1959 error = sep_lock_user_pages(sep, app_virt_addr,
1960 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1966 dev_dbg(&sep->pdev->dev,
1967 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1969 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1972 info_entry_ptr = NULL;
1975 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1977 dma_lli_table_alloc_addr = lli_table_alloc_addr;
1978 if (dmatables_region) {
1979 error = sep_allocate_dmatables_region(sep,
1984 goto end_function_error;
1985 lli_table_alloc_addr = *dmatables_region;
1988 /* Loop till all the entries in the input array are processed */
1989 while (current_entry < sep_lli_entries) {
1991 /* Set the new input table */
1993 (struct sep_lli_entry *)lli_table_alloc_addr;
1994 dma_in_lli_table_ptr =
1995 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
1997 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1998 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1999 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2000 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2002 if (dma_lli_table_alloc_addr >
2003 ((void *)sep->shared_addr +
2004 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2005 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2008 goto end_function_error;
2012 /* Update the number of created tables */
2013 dma_ctx->num_lli_tables_created++;
2015 /* Calculate the maximum size of data for input table */
2016 table_data_size = sep_calculate_lli_table_max_size(sep,
2017 &lli_array_ptr[current_entry],
2018 (sep_lli_entries - current_entry),
2022 * If this is not the last table -
2023 * then align it to the block size
2025 if (!last_table_flag)
2027 (table_data_size / block_size) * block_size;
2029 dev_dbg(&sep->pdev->dev,
2030 "[PID%d] output table_data_size is (hex) %x\n",
2034 /* Construct input lli table */
2035 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2037 &current_entry, &num_entries_in_table, table_data_size);
2039 if (info_entry_ptr == NULL) {
2041 /* Set the output parameters to physical addresses */
2042 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2043 dma_in_lli_table_ptr);
2044 *num_entries_ptr = num_entries_in_table;
2045 *table_data_size_ptr = table_data_size;
2047 dev_dbg(&sep->pdev->dev,
2048 "[PID%d] output lli_table_in_ptr is %08lx\n",
2050 (unsigned long)*lli_table_ptr);
2053 /* Update the info entry of the previous in table */
2054 info_entry_ptr->bus_address =
2055 sep_shared_area_virt_to_bus(sep,
2056 dma_in_lli_table_ptr);
2057 info_entry_ptr->block_size =
2058 ((num_entries_in_table) << 24) |
2061 /* Save the pointer to the info entry of the current tables */
2062 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2064 /* Print input tables */
2065 if (!dmatables_region) {
2066 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2067 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2068 *num_entries_ptr, *table_data_size_ptr);
2071 /* Free the lli array built from the pages */
2072 kfree(lli_array_ptr);
2075 /* Update DCB counter */
2076 dma_ctx->nr_dcb_creat++;
2080 /* Free all the allocated resources */
2081 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2082 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2083 kfree(lli_array_ptr);
2084 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2085 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2093 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2094 * @sep: pointer to struct sep_device
2096 * @sep_in_lli_entries:
2098 * @sep_out_lli_entries
2101 * @lli_table_out_ptr
2102 * @in_num_entries_ptr
2103 * @out_num_entries_ptr
2104 * @table_data_size_ptr
2106 * This function creates the input and output DMA tables for
2107 * symmetric operations (AES/DES) according to the block
2108 * size from LLI arrays
2109 * Note that all bus addresses that are passed to the SEP
2110 * are in 32 bit format; the SEP is a 32 bit device
2112 static int sep_construct_dma_tables_from_lli(
2113 struct sep_device *sep,
2114 struct sep_lli_entry *lli_in_array,
2115 u32 sep_in_lli_entries,
2116 struct sep_lli_entry *lli_out_array,
2117 u32 sep_out_lli_entries,
2119 dma_addr_t *lli_table_in_ptr,
2120 dma_addr_t *lli_table_out_ptr,
2121 u32 *in_num_entries_ptr,
2122 u32 *out_num_entries_ptr,
2123 u32 *table_data_size_ptr,
2124 void **dmatables_region,
2125 struct sep_dma_context *dma_ctx)
2127 /* Points to the area where next lli table can be allocated */
2128 void *lli_table_alloc_addr = NULL;
2130 * Points to the area in shared region where next lli table
2133 void *dma_lli_table_alloc_addr = NULL;
2134 /* Input lli table in dmatables_region or shared region */
2135 struct sep_lli_entry *in_lli_table_ptr = NULL;
2136 /* Input lli table location in the shared region */
2137 struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2138 /* Output lli table in dmatables_region or shared region */
2139 struct sep_lli_entry *out_lli_table_ptr = NULL;
2140 /* Output lli table location in the shared region */
2141 struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2142 /* Pointer to the info entry of the table - the last entry */
2143 struct sep_lli_entry *info_in_entry_ptr = NULL;
2144 /* Pointer to the info entry of the table - the last entry */
2145 struct sep_lli_entry *info_out_entry_ptr = NULL;
2146 /* Points to the first entry to be processed in the lli_in_array */
2147 u32 current_in_entry = 0;
2148 /* Points to the first entry to be processed in the lli_out_array */
2149 u32 current_out_entry = 0;
2150 /* Max size of the input table */
2151 u32 in_table_data_size = 0;
2152 /* Max size of the output table */
2153 u32 out_table_data_size = 0;
2154 /* Flag that signifies whether this is the last table to build */
2155 u32 last_table_flag = 0;
2156 /* The data size that should be in table */
2157 u32 table_data_size = 0;
2158 /* Number of entries in the input table */
2159 u32 num_entries_in_table = 0;
2160 /* Number of entries in the output table */
2161 u32 num_entries_out_table = 0;
2164 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2168 /* Initialize to point past the message area */
2169 lli_table_alloc_addr = (void *)(sep->shared_addr +
2170 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2171 (dma_ctx->num_lli_tables_created *
2172 (sizeof(struct sep_lli_entry) *
2173 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2174 dma_lli_table_alloc_addr = lli_table_alloc_addr;
2176 if (dmatables_region) {
2177 /* 2 for both in+out table */
2178 if (sep_allocate_dmatables_region(sep,
2181 2*sep_in_lli_entries))
2183 lli_table_alloc_addr = *dmatables_region;
2186 /* Loop until all the entries in the input array are processed */
2187 while (current_in_entry < sep_in_lli_entries) {
2188 /* Set the new input and output tables */
2190 (struct sep_lli_entry *)lli_table_alloc_addr;
2191 dma_in_lli_table_ptr =
2192 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2194 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2195 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2196 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2197 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2199 /* Set the first output tables */
2201 (struct sep_lli_entry *)lli_table_alloc_addr;
2202 dma_out_lli_table_ptr =
2203 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2205 /* Check if the DMA table area limit was overrun */
2206 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2207 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2208 ((void *)sep->shared_addr +
2209 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2210 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2212 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2216 /* Update the number of the lli tables created */
2217 dma_ctx->num_lli_tables_created += 2;
2219 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2220 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2221 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2222 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2224 /* Calculate the maximum size of data for input table */
2225 in_table_data_size =
2226 sep_calculate_lli_table_max_size(sep,
2227 &lli_in_array[current_in_entry],
2228 (sep_in_lli_entries - current_in_entry),
2231 /* Calculate the maximum size of data for output table */
2232 out_table_data_size =
2233 sep_calculate_lli_table_max_size(sep,
2234 &lli_out_array[current_out_entry],
2235 (sep_out_lli_entries - current_out_entry),
2238 if (!last_table_flag) {
2239 in_table_data_size = (in_table_data_size /
2240 block_size) * block_size;
2241 out_table_data_size = (out_table_data_size /
2242 block_size) * block_size;
2245 table_data_size = in_table_data_size;
2246 if (table_data_size > out_table_data_size)
2247 table_data_size = out_table_data_size;
2249 dev_dbg(&sep->pdev->dev,
2250 "[PID%d] construct tables from lli"
2251 " in_table_data_size is (hex) %x\n", current->pid,
2252 in_table_data_size);
2254 dev_dbg(&sep->pdev->dev,
2255 "[PID%d] construct tables from lli"
2256 "out_table_data_size is (hex) %x\n", current->pid,
2257 out_table_data_size);
2259 /* Construct input lli table */
2260 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2263 &num_entries_in_table,
2266 /* Construct output lli table */
2267 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2270 &num_entries_out_table,
2273 /* If info entry is null - this is the first table built */
2274 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2275 /* Set the output parameters to physical addresses */
2277 sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2279 *in_num_entries_ptr = num_entries_in_table;
2281 *lli_table_out_ptr =
2282 sep_shared_area_virt_to_bus(sep,
2283 dma_out_lli_table_ptr);
2285 *out_num_entries_ptr = num_entries_out_table;
2286 *table_data_size_ptr = table_data_size;
2288 dev_dbg(&sep->pdev->dev,
2289 "[PID%d] output lli_table_in_ptr is %08lx\n",
2291 (unsigned long)*lli_table_in_ptr);
2292 dev_dbg(&sep->pdev->dev,
2293 "[PID%d] output lli_table_out_ptr is %08lx\n",
2295 (unsigned long)*lli_table_out_ptr);
2297 /* Update the info entry of the previous input table */
2298 info_in_entry_ptr->bus_address =
2299 sep_shared_area_virt_to_bus(sep,
2300 dma_in_lli_table_ptr);
2302 info_in_entry_ptr->block_size =
2303 ((num_entries_in_table) << 24) |
2306 /* Update the info entry of the previous output table */
2307 info_out_entry_ptr->bus_address =
2308 sep_shared_area_virt_to_bus(sep,
2309 dma_out_lli_table_ptr);
2311 info_out_entry_ptr->block_size =
2312 ((num_entries_out_table) << 24) |
2315 dev_dbg(&sep->pdev->dev,
2316 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2318 (unsigned long)info_in_entry_ptr->bus_address,
2319 info_in_entry_ptr->block_size);
2321 dev_dbg(&sep->pdev->dev,
2322 "[PID%d] output lli_table_out_ptr:"
2325 (unsigned long)info_out_entry_ptr->bus_address,
2326 info_out_entry_ptr->block_size);
2329 /* Save the pointer to the info entry of the current tables */
2330 info_in_entry_ptr = in_lli_table_ptr +
2331 num_entries_in_table - 1;
2332 info_out_entry_ptr = out_lli_table_ptr +
2333 num_entries_out_table - 1;
2335 dev_dbg(&sep->pdev->dev,
2336 "[PID%d] output num_entries_out_table is %x\n",
2338 (u32)num_entries_out_table);
2339 dev_dbg(&sep->pdev->dev,
2340 "[PID%d] output info_in_entry_ptr is %lx\n",
2342 (unsigned long)info_in_entry_ptr);
2343 dev_dbg(&sep->pdev->dev,
2344 "[PID%d] output info_out_entry_ptr is %lx\n",
2346 (unsigned long)info_out_entry_ptr);
2349 /* Print input tables */
2350 if (!dmatables_region) {
2351 sep_debug_print_lli_tables(
2353 (struct sep_lli_entry *)
2354 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2355 *in_num_entries_ptr,
2356 *table_data_size_ptr);
2359 /* Print output tables */
2360 if (!dmatables_region) {
2361 sep_debug_print_lli_tables(
2363 (struct sep_lli_entry *)
2364 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2365 *out_num_entries_ptr,
2366 *table_data_size_ptr);
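/*
 * Worked example for the size matching above (illustrative numbers only):
 * with block_size = 16, if sep_calculate_lli_table_max_size() reports an
 * input maximum of 70 bytes and an output maximum of 100 bytes for a
 * non-final table, both are first rounded down to a block multiple and the
 * smaller value wins, so this input/output table pair describes 64 bytes:
 *
 *	in_table_data_size  = (70 / 16) * 16;		which is 64
 *	out_table_data_size = (100 / 16) * 16;		which is 96
 *	table_data_size     = min(64, 96);		which is 64
 */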
2373 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2374 * @app_virt_in_addr: virtual address of the input buffer
2375 * @app_virt_out_addr: virtual address of the output buffer
2378 * @lli_table_in_ptr: returned bus address of the first input DMA table
2379 * @lli_table_out_ptr: returned bus address of the first output DMA table
2380 * @in_num_entries_ptr: returned number of entries in the first input table
2381 * @out_num_entries_ptr: returned number of entries in the first output table
2382 * @table_data_size_ptr: returned data size described by the first table pair
2383 * @is_kva: set for kernel data; used only for kernel crypto module
2385 * This function builds input and output DMA tables for synchronous
2386 * symmetric operations (AES, DES, HASH). It also checks that each table
2387 * describes a whole multiple of the block size
2388 * Note that all bus addresses that are passed to the SEP
2389 * are in 32 bit format; the SEP is a 32 bit device
2391 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2392 unsigned long app_virt_in_addr,
2393 unsigned long app_virt_out_addr,
2396 dma_addr_t *lli_table_in_ptr,
2397 dma_addr_t *lli_table_out_ptr,
2398 u32 *in_num_entries_ptr,
2399 u32 *out_num_entries_ptr,
2400 u32 *table_data_size_ptr,
2402 void **dmatables_region,
2403 struct sep_dma_context *dma_ctx)
2407 /* Array of pointers of page */
2408 struct sep_lli_entry *lli_in_array;
2409 /* Array of pointers of page */
2410 struct sep_lli_entry *lli_out_array;
2417 if (data_size == 0) {
2418 /* Prepare empty table for input and output */
2419 if (dmatables_region) {
2420 error = sep_allocate_dmatables_region(
2428 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2429 in_num_entries_ptr, table_data_size_ptr,
2430 dmatables_region, dma_ctx);
2432 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2433 out_num_entries_ptr, table_data_size_ptr,
2434 dmatables_region, dma_ctx);
2436 goto update_dcb_counter;
2439 /* Initialize the pages pointers */
2440 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2441 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2443 /* Lock the buffer pages and build the LLI arrays from them */
2444 if (is_kva == true) {
2445 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2447 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2448 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2451 dev_warn(&sep->pdev->dev,
2452 "[PID%d] sep_lock_kernel_pages for input "
2453 "virtual buffer failed\n", current->pid);
2458 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2460 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2461 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2465 dev_warn(&sep->pdev->dev,
2466 "[PID%d] sep_lock_kernel_pages for output "
2467 "virtual buffer failed\n", current->pid);
2469 goto end_function_free_lli_in;
2475 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2477 error = sep_lock_user_pages(sep, app_virt_in_addr,
2478 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2481 dev_warn(&sep->pdev->dev,
2482 "[PID%d] sep_lock_user_pages for input "
2483 "virtual buffer failed\n", current->pid);
2488 if (dma_ctx->secure_dma == true) {
2489 /* secure_dma requires use of non-accessible (IMR) memory */
2490 dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2492 error = sep_lli_table_secure_dma(sep,
2493 app_virt_out_addr, data_size, &lli_out_array,
2494 SEP_DRIVER_OUT_FLAG, dma_ctx);
2496 dev_warn(&sep->pdev->dev,
2497 "[PID%d] secure dma table setup "
2498 " for output virtual buffer failed\n",
2501 goto end_function_free_lli_in;
2504 /* For normal, non-secure dma */
2505 dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2508 dev_dbg(&sep->pdev->dev,
2509 "[PID%d] Locking user output pages\n",
2512 error = sep_lock_user_pages(sep, app_virt_out_addr,
2513 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2517 dev_warn(&sep->pdev->dev,
2518 "[PID%d] sep_lock_user_pages"
2519 " for output virtual buffer failed\n",
2522 goto end_function_free_lli_in;
2527 dev_dbg(&sep->pdev->dev,
2528 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2530 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2532 dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2534 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2536 dev_dbg(&sep->pdev->dev,
2537 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2538 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2540 /* Call the function that creates table from the lli arrays */
2541 dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2543 error = sep_construct_dma_tables_from_lli(
2545 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2548 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2550 block_size, lli_table_in_ptr, lli_table_out_ptr,
2551 in_num_entries_ptr, out_num_entries_ptr,
2552 table_data_size_ptr, dmatables_region, dma_ctx);
2555 dev_warn(&sep->pdev->dev,
2556 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2558 goto end_function_with_error;
2561 kfree(lli_out_array);
2562 kfree(lli_in_array);
2565 /* Update DCB counter */
2566 dma_ctx->nr_dcb_creat++;
2570 end_function_with_error:
2571 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2572 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2573 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2574 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2575 kfree(lli_out_array);
2578 end_function_free_lli_in:
2579 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2580 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2581 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2582 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2583 kfree(lli_in_array);
2592 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2593 * @app_in_address: unsigned long; for data buffer in (user space)
2594 * @app_out_address: unsigned long; for data buffer out (user space)
2595 * @data_in_size: u32; for size of data
2596 * @block_size: u32; for block size
2597 * @tail_block_size: u32; for size of tail block
2598 * @isapplet: bool; to indicate external app
2599 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2600 * @secure_dma: indicates whether this is secure_dma using IMR
2602 * This function prepares the linked DMA tables and puts the
2603 * address for the linked list of tables into a DCB (data control
2604 * block), the address of which is known by the SEP hardware
2605 * Note that all bus addresses that are passed to the SEP
2606 * are in 32 bit format; the SEP is a 32 bit device
2608 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2609 unsigned long app_in_address,
2610 unsigned long app_out_address,
2613 u32 tail_block_size,
2617 struct sep_dcblock *dcb_region,
2618 void **dmatables_region,
2619 struct sep_dma_context **dma_ctx,
2620 struct scatterlist *src_sg,
2621 struct scatterlist *dst_sg)
2626 /* Address of the created DCB table */
2627 struct sep_dcblock *dcb_table_ptr = NULL;
2628 /* The physical address of the first input DMA table */
2629 dma_addr_t in_first_mlli_address = 0;
2630 /* Number of entries in the first input DMA table */
2631 u32 in_first_num_entries = 0;
2632 /* The physical address of the first output DMA table */
2633 dma_addr_t out_first_mlli_address = 0;
2634 /* Number of entries in the first output DMA table */
2635 u32 out_first_num_entries = 0;
2636 /* Data in the first input/output table */
2637 u32 first_data_size = 0;
2639 dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2640 current->pid, app_in_address);
2642 dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2643 current->pid, app_out_address);
2645 dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2646 current->pid, data_in_size);
2648 dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2649 current->pid, block_size);
2651 dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2652 current->pid, tail_block_size);
2654 dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2655 current->pid, isapplet);
2657 dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2658 current->pid, is_kva);
2660 dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2661 current->pid, src_sg);
2663 dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2664 current->pid, dst_sg);
2667 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2674 /* In case there are multiple DCBs for this transaction */
2675 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2678 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2680 dev_dbg(&sep->pdev->dev,
2681 "[PID%d] Not enough memory for DMA context\n",
2686 dev_dbg(&sep->pdev->dev,
2687 "[PID%d] Created DMA context addr at 0x%p\n",
2688 current->pid, *dma_ctx);
2691 (*dma_ctx)->secure_dma = secure_dma;
2693 /* these are for kernel crypto only */
2694 (*dma_ctx)->src_sg = src_sg;
2695 (*dma_ctx)->dst_sg = dst_sg;
2697 if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2698 /* No more DCBs to allocate */
2699 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2702 goto end_function_error;
2705 /* Allocate new DCB */
2707 dcb_table_ptr = dcb_region;
2709 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2710 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2711 ((*dma_ctx)->nr_dcb_creat *
2712 sizeof(struct sep_dcblock)));
2715 /* Set the default values in the DCB */
2716 dcb_table_ptr->input_mlli_address = 0;
2717 dcb_table_ptr->input_mlli_num_entries = 0;
2718 dcb_table_ptr->input_mlli_data_size = 0;
2719 dcb_table_ptr->output_mlli_address = 0;
2720 dcb_table_ptr->output_mlli_num_entries = 0;
2721 dcb_table_ptr->output_mlli_data_size = 0;
2722 dcb_table_ptr->tail_data_size = 0;
2723 dcb_table_ptr->out_vr_tail_pt = 0;
2725 if (isapplet == true) {
2727 /* Check if there is enough data for DMA operation */
2728 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2729 if (is_kva == true) {
2731 goto end_function_error;
2733 if (copy_from_user(dcb_table_ptr->tail_data,
2734 (void __user *)app_in_address,
2737 goto end_function_error;
2741 dcb_table_ptr->tail_data_size = data_in_size;
2743 /* Set the output user-space address for mem2mem op */
2744 if (app_out_address)
2745 dcb_table_ptr->out_vr_tail_pt =
2746 (aligned_u64)app_out_address;
2749 * Update both data length parameters in order to avoid
2750 * second data copy and allow building of empty mlli
2757 if (!app_out_address) {
2758 tail_size = data_in_size % block_size;
2760 if (tail_block_size == block_size)
2761 tail_size = block_size;
2768 if (tail_size > sizeof(dcb_table_ptr->tail_data))
2770 if (is_kva == true) {
2772 goto end_function_error;
2774 /* We have tail data - copy it to DCB */
2775 if (copy_from_user(dcb_table_ptr->tail_data,
2776 (void __user *)(app_in_address +
2777 data_in_size - tail_size), tail_size)) {
2779 goto end_function_error;
2782 if (app_out_address)
2784 * Calculate the output address
2785 * according to tail data size
2787 dcb_table_ptr->out_vr_tail_pt =
2788 (aligned_u64)app_out_address +
2789 data_in_size - tail_size;
2791 /* Save the real tail data size */
2792 dcb_table_ptr->tail_data_size = tail_size;
2794 * Update the data size to exclude the tail data,
2795 * i.e. only the data that actually goes through DMA
2797 data_in_size = (data_in_size - tail_size);
2800 /* Check if we need to build only input table or input/output */
2801 if (app_out_address) {
2802 /* Prepare input/output tables */
2803 error = sep_prepare_input_output_dma_table(sep,
2808 &in_first_mlli_address,
2809 &out_first_mlli_address,
2810 &in_first_num_entries,
2811 &out_first_num_entries,
2817 /* Prepare input tables */
2818 error = sep_prepare_input_dma_table(sep,
2822 &in_first_mlli_address,
2823 &in_first_num_entries,
2831 dev_warn(&sep->pdev->dev,
2832 "prepare DMA table call failed "
2833 "from prepare DCB call\n");
2834 goto end_function_error;
2837 /* Set the DCB values */
2838 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2839 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2840 dcb_table_ptr->input_mlli_data_size = first_data_size;
2841 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2842 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2843 dcb_table_ptr->output_mlli_data_size = first_data_size;
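/*
 * Worked example of the tail handling above (illustrative numbers only):
 * with data_in_size = 4104 and block_size = 16, tail_size = 4104 % 16 = 8.
 * The last 8 bytes are copied from user space into dcb_table_ptr->tail_data,
 * tail_data_size is set to 8, out_vr_tail_pt (when an output buffer was
 * supplied) points 4096 bytes into the output buffer, and the DMA tables
 * are then built for the remaining block-aligned 4096 bytes only.
 */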
2858 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2859 * @sep: pointer to struct sep_device
2860 * @isapplet: indicates external application (used for kernel access)
2861 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2863 * This function frees the DMA tables and DCB
2865 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2866 bool is_kva, struct sep_dma_context **dma_ctx)
2868 struct sep_dcblock *dcb_table_ptr;
2869 unsigned long pt_hold;
2876 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2878 if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
2881 if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
2882 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2885 /* Tail data handling is only for non-secure_dma */
2886 /* Set pointer to first DCB table */
2887 dcb_table_ptr = (struct sep_dcblock *)
2889 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2892 * Go over each DCB and see if
2893 * tail pointer must be updated
2895 for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2896 if (dcb_table_ptr->out_vr_tail_pt) {
2897 pt_hold = (unsigned long)dcb_table_ptr->
2899 tail_pt = (void *)pt_hold;
2900 if (is_kva == true) {
2904 error_temp = copy_to_user(
2905 (void __user *)tail_pt,
2906 dcb_table_ptr->tail_data,
2907 dcb_table_ptr->tail_data_size);
2910 /* Release the DMA resource */
2918 /* Free the output pages, if any */
2919 sep_free_dma_table_data_handler(sep, dma_ctx);
2921 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2928 * sep_prepare_dcb_handler - prepare a control block
2929 * @sep: pointer to struct sep_device
2930 * @arg: pointer to user parameters
2931 * @secure_dma: indicate whether we are using secure_dma on IMR
2933 * This function copies the build_dcb_struct arguments from user space and
2934 * prepares the DCBs and DMA tables for the transaction.
2936 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2938 struct sep_dma_context **dma_ctx)
2941 /* Command arguments */
2942 struct build_dcb_struct command_args;
2944 /* Get the command arguments */
2945 if (copy_from_user(&command_args, (void __user *)arg,
2946 sizeof(struct build_dcb_struct))) {
2951 dev_dbg(&sep->pdev->dev,
2952 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2953 current->pid, command_args.app_in_address);
2954 dev_dbg(&sep->pdev->dev,
2955 "[PID%d] app_out_address is %08llx\n",
2956 current->pid, command_args.app_out_address);
2957 dev_dbg(&sep->pdev->dev,
2958 "[PID%d] data_size is %x\n",
2959 current->pid, command_args.data_in_size);
2960 dev_dbg(&sep->pdev->dev,
2961 "[PID%d] block_size is %x\n",
2962 current->pid, command_args.block_size);
2963 dev_dbg(&sep->pdev->dev,
2964 "[PID%d] tail block_size is %x\n",
2965 current->pid, command_args.tail_block_size);
2966 dev_dbg(&sep->pdev->dev,
2967 "[PID%d] is_applet is %x\n",
2968 current->pid, command_args.is_applet);
2970 if (!command_args.app_in_address) {
2971 dev_warn(&sep->pdev->dev,
2972 "[PID%d] null app_in_address\n", current->pid);
2977 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2978 (unsigned long)command_args.app_in_address,
2979 (unsigned long)command_args.app_out_address,
2980 command_args.data_in_size, command_args.block_size,
2981 command_args.tail_block_size,
2982 command_args.is_applet, false,
2983 secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2991 * sep_free_dcb_handler - free control block resources
2992 * @sep: pointer to struct sep_device
2994 * This function frees the DCB resources and updates the needed
2995 * user-space buffers.
2997 static int sep_free_dcb_handler(struct sep_device *sep,
2998 struct sep_dma_context **dma_ctx)
3000 if (!dma_ctx || !(*dma_ctx)) {
3001 dev_dbg(&sep->pdev->dev,
3002 "[PID%d] no dma context defined, nothing to free\n",
3007 dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3009 (*dma_ctx)->nr_dcb_creat);
3011 return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
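/*
 * A plausible legacy-interface call sequence from user space, inferred from
 * the ordering checks in sep_ioctl() below; the exact device node path and
 * the command-message framing in the shared area are assumptions and not
 * shown here:
 *
 *	fd = open("/dev/" SEP_DEV_NAME, O_RDWR);
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, SEP_IOCPREPAREDCB, &build_dcb_args);
 *	... write the command message into the mmapped shared area ...
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);
 *	... wait for and read back the reply from the shared area ...
 *	ioctl(fd, SEP_IOCFREEDCB, 0);
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
 */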
3015 * sep_ioctl - ioctl handler for sep device
3016 * @filp: pointer to struct file
3018 * @arg: pointer to argument structure
3020 * Implement the ioctl methods available on the SEP device.
3022 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3024 struct sep_private_data * const private_data = filp->private_data;
3025 struct sep_call_status *call_status = &private_data->call_status;
3026 struct sep_device *sep = private_data->device;
3027 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3028 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3031 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3033 dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3034 current->pid, *dma_ctx);
3036 /* Make sure we own this device */
3037 error = sep_check_transaction_owner(sep);
3039 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3044 /* Check that sep_mmap has been called before */
3045 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3046 &call_status->status)) {
3047 dev_dbg(&sep->pdev->dev,
3048 "[PID%d] mmap not called\n", current->pid);
3053 /* Check that the command is for SEP device */
3054 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3060 case SEP_IOCSENDSEPCOMMAND:
3061 dev_dbg(&sep->pdev->dev,
3062 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3064 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3065 &call_status->status)) {
3066 dev_warn(&sep->pdev->dev,
3067 "[PID%d] send msg already done\n",
3072 /* Send command to SEP */
3073 error = sep_send_command_handler(sep);
3075 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3076 &call_status->status);
3077 dev_dbg(&sep->pdev->dev,
3078 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3081 case SEP_IOCENDTRANSACTION:
3082 dev_dbg(&sep->pdev->dev,
3083 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3085 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3087 dev_dbg(&sep->pdev->dev,
3088 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3091 case SEP_IOCPREPAREDCB:
3092 dev_dbg(&sep->pdev->dev,
3093 "[PID%d] SEP_IOCPREPAREDCB start\n",
3095 case SEP_IOCPREPAREDCB_SECURE_DMA:
3096 dev_dbg(&sep->pdev->dev,
3097 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3099 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3100 &call_status->status)) {
3101 dev_dbg(&sep->pdev->dev,
3102 "[PID%d] dcb prep needed before send msg\n",
3109 dev_dbg(&sep->pdev->dev,
3110 "[PID%d] dcb null arg\n", current->pid);
3115 if (cmd == SEP_IOCPREPAREDCB) {
3117 dev_dbg(&sep->pdev->dev,
3118 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3121 error = sep_prepare_dcb_handler(sep, arg, false,
3125 dev_dbg(&sep->pdev->dev,
3126 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3129 error = sep_prepare_dcb_handler(sep, arg, true,
3132 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3135 case SEP_IOCFREEDCB:
3136 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3138 case SEP_IOCFREEDCB_SECURE_DMA:
3139 dev_dbg(&sep->pdev->dev,
3140 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3142 error = sep_free_dcb_handler(sep, dma_ctx);
3143 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3148 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3154 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3160 * sep_inthandler - interrupt handler for sep device
3162 * @dev_id: device id
3164 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3166 unsigned long lock_irq_flag;
3167 u32 reg_val, reg_val2 = 0;
3168 struct sep_device *sep = dev_id;
3169 irqreturn_t int_error = IRQ_HANDLED;
3171 /* Are we in power save? */
3172 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3173 if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3174 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3179 if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3180 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3184 /* Read the IRR register to check if this is a SEP interrupt */
3185 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3187 dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3189 if (reg_val & (0x1 << 13)) {
3191 /* Lock and update the counter of reply messages */
3192 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3194 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3196 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3197 sep->send_ct, sep->reply_ct);
3199 /* Is this a kernel client request */
3200 if (sep->in_kernel) {
3201 tasklet_schedule(&sep->finish_tasklet);
3202 goto finished_interrupt;
3205 /* Is this printf or daemon request? */
3206 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3207 dev_dbg(&sep->pdev->dev,
3208 "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3210 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3212 if ((reg_val2 >> 30) & 0x1) {
3213 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3214 } else if (reg_val2 >> 31) {
3215 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3217 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3218 wake_up(&sep->event_interrupt);
3221 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3222 int_error = IRQ_NONE;
3227 if (int_error == IRQ_HANDLED)
3228 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
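/*
 * Summary of the interrupt decoding above: IRR bit 13 is the only source
 * the driver unmasks (see the IMR setup in sep_probe()), and GPR2 then
 * distinguishes why the SEP raised the interrupt:
 *
 *	reg_val & (0x1 << 13)		SEP interrupt, otherwise IRQ_NONE
 *	(reg_val2 >> 30) & 0x1		printf request from the SEP
 *	reg_val2 >> 31			daemon request
 *	neither bit set			ordinary reply; wake event_interrupt
 */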
3234 * sep_reconfig_shared_area - reconfigure shared area
3235 * @sep: pointer to struct sep_device
3237 * Reconfig the shared area between HOST and SEP - needed in case
3238 * the DX_CC_Init function was called before OS loading.
3240 static int sep_reconfig_shared_area(struct sep_device *sep)
3244 /* used to limit waiting for SEP */
3245 unsigned long end_time;
3247 /* Send the new SHARED MESSAGE AREA to the SEP */
3248 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3249 (unsigned long long)sep->shared_bus);
3251 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3253 /* Poll for SEP response */
3254 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3256 end_time = jiffies + (WAIT_TIME * HZ);
3258 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3259 (ret_val != sep->shared_bus))
3260 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3262 /* Check the return value (register) */
3263 if (ret_val != sep->shared_bus) {
3264 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3265 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3270 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3276 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3279 * @dcb_region: DCB region copy
3280 * @dmatables_region: MLLI/DMA tables copy
3281 * @dma_ctx: DMA context for current transaction
3283 ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3284 struct sep_dcblock **dcb_region,
3285 void **dmatables_region,
3286 struct sep_dma_context *dma_ctx)
3288 void *dmaregion_free_start = NULL;
3289 void *dmaregion_free_end = NULL;
3290 void *dcbregion_free_start = NULL;
3291 void *dcbregion_free_end = NULL;
3294 dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3297 if (1 > dma_ctx->nr_dcb_creat) {
3298 dev_warn(&sep->pdev->dev,
3299 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3300 current->pid, dma_ctx->nr_dcb_creat);
3305 dmaregion_free_start = sep->shared_addr
3306 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3307 dmaregion_free_end = dmaregion_free_start
3308 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3310 if (dmaregion_free_start
3311 + dma_ctx->dmatables_len > dmaregion_free_end) {
3315 memcpy(dmaregion_free_start,
3317 dma_ctx->dmatables_len);
3318 /* Free MLLI table copy */
3319 kfree(*dmatables_region);
3320 *dmatables_region = NULL;
3322 /* Copy thread's DCB table copy to DCB table region */
3323 dcbregion_free_start = sep->shared_addr +
3324 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3325 dcbregion_free_end = dcbregion_free_start +
3326 (SEP_MAX_NUM_SYNC_DMA_OPS *
3327 sizeof(struct sep_dcblock)) - 1;
3329 if (dcbregion_free_start
3330 + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3331 > dcbregion_free_end) {
3336 memcpy(dcbregion_free_start,
3338 dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3340 /* Print the tables */
3341 dev_dbg(&sep->pdev->dev, "activate: input table\n");
3342 sep_debug_print_lli_tables(sep,
3343 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3344 (*dcb_region)->input_mlli_address),
3345 (*dcb_region)->input_mlli_num_entries,
3346 (*dcb_region)->input_mlli_data_size);
3348 dev_dbg(&sep->pdev->dev, "activate: output table\n");
3349 sep_debug_print_lli_tables(sep,
3350 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3351 (*dcb_region)->output_mlli_address),
3352 (*dcb_region)->output_mlli_num_entries,
3353 (*dcb_region)->output_mlli_data_size);
3355 dev_dbg(&sep->pdev->dev,
3356 "[PID%d] printing activated tables\n", current->pid);
3359 kfree(*dmatables_region);
3360 *dmatables_region = NULL;
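/*
 * Rough layout of the shared area as used by the activation code above and
 * by sep_probe(); offsets come from the named constants and nothing beyond
 * what those offsets imply is assumed here:
 *
 *	shared_addr + 0
 *		command/reply message area
 *	shared_addr + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
 *		MLLI/DMA tables (SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)
 *	shared_addr + SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
 *		up to SEP_MAX_NUM_SYNC_DMA_OPS DCBs (struct sep_dcblock each)
 */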
3369 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3371 * @dcb_region: DCB region buf to create for current transaction
3372 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3373 * @dma_ctx: DMA context buf to create for current transaction
3374 * @user_dcb_args: User arguments for DCB/MLLI creation
3375 * @num_dcbs: Number of DCBs to create
3376 * @secure_dma: Indicate use of IMR restricted memory secure dma
3378 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3379 struct sep_dcblock **dcb_region,
3380 void **dmatables_region,
3381 struct sep_dma_context **dma_ctx,
3382 const struct build_dcb_struct __user *user_dcb_args,
3383 const u32 num_dcbs, bool secure_dma)
3387 struct build_dcb_struct *dcb_args = NULL;
3389 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3392 if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3397 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3398 dev_warn(&sep->pdev->dev,
3399 "[PID%d] invalid number of dcbs 0x%08X\n",
3400 current->pid, num_dcbs);
3405 dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3412 if (copy_from_user(dcb_args,
3414 num_dcbs * sizeof(struct build_dcb_struct))) {
3419 /* Allocate thread-specific memory for DCB */
3420 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3422 if (!(*dcb_region)) {
3427 /* Prepare DCB and MLLI table into the allocated regions */
3428 for (i = 0; i < num_dcbs; i++) {
3429 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3430 (unsigned long)dcb_args[i].app_in_address,
3431 (unsigned long)dcb_args[i].app_out_address,
3432 dcb_args[i].data_in_size,
3433 dcb_args[i].block_size,
3434 dcb_args[i].tail_block_size,
3435 dcb_args[i].is_applet,
3437 *dcb_region, dmatables_region,
3442 dev_warn(&sep->pdev->dev,
3443 "[PID%d] dma table creation failed\n",
3448 if (dcb_args[i].app_in_address != 0)
3449 (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3459 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3462 * @dcb_region: DCB region buf to create for current transaction
3463 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3464 * @dma_ctx: DMA context buf to create for current transaction
3465 * @user_dcb_args: User arguments for DCB/MLLI creation
3466 * @num_dcbs: Number of DCBs to create
3467 * This does the same thing as sep_create_dcb_dmatables_context
3468 * except that it is used only for the kernel crypto operation. It is
3469 * separate because there is no user data involved; the dcb data structure
3470 * is specific for kernel crypto (build_dcb_struct_kernel)
3472 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3473 struct sep_dcblock **dcb_region,
3474 void **dmatables_region,
3475 struct sep_dma_context **dma_ctx,
3476 const struct build_dcb_struct_kernel *dcb_data,
3482 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3485 if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3490 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3491 dev_warn(&sep->pdev->dev,
3492 "[PID%d] invalid number of dcbs 0x%08X\n",
3493 current->pid, num_dcbs);
3498 dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3499 current->pid, num_dcbs);
3501 /* Allocate thread-specific memory for DCB */
3502 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3504 if (!(*dcb_region)) {
3509 /* Prepare DCB and MLLI table into the allocated regions */
3510 for (i = 0; i < num_dcbs; i++) {
3511 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3512 (unsigned long)dcb_data->app_in_address,
3513 (unsigned long)dcb_data->app_out_address,
3514 dcb_data->data_in_size,
3515 dcb_data->block_size,
3516 dcb_data->tail_block_size,
3517 dcb_data->is_applet,
3520 *dcb_region, dmatables_region,
3525 dev_warn(&sep->pdev->dev,
3526 "[PID%d] dma table creation failed\n",
3538 * sep_activate_msgarea_context - Takes the message area context into use
3540 * @msg_region: Message area context buf
3541 * @msg_len: Message area context buffer size
3543 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3545 const size_t msg_len)
3547 dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3550 if (!msg_region || !(*msg_region) ||
3551 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3552 dev_warn(&sep->pdev->dev,
3553 "[PID%d] invalid act msgarea len 0x%08zX\n",
3554 current->pid, msg_len);
3558 memcpy(sep->shared_addr, *msg_region, msg_len);
3564 * sep_create_msgarea_context - Creates message area context
3566 * @msg_region: Msg area region buf to create for current transaction
3567 * @msg_user: Content for msg area region from user
3568 * @msg_len: Message area size
3570 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3572 const void __user *msg_user,
3573 const size_t msg_len)
3577 dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3582 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3583 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3584 dev_warn(&sep->pdev->dev,
3585 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3586 current->pid, msg_len);
3591 /* Allocate thread-specific memory for message buffer */
3592 *msg_region = kzalloc(msg_len, GFP_KERNEL);
3593 if (!(*msg_region)) {
3598 /* Copy input data to write() to allocated message buffer */
3599 if (copy_from_user(*msg_region, msg_user, msg_len)) {
3605 if (error && msg_region) {
3615 * sep_read - Returns results of an operation for fastcall interface
3616 * @filp: File pointer
3617 * @buf_user: User buffer for storing results
3618 * @count_user: User buffer size
3619 * @offset: File offset, not supported
3621 * The implementation does not support reading in chunks, all data must be
3622 * consumed during a single read system call.
3624 static ssize_t sep_read(struct file *filp,
3625 char __user *buf_user, size_t count_user,
3628 struct sep_private_data * const private_data = filp->private_data;
3629 struct sep_call_status *call_status = &private_data->call_status;
3630 struct sep_device *sep = private_data->device;
3631 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3632 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3633 ssize_t error = 0, error_tmp = 0;
3635 /* Am I the process that owns the transaction? */
3636 error = sep_check_transaction_owner(sep);
3638 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3643 /* Checks that user has called necessary apis */
3644 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3645 &call_status->status)) {
3646 dev_warn(&sep->pdev->dev,
3647 "[PID%d] fastcall write not called\n",
3650 goto end_function_error;
3654 dev_warn(&sep->pdev->dev,
3655 "[PID%d] null user buffer\n",
3658 goto end_function_error;
3662 /* Wait for SEP to finish */
3663 wait_event(sep->event_interrupt,
3664 test_bit(SEP_WORKING_LOCK_BIT,
3665 &sep->in_use_flags) == 0);
3667 sep_dump_message(sep);
3669 dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3670 current->pid, count_user);
3672 /* In case user has allocated bigger buffer */
3673 if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3674 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3676 if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3678 goto end_function_error;
3681 dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3685 /* Copy possible tail data to user and free DCB and MLLIs */
3686 error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3688 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3691 /* End the transaction, wakeup pending ones */
3692 error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3695 dev_warn(&sep->pdev->dev,
3696 "[PID%d] ending transaction failed\n",
3704 * sep_fastcall_args_get - Gets fastcall params from user
3706 * @args: Parameters buffer
3707 * @buf_user: User buffer for operation parameters
3708 * @count_user: User buffer size
3710 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3711 struct sep_fastcall_hdr *args,
3712 const char __user *buf_user,
3713 const size_t count_user)
3716 size_t actual_count = 0;
3719 dev_warn(&sep->pdev->dev,
3720 "[PID%d] null user buffer\n",
3726 if (count_user < sizeof(struct sep_fastcall_hdr)) {
3727 dev_warn(&sep->pdev->dev,
3728 "[PID%d] too small message size 0x%08zX\n",
3729 current->pid, count_user);
3735 if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3740 if (SEP_FC_MAGIC != args->magic) {
3741 dev_warn(&sep->pdev->dev,
3742 "[PID%d] invalid fastcall magic 0x%08X\n",
3743 current->pid, args->magic);
3748 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3749 current->pid, args->num_dcbs);
3750 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3751 current->pid, args->msg_len);
3753 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3754 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3755 dev_warn(&sep->pdev->dev,
3756 "[PID%d] invalid message length\n",
3762 actual_count = sizeof(struct sep_fastcall_hdr)
3764 + (args->num_dcbs * sizeof(struct build_dcb_struct));
3766 if (actual_count != count_user) {
3767 dev_warn(&sep->pdev->dev,
3768 "[PID%d] inconsistent message "
3769 "sizes 0x%08zX vs 0x%08zX\n",
3770 current->pid, actual_count, count_user);
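/*
 * Shape of a fastcall write() buffer, as implied by the header checks above
 * and by the pointer arithmetic in sep_write() below:
 *
 *	struct sep_fastcall_hdr	hdr;		magic, num_dcbs, msg_len, secure_dma
 *	struct build_dcb_struct	dcb[num_dcbs];	DCB/MLLI build arguments
 *	u8			msg[msg_len];	command message for the SEP
 */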
3780 * sep_write - Starts an operation for fastcall interface
3781 * @filp: File pointer
3782 * @buf_user: User buffer for operation parameters
3783 * @count_user: User buffer size
3784 * @offset: File offset, not supported
3786 * The implementation does not support writing in chunks,
3787 * all data must be given during a single write system call.
3789 static ssize_t sep_write(struct file *filp,
3790 const char __user *buf_user, size_t count_user,
3793 struct sep_private_data * const private_data = filp->private_data;
3794 struct sep_call_status *call_status = &private_data->call_status;
3795 struct sep_device *sep = private_data->device;
3796 struct sep_dma_context *dma_ctx = NULL;
3797 struct sep_fastcall_hdr call_hdr = {0};
3798 void *msg_region = NULL;
3799 void *dmatables_region = NULL;
3800 struct sep_dcblock *dcb_region = NULL;
3802 struct sep_queue_info *my_queue_elem = NULL;
3803 bool my_secure_dma; /* are we using secure_dma (IMR)? */
3805 dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3807 dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3808 current->pid, private_data);
3810 error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3814 buf_user += sizeof(struct sep_fastcall_hdr);
3816 if (call_hdr.secure_dma == 0)
3817 my_secure_dma = false;
3819 my_secure_dma = true;
3822 * Controlling driver memory usage by limiting amount of
3823 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3824 * of threads can progress further at a time
3826 dev_dbg(&sep->pdev->dev,
3827 "[PID%d] waiting for double buffering region access\n",
3829 error = down_interruptible(&sep->sep_doublebuf);
3830 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3833 /* Signal received */
3834 goto end_function_error;
3839 * Prepare contents of the shared area regions for
3840 * the operation into temporary buffers
3842 if (0 < call_hdr.num_dcbs) {
3843 error = sep_create_dcb_dmatables_context(sep,
3847 (const struct build_dcb_struct __user *)
3849 call_hdr.num_dcbs, my_secure_dma);
3851 goto end_function_error_doublebuf;
3853 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3856 error = sep_create_msgarea_context(sep,
3861 goto end_function_error_doublebuf;
3863 dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3865 my_queue_elem = sep_queue_status_add(sep,
3866 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3867 (dma_ctx) ? dma_ctx->input_data_len : 0,
3869 current->comm, sizeof(current->comm));
3871 if (!my_queue_elem) {
3872 dev_dbg(&sep->pdev->dev,
3873 "[PID%d] updating queue status error\n", current->pid);
3875 goto end_function_error_doublebuf;
3878 /* Wait until current process gets the transaction */
3879 error = sep_wait_transaction(sep);
3882 /* Interrupted by signal, don't clear transaction */
3883 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3885 sep_queue_status_remove(sep, &my_queue_elem);
3886 goto end_function_error_doublebuf;
3889 dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3891 private_data->my_queue_elem = my_queue_elem;
3893 /* Activate shared area regions for the transaction */
3894 error = sep_activate_msgarea_context(sep, &msg_region,
3897 goto end_function_error_clear_transact;
3899 sep_dump_message(sep);
3901 if (0 < call_hdr.num_dcbs) {
3902 error = sep_activate_dcb_dmatables_context(sep,
3907 goto end_function_error_clear_transact;
3910 /* Send command to SEP */
3911 error = sep_send_command_handler(sep);
3913 goto end_function_error_clear_transact;
3915 /* Store DMA context for the transaction */
3916 private_data->dma_ctx = dma_ctx;
3917 /* Update call status */
3918 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3921 up(&sep->sep_doublebuf);
3922 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3927 end_function_error_clear_transact:
3928 sep_end_transaction_handler(sep, &dma_ctx, call_status,
3929 &private_data->my_queue_elem);
3931 end_function_error_doublebuf:
3932 up(&sep->sep_doublebuf);
3933 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3938 sep_free_dma_table_data_handler(sep, &dma_ctx);
3942 kfree(dmatables_region);
3948 * sep_seek - Handler for seek system call
3949 * @filp: File pointer
3950 * @offset: File offset
3951 * @origin: Options for offset
3953 * Fastcall interface does not support seeking, all reads
3954 * and writes are from/to offset zero
3956 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3964 * sep_file_operations - file operation on sep device
3965 * @sep_ioctl: ioctl handler from user space call
3966 * @sep_poll: poll handler
3967 * @sep_open: handles sep device open request
3968 * @sep_release:handles sep device release request
3969 * @sep_mmap: handles memory mapping requests
3970 * @sep_read: handles read request on sep device
3971 * @sep_write: handles write request on sep device
3972 * @sep_seek: handles seek request on sep device
3974 static const struct file_operations sep_file_operations = {
3975 .owner = THIS_MODULE,
3976 .unlocked_ioctl = sep_ioctl,
3979 .release = sep_release,
3987 * sep_sysfs_read - read sysfs entry per given arguments
3988 * @filp: file pointer
3989 * @kobj: kobject pointer
3990 * @attr: binary file attributes
3991 * @buf: read to this buffer
3992 * @pos: offset to read
3993 * @count: amount of data to read
3995 * This function is to read sysfs entries for sep driver per given arguments.
3998 sep_sysfs_read(struct file *filp, struct kobject *kobj,
3999 struct bin_attribute *attr,
4000 char *buf, loff_t pos, size_t count)
4002 unsigned long lck_flags;
4003 size_t nleft = count;
4004 struct sep_device *sep = sep_dev;
4005 struct sep_queue_info *queue_elem = NULL;
4009 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4011 queue_num = sep->sep_queue_num;
4012 if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4013 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4016 if (count < sizeof(queue_num)
4017 + (queue_num * sizeof(struct sep_queue_data))) {
4018 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4022 memcpy(buf, &queue_num, sizeof(queue_num));
4023 buf += sizeof(queue_num);
4024 nleft -= sizeof(queue_num);
4026 list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4027 if (i++ > queue_num)
4030 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4031 nleft -= sizeof(queue_elem->data);
4032 buf += sizeof(queue_elem->data);
4034 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4036 return count - nleft;
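/*
 * Format of the queue_status blob produced above (a sketch; the width of
 * the leading counter is whatever sep->sep_queue_num is declared as):
 *
 *	[ number of queued transactions, capped at SEP_DOUBLEBUF_USERS_LIMIT ]
 *	[ struct sep_queue_data ] repeated that many times
 *
 * The caller must supply a buffer large enough for the whole blob in a
 * single read; smaller reads are rejected by the size check above.
 */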
4040 * bin_attributes - defines attributes for queue_status
4041 * @attr: attributes (name & permissions)
4042 * @read: function pointer to read this file
4043 * @size: maximum size of binary attribute
4045 static const struct bin_attribute queue_status = {
4046 .attr = {.name = "queue_status", .mode = 0444},
4047 .read = sep_sysfs_read,
4049 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4053 * sep_register_driver_with_fs - register misc devices
4054 * @sep: pointer to struct sep_device
4056 * This function registers the driver with the file system
4058 static int sep_register_driver_with_fs(struct sep_device *sep)
4062 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4063 sep->miscdev_sep.name = SEP_DEV_NAME;
4064 sep->miscdev_sep.fops = &sep_file_operations;
4066 ret_val = misc_register(&sep->miscdev_sep);
4068 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4073 ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4076 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4086 * sep_probe - probe a matching PCI device
4088 * @ent: pci_device_id
4090 * Attempt to set up and configure a SEP device that has been
4091 * discovered by the PCI layer. Allocates all required resources.
4093 static int sep_probe(struct pci_dev *pdev,
4094 const struct pci_device_id *ent)
4097 struct sep_device *sep = NULL;
4099 if (sep_dev != NULL) {
4100 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4104 /* Enable the device */
4105 error = pci_enable_device(pdev);
4107 dev_warn(&pdev->dev, "error enabling pci device\n");
4111 /* Allocate the sep_device structure for this device */
4112 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4113 if (sep_dev == NULL) {
4115 goto end_function_disable_device;
4119 * We're going to use another variable for actually
4120 * working with the device; this way, if we have
4121 * multiple devices in the future, it would be easier
4122 * to make appropriate changes
4126 sep->pdev = pci_dev_get(pdev);
4128 init_waitqueue_head(&sep->event_transactions);
4129 init_waitqueue_head(&sep->event_interrupt);
4130 spin_lock_init(&sep->snd_rply_lck);
4131 spin_lock_init(&sep->sep_queue_lock);
4132 sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4134 INIT_LIST_HEAD(&sep->sep_queue_status);
4136 dev_dbg(&sep->pdev->dev,
4137 "sep probe: PCI obtained, device being prepared\n");
4139 /* Set up our register area */
4140 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4141 if (!sep->reg_physical_addr) {
4142 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4144 goto end_function_free_sep_dev;
4147 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4148 if (!sep->reg_physical_end) {
4149 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4151 goto end_function_free_sep_dev;
4154 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4155 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4156 if (!sep->reg_addr) {
4157 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4159 goto end_function_free_sep_dev;
4162 dev_dbg(&sep->pdev->dev,
4163 "Register area start %llx end %llx virtual %p\n",
4164 (unsigned long long)sep->reg_physical_addr,
4165 (unsigned long long)sep->reg_physical_end,
4168 /* Allocate the shared area */
4169 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4170 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4171 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4172 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4173 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4175 if (sep_map_and_alloc_shared_area(sep)) {
4177 /* Allocation failed */
4178 goto end_function_error;
4181 /* Clear ICR register */
4182 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4184 /* Set the IMR register - open only GPR 2 */
4185 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4187 /* Read send/receive counters from SEP */
4188 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4189 sep->reply_ct &= 0x3FFFFFFF;
4190 sep->send_ct = sep->reply_ct;
4192 /* Get the interrupt line */
4193 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4197 goto end_function_deallocate_sep_shared_area;
4199 /* The new chip requires a shared area reconfigure */
4200 error = sep_reconfig_shared_area(sep);
4202 goto end_function_free_irq;
4206 /* Finally magic up the device nodes */
4207 /* Register driver with the fs */
4208 error = sep_register_driver_with_fs(sep);
4211 dev_err(&sep->pdev->dev, "error registering dev file\n");
4212 goto end_function_free_irq;
4215 sep->in_use = 0; /* through touching the device */
4216 #ifdef SEP_ENABLE_RUNTIME_PM
4217 pm_runtime_put_noidle(&sep->pdev->dev);
4218 pm_runtime_allow(&sep->pdev->dev);
4219 pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4221 pm_runtime_use_autosuspend(&sep->pdev->dev);
4222 pm_runtime_mark_last_busy(&sep->pdev->dev);
4223 sep->power_save_setup = 1;
4225 /* register kernel crypto driver */
4226 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4227 error = sep_crypto_setup();
4229 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4230 goto end_function_free_irq;
4235 end_function_free_irq:
4236 free_irq(pdev->irq, sep);
4238 end_function_deallocate_sep_shared_area:
4239 /* De-allocate shared area */
4240 sep_unmap_and_free_shared_area(sep);
4243 iounmap(sep->reg_addr);
4245 end_function_free_sep_dev:
4246 pci_dev_put(sep_dev->pdev);
4250 end_function_disable_device:
4251 pci_disable_device(pdev);
4258 * sep_remove - handles removing device from pci subsystem
4259 * @pdev: pointer to pci device
4261 * This function will handle removing our sep device from pci subsystem on exit
4262 or unloading this module. It should free all used resources and unmap
4263 any memory regions that were mapped.
4265 static void sep_remove(struct pci_dev *pdev)
4267 struct sep_device *sep = sep_dev;
4269 /* Unregister from fs */
4270 misc_deregister(&sep->miscdev_sep);
4272 /* Unregister from kernel crypto */
4273 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4274 sep_crypto_takedown();
4277 free_irq(sep->pdev->irq, sep);
4279 /* Free the shared area */
4280 sep_unmap_and_free_shared_area(sep_dev);
4281 iounmap(sep_dev->reg_addr);
4283 #ifdef SEP_ENABLE_RUNTIME_PM
4286 pm_runtime_forbid(&sep->pdev->dev);
4287 pm_runtime_get_noresume(&sep->pdev->dev);
4290 pci_dev_put(sep_dev->pdev);
4295 /* Initialize struct pci_device_id for our driver */
4296 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4297 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4298 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4302 /* Export our pci_device_id structure to user space */
4303 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4305 #ifdef SEP_ENABLE_RUNTIME_PM
4308 * sep_pci_resume - resume routine while waking up from S3 state
4309 * @dev: pointer to sep device
4311 * This function is used to wake up the sep driver while the system resumes
4312 * from the S3 state, i.e. suspend to RAM. The RAM is intact.
4313 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4315 static int sep_pci_resume(struct device *dev)
4317 struct sep_device *sep = sep_dev;
4319 dev_dbg(&sep->pdev->dev, "pci resume called\n");
4321 if (sep->power_state == SEP_DRIVER_POWERON)
4324 /* Clear ICR register */
4325 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4327 /* Set the IMR register - open only GPR 2 */
4328 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4330 /* Read send/receive counters from SEP */
4331 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4332 sep->reply_ct &= 0x3FFFFFFF;
4333 sep->send_ct = sep->reply_ct;
4335 sep->power_state = SEP_DRIVER_POWERON;
4341 * sep_pci_suspend - suspend routine while going to S3 state
4342 * @dev: pointer to sep device
4344 * This function is used to suspend the sep driver while the system goes to
4345 * the S3 state, i.e. suspend to RAM. The RAM is intact and remains ON during this suspend.
4346 * Notes - revisit with more understanding of pm, ICR/IMR
4348 static int sep_pci_suspend(struct device *dev)
4350 struct sep_device *sep = sep_dev;
4352 dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4353 if (sep->in_use == 1)
4356 sep->power_state = SEP_DRIVER_POWEROFF;
4358 /* Clear ICR register */
4359 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4361 /* Set the IMR to block all */
4362 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4368 * sep_pm_runtime_resume - runtime resume routine
4369 * @dev: pointer to sep device
4371 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4373 static int sep_pm_runtime_resume(struct device *dev)
4378 struct sep_device *sep = sep_dev;
4380 dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4383 * Wait until the SCU boot is ready
4384 * This is done by iterating SCU_DELAY_ITERATION (10
4385 * microseconds each) up to SCU_DELAY_MAX (50) times.
4386 * This bit can be set in a random time that is less
4387 * than 500 microseconds after each power resume
4391 while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4392 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4393 retval2 &= 0x00000008;
4395 udelay(SCU_DELAY_ITERATION);
4401 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4405 /* Clear ICR register */
4406 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4408 /* Set the IMR register - open only GPR 2 */
4409 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4411 /* Read send/receive counters from SEP */
4412 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4413 sep->reply_ct &= 0x3FFFFFFF;
4414 sep->send_ct = sep->reply_ct;
4420 * sep_pm_runtime_suspend - runtime suspend routine
4421 * @dev: pointer to sep device
4423 * Notes - revisit with more understanding of pm
4425 static int sep_pm_runtime_suspend(struct device *dev)
4427 struct sep_device *sep = sep_dev;
4429 dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4431 /* Clear ICR register */
4432 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4437 * sep_pm - power management for sep driver
4438 * @sep_pm_runtime_resume: resume - no communication with CPU & main memory
4439 * @sep_pm_runtime_suspend: suspend - no communication with CPU & main memory
4440 * @sep_pci_suspend: suspend - main memory is still ON
4441 * @sep_pci_resume: resume - main memory is still ON
4443 static const struct dev_pm_ops sep_pm = {
4444 .runtime_resume = sep_pm_runtime_resume,
4445 .runtime_suspend = sep_pm_runtime_suspend,
4446 .resume = sep_pci_resume,
4447 .suspend = sep_pci_suspend,
4449 #endif /* SEP_ENABLE_RUNTIME_PM */
4452 * sep_pci_driver - registers this device with pci subsystem
4453 * @name: name identifier for this driver
4454 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4455 * @sep_probe: pointer to probe function in PCI driver
4456 * @sep_remove: pointer to remove function in PCI driver
4458 static struct pci_driver sep_pci_driver = {
4459 #ifdef SEP_ENABLE_RUNTIME_PM
4464 .name = "sep_sec_driver",
4465 .id_table = sep_pci_id_tbl,
4467 .remove = sep_remove
4470 module_pci_driver(sep_pci_driver);
4471 MODULE_LICENSE("GPL");