]> Pileus Git - ~andy/linux/blob - drivers/staging/sep/sep_main.c
Merge branch 'stable/for-jens-3.13-take-two' of git://git.kernel.org/pub/scm/linux...
[~andy/linux] / drivers / staging / sep / sep_main.c
1 /*
2  *
3  *  sep_main.c - Security Processor Driver main group of functions
4  *
5  *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6  *  Contributions(c) 2009-2011 Discretix. All rights reserved.
7  *
8  *  This program is free software; you can redistribute it and/or modify it
9  *  under the terms of the GNU General Public License as published by the Free
10  *  Software Foundation; version 2 of the License.
11  *
12  *  This program is distributed in the hope that it will be useful, but WITHOUT
13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  *  more details.
16  *
17  *  You should have received a copy of the GNU General Public License along with
18  *  this program; if not, write to the Free Software Foundation, Inc., 59
19  *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
20  *
21  *  CONTACTS:
22  *
23  *  Mark Allyn          mark.a.allyn@intel.com
24  *  Jayant Mangalampalli jayant.mangalampalli@intel.com
25  *
26  *  CHANGES:
27  *
28  *  2009.06.26  Initial publish
29  *  2010.09.14  Upgrade to Medfield
30  *  2011.01.21  Move to sep_main.c to allow for sep_crypto.c
31  *  2011.02.22  Enable kernel crypto operation
32  *
33  *  Please note that this driver is based on information in the Discretix
34  *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35  *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36  *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37  *  Overview and Integration Guide.
38  */
39 /* #define DEBUG */
40 /* #define SEP_PERF_DEBUG */
41
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/miscdevice.h>
46 #include <linux/fs.h>
47 #include <linux/cdev.h>
48 #include <linux/kdev_t.h>
49 #include <linux/mutex.h>
50 #include <linux/sched.h>
51 #include <linux/mm.h>
52 #include <linux/poll.h>
53 #include <linux/wait.h>
54 #include <linux/pci.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/slab.h>
57 #include <linux/ioctl.h>
58 #include <asm/current.h>
59 #include <linux/ioport.h>
60 #include <linux/io.h>
61 #include <linux/interrupt.h>
62 #include <linux/pagemap.h>
63 #include <asm/cacheflush.h>
64 #include <linux/delay.h>
65 #include <linux/jiffies.h>
66 #include <linux/async.h>
67 #include <linux/crypto.h>
68 #include <crypto/internal/hash.h>
69 #include <crypto/scatterwalk.h>
70 #include <crypto/sha.h>
71 #include <crypto/md5.h>
72 #include <crypto/aes.h>
73 #include <crypto/des.h>
74 #include <crypto/hash.h>
75
76 #include "sep_driver_hw_defs.h"
77 #include "sep_driver_config.h"
78 #include "sep_driver_api.h"
79 #include "sep_dev.h"
80 #include "sep_crypto.h"
81
82 #define CREATE_TRACE_POINTS
83 #include "sep_trace_events.h"
84
85 /*
86  * Let's not spend cycles iterating over message
87  * area contents if debugging not enabled
88  */
89 #ifdef DEBUG
90 #define sep_dump_message(sep)   _sep_dump_message(sep)
91 #else
92 #define sep_dump_message(sep)
93 #endif
94
95 /**
96  * Currently, there is only one SEP device per platform;
97  * In event platforms in the future have more than one SEP
98  * device, this will be a linked list
99  */
100
101 struct sep_device *sep_dev;
102
103 /**
104  * sep_queue_status_remove - Removes transaction from status queue
105  * @sep: SEP device
106  * @sep_queue_info: pointer to status queue
107  *
108  * This function will remove information about transaction from the queue.
109  */
110 void sep_queue_status_remove(struct sep_device *sep,
111                                       struct sep_queue_info **queue_elem)
112 {
113         unsigned long lck_flags;
114
115         dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
116                 current->pid);
117
118         if (!queue_elem || !(*queue_elem)) {
119                 dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
120                                         current->pid, __func__);
121                 return;
122         }
123
124         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
125         list_del(&(*queue_elem)->list);
126         sep->sep_queue_num--;
127         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
128
129         kfree(*queue_elem);
130         *queue_elem = NULL;
131
132         dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
133                 current->pid);
134         return;
135 }
136
137 /**
138  * sep_queue_status_add - Adds transaction to status queue
139  * @sep: SEP device
140  * @opcode: transaction opcode
141  * @size: input data size
142  * @pid: pid of current process
143  * @name: current process name
144  * @name_len: length of name (current process)
145  *
146  * This function adds information about about transaction started to the status
147  * queue.
148  */
149 struct sep_queue_info *sep_queue_status_add(
150                                                 struct sep_device *sep,
151                                                 u32 opcode,
152                                                 u32 size,
153                                                 u32 pid,
154                                                 u8 *name, size_t name_len)
155 {
156         unsigned long lck_flags;
157         struct sep_queue_info *my_elem = NULL;
158
159         my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
160
161         if (!my_elem)
162                 return NULL;
163
164         dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
165
166         my_elem->data.opcode = opcode;
167         my_elem->data.size = size;
168         my_elem->data.pid = pid;
169
170         if (name_len > TASK_COMM_LEN)
171                 name_len = TASK_COMM_LEN;
172
173         memcpy(&my_elem->data.name, name, name_len);
174
175         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
176
177         list_add_tail(&my_elem->list, &sep->sep_queue_status);
178         sep->sep_queue_num++;
179
180         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
181
182         return my_elem;
183 }
184
185 /**
186  *      sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
187  *      @sep: SEP device
188  *      @dmatables_region: Destination pointer for the buffer
189  *      @dma_ctx: DMA context for the transaction
190  *      @table_count: Number of MLLI/DMA tables to create
191  *      The buffer created will not work as-is for DMA operations,
192  *      it needs to be copied over to the appropriate place in the
193  *      shared area.
194  */
195 static int sep_allocate_dmatables_region(struct sep_device *sep,
196                                          void **dmatables_region,
197                                          struct sep_dma_context *dma_ctx,
198                                          const u32 table_count)
199 {
200         const size_t new_len =
201                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
202
203         void *tmp_region = NULL;
204
205         dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
206                                 current->pid, dma_ctx);
207         dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
208                                 current->pid, dmatables_region);
209
210         if (!dma_ctx || !dmatables_region) {
211                 dev_warn(&sep->pdev->dev,
212                         "[PID%d] dma context/region uninitialized\n",
213                         current->pid);
214                 return -EINVAL;
215         }
216
217         dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
218                                 current->pid, new_len);
219         dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
220                                 dma_ctx->dmatables_len);
221         tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
222         if (!tmp_region)
223                 return -ENOMEM;
224
225         /* Were there any previous tables that need to be preserved ? */
226         if (*dmatables_region) {
227                 memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
228                 kfree(*dmatables_region);
229                 *dmatables_region = NULL;
230         }
231
232         *dmatables_region = tmp_region;
233
234         dma_ctx->dmatables_len += new_len;
235
236         return 0;
237 }
238
/**
 *	sep_wait_transaction - Used for synchronizing transactions
 *	@sep: SEP device
 *
 *	Blocks until the calling process has claimed the transaction lock
 *	bit (SEP_TRANSACTION_STARTED_LOCK_BIT in sep->in_use_flags). On
 *	success, records the caller as transaction owner and returns 0.
 *	Returns -EINTR if a signal arrives while waiting; in that case
 *	the lock bit is NOT held by the caller.
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	/* Fast path: try to claim the transaction lock without sleeping */
	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				&sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
				current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		/*
		 * Re-test after queueing ourselves so a wakeup between
		 * the first test and prepare_to_wait is not lost.
		 */
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
					current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
				current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
							current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
308
309 /**
310  * sep_check_transaction_owner - Checks if current process owns transaction
311  * @sep: SEP device
312  */
313 static inline int sep_check_transaction_owner(struct sep_device *sep)
314 {
315         dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
316                 current->pid,
317                 sep->pid_doing_transaction);
318
319         if ((sep->pid_doing_transaction == 0) ||
320                 (current->pid != sep->pid_doing_transaction)) {
321                 return -EACCES;
322         }
323
324         /* We own the transaction */
325         return 0;
326 }
327
328 #ifdef DEBUG
329
330 /**
331  * sep_dump_message - dump the message that is pending
332  * @sep: SEP device
333  * This will only print dump if DEBUG is set; it does
334  * follow kernel debug print enabling
335  */
336 static void _sep_dump_message(struct sep_device *sep)
337 {
338         int count;
339
340         u32 *p = sep->shared_addr;
341
342         for (count = 0; count < 10 * 4; count += 4)
343                 dev_dbg(&sep->pdev->dev,
344                         "[PID%d] Word %d of the message is %x\n",
345                                 current->pid, count/4, *p++);
346 }
347
348 #endif
349
350 /**
351  * sep_map_and_alloc_shared_area -allocate shared block
352  * @sep: security processor
353  * @size: size of shared area
354  */
355 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
356 {
357         sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
358                 sep->shared_size,
359                 &sep->shared_bus, GFP_KERNEL);
360
361         if (!sep->shared_addr) {
362                 dev_dbg(&sep->pdev->dev,
363                         "[PID%d] shared memory dma_alloc_coherent failed\n",
364                                 current->pid);
365                 return -ENOMEM;
366         }
367         dev_dbg(&sep->pdev->dev,
368                 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
369                                 current->pid,
370                                 sep->shared_size, sep->shared_addr,
371                                 (unsigned long long)sep->shared_bus);
372         return 0;
373 }
374
375 /**
376  * sep_unmap_and_free_shared_area - free shared block
377  * @sep: security processor
378  */
379 static void sep_unmap_and_free_shared_area(struct sep_device *sep)
380 {
381         dma_free_coherent(&sep->pdev->dev, sep->shared_size,
382                                 sep->shared_addr, sep->shared_bus);
383 }
384
385 #ifdef DEBUG
386
387 /**
388  * sep_shared_bus_to_virt - convert bus/virt addresses
389  * @sep: pointer to struct sep_device
390  * @bus_address: address to convert
391  *
392  * Returns virtual address inside the shared area according
393  * to the bus address.
394  */
395 static void *sep_shared_bus_to_virt(struct sep_device *sep,
396                                                 dma_addr_t bus_address)
397 {
398         return sep->shared_addr + (bus_address - sep->shared_bus);
399 }
400
401 #endif
402
403 /**
404  * sep_open - device open method
405  * @inode: inode of SEP device
406  * @filp: file handle to SEP device
407  *
408  * Open method for the SEP device. Called when userspace opens
409  * the SEP device node.
410  *
411  * Returns zero on success otherwise an error code.
412  */
413 static int sep_open(struct inode *inode, struct file *filp)
414 {
415         struct sep_device *sep;
416         struct sep_private_data *priv;
417
418         dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
419
420         if (filp->f_flags & O_NONBLOCK)
421                 return -ENOTSUPP;
422
423         /*
424          * Get the SEP device structure and use it for the
425          * private_data field in filp for other methods
426          */
427
428         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
429         if (!priv)
430                 return -ENOMEM;
431
432         sep = sep_dev;
433         priv->device = sep;
434         filp->private_data = priv;
435
436         dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
437                                         current->pid, priv);
438
439         /* Anyone can open; locking takes place at transaction level */
440         return 0;
441 }
442
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions.
 * For every created DCB: unmaps the input/output page map arrays,
 * releases the pinned user pages, and unmaps any scatter-gather
 * lists used by the kernel crypto path. Finally frees the context
 * itself and clears the caller's pointer. Safe to call with a NULL
 * or already-freed context; always returns 0.
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
					   struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
			current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
				current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
					current->pid,
					(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled different. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (!(*dma_ctx)->secure_dma && dma->out_map_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Release the pinned input pages back to the page cache */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (!(*dma_ctx)->secure_dma && dma->out_page_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				/* Device wrote into these pages: mark dirty */
				if (!PageReserved(dma->out_page_array[count]))

					SetPageDirty(dma->
					out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in the lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter gather
		 * is used for exclusively)
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			/*
			 * NOTE(review): in_map_num_entries is also used
			 * for the destination list here; verify it matches
			 * the count passed to dma_map_sg() for dst_sg
			 * (out_map_num_entries looks like the intended
			 * counter).
			 */
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
			current->pid);

	return 0;
}
573
/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: status-queue element for this transaction
 *
 * This API handles the end transaction request: removes the status
 * queue entry, frees DMA resources, resets call status, scrubs the
 * shared message area, drops the PM usage count, and finally
 * releases the transaction lock and wakes the next waiter. Only the
 * transaction owner may complete the transaction.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
				       struct sep_dma_context **dma_ctx,
				       struct sep_call_status *call_status,
				       struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 * */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
						current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	/* Release the working lock, then give up ownership */
	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
					current->pid);
	/* Lock bit must be cleared before waking the next waiter */
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}
637
638
639 /**
640  * sep_release - close a SEP device
641  * @inode: inode of SEP device
642  * @filp: file handle being closed
643  *
644  * Called on the final close of a SEP device.
645  */
646 static int sep_release(struct inode *inode, struct file *filp)
647 {
648         struct sep_private_data * const private_data = filp->private_data;
649         struct sep_call_status *call_status = &private_data->call_status;
650         struct sep_device *sep = private_data->device;
651         struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
652         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
653
654         dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
655
656         sep_end_transaction_handler(sep, dma_ctx, call_status,
657                 my_queue_elem);
658
659         kfree(filp->private_data);
660
661         return 0;
662 }
663
664 /**
665  * sep_mmap -  maps the shared area to user space
666  * @filp: pointer to struct file
667  * @vma: pointer to vm_area_struct
668  *
669  * Called on an mmap of our space via the normal SEP device
670  */
671 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
672 {
673         struct sep_private_data * const private_data = filp->private_data;
674         struct sep_call_status *call_status = &private_data->call_status;
675         struct sep_device *sep = private_data->device;
676         struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
677         dma_addr_t bus_addr;
678         unsigned long error = 0;
679
680         dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
681
682         /* Set the transaction busy (own the device) */
683         /*
684          * Problem for multithreaded applications is that here we're
685          * possibly going to sleep while holding a write lock on
686          * current->mm->mmap_sem, which will cause deadlock for ongoing
687          * transaction trying to create DMA tables
688          */
689         error = sep_wait_transaction(sep);
690         if (error)
691                 /* Interrupted by signal, don't clear transaction */
692                 goto end_function;
693
694         /* Clear the message area to avoid next transaction reading
695          * sensitive results from previous transaction */
696         memset(sep->shared_addr, 0,
697                SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
698
699         /*
700          * Check that the size of the mapped range is as the size of the message
701          * shared area
702          */
703         if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
704                 error = -EINVAL;
705                 goto end_function_with_error;
706         }
707
708         dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
709                                         current->pid, sep->shared_addr);
710
711         /* Get bus address */
712         bus_addr = sep->shared_bus;
713
714         if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
715                 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
716                 dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
717                                                 current->pid);
718                 error = -EAGAIN;
719                 goto end_function_with_error;
720         }
721
722         /* Update call status */
723         set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
724
725         goto end_function;
726
727 end_function_with_error:
728         /* Clear our transaction */
729         sep_end_transaction_handler(sep, NULL, call_status,
730                 my_queue_elem);
731
732 end_function:
733         return error;
734 }
735
/**
 * sep_poll - poll handler
 * @filp:       pointer to struct file
 * @wait:       pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle. Only the transaction owner that has already
 * sent a message may poll; returns POLLERR otherwise. Returns
 * POLLIN | POLLRDNORM when a SEP reply is ready, 0 when no reply
 * has arrived yet.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}


	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
					current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/*
	 * Check if error occurred during poll; GPR3 holds the SEP
	 * error state (0x0 and 0x8 are the non-error values here).
	 */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
						current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	/* Counters are shared with the ISR; compare under the lock */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2)  %x\n",
				current->pid, retval);

		/* Check if printf request (bit 30 of GPR2) */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
					current->pid);
			goto end_function;
		}

		/* Check if the this is SEP reply or request (bit 31) */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
					current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
					current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
					current->pid);
			mask |= POLLIN | POLLRDNORM;
		}
		set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
				current->pid);
		mask = 0;
	}

end_function:
	return mask;
}
835
836 /**
837  * sep_time_address - address in SEP memory of time
838  * @sep: SEP device we want the address from
839  *
840  * Return the address of the two dwords in memory used for time
841  * setting.
842  */
843 static u32 *sep_time_address(struct sep_device *sep)
844 {
845         return sep->shared_addr +
846                 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
847 }
848
849 /**
850  * sep_set_time - set the SEP time
851  * @sep: the SEP we are setting the time for
852  *
853  * Calculates time and sets it at the predefined address.
854  * Called with the SEP mutex held.
855  */
856 static unsigned long sep_set_time(struct sep_device *sep)
857 {
858         struct timeval time;
859         u32 *time_addr; /* Address of time as seen by the kernel */
860
861
862         do_gettimeofday(&time);
863
864         /* Set value in the SYSTEM MEMORY offset */
865         time_addr = sep_time_address(sep);
866
867         time_addr[0] = SEP_TIME_VAL_TOKEN;
868         time_addr[1] = time.tv_sec;
869
870         dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
871                                         current->pid, time.tv_sec);
872         dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
873                                         current->pid, time_addr);
874         dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
875                                         current->pid, sep->shared_addr);
876
877         return time.tv_sec;
878 }
879
880 /**
881  * sep_send_command_handler - kick off a command
882  * @sep: SEP being signalled
883  *
884  * This function raises interrupt to SEP that signals that is has a new
885  * command from the host
886  *
887  * Note that this function does fall under the ioctl lock
888  */
889 int sep_send_command_handler(struct sep_device *sep)
890 {
891         unsigned long lock_irq_flag;
892         u32 *msg_pool;
893         int error = 0;
894
895         /* Basic sanity check; set msg pool to start of shared area */
896         msg_pool = (u32 *)sep->shared_addr;
897         msg_pool += 2;
898
899         /* Look for start msg token */
900         if (*msg_pool != SEP_START_MSG_TOKEN) {
901                 dev_warn(&sep->pdev->dev, "start message token not present\n");
902                 error = -EPROTO;
903                 goto end_function;
904         }
905
906         /* Do we have a reasonable size? */
907         msg_pool += 1;
908         if ((*msg_pool < 2) ||
909                 (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
910
911                 dev_warn(&sep->pdev->dev, "invalid message size\n");
912                 error = -EPROTO;
913                 goto end_function;
914         }
915
916         /* Does the command look reasonable? */
917         msg_pool += 1;
918         if (*msg_pool < 2) {
919                 dev_warn(&sep->pdev->dev, "invalid message opcode\n");
920                 error = -EPROTO;
921                 goto end_function;
922         }
923
924 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
925         dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
926                                         current->pid,
927                                         sep->pdev->dev.power.runtime_status);
928         sep->in_use = 1; /* device is about to be used */
929         pm_runtime_get_sync(&sep->pdev->dev);
930 #endif
931
932         if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
933                 error = -EPROTO;
934                 goto end_function;
935         }
936         sep->in_use = 1; /* device is about to be used */
937         sep_set_time(sep);
938
939         sep_dump_message(sep);
940
941         /* Update counter */
942         spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
943         sep->send_ct++;
944         spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
945
946         dev_dbg(&sep->pdev->dev,
947                 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
948                         current->pid, sep->send_ct, sep->reply_ct);
949
950         /* Send interrupt to SEP */
951         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
952
953 end_function:
954         return error;
955 }
956
957 /**
958  *      sep_crypto_dma -
959  *      @sep: pointer to struct sep_device
960  *      @sg: pointer to struct scatterlist
961  *      @direction:
962  *      @dma_maps: pointer to place a pointer to array of dma maps
963  *       This is filled in; anything previous there will be lost
964  *       The structure for dma maps is sep_dma_map
965  *      @returns number of dma maps on success; negative on error
966  *
967  *      This creates the dma table from the scatterlist
968  *      It is used only for kernel crypto as it works with scatterlists
969  *      representation of data buffers
970  *
971  */
972 static int sep_crypto_dma(
973         struct sep_device *sep,
974         struct scatterlist *sg,
975         struct sep_dma_map **dma_maps,
976         enum dma_data_direction direction)
977 {
978         struct scatterlist *temp_sg;
979
980         u32 count_segment;
981         u32 count_mapped;
982         struct sep_dma_map *sep_dma;
983         int ct1;
984
985         if (sg->length == 0)
986                 return 0;
987
988         /* Count the segments */
989         temp_sg = sg;
990         count_segment = 0;
991         while (temp_sg) {
992                 count_segment += 1;
993                 temp_sg = scatterwalk_sg_next(temp_sg);
994         }
995         dev_dbg(&sep->pdev->dev,
996                 "There are (hex) %x segments in sg\n", count_segment);
997
998         /* DMA map segments */
999         count_mapped = dma_map_sg(&sep->pdev->dev, sg,
1000                 count_segment, direction);
1001
1002         dev_dbg(&sep->pdev->dev,
1003                 "There are (hex) %x maps in sg\n", count_mapped);
1004
1005         if (count_mapped == 0) {
1006                 dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
1007                 return -ENOMEM;
1008         }
1009
1010         sep_dma = kmalloc(sizeof(struct sep_dma_map) *
1011                 count_mapped, GFP_ATOMIC);
1012
1013         if (sep_dma == NULL) {
1014                 dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
1015                 return -ENOMEM;
1016         }
1017
1018         for_each_sg(sg, temp_sg, count_mapped, ct1) {
1019                 sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
1020                 sep_dma[ct1].size = sg_dma_len(temp_sg);
1021                 dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
1022                         ct1, (unsigned long)sep_dma[ct1].dma_addr,
1023                         (unsigned long)sep_dma[ct1].size);
1024                 }
1025
1026         *dma_maps = sep_dma;
1027         return count_mapped;
1028
1029 }
1030
1031 /**
1032  *      sep_crypto_lli -
1033  *      @sep: pointer to struct sep_device
1034  *      @sg: pointer to struct scatterlist
1035  *      @data_size: total data size
1036  *      @direction:
1037  *      @dma_maps: pointer to place a pointer to array of dma maps
1038  *       This is filled in; anything previous there will be lost
1039  *       The structure for dma maps is sep_dma_map
1040  *      @lli_maps: pointer to place a pointer to array of lli maps
1041  *       This is filled in; anything previous there will be lost
1042  *       The structure for dma maps is sep_dma_map
1043  *      @returns number of dma maps on success; negative on error
1044  *
1045  *      This creates the LLI table from the scatterlist
1046  *      It is only used for kernel crypto as it works exclusively
1047  *      with scatterlists (struct scatterlist) representation of
1048  *      data buffers
1049  */
1050 static int sep_crypto_lli(
1051         struct sep_device *sep,
1052         struct scatterlist *sg,
1053         struct sep_dma_map **maps,
1054         struct sep_lli_entry **llis,
1055         u32 data_size,
1056         enum dma_data_direction direction)
1057 {
1058
1059         int ct1;
1060         struct sep_lli_entry *sep_lli;
1061         struct sep_dma_map *sep_map;
1062
1063         int nbr_ents;
1064
1065         nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
1066         if (nbr_ents <= 0) {
1067                 dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
1068                         nbr_ents);
1069                 return nbr_ents;
1070         }
1071
1072         sep_map = *maps;
1073
1074         sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
1075
1076         if (sep_lli == NULL) {
1077                 dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
1078
1079                 kfree(*maps);
1080                 *maps = NULL;
1081                 return -ENOMEM;
1082         }
1083
1084         for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
1085                 sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
1086
1087                 /* Maximum for page is total data size */
1088                 if (sep_map[ct1].size > data_size)
1089                         sep_map[ct1].size = data_size;
1090
1091                 sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
1092         }
1093
1094         *llis = sep_lli;
1095         return nbr_ents;
1096 }
1097
1098 /**
1099  *      sep_lock_kernel_pages - map kernel pages for DMA
1100  *      @sep: pointer to struct sep_device
1101  *      @kernel_virt_addr: address of data buffer in kernel
1102  *      @data_size: size of data
1103  *      @lli_array_ptr: lli array
1104  *      @in_out_flag: input into device or output from device
1105  *
1106  *      This function locks all the physical pages of the kernel virtual buffer
1107  *      and construct a basic lli  array, where each entry holds the physical
1108  *      page address and the size that application data holds in this page
1109  *      This function is used only during kernel crypto mod calls from within
1110  *      the kernel (when ioctl is not used)
1111  *
1112  *      This is used only for kernel crypto. Kernel pages
1113  *      are handled differently as they are done via
1114  *      scatter gather lists (struct scatterlist)
1115  */
1116 static int sep_lock_kernel_pages(struct sep_device *sep,
1117         unsigned long kernel_virt_addr,
1118         u32 data_size,
1119         struct sep_lli_entry **lli_array_ptr,
1120         int in_out_flag,
1121         struct sep_dma_context *dma_ctx)
1122
1123 {
1124         u32 num_pages;
1125         struct scatterlist *sg;
1126
1127         /* Array of lli */
1128         struct sep_lli_entry *lli_array;
1129         /* Map array */
1130         struct sep_dma_map *map_array;
1131
1132         enum dma_data_direction direction;
1133
1134         lli_array = NULL;
1135         map_array = NULL;
1136
1137         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1138                 direction = DMA_TO_DEVICE;
1139                 sg = dma_ctx->src_sg;
1140         } else {
1141                 direction = DMA_FROM_DEVICE;
1142                 sg = dma_ctx->dst_sg;
1143         }
1144
1145         num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
1146                 data_size, direction);
1147
1148         if (num_pages <= 0) {
1149                 dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
1150                         num_pages);
1151                 return -ENOMEM;
1152         }
1153
1154         /* Put mapped kernel sg into kernel resource array */
1155
1156         /* Set output params according to the in_out flag */
1157         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1158                 *lli_array_ptr = lli_array;
1159                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1160                                                                 num_pages;
1161                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1162                                                                 NULL;
1163                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1164                                                                 map_array;
1165                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1166                                                                 num_pages;
1167                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
1168                         dma_ctx->src_sg;
1169         } else {
1170                 *lli_array_ptr = lli_array;
1171                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1172                                                                 num_pages;
1173                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1174                                                                 NULL;
1175                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1176                                                                 map_array;
1177                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1178                                         out_map_num_entries = num_pages;
1179                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
1180                         dma_ctx->dst_sg;
1181         }
1182
1183         return 0;
1184 }
1185
1186 /**
1187  * sep_lock_user_pages - lock and map user pages for DMA
1188  * @sep: pointer to struct sep_device
1189  * @app_virt_addr: user memory data buffer
1190  * @data_size: size of data buffer
1191  * @lli_array_ptr: lli array
1192  * @in_out_flag: input or output to device
1193  *
1194  * This function locks all the physical pages of the application
1195  * virtual buffer and construct a basic lli  array, where each entry
1196  * holds the physical page address and the size that application
1197  * data holds in this physical pages
1198  */
1199 static int sep_lock_user_pages(struct sep_device *sep,
1200         u32 app_virt_addr,
1201         u32 data_size,
1202         struct sep_lli_entry **lli_array_ptr,
1203         int in_out_flag,
1204         struct sep_dma_context *dma_ctx)
1205
1206 {
1207         int error = 0;
1208         u32 count;
1209         int result;
1210         /* The the page of the end address of the user space buffer */
1211         u32 end_page;
1212         /* The page of the start address of the user space buffer */
1213         u32 start_page;
1214         /* The range in pages */
1215         u32 num_pages;
1216         /* Array of pointers to page */
1217         struct page **page_array;
1218         /* Array of lli */
1219         struct sep_lli_entry *lli_array;
1220         /* Map array */
1221         struct sep_dma_map *map_array;
1222
1223         /* Set start and end pages and num pages */
1224         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1225         start_page = app_virt_addr >> PAGE_SHIFT;
1226         num_pages = end_page - start_page + 1;
1227
1228         dev_dbg(&sep->pdev->dev,
1229                 "[PID%d] lock user pages app_virt_addr is %x\n",
1230                         current->pid, app_virt_addr);
1231
1232         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1233                                         current->pid, data_size);
1234         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1235                                         current->pid, start_page);
1236         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1237                                         current->pid, end_page);
1238         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1239                                         current->pid, num_pages);
1240
1241         /* Allocate array of pages structure pointers */
1242         page_array = kmalloc_array(num_pages, sizeof(struct page *),
1243                                    GFP_ATOMIC);
1244         if (!page_array) {
1245                 error = -ENOMEM;
1246                 goto end_function;
1247         }
1248
1249         map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
1250                                   GFP_ATOMIC);
1251         if (!map_array) {
1252                 error = -ENOMEM;
1253                 goto end_function_with_error1;
1254         }
1255
1256         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1257                                   GFP_ATOMIC);
1258         if (!lli_array) {
1259                 error = -ENOMEM;
1260                 goto end_function_with_error2;
1261         }
1262
1263         /* Convert the application virtual address into a set of physical */
1264         result = get_user_pages_fast(app_virt_addr, num_pages,
1265                 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);
1266
1267         /* Check the number of pages locked - if not all then exit with error */
1268         if (result != num_pages) {
1269                 dev_warn(&sep->pdev->dev,
1270                         "[PID%d] not all pages locked by get_user_pages, "
1271                         "result 0x%X, num_pages 0x%X\n",
1272                                 current->pid, result, num_pages);
1273                 error = -ENOMEM;
1274                 goto end_function_with_error3;
1275         }
1276
1277         dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
1278                                         current->pid);
1279
1280         /*
1281          * Fill the array using page array data and
1282          * map the pages - this action will also flush the cache as needed
1283          */
1284         for (count = 0; count < num_pages; count++) {
1285                 /* Fill the map array */
1286                 map_array[count].dma_addr =
1287                         dma_map_page(&sep->pdev->dev, page_array[count],
1288                         0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1289
1290                 map_array[count].size = PAGE_SIZE;
1291
1292                 /* Fill the lli array entry */
1293                 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1294                 lli_array[count].block_size = PAGE_SIZE;
1295
1296                 dev_dbg(&sep->pdev->dev,
1297                         "[PID%d] lli_array[%x].bus_address is %08lx, "
1298                         "lli_array[%x].block_size is (hex) %x\n", current->pid,
1299                         count, (unsigned long)lli_array[count].bus_address,
1300                         count, lli_array[count].block_size);
1301         }
1302
1303         /* Check the offset for the first page */
1304         lli_array[0].bus_address =
1305                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1306
1307         /* Check that not all the data is in the first page only */
1308         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1309                 lli_array[0].block_size = data_size;
1310         else
1311                 lli_array[0].block_size =
1312                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1313
1314                 dev_dbg(&sep->pdev->dev,
1315                         "[PID%d] After check if page 0 has all data\n",
1316                         current->pid);
1317                 dev_dbg(&sep->pdev->dev,
1318                         "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1319                         "lli_array[0].block_size is (hex) %x\n",
1320                         current->pid,
1321                         (unsigned long)lli_array[0].bus_address,
1322                         lli_array[0].block_size);
1323
1324
1325         /* Check the size of the last page */
1326         if (num_pages > 1) {
1327                 lli_array[num_pages - 1].block_size =
1328                         (app_virt_addr + data_size) & (~PAGE_MASK);
1329                 if (lli_array[num_pages - 1].block_size == 0)
1330                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1331
1332                 dev_dbg(&sep->pdev->dev,
1333                         "[PID%d] After last page size adjustment\n",
1334                         current->pid);
1335                 dev_dbg(&sep->pdev->dev,
1336                         "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1337                         "lli_array[%x].block_size is (hex) %x\n",
1338                         current->pid,
1339                         num_pages - 1,
1340                         (unsigned long)lli_array[num_pages - 1].bus_address,
1341                         num_pages - 1,
1342                         lli_array[num_pages - 1].block_size);
1343         }
1344
1345         /* Set output params according to the in_out flag */
1346         if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1347                 *lli_array_ptr = lli_array;
1348                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1349                                                                 num_pages;
1350                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1351                                                                 page_array;
1352                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1353                                                                 map_array;
1354                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1355                                                                 num_pages;
1356                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
1357         } else {
1358                 *lli_array_ptr = lli_array;
1359                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1360                                                                 num_pages;
1361                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1362                                                                 page_array;
1363                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1364                                                                 map_array;
1365                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1366                                         out_map_num_entries = num_pages;
1367                 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
1368         }
1369         goto end_function;
1370
1371 end_function_with_error3:
1372         /* Free lli array */
1373         kfree(lli_array);
1374
1375 end_function_with_error2:
1376         kfree(map_array);
1377
1378 end_function_with_error1:
1379         /* Free page array */
1380         kfree(page_array);
1381
1382 end_function:
1383         return error;
1384 }
1385
1386 /**
1387  *      sep_lli_table_secure_dma - get lli array for IMR addresses
1388  *      @sep: pointer to struct sep_device
1389  *      @app_virt_addr: user memory data buffer
1390  *      @data_size: size of data buffer
1391  *      @lli_array_ptr: lli array
1392  *      @in_out_flag: not used
1393  *      @dma_ctx: pointer to struct sep_dma_context
1394  *
1395  *      This function creates lli tables for outputting data to
1396  *      IMR memory, which is memory that cannot be accessed by the
1397  *      the x86 processor.
1398  */
1399 static int sep_lli_table_secure_dma(struct sep_device *sep,
1400         u32 app_virt_addr,
1401         u32 data_size,
1402         struct sep_lli_entry **lli_array_ptr,
1403         int in_out_flag,
1404         struct sep_dma_context *dma_ctx)
1405
1406 {
1407         int error = 0;
1408         u32 count;
1409         /* The the page of the end address of the user space buffer */
1410         u32 end_page;
1411         /* The page of the start address of the user space buffer */
1412         u32 start_page;
1413         /* The range in pages */
1414         u32 num_pages;
1415         /* Array of lli */
1416         struct sep_lli_entry *lli_array;
1417
1418         /* Set start and end pages and num pages */
1419         end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1420         start_page = app_virt_addr >> PAGE_SHIFT;
1421         num_pages = end_page - start_page + 1;
1422
1423         dev_dbg(&sep->pdev->dev,
1424                 "[PID%d] lock user pages  app_virt_addr is %x\n",
1425                 current->pid, app_virt_addr);
1426
1427         dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1428                 current->pid, data_size);
1429         dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1430                 current->pid, start_page);
1431         dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1432                 current->pid, end_page);
1433         dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1434                 current->pid, num_pages);
1435
1436         lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
1437                                   GFP_ATOMIC);
1438         if (!lli_array)
1439                 return -ENOMEM;
1440
1441         /*
1442          * Fill the lli_array
1443          */
1444         start_page = start_page << PAGE_SHIFT;
1445         for (count = 0; count < num_pages; count++) {
1446                 /* Fill the lli array entry */
1447                 lli_array[count].bus_address = start_page;
1448                 lli_array[count].block_size = PAGE_SIZE;
1449
1450                 start_page += PAGE_SIZE;
1451
1452                 dev_dbg(&sep->pdev->dev,
1453                         "[PID%d] lli_array[%x].bus_address is %08lx, "
1454                         "lli_array[%x].block_size is (hex) %x\n",
1455                         current->pid,
1456                         count, (unsigned long)lli_array[count].bus_address,
1457                         count, lli_array[count].block_size);
1458         }
1459
1460         /* Check the offset for the first page */
1461         lli_array[0].bus_address =
1462                 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1463
1464         /* Check that not all the data is in the first page only */
1465         if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1466                 lli_array[0].block_size = data_size;
1467         else
1468                 lli_array[0].block_size =
1469                         PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1470
1471         dev_dbg(&sep->pdev->dev,
1472                 "[PID%d] After check if page 0 has all data\n"
1473                 "lli_array[0].bus_address is (hex) %08lx, "
1474                 "lli_array[0].block_size is (hex) %x\n",
1475                 current->pid,
1476                 (unsigned long)lli_array[0].bus_address,
1477                 lli_array[0].block_size);
1478
1479         /* Check the size of the last page */
1480         if (num_pages > 1) {
1481                 lli_array[num_pages - 1].block_size =
1482                         (app_virt_addr + data_size) & (~PAGE_MASK);
1483                 if (lli_array[num_pages - 1].block_size == 0)
1484                         lli_array[num_pages - 1].block_size = PAGE_SIZE;
1485
1486                 dev_dbg(&sep->pdev->dev,
1487                         "[PID%d] After last page size adjustment\n"
1488                         "lli_array[%x].bus_address is (hex) %08lx, "
1489                         "lli_array[%x].block_size is (hex) %x\n",
1490                         current->pid, num_pages - 1,
1491                         (unsigned long)lli_array[num_pages - 1].bus_address,
1492                         num_pages - 1,
1493                         lli_array[num_pages - 1].block_size);
1494         }
1495         *lli_array_ptr = lli_array;
1496         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1497         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1498         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1499         dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1500
1501         return error;
1502 }
1503
1504 /**
1505  * sep_calculate_lli_table_max_size - size the LLI table
1506  * @sep: pointer to struct sep_device
1507  * @lli_in_array_ptr
1508  * @num_array_entries
1509  * @last_table_flag
1510  *
1511  * This function calculates the size of data that can be inserted into
1512  * the lli table from this array, such that either the table is full
1513  * (all entries are entered), or there are no more entries in the
1514  * lli array
1515  */
1516 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1517         struct sep_lli_entry *lli_in_array_ptr,
1518         u32 num_array_entries,
1519         u32 *last_table_flag)
1520 {
1521         u32 counter;
1522         /* Table data size */
1523         u32 table_data_size = 0;
1524         /* Data size for the next table */
1525         u32 next_table_data_size;
1526
1527         *last_table_flag = 0;
1528
1529         /*
1530          * Calculate the data in the out lli table till we fill the whole
1531          * table or till the data has ended
1532          */
1533         for (counter = 0;
1534                 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1535                         (counter < num_array_entries); counter++)
1536                 table_data_size += lli_in_array_ptr[counter].block_size;
1537
1538         /*
1539          * Check if we reached the last entry,
1540          * meaning this ia the last table to build,
1541          * and no need to check the block alignment
1542          */
1543         if (counter == num_array_entries) {
1544                 /* Set the last table flag */
1545                 *last_table_flag = 1;
1546                 goto end_function;
1547         }
1548
1549         /*
1550          * Calculate the data size of the next table.
1551          * Stop if no entries left or if data size is more the DMA restriction
1552          */
1553         next_table_data_size = 0;
1554         for (; counter < num_array_entries; counter++) {
1555                 next_table_data_size += lli_in_array_ptr[counter].block_size;
1556                 if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1557                         break;
1558         }
1559
1560         /*
1561          * Check if the next table data size is less then DMA rstriction.
1562          * if it is - recalculate the current table size, so that the next
1563          * table data size will be adaquete for DMA
1564          */
1565         if (next_table_data_size &&
1566                 next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1567
1568                 table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1569                         next_table_data_size);
1570
1571 end_function:
1572         return table_data_size;
1573 }
1574
/**
 * sep_build_lli_table - build an lli table for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: source lli array (page descriptors); entries may be
 *	partially consumed and updated in place on table-size overflow
 * @lli_table_ptr: destination lli table to fill
 * @num_processed_entries_ptr: in/out - incremented by the number of
 *	lli_array entries consumed while building this table
 * @num_table_entries_ptr: out - number of entries written to the table,
 *	including the trailing info entry
 * @table_data_size: exact number of data bytes this table must cover
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry	*lli_array_ptr,
	struct sep_lli_entry	*lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Bytes accumulated into the table so far */
	u32 curr_table_data_size;
	/* Index of the lli array entry currently being copied */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	/* Start at 1 to account for the trailing info entry written below */
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
			current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
				current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid, lli_table_ptr->block_size);

		/*
		 * Check for overflow of the table data: the last array entry
		 * may carry more bytes than the table still needs, in which
		 * case it is split - the table entry is trimmed and the array
		 * entry is advanced past the consumed bytes so the remainder
		 * goes into the next table.
		 */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
					current->pid);

			/*
			 * Update the size of block in the table.
			 * NOTE(review): cpu_to_le32() is applied to an
			 * already-converted value here and mixed with native
			 * arithmetic; this is only a no-op on little-endian
			 * hosts (the Intel targets this driver supports) -
			 * confirm before reusing on big-endian.
			 */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
			cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/*
	 * Set the info entry to default; the caller later overwrites it with
	 * the bus address / size of the next table in the chain (if any).
	 */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Report how many array entries this table consumed */
	*num_processed_entries_ptr += array_counter;

}
1676
1677 /**
1678  * sep_shared_area_virt_to_bus - map shared area to bus address
1679  * @sep: pointer to struct sep_device
1680  * @virt_address: virtual address to convert
1681  *
1682  * This functions returns the physical address inside shared area according
1683  * to the virtual address. It can be either on the external RAM device
1684  * (ioremapped), or on the system RAM
1685  * This implementation is for the external RAM
1686  */
1687 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1688         void *virt_address)
1689 {
1690         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1691                                         current->pid, virt_address);
1692         dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1693                 current->pid,
1694                 (unsigned long)
1695                 sep->shared_bus + (virt_address - sep->shared_addr));
1696
1697         return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1698 }
1699
1700 /**
1701  * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1702  * @sep: pointer to struct sep_device
1703  * @bus_address: bus address to convert
1704  *
1705  * This functions returns the virtual address inside shared area
1706  * according to the physical address. It can be either on the
1707  * external RAM device (ioremapped), or on the system RAM
1708  * This implementation is for the external RAM
1709  */
1710 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1711         dma_addr_t bus_address)
1712 {
1713         dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1714                 current->pid,
1715                 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1716                         (size_t)(bus_address - sep->shared_bus)));
1717
1718         return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1719 }
1720
/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to the first table's sep_lli_entry array
 * @num_table_entries: number of entries in the first table (incl. info entry)
 * @table_data_size: total data size covered by the first table
 *
 * Walk the list of the created tables and print all the data.
 * Compiles to an empty function unless DEBUG is defined; only side
 * effect is dev_dbg output.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
#ifdef DEBUG
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
					current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	/* A bus address of 0xffffffff marks the end of the table chain */
	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, "
			"table_data_size is (hex) %lx\n",
				current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
				current->pid, num_table_entries);

		/*
		 * Print the table entries; the last iteration prints the
		 * info entry itself, and lli_table_ptr ends one past it.
		 */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx "
				"block size is (hex) %x\n", current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Step back to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size "
			"is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address "
			"is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);


		/*
		 * The info entry packs the next table's total data size in
		 * bits 0-23 and its entry count in bits 24-31.
		 */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is "
			"(hex) %lx num_table_entries is"
			" %lx bus_address is%lx\n",
				current->pid,
				table_data_size,
				num_table_entries,
				(unsigned long)lli_table_ptr->bus_address);

		/* Follow the info entry's bus address to the next table */
		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
					current->pid);
#endif
}
1811
1812
1813 /**
1814  * sep_prepare_empty_lli_table - create a blank LLI table
1815  * @sep: pointer to struct sep_device
1816  * @lli_table_addr_ptr: pointer to lli table
1817  * @num_entries_ptr: pointer to number of entries
1818  * @table_data_size_ptr: point to table data size
1819  * @dmatables_region: Optional buffer for DMA tables
1820  * @dma_ctx: DMA context
1821  *
1822  * This function creates empty lli tables when there is no data
1823  */
1824 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1825                 dma_addr_t *lli_table_addr_ptr,
1826                 u32 *num_entries_ptr,
1827                 u32 *table_data_size_ptr,
1828                 void **dmatables_region,
1829                 struct sep_dma_context *dma_ctx)
1830 {
1831         struct sep_lli_entry *lli_table_ptr;
1832
1833         /* Find the area for new table */
1834         lli_table_ptr =
1835                 (struct sep_lli_entry *)(sep->shared_addr +
1836                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1837                 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1838                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1839
1840         if (dmatables_region && *dmatables_region)
1841                 lli_table_ptr = *dmatables_region;
1842
1843         lli_table_ptr->bus_address = 0;
1844         lli_table_ptr->block_size = 0;
1845
1846         lli_table_ptr++;
1847         lli_table_ptr->bus_address = 0xFFFFFFFF;
1848         lli_table_ptr->block_size = 0;
1849
1850         /* Set the output parameter value */
1851         *lli_table_addr_ptr = sep->shared_bus +
1852                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1853                 dma_ctx->num_lli_tables_created *
1854                 sizeof(struct sep_lli_entry) *
1855                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1856
1857         /* Set the num of entries and table data size for empty table */
1858         *num_entries_ptr = 2;
1859         *table_data_size_ptr = 0;
1860
1861         /* Update the number of created tables */
1862         dma_ctx->num_lli_tables_created++;
1863 }
1864
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user or kernel virtual address of the input buffer
 * @data_size: number of input bytes to map
 * @block_size: cipher/hash block size; non-final tables are truncated to
 *	a multiple of this
 * @lli_table_ptr: out - bus address of the first LLI table
 * @num_entries_ptr: out - number of entries in the first table
 * @table_data_size_ptr: out - data size covered by the first table
 * @is_kva: set for kernel data (kernel crypt io call)
 * @dmatables_region: optional staging buffer for the DMA tables; when set,
 *	tables are built there instead of directly in the shared area
 * @dma_ctx: DMA context tracking pages, maps and table counts
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx
)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Where the next table is written (staging buffer or shared area) */
	void *lli_table_alloc_addr = NULL;
	/* The next table's final location in the shared area */
	void *dma_lli_table_alloc_addr = NULL;
	/* Current table's location in the shared area */
	void *dma_in_lli_table_ptr = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prepare intput dma tbl data size: (hex) %x\n",
		current->pid, data_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
					current->pid, block_size);

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(sep,
						dmatables_region,
						dma_ctx,
						1);
			if (error)
				return error;
		}
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr,
				dmatables_region, dma_ctx);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva)
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] output sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries =
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

	/*
	 * From here the "dma_" pointers track the shared-area addresses
	 * (which the SEP device will see) while lli_table_alloc_addr may be
	 * redirected into the staging buffer.
	 */
	dma_lli_table_alloc_addr = lli_table_alloc_addr;
	if (dmatables_region) {
		error = sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					sep_lli_entries);
		if (error)
			goto end_function_error;
		lli_table_alloc_addr = *dmatables_region;
	}

	/* Loop till all the entries in in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* Fail if the new table would overrun the shared area */
		if (dma_lli_table_alloc_addr >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* Update the number of created tables */
		dma_ctx->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output table_data_size is (hex) %x\n",
				current->pid,
				table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {

			/*
			 * First table: report its shared-area bus address
			 * and geometry through the output parameters
			 */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_ptr);

		} else {
			/*
			 * Chain this table off the previous table's info
			 * entry: bus address plus packed entry count
			 * (bits 24-31) and data size (bits 0-23)
			 */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
							dma_in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables (only possible when built in the shared area) */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
			*num_entries_ptr, *table_data_size_ptr);
	}

	/* The array of the pages is no longer needed on success */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(lli_array_ptr);
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
	return error;

}
2089
2090 /**
2091  * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2092  * @sep: pointer to struct sep_device
2093  * @lli_in_array:
2094  * @sep_in_lli_entries:
2095  * @lli_out_array:
2096  * @sep_out_lli_entries
2097  * @block_size
2098  * @lli_table_in_ptr
2099  * @lli_table_out_ptr
2100  * @in_num_entries_ptr
2101  * @out_num_entries_ptr
2102  * @table_data_size_ptr
2103  *
2104  * This function creates the input and output DMA tables for
2105  * symmetric operations (AES/DES) according to the block
2106  * size from LLI arays
2107  * Note that all bus addresses that are passed to the SEP
2108  * are in 32 bit format; the SEP is a 32 bit device
2109  */
2110 static int sep_construct_dma_tables_from_lli(
2111         struct sep_device *sep,
2112         struct sep_lli_entry *lli_in_array,
2113         u32     sep_in_lli_entries,
2114         struct sep_lli_entry *lli_out_array,
2115         u32     sep_out_lli_entries,
2116         u32     block_size,
2117         dma_addr_t *lli_table_in_ptr,
2118         dma_addr_t *lli_table_out_ptr,
2119         u32     *in_num_entries_ptr,
2120         u32     *out_num_entries_ptr,
2121         u32     *table_data_size_ptr,
2122         void    **dmatables_region,
2123         struct sep_dma_context *dma_ctx)
2124 {
2125         /* Points to the area where next lli table can be allocated */
2126         void *lli_table_alloc_addr = NULL;
2127         /*
2128          * Points to the area in shared region where next lli table
2129          * can be allocated
2130          */
2131         void *dma_lli_table_alloc_addr = NULL;
2132         /* Input lli table in dmatables_region or shared region */
2133         struct sep_lli_entry *in_lli_table_ptr = NULL;
2134         /* Input lli table location in the shared region */
2135         struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2136         /* Output lli table in dmatables_region or shared region */
2137         struct sep_lli_entry *out_lli_table_ptr = NULL;
2138         /* Output lli table location in the shared region */
2139         struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2140         /* Pointer to the info entry of the table - the last entry */
2141         struct sep_lli_entry *info_in_entry_ptr = NULL;
2142         /* Pointer to the info entry of the table - the last entry */
2143         struct sep_lli_entry *info_out_entry_ptr = NULL;
2144         /* Points to the first entry to be processed in the lli_in_array */
2145         u32 current_in_entry = 0;
2146         /* Points to the first entry to be processed in the lli_out_array */
2147         u32 current_out_entry = 0;
2148         /* Max size of the input table */
2149         u32 in_table_data_size = 0;
2150         /* Max size of the output table */
2151         u32 out_table_data_size = 0;
2152         /* Flag te signifies if this is the last tables build */
2153         u32 last_table_flag = 0;
2154         /* The data size that should be in table */
2155         u32 table_data_size = 0;
2156         /* Number of entries in the input table */
2157         u32 num_entries_in_table = 0;
2158         /* Number of entries in the output table */
2159         u32 num_entries_out_table = 0;
2160
2161         if (!dma_ctx) {
2162                 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2163                 return -EINVAL;
2164         }
2165
2166         /* Initiate to point after the message area */
2167         lli_table_alloc_addr = (void *)(sep->shared_addr +
2168                 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2169                 (dma_ctx->num_lli_tables_created *
2170                 (sizeof(struct sep_lli_entry) *
2171                 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2172         dma_lli_table_alloc_addr = lli_table_alloc_addr;
2173
2174         if (dmatables_region) {
2175                 /* 2 for both in+out table */
2176                 if (sep_allocate_dmatables_region(sep,
2177                                         dmatables_region,
2178                                         dma_ctx,
2179                                         2*sep_in_lli_entries))
2180                         return -ENOMEM;
2181                 lli_table_alloc_addr = *dmatables_region;
2182         }
2183
2184         /* Loop till all the entries in in array are not processed */
2185         while (current_in_entry < sep_in_lli_entries) {
2186                 /* Set the new input and output tables */
2187                 in_lli_table_ptr =
2188                         (struct sep_lli_entry *)lli_table_alloc_addr;
2189                 dma_in_lli_table_ptr =
2190                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2191
2192                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2193                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2194                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2195                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2196
2197                 /* Set the first output tables */
2198                 out_lli_table_ptr =
2199                         (struct sep_lli_entry *)lli_table_alloc_addr;
2200                 dma_out_lli_table_ptr =
2201                         (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2202
2203                 /* Check if the DMA table area limit was overrun */
2204                 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2205                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2206                         ((void *)sep->shared_addr +
2207                         SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2208                         SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2209
2210                         dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2211                         return -ENOMEM;
2212                 }
2213
2214                 /* Update the number of the lli tables created */
2215                 dma_ctx->num_lli_tables_created += 2;
2216
2217                 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2218                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2219                 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2220                         SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2221
2222                 /* Calculate the maximum size of data for input table */
2223                 in_table_data_size =
2224                         sep_calculate_lli_table_max_size(sep,
2225                         &lli_in_array[current_in_entry],
2226                         (sep_in_lli_entries - current_in_entry),
2227                         &last_table_flag);
2228
2229                 /* Calculate the maximum size of data for output table */
2230                 out_table_data_size =
2231                         sep_calculate_lli_table_max_size(sep,
2232                         &lli_out_array[current_out_entry],
2233                         (sep_out_lli_entries - current_out_entry),
2234                         &last_table_flag);
2235
2236                 if (!last_table_flag) {
2237                         in_table_data_size = (in_table_data_size /
2238                                 block_size) * block_size;
2239                         out_table_data_size = (out_table_data_size /
2240                                 block_size) * block_size;
2241                 }
2242
2243                 table_data_size = in_table_data_size;
2244                 if (table_data_size > out_table_data_size)
2245                         table_data_size = out_table_data_size;
2246
2247                 dev_dbg(&sep->pdev->dev,
2248                         "[PID%d] construct tables from lli"
2249                         " in_table_data_size is (hex) %x\n", current->pid,
2250                         in_table_data_size);
2251
2252                 dev_dbg(&sep->pdev->dev,
2253                         "[PID%d] construct tables from lli"
2254                         "out_table_data_size is (hex) %x\n", current->pid,
2255                         out_table_data_size);
2256
2257                 /* Construct input lli table */
2258                 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2259                         in_lli_table_ptr,
2260                         &current_in_entry,
2261                         &num_entries_in_table,
2262                         table_data_size);
2263
2264                 /* Construct output lli table */
2265                 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2266                         out_lli_table_ptr,
2267                         &current_out_entry,
2268                         &num_entries_out_table,
2269                         table_data_size);
2270
2271                 /* If info entry is null - this is the first table built */
2272                 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2273                         /* Set the output parameters to physical addresses */
2274                         *lli_table_in_ptr =
2275                         sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2276
2277                         *in_num_entries_ptr = num_entries_in_table;
2278
2279                         *lli_table_out_ptr =
2280                                 sep_shared_area_virt_to_bus(sep,
2281                                 dma_out_lli_table_ptr);
2282
2283                         *out_num_entries_ptr = num_entries_out_table;
2284                         *table_data_size_ptr = table_data_size;
2285
2286                         dev_dbg(&sep->pdev->dev,
2287                                 "[PID%d] output lli_table_in_ptr is %08lx\n",
2288                                 current->pid,
2289                                 (unsigned long)*lli_table_in_ptr);
2290                         dev_dbg(&sep->pdev->dev,
2291                                 "[PID%d] output lli_table_out_ptr is %08lx\n",
2292                                 current->pid,
2293                                 (unsigned long)*lli_table_out_ptr);
2294                 } else {
2295                         /* Update the info entry of the previous in table */
2296                         info_in_entry_ptr->bus_address =
2297                                 sep_shared_area_virt_to_bus(sep,
2298                                 dma_in_lli_table_ptr);
2299
2300                         info_in_entry_ptr->block_size =
2301                                 ((num_entries_in_table) << 24) |
2302                                 (table_data_size);
2303
2304                         /* Update the info entry of the previous in table */
2305                         info_out_entry_ptr->bus_address =
2306                                 sep_shared_area_virt_to_bus(sep,
2307                                 dma_out_lli_table_ptr);
2308
2309                         info_out_entry_ptr->block_size =
2310                                 ((num_entries_out_table) << 24) |
2311                                 (table_data_size);
2312
2313                         dev_dbg(&sep->pdev->dev,
2314                                 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2315                                 current->pid,
2316                                 (unsigned long)info_in_entry_ptr->bus_address,
2317                                 info_in_entry_ptr->block_size);
2318
2319                         dev_dbg(&sep->pdev->dev,
2320                                 "[PID%d] output lli_table_out_ptr:"
2321                                 "%08lx  %08x\n",
2322                                 current->pid,
2323                                 (unsigned long)info_out_entry_ptr->bus_address,
2324                                 info_out_entry_ptr->block_size);
2325                 }
2326
2327                 /* Save the pointer to the info entry of the current tables */
2328                 info_in_entry_ptr = in_lli_table_ptr +
2329                         num_entries_in_table - 1;
2330                 info_out_entry_ptr = out_lli_table_ptr +
2331                         num_entries_out_table - 1;
2332
2333                 dev_dbg(&sep->pdev->dev,
2334                         "[PID%d] output num_entries_out_table is %x\n",
2335                         current->pid,
2336                         (u32)num_entries_out_table);
2337                 dev_dbg(&sep->pdev->dev,
2338                         "[PID%d] output info_in_entry_ptr is %lx\n",
2339                         current->pid,
2340                         (unsigned long)info_in_entry_ptr);
2341                 dev_dbg(&sep->pdev->dev,
2342                         "[PID%d] output info_out_entry_ptr is %lx\n",
2343                         current->pid,
2344                         (unsigned long)info_out_entry_ptr);
2345         }
2346
2347         /* Print input tables */
2348         if (!dmatables_region) {
2349                 sep_debug_print_lli_tables(
2350                         sep,
2351                         (struct sep_lli_entry *)
2352                         sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2353                         *in_num_entries_ptr,
2354                         *table_data_size_ptr);
2355         }
2356
2357         /* Print output tables */
2358         if (!dmatables_region) {
2359                 sep_debug_print_lli_tables(
2360                         sep,
2361                         (struct sep_lli_entry *)
2362                         sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2363                         *out_num_entries_ptr,
2364                         *table_data_size_ptr);
2365         }
2366
2367         return 0;
2368 }
2369
/**
 * sep_prepare_input_output_dma_table - prepare DMA I/O table
 * @sep: pointer to struct sep_device
 * @app_virt_in_addr: virtual address of the input data buffer
 * @app_virt_out_addr: virtual address of the output data buffer
 * @data_size: number of bytes to be covered by the tables
 * @block_size: cipher block size the table data size is clamped to
 * @lli_table_in_ptr: out: bus address of the first input LLI table
 * @lli_table_out_ptr: out: bus address of the first output LLI table
 * @in_num_entries_ptr: out: number of entries in the first input table
 * @out_num_entries_ptr: out: number of entries in the first output table
 * @table_data_size_ptr: out: data bytes described by the first table pair
 * @is_kva: set for kernel data; used only for kernel crypto module
 * @dmatables_region: if non-NULL, tables are built in this driver-side
 *                    region rather than directly in the shared area
 * @dma_ctx: per-transaction DMA context; resources for this DCB are
 *           recorded at index dma_ctx->nr_dcb_creat
 *
 * This function builds input and output DMA tables for synchronic
 * symmetric operations (AES, DES, HASH). It also checks that each table
 * is of the modular block size
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
	unsigned long app_virt_in_addr,
	unsigned long app_virt_out_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)

{
	int error = 0;
	/* LLI entries describing the locked input pages */
	struct sep_lli_entry *lli_in_array;
	/* LLI entries describing the locked output pages */
	struct sep_lli_entry *lli_out_array;

	if (!dma_ctx) {
		error = -EINVAL;
		goto end_function;
	}

	if (data_size == 0) {
		/*
		 * Prepare empty table for input and output; no pages are
		 * locked in this path, so the DCB counter is bumped and we
		 * return without touching dma_res_arr page/map arrays.
		 */
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(
					sep,
					dmatables_region,
					dma_ctx,
					2);
			if (error)
				goto end_function;
		}
		sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
			in_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
			out_num_entries_ptr, table_data_size_ptr,
			dmatables_region, dma_ctx);

		goto update_dcb_counter;
	}

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;

	/* Lock the pages of the buffer and translate them to pages */
	if (is_kva) {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
						current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for input "
				"virtual buffer failed\n", current->pid);

			goto end_function;
		}

		dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
						current->pid);
		error = sep_lock_kernel_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);

		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_kernel_pages for output "
				"virtual buffer failed\n", current->pid);

			/* Input pages already locked: free them on the way out */
			goto end_function_free_lli_in;
		}

	}

	else {
		dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
						current->pid);
		error = sep_lock_user_pages(sep, app_virt_in_addr,
				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
				dma_ctx);
		if (error) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] sep_lock_user_pages for input "
				"virtual buffer failed\n", current->pid);

			goto end_function;
		}

		if (dma_ctx->secure_dma) {
			/* secure_dma requires use of non accessible memory */
			dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
				current->pid);
			error = sep_lli_table_secure_dma(sep,
				app_virt_out_addr, data_size, &lli_out_array,
				SEP_DRIVER_OUT_FLAG, dma_ctx);
			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] secure dma table setup "
					" for output virtual buffer failed\n",
					current->pid);

				goto end_function_free_lli_in;
			}
		} else {
			/* For normal, non-secure dma */
			dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
				current->pid);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] Locking user output pages\n",
				current->pid);

			error = sep_lock_user_pages(sep, app_virt_out_addr,
				data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
				dma_ctx);

			if (error) {
				dev_warn(&sep->pdev->dev,
					"[PID%d] sep_lock_user_pages"
					" for output virtual buffer failed\n",
					current->pid);

				goto end_function_free_lli_in;
			}
		}
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
		current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* Call the function that creates table from the lli arrays */
	dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
					current->pid);
	error = sep_construct_dma_tables_from_lli(
			sep, lli_in_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								in_num_pages,
			lli_out_array,
			dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
								out_num_pages,
			block_size, lli_table_in_ptr, lli_table_out_ptr,
			in_num_entries_ptr, out_num_entries_ptr,
			table_data_size_ptr, dmatables_region, dma_ctx);

	if (error) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] sep_construct_dma_tables_from_lli failed\n",
			current->pid);
		goto end_function_with_error;
	}

	kfree(lli_out_array);
	kfree(lli_in_array);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;

	goto end_function;

	/*
	 * Error unwind ladder: end_function_with_error releases the output
	 * side and deliberately FALLS THROUGH into end_function_free_lli_in,
	 * which releases the input side. Do not reorder these labels.
	 */
end_function_with_error:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	kfree(lli_out_array);


end_function_free_lli_in:
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	kfree(lli_in_array);

end_function:

	return error;

}
2588
2589 /**
2590  * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2591  * @app_in_address: unsigned long; for data buffer in (user space)
2592  * @app_out_address: unsigned long; for data buffer out (user space)
2593  * @data_in_size: u32; for size of data
2594  * @block_size: u32; for block size
2595  * @tail_block_size: u32; for size of tail block
2596  * @isapplet: bool; to indicate external app
2597  * @is_kva: bool; kernel buffer; only used for kernel crypto module
2598  * @secure_dma; indicates whether this is secure_dma using IMR
2599  *
2600  * This function prepares the linked DMA tables and puts the
2601  * address for the linked list of tables inta a DCB (data control
2602  * block) the address of which is known by the SEP hardware
2603  * Note that all bus addresses that are passed to the SEP
2604  * are in 32 bit format; the SEP is a 32 bit device
2605  */
2606 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2607         unsigned long  app_in_address,
2608         unsigned long  app_out_address,
2609         u32  data_in_size,
2610         u32  block_size,
2611         u32  tail_block_size,
2612         bool isapplet,
2613         bool    is_kva,
2614         bool    secure_dma,
2615         struct sep_dcblock *dcb_region,
2616         void **dmatables_region,
2617         struct sep_dma_context **dma_ctx,
2618         struct scatterlist *src_sg,
2619         struct scatterlist *dst_sg)
2620 {
2621         int error = 0;
2622         /* Size of tail */
2623         u32 tail_size = 0;
2624         /* Address of the created DCB table */
2625         struct sep_dcblock *dcb_table_ptr = NULL;
2626         /* The physical address of the first input DMA table */
2627         dma_addr_t in_first_mlli_address = 0;
2628         /* Number of entries in the first input DMA table */
2629         u32  in_first_num_entries = 0;
2630         /* The physical address of the first output DMA table */
2631         dma_addr_t  out_first_mlli_address = 0;
2632         /* Number of entries in the first output DMA table */
2633         u32  out_first_num_entries = 0;
2634         /* Data in the first input/output table */
2635         u32  first_data_size = 0;
2636
2637         dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2638                 current->pid, app_in_address);
2639
2640         dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2641                 current->pid, app_out_address);
2642
2643         dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2644                 current->pid, data_in_size);
2645
2646         dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2647                 current->pid, block_size);
2648
2649         dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2650                 current->pid, tail_block_size);
2651
2652         dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2653                 current->pid, isapplet);
2654
2655         dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2656                 current->pid, is_kva);
2657
2658         dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2659                 current->pid, src_sg);
2660
2661         dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2662                 current->pid, dst_sg);
2663
2664         if (!dma_ctx) {
2665                 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2666                                                 current->pid);
2667                 error = -EINVAL;
2668                 goto end_function;
2669         }
2670
2671         if (*dma_ctx) {
2672                 /* In case there are multiple DCBs for this transaction */
2673                 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2674                                                 current->pid);
2675         } else {
2676                 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2677                 if (!(*dma_ctx)) {
2678                         dev_dbg(&sep->pdev->dev,
2679                                 "[PID%d] Not enough memory for DMA context\n",
2680                                 current->pid);
2681                   error = -ENOMEM;
2682                   goto end_function;
2683                 }
2684                 dev_dbg(&sep->pdev->dev,
2685                         "[PID%d] Created DMA context addr at 0x%p\n",
2686                         current->pid, *dma_ctx);
2687         }
2688
2689         (*dma_ctx)->secure_dma = secure_dma;
2690
2691         /* these are for kernel crypto only */
2692         (*dma_ctx)->src_sg = src_sg;
2693         (*dma_ctx)->dst_sg = dst_sg;
2694
2695         if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2696                 /* No more DCBs to allocate */
2697                 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2698                                                 current->pid);
2699                 error = -ENOSPC;
2700                 goto end_function_error;
2701         }
2702
2703         /* Allocate new DCB */
2704         if (dcb_region) {
2705                 dcb_table_ptr = dcb_region;
2706         } else {
2707                 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2708                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2709                         ((*dma_ctx)->nr_dcb_creat *
2710                                                 sizeof(struct sep_dcblock)));
2711         }
2712
2713         /* Set the default values in the DCB */
2714         dcb_table_ptr->input_mlli_address = 0;
2715         dcb_table_ptr->input_mlli_num_entries = 0;
2716         dcb_table_ptr->input_mlli_data_size = 0;
2717         dcb_table_ptr->output_mlli_address = 0;
2718         dcb_table_ptr->output_mlli_num_entries = 0;
2719         dcb_table_ptr->output_mlli_data_size = 0;
2720         dcb_table_ptr->tail_data_size = 0;
2721         dcb_table_ptr->out_vr_tail_pt = 0;
2722
2723         if (isapplet) {
2724
2725                 /* Check if there is enough data for DMA operation */
2726                 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2727                         if (is_kva) {
2728                                 error = -ENODEV;
2729                                 goto end_function_error;
2730                         } else {
2731                                 if (copy_from_user(dcb_table_ptr->tail_data,
2732                                         (void __user *)app_in_address,
2733                                         data_in_size)) {
2734                                         error = -EFAULT;
2735                                         goto end_function_error;
2736                                 }
2737                         }
2738
2739                         dcb_table_ptr->tail_data_size = data_in_size;
2740
2741                         /* Set the output user-space address for mem2mem op */
2742                         if (app_out_address)
2743                                 dcb_table_ptr->out_vr_tail_pt =
2744                                 (aligned_u64)app_out_address;
2745
2746                         /*
2747                          * Update both data length parameters in order to avoid
2748                          * second data copy and allow building of empty mlli
2749                          * tables
2750                          */
2751                         tail_size = 0x0;
2752                         data_in_size = 0x0;
2753
2754                 } else {
2755                         if (!app_out_address) {
2756                                 tail_size = data_in_size % block_size;
2757                                 if (!tail_size) {
2758                                         if (tail_block_size == block_size)
2759                                                 tail_size = block_size;
2760                                 }
2761                         } else {
2762                                 tail_size = 0;
2763                         }
2764                 }
2765                 if (tail_size) {
2766                         if (tail_size > sizeof(dcb_table_ptr->tail_data))
2767                                 return -EINVAL;
2768                         if (is_kva) {
2769                                 error = -ENODEV;
2770                                 goto end_function_error;
2771                         } else {
2772                                 /* We have tail data - copy it to DCB */
2773                                 if (copy_from_user(dcb_table_ptr->tail_data,
2774                                         (void __user *)(app_in_address +
2775                                         data_in_size - tail_size), tail_size)) {
2776                                         error = -EFAULT;
2777                                         goto end_function_error;
2778                                 }
2779                         }
2780                         if (app_out_address)
2781                                 /*
2782                                  * Calculate the output address
2783                                  * according to tail data size
2784                                  */
2785                                 dcb_table_ptr->out_vr_tail_pt =
2786                                         (aligned_u64)app_out_address +
2787                                         data_in_size - tail_size;
2788
2789                         /* Save the real tail data size */
2790                         dcb_table_ptr->tail_data_size = tail_size;
2791                         /*
2792                          * Update the data size without the tail
2793                          * data size AKA data for the dma
2794                          */
2795                         data_in_size = (data_in_size - tail_size);
2796                 }
2797         }
2798         /* Check if we need to build only input table or input/output */
2799         if (app_out_address) {
2800                 /* Prepare input/output tables */
2801                 error = sep_prepare_input_output_dma_table(sep,
2802                                 app_in_address,
2803                                 app_out_address,
2804                                 data_in_size,
2805                                 block_size,
2806                                 &in_first_mlli_address,
2807                                 &out_first_mlli_address,
2808                                 &in_first_num_entries,
2809                                 &out_first_num_entries,
2810                                 &first_data_size,
2811                                 is_kva,
2812                                 dmatables_region,
2813                                 *dma_ctx);
2814         } else {
2815                 /* Prepare input tables */
2816                 error = sep_prepare_input_dma_table(sep,
2817                                 app_in_address,
2818                                 data_in_size,
2819                                 block_size,
2820                                 &in_first_mlli_address,
2821                                 &in_first_num_entries,
2822                                 &first_data_size,
2823                                 is_kva,
2824                                 dmatables_region,
2825                                 *dma_ctx);
2826         }
2827
2828         if (error) {
2829                 dev_warn(&sep->pdev->dev,
2830                         "prepare DMA table call failed "
2831                         "from prepare DCB call\n");
2832                 goto end_function_error;
2833         }
2834
2835         /* Set the DCB values */
2836         dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2837         dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2838         dcb_table_ptr->input_mlli_data_size = first_data_size;
2839         dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2840         dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2841         dcb_table_ptr->output_mlli_data_size = first_data_size;
2842
2843         goto end_function;
2844
2845 end_function_error:
2846         kfree(*dma_ctx);
2847         *dma_ctx = NULL;
2848
2849 end_function:
2850         return error;
2851
2852 }
2853
2854
2855 /**
2856  * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2857  * @sep: pointer to struct sep_device
2858  * @isapplet: indicates external application (used for kernel access)
2859  * @is_kva: indicates kernel addresses (only used for kernel crypto)
2860  *
2861  * This function frees the DMA tables and DCB
2862  */
2863 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2864         bool is_kva, struct sep_dma_context **dma_ctx)
2865 {
2866         struct sep_dcblock *dcb_table_ptr;
2867         unsigned long pt_hold;
2868         void *tail_pt;
2869
2870         int i = 0;
2871         int error = 0;
2872         int error_temp = 0;
2873
2874         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2875                                         current->pid);
2876         if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
2877                 return 0;
2878
2879         if (!(*dma_ctx)->secure_dma && isapplet) {
2880                 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2881                         current->pid);
2882
2883                 /* Tail stuff is only for non secure_dma */
2884                 /* Set pointer to first DCB table */
2885                 dcb_table_ptr = (struct sep_dcblock *)
2886                         (sep->shared_addr +
2887                         SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2888
2889                 /**
2890                  * Go over each DCB and see if
2891                  * tail pointer must be updated
2892                  */
2893                 for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2894                         if (dcb_table_ptr->out_vr_tail_pt) {
2895                                 pt_hold = (unsigned long)dcb_table_ptr->
2896                                         out_vr_tail_pt;
2897                                 tail_pt = (void *)pt_hold;
2898                                 if (is_kva) {
2899                                         error = -ENODEV;
2900                                         break;
2901                                 } else {
2902                                         error_temp = copy_to_user(
2903                                                 (void __user *)tail_pt,
2904                                                 dcb_table_ptr->tail_data,
2905                                                 dcb_table_ptr->tail_data_size);
2906                                 }
2907                                 if (error_temp) {
2908                                         /* Release the DMA resource */
2909                                         error = -EFAULT;
2910                                         break;
2911                                 }
2912                         }
2913                 }
2914         }
2915
2916         /* Free the output pages, if any */
2917         sep_free_dma_table_data_handler(sep, dma_ctx);
2918
2919         dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2920                                         current->pid);
2921
2922         return error;
2923 }
2924
2925 /**
2926  * sep_prepare_dcb_handler - prepare a control block
2927  * @sep: pointer to struct sep_device
2928  * @arg: pointer to user parameters
2929  * @secure_dma: indicate whether we are using secure_dma on IMR
2930  *
2931  * This function will retrieve the RAR buffer physical addresses, type
2932  * & size corresponding to the RAR handles provided in the buffers vector.
2933  */
2934 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2935                                    bool secure_dma,
2936                                    struct sep_dma_context **dma_ctx)
2937 {
2938         int error;
2939         /* Command arguments */
2940         static struct build_dcb_struct command_args;
2941
2942         /* Get the command arguments */
2943         if (copy_from_user(&command_args, (void __user *)arg,
2944                                         sizeof(struct build_dcb_struct))) {
2945                 error = -EFAULT;
2946                 goto end_function;
2947         }
2948
2949         dev_dbg(&sep->pdev->dev,
2950                 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2951                         current->pid, command_args.app_in_address);
2952         dev_dbg(&sep->pdev->dev,
2953                 "[PID%d] app_out_address is %08llx\n",
2954                         current->pid, command_args.app_out_address);
2955         dev_dbg(&sep->pdev->dev,
2956                 "[PID%d] data_size is %x\n",
2957                         current->pid, command_args.data_in_size);
2958         dev_dbg(&sep->pdev->dev,
2959                 "[PID%d] block_size is %x\n",
2960                         current->pid, command_args.block_size);
2961         dev_dbg(&sep->pdev->dev,
2962                 "[PID%d] tail block_size is %x\n",
2963                         current->pid, command_args.tail_block_size);
2964         dev_dbg(&sep->pdev->dev,
2965                 "[PID%d] is_applet is %x\n",
2966                         current->pid, command_args.is_applet);
2967
2968         if (!command_args.app_in_address) {
2969                 dev_warn(&sep->pdev->dev,
2970                         "[PID%d] null app_in_address\n", current->pid);
2971                 error = -EINVAL;
2972                 goto end_function;
2973         }
2974
2975         error = sep_prepare_input_output_dma_table_in_dcb(sep,
2976                         (unsigned long)command_args.app_in_address,
2977                         (unsigned long)command_args.app_out_address,
2978                         command_args.data_in_size, command_args.block_size,
2979                         command_args.tail_block_size,
2980                         command_args.is_applet, false,
2981                         secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2982
2983 end_function:
2984         return error;
2985
2986 }
2987
2988 /**
2989  * sep_free_dcb_handler - free control block resources
2990  * @sep: pointer to struct sep_device
2991  *
2992  * This function frees the DCB resources and updates the needed
2993  * user-space buffers.
2994  */
2995 static int sep_free_dcb_handler(struct sep_device *sep,
2996                                 struct sep_dma_context **dma_ctx)
2997 {
2998         if (!dma_ctx || !(*dma_ctx)) {
2999                 dev_dbg(&sep->pdev->dev,
3000                         "[PID%d] no dma context defined, nothing to free\n",
3001                         current->pid);
3002                 return -EINVAL;
3003         }
3004
3005         dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3006                 current->pid,
3007                 (*dma_ctx)->nr_dcb_creat);
3008
3009         return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3010 }
3011
/**
 * sep_ioctl - ioctl handler for sep device
 * @filp: pointer to struct file
 * @cmd: command
 * @arg: pointer to argument structure
 *
 * Implement the ioctl methods available on the SEP device.
 * The caller must own the current transaction and must have called
 * mmap first (SEP_LEGACY_MMAP_DONE_OFFSET is checked); otherwise the
 * call fails with -EPROTO.
 */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
		current->pid, cmd);
	dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
		current->pid, *dma_ctx);

	/* Make sure we own this device */
	error = sep_check_transaction_owner(sep);
	if (error) {
		dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Check that sep_mmap has been called before */
	if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
				&call_status->status)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] mmap not called\n", current->pid);
		error = -EPROTO;
		goto end_function;
	}

	/* Check that the command is for SEP device */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
			current->pid);
		/* Only one send-message per transaction is allowed */
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_warn(&sep->pdev->dev,
				"[PID%d] send msg already done\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}
		/* Send command to SEP */
		error = sep_send_command_handler(sep);
		if (!error)
			set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				&call_status->status);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
			current->pid);
		break;
	case SEP_IOCENDTRANSACTION:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION start\n",
			current->pid);
		error = sep_end_transaction_handler(sep, dma_ctx, call_status,
						    my_queue_elem);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCENDTRANSACTION end\n",
			current->pid);
		break;
	case SEP_IOCPREPAREDCB:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB start\n",
			current->pid);
		/*
		 * fall through - secure and non-secure DCB preparation
		 * share the validation below; the cmd value is checked
		 * again to pick the secure_dma flag. Note this means
		 * the SECURE_DMA debug line also prints for the
		 * non-secure command.
		 */
	case SEP_IOCPREPAREDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
			current->pid);
		/* DCBs must be prepared before the message is sent */
		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
				  &call_status->status)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb prep needed before send msg\n",
				current->pid);
			error = -EPROTO;
			goto end_function;
		}

		if (!arg) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] dcb null arg\n", current->pid);
			error = -EINVAL;
			goto end_function;
		}

		if (cmd == SEP_IOCPREPAREDCB) {
			/* No secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, false,
				dma_ctx);
		} else {
			/* Secure dma */
			dev_dbg(&sep->pdev->dev,
				"[PID%d] SEP_IOC_POC (with secure_dma)\n",
				current->pid);

			error = sep_prepare_dcb_handler(sep, arg, true,
				dma_ctx);
		}
		dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
			current->pid);
		break;
	case SEP_IOCFREEDCB:
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
			current->pid);
		/*
		 * fall through - both free commands are handled
		 * identically by sep_free_dcb_handler().
		 */
	case SEP_IOCFREEDCB_SECURE_DMA:
		dev_dbg(&sep->pdev->dev,
			"[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
			current->pid);
		error = sep_free_dcb_handler(sep, dma_ctx);
		dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
			current->pid);
		break;
	default:
		error = -ENOTTY;
		dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
			current->pid);
		break;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);

	return error;
}
3156
/**
 * sep_inthandler - interrupt handler for sep device
 * @irq: interrupt
 * @dev_id: device id
 *
 * Returns IRQ_HANDLED when the interrupt was raised by the SEP
 * (IRR bit 13 set), IRQ_NONE otherwise. On the IRQ_HANDLED path the
 * pending bits are acknowledged by writing IRR back to ICR at the end.
 */
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	unsigned long lock_irq_flag;
	u32 reg_val, reg_val2 = 0;
	struct sep_device *sep = dev_id;
	irqreturn_t int_error = IRQ_HANDLED;

	/* Are we in power save? */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
		return IRQ_NONE;
	}
#endif

	/* Ignore spurious interrupts while no transaction is active */
	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
		return IRQ_NONE;
	}

	/* Read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);

	/* Bit 13 of IRR indicates a SEP-originated interrupt */
	if (reg_val & (0x1 << 13)) {

		/* Lock and update the counter of reply messages */
		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
					sep->send_ct, sep->reply_ct);

		/* Is this a kernel client request */
		if (sep->in_kernel) {
			/* Defer completion to the tasklet; skip GPR2
			 * decoding, but still ack the interrupt below */
			tasklet_schedule(&sep->finish_tasklet);
			goto finished_interrupt;
		}

		/* Is this printf or daemon request? */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		/* SEP finished working; release the working lock before
		 * waking any waiter */
		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		/* GPR2 bits 30/31 distinguish printf and daemon requests
		 * from a normal SEP reply */
		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	/* Acknowledge the interrupt by writing the pending bits to ICR */
	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}
3230
3231 /**
3232  * sep_reconfig_shared_area - reconfigure shared area
3233  * @sep: pointer to struct sep_device
3234  *
3235  * Reconfig the shared area between HOST and SEP - needed in case
3236  * the DX_CC_Init function was called before OS loading.
3237  */
3238 static int sep_reconfig_shared_area(struct sep_device *sep)
3239 {
3240         int ret_val;
3241
3242         /* use to limit waiting for SEP */
3243         unsigned long end_time;
3244
3245         /* Send the new SHARED MESSAGE AREA to the SEP */
3246         dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3247                                 (unsigned long long)sep->shared_bus);
3248
3249         sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3250
3251         /* Poll for SEP response */
3252         ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3253
3254         end_time = jiffies + (WAIT_TIME * HZ);
3255
3256         while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3257                 (ret_val != sep->shared_bus))
3258                 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3259
3260         /* Check the return value (register) */
3261         if (ret_val != sep->shared_bus) {
3262                 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3263                 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3264                 ret_val = -ENOMEM;
3265         } else
3266                 ret_val = 0;
3267
3268         dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3269
3270         return ret_val;
3271 }
3272
/**
 *      sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
 *                                              contexts into use
 *      @sep: SEP device
 *      @dcb_region: DCB region copy (freed and NULLed on return)
 *      @dmatables_region: MLLI/DMA tables copy (freed and NULLed on return)
 *      @dma_ctx: DMA context for current transaction
 *
 *      Copies the thread-private DCB and MLLI/DMA table buffers into
 *      their designated slots in the shared area, after bounds-checking
 *      both against the region limits. Both input buffers are consumed:
 *      they are freed and their pointers cleared on every exit path.
 *      Returns 0 on success, -EINVAL for an empty DCB count, -ENOMEM
 *      when a copy would overflow its shared-area region.
 */
ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
					struct sep_dcblock **dcb_region,
					void **dmatables_region,
					struct sep_dma_context *dma_ctx)
{
	void *dmaregion_free_start = NULL;
	void *dmaregion_free_end = NULL;
	void *dcbregion_free_start = NULL;
	void *dcbregion_free_end = NULL;
	ssize_t error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
		current->pid);

	/* At least one DCB must have been created */
	if (1 > dma_ctx->nr_dcb_creat) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
			 current->pid, dma_ctx->nr_dcb_creat);
		error = -EINVAL;
		goto end_function;
	}

	/* Bounds of the synchronous DMA tables region in the shared area */
	dmaregion_free_start = sep->shared_addr
				+ SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
	dmaregion_free_end = dmaregion_free_start
				+ SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	/* Reject table copies that would overflow the region */
	if (dmaregion_free_start
	     + dma_ctx->dmatables_len > dmaregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}
	memcpy(dmaregion_free_start,
	       *dmatables_region,
	       dma_ctx->dmatables_len);
	/* Free MLLI table copy */
	kfree(*dmatables_region);
	/* NULL makes the duplicate kfree() at end_function a no-op */
	*dmatables_region = NULL;

	/* Copy thread's DCB  table copy to DCB table region */
	dcbregion_free_start = sep->shared_addr +
				SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
	dcbregion_free_end = dcbregion_free_start +
				(SEP_MAX_NUM_SYNC_DMA_OPS *
					sizeof(struct sep_dcblock)) - 1;

	/* Reject DCB copies that would overflow the DCB region */
	if (dcbregion_free_start
	     + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
	     > dcbregion_free_end) {
		error = -ENOMEM;
		goto end_function;
	}

	memcpy(dcbregion_free_start,
	       *dcb_region,
	       dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));

	/* Print the tables */
	dev_dbg(&sep->pdev->dev, "activate: input table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->input_mlli_address),
		(*dcb_region)->input_mlli_num_entries,
		(*dcb_region)->input_mlli_data_size);

	dev_dbg(&sep->pdev->dev, "activate: output table\n");
	sep_debug_print_lli_tables(sep,
		(struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
		(*dcb_region)->output_mlli_address),
		(*dcb_region)->output_mlli_num_entries,
		(*dcb_region)->output_mlli_data_size);

	dev_dbg(&sep->pdev->dev,
		 "[PID%d] printing activated tables\n", current->pid);

end_function:
	/* Consume both buffers on every path (kfree(NULL) is safe) */
	kfree(*dmatables_region);
	*dmatables_region = NULL;

	kfree(*dcb_region);
	*dcb_region = NULL;

	return error;
}
3365
3366 /**
3367  *      sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3368  *      @sep: SEP device
3369  *      @dcb_region: DCB region buf to create for current transaction
3370  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3371  *      @dma_ctx: DMA context buf to create for current transaction
3372  *      @user_dcb_args: User arguments for DCB/MLLI creation
3373  *      @num_dcbs: Number of DCBs to create
3374  *      @secure_dma: Indicate use of IMR restricted memory secure dma
3375  */
3376 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3377                         struct sep_dcblock **dcb_region,
3378                         void **dmatables_region,
3379                         struct sep_dma_context **dma_ctx,
3380                         const struct build_dcb_struct __user *user_dcb_args,
3381                         const u32 num_dcbs, bool secure_dma)
3382 {
3383         int error = 0;
3384         int i = 0;
3385         struct build_dcb_struct *dcb_args = NULL;
3386
3387         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3388                 current->pid);
3389
3390         if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3391                 error = -EINVAL;
3392                 goto end_function;
3393         }
3394
3395         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3396                 dev_warn(&sep->pdev->dev,
3397                          "[PID%d] invalid number of dcbs 0x%08X\n",
3398                          current->pid, num_dcbs);
3399                 error = -EINVAL;
3400                 goto end_function;
3401         }
3402
3403         dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3404                            GFP_KERNEL);
3405         if (!dcb_args) {
3406                 error = -ENOMEM;
3407                 goto end_function;
3408         }
3409
3410         if (copy_from_user(dcb_args,
3411                         user_dcb_args,
3412                         num_dcbs * sizeof(struct build_dcb_struct))) {
3413                 error = -EFAULT;
3414                 goto end_function;
3415         }
3416
3417         /* Allocate thread-specific memory for DCB */
3418         *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3419                               GFP_KERNEL);
3420         if (!(*dcb_region)) {
3421                 error = -ENOMEM;
3422                 goto end_function;
3423         }
3424
3425         /* Prepare DCB and MLLI table into the allocated regions */
3426         for (i = 0; i < num_dcbs; i++) {
3427                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3428                                 (unsigned long)dcb_args[i].app_in_address,
3429                                 (unsigned long)dcb_args[i].app_out_address,
3430                                 dcb_args[i].data_in_size,
3431                                 dcb_args[i].block_size,
3432                                 dcb_args[i].tail_block_size,
3433                                 dcb_args[i].is_applet,
3434                                 false, secure_dma,
3435                                 *dcb_region, dmatables_region,
3436                                 dma_ctx,
3437                                 NULL,
3438                                 NULL);
3439                 if (error) {
3440                         dev_warn(&sep->pdev->dev,
3441                                  "[PID%d] dma table creation failed\n",
3442                                  current->pid);
3443                         goto end_function;
3444                 }
3445
3446                 if (dcb_args[i].app_in_address != 0)
3447                         (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3448         }
3449
3450 end_function:
3451         kfree(dcb_args);
3452         return error;
3453
3454 }
3455
3456 /**
3457  *      sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3458  *      for kernel crypto
3459  *      @sep: SEP device
3460  *      @dcb_region: DCB region buf to create for current transaction
3461  *      @dmatables_region: MLLI/DMA tables buf to create for current transaction
3462  *      @dma_ctx: DMA context buf to create for current transaction
3463  *      @user_dcb_args: User arguments for DCB/MLLI creation
3464  *      @num_dcbs: Number of DCBs to create
3465  *      This does that same thing as sep_create_dcb_dmatables_context
3466  *      except that it is used only for the kernel crypto operation. It is
3467  *      separate because there is no user data involved; the dcb data structure
3468  *      is specific for kernel crypto (build_dcb_struct_kernel)
3469  */
3470 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3471                         struct sep_dcblock **dcb_region,
3472                         void **dmatables_region,
3473                         struct sep_dma_context **dma_ctx,
3474                         const struct build_dcb_struct_kernel *dcb_data,
3475                         const u32 num_dcbs)
3476 {
3477         int error = 0;
3478         int i = 0;
3479
3480         dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3481                 current->pid);
3482
3483         if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3484                 error = -EINVAL;
3485                 goto end_function;
3486         }
3487
3488         if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3489                 dev_warn(&sep->pdev->dev,
3490                          "[PID%d] invalid number of dcbs 0x%08X\n",
3491                          current->pid, num_dcbs);
3492                 error = -EINVAL;
3493                 goto end_function;
3494         }
3495
3496         dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3497                 current->pid, num_dcbs);
3498
3499         /* Allocate thread-specific memory for DCB */
3500         *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3501                               GFP_KERNEL);
3502         if (!(*dcb_region)) {
3503                 error = -ENOMEM;
3504                 goto end_function;
3505         }
3506
3507         /* Prepare DCB and MLLI table into the allocated regions */
3508         for (i = 0; i < num_dcbs; i++) {
3509                 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3510                                 (unsigned long)dcb_data->app_in_address,
3511                                 (unsigned long)dcb_data->app_out_address,
3512                                 dcb_data->data_in_size,
3513                                 dcb_data->block_size,
3514                                 dcb_data->tail_block_size,
3515                                 dcb_data->is_applet,
3516                                 true,
3517                                 false,
3518                                 *dcb_region, dmatables_region,
3519                                 dma_ctx,
3520                                 dcb_data->src_sg,
3521                                 dcb_data->dst_sg);
3522                 if (error) {
3523                         dev_warn(&sep->pdev->dev,
3524                                  "[PID%d] dma table creation failed\n",
3525                                  current->pid);
3526                         goto end_function;
3527                 }
3528         }
3529
3530 end_function:
3531         return error;
3532
3533 }
3534
3535 /**
3536  *      sep_activate_msgarea_context - Takes the message area context into use
3537  *      @sep: SEP device
3538  *      @msg_region: Message area context buf
3539  *      @msg_len: Message area context buffer size
3540  */
3541 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3542                                             void **msg_region,
3543                                             const size_t msg_len)
3544 {
3545         dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3546                 current->pid);
3547
3548         if (!msg_region || !(*msg_region) ||
3549             SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3550                 dev_warn(&sep->pdev->dev,
3551                          "[PID%d] invalid act msgarea len 0x%08zX\n",
3552                          current->pid, msg_len);
3553                 return -EINVAL;
3554         }
3555
3556         memcpy(sep->shared_addr, *msg_region, msg_len);
3557
3558         return 0;
3559 }
3560
3561 /**
3562  *      sep_create_msgarea_context - Creates message area context
3563  *      @sep: SEP device
3564  *      @msg_region: Msg area region buf to create for current transaction
3565  *      @msg_user: Content for msg area region from user
3566  *      @msg_len: Message area size
3567  */
3568 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3569                                           void **msg_region,
3570                                           const void __user *msg_user,
3571                                           const size_t msg_len)
3572 {
3573         int error = 0;
3574
3575         dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3576                 current->pid);
3577
3578         if (!msg_region ||
3579             !msg_user ||
3580             SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3581             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3582                 dev_warn(&sep->pdev->dev,
3583                          "[PID%d] invalid creat msgarea len 0x%08zX\n",
3584                          current->pid, msg_len);
3585                 error = -EINVAL;
3586                 goto end_function;
3587         }
3588
3589         /* Allocate thread-specific memory for message buffer */
3590         *msg_region = kzalloc(msg_len, GFP_KERNEL);
3591         if (!(*msg_region)) {
3592                 error = -ENOMEM;
3593                 goto end_function;
3594         }
3595
3596         /* Copy input data to write() to allocated message buffer */
3597         if (copy_from_user(*msg_region, msg_user, msg_len)) {
3598                 error = -EFAULT;
3599                 goto end_function;
3600         }
3601
3602 end_function:
3603         if (error && msg_region) {
3604                 kfree(*msg_region);
3605                 *msg_region = NULL;
3606         }
3607
3608         return error;
3609 }
3610
3611
/**
 *      sep_read - Returns results of an operation for fastcall interface
 *      @filp: File pointer
 *      @buf_user: User buffer for storing results
 *      @count_user: User buffer size
 *      @offset: File offset, not supported
 *
 *      The implementation does not support reading in chunks, all data must be
 *      consumed during a single read system call.
 *
 *      On success returns the number of bytes copied to @buf_user
 *      (capped at the shared message area size). On both success and
 *      failure past the ownership check, the DCBs are freed and the
 *      transaction is ended before returning.
 */
static ssize_t sep_read(struct file *filp,
			char __user *buf_user, size_t count_user,
			loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	ssize_t error = 0, error_tmp = 0;

	/* Am I the process that owns the transaction? */
	error = sep_check_transaction_owner(sep);
	if (error) {
		/* Not owner: bail out without touching the transaction
		 * (note: end_function, not end_function_error) */
		dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
			current->pid);
		goto end_function;
	}

	/* Checks that user has called necessary apis */
	if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
			&call_status->status)) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] fastcall write not called\n",
			 current->pid);
		error = -EPROTO;
		goto end_function_error;
	}

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] null user buffer\n",
			 current->pid);
		error = -EINVAL;
		goto end_function_error;
	}


	/* Wait for SEP to finish (working lock cleared by the ISR) */
	wait_event(sep->event_interrupt,
		   test_bit(SEP_WORKING_LOCK_BIT,
			    &sep->in_use_flags) == 0);

	sep_dump_message(sep);

	dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
		current->pid, count_user);

	/* In case user has allocated bigger buffer */
	if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
		count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;

	if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
		error = -EFAULT;
		goto end_function_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
	error = count_user;

end_function_error:
	/* Copy possible tail data to user and free DCB and MLLIs */
	error_tmp = sep_free_dcb_handler(sep, dma_ctx);
	if (error_tmp)
		dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
			current->pid);

	/* End the transaction, wakeup pending ones */
	error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);
	if (error_tmp)
		dev_warn(&sep->pdev->dev,
			 "[PID%d] ending transaction failed\n",
			 current->pid);

end_function:
	return error;
}
3700
3701 /**
3702  *      sep_fastcall_args_get - Gets fastcall params from user
3703  *      sep: SEP device
3704  *      @args: Parameters buffer
3705  *      @buf_user: User buffer for operation parameters
3706  *      @count_user: User buffer size
3707  */
3708 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3709                                             struct sep_fastcall_hdr *args,
3710                                             const char __user *buf_user,
3711                                             const size_t count_user)
3712 {
3713         ssize_t error = 0;
3714         size_t actual_count = 0;
3715
3716         if (!buf_user) {
3717                 dev_warn(&sep->pdev->dev,
3718                          "[PID%d] null user buffer\n",
3719                          current->pid);
3720                 error = -EINVAL;
3721                 goto end_function;
3722         }
3723
3724         if (count_user < sizeof(struct sep_fastcall_hdr)) {
3725                 dev_warn(&sep->pdev->dev,
3726                          "[PID%d] too small message size 0x%08zX\n",
3727                          current->pid, count_user);
3728                 error = -EINVAL;
3729                 goto end_function;
3730         }
3731
3732
3733         if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3734                 error = -EFAULT;
3735                 goto end_function;
3736         }
3737
3738         if (SEP_FC_MAGIC != args->magic) {
3739                 dev_warn(&sep->pdev->dev,
3740                          "[PID%d] invalid fastcall magic 0x%08X\n",
3741                          current->pid, args->magic);
3742                 error = -EINVAL;
3743                 goto end_function;
3744         }
3745
3746         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3747                 current->pid, args->num_dcbs);
3748         dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3749                 current->pid, args->msg_len);
3750
3751         if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3752             SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3753                 dev_warn(&sep->pdev->dev,
3754                          "[PID%d] invalid message length\n",
3755                          current->pid);
3756                 error = -EINVAL;
3757                 goto end_function;
3758         }
3759
3760         actual_count = sizeof(struct sep_fastcall_hdr)
3761                         + args->msg_len
3762                         + (args->num_dcbs * sizeof(struct build_dcb_struct));
3763
3764         if (actual_count != count_user) {
3765                 dev_warn(&sep->pdev->dev,
3766                          "[PID%d] inconsistent message "
3767                          "sizes 0x%08zX vs 0x%08zX\n",
3768                          current->pid, actual_count, count_user);
3769                 error = -EMSGSIZE;
3770                 goto end_function;
3771         }
3772
3773 end_function:
3774         return error;
3775 }
3776
/**
 *	sep_write - Starts an operation for fastcall interface
 *	@filp: File pointer
 *	@buf_user: User buffer for operation parameters
 *	@count_user: User buffer size
 *	@offset: File offset, not supported
 *
 *	The implementation does not support writing in chunks,
 *	all data must be given during a single write system call.
 *
 *	On success the DMA context ownership is transferred to
 *	@filp's private data and count_user is returned; the matching
 *	sep_read/sep_release path is then responsible for cleanup.
 */
static ssize_t sep_write(struct file *filp,
			 const char __user *buf_user, size_t count_user,
			 loff_t *offset)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context *dma_ctx = NULL;
	struct sep_fastcall_hdr call_hdr = {0};
	void *msg_region = NULL;
	void *dmatables_region = NULL;
	struct sep_dcblock *dcb_region = NULL;
	ssize_t error = 0;
	struct sep_queue_info *my_queue_elem = NULL;
	bool my_secure_dma; /* are we using secure_dma (IMR)? */

	dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
		current->pid, sep);
	dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
		current->pid, private_data);

	/* Validate the fastcall header and total write size */
	error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
	if (error)
		goto end_function;

	/* Skip past the header; DCB descriptors (if any) follow it */
	buf_user += sizeof(struct sep_fastcall_hdr);

	if (call_hdr.secure_dma == 0)
		my_secure_dma = false;
	else
		my_secure_dma = true;

	/*
	 * Controlling driver memory usage by limiting amount of
	 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
	 * of threads can progress further at a time
	 */
	dev_dbg(&sep->pdev->dev,
		"[PID%d] waiting for double buffering region access\n",
		current->pid);
	error = down_interruptible(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
					current->pid);
	if (error) {
		/* Signal received; semaphore was NOT taken */
		goto end_function_error;
	}


	/*
	 * Prepare contents of the shared area regions for
	 * the operation into temporary buffers
	 */
	if (0 < call_hdr.num_dcbs) {
		error = sep_create_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				&dma_ctx,
				(const struct build_dcb_struct __user *)
					buf_user,
				call_hdr.num_dcbs, my_secure_dma);
		if (error)
			goto end_function_error_doublebuf;

		buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
	}

	/* Remaining user data is the message area payload */
	error = sep_create_msgarea_context(sep,
					   &msg_region,
					   buf_user,
					   call_hdr.msg_len);
	if (error)
		goto end_function_error_doublebuf;

	dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
							current->pid);
	my_queue_elem = sep_queue_status_add(sep,
				((struct sep_msgarea_hdr *)msg_region)->opcode,
				(dma_ctx) ? dma_ctx->input_data_len : 0,
				     current->pid,
				     current->comm, sizeof(current->comm));

	if (!my_queue_elem) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] updating queue status error\n", current->pid);
		error = -ENOMEM;
		goto end_function_error_doublebuf;
	}

	/* Wait until current process gets the transaction */
	error = sep_wait_transaction(sep);

	if (error) {
		/* Interrupted by signal, don't clear transaction */
		dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
			current->pid);
		sep_queue_status_remove(sep, &my_queue_elem);
		goto end_function_error_doublebuf;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
		current->pid);
	private_data->my_queue_elem = my_queue_elem;

	/* Activate shared area regions for the transaction */
	error = sep_activate_msgarea_context(sep, &msg_region,
					     call_hdr.msg_len);
	if (error)
		goto end_function_error_clear_transact;

	sep_dump_message(sep);

	if (0 < call_hdr.num_dcbs) {
		error = sep_activate_dcb_dmatables_context(sep,
				&dcb_region,
				&dmatables_region,
				dma_ctx);
		if (error)
			goto end_function_error_clear_transact;
	}

	/* Send command to SEP */
	error = sep_send_command_handler(sep);
	if (error)
		goto end_function_error_clear_transact;

	/* Store DMA context for the transaction; sep_read frees it later */
	private_data->dma_ctx = dma_ctx;
	/* Update call status */
	set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
	error = count_user;

	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

	goto end_function;

	/* We owned the transaction: end it and wake up waiters */
end_function_error_clear_transact:
	sep_end_transaction_handler(sep, &dma_ctx, call_status,
						&private_data->my_queue_elem);

	/* Semaphore was taken: release the double-buffer slot */
end_function_error_doublebuf:
	up(&sep->sep_doublebuf);
	dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
		current->pid);

end_function_error:
	/* dma_ctx is still ours on failure; tear down its DMA tables */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, &dma_ctx);

end_function:
	/* Temporary staging buffers are freed on every path */
	kfree(dcb_region);
	kfree(dmatables_region);
	kfree(msg_region);

	return error;
}
3945 /**
3946  *      sep_seek - Handler for seek system call
3947  *      @filp: File pointer
3948  *      @offset: File offset
3949  *      @origin: Options for offset
3950  *
3951  *      Fastcall interface does not support seeking, all reads
3952  *      and writes are from/to offset zero
3953  */
3954 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3955 {
3956         return -ENOSYS;
3957 }
3958
3959
3960
/**
 * sep_file_operations - file operations on the sep misc device
 * @sep_ioctl:  ioctl handler from user space call
 * @sep_poll:   poll handler
 * @sep_open:   handles sep device open request
 * @sep_release:handles sep device release request
 * @sep_mmap:   handles memory mapping requests
 * @sep_read:   handles read request on sep device
 * @sep_write:  handles write request on sep device
 * @sep_seek:   handles seek request on sep device (always fails)
 */
static const struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
	.read = sep_read,
	.write = sep_write,
	.llseek = sep_seek,
};
3983
3984 /**
3985  * sep_sysfs_read - read sysfs entry per gives arguments
3986  * @filp: file pointer
3987  * @kobj: kobject pointer
3988  * @attr: binary file attributes
3989  * @buf: read to this buffer
3990  * @pos: offset to read
3991  * @count: amount of data to read
3992  *
3993  * This function is to read sysfs entries for sep driver per given arguments.
3994  */
3995 static ssize_t
3996 sep_sysfs_read(struct file *filp, struct kobject *kobj,
3997                 struct bin_attribute *attr,
3998                 char *buf, loff_t pos, size_t count)
3999 {
4000         unsigned long lck_flags;
4001         size_t nleft = count;
4002         struct sep_device *sep = sep_dev;
4003         struct sep_queue_info *queue_elem = NULL;
4004         u32 queue_num = 0;
4005         u32 i = 1;
4006
4007         spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4008
4009         queue_num = sep->sep_queue_num;
4010         if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4011                 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4012
4013
4014         if (count < sizeof(queue_num)
4015                         + (queue_num * sizeof(struct sep_queue_data))) {
4016                 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4017                 return -EINVAL;
4018         }
4019
4020         memcpy(buf, &queue_num, sizeof(queue_num));
4021         buf += sizeof(queue_num);
4022         nleft -= sizeof(queue_num);
4023
4024         list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4025                 if (i++ > queue_num)
4026                         break;
4027
4028                 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4029                 nleft -= sizeof(queue_elem->data);
4030                 buf += sizeof(queue_elem->data);
4031         }
4032         spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4033
4034         return count - nleft;
4035 }
4036
/**
 * queue_status - sysfs binary attribute exposing the transaction queue
 * @attr: attributes (name & permissions); world-readable, never writable
 * @read: function pointer to read this file
 * @size: maximum size of binary attribute - a u32 count plus up to
 *        SEP_DOUBLEBUF_USERS_LIMIT queue data records
 */
static const struct bin_attribute queue_status = {
	.attr = {.name = "queue_status", .mode = 0444},
	.read = sep_sysfs_read,
	.size = sizeof(u32)
		+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
};
4049
4050 /**
4051  * sep_register_driver_with_fs - register misc devices
4052  * @sep: pointer to struct sep_device
4053  *
4054  * This function registers the driver with the file system
4055  */
4056 static int sep_register_driver_with_fs(struct sep_device *sep)
4057 {
4058         int ret_val;
4059
4060         sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4061         sep->miscdev_sep.name = SEP_DEV_NAME;
4062         sep->miscdev_sep.fops = &sep_file_operations;
4063
4064         ret_val = misc_register(&sep->miscdev_sep);
4065         if (ret_val) {
4066                 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4067                         ret_val);
4068                 return ret_val;
4069         }
4070
4071         ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4072                                                                 &queue_status);
4073         if (ret_val) {
4074                 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4075                         ret_val);
4076                 misc_deregister(&sep->miscdev_sep);
4077                 return ret_val;
4078         }
4079
4080         return ret_val;
4081 }
4082
4083
4084 /**
4085  *sep_probe - probe a matching PCI device
4086  *@pdev:        pci_device
4087  *@ent: pci_device_id
4088  *
4089  *Attempt to set up and configure a SEP device that has been
4090  *discovered by the PCI layer. Allocates all required resources.
4091  */
4092 static int sep_probe(struct pci_dev *pdev,
4093         const struct pci_device_id *ent)
4094 {
4095         int error = 0;
4096         struct sep_device *sep = NULL;
4097
4098         if (sep_dev != NULL) {
4099                 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4100                 return -EBUSY;
4101         }
4102
4103         /* Enable the device */
4104         error = pci_enable_device(pdev);
4105         if (error) {
4106                 dev_warn(&pdev->dev, "error enabling pci device\n");
4107                 goto end_function;
4108         }
4109
4110         /* Allocate the sep_device structure for this device */
4111         sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4112         if (sep_dev == NULL) {
4113                 error = -ENOMEM;
4114                 goto end_function_disable_device;
4115         }
4116
4117         /*
4118          * We're going to use another variable for actually
4119          * working with the device; this way, if we have
4120          * multiple devices in the future, it would be easier
4121          * to make appropriate changes
4122          */
4123         sep = sep_dev;
4124
4125         sep->pdev = pci_dev_get(pdev);
4126
4127         init_waitqueue_head(&sep->event_transactions);
4128         init_waitqueue_head(&sep->event_interrupt);
4129         spin_lock_init(&sep->snd_rply_lck);
4130         spin_lock_init(&sep->sep_queue_lock);
4131         sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4132
4133         INIT_LIST_HEAD(&sep->sep_queue_status);
4134
4135         dev_dbg(&sep->pdev->dev,
4136                 "sep probe: PCI obtained, device being prepared\n");
4137
4138         /* Set up our register area */
4139         sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4140         if (!sep->reg_physical_addr) {
4141                 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4142                 error = -ENODEV;
4143                 goto end_function_free_sep_dev;
4144         }
4145
4146         sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4147         if (!sep->reg_physical_end) {
4148                 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4149                 error = -ENODEV;
4150                 goto end_function_free_sep_dev;
4151         }
4152
4153         sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4154                 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4155         if (!sep->reg_addr) {
4156                 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4157                 error = -ENODEV;
4158                 goto end_function_free_sep_dev;
4159         }
4160
4161         dev_dbg(&sep->pdev->dev,
4162                 "Register area start %llx end %llx virtual %p\n",
4163                 (unsigned long long)sep->reg_physical_addr,
4164                 (unsigned long long)sep->reg_physical_end,
4165                 sep->reg_addr);
4166
4167         /* Allocate the shared area */
4168         sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4169                 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4170                 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4171                 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4172                 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4173
4174         if (sep_map_and_alloc_shared_area(sep)) {
4175                 error = -ENOMEM;
4176                 /* Allocation failed */
4177                 goto end_function_error;
4178         }
4179
4180         /* Clear ICR register */
4181         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4182
4183         /* Set the IMR register - open only GPR 2 */
4184         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4185
4186         /* Read send/receive counters from SEP */
4187         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4188         sep->reply_ct &= 0x3FFFFFFF;
4189         sep->send_ct = sep->reply_ct;
4190
4191         /* Get the interrupt line */
4192         error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4193                 "sep_driver", sep);
4194
4195         if (error)
4196                 goto end_function_deallocate_sep_shared_area;
4197
4198         /* The new chip requires a shared area reconfigure */
4199         error = sep_reconfig_shared_area(sep);
4200         if (error)
4201                 goto end_function_free_irq;
4202
4203         sep->in_use = 1;
4204
4205         /* Finally magic up the device nodes */
4206         /* Register driver with the fs */
4207         error = sep_register_driver_with_fs(sep);
4208
4209         if (error) {
4210                 dev_err(&sep->pdev->dev, "error registering dev file\n");
4211                 goto end_function_free_irq;
4212         }
4213
4214         sep->in_use = 0; /* through touching the device */
4215 #ifdef SEP_ENABLE_RUNTIME_PM
4216         pm_runtime_put_noidle(&sep->pdev->dev);
4217         pm_runtime_allow(&sep->pdev->dev);
4218         pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4219                 SUSPEND_DELAY);
4220         pm_runtime_use_autosuspend(&sep->pdev->dev);
4221         pm_runtime_mark_last_busy(&sep->pdev->dev);
4222         sep->power_save_setup = 1;
4223 #endif
4224         /* register kernel crypto driver */
4225 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4226         error = sep_crypto_setup();
4227         if (error) {
4228                 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4229                 goto end_function_free_irq;
4230         }
4231 #endif
4232         goto end_function;
4233
4234 end_function_free_irq:
4235         free_irq(pdev->irq, sep);
4236
4237 end_function_deallocate_sep_shared_area:
4238         /* De-allocate shared area */
4239         sep_unmap_and_free_shared_area(sep);
4240
4241 end_function_error:
4242         iounmap(sep->reg_addr);
4243
4244 end_function_free_sep_dev:
4245         pci_dev_put(sep_dev->pdev);
4246         kfree(sep_dev);
4247         sep_dev = NULL;
4248
4249 end_function_disable_device:
4250         pci_disable_device(pdev);
4251
4252 end_function:
4253         return error;
4254 }
4255
/**
 * sep_remove - handles removing device from pci subsystem
 * @pdev:	pointer to pci device
 *
 * This function will handle removing our sep device from pci subsystem on exit
 * or unloading this module. It should free up all used resources, and unmap if
 * any memory regions mapped. The user-visible interfaces are torn down
 * first so nothing can reach the device while it is being freed.
 */
static void sep_remove(struct pci_dev *pdev)
{
	struct sep_device *sep = sep_dev;

	/* Unregister from fs first, so no new opens can race the teardown */
	misc_deregister(&sep->miscdev_sep);

	/* Unregister from kernel crypto */
#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
	sep_crypto_takedown();
#endif
	/* Free the irq */
	free_irq(sep->pdev->irq, sep);

	/* Free the shared area  */
	sep_unmap_and_free_shared_area(sep_dev);
	iounmap(sep_dev->reg_addr);

#ifdef SEP_ENABLE_RUNTIME_PM
	/* Undo the runtime-PM reference dropped in sep_probe */
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_forbid(&sep->pdev->dev);
		pm_runtime_get_noresume(&sep->pdev->dev);
	}
#endif
	/* Drop the reference taken via pci_dev_get() in sep_probe */
	pci_dev_put(sep_dev->pdev);
	kfree(sep_dev);
	sep_dev = NULL;
}
4293
4294 /* Initialize struct pci_device_id for our driver */
4295 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4296         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4297         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4298         {0}
4299 };
4300
4301 /* Export our pci_device_id structure to user space */
4302 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4303
4304 #ifdef SEP_ENABLE_RUNTIME_PM
4305
/**
 * sep_pci_resume - resume routine while waking up from S3 state
 * @dev:	pointer to sep device
 *
 * This function is to be used to wake up sep driver while system awakes from S3
 * state i.e. suspend to ram. The RAM is intact.
 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
 */
static int sep_pci_resume(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci resume called\n");

	/* Nothing to do if we never powered down */
	if (sep->power_state == SEP_DRIVER_POWERON)
		return 0;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	sep->power_state = SEP_DRIVER_POWERON;

	return 0;
}
4338
/**
 * sep_pci_suspend - suspend routine while going to S3 state
 * @dev:	pointer to sep device
 *
 * This function is to be used to suspend sep driver while system goes to S3
 * state i.e. suspend to ram. The RAM is intact and ON during this suspend.
 * Notes - revisit with more understanding of pm, ICR/IMR
 */
static int sep_pci_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci suspend called\n");
	/* Refuse to suspend while a transaction is in flight */
	if (sep->in_use == 1)
		return -EAGAIN;

	sep->power_state = SEP_DRIVER_POWEROFF;

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR to block all */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);

	return 0;
}
4365
4366 /**
4367  * sep_pm_runtime_resume - runtime resume routine
4368  * @dev:        pointer to sep device
4369  *
4370  * Notes - revisit with more understanding of pm, ICR/IMR & counters
4371  */
4372 static int sep_pm_runtime_resume(struct device *dev)
4373 {
4374
4375         u32 retval2;
4376         u32 delay_count;
4377         struct sep_device *sep = sep_dev;
4378
4379         dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4380
4381         /**
4382          * Wait until the SCU boot is ready
4383          * This is done by iterating SCU_DELAY_ITERATION (10
4384          * microseconds each) up to SCU_DELAY_MAX (50) times.
4385          * This bit can be set in a random time that is less
4386          * than 500 microseconds after each power resume
4387          */
4388         retval2 = 0;
4389         delay_count = 0;
4390         while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4391                 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4392                 retval2 &= 0x00000008;
4393                 if (!retval2) {
4394                         udelay(SCU_DELAY_ITERATION);
4395                         delay_count += 1;
4396                 }
4397         }
4398
4399         if (!retval2) {
4400                 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4401                 return -EINVAL;
4402         }
4403
4404         /* Clear ICR register */
4405         sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4406
4407         /* Set the IMR register - open only GPR 2 */
4408         sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4409
4410         /* Read send/receive counters from SEP */
4411         sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4412         sep->reply_ct &= 0x3FFFFFFF;
4413         sep->send_ct = sep->reply_ct;
4414
4415         return 0;
4416 }
4417
/**
 * sep_pm_runtime_suspend - runtime suspend routine
 * @dev:	pointer to sep device
 *
 * Quiesces the device by acknowledging any pending interrupt causes.
 * Notes - revisit with more understanding of pm
 */
static int sep_pm_runtime_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
	return 0;
}
4434
/**
 * sep_pm - power management operations for the sep driver
 * @sep_pm_runtime_resume:	resume- no communication with cpu & main memory
 * @sep_pm_runtime_suspend:	suspend- no communication with cpu & main memory
 * @sep_pci_suspend:		suspend - main memory is still ON
 * @sep_pci_resume:		resume - main memory is still ON
 */
static const struct dev_pm_ops sep_pm = {
	.runtime_resume = sep_pm_runtime_resume,
	.runtime_suspend = sep_pm_runtime_suspend,
	.resume = sep_pci_resume,
	.suspend = sep_pci_suspend,
};
4448 #endif /* SEP_ENABLE_RUNTIME_PM */
4449
/**
 * sep_pci_driver - registers this device with pci subsystem
 * @name:	name identifier for this driver
 * @sep_pci_id_tbl:	pointer to struct pci_device_id table
 * @sep_probe:	pointer to probe function in PCI driver
 * @sep_remove:	pointer to remove function in PCI driver
 */
static struct pci_driver sep_pci_driver = {
#ifdef SEP_ENABLE_RUNTIME_PM
	.driver = {
		.pm = &sep_pm,
	},
#endif
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe,
	.remove = sep_remove
};

/* Generates module init/exit that register/unregister the PCI driver */
module_pci_driver(sep_pci_driver);
MODULE_LICENSE("GPL");