/*
 * hw_mmu.h
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * MMU types and API declarations
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#ifndef _HW_MMU_H
#define _HW_MMU_H

#include <linux/types.h>

/* Bitmasks for interrupt sources */
#define HW_MMU_TRANSLATION_FAULT   0x2
#define HW_MMU_ALL_INTERRUPTS      0x1F

/* Size in bytes of a second-level (coarse) page table: 256 4-byte entries */
#define HW_MMU_COARSE_PAGE_SIZE 0x400

/* hw_mmu_mixed_size_t:  Enumerated type selecting whether a TLB entry
 *                       follows the TLB or the CPU element size */
enum hw_mmu_mixed_size_t {
        HW_MMU_TLBES,
        HW_MMU_CPUES
};

/* hw_mmu_map_attrs_t:  Struct containing MMU mapping attributes */
struct hw_mmu_map_attrs_t {
        enum hw_endianism_t endianism;
        enum hw_element_size_t element_size;
        enum hw_mmu_mixed_size_t mixed_size;
        bool donotlockmpupage;
};
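
/*
 * A minimal sketch of filling in the attributes above (illustrative
 * only; the HW_LITTLE_ENDIAN and HW_ELEM_SIZE16BIT enumerators are
 * assumed to come from hw_defs.h):
 *
 *      struct hw_mmu_map_attrs_t attrs = {
 *              .endianism = HW_LITTLE_ENDIAN,
 *              .element_size = HW_ELEM_SIZE16BIT,
 *              .mixed_size = HW_MMU_CPUES,
 *              .donotlockmpupage = false,
 *      };
 */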

extern hw_status hw_mmu_enable(const void __iomem *base_address);

extern hw_status hw_mmu_disable(const void __iomem *base_address);

extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
                                       u32 num_locked_entries);

extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
                                       u32 victim_entry_num);

/* For MMU faults */
extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
                                  u32 irq_mask);

extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
                                      u32 irq_mask);

extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
                                     u32 irq_mask);

extern hw_status hw_mmu_event_status(const void __iomem *base_address,
                                     u32 *irq_mask);

extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
                                        u32 *addr);

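/*
 * A minimal fault-path sketch using the calls above (illustrative;
 * error handling omitted, `base' stands for the MMU register base):
 *
 *      u32 status, fault_va;
 *
 *      hw_mmu_event_status(base, &status);
 *      if (status & HW_MMU_TRANSLATION_FAULT) {
 *              hw_mmu_fault_addr_read(base, &fault_va);
 *              hw_mmu_event_ack(base, HW_MMU_TRANSLATION_FAULT);
 *      }
 */
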
/* Set the translation table base (TTB) physical address */
extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
                                u32 ttb_phys_addr);

extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);

extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);

extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
                                  u32 virtual_addr, u32 page_sz);

extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
                                u32 physical_addr,
                                u32 virtual_addr,
                                u32 page_sz,
                                u32 entry_num,
                                struct hw_mmu_map_attrs_t *map_attrs,
                                s8 preserved_bit, s8 valid_bit);

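/*
 * Sketch of loading one locked TLB entry (illustrative; assumes
 * HW_PAGE_SIZE4KB from hw_defs.h, `attrs' as in the sketch above,
 * and that entry 0 is the one to lock down):
 *
 *      hw_mmu_victim_num_set(base, 0);
 *      hw_mmu_tlb_add(base, pa, va, HW_PAGE_SIZE4KB, 0, &attrs, 1, 1);
 *      hw_mmu_num_locked_set(base, 1);
 */
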
/* For PTEs */
extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
                                u32 physical_addr,
                                u32 virtual_addr,
                                u32 page_sz,
                                struct hw_mmu_map_attrs_t *map_attrs);

extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
                                  u32 virtual_addr, u32 page_size);

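/*
 * Sketch of creating and tearing down one page-table entry
 * (illustrative; pg_tbl_va stands for the kernel virtual address
 * of the page table being edited):
 *
 *      hw_mmu_pte_set(pg_tbl_va, pa, va, HW_PAGE_SIZE4KB, &attrs);
 *      ...
 *      hw_mmu_pte_clear(pg_tbl_va, va, HW_PAGE_SIZE4KB);
 */
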
void hw_mmu_tlb_flush_all(const void __iomem *base);

static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
        u32 pte_addr;
        u32 va31_to20;

        va31_to20 = va >> (20 - 2);     /* fold in the << 2 for 4-byte entries */
        va31_to20 &= 0xFFFFFFFCUL;
        pte_addr = l1_base + va31_to20;

        return pte_addr;
}
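
/*
 * Example with illustrative values: for l1_base = 0x9fff0000 and
 * va = 0x40123456, VA bits [31:20] give L1 index 0x401; entries are
 * 4 bytes, so the result is 0x9fff0000 + (0x401 << 2) = 0x9fff1004.
 */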

static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
{
        u32 pte_addr;

        pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);

        return pte_addr;
}
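
/*
 * Example with illustrative values: for l2_base = 0x9fff1000 and
 * va = 0x40123456, VA bits [19:12] give L2 index 0x23; entries are
 * 4 bytes, so the result is 0x9fff1000 | (0x23 << 2) = 0x9fff108c.
 */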

/* Extract the coarse (L2) page-table base address from an L1 descriptor */
static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
{
        u32 pte_coarse;

        pte_coarse = pte_val & 0xFFFFFC00;

        return pte_coarse;
}

/* Decode the mapping size encoded in a first-level descriptor */
static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
{
        u32 pte_size = 0;

        if ((pte_val & 0x3) == 0x1) {
                /* Points to an L2 (coarse) page table */
                pte_size = HW_MMU_COARSE_PAGE_SIZE;
        } else if ((pte_val & 0x3) == 0x2) {
                /* Section descriptor; bit 18 selects a 16MB supersection */
                if (pte_val & (1 << 18))
                        pte_size = HW_PAGE_SIZE16MB;
                else
                        pte_size = HW_PAGE_SIZE1MB;
        }

        return pte_size;
}

/* Decode the mapping size encoded in a second-level descriptor */
static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
{
        u32 pte_size = 0;

        if (pte_val & 0x2)
                pte_size = HW_PAGE_SIZE4KB;     /* small page */
        else if (pte_val & 0x1)
                pte_size = HW_PAGE_SIZE64KB;    /* large page */

        return pte_size;
}
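
/*
 * Illustrative descriptor values and the sizes decoded above
 * (the bit encodings are assumed to follow the ARM first- and
 * second-level descriptor formats):
 *
 *      hw_mmu_pte_size_l1(0x9fff1001) == HW_MMU_COARSE_PAGE_SIZE
 *      hw_mmu_pte_size_l1(0x80000002) == HW_PAGE_SIZE1MB
 *      hw_mmu_pte_size_l1(0x80040002) == HW_PAGE_SIZE16MB
 *      hw_mmu_pte_size_l2(0x80000002) == HW_PAGE_SIZE4KB
 *      hw_mmu_pte_size_l2(0x80000001) == HW_PAGE_SIZE64KB
 */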

#endif /* _HW_MMU_H */