/*
 * drivers/staging/hv/osd.c
 * (source captured from gitweb; commit cf8091308d926a7497774788c2b3dad86f829037)
 */
1 /*
2  *
3  * Copyright (c) 2009, Microsoft Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  *
18  * Authors:
19  *   Haiyang Zhang <haiyangz@microsoft.com>
20  *   Hank Janssen  <hjanssen@microsoft.com>
21  *
22  */
23
24 #define KERNEL_2_6_27
25
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/mm.h>
30 #include <linux/highmem.h>
31 #include <linux/vmalloc.h>
32 //#include <linux/config.h>
33 #include <linux/ioport.h>
34 #include <linux/irq.h>
35 #include <linux/interrupt.h>
36 #include <linux/wait.h>
37 #include <linux/spinlock.h>
38 #include <linux/workqueue.h>
39 #include <linux/kernel.h>
40 #include <linux/timer.h>
41 #include <linux/jiffies.h>
42 #include <linux/delay.h>
43 #include <linux/time.h>
44
45 #include <asm/io.h>
46 #include <asm/bitops.h>
47 #include <asm/kmap_types.h>
48 #include <asm/atomic.h>
49
50 #include "include/osd.h"
51
52 //
53 // Data types
54 //
55 typedef struct _TIMER {
56         struct timer_list timer;
57         PFN_TIMER_CALLBACK callback;
58         void* context;
59 }TIMER;
60
61
62 typedef struct _WAITEVENT {
63         int     condition;
64         wait_queue_head_t event;
65 } WAITEVENT;
66
67 typedef struct _SPINLOCK {
68         spinlock_t              lock;
69         unsigned long   flags;
70 } SPINLOCK;
71
72 typedef struct _WORKQUEUE {
73         struct workqueue_struct *queue;
74 } WORKQUEUE;
75
76 typedef struct _WORKITEM {
77         struct work_struct work;
78         PFN_WORKITEM_CALLBACK callback;
79         void* context;
80 } WORKITEM;
81
82
83 //
84 // Global
85 //
86
87 void LogMsg(const char *fmt, ...)
88 {
89 #ifdef KERNEL_2_6_5
90         char buf[1024];
91 #endif
92         va_list args;
93
94         va_start(args, fmt);
95 #ifdef KERNEL_2_6_5
96         vsnprintf(buf, 1024, fmt, args);
97         va_end(args);
98         printk(buf);
99 #else
100         vprintk(fmt, args);
101         va_end(args);
102 #endif
103 }
104
/* Atomically set bit 'bit' in the bitmap at 'addr'.
 * NOTE(review): addr is cast to unsigned long *; on 64-bit, bit >= 32
 * touches memory beyond a lone unsigned int — callers must provide
 * suitably sized, aligned storage. Confirm at call sites. */
void BitSet(unsigned int *addr, int bit)
{
        unsigned long *bitmap = (unsigned long *)addr;

        set_bit(bit, bitmap);
}
109
/* Return nonzero when bit 'bit' is set in the bitmap at 'addr'. */
int BitTest(unsigned int *addr, int bit)
{
        unsigned long *bitmap = (unsigned long *)addr;

        return test_bit(bit, bitmap);
}
114
/* Atomically clear bit 'bit' in the bitmap at 'addr'. */
void BitClear(unsigned int *addr, int bit)
{
        unsigned long *bitmap = (unsigned long *)addr;

        clear_bit(bit, bitmap);
}
119
/* Atomically clear bit 'bit'; return its previous value. */
int BitTestAndClear(unsigned int *addr, int bit)
{
        unsigned long *bitmap = (unsigned long *)addr;

        return test_and_clear_bit(bit, bitmap);
}
124
/* Atomically set bit 'bit'; return its previous value. */
int BitTestAndSet(unsigned int *addr, int bit)
{
        unsigned long *bitmap = (unsigned long *)addr;

        return test_and_set_bit(bit, bitmap);
}
129
130
131 int InterlockedIncrement(int *val)
132 {
133 #ifdef KERNEL_2_6_5
134         int i;
135         local_irq_disable();
136         i = atomic_read((atomic_t*)val);
137         atomic_set((atomic_t*)val, i+1);
138         local_irq_enable();
139         return i+1;
140 #else
141         return atomic_inc_return((atomic_t*)val);
142 #endif
143 }
144
145 int InterlockedDecrement(int *val)
146 {
147 #ifdef KERNEL_2_6_5
148         int i;
149         local_irq_disable();
150         i = atomic_read((atomic_t*)val);
151         atomic_set((atomic_t*)val, i-1);
152         local_irq_enable();
153         return i-1;
154 #else
155         return atomic_dec_return((atomic_t*)val);
156 #endif
157 }
158
159 #ifndef atomic_cmpxchg
160 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
161 #endif
162 int InterlockedCompareExchange(int *val, int new, int curr)
163 {
164         //return ((int)cmpxchg(((atomic_t*)val), curr, new));
165         return atomic_cmpxchg((atomic_t*)val, curr, new);
166
167 }
168
/* Delay for 'usecs' microseconds. Despite the name this does not sleep:
 * udelay() busy-waits, so it is safe in atomic context but should only
 * be used for short delays. */
void Sleep(unsigned long usecs)
{
        udelay(usecs);
}
173
174 void* VirtualAllocExec(unsigned int size)
175 {
176 #ifdef __x86_64__
177         return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
178 #else
179         return __vmalloc(size, GFP_KERNEL, __pgprot(__PAGE_KERNEL & (~_PAGE_NX)));
180 #endif
181 }
182
/* Free memory obtained from VirtualAllocExec().
 * Dropped the non-standard "return vfree(...)" — returning a void
 * expression from a void function is a GCC extension, not ISO C. */
void VirtualFree(void *VirtAddr)
{
        vfree(VirtAddr);
}
187
188 void* PageAlloc(unsigned int count)
189 {
190         void *p;
191         p = (void *)__get_free_pages(GFP_KERNEL, get_order(count * PAGE_SIZE));
192         if (p) memset(p, 0, count * PAGE_SIZE);
193         return p;
194
195         //struct page* page = alloc_page(GFP_KERNEL|__GFP_ZERO);
196         //void *p;
197
198         ////BUGBUG: We need to use kmap in case we are in HIMEM region
199         //p = page_address(page);
200         //if (p) memset(p, 0, PAGE_SIZE);
201         //return p;
202 }
203
204 void PageFree(void* page, unsigned int count)
205 {
206         free_pages((unsigned long)page, get_order(count * PAGE_SIZE));
207         /*struct page* p = virt_to_page(page);
208         __free_page(p);*/
209 }
210
211
212 void* PageMapVirtualAddress(unsigned long Pfn)
213 {
214         return kmap_atomic(pfn_to_page(Pfn), KM_IRQ0);
215 }
216
217 void PageUnmapVirtualAddress(void* VirtAddr)
218 {
219         kunmap_atomic(VirtAddr, KM_IRQ0);
220 }
221
222 void* MemAlloc(unsigned int size)
223 {
224         return kmalloc(size, GFP_KERNEL);
225 }
226
227 void* MemAllocZeroed(unsigned int size)
228 {
229         void *p = kmalloc(size, GFP_KERNEL);
230         if (p) memset(p, 0, size);
231         return p;
232 }
233
234 void* MemAllocAtomic(unsigned int size)
235 {
236         return kmalloc(size, GFP_ATOMIC);
237 }
238
/* Release memory from MemAlloc()/MemAllocZeroed()/MemAllocAtomic(). */
void MemFree(void *buf)
{
        kfree(buf);
}
243
/* Return a virtual address for the physical MMIO range [phys, phys+size).
 * 'size' is currently unused because no real mapping is created. */
void *MemMapIO(unsigned long phys, unsigned long size)
{
#if X2V_LINUX
        /* X2V: the range is direct-mapped at a fixed, arch-specific offset. */
#ifdef __x86_64__
        return (void *)(phys + 0xFFFF83000C000000);
#else /* i386 */
        return (void *)(phys + 0xfb000000);
#endif
#else
        /* NOTE(review): deliberately bypasses ioremap_nocache() in favor
         * of the hypervisor-specific GetVirtualAddress() translation. */
        return (void *)GetVirtualAddress(phys);
#endif
}
256
/* Counterpart of MemMapIO(). Intentionally a no-op: MemMapIO() does not
 * ioremap(), so there is nothing to iounmap() here. */
void MemUnmapIO(void *virt)
{
}
261
/* Full hardware memory barrier. Prototype fixed from the old-style
 * empty parameter list "()" to the proper "(void)". */
void MemoryFence(void)
{
        mb();
}
266
267 void TimerCallback(unsigned long data)
268 {
269         TIMER* t = (TIMER*)data;
270
271         t->callback(t->context);
272 }
273
274 HANDLE TimerCreate(PFN_TIMER_CALLBACK pfnTimerCB, void* context)
275 {
276         TIMER* t = kmalloc(sizeof(TIMER), GFP_KERNEL);
277         if (!t)
278         {
279                 return NULL;
280         }
281
282         t->callback = pfnTimerCB;
283         t->context = context;
284
285         init_timer(&t->timer);
286         t->timer.data = (unsigned long)t;
287         t->timer.function = TimerCallback;
288
289         return t;
290 }
291
292 void TimerStart(HANDLE hTimer, UINT32 expirationInUs)
293 {
294         TIMER* t  = (TIMER* )hTimer;
295
296         t->timer.expires = jiffies + usecs_to_jiffies(expirationInUs);
297         add_timer(&t->timer);
298 }
299
300 int TimerStop(HANDLE hTimer)
301 {
302         TIMER* t  = (TIMER* )hTimer;
303
304         return del_timer(&t->timer);
305 }
306
307 void TimerClose(HANDLE hTimer)
308 {
309         TIMER* t  = (TIMER* )hTimer;
310
311         del_timer(&t->timer);
312         kfree(t);
313 }
314
315 SIZE_T GetTickCount(void)
316 {
317         return jiffies;
318 }
319
320 signed long long GetTimestamp(void)
321 {
322         struct timeval t;
323
324         do_gettimeofday(&t);
325
326         return  timeval_to_ns(&t);
327 }
328
329 HANDLE WaitEventCreate(void)
330 {
331         WAITEVENT* wait = kmalloc(sizeof(WAITEVENT), GFP_KERNEL);
332         if (!wait)
333         {
334                 return NULL;
335         }
336
337         wait->condition = 0;
338         init_waitqueue_head(&wait->event);
339         return wait;
340 }
341
342 void WaitEventClose(HANDLE hWait)
343 {
344         WAITEVENT* waitEvent = (WAITEVENT* )hWait;
345         kfree(waitEvent);
346 }
347
348 void WaitEventSet(HANDLE hWait)
349 {
350         WAITEVENT* waitEvent = (WAITEVENT* )hWait;
351         waitEvent->condition = 1;
352         wake_up_interruptible(&waitEvent->event);
353 }
354
355 int WaitEventWait(HANDLE hWait)
356 {
357         int ret=0;
358         WAITEVENT* waitEvent = (WAITEVENT* )hWait;
359
360         ret= wait_event_interruptible(waitEvent->event,
361                 waitEvent->condition);
362         waitEvent->condition = 0;
363         return ret;
364 }
365
366 int WaitEventWaitEx(HANDLE hWait, UINT32 TimeoutInMs)
367 {
368         int ret=0;
369         WAITEVENT* waitEvent = (WAITEVENT* )hWait;
370
371         ret= wait_event_interruptible_timeout(waitEvent->event,
372                                                                                         waitEvent->condition,
373                                                                                         msecs_to_jiffies(TimeoutInMs));
374         waitEvent->condition = 0;
375         return ret;
376 }
377
378 HANDLE SpinlockCreate(void)
379 {
380         SPINLOCK* spin = kmalloc(sizeof(SPINLOCK), GFP_KERNEL);
381         if (!spin)
382         {
383                 return NULL;
384         }
385         spin_lock_init(&spin->lock);
386
387         return spin;
388 }
389
390 void SpinlockAcquire(HANDLE hSpin)
391 {
392         SPINLOCK* spin = (SPINLOCK* )hSpin;
393
394         spin_lock_irqsave(&spin->lock, spin->flags);
395 }
396
397 void SpinlockRelease(HANDLE hSpin)
398 {
399         SPINLOCK* spin = (SPINLOCK* )hSpin;
400
401         spin_unlock_irqrestore(&spin->lock, spin->flags);
402 }
403
404 void SpinlockClose(HANDLE hSpin)
405 {
406         SPINLOCK* spin = (SPINLOCK* )hSpin;
407         kfree(spin);
408 }
409
410 void* Physical2LogicalAddr(ULONG_PTR PhysAddr)
411 {
412         void* logicalAddr = phys_to_virt(PhysAddr);
413         BUG_ON(!virt_addr_valid(logicalAddr));
414         return logicalAddr;
415 }
416
417 ULONG_PTR Logical2PhysicalAddr(void * LogicalAddr)
418 {
419         BUG_ON(!virt_addr_valid(LogicalAddr));
420         return virt_to_phys(LogicalAddr);
421 }
422
423
424 ULONG_PTR Virtual2Physical(void * VirtAddr)
425 {
426         ULONG_PTR pfn = vmalloc_to_pfn(VirtAddr);
427
428         return pfn << PAGE_SHIFT;
429 }
430
431 #ifdef KERNEL_2_6_27
432 void WorkItemCallback(struct work_struct *work)
433 #else
434 void WorkItemCallback(void* work)
435 #endif
436 {
437         WORKITEM* w = (WORKITEM*)work;
438
439         w->callback(w->context);
440
441         kfree(w);
442 }
443
444 HANDLE WorkQueueCreate(char* name)
445 {
446         WORKQUEUE *wq = kmalloc(sizeof(WORKQUEUE), GFP_KERNEL);
447         if (!wq)
448         {
449                 return NULL;
450         }
451         wq->queue = create_workqueue(name);
452
453         return wq;
454 }
455
456 void WorkQueueClose(HANDLE hWorkQueue)
457 {
458         WORKQUEUE *wq = (WORKQUEUE *)hWorkQueue;
459
460         destroy_workqueue(wq->queue);
461
462         return;
463 }
464
465 int WorkQueueQueueWorkItem(HANDLE hWorkQueue, PFN_WORKITEM_CALLBACK workItem, void* context)
466 {
467         WORKQUEUE *wq = (WORKQUEUE *)hWorkQueue;
468
469         WORKITEM* w = kmalloc(sizeof(WORKITEM), GFP_ATOMIC);
470         if (!w)
471         {
472                 return -1;
473         }
474
475         w->callback = workItem,
476         w->context = context;
477 #ifdef KERNEL_2_6_27
478         INIT_WORK(&w->work, WorkItemCallback);
479 #else
480         INIT_WORK(&w->work, WorkItemCallback, w);
481 #endif
482         return queue_work(wq->queue, &w->work);
483 }
484
485 void QueueWorkItem(PFN_WORKITEM_CALLBACK workItem, void* context)
486 {
487         WORKITEM* w = kmalloc(sizeof(WORKITEM), GFP_ATOMIC);
488         if (!w)
489         {
490                 return;
491         }
492
493         w->callback = workItem,
494         w->context = context;
495 #ifdef KERNEL_2_6_27
496         INIT_WORK(&w->work, WorkItemCallback);
497 #else
498         INIT_WORK(&w->work, WorkItemCallback, w);
499 #endif
500         schedule_work(&w->work);
501 }