3 * Copyright (c) 2009, Microsoft Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
30 #include <linux/highmem.h>
31 #include <linux/vmalloc.h>
32 //#include <linux/config.h>
33 #include <linux/ioport.h>
34 #include <linux/irq.h>
35 #include <linux/interrupt.h>
36 #include <linux/wait.h>
37 #include <linux/spinlock.h>
38 #include <linux/workqueue.h>
39 #include <linux/kernel.h>
40 #include <linux/timer.h>
41 #include <linux/jiffies.h>
42 #include <linux/delay.h>
43 #include <linux/time.h>
46 #include <asm/bitops.h>
47 #include <asm/kmap_types.h>
48 #include <asm/atomic.h>
50 #include "include/osd.h"
55 typedef struct _TIMER {
56 struct timer_list timer;
57 PFN_TIMER_CALLBACK callback;
62 typedef struct _WAITEVENT {
64 wait_queue_head_t event;
67 typedef struct _SPINLOCK {
/* WORKQUEUE - thin wrapper around a dedicated kernel workqueue. */
typedef struct _WORKQUEUE {
	struct workqueue_struct *queue;	/* created by create_workqueue() */
} WORKQUEUE;
76 typedef struct _WORKITEM {
77 struct work_struct work;
78 PFN_WORKITEM_CALLBACK callback;
87 void LogMsg(const char *fmt, ...)
96 vsnprintf(buf, 1024, fmt, args);
/*
 * BitSet - atomically set bit 'bit' in the word at 'addr'.
 * The unsigned int* -> unsigned long* cast follows the kernel bitops
 * calling convention used by every Bit* wrapper in this file.
 */
void BitSet(unsigned int* addr, int bit)
{
	set_bit(bit, (unsigned long*)addr);
}
/* BitTest - return nonzero if bit 'bit' is set in the word at 'addr'. */
int BitTest(unsigned int* addr, int bit)
{
	return test_bit(bit, (unsigned long*)addr);
}
/* BitClear - atomically clear bit 'bit' in the word at 'addr'. */
void BitClear(unsigned int* addr, int bit)
{
	clear_bit(bit, (unsigned long*)addr);
}
/*
 * BitTestAndClear - atomically clear bit 'bit' at 'addr';
 * returns the bit's previous value.
 */
int BitTestAndClear(unsigned int* addr, int bit)
{
	return test_and_clear_bit(bit, (unsigned long*)addr);
}
/*
 * BitTestAndSet - atomically set bit 'bit' at 'addr';
 * returns the bit's previous value.
 */
int BitTestAndSet(unsigned int* addr, int bit)
{
	return test_and_set_bit(bit, (unsigned long*)addr);
}
131 int InterlockedIncrement(int *val)
136 i = atomic_read((atomic_t*)val);
137 atomic_set((atomic_t*)val, i+1);
141 return atomic_inc_return((atomic_t*)val);
145 int InterlockedDecrement(int *val)
150 i = atomic_read((atomic_t*)val);
151 atomic_set((atomic_t*)val, i-1);
155 return atomic_dec_return((atomic_t*)val);
159 #ifndef atomic_cmpxchg
160 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
162 int InterlockedCompareExchange(int *val, int new, int curr)
164 //return ((int)cmpxchg(((atomic_t*)val), curr, new));
165 return atomic_cmpxchg((atomic_t*)val, curr, new);
/*
 * Sleep - delay the caller for 'usecs' microseconds.
 * NOTE(review): body elided in this view; udelay() (a busy-wait) matches
 * the upstream staging driver — confirm.  For long, sleepable delays a
 * usleep-style primitive would be preferable.
 */
void Sleep(unsigned long usecs)
{
	udelay(usecs);
}
174 void* VirtualAllocExec(unsigned int size)
177 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
179 return __vmalloc(size, GFP_KERNEL, __pgprot(__PAGE_KERNEL & (~_PAGE_NX)));
/*
 * VirtualFree - release memory obtained from VirtualAllocExec.
 * Fix: the original wrote "return vfree(...)" — returning a void
 * expression from a void function is a C constraint violation.
 */
void VirtualFree(void* VirtAddr)
{
	vfree(VirtAddr);
}
188 void* PageAlloc(unsigned int count)
191 p = (void *)__get_free_pages(GFP_KERNEL, get_order(count * PAGE_SIZE));
192 if (p) memset(p, 0, count * PAGE_SIZE);
195 //struct page* page = alloc_page(GFP_KERNEL|__GFP_ZERO);
198 ////BUGBUG: We need to use kmap in case we are in HIMEM region
199 //p = page_address(page);
200 //if (p) memset(p, 0, PAGE_SIZE);
204 void PageFree(void* page, unsigned int count)
206 free_pages((unsigned long)page, get_order(count * PAGE_SIZE));
207 /*struct page* p = virt_to_page(page);
212 void* PageMapVirtualAddress(unsigned long Pfn)
214 return kmap_atomic(pfn_to_page(Pfn), KM_IRQ0);
217 void PageUnmapVirtualAddress(void* VirtAddr)
219 kunmap_atomic(VirtAddr, KM_IRQ0);
222 void* MemAlloc(unsigned int size)
224 return kmalloc(size, GFP_KERNEL);
227 void* MemAllocZeroed(unsigned int size)
229 void *p = kmalloc(size, GFP_KERNEL);
230 if (p) memset(p, 0, size);
234 void* MemAllocAtomic(unsigned int size)
236 return kmalloc(size, GFP_ATOMIC);
/*
 * MemFree - release memory obtained from any MemAlloc* variant.
 * NOTE(review): body elided in this view; kfree() is the only matching
 * release for the kmalloc-family allocators above — confirm.
 */
void MemFree(void* buf)
{
	kfree(buf);
}
/*
 * MemMapIO - map a physical range for I/O access.
 * NOTE(review): the elided conditionals are reconstructed here.  The
 * hard-coded offsets are environment-specific direct-map hacks; the
 * commented ioremap_nocache() is the portable replacement — confirm
 * which configuration this build actually uses.  'size' is unused by
 * the non-ioremap paths.
 */
void *MemMapIO(unsigned long phys, unsigned long size)
{
#if X2V_LINUX
#ifdef __x86_64__
	return (void*)(phys + 0xFFFF83000C000000);
#else /* i386 */
	return (void*)(phys + 0xfb000000);
#endif
#else
	return (void*)GetVirtualAddress(phys); /* return ioremap_nocache(phys, size); */
#endif
}
/*
 * MemUnmapIO - release a mapping obtained from MemMapIO.
 * Currently a no-op: MemMapIO returns fixed-offset / GetVirtualAddress
 * results rather than ioremap()ed regions, so there is nothing to undo.
 * NOTE(review): body elided in this view — if MemMapIO is switched to
 * ioremap_nocache(), this must call iounmap(virt).
 */
void MemUnmapIO(void *virt)
{
}
267 void TimerCallback(unsigned long data)
269 TIMER* t = (TIMER*)data;
271 t->callback(t->context);
274 HANDLE TimerCreate(PFN_TIMER_CALLBACK pfnTimerCB, void* context)
276 TIMER* t = kmalloc(sizeof(TIMER), GFP_KERNEL);
282 t->callback = pfnTimerCB;
283 t->context = context;
285 init_timer(&t->timer);
286 t->timer.data = (unsigned long)t;
287 t->timer.function = TimerCallback;
292 void TimerStart(HANDLE hTimer, UINT32 expirationInUs)
294 TIMER* t = (TIMER* )hTimer;
296 t->timer.expires = jiffies + usecs_to_jiffies(expirationInUs);
297 add_timer(&t->timer);
300 int TimerStop(HANDLE hTimer)
302 TIMER* t = (TIMER* )hTimer;
304 return del_timer(&t->timer);
307 void TimerClose(HANDLE hTimer)
309 TIMER* t = (TIMER* )hTimer;
311 del_timer(&t->timer);
315 SIZE_T GetTickCount(void)
320 signed long long GetTimestamp(void)
326 return timeval_to_ns(&t);
329 HANDLE WaitEventCreate(void)
331 WAITEVENT* wait = kmalloc(sizeof(WAITEVENT), GFP_KERNEL);
338 init_waitqueue_head(&wait->event);
342 void WaitEventClose(HANDLE hWait)
344 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
348 void WaitEventSet(HANDLE hWait)
350 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
351 waitEvent->condition = 1;
352 wake_up_interruptible(&waitEvent->event);
355 int WaitEventWait(HANDLE hWait)
358 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
360 ret= wait_event_interruptible(waitEvent->event,
361 waitEvent->condition);
362 waitEvent->condition = 0;
366 int WaitEventWaitEx(HANDLE hWait, UINT32 TimeoutInMs)
369 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
371 ret= wait_event_interruptible_timeout(waitEvent->event,
372 waitEvent->condition,
373 msecs_to_jiffies(TimeoutInMs));
374 waitEvent->condition = 0;
378 HANDLE SpinlockCreate(void)
380 SPINLOCK* spin = kmalloc(sizeof(SPINLOCK), GFP_KERNEL);
385 spin_lock_init(&spin->lock);
390 void SpinlockAcquire(HANDLE hSpin)
392 SPINLOCK* spin = (SPINLOCK* )hSpin;
394 spin_lock_irqsave(&spin->lock, spin->flags);
397 void SpinlockRelease(HANDLE hSpin)
399 SPINLOCK* spin = (SPINLOCK* )hSpin;
401 spin_unlock_irqrestore(&spin->lock, spin->flags);
404 void SpinlockClose(HANDLE hSpin)
406 SPINLOCK* spin = (SPINLOCK* )hSpin;
410 void* Physical2LogicalAddr(ULONG_PTR PhysAddr)
412 void* logicalAddr = phys_to_virt(PhysAddr);
413 BUG_ON(!virt_addr_valid(logicalAddr));
417 ULONG_PTR Logical2PhysicalAddr(void * LogicalAddr)
419 BUG_ON(!virt_addr_valid(LogicalAddr));
420 return virt_to_phys(LogicalAddr);
424 ULONG_PTR Virtual2Physical(void * VirtAddr)
426 ULONG_PTR pfn = vmalloc_to_pfn(VirtAddr);
428 return pfn << PAGE_SHIFT;
432 void WorkItemCallback(struct work_struct *work)
434 void WorkItemCallback(void* work)
437 WORKITEM* w = (WORKITEM*)work;
439 w->callback(w->context);
444 HANDLE WorkQueueCreate(char* name)
446 WORKQUEUE *wq = kmalloc(sizeof(WORKQUEUE), GFP_KERNEL);
451 wq->queue = create_workqueue(name);
456 void WorkQueueClose(HANDLE hWorkQueue)
458 WORKQUEUE *wq = (WORKQUEUE *)hWorkQueue;
460 destroy_workqueue(wq->queue);
465 int WorkQueueQueueWorkItem(HANDLE hWorkQueue, PFN_WORKITEM_CALLBACK workItem, void* context)
467 WORKQUEUE *wq = (WORKQUEUE *)hWorkQueue;
469 WORKITEM* w = kmalloc(sizeof(WORKITEM), GFP_ATOMIC);
475 w->callback = workItem,
476 w->context = context;
478 INIT_WORK(&w->work, WorkItemCallback);
480 INIT_WORK(&w->work, WorkItemCallback, w);
482 return queue_work(wq->queue, &w->work);
485 void QueueWorkItem(PFN_WORKITEM_CALLBACK workItem, void* context)
487 WORKITEM* w = kmalloc(sizeof(WORKITEM), GFP_ATOMIC);
493 w->callback = workItem,
494 w->context = context;
496 INIT_WORK(&w->work, WorkItemCallback);
498 INIT_WORK(&w->work, WorkItemCallback, w);
500 schedule_work(&w->work);