  1. /*********************************************************************************
  2.  
  3. AVerMedia TECHNOLOGIES, Inc.
  4. AVerMedia Linux Driver License Agreement
  5.  
  6.  
  7.  
  8. IMPORTANT: This AVerMedia Linux Driver is supplied to you by
  9. AVerMedia TECHNOLOGIES, Inc. ("AVerMedia") in consideration
  10. of your agreement to the following terms, and your use, installation,
  11. modification or redistribution of this AVerMedia Linux Driver
  12. constitutes acceptance of these terms. If you do not agree with
  13. these terms, please do not use, install, modify or redistribute
  14. this AVerMedia Linux Driver.
  15.  
  16. 1. License. Any driver sources, binaries, utilities, documentation
  17. and other materials accompanying this License, whether on disk,
  18. print or electronic documentation, in read only memory, or any
  19. other media, (collectively, the "AVerMedia Linux Driver") are
  20. licensed, not sold, to you by AVerMedia for use only under the terms
  21. of this License, and AVerMedia reserves all rights not expressly
  22. granted to you. The rights granted herein are limited to AVerMedia's
  23. intellectual property rights in the AVerMedia Linux Driver and do not
  24. include any other patents or intellectual property rights. You own
  25. the media on which the AVerMedia Linux Driver is recorded but AVerMedia
  26. retains ownership of the AVerMedia Linux Driver itself and its
  27. derivative works. The AVerMedia Linux Driver in this package and
  28. any copies, modifications and derivative works which this License
  29. authorizes you to make are subject to this License. The binary
  30. portion of the AVerMedia Linux Driver is ported from the Windows
  31. driver and is covered by a separate NDA, so its original source
  32. code will not be released.
  33.  
  34. 2. Permitted Uses and Restrictions. You may use, copy, redistribute
  35. or install the original AVerMedia Linux Driver to test or demonstrate
  36. the AVerMedia Linux Driver with AVerMedia's products. You may also
  37. modify the released source code in order to improve compatibility,
  38. operability and functionality with AVerMedia's products, as long as
  39. this License is retained in whole, and subsequently use, copy,
  40. redistribute or install the derivative works. You have no right to
  41. reverse engineer, decompile, disassemble the accompanying object files
  42. through the released source code, nor to modify, incorporate into or
  43. compile in combination with your own programs to operate with any
  44. product other than from AVerMedia. Your rights under this License
  45. will terminate automatically without notice from AVerMedia if you
  46. fail to comply with any term(s) of this License.
  47.  
  48. 3. Disclaimer of Warranty. The AVerMedia Linux Driver may be "alpha",
  49. "beta", "development", pre-release, untested, and/or not fully tested
  50. and may contain errors that could cause failures or loss of data, be
  51. incomplete or contain inaccuracies. YOU EXPRESSLY ACKNOWLEDGE AND AGREE
  52. THAT USE OF THE AVERMEDIA LINUX DRIVER IS AT YOUR SOLE RISK AND THAT THE
  53. ENTIRE RISK AS TO SATISFACTORY QUALITY, PERFORMANCE, ACCURACY AND EFFORT
  54. IS WITH YOU. THE AVERMEDIA LINUX DRIVER IS PROVIDED "AS IS" WITH ALL
  55. FAULTS. THE USER ASSUMES TOTAL RESPONSIBILITY AND ALL RISKS ASSOCIATED
  56. WITH USE OF THE SOFTWARE, INCLUDING, WITHOUT LIMITATION, RISKS OF ANY
  57. DEFECTS, INACCURACIES, PROGRAM ERRORS, DAMAGE TO OR LOSS OF DATA,
  58. PROGRAMS OR EQUIPMENT, UNAVAILABILITY OR INTERRUPTION OF OPERATIONS.
  59. AVERMEDIA DOES NOT MAKE, AND EXPRESSLY DISCLAIMS, ANY EXPRESS OR IMPLIED
  60. WARRANTIES OF ANY KIND WHATSOEVER, INCLUDING, WITHOUT LIMITATION,
  61. ANY WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, AND ANY WARRANTIES
  62. OF TITLE OR NON-INFRINGEMENT. AVERMEDIA DOES NOT WARRANT THAT THE USE
  63. OR OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR FREE.
  64.  
  65. 4. Complete Agreement. This License constitutes the entire agreement
  66. between the parties with respect to the use of the AVerMedia Linux
  67. Driver licensed hereunder and supersedes all prior or contemporaneous
  68. understandings regarding such subject matter. No amendment to or
  69. modification of this License will be binding unless in writing and
  70. signed by AVerMedia. Any translation of this License is done for
  71. local requirements and in the event of a dispute between the English
  72. and any non-English versions, the English version of this License
  73. shall govern.
  74.  
  75. *********************************************************************************/
  76. #include <linux/module.h>
  77. #include <linux/version.h>
  78. #include <linux/types.h>
  79.  
  80. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
  81. #include <asm/semaphore.h>
  82. #else
  83. #include <linux/semaphore.h>
  84. #endif
  85. #include <linux/slab.h>
  86. #include <linux/errno.h>
  87. #include <linux/sched.h>
  88. #include <linux/kref.h>
  89. #include <linux/smp_lock.h>
  90. #include <linux/proc_fs.h>
  91. #include <linux/list.h>
  92. #include <linux/kdev_t.h>
  93. #include <linux/device.h>
  94. #include <asm/uaccess.h>
  95. #include <linux/spinlock.h>
  96. #include <linux/mm.h>
  97. #include <asm/scatterlist.h>
  98. #include <linux/vmalloc.h>
  99. #include <linux/timer.h>
  100. #include <linux/pci.h>
  101. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
  102. #include <linux/hardirq.h>
  103. #endif
  104.  
  105. #include <linux/delay.h>
  106. #include <linux/interrupt.h>
  107. #include <asm/atomic.h>
  108. #include <asm/io.h>
  109. #include <linux/signal.h>
  110.  
  111. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
  112. #include <linux/mutex.h>
  113. #endif
  114.  
  115. #include <linux/poll.h>
  116. #include <linux/scatterlist.h>
  117.  
  118. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
  119. #include <asm/bug.h>
  120. #else
  121. #include <linux/bug.h>
  122. #endif
  123.  
  124. #include <linux/videodev2.h>
  125.  
  126. #include "osdep.h"
  127. #include "debug.h"
  128. #include "id.h"
  129.  
  130.  
  131.  
  132.  
  133.  
  134.  
  135.  
  136.  
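/*
 * Wait-queue wrappers: callers hold only the opaque pWait_queue_head_t /
 * pWait_queue_t handles; the real wait_queue_head_t and wait_queue_t
 * objects are allocated here and released by the matching *Fini calls.
 */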
  137. int SysWaitQueueHeadInit(pWait_queue_head_t *wq)
  138. {
  139. wait_queue_head_t *pWQ = kmalloc(sizeof(*pWQ), GFP_KERNEL);
  140.  
  141. if( pWQ ) {
  142. init_waitqueue_head(pWQ);
  143. *wq = (pWait_queue_head_t) pWQ;
  144. return 0;
  145. }
  146.  
  147. *wq = NULL;
  148. return -ENOMEM;
  149. }
  150.  
  151. int SysWaitQueueHeadFini(pWait_queue_head_t wq)
  152. {
  153. if( wq ) {
  154. kfree(wq);
  155. return 0;
  156. }
  157.  
  158. return -EINVAL;
  159. }
  160.  
  161.  
  162.  
  163.  
  164. int SysWaitQueueInit(pWait_queue_t *wq)
  165. {
  166. DECLARE_WAITQUEUE(tmp_wait_queue, current);
  167. *wq = (pWait_queue_t) kmalloc(sizeof(wait_queue_t), GFP_KERNEL);
  168.  
  169. if( !(*wq) ) return -ENOMEM;
  170. memcpy(*wq, &tmp_wait_queue, sizeof(wait_queue_t));
  171. return 0;
  172. }
  173.  
  174. int SysWaitQueueFini(pWait_queue_t wq)
  175. {
  176. if( wq ) {
  177. kfree(wq);
  178. return 0;
  179. }
  180.  
  181. return -EINVAL;
  182. }
  183.  
  184. void SysAddWaitQueue(pWait_queue_head_t wqh, pWait_queue_t wq)
  185. {
  186. wait_queue_head_t *pWQH = (wait_queue_head_t *) wqh;
  187. wait_queue_t *pWQ = (wait_queue_t *) wq;
  188.  
  189. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
  190. pWQ->task = current;
  191. #else
  192. pWQ->private = current;
  193. #endif
  194. add_wait_queue(pWQH, pWQ);
  195. }
  196.  
  197.  
  198.  
  199.  
  200.  
  201. void SysRemoveWaitQueue(pWait_queue_head_t wqh, pWait_queue_t wq)
  202. {
  203. wait_queue_head_t *pWQH = (wait_queue_head_t *) wqh;
  204. wait_queue_t *pWQ = (wait_queue_t *) wq;
  205.  
  206. remove_wait_queue(pWQH, pWQ);
  207. }
  208.  
  209. void SysWakeUp(pWait_queue_head_t wqh)
  210. {
  211. wake_up((wait_queue_head_t *)wqh);
  212. }
  213.  
  214.  
  215. void SysPollWait(void *FileContext, pWait_queue_head_t head, void *pollstruct)
  216. {
  217. poll_wait((struct file *)FileContext,
  218. (wait_queue_head_t *)head,
  219. (struct poll_table_struct *)pollstruct);
  220. }
  221.  
  222.  
  223.  
  224.  
  225.  
  226.  
  227.  
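/*
 * Spinlock wrappers: on SMP or CONFIG_DEBUG_SPINLOCK builds a real
 * spinlock_t is allocated and used; on uniprocessor builds only a
 * one-byte placeholder is allocated and lock/unlock are no-ops.
 */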
  228. int SysSpinLockInit(pSpinlock_t *slock)
  229. {
  230. spinlock_t *plock = NULL;
  231.  
  232. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  233. plock = kmalloc(sizeof(*plock), GFP_KERNEL);
  234.  
  235. if( plock ) {
  236. *plock = SPIN_LOCK_UNLOCKED;
  237. *slock = (pSpinlock_t) plock;
  238. return 0;
  239. }
  240. #else
  241. plock = kmalloc(1, GFP_KERNEL);
  242. *slock = (pSpinlock_t) plock;
  243. if( plock )
  244. return 0;
  245. #endif
  246.  
  247. *slock = NULL;
  248. return -ENOMEM;
  249. }
  250.  
  251. void SysSpinLock(pSpinlock_t slock)
  252. {
  253. spinlock_t *lock = slock;
  254. if( lock==NULL ) { BUG(); return; }
  255. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  256. spin_lock(lock);
  257. #else
  258.  
  259. #endif
  260. }
  261.  
  262. void SysSpinUnlock(pSpinlock_t slock)
  263. {
  264. spinlock_t *lock = slock;
  265. if( lock==NULL ) { BUG(); return; }
  266. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  267. spin_unlock(lock);
  268. #else
  269.  
  270. #endif
  271. }
  272.  
  273. void SysSpinLockIrqsave(pSpinlock_t slock, unsigned long *flags)
  274. {
  275. spinlock_t *lock = slock;
  276. if( lock==NULL ) { BUG(); return; }
  277. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  278. spin_lock_irqsave(lock, *flags);
  279. #else
  280.  
  281. #endif
  282. }
  283.  
  284. void SysSpinUnlockIrqrestore(pSpinlock_t slock, unsigned long flags)
  285. {
  286. spinlock_t *lock = slock;
  287. if( lock==NULL ) { BUG(); return; }
  288. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  289. spin_unlock_irqrestore(lock, flags);
  290. #else
  291.  
  292. #endif
  293. }
  294.  
  295. int SysSpinLockFini(pSpinlock_t slock)
  296. {
  297. if( slock ) {
  298. kfree(slock);
  299. return 0;
  300. }
  301.  
  302. return -EINVAL;
  303. }
  304.  
  305.  
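/*
 * Semaphore wrappers: st_Mutex creates an unlocked binary semaphore,
 * st_MutexLocked a locked one (init_MUTEX/init_MUTEX_LOCKED era API).
 */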
  306. int SysSemInit(pSemaphore *sem, enum sem_type type)
  307. {
  308. struct semaphore *tmp;
  309.  
  310. *sem = NULL;
  311. tmp = kmalloc(sizeof(struct semaphore), GFP_KERNEL);
  312. if( !tmp ) return -ENOMEM;
  313.  
  314. if(st_Mutex == type) {
  315. init_MUTEX(tmp);
  316. *sem = (pSemaphore) tmp;
  317. }
  318. else if(st_MutexLocked == type) {
  319. init_MUTEX_LOCKED(tmp);
  320. *sem = (pSemaphore) tmp;
  321. }
  322. else {
  323. kfree(tmp);
  324. return -EINVAL;
  325. }
  326.  
  327. return 0;
  328. }
  329.  
  330. int SysSemDown(pSemaphore sem)
  331. {
  332. if(!sem) BUG();
  333. #if 0
  334. down_interruptible((struct semaphore *)sem);
  335. #else
  336. down((struct semaphore *)sem);
  337. #endif
  338. return 0;
  339. }
  340.  
  341.  
  342. int SysSemDownInterruptible(pSemaphore sem)
  343. {
  344. if(!sem) BUG();
  345. return down_interruptible((struct semaphore *)sem);
  346. }
  347.  
  348.  
  349. void SysSemUp(pSemaphore sem)
  350. {
  351. if(!sem) BUG();
  352. up((struct semaphore *)sem);
  353. }
  354.  
  355. void SysSemFini(pSemaphore sem)
  356. {
  357. if(!sem) BUG();
  358. kfree(sem);
  359. }
  360.  
  361. #if 1
  362.  
  363.  
  364. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)
  365.  
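/*
 * On kernels newer than 2.6.17 the Mutex wrapper allocates its own
 * struct mutex plus a lock_class_key so __mutex_init() can be called
 * with the caller-supplied name (used by lockdep/debugging).
 */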
  366. int _SysMutexInit(pMutex *pmutex, const char *name)
  367. {
  368. *pmutex = kmalloc(sizeof(Mutex), GFP_KERNEL);
  369. if( *pmutex==NULL ) return -ENOMEM;
  370.  
  371. (*pmutex)->context = kmalloc(sizeof(struct mutex), GFP_KERNEL);
  372. if( (*pmutex)->context==NULL ) {
  373. kfree((*pmutex));
  374. return -ENOMEM;
  375. }
  376. (*pmutex)->key = kmalloc(sizeof(struct lock_class_key), GFP_KERNEL);
  377. if( (*pmutex)->key==NULL ) {
  378. kfree((*pmutex)->context);
  379. kfree(*pmutex);
  380. return -ENOMEM;
  381. }
  382.  
  383. __mutex_init((struct mutex *)(*pmutex)->context,
  384. name,
  385. (struct lock_class_key *)(*pmutex)->key);
  386. return 0;
  387. }
  388.  
  389. void SysMutexLock(pMutex mutex)
  390. {
  391. mutex_lock((struct mutex *)mutex->context);
  392. }
  393.  
  394. int SysMutexLockInterruptible(pMutex mutex)
  395. {
  396. return mutex_lock_interruptible((struct mutex *)mutex->context);
  397. }
  398.  
  399. void SysMutexUnlock(pMutex mutex)
  400. {
  401. mutex_unlock((struct mutex *)mutex->context);
  402. }
  403.  
  404. void SysMutexFree(pMutex mutex)
  405. {
  406. kfree(mutex->context);
  407. kfree(mutex->key);
  408. kfree(mutex);
  409. }
  410.  
  411. #else
  412.  
  413.  
  414.  
  415. int _SysMutexInit(pMutex *pmutex, const char *name)
  416. {
  417. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
  418. *pmutex = kmalloc(sizeof(struct mutex), GFP_KERNEL);
  419. if( *pmutex==NULL ) return -ENOMEM;
  420.  
  421. __mutex_init((struct mutex *)*pmutex, name);
  422. return 0;
  423. #else
  424. return SysSemInit((pSemaphore *)pmutex, st_Mutex);
  425. #endif
  426. }
  427.  
  428. void SysMutexLock(pMutex mutex)
  429. {
  430. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
  431. mutex_lock((struct mutex *)mutex);
  432. #else
  433. SysSemDown((pSemaphore)mutex);
  434. #endif
  435. }
  436.  
  437. int SysMutexLockInterruptible(pMutex mutex)
  438. {
  439. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
  440. return mutex_lock_interruptible((struct mutex *)mutex);
  441. #else
  442. return SysSemDownInterruptible((pSemaphore)mutex);
  443. #endif
  444. }
  445.  
  446. void SysMutexUnlock(pMutex mutex)
  447. {
  448. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
  449. mutex_unlock((struct mutex *)mutex);
  450. #else
  451. SysSemUp((pSemaphore)mutex);
  452. #endif
  453. }
  454.  
  455. void SysMutexFree(pMutex mutex)
  456. {
  457. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
  458. kfree(mutex);
  459. #else
  460. SysSemFini((pSemaphore)mutex);
  461. #endif
  462. }
  463.  
  464. #endif
  465.  
  466. #endif
  467.  
  468.  
  469. void SysJSleep(unsigned long j)
  470. {
  471. set_current_state(TASK_UNINTERRUPTIBLE);
  472. schedule_timeout(j);
  473. }
  474.  
  475.  
  476. void SysMSleep(int miliSec)
  477. {
  478. unsigned long j = (HZ*miliSec)/1000;
  479.  
  480. set_current_state(TASK_UNINTERRUPTIBLE);
  481. schedule_timeout(j);
  482. }
  483.  
  484. void SysUSleep(int microSec)
  485. {
  486. udelay(microSec);
  487. }
  488.  
  489.  
  490. void SysMDelay(int miliSec)
  491. {
  492. mdelay(miliSec);
  493. }
  494.  
  495. int SysInAtomic(void)
  496. {
  497. if(in_atomic() || irqs_disabled()) {
  498. return 1;
  499. }
  500. else {
  501. return 0;
  502. }
  503. }
  504.  
  505.  
  506.  
  507.  
  508.  
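/*
 * SysKMalloc: picks the GFP flags from the requested kmalloc_type, forces
 * GFP_ATOMIC when called from atomic or IRQ-disabled context, and returns
 * a zeroed buffer (or NULL on failure).
 */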
  509. void * SysKMalloc(unsigned int size, kmalloc_type flag)
  510. {
  511. u32 type;
  512. void *ptr = NULL;
  513.  
  514.  
  515. if(in_atomic() || irqs_disabled()) {
  516. flag = eKMALLOC_ATOMIC;
  517. }
  518.  
  519.  
  520. switch( flag ) {
  521. case eKMALLOC_ATOMIC:
  522. type = GFP_ATOMIC;
  523. break;
  524.  
  525. case eKMALLOC_KERNEL:
  526. type = GFP_KERNEL;
  527. break;
  528.  
  529. case eKMALLOC_DMA:
  530. type = GFP_DMA;
  531. break;
  532.  
  533. default:
  534. return ptr;
  535. }
  536.  
  537. if(in_atomic()) type = GFP_ATOMIC;
  538. ptr = kmalloc(size, type);
  539. if( ptr ) memset(ptr, 0, size);
  540. DBG_OSDEP("kmalloc %s @ %p\n",
  541. (GFP_DMA==type)? "DMA" :
  542. (GFP_KERNEL==type)? "KERN" :
  543. (GFP_ATOMIC==type)? "ATOMIC" : "Unknown",
  544. ptr);
  545.  
  546. return ptr;
  547. }
  548.  
  549. void SysKFree(void *ptr)
  550. {
  551. DBG_OSDEP("kfree %p\n", ptr);
  552. if(!ptr) BUG();
  553. kfree(ptr);
  554. }
  555.  
  556. void SysMemSet(void *ptr, unsigned char val, unsigned int size)
  557. {
  558. memset(ptr, val, size);
  559. }
  560.  
  561. void *SysMemCpy(void *dest, const void *src, unsigned int size)
  562. {
  563. return memcpy(dest, src, size);
  564. }
  565.  
  566. int SysMemCmp(const void *dest, const void *src, unsigned int size)
  567. {
  568. return memcmp(dest, src, size);
  569. }
  570.  
  571. void *SysReAlloc(void *ptr, unsigned int oldsize, unsigned int newsize)
  572. {
  573. void *newptr = NULL;
  574. newptr = SysKMalloc(newsize, eKMALLOC_KERNEL);
  575. if( !newptr ) return NULL;
  576. SysMemCpy(newptr, ptr, (oldsize < newsize)? oldsize : newsize);
  577. SysKFree(ptr);
  578. return newptr;
  579. }
  580.  
  581. void *SysMemMove(void *ptr, const void *src, unsigned int size)
  582. {
  583. return memmove(ptr, src, size);
  584. }
  585.  
  586.  
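/*
 * SysPrintk/SysVPrintk: on kernels newer than 2.6.8 these forward to
 * vprintk(); on older kernels the message is formatted into a static,
 * spinlock-protected buffer and handed to printk().
 */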
  587. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
  588.  
  589.  
  590. int SysPrintk(const char *fmt, ...)
  591. {
  592. va_list args;
  593. int ret;
  594.  
  595. va_start(args, fmt);
  596. ret = vprintk(fmt, args);
  597. va_end(args);
  598.  
  599. #if 0
  600. if(SysInAtomic())
  601. SysMDelay(500);
  602. else
  603. SysMSleep(500);
  604. #endif
  605. return ret;
  606. }
  607.  
  608. int SysVPrintk(const char *fmt, va_list args)
  609. {
  610. return vprintk(fmt, args);
  611. }
  612.  
  613.  
  614. #else
  615.  
  616. static spinlock_t logbuf_lock = SPIN_LOCK_UNLOCKED;
  617. static char logbuf[1024];
  618.  
  619. int SysPrintk(const char *fmt, ...)
  620. {
  621. va_list args;
  622. int ret;
  623.  
  624. va_start(args, fmt);
  625. ret = SysVPrintk(fmt, args);
  626. va_end(args);
  627.  
  628. return ret;
  629. }
  630.  
  631. int SysVPrintk(const char *fmt, va_list args)
  632. {
  633. int ret;
  634. unsigned long flags;
  635.  
  636. spin_lock_irqsave(&logbuf_lock, flags);
  637. ret = vsprintf(logbuf, fmt, args);
  638. spin_unlock_irqrestore(&logbuf_lock, flags);
  639.  
  640. ret = printk("%s", logbuf);
  641.  
  642. return ret;
  643. }
  644.  
  645. #endif
  646.  
  647.  
  648. int SysSnPrintf(char *buf, unsigned int size, const char *fmt, ...)
  649. {
  650. va_list args;
  651. int i;
  652.  
  653. va_start(args, fmt);
  654. i=vsnprintf(buf, size, fmt, args);
  655. va_end(args);
  656. return i;
  657. }
  658.  
  659.  
  660.  
  661.  
  662.  
  663.  
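/*
 * kref wrappers: kernels up to 2.6.8 take the release callback in
 * kref_init() and none in kref_put(); newer kernels take it in kref_put(),
 * which is why both call sites are split on KERNEL_VERSION(2,6,8).
 */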
  664. #if 0
  665. void SysKrefInit(pKref *ppkref)
  666. #else
  667. void SysKrefInit(pKref *ppkref, cb_kref cb_func)
  668. #endif
  669. {
  670. struct kref *pkref;
  671.  
  672. pkref = kmalloc(sizeof(*pkref), GFP_KERNEL);
  673. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
  674. if( pkref ) kref_init(pkref);
  675. #else
  676. if( pkref ) kref_init(pkref, (void (*)(struct kref *))cb_func);
  677. #endif
  678. *ppkref = pkref;
  679. }
  680.  
  681.  
  682.  
  683.  
  684. void SysKrefPut(pKref kref, cb_kref cb_func)
  685. {
  686. struct kref *pkref = (struct kref *) kref;
  687.  
  688. DBG_fOSDEP("pkref=%p, func=%p\n", pkref, cb_func);
  689. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
  690. kref_put(pkref, (void (*)(struct kref *))cb_func);
  691. #else
  692. kref_put(pkref);
  693. #endif
  694. }
  695.  
  696.  
  697.  
  698.  
  699.  
  700. void SysKrefGet(pKref kref)
  701. {
  702. struct kref *pkref = (struct kref *) kref;
  703. DBG_fOSDEP("pkref=%p\n", pkref);
  704. kref_get(pkref);
  705. }
  706.  
  707.  
  708.  
  709.  
  710.  
  711. void SysKrefFree(pKref kref)
  712. {
  713. if(!kref) BUG();
  714. kfree(kref);
  715. }
  716.  
  717. void SysLockKernel()
  718. {
  719. lock_kernel();
  720. }
  721.  
  722. void SysUnlockKernel()
  723. {
  724. unlock_kernel();
  725. }
  726.  
  727. unsigned long SysCopyToUser(void *to, const void *from, unsigned long n)
  728. {
  729. return copy_to_user(to, from, n);
  730. }
  731.  
  732. unsigned long SysCopyFromUser(void *to, const void *from, unsigned long n)
  733. {
  734. return copy_from_user(to, from, n);
  735. }
  736.  
  737. unsigned int SysStrLen(const char *str)
  738. {
  739. return strlen(str);
  740. }
  741.  
  742.  
  743. unsigned long SysSimpleStrtoul(const char *cp,char **endp,unsigned int base)
  744. {
  745. return simple_strtoul(cp, endp, base);
  746. }
  747.  
  748.  
  749. char *SysStrNCpy(char *dest, const char *src, size_t count)
  750. {
  751. return strncpy(dest, src, count);
  752. }
  753.  
  754. void SysSetCurrentState(int state)
  755. {
  756. int tmp;
  757.  
  758. switch( state ) {
  759. case PROC_RUNNING:
  760. tmp = TASK_RUNNING; break;
  761. case PROC_INTERRUPTIBLE:
  762. tmp = TASK_INTERRUPTIBLE; break;
  763. case PROC_UNINTERRUPTIBLE:
  764. tmp = TASK_UNINTERRUPTIBLE; break;
  765. default:
  766. DBG_OSDEP("Task state unknown\n");
  767. return;
  768. }
  769.  
  770. set_current_state(tmp);
  771. }
  772.  
  773. int SysSignalPending(void)
  774. {
  775. return signal_pending(current);
  776. }
  777.  
  778. void SysSchedule(void)
  779. {
  780. schedule();
  781. }
  782.  
  783.  
  784.  
  785. signed long SysScheduleTimeout(signed long timeout)
  786. {
  787. return schedule_timeout(timeout);
  788. }
  789.  
  790. unsigned long SysGetHZ(void)
  791. {
  792. return HZ;
  793. }
  794.  
  795.  
  796. void SysGetPageParam(unsigned int *pg_sz, unsigned int *pg_shift)
  797. {
  798. if( pg_sz ) *pg_sz = PAGE_SIZE;
  799. if( pg_shift ) *pg_shift = PAGE_SHIFT;
  800. }
  801.  
  802. pPage SysVmallocToPage(u8 *va)
  803. {
  804. struct page *p = vmalloc_to_page(va);
  805.  
  806. return (pPage) p;
  807. }
  808.  
  809. void *SysPageAddress(pPage pg)
  810. {
  811. return page_address((struct page *)pg);
  812. }
  813.  
  814. int SysPageHighMem(pPage pg)
  815. {
  816. struct page *p = (struct page *)pg;
  817. REF_PARAM(p);
  818.  
  819. return PageHighMem(p);
  820. }
  821.  
  822. void SysGetPage(pPage pg)
  823. {
  824. struct page *p = (struct page *)pg;
  825. get_page(p);
  826. }
  827.  
  828. void SysPciDmaSyncSingleForCpu(pPci_dev pci, dma_addr_t pa, int size,
  829. dma_type_t type)
  830. {
  831. pci_dma_sync_single_for_cpu((struct pci_dev *)pci, pa, size, type);
  832. }
  833.  
  834. void SysPciDmaSyncSgForCpu(pPci_dev pci, pScatterlist pSGList, int count,
  835. dma_type_t type)
  836. {
  837. struct scatterlist *sg = (struct scatterlist *)pSGList;
  838. pci_dma_sync_sg_for_cpu((struct pci_dev *)pci, sg, count, type);
  839. }
  840.  
  841.  
  842.  
  843.  
  844.  
  845.  
  846.  
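/*
 * Scatterlist helpers: up to 2.6.23 the page pointer and length are
 * written directly into struct scatterlist; from 2.6.24 on the
 * sg_set_page()/sg_page() accessors are used and SysSGAlloc() must call
 * sg_init_table() on the freshly allocated list.
 */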
  847. void SysFillSG(pScatterlist pSGList, int i, pPage pg, int sz)
  848. {
  849. struct scatterlist *sg = (struct scatterlist *)pSGList;
  850.  
  851. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
  852. sg[i].page = (struct page *) pg;
  853. sg[i].length = sz;
  854. #else
  855. sg_set_page(sg+i, pg, sz, 0);
  856. #endif
  857. }
  858.  
  859. dma_addr_t SysGetSgDmaAddress(pScatterlist pSG, int i)
  860. {
  861. struct scatterlist *sg = (struct scatterlist *)pSG;
  862.  
  863. return sg_dma_address(&sg[i]);
  864. }
  865.  
  866. size_t SysGetSgDmaLen(pScatterlist pSG, int i)
  867. {
  868. struct scatterlist *sg = (struct scatterlist *)pSG;
  869.  
  870. return sg_dma_len(&sg[i]);
  871. }
  872.  
  873. pPage SysGetSgPage(pScatterlist pSG, int i)
  874. {
  875. struct scatterlist *sg = (struct scatterlist *)pSG;
  876.  
  877. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
  878. return sg[i].page;
  879. #else
  880. return sg_page(sg+i);
  881. #endif
  882. }
  883.  
  884. void SysSetSgDmaAddress(pScatterlist pSG, int i, dma_addr_t pa)
  885. {
  886. struct scatterlist *sg = (struct scatterlist *)pSG;
  887.  
  888. sg_dma_address(&sg[i]) = pa;
  889. }
  890.  
  891. void SysSetSgDmaLen(pScatterlist pSG, int i, size_t len)
  892. {
  893. struct scatterlist *sg = (struct scatterlist *)pSG;
  894.  
  895. sg_dma_len(&sg[i]) = len;
  896. }
  897.  
  898. pScatterlist SysSGAlloc(int count)
  899. {
  900. struct scatterlist *sg =
  901. SysKMalloc(sizeof(*sg)*count, eKMALLOC_KERNEL);
  902.  
  903. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
  904. if( sg ) sg_init_table(sg, count);
  905. #endif
  906. return (pScatterlist)sg;
  907. }
  908.  
  909.  
  910.  
  911.  
  912.  
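/*
 * PCI DMA mapping helpers: the driver-private dma_type_t is translated to
 * the kernel's dma_data_direction (eDMA_DEV_WRITE -> DMA_TO_DEVICE,
 * eDMA_DEV_READ -> DMA_FROM_DEVICE) before calling the pci_map_* APIs.
 */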
  913. int SysPciMapSg(pPci_dev pci, pScatterlist pSG, int entry, dma_type_t type)
  914. {
  915. enum dma_data_direction dir = DMA_NONE;
  916. switch( type ) {
  917. case eDMA_DEV_WRITE:
  918. dir = DMA_TO_DEVICE;
  919. break;
  920.  
  921. case eDMA_DEV_READ:
  922. dir = DMA_FROM_DEVICE;
  923. break;
  924.  
  925. default:
  926. return -EINVAL;
  927. }
  928.  
  929. return pci_map_sg((struct pci_dev *)pci, (struct scatterlist *)pSG,
  930. entry, dir);
  931. }
  932.  
  933. void SysPciUnmapSg(pPci_dev pci, pScatterlist pSG, int entry, dma_type_t type)
  934. {
  935. enum dma_data_direction dir = DMA_NONE;
  936. switch( type ) {
  937. case eDMA_DEV_WRITE:
  938. dir = DMA_TO_DEVICE;
  939. break;
  940.  
  941. case eDMA_DEV_READ:
  942. dir = DMA_FROM_DEVICE;
  943. break;
  944.  
  945. default:
  946. return;
  947. }
  948.  
  949. pci_unmap_sg((struct pci_dev *)pci, (struct scatterlist *)pSG,
  950. entry, dir);
  951. }
  952.  
  953.  
  954.  
  955.  
  956.  
  957. dma_addr_t SysPciMapSingle(pPci_dev pci, void *va, int size, dma_type_t type)
  958. {
  959. dma_addr_t pa = (dma_addr_t)0;
  960.  
  961. enum dma_data_direction dir = DMA_NONE;
  962. switch( type ) {
  963. case eDMA_DEV_WRITE:
  964. dir = DMA_TO_DEVICE;
  965. break;
  966.  
  967. case eDMA_DEV_READ:
  968. dir = DMA_FROM_DEVICE;
  969. break;
  970.  
  971. default:
  972. return pa;
  973. }
  974.  
  975. pa = pci_map_single((struct pci_dev *)pci, va, size, dir);
  976. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
  977. if( pci_dma_mapping_error((struct pci_dev *)pci, pa) ) pa = (dma_addr_t)0;
  978. #else
  979. if( pci_dma_mapping_error(pa) ) pa = (dma_addr_t)0;
  980. #endif
  981. return pa;
  982. }
  983.  
  984. void SysPciUnmapSingle(pPci_dev pci, dma_addr_t pa, int size, dma_type_t type)
  985. {
  986. enum dma_data_direction dir = DMA_NONE;
  987. switch( type ) {
  988. case eDMA_DEV_WRITE:
  989. dir = DMA_TO_DEVICE;
  990. break;
  991.  
  992. case eDMA_DEV_READ:
  993. dir = DMA_FROM_DEVICE;
  994. break;
  995.  
  996. default:
  997. return;
  998. }
  999.  
  1000. pci_unmap_single((struct pci_dev *)pci, pa, size, dir);
  1001. }
  1002.  
  1003. void *SysPciAllocConsistent(pPci_dev pci, size_t sz, dma_addr_t *pa)
  1004. {
  1005. return pci_alloc_consistent((struct pci_dev *)pci, sz, pa);
  1006. }
  1007.  
  1008. void SysPciFreeConsistent(pPci_dev pci, size_t sz, void *va, dma_addr_t pa)
  1009. {
  1010. return pci_free_consistent((struct pci_dev *)pci, sz, va, pa);
  1011. }
  1012.  
  1013. int SysPciEnableDevice(pPci_dev pci)
  1014. {
  1015. return pci_enable_device((struct pci_dev *)pci);
  1016. }
  1017.  
  1018. void SysPciSetMaster(pPci_dev pci)
  1019. {
  1020. pci_set_master((struct pci_dev *)pci);
  1021. }
  1022.  
  1023. int SysPciMMIOMap(pPci_dev pci, int resource, const char *devname,
  1024. unsigned long *mmio, unsigned int *mmio_size)
  1025. {
  1026. int err = 0;
  1027. struct pci_dev *pci_dev = (struct pci_dev *)pci;
  1028.  
  1029. DBG_OSDEP("pci=0x%p, resource=%d, name=%s\n", pci, resource, devname);
  1030.  
  1031. if (!request_mem_region(pci_resource_start(pci_dev, resource),
  1032. pci_resource_len(pci_dev, resource),
  1033. devname)) {
  1034. err = -EBUSY;
  1035. DBG_OSDEP("request_mem_region failed, err=%d\n", err);
  1036. goto done;
  1037. }
  1038.  
  1039. *mmio = (unsigned long)ioremap(pci_resource_start(pci_dev, resource),
  1040. pci_resource_len(pci_dev, resource));
  1041. if(!(*mmio)) {
  1042. DBG_OSDEP("ioremap failed\n");
  1043. err = -ENOMEM;
  1044. release_mem_region(pci_resource_start(pci_dev, resource), pci_resource_len(pci_dev, resource)); goto done;
  1045. }
  1046. *mmio_size = pci_resource_len(pci_dev, resource);
  1047.  
  1048. DBG_OSDEP("done, mmio=0x%lx, size=%d\n", *mmio, *mmio_size);
  1049. done:
  1050. return err;
  1051. }
  1052.  
  1053. void SysPciMMIOUnMap(pPci_dev pci, int resource, unsigned long mmio)
  1054. {
  1055. struct pci_dev *pci_dev = (struct pci_dev *)pci;
  1056. DBG_OSDEP("pci=0x%p, resource=%d, mmio=0x%lx\n", pci, resource,
  1057. mmio);
  1058. iounmap((void *)mmio);
  1059. release_mem_region(pci_resource_start(pci_dev, resource),
  1060. pci_resource_len(pci_dev, resource));
  1061. }
  1062.  
  1063. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
  1064. unsigned int SysIORead32(void *addr)
  1065. {
  1066. return ioread32(addr);
  1067. }
  1068.  
  1069. void SysIOWrite32(void *addr, unsigned int val)
  1070. {
  1071. iowrite32(val, addr);
  1072. }
  1073.  
  1074.  
  1075. unsigned int SysIORead16(void *addr)
  1076. {
  1077. return ioread16(addr);
  1078. }
  1079.  
  1080. void SysIOWrite16(void *addr, unsigned int val)
  1081. {
  1082. iowrite16(val, addr);
  1083. }
  1084.  
  1085. unsigned int SysIORead8(void *addr)
  1086. {
  1087. return ioread8(addr);
  1088. }
  1089.  
  1090. void SysIOWrite8(void *addr, unsigned int val)
  1091. {
  1092. iowrite8(val, addr);
  1093. }
  1094.  
  1095.  
  1096. #else
  1097.  
  1098. unsigned int SysIORead32(void *addr)
  1099. {
  1100. return readl(addr);
  1101. }
  1102.  
  1103. void SysIOWrite32(void *addr, unsigned int val)
  1104. {
  1105. writel(val, addr);
  1106. }
  1107.  
  1108.  
  1109. unsigned int SysIORead16(void *addr)
  1110. {
  1111. return readw(addr);
  1112. }
  1113.  
  1114. void SysIOWrite16(void *addr, unsigned int val)
  1115. {
  1116. writew(val, addr);
  1117. }
  1118.  
  1119. unsigned int SysIORead8(void *addr)
  1120. {
  1121. return readb(addr);
  1122. }
  1123.  
  1124. void SysIOWrite8(void *addr, unsigned int val)
  1125. {
  1126. writeb(val, addr);
  1127. }
  1128.  
  1129.  
  1130. #endif
  1131.  
  1132. int SysPciReadCfgByte(pPci_dev pci, unsigned int w, u8 *v)
  1133. {
  1134. return pci_read_config_byte((struct pci_dev *)pci, w, v);
  1135. }
  1136.  
  1137. int SysPciReadCfgWord(pPci_dev pci, unsigned int w, u16 *v)
  1138. {
  1139. return pci_read_config_word((struct pci_dev *)pci, w, v);
  1140. }
  1141.  
  1142. int SysPciReadCfgDWord(pPci_dev pci, unsigned int w, u32 *v)
  1143. {
  1144. return pci_read_config_dword((struct pci_dev *)pci, w, v);
  1145. }
  1146.  
  1147. int SysPciWriteCfgByte(pPci_dev pci, unsigned int w, u8 v)
  1148. {
  1149. return pci_write_config_byte((struct pci_dev *)pci, w, v);
  1150. }
  1151.  
  1152. int SysPciWriteCfgWord(pPci_dev pci, unsigned int w, u16 v)
  1153. {
  1154. return pci_write_config_word((struct pci_dev *)pci, w, v);
  1155. }
  1156.  
  1157. int SysPciWriteCfgDWord(pPci_dev pci, unsigned int w, u32 v)
  1158. {
  1159. return pci_write_config_dword((struct pci_dev *)pci, w, v);
  1160. }
  1161.  
  1162. unsigned short SysPciVendorId(pPci_dev pci)
  1163. {
  1164. return ((struct pci_dev *)pci)->vendor;
  1165. }
  1166.  
  1167. unsigned short SysPciDeviceId(pPci_dev pci)
  1168. {
  1169. return ((struct pci_dev *)pci)->device;
  1170. }
  1171.  
  1172. unsigned short SysPciSubVendorId(pPci_dev pci)
  1173. {
  1174. return ((struct pci_dev *)pci)->subsystem_vendor;
  1175. }
  1176.  
  1177. unsigned short SysPciSubDeviceId(pPci_dev pci)
  1178. {
  1179. return ((struct pci_dev *)pci)->subsystem_device;
  1180. }
  1181.  
  1182. void *SysPciGetDevice(pPci_dev pci)
  1183. {
  1184. return &((struct pci_dev *)pci)->dev;
  1185. }
  1186.  
  1187. void SysPciSetDrvData(pPci_dev pci, void *data)
  1188. {
  1189. pci_set_drvdata((struct pci_dev *)pci, data);
  1190. }
  1191.  
  1192. void *SysPciGetDrvData(pPci_dev pci)
  1193. {
  1194. return pci_get_drvdata((struct pci_dev *)pci);
  1195. }
  1196.  
  1197. unsigned int SysPciGetIrq(pPci_dev pci)
  1198. {
  1199. return ((struct pci_dev *)pci)->irq;
  1200. }
  1201.  
  1202.  
  1203. void SysPciGetId(pPci_dev pci, unsigned short *vendor,
  1204. unsigned short *device, unsigned short *subvendor,
  1205. unsigned short *subdevice)
  1206. {
  1207. if( !pci )
  1208. return;
  1209. if(vendor)
  1210. *vendor = ((struct pci_dev *)pci)->vendor;
  1211. if(device)
  1212. *device = ((struct pci_dev *)pci)->device;
  1213. if(subvendor)
  1214. *subvendor = ((struct pci_dev *)pci)->subsystem_vendor;
  1215. if(subdevice)
  1216. *subdevice = ((struct pci_dev *)pci)->subsystem_device;
  1217. }
  1218.  
  1219.  
  1220. const char *SysPciName(pPci_dev pci)
  1221. {
  1222. return pci_name((struct pci_dev *)pci);
  1223. }
  1224.  
  1225.  
  1226.  
  1227.  
  1228.  
  1229. u8 *SysVmalloc32(unsigned long size)
  1230. {
  1231. return vmalloc_32(size);
  1232. }
  1233.  
  1234. void SysVFree(void *addr)
  1235. {
  1236. vfree(addr);
  1237. }
  1238.  
  1239.  
  1240.  
  1241.  
  1242.  
  1243.  
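/*
 * Timer wrappers: SysModTimer() takes a delay in milliseconds and converts
 * it to jiffies before arming the timer; SysDelTimer() uses
 * del_timer_sync() so it only returns once the handler has finished.
 */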
  1244. int SysInitTimer(pTimer_list *pTimer)
  1245. {
  1246. struct timer_list *timer = kmalloc(sizeof(*timer), GFP_KERNEL);
  1247.  
  1248. *pTimer = (pTimer_list) timer;
  1249. if( timer ) {
  1250. init_timer(timer);
  1251. return 0;
  1252. }
  1253.  
  1254. return -ENOMEM;
  1255. }
  1256.  
  1257.  
  1258.  
  1259.  
  1260.  
  1261. void SysFillTimer(pTimer_list Timer, cb_timer func, void *data)
  1262. {
  1263. struct timer_list *timer = (struct timer_list *)Timer;
  1264.  
  1265. timer->function = func;
  1266. timer->data = (unsigned long)data;
  1267. }
  1268.  
  1269.  
  1270.  
  1271.  
  1272.  
  1273. int SysModTimer(pTimer_list Timer, unsigned long ms)
  1274. {
  1275. ms *= HZ;
  1276. ms /= 1000;
  1277. return mod_timer((struct timer_list *)Timer, ms+jiffies);
  1278. }
  1279.  
  1280. int SysDelTimer(pTimer_list Timer)
  1281. {
  1282. return del_timer_sync((struct timer_list *)Timer);
  1283. }
  1284.  
  1285.  
  1286. unsigned long SysGetJiffies(void)
  1287. {
  1288. return jiffies;
  1289. }
  1290.  
  1291.  
  1292.  
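/*
 * Work-queue wrappers: since 2.6.20 a work handler receives the
 * work_struct itself, so jump_struct embeds the work item together with
 * the user callback and data, and work_handler() recovers the container
 * to invoke the user callback.
 */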
  1293. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  1294. struct jump_struct
  1295. {
  1296. struct work_struct w;
  1297. void *data;
  1298. cb_workqueue func;
  1299. };
  1300.  
  1301.  
  1302. static void work_handler(struct work_struct *work)
  1303. {
  1304. struct jump_struct *tmp = list_entry(work, struct jump_struct, w);
  1305. void *user_data = tmp->data;
  1306. cb_workqueue user_handler = tmp->func;
  1307.  
  1308. user_handler(user_data);
  1309. }
  1310. #endif
  1311.  
  1312. int SysInitWork(pWorkStruct *work, cb_workqueue func, void *data)
  1313. {
  1314. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  1315. struct jump_struct *jump;
  1316.  
  1317. jump = SysKMalloc(sizeof(struct jump_struct), eKMALLOC_KERNEL);
  1318. if( !jump ) return -ENOMEM; /* SysKMalloc already returns zeroed memory */
  1319.  
  1320. jump->data = data;
  1321. jump->func = func;
  1322.  
  1323.  
  1324. *work = (pWorkStruct) (jump);
  1325. INIT_WORK( &(jump->w), work_handler);
  1326. #else
  1327.  
  1328.  
  1329. struct work_struct *w = SysKMalloc(sizeof(*w), eKMALLOC_KERNEL);
  1330. if( !w ) return -ENOMEM;
  1331. *work = (pWorkStruct) w;
  1332. INIT_WORK(w, func, data);
  1333. #endif
  1334.  
  1335. return 0;
  1336. }
  1337.  
  1338.  
  1339. int SysSubmitWork(pWorkStruct work)
  1340. {
  1341.  
  1342.  
  1343.  
  1344. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  1345. struct jump_struct *j = (struct jump_struct*)work;
  1346. return schedule_work(&(j->w));
  1347. #else
  1348. struct work_struct *w = (struct work_struct *)work;
  1349. return schedule_work(w);
  1350. #endif
  1351.  
  1352. }
  1353.  
  1354. void SysFlushWork(void)
  1355. {
  1356. flush_scheduled_work();
  1357. }
  1358.  
  1359. struct IrqContext
  1360. {
  1361. unsigned int irq;
  1362. KIrqHandler handler;
  1363. void* data;
  1364. };
  1365.  
  1366.  
  1367.  
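/*
 * Interrupt glue: IRQ handlers lose the pt_regs argument on newer kernels,
 * so both prototypes are provided behind the 2.6.20 version check; the
 * shared handler looks up the IrqContext and reports IRQ_HANDLED only when
 * the driver callback returns 0.
 */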
  1368. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
  1369. static irqreturn_t irq_handler(int irq, void *context)
  1370. #else
  1371. static irqreturn_t irq_handler(int irq, void *context, struct pt_regs *regs)
  1372. #endif
  1373.  
  1374. {
  1375. int handled = 0;
  1376. struct IrqContext *ic = context;
  1377.  
  1378.  
  1379. if( !ic || !ic->handler ) return IRQ_RETVAL(handled);
  1380. handled = (ic->handler(ic->data)==0)? 1 : 0;
  1381.  
  1382. return IRQ_RETVAL(handled);
  1383. }
  1384.  
  1385. KIrqObject SysRequestIrq(unsigned int irq, KIrqHandler handler,
  1386. const char *name, void *data)
  1387. {
  1388. int ret = 0;
  1389. struct IrqContext *ic = SysKMalloc(sizeof(*ic), eKMALLOC_KERNEL);
  1390.  
  1391. if( !ic ) return NULL;
  1392. ic->irq = irq;
  1393. ic->handler = handler;
  1394. ic->data = data;
  1395.  
  1396. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
  1397. ret = request_irq(irq, irq_handler, SA_SHIRQ|SA_INTERRUPT, name, ic);
  1398. #else
  1399. ret = request_irq(irq, irq_handler, IRQF_SHARED, name, ic);
  1400. #endif
  1401. if( ret ) {
  1402. DBG_fOSDEP("request_irq failed with %d\n", ret);
  1403. SysKFree(ic);
  1404. return NULL;
  1405. }
  1406.  
  1407. return (KIrqObject)ic;
  1408. }
  1409.  
  1410. void SysFreeIrq(KIrqObject obj)
  1411. {
  1412. struct IrqContext *ic = obj;
  1413.  
  1414. if( !ic ) return;
  1415.  
  1416. free_irq(ic->irq, ic);
  1417. SysKFree(ic);
  1418. }
  1419.  
  1420. int SysPciEnableMsi(pPci_dev pci)
  1421. {
  1422. #ifdef CONFIG_PCI_MSI
  1423. struct pci_dev *pci_dev = (struct pci_dev *)pci;
  1424. return pci_enable_msi(pci_dev);
  1425. #else
  1426. return 0;
  1427. #endif
  1428. }
  1429.  
  1430. void SysPciDisableMsi(pPci_dev pci)
  1431. {
  1432. #ifdef CONFIG_PCI_MSI
  1433. struct pci_dev *pci_dev = (struct pci_dev *)pci;
  1434. pci_disable_msi(pci_dev);
  1435. #else
  1436. return;
  1437. #endif
  1438. }
  1439.  
  1440. int SysInitCompletion(pCompletion *pobj)
  1441. {
  1442. struct completion *comp = kmalloc(sizeof(*comp), GFP_KERNEL);
  1443. DBG_fOSDEP("obj=%p\n", comp);
  1444.  
  1445. if( !comp ) return -ENOMEM;
  1446.  
  1447. init_completion(comp);
  1448. *pobj = (void *)comp;
  1449.  
  1450. return 0;
  1451. }
  1452.  
  1453.  
  1454. int SysReInitCompletion(pCompletion pobj)
  1455. {
  1456. struct completion *comp = pobj;
  1457. DBG_fOSDEP("obj=%p\n", comp);
  1458. if( !comp ) return -EINVAL;
  1459.  
  1460. init_completion(comp);
  1461. return 0;
  1462. }
  1463.  
  1464.  
  1465. int SysCompleteAndExit(pCompletion obj, int retval)
  1466. {
  1467. DBG_fOSDEP("obj=%p\n", obj);
  1468. if( !obj ) return -EINVAL;
  1469. complete_and_exit((struct completion *)obj, retval);
  1470. return 0;
  1471. }
  1472.  
  1473. int SysComplete(pCompletion obj)
  1474. {
  1475. DBG_fOSDEP("obj=%p\n", obj);
  1476. if( !obj ) return -EINVAL;
  1477. complete((struct completion *)obj);
  1478. return 0;
  1479. }
  1480.  
  1481. int SysCompleteAll(pCompletion obj)
  1482. {
  1483. DBG_fOSDEP("obj=%p\n", obj);
  1484. if( !obj ) return -EINVAL;
  1485. complete_all((struct completion *)obj);
  1486. return 0;
  1487. }
  1488.  
  1489. int SysWaitForCompletion(pCompletion obj)
  1490. {
  1491. DBG_fOSDEP("obj=%p\n", obj);
  1492. wait_for_completion((struct completion *)obj);
  1493. DBG_fOSDEP("obj=%p, done\n", obj);
  1494. return 0;
  1495. }
  1496.  
  1497.  
  1498.  
  1499. long SysWaitForCompletionTimeout(pCompletion obj, int timeoutMiliSec)
  1500. {
  1501. unsigned long timeout;
  1502. long ret;
  1503. DBG_fOSDEP("obj=%p, timeout=%d\n", obj, timeoutMiliSec);
  1504. timeout = (timeoutMiliSec*HZ)/1000;
  1505. ret = wait_for_completion_timeout((struct completion *)obj, timeout);
  1506. DBG_fOSDEP("obj=%p, done\n", obj);
  1507. return ret;
  1508. }
  1509.  
  1510.  
  1511. void SysFiniCompletion(pCompletion obj)
  1512. {
  1513. DBG_fOSDEP("obj=%p\n", obj);
  1514. kfree(obj);
  1515. }
  1516.  
  1517. int SysFileFlagIsNonBlock(void *f)
  1518. {
  1519. struct file *file = f;
  1520.  
  1521. return (file->f_flags & O_NONBLOCK);
  1522. }
  1523.  
  1524. void SysGetTimeOfDay(struct timeval *tv)
  1525. {
  1526. do_gettimeofday(tv);
  1527. }
  1528.  
  1529.  
  1530.  
  1531. int SysAtomicInit(pAtomic *patm)
  1532. {
  1533. atomic_t *pv = NULL;
  1534.  
  1535. if( !patm ) return -EINVAL;
  1536.  
  1537.  
  1538. pv = SysKMalloc(sizeof(atomic_t), eKMALLOC_KERNEL);
  1539. if( !pv ) return -ENOMEM;
  1540.  
  1541. atomic_set(pv, 0);
  1542. *patm = (void *)pv;
  1543. return 0;
  1544. }
  1545.  
  1546. void SysAtomicSet(pAtomic atm, int value)
  1547. {
  1548. atomic_set((atomic_t *)atm, value);
  1549. }
  1550.  
  1551. int SysAtomicGet(pAtomic atm)
  1552. {
  1553. return atomic_read((atomic_t *)atm);
  1554. }
  1555.  
  1556. void SysAtomicAdd(int value, pAtomic atm)
  1557. {
  1558. atomic_add(value, (atomic_t *)atm);
  1559. }
  1560.  
  1561. void SysAtomicSub(int value, pAtomic atm)
  1562. {
  1563. atomic_sub(value, (atomic_t *)atm);
  1564. }
  1565.  
  1566. void SysAtomicFini(pAtomic atm)
  1567. {
  1568. kfree(atm);
  1569. }
  1570.  
  1571.  
  1572.  
  1573.  
  1574. int SysAtomicAddReturn(int value, pAtomic atm)
  1575. {
  1576. return atomic_add_return(value, atm);
  1577. }
  1578.  
  1579. int SysAtomicSubReturn(int value, pAtomic atm)
  1580. {
  1581. return atomic_sub_return(value, atm);
  1582. }
  1583.  
  1584.  
  1585.  
  1586. void *SysGetCurrent(void)
  1587. {
  1588. return (void *)(current);
  1589. }
  1590.  
  1591.  
  1592.  
  1593.  
  1594. void SysSendSignalToSelf(int sig)
  1595. {
  1596. send_sig(sig, current, 0);
  1597. }
  1598.  
  1599.  
  1600.  
  1601.  
  1602.  
  1603.  
  1604. #ifndef CONFIG_CC_STACKPROTECTOR
  1605. void __stack_chk_fail(void)
  1606. {
  1607. return;
  1608. }
  1609. #endif
  1610.  
  1611.  
  1612.  
  1613.  
  1614. const char * aver_usb_board_name[] = {
  1615. [DiBcom_7700] = "Dibcom 7700",
  1616. [DiBcom_LDR7700]= "Dibcom 7700",
  1617. [Buffalo_M803] = "M803",
  1618. [AVer_A300] = "A300",
  1619. [AVer_A300LDR] = "A300",
  1620. [AVer_A302] = "A302",
  1621. [AVer_A302LDR] = "A302",
  1622. [AVer_A808] = "A808",
  1623. [AVer_A808LDR] = "A808",
  1624. [AVer_E568] = "E568",
  1625. [AVer_B300] = "A300",
  1626. [AVer_B302] = "A302",
  1627. [AVer_B808] = "A808",
  1628. [AVer_B568] = "E568",
  1629. [AVer_A828] = "A828",
  1630. [AVer_A301] = "A301",
  1631. [AVer_A333] = "A333",
  1632. [AVer_A825] = "A825",
  1633. [AVer_C038] = "C038",
  1634. [AVer_H8261] = "H826",
  1635. [AVer_H8262] = "H826",
  1636. [AVer_H826D1] = "H826D",
  1637. [AVer_H826D2] = "H826D",
  1638. [AVer_H826D3] = "H826D",
  1639. [AVer_A321D] = "A321D",
  1640. [AVer_A321] = "A321",
  1641. [AVer_A827] = "A827",
  1642. [AVer_A827J] = "A827",
  1643. [AVer_A827A1] = "A827",
  1644. [AVer_A827A2] = "A827",
  1645. [AVer_A827A3] = "A827",
  1646. [AVer_Max_Board_ID] = "Unknown"
  1647. };
  1648.  
  1649.  
  1650.  
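/*
 * Map a V4L2 ioctl code to its symbolic name for debug output; ioctls that
 * were added in later kernels are guarded with #ifdef so this still builds
 * against older videodev2.h headers.
 */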
  1651. const char *SysGetV4L2IOCTLName(unsigned int ioctl)
  1652. {
  1653. switch(ioctl) {
  1654.  
  1655. case VIDIOC_QUERYCAP : return "VIDIOC_QUERYCAP";
  1656. case VIDIOC_RESERVED : return "VIDIOC_RESERVED";
  1657. case VIDIOC_ENUM_FMT : return "VIDIOC_ENUM_FMT";
  1658. case VIDIOC_G_FMT : return "VIDIOC_G_FMT";
  1659. case VIDIOC_S_FMT : return "VIDIOC_S_FMT";
  1660. case VIDIOC_REQBUFS : return "VIDIOC_REQBUFS";
  1661. case VIDIOC_QUERYBUF : return "VIDIOC_QUERYBUF";
  1662. case VIDIOC_G_FBUF : return "VIDIOC_G_FBUF";
  1663. case VIDIOC_S_FBUF : return "VIDIOC_S_FBUF";
  1664. case VIDIOC_OVERLAY : return "VIDIOC_OVERLAY";
  1665. case VIDIOC_QBUF : return "VIDIOC_QBUF";
  1666. case VIDIOC_DQBUF : return "VIDIOC_DQBUF";
  1667. case VIDIOC_STREAMON : return "VIDIOC_STREAMON";
  1668. case VIDIOC_STREAMOFF : return "VIDIOC_STREAMOFF";
  1669. case VIDIOC_G_PARM : return "VIDIOC_G_PARM";
  1670. case VIDIOC_S_PARM : return "VIDIOC_S_PARM";
  1671. case VIDIOC_G_STD : return "VIDIOC_G_STD";
  1672. case VIDIOC_S_STD : return "VIDIOC_S_STD";
  1673. case VIDIOC_ENUMSTD : return "VIDIOC_ENUMSTD";
  1674. case VIDIOC_ENUMINPUT : return "VIDIOC_ENUMINPUT";
  1675. case VIDIOC_G_CTRL : return "VIDIOC_G_CTRL";
  1676. case VIDIOC_S_CTRL : return "VIDIOC_S_CTRL";
  1677. case VIDIOC_G_TUNER : return "VIDIOC_G_TUNER";
  1678. case VIDIOC_S_TUNER : return "VIDIOC_S_TUNER";
  1679. case VIDIOC_G_AUDIO : return "VIDIOC_G_AUDIO";
  1680. case VIDIOC_S_AUDIO : return "VIDIOC_S_AUDIO";
  1681. case VIDIOC_QUERYCTRL : return "VIDIOC_QUERYCTRL";
  1682. case VIDIOC_QUERYMENU : return "VIDIOC_QUERYMENU";
  1683. case VIDIOC_G_INPUT : return "VIDIOC_G_INPUT";
  1684. case VIDIOC_S_INPUT : return "VIDIOC_S_INPUT";
  1685. case VIDIOC_G_OUTPUT : return "VIDIOC_G_OUTPUT";
  1686. case VIDIOC_S_OUTPUT : return "VIDIOC_S_OUTPUT";
  1687. case VIDIOC_ENUMOUTPUT : return "VIDIOC_ENUMOUTPUT";
  1688. case VIDIOC_G_AUDOUT : return "VIDIOC_G_AUDOUT";
  1689. case VIDIOC_S_AUDOUT : return "VIDIOC_S_AUDOUT";
  1690. case VIDIOC_G_MODULATOR : return "VIDIOC_G_MODULATOR";
  1691. case VIDIOC_S_MODULATOR : return "VIDIOC_S_MODULATOR";
  1692. case VIDIOC_G_FREQUENCY : return "VIDIOC_G_FREQUENCY";
  1693. case VIDIOC_S_FREQUENCY : return "VIDIOC_S_FREQUENCY";
  1694. case VIDIOC_CROPCAP : return "VIDIOC_CROPCAP";
  1695. case VIDIOC_G_CROP : return "VIDIOC_G_CROP";
  1696. case VIDIOC_S_CROP : return "VIDIOC_S_CROP";
  1697. case VIDIOC_G_JPEGCOMP : return "VIDIOC_G_JPEGCOMP";
  1698. case VIDIOC_S_JPEGCOMP : return "VIDIOC_S_JPEGCOMP";
  1699. case VIDIOC_QUERYSTD : return "VIDIOC_QUERYSTD";
  1700. case VIDIOC_TRY_FMT : return "VIDIOC_TRY_FMT";
  1701. case VIDIOC_ENUMAUDIO : return "VIDIOC_ENUMAUDIO";
  1702. case VIDIOC_ENUMAUDOUT : return "VIDIOC_ENUMAUDOUT";
  1703. case VIDIOC_G_PRIORITY : return "VIDIOC_G_PRIORITY";
  1704. case VIDIOC_S_PRIORITY : return "VIDIOC_S_PRIORITY";
  1705. case VIDIOC_G_SLICED_VBI_CAP : return "VIDIOC_G_SLICED_VBI_CAP";
  1706. case VIDIOC_LOG_STATUS : return "VIDIOC_LOG_STATUS";
  1707.  
  1708. #ifdef VIDIOC_G_EXT_CTRLS
  1709. case VIDIOC_G_EXT_CTRLS : return "VIDIOC_G_EXT_CTRLS";
  1710. #endif
  1711.  
  1712. #ifdef VIDIOC_S_EXT_CTRLS
  1713. case VIDIOC_S_EXT_CTRLS : return "VIDIOC_S_EXT_CTRLS";
  1714. #endif
  1715.  
  1716. #ifdef VIDIOC_TRY_EXT_CTRLS
  1717. case VIDIOC_TRY_EXT_CTRLS : return "VIDIOC_TRY_EXT_CTRLS";
  1718. #endif
  1719.  
  1720. #ifdef VIDIOC_ENUM_FRAMESIZES
  1721. case VIDIOC_ENUM_FRAMESIZES : return "VIDIOC_ENUM_FRAMESIZES";
  1722. #endif
  1723.  
  1724. #ifdef VIDIOC_ENUM_FRAMEINTERVALS
  1725. case VIDIOC_ENUM_FRAMEINTERVALS : return "VIDIOC_ENUM_FRAMEINTERVALS";
  1726. #endif
  1727.  
  1728. #ifdef VIDIOC_G_ENC_INDEX
  1729. case VIDIOC_G_ENC_INDEX : return "VIDIOC_G_ENC_INDEX";
  1730. #endif
  1731.  
  1732. #ifdef VIDIOC_ENCODER_CMD
  1733. case VIDIOC_ENCODER_CMD : return "VIDIOC_ENCODER_CMD";
  1734. #endif
  1735.  
  1736. #ifdef VIDIOC_TRY_ENCODER_CMD
  1737. case VIDIOC_TRY_ENCODER_CMD : return "VIDIOC_TRY_ENCODER_CMD";
  1738. #endif
  1739.  
  1740. #ifdef VIDIOC_G_CHIP_IDENT
  1741. case VIDIOC_G_CHIP_IDENT : return "VIDIOC_G_CHIP_IDENT";
  1742. #endif
  1743.  
  1744. #ifdef VIDIOC_S_HW_FREQ_SEEK
  1745. case VIDIOC_S_HW_FREQ_SEEK : return "VIDIOC_S_HW_FREQ_SEEK";
  1746. #endif
  1747.  
  1748. #ifdef __OLD_VIDIOC_
  1749. case VIDIOC_OVERLAY_OLD : return "VIDIOC_OVERLAY_OLD";
  1750. case VIDIOC_S_PARM_OLD : return "VIDIOC_S_PARM_OLD";
  1751. case VIDIOC_S_CTRL_OLD : return "VIDIOC_S_CTRL_OLD";
  1752. case VIDIOC_G_AUDIO_OLD : return "VIDIOC_G_AUDIO_OLD";
  1753. case VIDIOC_G_AUDOUT_OLD : return "VIDIOC_G_AUDOUT_OLD";
  1754. case VIDIOC_CROPCAP_OLD : return "VIDIOC_CROPCAP_OLD";
  1755. #endif
  1756. default: return "Unknown IOCtrl";
  1757. }
  1758.  
  1759. return "Unknown IOCtrl";
  1760. }