commit f279731288d2d85484ddd20078e6bae332404875
Author: Andy (andy572) <17andy@gmx.de>
Date:   Wed Mar 23 16:51:08 2011 +0100

    Replace spinlock with mutex lock

diff --git a/arch/arm/mach-msm/smd_rpcrouter.c b/arch/arm/mach-msm/smd_rpcrouter.c
index 8934b4f..5a2dc2b 100755
--- a/arch/arm/mach-msm/smd_rpcrouter.c
+++ b/arch/arm/mach-msm/smd_rpcrouter.c
@@ -212,21 +212,25 @@ struct rpcrouter_xprt_info {
 };
 
 static LIST_HEAD(xprt_info_list);
-static DEFINE_SPINLOCK(xprt_info_list_lock);
+//static DEFINE_SPINLOCK(xprt_info_list_lock);
+static DEFINE_MUTEX(xprt_info_list_lock);
 
 static struct rpcrouter_xprt_info *rpcrouter_get_xprt_info(uint32_t remote_pid)
 {
    struct rpcrouter_xprt_info *xprt_info;
-   unsigned long flags;
+// unsigned long flags;
 
-   spin_lock_irqsave(&xprt_info_list_lock, flags);
+// spin_lock_irqsave(&xprt_info_list_lock, flags);
+   mutex_lock(&xprt_info_list_lock);
    list_for_each_entry(xprt_info, &xprt_info_list, list) {
        if (xprt_info->remote_pid == remote_pid) {
-           spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+           //spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+           mutex_unlock(&xprt_info_list_lock);
            return xprt_info;
        }
    }
-   spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   //spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   mutex_unlock(&xprt_info_list_lock);
    return NULL;
 }
 
@@ -548,15 +552,18 @@ int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
    msg.cli.cid = ept->cid;
 
    RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
-   spin_lock_irqsave(&xprt_info_list_lock, flags);
+   //spin_lock_irqsave(&xprt_info_list_lock, flags);
+   mutex_lock(&xprt_info_list_lock);
    list_for_each_entry(xprt_info, &xprt_info_list, list) {
        rc = rpcrouter_send_control_msg(xprt_info, &msg);
        if (rc < 0) {
-           spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+           //spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+           mutex_unlock(&xprt_info_list_lock);
            return rc;
        }
    }
-   spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   //spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   mutex_unlock(&xprt_info_list_lock);
 
    /* Free replies */
    spin_lock_irqsave(&ept->reply_q_lock, flags);
@@ -1917,7 +1924,7 @@ int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
    union rr_control_msg msg;
    struct rr_server *server;
    struct rpcrouter_xprt_info *xprt_info;
-   unsigned long flags;
+// unsigned long flags;
 
    server = rpcrouter_create_server(ept->pid, ept->cid,
                     prog, vers);
@@ -1933,15 +1940,18 @@ int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
    RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
       ept->pid, ept->cid, prog, vers);
 
-   spin_lock_irqsave(&xprt_info_list_lock, flags);
+   //spin_lock_irqsave(&xprt_info_list_lock, flags);
+   mutex_lock(&xprt_info_list_lock);
    list_for_each_entry(xprt_info, &xprt_info_list, list) {
        rc = rpcrouter_send_control_msg(xprt_info, &msg);
        if (rc < 0) {
-           spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+           //spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+           mutex_unlock(&xprt_info_list_lock);
            return rc;
        }
    }
-   spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   //spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   mutex_unlock(&xprt_info_list_lock);
    return 0;
 }
 
@@ -2015,16 +2025,18 @@ static int msm_rpcrouter_modem_notify(struct notifier_block *this,
 int msm_rpcrouter_close(void)
 {
    struct rpcrouter_xprt_info *xprt_info, *tmp_xprt_info;
-   unsigned long flags;
+// unsigned long flags;
 
-   spin_lock_irqsave(&xprt_info_list_lock, flags);
+// spin_lock_irqsave(&xprt_info_list_lock, flags);
+   mutex_lock(&xprt_info_list_lock);
    list_for_each_entry_safe(xprt_info, tmp_xprt_info,
                 &xprt_info_list, list) {
        xprt_info->xprt->close();
        list_del(&xprt_info->list);
        kfree(xprt_info);
    }
-   spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+// spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   mutex_unlock(&xprt_info_list_lock);
    return 0;
 }
 
@@ -2189,7 +2201,7 @@ static int msm_rpcrouter_add_xprt(struct rpcrouter_xprt *xprt)
 {
    struct rpcrouter_xprt_info *xprt_info;
    static uint32_t workthread_created;
-   unsigned long flags;
+// unsigned long flags;
 
    xprt_info = kmalloc(sizeof(struct rpcrouter_xprt_info), GFP_KERNEL);
    if (!xprt_info)
@@ -2229,9 +2241,11 @@ static int msm_rpcrouter_add_xprt(struct rpcrouter_xprt *xprt)
        xprt_info->initialized = 1;
    }
 
-   spin_lock_irqsave(&xprt_info_list_lock, flags);
+// spin_lock_irqsave(&xprt_info_list_lock, flags);
+   mutex_lock(&xprt_info_list_lock);
    list_add_tail(&xprt_info->list, &xprt_info_list);
-   spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+// spin_unlock_irqrestore(&xprt_info_list_lock, flags);
+   mutex_unlock(&xprt_info_list_lock);
 
    queue_work(xprt_info->workqueue, &xprt_info->read_data);
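
The first commit converts the lock protecting xprt_info_list from a spinlock taken with spin_lock_irqsave() into a sleeping mutex, which is why the "unsigned long flags" locals are commented out along with the old lock calls. Presumably the point is to allow blocking work (such as rpcrouter_send_control_msg()) inside the critical section; the trade-off is that mutex_lock() may sleep, so every one of these paths must run in process context. A minimal sketch of the resulting pattern, assuming exactly that (the example_* names are illustrative, not taken from the driver):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* Illustrative stand-in for struct rpcrouter_xprt_info. */
struct example_xprt {
	struct list_head list;
	u32 remote_pid;
};

static LIST_HEAD(example_xprt_list);
static DEFINE_MUTEX(example_xprt_list_lock);	/* was DEFINE_SPINLOCK() */

/*
 * Look up a transport by remote PID under the mutex.  mutex_lock() may
 * sleep, so unlike spin_lock_irqsave() this must never be reached from
 * interrupt or other atomic context; no irq 'flags' are needed either.
 */
static struct example_xprt *example_find_xprt(u32 remote_pid)
{
	struct example_xprt *xprt;

	mutex_lock(&example_xprt_list_lock);
	list_for_each_entry(xprt, &example_xprt_list, list) {
		if (xprt->remote_pid == remote_pid) {
			mutex_unlock(&example_xprt_list_lock);
			return xprt;
		}
	}
	mutex_unlock(&example_xprt_list_lock);
	return NULL;
}
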
commit 380eb0eb9a807a2021db650d63424e37563d88d2
Author: Andy572 <17andy@gmx.de>
Date:   Sat Mar 26 00:07:46 2011 +0100

    Add filter for input events

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d862742..5bf58e9 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -654,12 +654,31 @@ static void dbs_input_event(struct input_handle *handle, unsigned int type,
 
 }
 
+static int input_dev_filter(const char* input_dev_name)
+{
+    int ret = 0;
+    if (strstr(input_dev_name, "touchscreen") ||
+       strstr(input_dev_name, "-keypad") ||
+       strstr(input_dev_name, "-nav") ||
+       strstr(input_dev_name, "-oj")) {
+    }
+    else {
+   ret = 1;
+    }
+
+    return ret;
+}
+
 static int dbs_input_connect(struct input_handler *handler,
        struct input_dev *dev, const struct input_device_id *id)
 {
    struct input_handle *handle;
    int error;
 
+   /* filter out those input_dev that we don't care */
+   if (input_dev_filter(dev->name))
+       return 0;
+
    handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
    if (!handle)
        return -ENOMEM;
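
The second commit makes the ondemand governor's input hook ignore devices that are not user-facing. The empty if branch in input_dev_filter() is easy to misread: it returns 0 (keep the device) when the name contains one of the listed substrings and 1 (filter it out) otherwise, and dbs_input_connect() then bails out early for filtered devices before allocating an input_handle. A functionally equivalent restatement, offered only as a reading aid and not as the merged code:

#include <linux/string.h>

/*
 * Equivalent to input_dev_filter() above: non-zero means "ignore this
 * device", i.e. its name does not look like a touchscreen, keypad,
 * nav or optical-joystick device.
 */
static int input_dev_filter_equiv(const char *input_dev_name)
{
	return !(strstr(input_dev_name, "touchscreen") ||
		 strstr(input_dev_name, "-keypad") ||
		 strstr(input_dev_name, "-nav") ||
		 strstr(input_dev_name, "-oj"));
}
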
commit a7d5ba9c76aa413bec1ddd229cbf623cb3fbfe95
Author: Andy572 <17andy@gmx.de>
Date:   Sat Mar 26 22:31:36 2011 +0100

    USB: gadget: f_adb: Claim endpoints so they are not reused by another function.

diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
index 3f53f5e..ef06278 100644
--- a/drivers/usb/gadget/f_adb.c
+++ b/drivers/usb/gadget/f_adb.c
@@ -250,6 +250,7 @@ static int create_bulk_endpoints(struct adb_dev *dev,
        return -ENODEV;
    }
    DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+   ep->driver_data = dev;          /* claim the endpoint */
    dev->ep_in = ep;
 
    ep = usb_ep_autoconfig(cdev->gadget, out_desc);
@@ -258,6 +259,7 @@ static int create_bulk_endpoints(struct adb_dev *dev,
        return -ENODEV;
    }
    DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
+   ep->driver_data = dev;          /* claim the endpoint */
    dev->ep_out = ep;
 
    /* now allocate requests for our endpoints */
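
The f_adb commit relies on how usb_ep_autoconfig() worked on kernels of this vintage: an endpoint whose driver_data is NULL is treated as free, so a function driver that wants to keep an endpoint must store something non-NULL there, otherwise a later usb_ep_autoconfig() call made while binding another function can hand out the same endpoint again. A sketch of that claim pattern, with the my_func_* names being illustrative assumptions rather than code from f_adb:

#include <linux/errno.h>
#include <linux/usb/composite.h>

/* Illustrative per-function state; only the endpoint pointer matters here. */
struct my_func_dev {
	struct usb_ep *ep_in;
};

static int my_func_pick_bulk_in(struct usb_composite_dev *cdev,
				struct my_func_dev *dev,
				struct usb_endpoint_descriptor *in_desc)
{
	struct usb_ep *ep;

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep)
		return -ENODEV;

	/*
	 * Mark the endpoint as taken; a later usb_ep_autoconfig() call for
	 * another function will now skip it instead of handing it out again.
	 */
	ep->driver_data = dev;
	dev->ep_in = ep;
	return 0;
}
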
commit 515cc430b0b116771904a79979218c49a418aace
Author: Andy572 <17andy@gmx.de>
Date:   Thu Mar 31 07:20:38 2011 +0200

    import vfpmodule from 2.6.35 to fix powermanagement

diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
old mode 100644
new mode 100755
index 3ccf7e9..4933d6a
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -201,12 +201,8 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
     * Comparison instructions always return at least one of
     * these flags set.
     */
-   /* Qualcomm's patch
-    * fix floating point problem
-    * 2010-08-06, cleaneye.kim@lge.com
-    */
-   if (exceptions & (FPSCR_N | FPSCR_Z | FPSCR_C | FPSCR_V))
-       fpscr &= ~(FPSCR_N | FPSCR_Z | FPSCR_C | FPSCR_V);
+   if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
+       fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
 
    fpscr |= exceptions;
 
@@ -429,31 +425,14 @@ void vfp_reinit(void)
 
 static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
 {
-   struct thread_info *ti = current_thread_info();
-   u32 fpexc = fmrx(FPEXC);
-
-   /* if vfp is on, then save state for resumption */
-   if (fpexc & FPEXC_EN) {
-       printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
-       vfp_save_state(&ti->vfpstate, fpexc);
-
-       /* disable, just in case */
-       fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-   }
-
-   /* clear any information we had about last context state */
-   memset(last_VFP_context, 0, sizeof(last_VFP_context));
+   vfp_flush_context();
 
    return 0;
 }
 
 static int vfp_pm_resume(struct sys_device *dev)
 {
-   /* ensure we have access to the vfp */
-   vfp_enable(NULL);
-
-   /* and disable it to ensure the next usage restores the state */
-   fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+   vfp_reinit();
 
    return 0;
 }
@@ -479,54 +458,65 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
-/*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
- */
-#ifdef CONFIG_SMP
-void vfp_sync_state(struct thread_info *thread)
+void vfp_sync_hwstate(struct thread_info *thread)
 {
+   unsigned int cpu = get_cpu();
+
    /*
-    * On SMP systems, the VFP state is automatically saved at every
-    * context switch. We mark the thread VFP state as belonging to a
-    * non-existent CPU so that the saved one will be reloaded when
-    * needed.
+    * If the thread we're interested in is the current owner of the
+    * hardware VFP state, then we need to save its state.
     */
-   thread->vfpstate.hard.cpu = NR_CPUS;
+   if (last_VFP_context[cpu] == &thread->vfpstate) {
+       u32 fpexc = fmrx(FPEXC);
+
+       /*
+        * Save the last VFP state on this CPU.
+        */
+       fmxr(FPEXC, fpexc | FPEXC_EN);
+       vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
+       fmxr(FPEXC, fpexc);
+   }
+
+   put_cpu();
 }
-#else
-void vfp_sync_state(struct thread_info *thread)
+
+void vfp_flush_hwstate(struct thread_info *thread)
 {
    unsigned int cpu = get_cpu();
-   u32 fpexc = fmrx(FPEXC);
 
    /*
-    * If VFP is enabled, the previous state was already saved and
-    * last_VFP_context updated.
+    * If the thread we're interested in is the current owner of the
+    * hardware VFP state, then we need to save its state.
     */
-   if (fpexc & FPEXC_EN)
-       goto out;
+   if (last_VFP_context[cpu] == &thread->vfpstate) {
+       u32 fpexc = fmrx(FPEXC);
 
-   if (!last_VFP_context[cpu])
-       goto out;
+       fmxr(FPEXC, fpexc & ~FPEXC_EN);
 
-   /*
-    * Save the last VFP state on this CPU.
-    */
-   fmxr(FPEXC, fpexc | FPEXC_EN);
-   vfp_save_state(last_VFP_context[cpu], fpexc);
-   fmxr(FPEXC, fpexc);
+       /*
+        * Set the context to NULL to force a reload the next time
+        * the thread uses the VFP.
+        */
+       last_VFP_context[cpu] = NULL;
+   }
 
+#ifdef CONFIG_SMP
    /*
-    * Set the context to NULL to force a reload the next time the thread
-    * uses the VFP.
+    * For SMP we still have to take care of the case where the thread
+    * migrates to another CPU and then back to the original CPU on which
+    * the last VFP user is still the same thread. Mark the thread VFP
+    * state as belonging to a non-existent CPU so that the saved one will
+    * be reloaded in the above case.
     */
-   last_VFP_context[cpu] = NULL;
-
-out:
+   thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
    put_cpu();
 }
-#endif
+
+// backward compatible:
+void vfp_sync_state(struct thread_info *thread) {
+    vfp_flush_hwstate(thread);
+}
 
 #include <linux/smp.h>
 
@@ -579,7 +569,7 @@ static int __init vfp_init(void)
         */
        elf_hwcap |= HWCAP_VFP;
 #ifdef CONFIG_VFPv3
-       if (VFP_arch >= 3) {
+       if (VFP_arch >= 2) {
            elf_hwcap |= HWCAP_VFPv3;
 
            /*
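
The vfpmodule commit backports the 2.6.35 split of the old vfp_sync_state() into vfp_sync_hwstate() (copy the live hardware registers into thread->vfpstate if that thread owns the VFP) and vfp_flush_hwstate() (drop hardware ownership so the memory copy is reloaded on next use), while keeping a vfp_sync_state() wrapper so existing callers in this tree still build. The split matters for callers such as ptrace that edit another thread's VFP registers; a sketch of the intended pairing, not taken from this tree and with modify_vfp_regs() as a hypothetical placeholder:

struct thread_info;
extern void vfp_sync_hwstate(struct thread_info *thread);
extern void vfp_flush_hwstate(struct thread_info *thread);
extern void modify_vfp_regs(struct thread_info *thread);	/* hypothetical, not a real kernel API */

static void example_set_vfp_regs(struct thread_info *thread)
{
	/*
	 * Pull the live hardware registers into the thread's memory copy
	 * if that thread currently owns the VFP on some CPU.
	 */
	vfp_sync_hwstate(thread);

	modify_vfp_regs(thread);

	/*
	 * Drop hardware ownership so the edited memory copy is reloaded
	 * the next time the thread touches the VFP.
	 */
	vfp_flush_hwstate(thread);
}
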
commit c706951cb66062ca08469e2a61ad52c19079a647
Author: Andy572 <17andy@gmx.de>
Date:   Sat Apr 2 22:55:48 2011 +0200

    USB: disable endpoints after unbinding interfaces, not before

diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 980a8d2..738aa1e 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1185,13 +1185,14 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
 {
    int i;
 
+/*
    dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
        skip_ep0 ? "non-ep0" : "all");
    for (i = skip_ep0; i < 16; ++i) {
        usb_disable_endpoint(dev, i, true);
        usb_disable_endpoint(dev, i + USB_DIR_IN, true);
    }
-
+*/
    /* getting rid of interfaces will disconnect
     * any drivers bound to them (a key side effect)
     */
@@ -1221,6 +1222,13 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
        if (dev->state == USB_STATE_CONFIGURED)
            usb_set_device_state(dev, USB_STATE_ADDRESS);
    }
+
+       dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
+               skip_ep0 ? "non-ep0" : "all");
+       for (i = skip_ep0; i < 16; ++i) {
+               usb_disable_endpoint(dev, i, true);
+               usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+       }
 }
 
 /**
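
The final commit moves the endpoint-disabling loop in usb_disable_device() from the top of the function to the bottom, matching the upstream change of the same name: interface drivers are unbound first, while the endpoints are still usable for any cleanup traffic their disconnect() methods want to do, and only afterwards are the endpoints torn down. A simplified skeleton of the resulting order (not the full function; usb_disable_endpoint() is a USB-core-internal helper):

#include <linux/usb.h>

/* USB-core-internal helper, declared in drivers/usb/core/usb.h. */
extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
				 bool reset_hardware);

static void usb_disable_device_sketch(struct usb_device *dev, int skip_ep0)
{
	int i;

	/*
	 * 1. Unbind the interface drivers first.  Their disconnect()
	 *    methods may still want to talk to the device or cancel URBs,
	 *    so the endpoints are left alive while they run.
	 */
	/* ... interface teardown happens here ... */

	/* 2. Only then nuke the endpoints themselves. */
	for (i = skip_ep0; i < 16; ++i) {
		usb_disable_endpoint(dev, i, true);
		usb_disable_endpoint(dev, i + USB_DIR_IN, true);
	}
}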