  1. diff --git a/Documentation/devicetree/bindings/qdsp/msm-adsp-sensors.txt b/Documentation/devicetree/bindings/qdsp/msm-adsp-sensors.txt
  2. index ff0b1ef..4f1c10b 100644
  3. --- a/Documentation/devicetree/bindings/qdsp/msm-adsp-sensors.txt
  4. +++ b/Documentation/devicetree/bindings/qdsp/msm-adsp-sensors.txt
  5. @@ -3,17 +3,9 @@
  6. Required properties:
  7.  
  8. - compatible: "qcom,msm-adsp-sensors"
  9. - - qcom,src-id: Master port id
  10. - - qcom,dst-id: Slave port id
  11. - - qcom,ab: Arbitrated bandwidth in bytes/s
  12. - - qcom,ib: Instantaneous bandwidth in bytes/s
  13.  
  14. Example:
  15.  
  16. qcom,msm-adsp-sensors {
  17. compatible = "qcom,msm-adsp-sensors";
  18. - qcom,src-id = <11>;
  19. - qcom,dst-id = <604>;
  20. - qcom,ab = <209715200>;
  21. - qcom,ib = <471859200>;
  22. };
  23. diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
  24. index 979819f..5fa7dde 100644
  25. --- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
  26. +++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
  27. @@ -82,9 +82,9 @@ Optional properties :
  28. - qcom,hsusb-l1-supported: If present, the device supports l1 (Link power
  29. management).
  30. - qcom,no-selective-suspend: If present selective suspend is disabled on hub ports.
  31. -- qcom,hsusb-otg-dpsehv-int: If present, indicates mpm interrupt to be configured
  32. +- qcom,hsusb-otg-mpm-dpsehv-int: If present, indicates mpm interrupt to be configured
  33. for detection of dp line transition during VDD minimization.
  34. -- qcom,hsusb-otg-dmsehv-int: If present, indicates mpm interrupt to be configured
  35. +- qcom,hsusb-otg-mpm-dmsehv-int: If present, indicates mpm interrupt to be configured
  36. for detection of dm line transition during VDD minimization.
  37. - qcom,ahb-async-bridge-bypass: If present, indicates that enable AHB2AHB By Pass
  38. mode with device controller for better throughput. With this mode, USB Core
  39. diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
  40. index 2cb528c..6ef897a 100644
  41. --- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
  42. +++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
  43. @@ -39,7 +39,7 @@ in TX data path.
  44. 1200 - WCN XO settling time (usec)
  45. 1 - WCN RPM power collapse enabled
  46. 1 - WCN standalone power collapse enabled
  47. -
  48. + 6 - GPIO strength value
  49.  
  50. Example:
  51.  
  52. @@ -64,5 +64,5 @@ Example:
  53. qcom,has-48mhz-xo;
  54. qcom,has-pronto-hw;
  55. qcom,wcnss-adc_tm = <&pm8226_adc_tm>;
  56. - qcom,wcnss-pm = <11 21 1200 1 1>;
  57. + qcom,wcnss-pm = <11 21 1200 1 1 6>;
  58. };
  59. diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
  60. index 1f4e718..799b575 100644
  61. --- a/arch/arm/boot/compressed/libfdt_env.h
  62. +++ b/arch/arm/boot/compressed/libfdt_env.h
  63. @@ -1,6 +1,7 @@
  64. #ifndef _ARM_LIBFDT_ENV_H
  65. #define _ARM_LIBFDT_ENV_H
  66.  
  67. +#include <linux/kernel.h>
  68. #include <linux/types.h>
  69. #include <linux/string.h>
  70. #include <asm/byteorder.h>
  71. diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
  72. index 15c6680..9b6e34b 100644
  73. --- a/arch/arm/boot/dts/msm-pm8226.dtsi
  74. +++ b/arch/arm/boot/dts/msm-pm8226.dtsi
  75. @@ -94,6 +94,8 @@
  76. qcom,cool-bat-decidegc = <100>;
  77. qcom,cool-bat-mv = <4100>;
  78. qcom,ibatmax-cool-ma = <350>;
  79. + qcom,chg-iadc = <&pm8226_iadc>;
  80. + qcom,ibat-calibration-enabled;
  81.  
  82. pm8226_chg_chgr: qcom,chgr@1000 {
  83. status = "disabled";
  84. diff --git a/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi b/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
  85. index 3734273..2f2eacb 100644
  86. --- a/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
  87. +++ b/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
  88. @@ -1,4 +1,4 @@
  89. -/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  90. +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
  91. *
  92. * This program is free software; you can redistribute it and/or modify
  93. * it under the terms of the GNU General Public License version 2 and
  94. @@ -155,6 +155,10 @@
  95.  
  96. qcom,hsusb-otg-mode = <3>;
  97. vbus_otg-supply = <&pm8226_chg_otg>;
  98. +
  99. + qcom,hsusb-otg-mpm-dpsehv-int = <49>;
  100. + qcom,hsusb-otg-mpm-dmsehv-int = <58>;
  101. +
  102. };
  103.  
  104. &sdcc1 {
  105. diff --git a/arch/arm/boot/dts/msm8226-720p-mtp.dtsi b/arch/arm/boot/dts/msm8226-720p-mtp.dtsi
  106. index 3f79fab..d8d3c44 100644
  107. --- a/arch/arm/boot/dts/msm8226-720p-mtp.dtsi
  108. +++ b/arch/arm/boot/dts/msm8226-720p-mtp.dtsi
  109. @@ -1,4 +1,4 @@
  110. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  111. +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
  112. *
  113. * This program is free software; you can redistribute it and/or modify
  114. * it under the terms of the GNU General Public License version 2 and
  115. @@ -145,6 +145,9 @@
  116.  
  117. qcom,hsusb-otg-mode = <3>;
  118. vbus_otg-supply = <&pm8226_chg_otg>;
  119. +
  120. + qcom,hsusb-otg-mpm-dpsehv-int = <49>;
  121. + qcom,hsusb-otg-mpm-dmsehv-int = <58>;
  122. };
  123.  
  124. &sdcc1 {
  125. diff --git a/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
  126. index 92285a7..1631bd6 100644
  127. --- a/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
  128. +++ b/arch/arm/boot/dts/msm8226-camera-sensor-mtp.dtsi
  129. @@ -174,80 +174,4 @@
  130. qcom,cci-master = <0>;
  131. status = "ok";
  132. };
  133. -
  134. - qcom,camera@6f {
  135. - compatible = "qcom,ov8825";
  136. - reg = <0x6f>;
  137. - qcom,slave-id = <0x6c 0x300a 0x8825>;
  138. - qcom,csiphy-sd-index = <0>;
  139. - qcom,csid-sd-index = <0>;
  140. - qcom,actuator-src = <&actuator0>;
  141. - qcom,led-flash-src = <&led_flash0>;
  142. - qcom,mount-angle = <270>;
  143. - qcom,sensor-name = "ov8825";
  144. - cam_vdig-supply = <&pm8226_l5>;
  145. - cam_vana-supply = <&pm8226_l19>;
  146. - cam_vio-supply = <&pm8226_lvs1>;
  147. - cam_vaf-supply = <&pm8226_l15>;
  148. - qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
  149. - "cam_vaf";
  150. - qcom,cam-vreg-type = <0 1 0 0>;
  151. - qcom,cam-vreg-min-voltage = <1200000 0 2850000 2800000>;
  152. - qcom,cam-vreg-max-voltage = <1200000 0 2850000 2800000>;
  153. - qcom,cam-vreg-op-mode = <200000 0 80000 100000>;
  154. - qcom,gpio-no-mux = <0>;
  155. - gpios = <&msmgpio 26 0>,
  156. - <&msmgpio 37 0>,
  157. - <&msmgpio 35 0>;
  158. - qcom,gpio-reset = <1>;
  159. - qcom,gpio-standby = <2>;
  160. - qcom,gpio-req-tbl-num = <0 1 2>;
  161. - qcom,gpio-req-tbl-flags = <1 0 0>;
  162. - qcom,gpio-req-tbl-label = "CAMIF_MCLK",
  163. - "CAM_RESET1",
  164. - "CAM_STANDBY";
  165. - qcom,csi-lane-assign = <0x4320>;
  166. - qcom,csi-lane-mask = <0x1f>;
  167. - qcom,sensor-position = <0>;
  168. - qcom,sensor-mode = <1>;
  169. - qcom,cci-master = <0>;
  170. - };
  171. -
  172. - qcom,camera@6d {
  173. - compatible = "qcom,ov9724";
  174. - reg = <0x6d>;
  175. - qcom,slave-id = <0x20 0x0 0x9724>;
  176. - qcom,csiphy-sd-index = <1>;
  177. - qcom,csid-sd-index = <1>;
  178. - qcom,mount-angle = <270>;
  179. - qcom,sensor-name = "ov9724";
  180. - cam_vdig-supply = <&pm8226_l5>;
  181. - cam_vana-supply = <&pm8226_l19>;
  182. - cam_vio-supply = <&pm8226_lvs1>;
  183. - qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
  184. - qcom,cam-vreg-type = <0 1 0>;
  185. - qcom,cam-vreg-min-voltage = <1200000 0 2850000>;
  186. - qcom,cam-vreg-max-voltage = <1200000 0 2850000>;
  187. - qcom,cam-vreg-op-mode = <200000 0 80000>;
  188. - qcom,gpio-no-mux = <0>;
  189. - gpios = <&msmgpio 26 0>,
  190. - <&msmgpio 28 0>,
  191. - <&msmgpio 36 0>;
  192. - qcom,gpio-reset = <1>;
  193. - qcom,gpio-standby = <2>;
  194. - qcom,gpio-req-tbl-num = <0 1 2>;
  195. - qcom,gpio-req-tbl-flags = <1 0 0>;
  196. - qcom,gpio-req-tbl-label = "CAMIF_MCLK",
  197. - "CAM_RESET",
  198. - "CAM_STANDBY";
  199. - qcom,gpio-set-tbl-num = <1 1>;
  200. - qcom,gpio-set-tbl-flags = <0 2>;
  201. - qcom,gpio-set-tbl-delay = <1000 4000>;
  202. - qcom,csi-lane-assign = <0x4320>;
  203. - qcom,csi-lane-mask = <0x3>;
  204. - qcom,sensor-position = <1>;
  205. - qcom,sensor-mode = <1>;
  206. - qcom,cci-master = <0>;
  207. - status = "ok";
  208. - };
  209. };
  210. diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
  211. index 8d2414c..f20c959 100644
  212. --- a/arch/arm/boot/dts/msm8226.dtsi
  213. +++ b/arch/arm/boot/dts/msm8226.dtsi
  214. @@ -1,4 +1,4 @@
  215. -/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  216. +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  217. *
  218. * This program is free software; you can redistribute it and/or modify
  219. * it under the terms of the GNU General Public License version 2 and
  220. @@ -628,6 +628,13 @@
  221. qcom,msm-auxpcm-interface = "primary";
  222. };
  223.  
  224. + qcom,avtimer@fe053000 {
  225. + compatible = "qcom,avtimer";
  226. + reg = <0xfe053008 0x4>,
  227. + <0xfe05300c 0x4>;
  228. + reg-names = "avtimer_lsb_addr", "avtimer_msb_addr";
  229. + };
  230. +
  231. qcom,wcnss-wlan@fb000000 {
  232. compatible = "qcom,wcnss_wlan";
  233. reg = <0xfb000000 0x280000>,
  234. @@ -652,10 +659,6 @@
  235.  
  236. qcom,msm-adsp-sensors {
  237. compatible = "qcom,msm-adsp-sensors";
  238. - qcom,src-id = <11>;
  239. - qcom,dst-id = <604>;
  240. - qcom,ab = <32505856>;
  241. - qcom,ib = <32505856>;
  242. };
  243.  
  244. qcom,wdt@f9017000 {
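
The qcom,avtimer@fe053000 node added above only describes two 32-bit MMIO registers ("avtimer_lsb_addr"/"avtimer_msb_addr"). As a minimal sketch, not part of the patch, and assuming the pair forms one monotonically increasing 64-bit counter, a rollover-safe read would re-check the MSB:

/* Sketch only (assumption): combine the LSB/MSB halves of the AV timer. */
static u64 avtimer_read_64(void __iomem *lsb, void __iomem *msb)
{
	u32 hi, lo, hi2;

	do {
		hi  = readl_relaxed(msb);   /* high word first */
		lo  = readl_relaxed(lsb);   /* then low word */
		hi2 = readl_relaxed(msb);   /* re-read high word */
	} while (hi != hi2);                /* retry if the low word rolled over */

	return ((u64)hi << 32) | lo;
}
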
  245. diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
  246. index f8c9b46..5faa75c 100644
  247. --- a/arch/arm/boot/dts/msm8974.dtsi
  248. +++ b/arch/arm/boot/dts/msm8974.dtsi
  249. @@ -1,4 +1,4 @@
  250. -/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  251. +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  252. *
  253. * This program is free software; you can redistribute it and/or modify
  254. * it under the terms of the GNU General Public License version 2 and
  255. @@ -1910,10 +1910,6 @@
  256.  
  257. qcom,msm-adsp-sensors {
  258. compatible = "qcom,msm-adsp-sensors";
  259. - qcom,src-id = <11>;
  260. - qcom,dst-id = <604>;
  261. - qcom,ab = <32505856>;
  262. - qcom,ib = <32505856>;
  263. };
  264.  
  265. qcom,mss@fc880000 {
  266. @@ -1987,7 +1983,7 @@
  267. gpios = <&msmgpio 36 0>, <&msmgpio 37 0>, <&msmgpio 38 0>, <&msmgpio 39 0>, <&msmgpio 40 0>;
  268. qcom,has-48mhz-xo;
  269. qcom,has-pronto-hw;
  270. - qcom,wcnss-pm = <11 21 1200 1 1>;
  271. + qcom,wcnss-pm = <11 19 1200 1 1 6>;
  272. };
  273.  
  274. qcom,ocmem@fdd00000 {
  275. diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
  276. index 5134613..085b129 100644
  277. --- a/arch/arm/configs/msm8226-perf_defconfig
  278. +++ b/arch/arm/configs/msm8226-perf_defconfig
  279. @@ -415,6 +415,10 @@ CONFIG_RTC_DRV_QPNP=y
  280. CONFIG_UIO=y
  281. CONFIG_UIO_MSM_SHAREDMEM=y
  282. CONFIG_STAGING=y
  283. +CONFIG_ZRAM=y
  284. +# CONFIG_ZRAM_DEBUG is not set
  285. +CONFIG_ZSMALLOC=y
  286. +CONFIG_SWAP=y
  287. CONFIG_ANDROID=y
  288. CONFIG_ANDROID_BINDER_IPC=y
  289. CONFIG_ASHMEM=y
  290. @@ -429,6 +433,7 @@ CONFIG_QPNP_PWM=y
  291. CONFIG_QPNP_POWER_ON=y
  292. CONFIG_QPNP_VIBRATOR=y
  293. CONFIG_QPNP_REVID=y
  294. +CONFIG_MSM_AVTIMER=y
  295. CONFIG_MSM_IOMMU_V1=y
  296. CONFIG_MSM_IOMMU_VBIF_CHECK=y
  297. CONFIG_EXT2_FS=y
  298. diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
  299. index 68c47f0..ce6f806 100644
  300. --- a/arch/arm/configs/msm8226_defconfig
  301. +++ b/arch/arm/configs/msm8226_defconfig
  302. @@ -2,7 +2,7 @@
  303. CONFIG_EXPERIMENTAL=y
  304. CONFIG_SYSVIPC=y
  305. CONFIG_AUDIT=y
  306. -# CONFIG_RCU_FAST_NO_HZ=y
  307. +CONFIG_RCU_FAST_NO_HZ=y
  308. CONFIG_IKCONFIG=y
  309. CONFIG_IKCONFIG_PROC=y
  310. CONFIG_CGROUPS=y
  311. @@ -208,15 +208,15 @@ CONFIG_NET_SCH_HTB=y
  312. CONFIG_NET_SCH_PRIO=y
  313. CONFIG_NET_CLS_FW=y
  314. CONFIG_BT=y
  315. -# CONFIG_BT_RFCOMM is not set
  316. -# CONFIG_BT_RFCOMM is not set
  317. -# CONFIG_BT_BNEP is not set
  318. -#CONFIG_BT_BNEP_MC_FILTER is not set
  319. -# CONFIG_BT_BNEP_PROTO_FILTER is not set
  320. +CONFIG_BT_RFCOMM=y
  321. +CONFIG_BT_RFCOMM_TTY=y
  322. +CONFIG_BT_BNEP=y
  323. +CONFIG_BT_BNEP_MC_FILTER=y
  324. +CONFIG_BT_BNEP_PROTO_FILTER=y
  325. CONFIG_BT_HIDP=y
  326. -# CONFIG_BT_HCISMD is not set
  327. +CONFIG_BT_HCISMD=y
  328. CONFIG_CFG80211=y
  329. -# CONFIG_CFG80211_INTERNAL_REGDB is not set
  330. +CONFIG_CFG80211_INTERNAL_REGDB=y
  331. CONFIG_NL80211_TESTMODE=y
  332. CONFIG_CMA=y
  333. CONFIG_BLK_DEV_LOOP=y
  334. @@ -440,6 +440,10 @@ CONFIG_RTC_DRV_QPNP=y
  335. CONFIG_UIO=y
  336. CONFIG_UIO_MSM_SHAREDMEM=y
  337. CONFIG_STAGING=y
  338. +CONFIG_ZRAM=y
  339. +# CONFIG_ZRAM_DEBUG is not set
  340. +CONFIG_ZSMALLOC=y
  341. +CONFIG_SWAP=y
  342. CONFIG_ANDROID=y
  343. CONFIG_ANDROID_BINDER_IPC=y
  344. CONFIG_ASHMEM=y
  345. @@ -454,6 +458,7 @@ CONFIG_QPNP_PWM=y
  346. CONFIG_QPNP_POWER_ON=y
  347. CONFIG_QPNP_VIBRATOR=y
  348. CONFIG_QPNP_REVID=y
  349. +CONFIG_MSM_AVTIMER=y
  350. CONFIG_MSM_IOMMU_V1=y
  351. CONFIG_MSM_IOMMU_VBIF_CHECK=y
  352. CONFIG_CORESIGHT=y
  353. @@ -484,6 +489,8 @@ CONFIG_NLS_ASCII=y
  354. CONFIG_NLS_ISO8859_1=y
  355. CONFIG_PRINTK_TIME=y
  356. CONFIG_MAGIC_SYSRQ=y
  357. +CONFIG_LOCKUP_DETECTOR=y
  358. +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
  359. # CONFIG_SYSRQ_SCHED_DEBUG is not set
  360. CONFIG_SCHEDSTATS=y
  361. CONFIG_TIMER_STATS=y
  362. @@ -527,5 +534,4 @@ CONFIG_MOBICORE_SUPPORT=m
  363. CONFIG_MOBICORE_API=m
  364. CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
  365. CONFIG_MSM_RDBG=m
  366. -# DO NOT USE ROW IO-SCHEDULER
  367. -# CONFIG_DEFAULT_ROW is not set
  368. +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
  369. diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
  370. index 55bc185..931b272 100644
  371. --- a/arch/arm/configs/msm8610-perf_defconfig
  372. +++ b/arch/arm/configs/msm8610-perf_defconfig
  373. @@ -74,10 +74,9 @@ CONFIG_SCHED_MC=y
  374. CONFIG_ARM_ARCH_TIMER=y
  375. CONFIG_PREEMPT=y
  376. CONFIG_AEABI=y
  377. +CONFIG_HIGHMEM=y
  378. CONFIG_COMPACTION=y
  379. -CONFIG_KSM=y
  380. CONFIG_CC_STACKPROTECTOR=y
  381. -CONFIG_KSM=y
  382. CONFIG_CP_ACCESS=y
  383. CONFIG_USE_OF=y
  384. CONFIG_CPU_FREQ_GOV_POWERSAVE=y
  385. diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
  386. index b408aba..029ac2d 100644
  387. --- a/arch/arm/configs/msm8610_defconfig
  388. +++ b/arch/arm/configs/msm8610_defconfig
  389. @@ -75,10 +75,9 @@ CONFIG_SCHED_MC=y
  390. CONFIG_ARM_ARCH_TIMER=y
  391. CONFIG_PREEMPT=y
  392. CONFIG_AEABI=y
  393. +CONFIG_HIGHMEM=y
  394. CONFIG_COMPACTION=y
  395. -CONFIG_KSM=y
  396. CONFIG_CC_STACKPROTECTOR=y
  397. -CONFIG_KSM=y
  398. CONFIG_CP_ACCESS=y
  399. CONFIG_USE_OF=y
  400. CONFIG_CPU_FREQ_GOV_POWERSAVE=y
  401. @@ -507,6 +506,8 @@ CONFIG_NLS_ASCII=y
  402. CONFIG_NLS_ISO8859_1=y
  403. CONFIG_PRINTK_TIME=y
  404. CONFIG_MAGIC_SYSRQ=y
  405. +CONFIG_LOCKUP_DETECTOR=y
  406. +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
  407. # CONFIG_SYSRQ_SCHED_DEBUG is not set
  408. CONFIG_SCHEDSTATS=y
  409. CONFIG_TIMER_STATS=y
  410. @@ -562,3 +563,4 @@ CONFIG_SENSORS_CAPELLA_CM36283=y
  411. CONFIG_MSM_RDBG=m
  412. CONFIG_MOBICORE_SUPPORT=m
  413. CONFIG_MOBICORE_API=m
  414. +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
  415. diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
  416. index c7ebd60..b72527c 100755
  417. --- a/arch/arm/configs/msm8974-perf_defconfig
  418. +++ b/arch/arm/configs/msm8974-perf_defconfig
  419. @@ -459,6 +459,7 @@ CONFIG_QPNP_POWER_ON=y
  420. CONFIG_QPNP_CLKDIV=y
  421. CONFIG_QPNP_REVID=y
  422. CONFIG_QPNP_COINCELL=y
  423. +CONFIG_MSM_AVTIMER=y
  424. CONFIG_MSM_IOMMU_V1=y
  425. CONFIG_IOMMU_PGTABLES_L2=y
  426. CONFIG_MSM_IOMMU_VBIF_CHECK=y
  427. diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
  428. index d0cf629..c6fc8cd 100755
  429. --- a/arch/arm/configs/msm8974_defconfig
  430. +++ b/arch/arm/configs/msm8974_defconfig
  431. @@ -484,6 +484,7 @@ CONFIG_QPNP_POWER_ON=y
  432. CONFIG_QPNP_CLKDIV=y
  433. CONFIG_QPNP_REVID=y
  434. CONFIG_QPNP_COINCELL=y
  435. +CONFIG_MSM_AVTIMER=y
  436. CONFIG_MSM_IOMMU_V1=y
  437. CONFIG_MSM_IOMMU_PMON=y
  438. CONFIG_IOMMU_PGTABLES_L2=y
  439. @@ -523,6 +524,7 @@ CONFIG_NLS_ISO8859_1=y
  440. CONFIG_PRINTK_TIME=y
  441. CONFIG_MAGIC_SYSRQ=y
  442. CONFIG_LOCKUP_DETECTOR=y
  443. +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
  444. # CONFIG_DETECT_HUNG_TASK is not set
  445. # CONFIG_SYSRQ_SCHED_DEBUG is not set
  446. CONFIG_SCHEDSTATS=y
  447. @@ -570,3 +572,4 @@ CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
  448. CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
  449. CONFIG_MSM_RDBG=m
  450. CONFIG_MSM_AVTIMER=y
  451. +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
  452. diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
  453. index bdfdfa0..c6cf978 100644
  454. --- a/arch/arm/include/asm/arch_timer.h
  455. +++ b/arch/arm/include/asm/arch_timer.h
  456. @@ -5,6 +5,14 @@
  457. #include <linux/clocksource.h>
  458. #include <asm/errno.h>
  459.  
  460. +#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
  461. +#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
  462. +#define ARCH_TIMER_VIRT_EVT_EN (1 << 2)
  463. +#define ARCH_TIMER_EVT_TRIGGER_SHIFT (4)
  464. +#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
  465. +#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
  466. +#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
  467. +
  468. struct arch_timer {
  469. struct resource res[3];
  470. };
  471. @@ -13,6 +21,46 @@ struct arch_timer {
  472. int arch_timer_register(struct arch_timer *);
  473. int arch_timer_of_register(void);
  474. cycle_t arch_counter_get_cntpct(void);
  475. +
  476. +static inline u32 arch_timer_get_cntkctl(void)
  477. +{
  478. + u32 cntkctl;
  479. + asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
  480. + return cntkctl;
  481. +}
  482. +
  483. +static inline void arch_timer_set_cntkctl(u32 cntkctl)
  484. +{
  485. + asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
  486. +}
  487. +
  488. +static inline void arch_counter_set_user_access(void)
  489. +{
  490. + u32 cntkctl = arch_timer_get_cntkctl();
  491. +
  492. + /* Disable user access to the timers and the physical counter */
  493. + /* Also disable virtual event stream */
  494. + cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
  495. + | ARCH_TIMER_USR_VT_ACCESS_EN
  496. + | ARCH_TIMER_VIRT_EVT_EN
  497. + | ARCH_TIMER_USR_PCT_ACCESS_EN);
  498. +
  499. + /* Enable user access to the virtual counter */
  500. + cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
  501. +
  502. + arch_timer_set_cntkctl(cntkctl);
  503. +}
  504. +
  505. +static inline void arch_timer_evtstrm_enable(int divider)
  506. +{
  507. + u32 cntkctl = arch_timer_get_cntkctl();
  508. + cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
  509. + /* Set the divider and enable virtual event stream */
  510. + cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
  511. + | ARCH_TIMER_VIRT_EVT_EN;
  512. + arch_timer_set_cntkctl(cntkctl);
  513. + elf_hwcap |= HWCAP_EVTSTRM;
  514. +}
  515. #else
  516. static inline int arch_timer_register(struct arch_timer *at)
  517. {
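
The arch_timer.h hunk above clears the PL0 physical-timer/counter bits in CNTKCTL and sets ARCH_TIMER_USR_VCT_ACCESS_EN, so user space may read the virtual counter directly. As an illustration only, not part of the patch, a PL0 read of CNTVCT on ARMv7 looks like:

/* Sketch only: read the 64-bit virtual counter from user space. */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t read_cntvct(void)
{
	uint32_t lo, hi;

	/* MRRC p15, 1, <Rt>, <Rt2>, c14 returns CNTVCT (low, high) */
	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (lo), "=r" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("CNTVCT = %llu\n", (unsigned long long)read_cntvct());
	return 0;
}
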
  518. diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
  519. index 9176261..ec3f3bb 100644
  520. --- a/arch/arm/include/asm/hwcap.h
  521. +++ b/arch/arm/include/asm/hwcap.h
  522. @@ -24,7 +24,8 @@
  523. #define HWCAP_IDIVA (1 << 17)
  524. #define HWCAP_IDIVT (1 << 18)
  525. #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
  526. -
  527. +#define HWCAP_LPAE (1 << 20)
  528. +#define HWCAP_EVTSTRM (1 << 21)
  529. #if defined(__KERNEL__)
  530. #if !defined(__ASSEMBLY__)
  531. /*
  532. diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
  533. index 3c463d0..7ac034e 100644
  534. --- a/arch/arm/kernel/arch_timer.c
  535. +++ b/arch/arm/kernel/arch_timer.c
  536. @@ -15,6 +15,7 @@
  537. #include <linux/device.h>
  538. #include <linux/smp.h>
  539. #include <linux/cpu.h>
  540. +#include <linux/cpu_pm.h>
  541. #include <linux/jiffies.h>
  542. #include <linux/clockchips.h>
  543. #include <linux/interrupt.h>
  544. @@ -259,6 +260,8 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
  545. if (arch_timer_ppi2)
  546. enable_percpu_irq(arch_timer_ppi2, 0);
  547.  
  548. + arch_counter_set_user_access();
  549. +
  550. return 0;
  551. }
  552.  
  553. @@ -408,6 +411,33 @@ static void __init arch_timer_counter_init(void)
  554. register_current_timer_delay(&arch_delay_timer);
  555. }
  556.  
  557. +#ifdef CONFIG_CPU_PM
  558. +static unsigned int saved_cntkctl;
  559. +static int arch_timer_cpu_pm_notify(struct notifier_block *self,
  560. + unsigned long action, void *hcpu)
  561. +{
  562. + if (action == CPU_PM_ENTER)
  563. + saved_cntkctl = arch_timer_get_cntkctl();
  564. + else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
  565. + arch_timer_set_cntkctl(saved_cntkctl);
  566. + return NOTIFY_OK;
  567. +}
  568. +
  569. +static struct notifier_block arch_timer_cpu_pm_notifier = {
  570. + .notifier_call = arch_timer_cpu_pm_notify,
  571. +};
  572. +
  573. +static int __init arch_timer_cpu_pm_init(void)
  574. +{
  575. + return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
  576. +}
  577. +#else
  578. +static int __init arch_timer_cpu_pm_init(void)
  579. +{
  580. + return 0;
  581. +}
  582. +#endif
  583. +
  584. static int __init arch_timer_common_register(void)
  585. {
  586. int err;
  587. @@ -443,6 +473,10 @@ static int __init arch_timer_common_register(void)
  588. }
  589. }
  590.  
  591. + err = arch_timer_cpu_pm_init();
  592. + if (err)
  593. + goto out_free_irq;
  594. +
  595. err = local_timer_register(&arch_timer_ops);
  596. if (err) {
  597. /*
  598. @@ -456,10 +490,12 @@ static int __init arch_timer_common_register(void)
  599. }
  600.  
  601. if (err)
  602. - goto out_free_irq;
  603. + goto out_unreg_notify;
  604.  
  605. return 0;
  606.  
  607. +out_unreg_notify:
  608. + cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier);
  609. out_free_irq:
  610. free_percpu_irq(arch_timer_ppi, arch_timer_evt);
  611. if (arch_timer_ppi2)
  612. diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
  613. index 317400a..9054e6e 100644
  614. --- a/arch/arm/kernel/entry-armv.S
  615. +++ b/arch/arm/kernel/entry-armv.S
  616. @@ -431,6 +431,12 @@ __und_usr:
  617. @ r0 - instruction
  618. @
  619. adr r9, BSYM(ret_from_exception)
  620. +
  621. + @ IRQs must be enabled before attempting to read the instruction from
  622. + @ user space since that could cause a page/translation fault if the
  623. + @ page table was modified by another CPU.
  624. + enable_irq
  625. +
  626. adr lr, BSYM(__und_usr_unknown)
  627. tst r3, #PSR_T_BIT @ Thumb mode?
  628. itet eq @ explicit IT needed for the 1f label
  629. @@ -639,7 +645,6 @@ call_fpe:
  630. #endif
  631.  
  632. do_fpe:
  633. - enable_irq
  634. ldr r4, .LCfp
  635. add r10, r10, #TI_FPSTATE @ r10 = workspace
  636. ldr pc, [r4] @ Call FP module USR entry point
  637. @@ -663,7 +668,6 @@ ENTRY(no_fp)
  638. ENDPROC(no_fp)
  639.  
  640. __und_usr_unknown:
  641. - enable_irq
  642. mov r0, sp
  643. adr lr, BSYM(ret_from_exception)
  644. b do_undefinstr
  645. diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
  646. index a087838..f07d42b 100644
  647. --- a/arch/arm/kernel/iwmmxt.S
  648. +++ b/arch/arm/kernel/iwmmxt.S
  649. @@ -61,7 +61,7 @@
  650. * r9 = ret_from_exception
  651. * lr = undefined instr exit
  652. *
  653. - * called from prefetch exception handler with interrupts disabled
  654. + * called from prefetch exception handler with interrupts enabled
  655. */
  656.  
  657. ENTRY(iwmmxt_task_enable)
  658. diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
  659. index fe9abfe..58f132c 100644
  660. --- a/arch/arm/kernel/setup.c
  661. +++ b/arch/arm/kernel/setup.c
  662. @@ -1086,6 +1086,9 @@ static const char *hwcap_str[] = {
  663. "vfpv4",
  664. "idiva",
  665. "idivt",
  666. + "vfpd32",
  667. + "lpae",
  668. + "evtstrm",
  669. NULL
  670. };
  671.  
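
With HWCAP_EVTSTRM defined in hwcap.h and the "evtstrm" string appended to hwcap_str[] above, user space can detect the feature from the ELF auxiliary vector (or the Features line of /proc/cpuinfo). A minimal sketch, not part of the patch:

/* Sketch only: test the event-stream hwcap bit via getauxval(). */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_EVTSTRM
#define HWCAP_EVTSTRM (1 << 21)	/* value from the hwcap.h hunk above */
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("evtstrm %ssupported\n",
	       (hwcaps & HWCAP_EVTSTRM) ? "" : "not ");
	return 0;
}
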
  672. diff --git a/arch/arm/mach-ep93xx/crunch-bits.S b/arch/arm/mach-ep93xx/crunch-bits.S
  673. index 0ec9bb4..1d5ced2 100644
  674. --- a/arch/arm/mach-ep93xx/crunch-bits.S
  675. +++ b/arch/arm/mach-ep93xx/crunch-bits.S
  676. @@ -62,7 +62,7 @@
  677. * r9 = ret_from_exception
  678. * lr = undefined instr exit
  679. *
  680. - * called from prefetch exception handler with interrupts disabled
  681. + * called from prefetch exception handler with interrupts enabled
  682. */
  683. ENTRY(crunch_task_enable)
  684. ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
  685. diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
  686. index 529e7d1..22a9fed 100644
  687. --- a/arch/arm/mach-msm/bam_dmux.c
  688. +++ b/arch/arm/mach-msm/bam_dmux.c
  689. @@ -2075,6 +2075,8 @@ static int bam_init(void)
  690. void *a2_virt_addr;
  691. int skip_iounmap = 0;
  692.  
  693. + in_global_reset = 0;
  694. + in_ssr = 0;
  695. vote_dfab();
  696. /* init BAM */
  697. a2_virt_addr = ioremap_nocache((unsigned long)(a2_phys_base),
  698. diff --git a/arch/arm/mach-msm/include/mach/sensors_adsp.h b/arch/arm/mach-msm/include/mach/sensors_adsp.h
  699. deleted file mode 100644
  700. index 3c65e37..0000000
  701. --- a/arch/arm/mach-msm/include/mach/sensors_adsp.h
  702. +++ /dev/null
  703. @@ -1,111 +0,0 @@
  704. -/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  705. - *
  706. - * This program is free software; you can redistribute it and/or modify
  707. - * it under the terms of the GNU General Public License version 2 and
  708. - * only version 2 as published by the Free Software Foundation.
  709. - *
  710. - * This program is distributed in the hope that it will be useful,
  711. - * but WITHOUT ANY WARRANTY; without even the implied warranty of
  712. - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  713. - * GNU General Public License for more details.
  714. - */
  715. -
  716. -#ifndef SENSORS_ADSP_H
  717. -#define SENSORS_ADSP_H
  718. -
  719. -#include <linux/types.h>
  720. -
  721. -/** Maximum number of segments that may be mapped from DDR to OCMEM */
  722. -#define SNS_OCMEM_MAX_NUM_SEG_V01 16
  723. -
  724. -/** Maximum size of the ocmem_vectors structure */
  725. -#define SNS_OCMEM_MAX_VECTORS_SIZE_V01 512
  726. -
  727. -/* Sensor OCMEM message id */
  728. -
  729. -#define SNS_OCMEM_CANCEL_REQ_V01 0x0000
  730. -#define SNS_OCMEM_CANCEL_RESP_V01 0x0000
  731. -#define SNS_OCMEM_VERSION_REQ_V01 0x0001
  732. -#define SNS_OCMEM_VERSION_RESP_V01 0x0001
  733. -#define SNS_OCMEM_PHYS_ADDR_REQ_V01 0x0002
  734. -#define SNS_OCMEM_PHYS_ADDR_RESP_V01 0x0002
  735. -#define SNS_OCMEM_HAS_CLIENT_IND_V01 0x0002
  736. -#define SNS_OCMEM_BW_VOTE_REQ_V01 0x0003
  737. -#define SNS_OCMEM_BW_VOTE_RESP_V01 0x0003
  738. -#define SNS_OCMEM_BW_VOTE_IND_V01 0x0003
  739. -
  740. -enum {
  741. - SNS_OCMEM_MODULE_KERNEL = 0,
  742. - SNS_OCMEM_MODULE_ADSP
  743. -};
  744. -
  745. -/**
  746. - * Defines the types of response messages
  747. - */
  748. -enum {
  749. - SNS_OCMEM_MSG_TYPE_REQ = 0, /* Request */
  750. - SNS_OCMEM_MSG_TYPE_RESP, /* Response to a request */
  751. - SNS_OCMEM_MSG_TYPE_IND /* Asynchronous indication */
  752. -};
  753. -
  754. -/**
  755. - * The message header. Used in both incoming and outgoing messages
  756. - */
  757. -struct sns_ocmem_hdr_s {
  758. - int32_t msg_id ; /* Message ID, as defined in the IDL */
  759. - uint16_t msg_size; /* Size of message, in bytes */
  760. - uint8_t dst_module; /* Destination module */
  761. - uint8_t src_module; /* Source module */
  762. - uint8_t msg_type; /* The message type */
  763. -} __packed;
  764. -
  765. -struct sns_ocmem_common_resp_s_v01 {
  766. - /* This shall be the first element of every response message */
  767. - uint8_t sns_result_t;
  768. - /**< 0 == SUCCESS; 1 == FAILURE
  769. - A result of FAILURE indicates that that any data contained in the
  770. - response should not be used other than sns_err_t, to determine the
  771. - type of error */
  772. - uint8_t sns_err_t;
  773. - /**< See sns_ocmem_error_e in ocmem_sensors.h */
  774. -};
  775. -
  776. -/* This structure represents a single memory region that must be
  777. -mapped from DDR to OCMEM */
  778. -struct sns_mem_segment_s_v01 {
  779. -
  780. - uint64_t start_address; /* Physical start address of segment */
  781. - uint32_t size; /* Size (in bytes) of this segment */
  782. - uint16_t type; /* 1 == Read only; 2 == Read/Write Data */
  783. -} __packed;
  784. -
  785. -struct sns_ocmem_phys_addr_resp_msg_v01 {
  786. - struct sns_ocmem_common_resp_s_v01 resp; /* response */
  787. - uint32_t segments_len; /* number of elements in segments */
  788. - /* Segments mapped from DDR to OCMEM */
  789. - struct sns_mem_segment_s_v01 segments[SNS_OCMEM_MAX_NUM_SEG_V01];
  790. - uint8_t segments_valid; /* true if segments is being passed */
  791. -} __packed ;
  792. -
  793. -struct sns_ocmem_has_client_ind_msg_v01 {
  794. - uint16_t num_clients; /* Number of active clients on the ADSP */
  795. -} __packed;
  796. -
  797. -struct sns_ocmem_bw_vote_req_msg_v01 {
  798. - uint8_t is_map; /* True if mapping; false if unmapping */
  799. - uint8_t vectors_valid; /* True if vectors is being passed */
  800. - uint32_t vectors_len; /* Number of elements in vectors */
  801. - uint8_t vectors[SNS_OCMEM_MAX_VECTORS_SIZE_V01]; /* vectors */
  802. -} __packed;
  803. -
  804. -struct sns_ocmem_bw_vote_resp_msg_v01 {
  805. - struct sns_ocmem_common_resp_s_v01 resp;
  806. -};
  807. -
  808. -struct sns_ocmem_bw_vote_ind_msg_v01 {
  809. - /* If the ADSP just voted for, or took away its vote for
  810. - OCMEM bandwidth */
  811. - uint8_t is_vote_on;
  812. -} __packed;
  813. -
  814. -#endif /* SENSORS_ADSP_H */
  815. diff --git a/arch/arm/mach-msm/msm_rtb.c b/arch/arm/mach-msm/msm_rtb.c
  816. index bf7d036..75404c8 100644
  817. --- a/arch/arm/mach-msm/msm_rtb.c
  818. +++ b/arch/arm/mach-msm/msm_rtb.c
  819. @@ -1,5 +1,5 @@
  820. /*
  821. - * Copyright (c) 2013, The Linux Foundation. All rights reserved.
  822. + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  823. *
  824. * This program is free software; you can redistribute it and/or modify
  825. * it under the terms of the GNU General Public License version 2 and
  826. @@ -23,7 +23,7 @@
  827. #include <linux/string.h>
  828. #include <linux/atomic.h>
  829. #include <linux/of.h>
  830. -#include <asm/io.h>
  831. +#include <linux/io.h>
  832. #include <asm-generic/sizes.h>
  833. #include <mach/memory.h>
  834. #include <mach/msm_rtb.h>
  835. @@ -38,18 +38,20 @@
  836. /* Write
  837. * 1) 3 bytes sentinel
  838. * 2) 1 bytes of log type
  839. - * 3) 4 bytes of where the caller came from
  840. + * 3) 8 bytes of where the caller came from
  841. * 4) 4 bytes index
  842. - * 4) 4 bytes extra data from the caller
  843. + * 4) 8 bytes extra data from the caller
  844. + * 5) 8 bytes for timestamp
  845. *
  846. - * Total = 16 bytes.
  847. + * Total = 32 bytes.
  848. */
  849. struct msm_rtb_layout {
  850. unsigned char sentinel[3];
  851. unsigned char log_type;
  852. - void *caller;
  853. - unsigned long idx;
  854. - void *data;
  855. + uint32_t idx;
  856. + uint64_t caller;
  857. + uint64_t data;
  858. + uint64_t timestamp;
  859. } __attribute__ ((__packed__));
  860.  
  861.  
  862. @@ -70,7 +72,7 @@ DEFINE_PER_CPU(atomic_t, msm_rtb_idx_cpu);
  863. static atomic_t msm_rtb_idx;
  864. #endif
  865.  
  866. -struct msm_rtb_state msm_rtb = {
  867. +static struct msm_rtb_state msm_rtb = {
  868. .filter = 1 << LOGK_LOGBUF,
  869. .enabled = 0,
  870. };
  871. @@ -109,24 +111,29 @@ static void msm_rtb_write_type(enum logk_event_type log_type,
  872. start->log_type = (char)log_type;
  873. }
  874.  
  875. -static void msm_rtb_write_caller(void *caller, struct msm_rtb_layout *start)
  876. +static void msm_rtb_write_caller(uint64_t caller, struct msm_rtb_layout *start)
  877. {
  878. start->caller = caller;
  879. }
  880.  
  881. -static void msm_rtb_write_idx(unsigned long idx,
  882. +static void msm_rtb_write_idx(uint32_t idx,
  883. struct msm_rtb_layout *start)
  884. {
  885. start->idx = idx;
  886. }
  887.  
  888. -static void msm_rtb_write_data(void *data, struct msm_rtb_layout *start)
  889. +static void msm_rtb_write_data(uint64_t data, struct msm_rtb_layout *start)
  890. {
  891. start->data = data;
  892. }
  893.  
  894. -static void uncached_logk_pc_idx(enum logk_event_type log_type, void *caller,
  895. - void *data, int idx)
  896. +static void msm_rtb_write_timestamp(struct msm_rtb_layout *start)
  897. +{
  898. + start->timestamp = sched_clock();
  899. +}
  900. +
  901. +static void uncached_logk_pc_idx(enum logk_event_type log_type, uint64_t caller,
  902. + uint64_t data, int idx)
  903. {
  904. struct msm_rtb_layout *start;
  905.  
  906. @@ -137,6 +144,7 @@ static void uncached_logk_pc_idx(enum logk_event_type log_type, void *caller,
  907. msm_rtb_write_caller(caller, start);
  908. msm_rtb_write_idx(idx, start);
  909. msm_rtb_write_data(data, start);
  910. + msm_rtb_write_timestamp(start);
  911. mb();
  912.  
  913. return;
  914. @@ -145,13 +153,10 @@ static void uncached_logk_pc_idx(enum logk_event_type log_type, void *caller,
  915. static void uncached_logk_timestamp(int idx)
  916. {
  917. unsigned long long timestamp;
  918. - void *timestamp_upper, *timestamp_lower;
  919. timestamp = sched_clock();
  920. - timestamp_lower = (void *)lower_32_bits(timestamp);
  921. - timestamp_upper = (void *)upper_32_bits(timestamp);
  922. -
  923. - uncached_logk_pc_idx(LOGK_TIMESTAMP|LOGTYPE_NOPC, timestamp_lower,
  924. - timestamp_upper, idx);
  925. + uncached_logk_pc_idx(LOGK_TIMESTAMP|LOGTYPE_NOPC,
  926. + (uint64_t)lower_32_bits(timestamp),
  927. + (uint64_t)upper_32_bits(timestamp), idx);
  928. }
  929.  
  930. #if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
  931. @@ -213,7 +218,8 @@ int notrace uncached_logk_pc(enum logk_event_type log_type, void *caller,
  932.  
  933. i = msm_rtb_get_idx();
  934.  
  935. - uncached_logk_pc_idx(log_type, caller, data, i);
  936. + uncached_logk_pc_idx(log_type, (uint64_t)((unsigned long) caller),
  937. + (uint64_t)((unsigned long) data), i);
  938.  
  939. return 1;
  940. }
  941. @@ -225,7 +231,7 @@ noinline int notrace uncached_logk(enum logk_event_type log_type, void *data)
  942. }
  943. EXPORT_SYMBOL(uncached_logk);
  944.  
  945. -int msm_rtb_probe(struct platform_device *pdev)
  946. +static int msm_rtb_probe(struct platform_device *pdev)
  947. {
  948. struct msm_rtb_platform_data *d = pdev->dev.platform_data;
  949. #if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
  950. @@ -297,7 +303,6 @@ static struct of_device_id msm_match_table[] = {
  951. {.compatible = RTB_COMPAT_STR},
  952. {},
  953. };
  954. -EXPORT_COMPAT(RTB_COMPAT_STR);
  955.  
  956. static struct platform_driver msm_rtb_driver = {
  957. .driver = {
  958. diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils.c b/arch/arm/mach-msm/qdsp6v2/audio_utils.c
  959. index 109e120..7ccc0e3 100644
  960. --- a/arch/arm/mach-msm/qdsp6v2/audio_utils.c
  961. +++ b/arch/arm/mach-msm/qdsp6v2/audio_utils.c
  962. @@ -24,9 +24,9 @@
  963. #include "audio_utils.h"
  964.  
  965. #define MIN_FRAME_SIZE 1536
  966. -#define NUM_FRAMES 5
  967. -#define META_SIZE (sizeof(struct meta_out_dsp))
  968. -#define FRAME_SIZE (1 + ((MIN_FRAME_SIZE + META_SIZE) * NUM_FRAMES))
  969. +#define NUM_FRAMES 5
  970. +#define META_SIZE (sizeof(struct meta_out_dsp))
  971. +#define FRAME_SIZE (1 + ((MIN_FRAME_SIZE + META_SIZE) * NUM_FRAMES))
  972.  
  973. static int audio_in_pause(struct q6audio_in *audio)
  974. {
  975. diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
  976. index 1c42020..99a1863 100644
  977. --- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
  978. +++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
  979. @@ -20,6 +20,7 @@
  980. #include <linux/input.h>
  981. #include <linux/uaccess.h>
  982. #include <linux/time.h>
  983. +#include <linux/kmemleak.h>
  984. #include <asm/mach-types.h>
  985. #include <sound/apr_audio.h>
  986. #include <mach/qdsp6v2/usf.h>
  987. @@ -572,6 +573,9 @@ static int config_xx(struct usf_xx_type *usf_xx, struct us_xx_info_type *config)
  988. if (config->params_data_size > 0) { /* transparent data copy */
  989. usf_xx->encdec_cfg.params = kzalloc(config->params_data_size,
  990. GFP_KERNEL);
  991. + /* False memory leak here - pointer in packed struct *
  992. + * is undetected by kmemleak tool */
  993. + kmemleak_ignore(usf_xx->encdec_cfg.params);
  994. if (usf_xx->encdec_cfg.params == NULL) {
  995. pr_err("%s: params memory alloc[%d] failure\n",
  996. __func__,
  997. diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
  998. index 1a1d587..6f473e2 100644
  999. --- a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
  1000. +++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
  1001. @@ -745,7 +745,7 @@ static uint32_t q6usm_ext2int_format(uint32_t ext_format)
  1002. int_format = US_RAW_FORMAT_V2;
  1003. break;
  1004. case FORMAT_USPROX:
  1005. - int_format = US_PROX_FORMAT_V2;
  1006. + int_format = US_PROX_FORMAT_V4;
  1007. break;
  1008. case FORMAT_USGES_SYNC:
  1009. int_format = US_GES_SYNC_FORMAT;
  1010. diff --git a/arch/arm/mach-msm/qdsp6v2/voice_svc.c b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
  1011. index 5bf86dc..9ea6a4c 100755
  1012. --- a/arch/arm/mach-msm/qdsp6v2/voice_svc.c
  1013. +++ b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
  1014. @@ -26,6 +26,7 @@
  1015. #define DRIVER_NAME "voice_svc"
  1016. #define MINOR_NUMBER 1
  1017. #define APR_MAX_RESPONSE 10
  1018. +#define TIMEOUT_MS 1000
  1019.  
  1020. #define MAX(a, b) ((a) >= (b) ? (a) : (b))
  1021.  
  1022. @@ -450,10 +451,26 @@ static long voice_svc_ioctl(struct file *file, unsigned int cmd,
  1023. } else {
  1024. spin_unlock_irqrestore(&prtd->response_lock,
  1025. spin_flags);
  1026. - wait_event_interruptible(prtd->response_wait,
  1027. - !list_empty(&prtd->response_queue));
  1028. - pr_debug("%s: Interupt recieved for response",
  1029. - __func__);
  1030. + pr_debug("%s: wait for a response\n", __func__);
  1031. +
  1032. + ret = wait_event_interruptible_timeout(
  1033. + prtd->response_wait,
  1034. + !list_empty(&prtd->response_queue),
  1035. + msecs_to_jiffies(TIMEOUT_MS));
  1036. + if (ret == 0) {
  1037. + pr_debug("%s: Read timeout\n", __func__);
  1038. + ret = -ETIMEDOUT;
  1039. + goto done;
  1040. + } else if (ret > 0 &&
  1041. + !list_empty(&prtd->response_queue)) {
  1042. + pr_debug("%s: Interrupt recieved for response\n",
  1043. + __func__);
  1044. + ret = 0;
  1045. + } else if (ret < 0) {
  1046. + pr_debug("%s: Interrupted by SIGNAL %d\n",
  1047. + __func__, ret);
  1048. + goto done;
  1049. + }
  1050. }
  1051. } while(!apr_response);
  1052. break;
  1053. diff --git a/arch/arm/mach-msm/sensors_adsp.c b/arch/arm/mach-msm/sensors_adsp.c
  1054. index 5159da0..80fe32f 100644
  1055. --- a/arch/arm/mach-msm/sensors_adsp.c
  1056. +++ b/arch/arm/mach-msm/sensors_adsp.c
  1057. @@ -1,4 +1,4 @@
  1058. -/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  1059. +/* Copyright (c) 2012-2013, 2015 The Linux Foundation. All rights reserved.
  1060. *
  1061. * This program is free software; you can redistribute it and/or modify
  1062. * it under the terms of the GNU General Public License version 2 and
  1063. @@ -10,1022 +10,30 @@
  1064. * GNU General Public License for more details.
  1065. */
  1066.  
  1067. -#include <linux/workqueue.h>
  1068. -#include <linux/types.h>
  1069. -#include <linux/delay.h>
  1070. -#include <linux/bitops.h>
  1071. -#include <linux/wait.h>
  1072. -#include <linux/sched.h>
  1073. -#include <linux/notifier.h>
  1074. -#include <linux/slab.h>
  1075. +#include <linux/msm_dsps.h>
  1076. #include <linux/module.h>
  1077. #include <linux/init.h>
  1078. +#include <linux/slab.h>
  1079. +#include <linux/platform_device.h>
  1080. #include <linux/cdev.h>
  1081. #include <linux/fs.h>
  1082. -#include <linux/platform_device.h>
  1083. -#include <linux/err.h>
  1084. -#include <linux/io.h>
  1085. -#include <linux/ctype.h>
  1086. #include <linux/of_device.h>
  1087. -#include <linux/msm_dsps.h>
  1088. -#include <linux/uaccess.h>
  1089. -#include <asm/mach-types.h>
  1090. #include <asm/arch_timer.h>
  1091. -#include <mach/subsystem_restart.h>
  1092. -#include <mach/ocmem.h>
  1093. -#include <mach/msm_smd.h>
  1094. -#include <mach/sensors_adsp.h>
  1095. -#include <mach/msm_bus.h>
  1096. -#include <mach/msm_bus_board.h>
  1097. +#include <linux/uaccess.h>
  1098.  
  1099. #define CLASS_NAME "ssc"
  1100. #define DRV_NAME "sensors"
  1101. -#define DRV_VERSION "1.00"
  1102. -
  1103. -#define SNS_OCMEM_SMD_CHANNEL "SENSOR"
  1104. -#define SNS_OCMEM_CLIENT_ID OCMEM_SENSORS
  1105. -#define SNS_OCMEM_SIZE SZ_256K
  1106. -#define SMD_BUF_SIZE 1024
  1107. -#define SNS_TIMEOUT_MS 1000
  1108. -
  1109. -#define SNS_OCMEM_ALLOC_GROW 0x00000001
  1110. -#define SNS_OCMEM_ALLOC_SHRINK 0x00000002
  1111. -#define SNS_OCMEM_MAP_DONE 0x00000004
  1112. -#define SNS_OCMEM_MAP_FAIL 0x00000008
  1113. -#define SNS_OCMEM_UNMAP_DONE 0x00000010
  1114. -#define SNS_OCMEM_UNMAP_FAIL 0x00000020
  1115. -
  1116. -#define DSPS_HAS_CLIENT 0x00000100
  1117. -#define DSPS_HAS_NO_CLIENT 0x00000200
  1118. -#define DSPS_BW_VOTE_ON 0x00000400
  1119. -#define DSPS_BW_VOTE_OFF 0x00000800
  1120. -#define DSPS_PHYS_ADDR_SET 0x00001000
  1121. +#define DRV_VERSION "2.00"
  1122.  
  1123. -/*
  1124. - * Structure contains all state used by the sensors driver
  1125. - */
  1126. struct sns_adsp_control_s {
  1127. - wait_queue_head_t sns_wait;
  1128. - spinlock_t sns_lock;
  1129. - struct workqueue_struct *sns_workqueue;
  1130. - struct work_struct sns_work;
  1131. - struct workqueue_struct *smd_wq;
  1132. - struct work_struct smd_read_work;
  1133. - smd_channel_t *smd_ch;
  1134. - uint32_t sns_ocmem_status;
  1135. - uint32_t mem_segments_size;
  1136. - struct sns_mem_segment_s_v01 mem_segments[SNS_OCMEM_MAX_NUM_SEG_V01];
  1137. - struct ocmem_buf *buf;
  1138. - struct ocmem_map_list map_list;
  1139. - struct ocmem_notifier *ocmem_handle;
  1140. - bool ocmem_enabled;
  1141. - struct notifier_block ocmem_nb;
  1142. - uint32_t sns_ocmem_bus_client;
  1143. - struct platform_device *pdev;
  1144. - void *pil;
  1145. struct class *dev_class;
  1146. dev_t dev_num;
  1147. struct device *dev;
  1148. struct cdev *cdev;
  1149. };
  1150. -
  1151. static struct sns_adsp_control_s sns_ctl;
  1152.  
  1153. /*
  1154. - * All asynchronous responses from the OCMEM driver are received
  1155. - * by this function
  1156. - */
  1157. -int sns_ocmem_drv_cb(struct notifier_block *self,
  1158. - unsigned long action,
  1159. - void *dev)
  1160. -{
  1161. - unsigned long flags;
  1162. -
  1163. - spin_lock_irqsave(&sns_ctl.sns_lock, flags);
  1164. -
  1165. - pr_debug("%s: Received OCMEM callback: action=%li\n",
  1166. - __func__, action);
  1167. -
  1168. - switch (action) {
  1169. - case OCMEM_MAP_DONE:
  1170. - sns_ctl.sns_ocmem_status |= SNS_OCMEM_MAP_DONE;
  1171. - sns_ctl.sns_ocmem_status &= (~OCMEM_MAP_FAIL &
  1172. - ~SNS_OCMEM_UNMAP_DONE &
  1173. - ~SNS_OCMEM_UNMAP_FAIL);
  1174. - break;
  1175. - case OCMEM_MAP_FAIL:
  1176. - sns_ctl.sns_ocmem_status |= SNS_OCMEM_MAP_FAIL;
  1177. - sns_ctl.sns_ocmem_status &= (~OCMEM_MAP_DONE &
  1178. - ~SNS_OCMEM_UNMAP_DONE &
  1179. - ~SNS_OCMEM_UNMAP_FAIL);
  1180. - break;
  1181. - case OCMEM_UNMAP_DONE:
  1182. - sns_ctl.sns_ocmem_status |= SNS_OCMEM_UNMAP_DONE;
  1183. - sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_UNMAP_FAIL &
  1184. - ~SNS_OCMEM_MAP_DONE &
  1185. - ~OCMEM_MAP_FAIL);
  1186. - break;
  1187. - case OCMEM_UNMAP_FAIL:
  1188. - sns_ctl.sns_ocmem_status |= SNS_OCMEM_UNMAP_FAIL;
  1189. - sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_UNMAP_DONE &
  1190. - ~SNS_OCMEM_MAP_DONE &
  1191. - ~OCMEM_MAP_FAIL);
  1192. - break;
  1193. - case OCMEM_ALLOC_GROW:
  1194. - sns_ctl.sns_ocmem_status |= SNS_OCMEM_ALLOC_GROW;
  1195. - sns_ctl.sns_ocmem_status &= ~SNS_OCMEM_ALLOC_SHRINK;
  1196. - break;
  1197. - case OCMEM_ALLOC_SHRINK:
  1198. - sns_ctl.sns_ocmem_status |= SNS_OCMEM_ALLOC_SHRINK;
  1199. - sns_ctl.sns_ocmem_status &= ~SNS_OCMEM_ALLOC_GROW;
  1200. - break;
  1201. - default:
  1202. - pr_err("%s: Unknown action received in OCMEM callback %lu\n",
  1203. - __func__, action);
  1204. - break;
  1205. - }
  1206. -
  1207. - spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
  1208. - wake_up(&sns_ctl.sns_wait);
  1209. -
  1210. - return 0;
  1211. -}
  1212. -
  1213. -/*
  1214. - * Processes messages received through SMD from the ADSP
  1215. - *
  1216. - * @param hdr The message header
  1217. - * @param msg Message pointer
  1218. - *
  1219. - */
  1220. -void sns_ocmem_smd_process(struct sns_ocmem_hdr_s *hdr, void *msg)
  1221. -{
  1222. - unsigned long flags;
  1223. -
  1224. - spin_lock_irqsave(&sns_ctl.sns_lock, flags);
  1225. -
  1226. - pr_debug("%s: Received message from ADSP; id: %i type: %i (%08x)\n",
  1227. - __func__, hdr->msg_id, hdr->msg_type,
  1228. - sns_ctl.sns_ocmem_status);
  1229. -
  1230. - if (hdr->msg_id == SNS_OCMEM_PHYS_ADDR_RESP_V01 &&
  1231. - hdr->msg_type == SNS_OCMEM_MSG_TYPE_RESP) {
  1232. - struct sns_ocmem_phys_addr_resp_msg_v01 *msg_ptr =
  1233. - (struct sns_ocmem_phys_addr_resp_msg_v01 *)msg;
  1234. - pr_debug("%s: Received SNS_OCMEM_PHYS_ADDR_RESP_V01\n",
  1235. - __func__);
  1236. - pr_debug("%s: segments_valid=%d, segments_len=%d\n", __func__,
  1237. - msg_ptr->segments_valid, msg_ptr->segments_len);
  1238. -
  1239. - if (msg_ptr->segments_valid) {
  1240. - sns_ctl.mem_segments_size = msg_ptr->segments_len;
  1241. - memcpy(sns_ctl.mem_segments, msg_ptr->segments,
  1242. - sizeof(struct sns_mem_segment_s_v01) *
  1243. - msg_ptr->segments_len);
  1244. -
  1245. - sns_ctl.sns_ocmem_status |= DSPS_PHYS_ADDR_SET;
  1246. - } else {
  1247. - pr_err("%s: Received invalid segment list\n", __func__);
  1248. - }
  1249. - } else if (hdr->msg_id == SNS_OCMEM_HAS_CLIENT_IND_V01 &&
  1250. - hdr->msg_type == SNS_OCMEM_MSG_TYPE_IND) {
  1251. - struct sns_ocmem_has_client_ind_msg_v01 *msg_ptr =
  1252. - (struct sns_ocmem_has_client_ind_msg_v01 *)msg;
  1253. -
  1254. - pr_debug("%s: Received SNS_OCMEM_HAS_CLIENT_IND_V01\n",
  1255. - __func__);
  1256. - pr_debug("%s: ADSP has %i client(s)\n", __func__,
  1257. - msg_ptr->num_clients);
  1258. - if (msg_ptr->num_clients > 0) {
  1259. - sns_ctl.sns_ocmem_status |= DSPS_HAS_CLIENT;
  1260. - sns_ctl.sns_ocmem_status &= ~DSPS_HAS_NO_CLIENT;
  1261. - } else {
  1262. - sns_ctl.sns_ocmem_status |= DSPS_HAS_NO_CLIENT;
  1263. - sns_ctl.sns_ocmem_status &= ~DSPS_HAS_CLIENT;
  1264. - }
  1265. - } else if (hdr->msg_id == SNS_OCMEM_BW_VOTE_RESP_V01 &&
  1266. - hdr->msg_type == SNS_OCMEM_MSG_TYPE_RESP) {
  1267. - /* no need to handle this response msg, just return */
  1268. - pr_debug("%s: Received SNS_OCMEM_BW_VOTE_RESP_V01\n", __func__);
  1269. - spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
  1270. - return;
  1271. - } else if (hdr->msg_id == SNS_OCMEM_BW_VOTE_IND_V01 &&
  1272. - hdr->msg_type == SNS_OCMEM_MSG_TYPE_IND) {
  1273. - struct sns_ocmem_bw_vote_ind_msg_v01 *msg_ptr =
  1274. - (struct sns_ocmem_bw_vote_ind_msg_v01 *)msg;
  1275. - pr_debug("%s: Received BW_VOTE_IND_V01, is_vote_on=%d\n",
  1276. - __func__, msg_ptr->is_vote_on);
  1277. -
  1278. - if (msg_ptr->is_vote_on) {
  1279. - sns_ctl.sns_ocmem_status |= DSPS_BW_VOTE_ON;
  1280. - sns_ctl.sns_ocmem_status &= ~DSPS_BW_VOTE_OFF;
  1281. - } else {
  1282. - sns_ctl.sns_ocmem_status |= DSPS_BW_VOTE_OFF;
  1283. - sns_ctl.sns_ocmem_status &= ~DSPS_BW_VOTE_ON;
  1284. - }
  1285. - } else {
  1286. - pr_err("%s: Unknown message type received. id: %i; type: %i\n",
  1287. - __func__, hdr->msg_id, hdr->msg_type);
  1288. - }
  1289. -
  1290. - spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
  1291. -
  1292. - wake_up(&sns_ctl.sns_wait);
  1293. -}
  1294. -
  1295. -static void sns_ocmem_smd_read(struct work_struct *ws)
  1296. -{
  1297. - struct smd_channel *ch = sns_ctl.smd_ch;
  1298. - unsigned char *buf = NULL;
  1299. - int sz, len;
  1300. -
  1301. - for (;;) {
  1302. - sz = smd_cur_packet_size(ch);
  1303. - BUG_ON(sz > SMD_BUF_SIZE);
  1304. - len = smd_read_avail(ch);
  1305. - pr_debug("%s: sz=%d, len=%d\n", __func__, sz, len);
  1306. - if (len == 0 || len < sz)
  1307. - break;
  1308. - buf = kzalloc(SMD_BUF_SIZE, GFP_KERNEL);
  1309. - if (buf == NULL) {
  1310. - pr_err("%s: malloc failed", __func__);
  1311. - break;
  1312. - }
  1313. -
  1314. - if (smd_read(ch, buf, sz) != sz) {
  1315. - pr_err("%s: not enough data?!\n", __func__);
  1316. - kfree(buf);
  1317. - continue;
  1318. - }
  1319. -
  1320. - sns_ocmem_smd_process((struct sns_ocmem_hdr_s *)buf,
  1321. - (void *)((char *)buf +
  1322. - sizeof(struct sns_ocmem_hdr_s)));
  1323. -
  1324. - kfree(buf);
  1325. -
  1326. - }
  1327. -}
  1328. -
  1329. -/*
  1330. - * All SMD notifications and messages from Sensors on ADSP are
  1331. - * received by this function
  1332. - *
  1333. - */
  1334. -void sns_ocmem_smd_notify_data(void *data, unsigned int event)
  1335. -{
  1336. - if (event == SMD_EVENT_DATA) {
  1337. - int sz;
  1338. - pr_debug("%s: Received SMD event Data\n", __func__);
  1339. - sz = smd_cur_packet_size(sns_ctl.smd_ch);
  1340. - if ((sz > 0) && (sz <= smd_read_avail(sns_ctl.smd_ch)))
  1341. - queue_work(sns_ctl.smd_wq, &sns_ctl.smd_read_work);
  1342. - } else if (event == SMD_EVENT_OPEN) {
  1343. - pr_debug("%s: Received SMD event Open\n", __func__);
  1344. - } else if (event == SMD_EVENT_CLOSE) {
  1345. - pr_debug("%s: Received SMD event Close\n", __func__);
  1346. - }
  1347. -}
  1348. -
  1349. -static bool sns_ocmem_is_status_set(uint32_t sns_ocmem_status)
  1350. -{
  1351. - unsigned long flags;
  1352. - bool is_set;
  1353. -
  1354. - spin_lock_irqsave(&sns_ctl.sns_lock, flags);
  1355. - is_set = sns_ctl.sns_ocmem_status & sns_ocmem_status;
  1356. - spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
  1357. - return is_set;
  1358. -}
  1359. -
  1360. -/*
  1361. - * Wait for a response from ADSP or OCMEM Driver, timeout if necessary
  1362. - *
  1363. - * @param sns_ocmem_status Status flags to wait for.
  1364. - * @param timeout_sec Seconds to wait before timeout
  1365. - * @param timeout_nsec Nanoseconds to wait. Total timeout = nsec + sec
  1366. - *
  1367. - * @return 0 If any status flag is set at any time prior to a timeout.
  1368. - * 0 if success or timedout ; <0 for failures
  1369. - */
  1370. -static int sns_ocmem_wait(uint32_t sns_ocmem_status,
  1371. - uint32_t timeout_ms)
  1372. -{
  1373. - int err;
  1374. - if (timeout_ms) {
  1375. - err = wait_event_interruptible_timeout(sns_ctl.sns_wait,
  1376. - sns_ocmem_is_status_set(sns_ocmem_status),
  1377. - msecs_to_jiffies(timeout_ms));
  1378. -
  1379. - if (err == 0)
  1380. - pr_err("%s: interruptible_timeout timeout err=%i\n",
  1381. - __func__, err);
  1382. - else if (err < 0)
  1383. - pr_err("%s: interruptible_timeout failed err=%i\n",
  1384. - __func__, err);
  1385. - } else { /* no timeout */
  1386. - err = wait_event_interruptible(sns_ctl.sns_wait,
  1387. - sns_ocmem_is_status_set(sns_ocmem_status));
  1388. - if (err < 0)
  1389. - pr_err("%s: wait_event_interruptible failed err=%i\n",
  1390. - __func__, err);
  1391. - }
  1392. -
  1393. - return err;
  1394. -}
  1395. -
  1396. -/*
  1397. - * Sends a message to the ADSP via SMD.
  1398. - *
  1399. - * @param hdr Specifies message type and other meta data
  1400. - * @param msg_ptr Pointer to the message contents.
  1401. - * Must be freed within this function if no error is returned.
  1402. - *
  1403. - * @return 0 upon success; < 0 upon error
  1404. - */
  1405. -static int
  1406. -sns_ocmem_send_msg(struct sns_ocmem_hdr_s *hdr, void const *msg_ptr)
  1407. -{
  1408. - int rv = 0;
  1409. - int err = 0;
  1410. - void *temp = NULL;
  1411. - int size = 0;
  1412. -
  1413. - if (hdr == NULL) {
  1414. - pr_err("%s: NULL message header\n", __func__);
  1415. - rv = -EINVAL;
  1416. - goto out;
  1417. - }
  1418. -
  1419. - size = sizeof(struct sns_ocmem_hdr_s) + hdr->msg_size;
  1420. - temp = kzalloc(sizeof(struct sns_ocmem_hdr_s) + hdr->msg_size,
  1421. - GFP_KERNEL);
  1422. -
  1423. - if (temp == NULL) {
  1424. - pr_err("%s: allocation failure\n", __func__);
  1425. - rv = -ENOMEM;
  1426. - goto out;
  1427. - }
  1428. -
  1429. - hdr->dst_module = SNS_OCMEM_MODULE_ADSP;
  1430. - hdr->src_module = SNS_OCMEM_MODULE_KERNEL;
  1431. -
  1432. - memcpy(temp, hdr, sizeof(struct sns_ocmem_hdr_s));
  1433. - memcpy((char *)temp + sizeof(struct sns_ocmem_hdr_s),
  1434. - msg_ptr, hdr->msg_size);
  1435. - pr_debug("%s: send msg type: %i size: %i id: %i dst: %i src: %i\n",
  1436. - __func__, hdr->msg_type, hdr->msg_size,
  1437. - hdr->msg_id, hdr->dst_module, hdr->src_module);
  1438. -
  1439. - if (sns_ctl.smd_ch == NULL) {
  1440. - pr_err("%s: null smd_ch\n", __func__);
  1441. - rv = -EINVAL;
  1442. - }
  1443. - err = smd_write(sns_ctl.smd_ch, temp, size);
  1444. - if (err < 0) {
  1445. - pr_err("%s: smd_write failed %i\n", __func__, err);
  1446. - rv = -ECOMM;
  1447. - } else {
  1448. - pr_debug("%s smd_write successful ret=%d\n",
  1449. - __func__, err);
  1450. - }
  1451. -
  1452. - kfree(temp);
  1453. -
  1454. -out:
  1455. - return rv;
  1456. -}
  1457. -
  1458. -/*
  1459. - * Load ADSP Firmware.
  1460. - */
  1461. -
  1462. -static int sns_load_adsp(void)
  1463. -{
  1464. - sns_ctl.pil = subsystem_get("adsp");
  1465. - if (IS_ERR(sns_ctl.pil)) {
  1466. - pr_err("%s: fail to load ADSP firmware\n", __func__);
  1467. - return -ENODEV;
  1468. - }
  1469. -
  1470. - pr_debug("%s: Q6/ADSP image is loaded\n", __func__);
  1471. -
  1472. - return 0;
  1473. -}
  1474. -
  1475. -static int sns_ocmem_platform_data_populate(struct platform_device *pdev)
  1476. -{
  1477. - int ret;
  1478. - struct msm_bus_scale_pdata *sns_ocmem_bus_scale_pdata = NULL;
  1479. - struct msm_bus_vectors *sns_ocmem_bus_vectors = NULL;
  1480. - struct msm_bus_paths *ocmem_sns_bus_paths = NULL;
  1481. - u32 val;
  1482. -
  1483. - if (!pdev->dev.of_node) {
  1484. - pr_err("%s: device tree information missing\n", __func__);
  1485. - return -ENODEV;
  1486. - }
  1487. -
  1488. - sns_ocmem_bus_vectors = kzalloc(sizeof(struct msm_bus_vectors),
  1489. - GFP_KERNEL);
  1490. - if (!sns_ocmem_bus_vectors) {
  1491. - dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
  1492. - return -ENOMEM;
  1493. - }
  1494. -
  1495. - ret = of_property_read_u32(pdev->dev.of_node,
  1496. - "qcom,src-id", &val);
  1497. - if (ret) {
  1498. - dev_err(&pdev->dev, "%s: qcom,src-id missing in DT node\n",
  1499. - __func__);
  1500. - goto fail1;
  1501. - }
  1502. - sns_ocmem_bus_vectors->src = val;
  1503. - ret = of_property_read_u32(pdev->dev.of_node,
  1504. - "qcom,dst-id", &val);
  1505. - if (ret) {
  1506. - dev_err(&pdev->dev, "%s: qcom,dst-id missing in DT node\n",
  1507. - __func__);
  1508. - goto fail1;
  1509. - }
  1510. - sns_ocmem_bus_vectors->dst = val;
  1511. - ret = of_property_read_u32(pdev->dev.of_node,
  1512. - "qcom,ab", &val);
  1513. - if (ret) {
  1514. - dev_err(&pdev->dev, "%s: qcom,ab missing in DT node\n",
  1515. - __func__);
  1516. - goto fail1;
  1517. - }
  1518. - sns_ocmem_bus_vectors->ab = val;
  1519. - ret = of_property_read_u32(pdev->dev.of_node,
  1520. - "qcom,ib", &val);
  1521. - if (ret) {
  1522. - dev_err(&pdev->dev, "%s: qcom,ib missing in DT node\n",
  1523. - __func__);
  1524. - goto fail1;
  1525. - }
  1526. - sns_ocmem_bus_vectors->ib = val;
  1527. - ocmem_sns_bus_paths = kzalloc(sizeof(struct msm_bus_paths),
  1528. - GFP_KERNEL);
  1529. -
  1530. - if (!ocmem_sns_bus_paths) {
  1531. - dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
  1532. - goto fail1;
  1533. - }
  1534. - ocmem_sns_bus_paths->num_paths = 1;
  1535. - ocmem_sns_bus_paths->vectors = sns_ocmem_bus_vectors;
  1536. -
  1537. - sns_ocmem_bus_scale_pdata =
  1538. - kzalloc(sizeof(struct msm_bus_scale_pdata), GFP_KERNEL);
  1539. - if (!sns_ocmem_bus_scale_pdata) {
  1540. - dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
  1541. - goto fail2;
  1542. - }
  1543. -
  1544. - sns_ocmem_bus_scale_pdata->usecase = ocmem_sns_bus_paths;
  1545. - sns_ocmem_bus_scale_pdata->num_usecases = 1;
  1546. - sns_ocmem_bus_scale_pdata->name = "sensors-ocmem";
  1547. -
  1548. - dev_set_drvdata(&pdev->dev, sns_ocmem_bus_scale_pdata);
  1549. - return ret;
  1550. -
  1551. -fail2:
  1552. - kfree(ocmem_sns_bus_paths);
  1553. -fail1:
  1554. - kfree(sns_ocmem_bus_vectors);
  1555. - return ret;
  1556. -}
  1557. -
  1558. -
  1559. -/*
  1560. - * Initialize all sensors ocmem driver data fields and register with the
  1561. - * ocmem driver.
  1562. - *
  1563. - * @return 0 upon success; < 0 upon error
  1564. - */
  1565. -static int sns_ocmem_init(void)
  1566. -{
  1567. - int i, err, ret;
  1568. - struct sns_ocmem_hdr_s addr_req_hdr;
  1569. - struct msm_bus_scale_pdata *sns_ocmem_bus_scale_pdata = NULL;
  1570. -
  1571. - /* register for OCMEM callback */
  1572. - sns_ctl.ocmem_handle =
  1573. - ocmem_notifier_register(SNS_OCMEM_CLIENT_ID,
  1574. - &sns_ctl.ocmem_nb);
  1575. - if (sns_ctl.ocmem_handle == NULL) {
  1576. - pr_err("OCMEM notifier registration failed\n");
  1577. - return -EFAULT;
  1578. - }
  1579. -
  1580. - /* populate platform data */
  1581. - ret = sns_ocmem_platform_data_populate(sns_ctl.pdev);
  1582. - if (ret) {
  1583. - dev_err(&sns_ctl.pdev->dev,
  1584. - "%s: failed to populate platform data, rc = %d\n",
  1585. - __func__, ret);
  1586. - return -ENODEV;
  1587. - }
  1588. - sns_ocmem_bus_scale_pdata = dev_get_drvdata(&sns_ctl.pdev->dev);
  1589. -
  1590. - sns_ctl.sns_ocmem_bus_client =
  1591. - msm_bus_scale_register_client(sns_ocmem_bus_scale_pdata);
  1592. -
  1593. - if (!sns_ctl.sns_ocmem_bus_client) {
  1594. - pr_err("%s: msm_bus_scale_register_client() failed\n",
  1595. - __func__);
  1596. - return -EFAULT;
  1597. - }
  1598. -
  1599. - /* load ADSP first */
  1600. - if (sns_load_adsp() != 0) {
  1601. - pr_err("%s: sns_load_adsp failed\n", __func__);
  1602. - return -EFAULT;
  1603. - }
  1604. -
  1605. - /*
  1606. - * wait before opening the SMD channel from the kernel to ensure
  1607. - * the channel has already been opened on the ADSP side
  1608. - */
  1609. - msleep(1000);
  1610. -
  1611. - err = smd_named_open_on_edge(SNS_OCMEM_SMD_CHANNEL,
  1612. - SMD_APPS_QDSP,
  1613. - &sns_ctl.smd_ch,
  1614. - NULL,
  1615. - sns_ocmem_smd_notify_data);
  1616. - if (err != 0) {
  1617. - pr_err("%s: smd_named_open_on_edge failed %i\n", __func__, err);
  1618. - return -EFAULT;
  1619. - }
  1620. -
  1621. - pr_debug("%s: SMD channel opened successfully!\n", __func__);
  1622. - /* wait for the channel ready before writing data */
  1623. - msleep(1000);
  1624. - addr_req_hdr.msg_id = SNS_OCMEM_PHYS_ADDR_REQ_V01;
  1625. - addr_req_hdr.msg_type = SNS_OCMEM_MSG_TYPE_REQ;
  1626. - addr_req_hdr.msg_size = 0;
  1627. -
  1628. - err = sns_ocmem_send_msg(&addr_req_hdr, NULL);
  1629. - if (err != 0) {
  1630. - pr_err("%s: sns_ocmem_send_msg failed %i\n", __func__, err);
  1631. - return -ECOMM;
  1632. - }
  1633. -
  1634. - err = sns_ocmem_wait(DSPS_PHYS_ADDR_SET, 0);
  1635. - if (err != 0) {
  1636. - pr_err("%s: sns_ocmem_wait failed %i\n", __func__, err);
  1637. - return -EFAULT;
  1638. - }
  1639. -
  1640. - sns_ctl.map_list.num_chunks = sns_ctl.mem_segments_size;
  1641. - for (i = 0; i < sns_ctl.mem_segments_size; i++) {
  1642. - sns_ctl.map_list.chunks[i].ro =
  1643. - sns_ctl.mem_segments[i].type == 1 ? true : false;
  1644. - sns_ctl.map_list.chunks[i].ddr_paddr =
  1645. - sns_ctl.mem_segments[i].start_address;
  1646. - sns_ctl.map_list.chunks[i].size =
  1647. - sns_ctl.mem_segments[i].size;
  1648. -
  1649. - pr_debug("%s: chunks[%d]: ro=%d, ddr_paddr=0x%lx, size=%li",
  1650. - __func__, i,
  1651. - sns_ctl.map_list.chunks[i].ro,
  1652. - sns_ctl.map_list.chunks[i].ddr_paddr,
  1653. - sns_ctl.map_list.chunks[i].size);
  1654. - }
  1655. -
  1656. - return 0;
  1657. -}
  1658. -
  1659. -/*
  1660. - * Unmaps memory in ocmem back to DDR, indicates to the ADSP its completion,
  1661. - * and waits for it to finish removing its bandwidth vote.
  1662. - */
  1663. -static void sns_ocmem_unmap(void)
  1664. -{
  1665. - unsigned long flags;
  1666. - int err = 0;
  1667. -
  1668. - ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
  1669. - sns_ctl.buf, OCMEM_ON);
  1670. -
  1671. - spin_lock_irqsave(&sns_ctl.sns_lock, flags);
  1672. - sns_ctl.sns_ocmem_status &= (~SNS_OCMEM_UNMAP_FAIL &
  1673. - ~SNS_OCMEM_UNMAP_DONE);
  1674. - spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
  1675. -
  1676. - err = ocmem_unmap(SNS_OCMEM_CLIENT_ID,
  1677. - sns_ctl.buf,
  1678. - &sns_ctl.map_list);
  1679. -
  1680. - if (err != 0) {
  1681. - pr_err("ocmem_unmap failed %i\n", err);
  1682. - } else {
  1683. - err = sns_ocmem_wait(SNS_OCMEM_UNMAP_DONE |
  1684. - SNS_OCMEM_UNMAP_FAIL, 0);
  1685. -
  1686. - if (err == 0) {
  1687. - if (sns_ocmem_is_status_set(SNS_OCMEM_UNMAP_DONE))
  1688. - pr_debug("%s: OCMEM_UNMAP_DONE\n", __func__);
  1689. - else if (sns_ocmem_is_status_set(
  1690. - SNS_OCMEM_UNMAP_FAIL)) {
  1691. - pr_err("%s: OCMEM_UNMAP_FAIL\n", __func__);
  1692. - BUG_ON(true);
  1693. - } else
  1694. - pr_err("%s: status flag not set\n", __func__);
  1695. - } else {
  1696. - pr_err("%s: sns_ocmem_wait failed %i\n",
  1697. - __func__, err);
  1698. - }
  1699. - }
  1700. -
  1701. - ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
  1702. - sns_ctl.buf, OCMEM_OFF);
  1703. -}
  1704. -
  1705. -/*
  1706. - * Waits for allocation to succeed. This may take considerable time if the device
  1707. - * is presently in a high-power use case.
  1708. - *
  1709. - * @return 0 on success; < 0 upon error
  1710. - */
  1711. -static int sns_ocmem_wait_for_alloc(void)
  1712. -{
  1713. - int err = 0;
  1714. -
  1715. - err = sns_ocmem_wait(SNS_OCMEM_ALLOC_GROW |
  1716. - DSPS_HAS_NO_CLIENT, 0);
  1717. -
  1718. - if (err == 0) {
  1719. - if (sns_ocmem_is_status_set(DSPS_HAS_NO_CLIENT)) {
  1720. - pr_debug("%s: Lost client while waiting for GROW\n",
  1721. - __func__);
  1722. - ocmem_free(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
  1723. - sns_ctl.buf = NULL;
  1724. - return -EPIPE;
  1725. - }
  1726. - } else {
  1727. - pr_err("sns_ocmem_wait failed %i\n", err);
  1728. - return -EFAULT;
  1729. - }
  1730. -
  1731. - return 0;
  1732. -}
  1733. -
  1734. -/*
  1735. - * Kicks off the mapping of memory from DDR to ocmem. Waits for the process
  1736. - * to complete, then indicates so to the ADSP.
  1737. - *
  1738. - * @return 0: Success; < 0: Other error
  1739. - */
  1740. -static int sns_ocmem_map(void)
  1741. -{
  1742. - int err = 0;
  1743. - unsigned long flags;
  1744. -
  1745. - spin_lock_irqsave(&sns_ctl.sns_lock, flags);
  1746. - sns_ctl.sns_ocmem_status &=
  1747. - (~SNS_OCMEM_MAP_FAIL & ~SNS_OCMEM_MAP_DONE);
  1748. - spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
  1749. -
  1750. - /* vote for ocmem bus bandwidth */
  1751. - err = msm_bus_scale_client_update_request(
  1752. - sns_ctl.sns_ocmem_bus_client,
  1753. - 0);
  1754. - if (err)
  1755. - pr_err("%s: failed to vote for bus bandwidth\n", __func__);
  1756. -
  1757. - err = ocmem_map(SNS_OCMEM_CLIENT_ID,
  1758. - sns_ctl.buf,
  1759. - &sns_ctl.map_list);
  1760. -
  1761. - if (err != 0) {
  1762. - pr_debug("ocmem_map failed %i\n", err);
  1763. - ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
  1764. - sns_ctl.buf, OCMEM_OFF);
  1765. - ocmem_free(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
  1766. - sns_ctl.buf = NULL;
  1767. - } else {
  1768. - err = sns_ocmem_wait(SNS_OCMEM_ALLOC_SHRINK |
  1769. - DSPS_HAS_NO_CLIENT |
  1770. - SNS_OCMEM_MAP_DONE |
  1771. - SNS_OCMEM_MAP_FAIL, 0);
  1772. -
  1773. - if (err == 0) {
  1774. - if (sns_ocmem_is_status_set(SNS_OCMEM_MAP_DONE))
  1775. - pr_debug("%s: OCMEM mapping DONE\n", __func__);
  1776. - else if (sns_ocmem_is_status_set(DSPS_HAS_NO_CLIENT)) {
  1777. - pr_debug("%s: Lost client while waiting for MAP\n",
  1778. - __func__);
  1779. - sns_ocmem_unmap();
  1780. - ocmem_free(SNS_OCMEM_CLIENT_ID,
  1781. - sns_ctl.buf);
  1782. - sns_ctl.buf = NULL;
  1783. - err = -EPIPE;
  1784. - } else if (sns_ocmem_is_status_set(
  1785. - SNS_OCMEM_ALLOC_SHRINK)) {
  1786. - pr_debug("%s: SHRINK while wait for MAP\n",
  1787. - __func__);
  1788. - sns_ocmem_unmap();
  1789. - err = ocmem_shrink(SNS_OCMEM_CLIENT_ID,
  1790. - sns_ctl.buf, 0);
  1791. - BUG_ON(err != 0);
  1792. - err = -EFAULT;
  1793. - } else if (sns_ocmem_is_status_set(
  1794. - SNS_OCMEM_MAP_FAIL)) {
  1795. - pr_err("%s: OCMEM mapping fails\n", __func__);
  1796. - ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
  1797. - sns_ctl.buf,
  1798. - OCMEM_OFF);
  1799. - ocmem_free(SNS_OCMEM_CLIENT_ID,
  1800. - sns_ctl.buf);
  1801. - sns_ctl.buf = NULL;
  1802. - } else
  1803. - pr_err("%s: status flag not set\n", __func__);
  1804. - } else {
  1805. - pr_err("sns_ocmem_wait failed %i\n", err);
  1806. - }
  1807. - }
  1808. -
  1809. - return err;
  1810. -}
  1811. -
  1812. -/*
  1813. - * Allocates memory in ocmem and maps to it from DDR.
  1814. - *
  1815. - * @return 0 upon success; <0 upon failure;
  1816. - */
  1817. -static int sns_ocmem_alloc(void)
  1818. -{
  1819. - int err = 0;
  1820. - unsigned long flags;
  1821. -
  1822. - if (sns_ctl.buf == NULL) {
  1823. - spin_lock_irqsave(&sns_ctl.sns_lock, flags);
  1824. - sns_ctl.sns_ocmem_status &= ~SNS_OCMEM_ALLOC_GROW &
  1825. - ~SNS_OCMEM_ALLOC_SHRINK;
  1826. - spin_unlock_irqrestore(&sns_ctl.sns_lock, flags);
  1827. - sns_ctl.buf = ocmem_allocate_nb(SNS_OCMEM_CLIENT_ID,
  1828. - SNS_OCMEM_SIZE);
  1829. -
  1830. - if (sns_ctl.buf == NULL) {
  1831. - pr_err("ocmem_allocate_nb returned NULL\n");
  1832. - sns_ctl.ocmem_enabled = false;
  1833. - err = -EFAULT;
  1834. - } else if (sns_ctl.buf->len != 0 &&
  1835. - SNS_OCMEM_SIZE > sns_ctl.buf->len) {
  1836. - pr_err("ocmem_allocate_nb: invalid len %li, Req: %i)\n",
  1837. - sns_ctl.buf->len, SNS_OCMEM_SIZE);
  1838. - sns_ctl.ocmem_enabled = false;
  1839. - err = -EFAULT;
  1840. - }
  1841. - }
  1842. -
  1843. - pr_debug("%s OCMEM buf=%lx, buffer len=%li\n", __func__,
  1844. - sns_ctl.buf->addr, sns_ctl.buf->len);
  1845. -
  1846. - while (sns_ctl.ocmem_enabled) {
  1847. - if (sns_ctl.buf->len == 0) {
  1848. - pr_debug("%s: Waiting for memory allocation\n",
  1849. - __func__);
  1850. - err = sns_ocmem_wait_for_alloc();
  1851. - if (err == -EPIPE) {
  1852. - pr_debug("%s:Lost client while wait for alloc\n",
  1853. - __func__);
  1854. - break;
  1855. - } else if (err != 0) {
  1856. - pr_err("sns_ocmem_wait_for_alloc failed %i\n",
  1857. - err);
  1858. - break;
  1859. - }
  1860. - }
  1861. -
  1862. - ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
  1863. - sns_ctl.buf,
  1864. - OCMEM_ON);
  1865. -
  1866. - err = sns_ocmem_map();
  1867. -
  1868. - if (err == -EPIPE) {
  1869. - pr_debug("%s: Lost client while waiting for mapping\n",
  1870. - __func__);
  1871. - break;
  1872. - } else if (err < 0) {
  1873. - pr_debug("%s: Mapping failed, will try again\n",
  1874. - __func__);
  1875. - break;
  1876. - } else if (err == 0) {
  1877. - pr_debug("%s: Mapping finished\n", __func__);
  1878. - break;
  1879. - }
  1880. - }
  1881. -
  1882. - return err;
  1883. -}
  1884. -
  1885. -/*
  1886. - * Indicate to the ADSP that unmapping has completed, and wait for the response
  1887. - * that its bandwidth vote has been removed.
  1888. - *
  1889. - * @return 0 Upon success; < 0 upon error
  1890. - */
  1891. -static int sns_ocmem_unmap_send(void)
  1892. -{
  1893. - int err;
  1894. - struct sns_ocmem_hdr_s msg_hdr;
  1895. - struct sns_ocmem_bw_vote_req_msg_v01 msg;
  1896. -
  1897. - memset(&msg, 0, sizeof(struct sns_ocmem_bw_vote_req_msg_v01));
  1898. -
  1899. - msg_hdr.msg_id = SNS_OCMEM_BW_VOTE_REQ_V01;
  1900. - msg_hdr.msg_type = SNS_OCMEM_MSG_TYPE_REQ;
  1901. - msg_hdr.msg_size = sizeof(struct sns_ocmem_bw_vote_req_msg_v01);
  1902. - msg.is_map = 0;
  1903. - msg.vectors_valid = 0;
  1904. - msg.vectors_len = 0;
  1905. -
  1906. - pr_debug("%s: send bw_vote OFF\n", __func__);
  1907. - err = sns_ocmem_send_msg(&msg_hdr, &msg);
  1908. - if (err != 0) {
  1909. - pr_err("%s: sns_ocmem_send_msg failed %i\n",
  1910. - __func__, err);
  1911. - } else {
  1912. - err = sns_ocmem_wait(DSPS_BW_VOTE_OFF, 0);
  1913. - if (err != 0)
  1914. - pr_err("%s: sns_ocmem_wait failed %i\n", __func__, err);
  1915. - }
  1916. -
  1917. - return err;
  1918. -}
  1919. -
  1920. -/*
  1921. - * Indicate to the ADSP that mapping has completed, and wait for the response
  1922. - * that its bandwidth vote has been made.
  1923. - *
  1924. - * @return 0 Upon success; < 0 upon error
  1925. - */
  1926. -static int sns_ocmem_map_send(void)
  1927. -{
  1928. - int err;
  1929. - struct sns_ocmem_hdr_s msg_hdr;
  1930. - struct sns_ocmem_bw_vote_req_msg_v01 msg;
  1931. - struct ocmem_vectors *vectors;
  1932. -
  1933. - memset(&msg, 0, sizeof(struct sns_ocmem_bw_vote_req_msg_v01));
  1934. -
  1935. - msg_hdr.msg_id = SNS_OCMEM_BW_VOTE_REQ_V01;
  1936. - msg_hdr.msg_type = SNS_OCMEM_MSG_TYPE_REQ;
  1937. - msg_hdr.msg_size = sizeof(struct sns_ocmem_bw_vote_req_msg_v01);
  1938. - msg.is_map = 1;
  1939. -
  1940. - vectors = ocmem_get_vectors(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
  1941. - if ((vectors != NULL)) {
  1942. - memcpy(&msg.vectors, vectors, sizeof(*vectors));
  1943. - /* TODO: set vectors_len */
  1944. - msg.vectors_valid = true;
  1945. - msg.vectors_len = 0;
  1946. - }
  1947. -
  1948. - pr_debug("%s: send bw_vote ON\n", __func__);
  1949. - err = sns_ocmem_send_msg(&msg_hdr, &msg);
  1950. - if (err != 0) {
  1951. - pr_err("%s: sns_ocmem_send_msg failed %i\n", __func__, err);
  1952. - } else {
  1953. - err = sns_ocmem_wait(DSPS_BW_VOTE_ON |
  1954. - SNS_OCMEM_ALLOC_SHRINK, 0);
  1955. - if (err != 0)
  1956. - pr_err("%s: sns_ocmem_wait failed %i\n", __func__, err);
  1957. - }
  1958. -
  1959. - return err;
  1960. -}
  1961. -
  1962. -/*
  1963. - * Perform the necessary operations to clean up OCMEM after being notified that
  1964. - * there is no longer a client; if sensors was evicted; or if some error
  1965. - * has occurred.
  1966. - *
  1967. - * @param[i] do_free Whether the memory should be freed (true) or if shrink
  1968. - * should be called instead (false).
  1969. - */
  1970. -static void sns_ocmem_evicted(bool do_free)
  1971. -{
  1972. - int err = 0;
  1973. -
  1974. - sns_ocmem_unmap();
  1975. - if (do_free) {
  1976. - ocmem_free(SNS_OCMEM_CLIENT_ID, sns_ctl.buf);
  1977. - sns_ctl.buf = NULL;
  1978. - } else {
  1979. - err = ocmem_shrink(SNS_OCMEM_CLIENT_ID, sns_ctl.buf, 0);
  1980. - BUG_ON(err != 0);
  1981. - }
  1982. -
  1983. - err = sns_ocmem_unmap_send();
  1984. - if (err != 0)
  1985. - pr_err("sns_ocmem_unmap_send failed %i\n", err);
  1986. -}
  1987. -
  1988. -/*
  1989. - * After mapping has completed and the ADSP has reacted appropriately, wait
  1990. - * for a shrink command or word from the ADSP that it no longer has a client.
  1991. - *
  1992. - * @return 0 If no clients; < 0 upon error;
  1993. - */
  1994. -static int sns_ocmem_map_done(void)
  1995. -{
  1996. - int err = 0;
  1997. - unsigned long flags;
  1998. -
  1999. - err = sns_ocmem_map_send();
  2000. - if (err != 0) {
  2001. - pr_err("sns_ocmem_map_send failed %i\n", err);
  2002. - sns_ocmem_evicted(true);
  2003. - } else {
  2004. - ocmem_set_power_state(SNS_OCMEM_CLIENT_ID,
  2005. - sns_ctl.buf, OCMEM_OFF);
  2006. -
  2007. - pr_debug("%s: Waiting for shrink or 'no client' updates\n",
  2008. - __func__);
  2009. - err = sns_ocmem_wait(DSPS_HAS_NO_CLIENT |
  2010. - SNS_OCMEM_ALLOC_SHRINK, 0);
  2011. - if (err == 0) {
  2012. - if (sns_ocmem_is_status_set(DSPS_HAS_NO_CLIENT)) {
  2013. - pr_debug("%s: No longer have a client\n",
  2014. - __func__);
  2015. - sns_ocmem_evicted(true);
  2016. - } else if (sns_ocmem_is_status_set(
  2017. - SNS_OCMEM_ALLOC_SHRINK)) {
  2018. - pr_debug("%s: Received SHRINK\n", __func__);
  2019. - sns_ocmem_evicted(false);
  2020. -
  2021. - spin_lock_irqsave(&sns_ctl.sns_lock, flags);
  2022. - sns_ctl.sns_ocmem_status &=
  2023. - ~SNS_OCMEM_ALLOC_SHRINK;
  2024. - spin_unlock_irqrestore(&sns_ctl.sns_lock,
  2025. - flags);
  2026. - err = -EFAULT;
  2027. - }
  2028. - } else {
  2029. - pr_err("sns_ocmem_wait failed %i\n", err);
  2030. - }
  2031. - }
  2032. -
  2033. - return err;
  2034. -}
  2035. -
  2036. -/*
  2037. - * Main function.
  2038. - * Initializes sensors ocmem feature, and waits for an ADSP client.
  2039. - */
  2040. -static void sns_ocmem_main(struct work_struct *work)
  2041. -{
  2042. - int err = 0;
  2043. - pr_debug("%s\n", __func__);
  2044. -
  2045. - err = sns_ocmem_init();
  2046. - if (err != 0) {
  2047. - pr_err("%s: sns_ocmem_init failed %i\n", __func__, err);
  2048. - return;
  2049. - }
  2050. -
  2051. - while (true) {
  2052. - pr_debug("%s: Waiting for sensor client\n", __func__);
  2053. - if (sns_ocmem_is_status_set(DSPS_HAS_CLIENT) ||
  2054. - !sns_ocmem_wait(DSPS_HAS_CLIENT, 0)) {
  2055. - pr_debug("%s: DSPS_HAS_CLIENT\n", __func__);
  2056. -
  2057. - err = sns_ocmem_alloc();
  2058. - if (err != 0) {
  2059. - pr_err("sns_ocmem_alloc failed %i\n", err);
  2060. - return;
  2061. - } else {
  2062. - err = sns_ocmem_map_done();
  2063. - if (err != 0) {
  2064. - pr_err("sns_ocmem_map_done failed %i",
  2065. - err);
  2066. - return;
  2067. - }
  2068. - }
  2069. - }
  2070. - }
  2071. -
  2072. - ocmem_notifier_unregister(sns_ctl.ocmem_handle,
  2073. - &sns_ctl.ocmem_nb);
  2074. -}
  2075. -
  2076. -static int sensors_adsp_open(struct inode *ip, struct file *fp)
  2077. -{
  2078. - int ret = 0;
  2079. - return ret;
  2080. -}
  2081. -
  2082. -static int sensors_adsp_release(struct inode *inode, struct file *file)
  2083. -{
  2084. - return 0;
  2085. -}
  2086. -
  2087. -/*
  2088. * Read QTimer clock ticks and scale down to 32KHz clock as used
  2089. * in DSPS
  2090. */
  2091. @@ -1047,9 +55,16 @@ static u32 sns_read_qtimer(void)
  2092. return (u32)val;
  2093. }
  2094.  
  2095. -/*
  2096. - * IO Control - handle commands from client.
  2097. - */
  2098. +static int sensors_adsp_open(struct inode *ip, struct file *fp)
  2099. +{
  2100. + return 0;
  2101. +}
  2102. +
  2103. +static int sensors_adsp_release(struct inode *inode, struct file *file)
  2104. +{
  2105. + return 0;
  2106. +}
  2107. +
  2108. static long sensors_adsp_ioctl(struct file *file,
  2109. unsigned int cmd, unsigned long arg)
  2110. {
  2111. @@ -1070,41 +85,18 @@ static long sensors_adsp_ioctl(struct file *file,
  2112. return ret;
  2113. }
  2114.  
  2115. -/*
  2116. - * platform driver
  2117. - */
  2118. const struct file_operations sensors_adsp_fops = {
  2119. .owner = THIS_MODULE,
  2120. .open = sensors_adsp_open,
  2121. .release = sensors_adsp_release,
  2122. - .unlocked_ioctl = sensors_adsp_ioctl,
  2123. + .unlocked_ioctl = sensors_adsp_ioctl
  2124. };
  2125. -#ifdef CONFIG_ADSP_FACTORY
  2126. - struct class* get_adsp_sensor_class( void )
  2127. -{
  2128.  
  2129. -pr_err(" %s:",__func__);
  2130. -if (sns_ctl.dev_class == NULL) {
  2131. - sns_ctl.dev_class = class_create(THIS_MODULE, DRV_NAME);
  2132. - if (sns_ctl.dev_class == NULL) {
  2133. - pr_err("%s: class_create fail.\n", __func__);
  2134. - }
  2135. -}
  2136. -
  2137. -return sns_ctl.dev_class;
  2138. -}
  2139. -
  2140. -EXPORT_SYMBOL(get_adsp_sensor_class);
  2141. -#endif
  2142. static int sensors_adsp_probe(struct platform_device *pdev)
  2143. {
  2144. int ret = 0;
  2145. -#ifdef CONFIG_ADSP_FACTORY
  2146. - pr_err("%s:++",__func__);
  2147. - if (sns_ctl.dev_class == NULL) {
  2148. - sns_ctl.dev_class = class_create(THIS_MODULE, DRV_NAME);
  2149. - }
  2150. -#endif
  2151. +
  2152. + sns_ctl.dev_class = class_create(THIS_MODULE, CLASS_NAME);
  2153. if (sns_ctl.dev_class == NULL) {
  2154. pr_err("%s: class_create fail.\n", __func__);
  2155. goto res_err;
  2156. @@ -1138,38 +130,6 @@ static int sensors_adsp_probe(struct platform_device *pdev)
  2157. goto cdev_add_err;
  2158. }
  2159.  
  2160. - sns_ctl.sns_workqueue =
  2161. - alloc_workqueue("sns_ocmem", WQ_NON_REENTRANT, 0);
  2162. - if (!sns_ctl.sns_workqueue) {
  2163. - pr_err("%s: Failed to create work queue\n",
  2164. - __func__);
  2165. - goto cdev_add_err;
  2166. - }
  2167. -
  2168. - sns_ctl.smd_wq =
  2169. - alloc_workqueue("smd_wq", WQ_NON_REENTRANT, 0);
  2170. - if (!sns_ctl.smd_wq) {
  2171. - pr_err("%s: Failed to create work queue\n",
  2172. - __func__);
  2173. - goto cdev_add_err;
  2174. - }
  2175. -
  2176. - init_waitqueue_head(&sns_ctl.sns_wait);
  2177. - spin_lock_init(&sns_ctl.sns_lock);
  2178. -
  2179. - sns_ctl.ocmem_handle = NULL;
  2180. - sns_ctl.buf = NULL;
  2181. - sns_ctl.sns_ocmem_status = 0;
  2182. - sns_ctl.ocmem_enabled = true;
  2183. - sns_ctl.ocmem_nb.notifier_call = sns_ocmem_drv_cb;
  2184. - sns_ctl.smd_ch = NULL;
  2185. - sns_ctl.pdev = pdev;
  2186. -
  2187. - INIT_WORK(&sns_ctl.sns_work, sns_ocmem_main);
  2188. - INIT_WORK(&sns_ctl.smd_read_work, sns_ocmem_smd_read);
  2189. -
  2190. - queue_work(sns_ctl.sns_workqueue, &sns_ctl.sns_work);
  2191. -
  2192. return 0;
  2193.  
  2194. cdev_add_err:
  2195. @@ -1186,20 +146,6 @@ res_err:
  2196.  
  2197. static int sensors_adsp_remove(struct platform_device *pdev)
  2198. {
  2199. - struct msm_bus_scale_pdata *sns_ocmem_bus_scale_pdata = NULL;
  2200. -
  2201. - sns_ocmem_bus_scale_pdata = (struct msm_bus_scale_pdata *)
  2202. - dev_get_drvdata(&pdev->dev);
  2203. -
  2204. - kfree(sns_ocmem_bus_scale_pdata->usecase->vectors);
  2205. - kfree(sns_ocmem_bus_scale_pdata->usecase);
  2206. - kfree(sns_ocmem_bus_scale_pdata);
  2207. -
  2208. - ocmem_notifier_unregister(sns_ctl.ocmem_handle,
  2209. - &sns_ctl.ocmem_nb);
  2210. - destroy_workqueue(sns_ctl.sns_workqueue);
  2211. - destroy_workqueue(sns_ctl.smd_wq);
  2212. -
  2213. cdev_del(sns_ctl.cdev);
  2214. kfree(sns_ctl.cdev);
  2215. sns_ctl.cdev = NULL;
  2216. @@ -1211,12 +157,10 @@ static int sensors_adsp_remove(struct platform_device *pdev)
  2217. }
  2218.  
  2219. static const struct of_device_id msm_adsp_sensors_dt_match[] = {
  2220. - {.compatible = "qcom,msm-adsp-sensors"},
  2221. - {}
  2222. + {.compatible = "qcom,msm-adsp-sensors"}
  2223. };
  2224. MODULE_DEVICE_TABLE(of, msm_adsp_sensors_dt_match);
  2225.  
  2226. -
  2227. static struct platform_driver sensors_adsp_driver = {
  2228. .driver = {
  2229. .name = "sensors-adsp",
  2230. @@ -1227,16 +171,12 @@ static struct platform_driver sensors_adsp_driver = {
  2231. .remove = sensors_adsp_remove,
  2232. };
  2233.  
  2234. -/*
  2235. - * Module Init.
  2236. - */
  2237. -static int sensors_adsp_init(void)
  2238. +static int __init sensors_adsp_init(void)
  2239. {
  2240. int rc;
  2241. - pr_debug("%s driver version %s.\n", DRV_NAME, DRV_VERSION);
  2242.  
  2243. + pr_debug("%s driver version %s.\n", DRV_NAME, DRV_VERSION);
  2244. rc = platform_driver_register(&sensors_adsp_driver);
  2245. -
  2246. if (rc) {
  2247. pr_err("%s: Failed to register sensors adsp driver\n",
  2248. __func__);
  2249. @@ -1246,16 +186,12 @@ static int sensors_adsp_init(void)
  2250. return 0;
  2251. }
  2252.  
  2253. -/*
  2254. - * Module Exit.
  2255. - */
  2256. -static void sensors_adsp_exit(void)
  2257. +static void __exit sensors_adsp_exit(void)
  2258. {
  2259. platform_driver_unregister(&sensors_adsp_driver);
  2260. }
  2261.  
  2262. module_init(sensors_adsp_init);
  2263. module_exit(sensors_adsp_exit);
  2264. -
  2265. MODULE_LICENSE("GPL v2");
  2266. MODULE_DESCRIPTION("Sensors ADSP driver");
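
The hunks above leave sensors-adsp as a thin platform driver and annotate its module entry points with __init/__exit. A minimal sketch of that registration idiom is below; the driver name and all symbol names are placeholders, not taken from the patch.

#include <linux/module.h>
#include <linux/platform_device.h>

/* Placeholder driver; "example-adsp" is not a real device name. */
static struct platform_driver example_adsp_driver = {
	.driver = {
		.name = "example-adsp",
		.owner = THIS_MODULE,
	},
};

/*
 * __init/__exit mark these routines as discardable once module load
 * (or unload) has finished, which is the annotation the patch adds.
 */
static int __init example_adsp_init(void)
{
	return platform_driver_register(&example_adsp_driver);
}

static void __exit example_adsp_exit(void)
{
	platform_driver_unregister(&example_adsp_driver);
}

module_init(example_adsp_init);
module_exit(example_adsp_exit);
MODULE_LICENSE("GPL v2");
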
  2267. diff --git a/arch/arm/mach-msm/smd_init_dt.c b/arch/arm/mach-msm/smd_init_dt.c
  2268. index 640656c..1766a68 100644
  2269. --- a/arch/arm/mach-msm/smd_init_dt.c
  2270. +++ b/arch/arm/mach-msm/smd_init_dt.c
  2271. @@ -126,7 +126,7 @@ static int msm_smsm_probe(struct platform_device *pdev)
  2272.  
  2273. ret = request_irq(irq_line,
  2274. private_irq->irq_handler,
  2275. - IRQF_TRIGGER_RISING,
  2276. + IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
  2277. "smsm_dev",
  2278. NULL);
  2279. if (ret < 0) {
  2280. @@ -160,7 +160,6 @@ static int msm_smd_probe(struct platform_device *pdev)
  2281. uint32_t irq_offset;
  2282. uint32_t irq_bitmask;
  2283. uint32_t irq_line;
  2284. - unsigned long irq_flags = IRQF_TRIGGER_RISING;
  2285. const char *pilstr;
  2286. struct interrupt_config_item *private_irq;
  2287. struct device_node *node;
  2288. @@ -222,11 +221,6 @@ static int msm_smd_probe(struct platform_device *pdev)
  2289. if (pilstr)
  2290. SMD_DBG("%s: %s = %s", __func__, key, pilstr);
  2291.  
  2292. - key = "qcom,irq-no-suspend";
  2293. - ret = of_property_read_bool(node, key);
  2294. - if (ret)
  2295. - irq_flags |= IRQF_NO_SUSPEND;
  2296. -
  2297. private_intr_config = smd_get_intr_config(edge);
  2298. if (!private_intr_config) {
  2299. pr_err("%s: invalid edge\n", __func__);
  2300. @@ -242,7 +236,7 @@ static int msm_smd_probe(struct platform_device *pdev)
  2301.  
  2302. ret = request_irq(irq_line,
  2303. private_irq->irq_handler,
  2304. - irq_flags,
  2305. + IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
  2306. "smd_dev",
  2307. NULL);
  2308. if (ret < 0) {
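
In smd_init_dt.c the optional qcom,irq-no-suspend device-tree flag is dropped and both the SMSM and SMD interrupts are now requested with IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND unconditionally. A minimal sketch of that request_irq pattern follows; the handler and the IRQ-number parameter are placeholders (the real code passes private_irq->irq_handler).

#include <linux/interrupt.h>

/* Placeholder ISR used only for this sketch. */
static irqreturn_t example_smd_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_smd_irq(unsigned int irq_line)
{
	/*
	 * Rising-edge triggered and kept enabled across system suspend,
	 * matching the flags now passed unconditionally above.
	 */
	return request_irq(irq_line, example_smd_isr,
			   IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
			   "smd_dev", NULL);
}
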
  2309. diff --git a/arch/arm/mach-msm/smd_rpcrouter.c b/arch/arm/mach-msm/smd_rpcrouter.c
  2310. index ff68d81..931ed58 100644
  2311. --- a/arch/arm/mach-msm/smd_rpcrouter.c
  2312. +++ b/arch/arm/mach-msm/smd_rpcrouter.c
  2313. @@ -1,7 +1,7 @@
  2314. /* arch/arm/mach-msm/smd_rpcrouter.c
  2315. *
  2316. * Copyright (C) 2007 Google, Inc.
  2317. - * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
  2318. + * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
  2319. * Author: San Mehat <[email protected]>
  2320. *
  2321. * This software is licensed under the terms of the GNU General Public
  2322. @@ -646,7 +646,7 @@ struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
  2323. int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
  2324. {
  2325. int rc;
  2326. - union rr_control_msg msg;
  2327. + union rr_control_msg msg = { 0 };
  2328. struct msm_rpc_reply *reply, *reply_tmp;
  2329. unsigned long flags;
  2330. struct rpcrouter_xprt_info *xprt_info;
  2331. @@ -781,7 +781,7 @@ static void handle_server_restart(struct rr_server *server,
  2332. static int process_control_msg(struct rpcrouter_xprt_info *xprt_info,
  2333. union rr_control_msg *msg, int len)
  2334. {
  2335. - union rr_control_msg ctl;
  2336. + union rr_control_msg ctl = { 0 };
  2337. struct rr_server *server;
  2338. struct rr_remote_endpoint *r_ept;
  2339. int rc = 0;
  2340. @@ -1212,7 +1212,7 @@ packet_complete:
  2341. done:
  2342.  
  2343. if (hdr.confirm_rx) {
  2344. - union rr_control_msg msg;
  2345. + union rr_control_msg msg = { 0 };
  2346.  
  2347. msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
  2348. msg.cli.pid = hdr.dst_pid;
  2349. @@ -2071,7 +2071,7 @@ int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
  2350. uint32_t prog, uint32_t vers)
  2351. {
  2352. int rc;
  2353. - union rr_control_msg msg;
  2354. + union rr_control_msg msg = { 0 };
  2355. struct rr_server *server;
  2356. struct rpcrouter_xprt_info *xprt_info;
  2357.  
  2358. @@ -2152,7 +2152,7 @@ int msm_rpc_get_curr_pkt_size(struct msm_rpc_endpoint *ept)
  2359. static int msm_rpcrouter_close(void)
  2360. {
  2361. struct rpcrouter_xprt_info *xprt_info;
  2362. - union rr_control_msg ctl;
  2363. + union rr_control_msg ctl = { 0 };
  2364.  
  2365. ctl.cmd = RPCROUTER_CTRL_CMD_BYE;
  2366. mutex_lock(&xprt_info_list_lock);
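
The smd_rpcrouter.c hunks zero-initialize every on-stack union rr_control_msg with "= { 0 }" so that members left unset are not transmitted with stale stack contents. The sketch below shows the same idea with an illustrative union; the type and field names are stand-ins, not the driver's real definitions.

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative stand-in for union rr_control_msg; the real layout differs. */
union example_ctrl_msg {
	u32 cmd;
	struct {
		u32 cmd;
		u32 pid;
		u32 cid;
	} cli;
};

static void example_build_msg(union example_ctrl_msg *msg)
{
	/*
	 * Clearing the whole union first serves the same purpose as the
	 * "= { 0 }" initializers added above: fields that are never
	 * assigned go out as zeroes rather than leftover stack bytes.
	 */
	memset(msg, 0, sizeof(*msg));
	msg->cmd = 7; /* e.g. a BYE/RESUME_TX command code in the real driver */
}
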
  2367. diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
  2368. index c1a9784..72156bb 100644
  2369. --- a/arch/arm/vfp/entry.S
  2370. +++ b/arch/arm/vfp/entry.S
  2371. @@ -25,7 +25,6 @@ ENTRY(do_vfp)
  2372. add r11, r4, #1 @ increment it
  2373. str r11, [r10, #TI_PREEMPT]
  2374. #endif
  2375. - enable_irq
  2376. str r2, [sp, #S_PC] @ update regs->ARM_pc for Thumb 2 case
  2377. ldr r4, .LCvfp
  2378. ldr r11, [r10, #TI_CPU] @ CPU number
  2379. diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
  2380. index 0857a5f..871864f 100644
  2381. --- a/drivers/char/adsprpc.c
  2382. +++ b/drivers/char/adsprpc.c
  2383. @@ -43,7 +43,6 @@
  2384. #define RPC_HASH_BITS 5
  2385. #define RPC_HASH_SZ (1 << RPC_HASH_BITS)
  2386. #define BALIGN 32
  2387. -#define NUM_CHANNELS 1
  2388.  
  2389. #define LOCK_MMAP(kernel)\
  2390. do {\
  2391. @@ -140,16 +139,15 @@ struct smq_invoke_ctx {
  2392. struct hlist_node hn;
  2393. struct completion work;
  2394. int retval;
  2395. - int cid;
  2396. int pid;
  2397. remote_arg_t *pra;
  2398. remote_arg_t *rpra;
  2399. struct fastrpc_buf obuf;
  2400. struct fastrpc_buf *abufs;
  2401. struct fastrpc_device *dev;
  2402. - struct fastrpc_apps *apps;
  2403. - int* fds;
  2404. - struct ion_handle** handles;
  2405. + struct fastrpc_apps *apps;
  2406. + int *fds;
  2407. + struct ion_handle **handles;
  2408. int nbufs;
  2409. bool smmu;
  2410. uint32_t sc;
  2411. @@ -168,24 +166,20 @@ struct fastrpc_smmu {
  2412. bool enabled;
  2413. };
  2414.  
  2415. -struct fastrpc_channel_context {
  2416. - smd_channel_t *chan;
  2417. - struct device *dev;
  2418. - struct completion work;
  2419. - struct fastrpc_smmu smmu;
  2420. - struct kref kref;
  2421. -};
  2422. -
  2423. struct fastrpc_apps {
  2424. - struct fastrpc_channel_context channel[NUM_CHANNELS];
  2425. + smd_channel_t *chan;
  2426. struct smq_context_list clst;
  2427. + struct completion work;
  2428. struct ion_client *iclient;
  2429. struct cdev cdev;
  2430. struct class *class;
  2431. + struct device *dev;
  2432. + struct fastrpc_smmu smmu;
  2433. struct mutex smd_mutex;
  2434. dev_t dev_no;
  2435. spinlock_t wrlock;
  2436. spinlock_t hlock;
  2437. + struct kref kref;
  2438. struct hlist_head htbl[RPC_HASH_SZ];
  2439. };
  2440.  
  2441. @@ -203,7 +197,6 @@ struct file_data {
  2442. spinlock_t hlock;
  2443. struct hlist_head hlst;
  2444. uint32_t mode;
  2445. - int cid;
  2446. };
  2447.  
  2448. struct fastrpc_device {
  2449. @@ -212,32 +205,16 @@ struct fastrpc_device {
  2450. struct fastrpc_buf buf;
  2451. };
  2452.  
  2453. -struct fastrpc_channel_info {
  2454. - char *name;
  2455. - char *node;
  2456. - char *group;
  2457. - int channel;
  2458. -};
  2459. -
  2460. static struct fastrpc_apps gfa;
  2461.  
  2462. -static const struct fastrpc_channel_info gcinfo[NUM_CHANNELS] = {
  2463. - {
  2464. - .name = "adsprpc-smd",
  2465. - .node = "qcom,msm-audio-ion",
  2466. - .group = "lpass_audio",
  2467. - .channel = SMD_APPS_QDSP,
  2468. - }
  2469. -};
  2470. -
  2471. -static void free_mem(struct fastrpc_buf *buf, int cid)
  2472. +static void free_mem(struct fastrpc_buf *buf)
  2473. {
  2474. struct fastrpc_apps *me = &gfa;
  2475.  
  2476. if (!IS_ERR_OR_NULL(buf->handle)) {
  2477. - if (me->channel[cid].smmu.enabled && buf->phys) {
  2478. + if (me->smmu.enabled && buf->phys) {
  2479. ion_unmap_iommu(me->iclient, buf->handle,
  2480. - me->channel[cid].smmu.domain_id, 0);
  2481. + me->smmu.domain_id, 0);
  2482. buf->phys = 0;
  2483. }
  2484. if (!IS_ERR_OR_NULL(buf->virt)) {
  2485. @@ -249,13 +226,13 @@ static void free_mem(struct fastrpc_buf *buf, int cid)
  2486. }
  2487. }
  2488.  
  2489. -static void free_map(struct fastrpc_mmap *map, int cid)
  2490. +static void free_map(struct fastrpc_mmap *map)
  2491. {
  2492. struct fastrpc_apps *me = &gfa;
  2493. if (!IS_ERR_OR_NULL(map->handle)) {
  2494. - if (me->channel[cid].smmu.enabled && map->phys) {
  2495. + if (me->smmu.enabled && map->phys) {
  2496. ion_unmap_iommu(me->iclient, map->handle,
  2497. - me->channel[cid].smmu.domain_id, 0);
  2498. + me->smmu.domain_id, 0);
  2499. map->phys = 0;
  2500. }
  2501. if (!IS_ERR_OR_NULL(map->virt)) {
  2502. @@ -267,7 +244,7 @@ static void free_map(struct fastrpc_mmap *map, int cid)
  2503. map->handle = 0;
  2504. }
  2505.  
  2506. -static int alloc_mem(struct fastrpc_buf *buf, int cid)
  2507. +static int alloc_mem(struct fastrpc_buf *buf)
  2508. {
  2509. struct fastrpc_apps *me = &gfa;
  2510. struct ion_client *clnt = gfa.iclient;
  2511. @@ -278,7 +255,7 @@ static int alloc_mem(struct fastrpc_buf *buf, int cid)
  2512. buf->handle = 0;
  2513. buf->virt = 0;
  2514. buf->phys = 0;
  2515. - heap = me->channel[cid].smmu.enabled ? ION_HEAP(ION_IOMMU_HEAP_ID) :
  2516. + heap = me->smmu.enabled ? ION_HEAP(ION_IOMMU_HEAP_ID) :
  2517. ION_HEAP(ION_ADSP_HEAP_ID) | ION_HEAP(ION_AUDIO_HEAP_ID);
  2518. buf->handle = ion_alloc(clnt, buf->size, SZ_4K, heap, ION_FLAG_CACHED);
  2519. VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
  2520. @@ -288,11 +265,11 @@ static int alloc_mem(struct fastrpc_buf *buf, int cid)
  2521. VERIFY(err, 0 == IS_ERR_OR_NULL(buf->virt));
  2522. if (err)
  2523. goto bail;
  2524. - if (me->channel[cid].smmu.enabled) {
  2525. + if (me->smmu.enabled) {
  2526. len = buf->size;
  2527. VERIFY(err, 0 == ion_map_iommu(clnt, buf->handle,
  2528. - me->channel[cid].smmu.domain_id, 0,
  2529. - SZ_4K, 0, &buf->phys, &len, 0, 0));
  2530. + me->smmu.domain_id, 0, SZ_4K, 0,
  2531. + &buf->phys, &len, 0, 0));
  2532. if (err)
  2533. goto bail;
  2534. } else {
  2535. @@ -303,35 +280,13 @@ static int alloc_mem(struct fastrpc_buf *buf, int cid)
  2536. }
  2537. bail:
  2538. if (err && !IS_ERR_OR_NULL(buf->handle))
  2539. - free_mem(buf, cid);
  2540. + free_mem(buf);
  2541. return err;
  2542. }
  2543.  
  2544. -static void context_list_ctor(struct smq_context_list *me)
  2545. -{
  2546. - INIT_HLIST_HEAD(&me->interrupted);
  2547. - INIT_HLIST_HEAD(&me->pending);
  2548. - spin_lock_init(&me->hlock);
  2549. -}
  2550. -
  2551. -static void context_free(struct smq_invoke_ctx *ctx, bool lock);
  2552. -
  2553. -static void context_list_dtor(struct fastrpc_apps *me, struct smq_context_list *clst) {
  2554. - struct smq_invoke_ctx *ictx = 0;
  2555. - struct hlist_node *pos, *n;
  2556. - spin_lock(&clst->hlock);
  2557. - hlist_for_each_entry_safe(ictx, pos, n, &clst->interrupted, hn) {
  2558. - context_free(ictx, 0);
  2559. - }
  2560. - hlist_for_each_entry_safe(ictx, pos, n, &clst->pending, hn) {
  2561. - context_free(ictx, 0);
  2562. - }
  2563. - spin_unlock(&clst->hlock);
  2564. -}
  2565. -
  2566. static int context_restore_interrupted(struct fastrpc_apps *me,
  2567. struct fastrpc_ioctl_invoke_fd *invokefd,
  2568. - int cid, struct smq_invoke_ctx **po)
  2569. + struct smq_invoke_ctx **po)
  2570. {
  2571. int err = 0;
  2572. struct smq_invoke_ctx *ctx = 0, *ictx = 0;
  2573. @@ -339,8 +294,8 @@ static int context_restore_interrupted(struct fastrpc_apps *me,
  2574. struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
  2575. spin_lock(&me->clst.hlock);
  2576. hlist_for_each_entry_safe(ictx, pos, n, &me->clst.interrupted, hn) {
  2577. - if(ictx->pid == current->pid) {
  2578. - if(invoke->sc != ictx->sc || ictx->cid != cid)
  2579. + if (ictx->pid == current->pid) {
  2580. + if (invoke->sc != ictx->sc)
  2581. err = -1;
  2582. else {
  2583. ctx = ictx;
  2584. @@ -351,15 +306,13 @@ static int context_restore_interrupted(struct fastrpc_apps *me,
  2585. }
  2586. }
  2587. spin_unlock(&me->clst.hlock);
  2588. - if(ctx) {
  2589. + if (ctx)
  2590. *po = ctx;
  2591. - }
  2592. return err;
  2593. }
  2594.  
  2595. static int context_alloc(struct fastrpc_apps *me, uint32_t kernel,
  2596. struct fastrpc_ioctl_invoke_fd *invokefd,
  2597. - int cid,
  2598. struct smq_invoke_ctx **po)
  2599. {
  2600. int err = 0, bufs, size = 0;
  2601. @@ -381,10 +334,10 @@ static int context_alloc(struct fastrpc_apps *me, uint32_t kernel,
  2602. goto bail;
  2603.  
  2604. INIT_HLIST_NODE(&ctx->hn);
  2605. - ctx->pra = (remote_arg_t*)(&ctx[1]);
  2606. - ctx->fds = invokefd->fds == 0 ? 0 : (int*)(&ctx->pra[bufs]);
  2607. - ctx->handles = invokefd->fds == 0 ? 0 : (struct ion_handle**)(&ctx->fds[bufs]);
  2608. -
  2609. + ctx->pra = (remote_arg_t *)(&ctx[1]);
  2610. + ctx->fds = invokefd->fds == 0 ? 0 : (int *)(&ctx->pra[bufs]);
  2611. + ctx->handles = invokefd->fds == 0 ? 0 :
  2612. + (struct ion_handle **)(&ctx->fds[bufs]);
  2613. if (!kernel) {
  2614. VERIFY(err, 0 == copy_from_user(ctx->pra, invoke->pra,
  2615. bufs * sizeof(*ctx->pra)));
  2616. @@ -407,7 +360,6 @@ static int context_alloc(struct fastrpc_apps *me, uint32_t kernel,
  2617. }
  2618. ctx->sc = invoke->sc;
  2619. ctx->retval = -1;
  2620. - ctx->cid = cid;
  2621. ctx->pid = current->pid;
  2622. ctx->apps = me;
  2623. init_completion(&ctx->work);
  2624. @@ -417,7 +369,7 @@ static int context_alloc(struct fastrpc_apps *me, uint32_t kernel,
  2625.  
  2626. *po = ctx;
  2627. bail:
  2628. - if(ctx && err)
  2629. + if (ctx && err)
  2630. kfree(ctx);
  2631. return err;
  2632. }
  2633. @@ -436,72 +388,93 @@ static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev);
  2634. static void context_free(struct smq_invoke_ctx *ctx, bool lock)
  2635. {
  2636. struct smq_context_list *clst = &ctx->apps->clst;
  2637. - struct fastrpc_apps *apps = ctx->apps;
  2638. + struct fastrpc_apps *apps = ctx->apps;
  2639. + struct ion_client *clnt = apps->iclient;
  2640. + struct fastrpc_smmu *smmu = &apps->smmu;
  2641. struct fastrpc_buf *b;
  2642. int i, bufs;
  2643. if (ctx->smmu) {
  2644. - bufs = REMOTE_SCALARS_INBUFS(ctx->sc) + REMOTE_SCALARS_OUTBUFS(ctx->sc);
  2645. + bufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
  2646. + REMOTE_SCALARS_OUTBUFS(ctx->sc);
  2647. if (ctx->fds) {
  2648. for (i = 0; i < bufs; i++)
  2649. if (!IS_ERR_OR_NULL(ctx->handles[i])) {
  2650. - ion_unmap_iommu(apps->iclient, ctx->handles[i],
  2651. - apps->channel[ctx->cid].smmu.domain_id,
  2652. - 0);
  2653. - ion_free(apps->iclient, ctx->handles[i]);
  2654. + ion_unmap_iommu(clnt, ctx->handles[i],
  2655. + smmu->domain_id, 0);
  2656. + ion_free(clnt, ctx->handles[i]);
  2657. }
  2658. }
  2659. - iommu_detach_group(apps->channel[ctx->cid].smmu.domain,
  2660. - apps->channel[ctx->cid].smmu.group);
  2661. + iommu_detach_group(smmu->domain, smmu->group);
  2662. }
  2663. for (i = 0, b = ctx->abufs; i < ctx->nbufs; ++i, ++b)
  2664. - free_mem(b, ctx->cid);
  2665. -
  2666. + free_mem(b);
  2667. +
  2668. kfree(ctx->abufs);
  2669. if (ctx->dev) {
  2670. add_dev(apps, ctx->dev);
  2671. if (ctx->obuf.handle != ctx->dev->buf.handle)
  2672. - free_mem(&ctx->obuf, ctx->cid);
  2673. + free_mem(&ctx->obuf);
  2674. }
  2675. - if(lock) {
  2676. + if (lock)
  2677. spin_lock(&clst->hlock);
  2678. - }
  2679. hlist_del(&ctx->hn);
  2680. - if(lock) {
  2681. + if (lock)
  2682. spin_unlock(&clst->hlock);
  2683. - }
  2684. kfree(ctx);
  2685. }
  2686.  
  2687. -static void context_notify_user(struct smq_invoke_ctx *me, int retval)
  2688. +static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
  2689. {
  2690. - me->retval = retval;
  2691. - complete(&me->work);
  2692. + ctx->retval = retval;
  2693. + complete(&ctx->work);
  2694. }
  2695.  
  2696. -static void context_notify_all_users(struct smq_context_list *me, int cid)
  2697. +static void context_notify_all_users(struct smq_context_list *me)
  2698. {
  2699. struct smq_invoke_ctx *ictx = 0;
  2700. struct hlist_node *pos, *n;
  2701. spin_lock(&me->hlock);
  2702. hlist_for_each_entry_safe(ictx, pos, n, &me->pending, hn) {
  2703. - if(ictx->cid == cid) {
  2704. - complete(&ictx->work);
  2705. - }
  2706. + complete(&ictx->work);
  2707. }
  2708. hlist_for_each_entry_safe(ictx, pos, n, &me->interrupted, hn) {
  2709. - if(ictx->cid == cid) {
  2710. - complete(&ictx->work);
  2711. - }
  2712. + complete(&ictx->work);
  2713. }
  2714. spin_unlock(&me->hlock);
  2715.  
  2716. }
  2717.  
  2718. -static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2719. - struct fastrpc_buf *ibuf, struct fastrpc_buf *obuf, int cid)
  2720. +static void context_list_ctor(struct smq_context_list *me)
  2721. {
  2722. + INIT_HLIST_HEAD(&me->interrupted);
  2723. + INIT_HLIST_HEAD(&me->pending);
  2724. + spin_lock_init(&me->hlock);
  2725. +}
  2726. +
  2727. +static void context_list_dtor(struct fastrpc_apps *me,
  2728. + struct smq_context_list *clst)
  2729. +{
  2730. + struct smq_invoke_ctx *ictx = 0;
  2731. + struct hlist_node *pos, *n;
  2732. + spin_lock(&clst->hlock);
  2733. + hlist_for_each_entry_safe(ictx, pos, n, &clst->interrupted, hn) {
  2734. + context_free(ictx, 0);
  2735. + }
  2736. + hlist_for_each_entry_safe(ictx, pos, n, &clst->pending, hn) {
  2737. + context_free(ictx, 0);
  2738. + }
  2739. + spin_unlock(&clst->hlock);
  2740. +}
  2741. +
  2742. +static int get_page_list(uint32_t kernel, struct smq_invoke_ctx *ctx)
  2743. +{
  2744. + struct fastrpc_apps *me = &gfa;
  2745. struct smq_phy_page *pgstart, *pages;
  2746. struct smq_invoke_buf *list;
  2747. + struct fastrpc_buf *ibuf = &ctx->dev->buf;
  2748. + struct fastrpc_buf *obuf = &ctx->obuf;
  2749. + remote_arg_t *pra = ctx->pra;
  2750. + uint32_t sc = ctx->sc;
  2751. int i, rlen, err = 0;
  2752. int inbufs = REMOTE_SCALARS_INBUFS(sc);
  2753. int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
  2754. @@ -516,7 +489,7 @@ static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2755. if (rlen < 0) {
  2756. rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
  2757. obuf->size += buf_page_size(rlen);
  2758. - VERIFY(err, 0 == alloc_mem(obuf, cid));
  2759. + VERIFY(err, 0 == alloc_mem(obuf));
  2760. if (err)
  2761. goto bail;
  2762. goto retry;
  2763. @@ -537,11 +510,21 @@ static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2764. continue;
  2765. buf = pra[i].buf.pv;
  2766. num = buf_num_pages(buf, len);
  2767. - if (!kernel)
  2768. - list[i].num = buf_get_pages(buf, len, num,
  2769. - i >= inbufs, pages, rlen / sizeof(*pages));
  2770. - else
  2771. - list[i].num = 0;
  2772. + if (!kernel) {
  2773. + if (me->smmu.enabled) {
  2774. + VERIFY(err, 0 != access_ok(i >= inbufs ?
  2775. + VERIFY_WRITE : VERIFY_READ,
  2776. + (void __user *)buf, len));
  2777. + if (err)
  2778. + goto bail;
  2779. + if (ctx->fds && (ctx->fds[i] >= 0))
  2780. + list[i].num = 1;
  2781. + } else {
  2782. + list[i].num = buf_get_pages(buf, len, num,
  2783. + i >= inbufs, pages,
  2784. + rlen / sizeof(*pages));
  2785. + }
  2786. + }
  2787. VERIFY(err, list[i].num >= 0);
  2788. if (err)
  2789. goto bail;
  2790. @@ -553,9 +536,9 @@ static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2791. pages = pages + 1;
  2792. } else {
  2793. if (obuf->handle != ibuf->handle)
  2794. - free_mem(obuf, cid);
  2795. + free_mem(obuf);
  2796. obuf->size += buf_page_size(sizeof(*pages));
  2797. - VERIFY(err, 0 == alloc_mem(obuf, cid));
  2798. + VERIFY(err, 0 == alloc_mem(obuf));
  2799. if (err)
  2800. goto bail;
  2801. goto retry;
  2802. @@ -565,24 +548,28 @@ static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2803. obuf->used = obuf->size - rlen;
  2804. bail:
  2805. if (err && (obuf->handle != ibuf->handle))
  2806. - free_mem(obuf, cid);
  2807. + free_mem(obuf);
  2808. UNLOCK_MMAP(kernel);
  2809. return err;
  2810. }
  2811.  
  2812. -static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2813. - remote_arg_t *rpra, remote_arg_t *upra,
  2814. - struct fastrpc_buf *ibuf, struct fastrpc_buf **abufs,
  2815. - int *nbufs, int *fds, struct ion_handle **handles, int cid)
  2816. +static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
  2817. + remote_arg_t *upra)
  2818. {
  2819. struct fastrpc_apps *me = &gfa;
  2820. struct smq_invoke_buf *list;
  2821. - struct fastrpc_buf *pbuf = ibuf, *obufs = 0;
  2822. + struct fastrpc_buf *pbuf = &ctx->obuf, *obufs = 0;
  2823. struct smq_phy_page *pages;
  2824. + struct vm_area_struct *vma;
  2825. + struct ion_handle **handles = ctx->handles;
  2826. void *args;
  2827. + remote_arg_t *pra = ctx->pra;
  2828. + remote_arg_t *rpra = ctx->rpra;
  2829. + uint32_t sc = ctx->sc, start;
  2830. int i, rlen, size, used, inh, bufs = 0, err = 0;
  2831. int inbufs = REMOTE_SCALARS_INBUFS(sc);
  2832. int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
  2833. + int *fds = ctx->fds, idx, num;
  2834. unsigned long len;
  2835. ion_phys_addr_t iova;
  2836.  
  2837. @@ -596,21 +583,29 @@ static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2838. rpra[i].buf.len = pra[i].buf.len;
  2839. if (!rpra[i].buf.len)
  2840. continue;
  2841. - if (me->channel[cid].smmu.enabled && fds && (fds[i] >= 0)) {
  2842. + if (me->smmu.enabled && fds && (fds[i] >= 0)) {
  2843. + start = buf_page_start(pra[i].buf.pv);
  2844. len = buf_page_size(pra[i].buf.len);
  2845. + num = buf_num_pages(pra[i].buf.pv, pra[i].buf.len);
  2846. + idx = list[i].pgidx;
  2847. handles[i] = ion_import_dma_buf(me->iclient, fds[i]);
  2848. VERIFY(err, 0 == IS_ERR_OR_NULL(handles[i]));
  2849. if (err)
  2850. goto bail;
  2851. VERIFY(err, 0 == ion_map_iommu(me->iclient, handles[i],
  2852. - me->channel[cid].smmu.domain_id,
  2853. - 0, SZ_4K, 0, &iova, &len, 0, 0));
  2854. + me->smmu.domain_id, 0, SZ_4K, 0,
  2855. + &iova, &len, 0, 0));
  2856. + if (err)
  2857. + goto bail;
  2858. + VERIFY(err, (num << PAGE_SHIFT) <= len);
  2859. + if (err)
  2860. + goto bail;
  2861. + VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
  2862. if (err)
  2863. goto bail;
  2864. rpra[i].buf.pv = pra[i].buf.pv;
  2865. - list[i].num = 1;
  2866. - pages[list[i].pgidx].addr = iova;
  2867. - pages[list[i].pgidx].size = len;
  2868. + pages[idx].addr = iova + (start - vma->vm_start);
  2869. + pages[idx].size = num << PAGE_SHIFT;
  2870. continue;
  2871. } else if (list[i].num) {
  2872. rpra[i].buf.pv = pra[i].buf.pv;
  2873. @@ -627,7 +622,7 @@ static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2874. pbuf = obufs + bufs;
  2875. pbuf->size = buf_num_pages(0, pra[i].buf.len) *
  2876. PAGE_SIZE;
  2877. - VERIFY(err, 0 == alloc_mem(pbuf, cid));
  2878. + VERIFY(err, 0 == alloc_mem(pbuf));
  2879. if (err)
  2880. goto bail;
  2881. bufs++;
  2882. @@ -636,7 +631,7 @@ static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2883. }
  2884. list[i].num = 1;
  2885. pages[list[i].pgidx].addr =
  2886. - buf_page_start((void *)((uint32_t)pbuf->phys +
  2887. + buf_page_start((void *)(pbuf->phys +
  2888. (pbuf->size - rlen)));
  2889. pages[list[i].pgidx].size =
  2890. buf_page_size(pra[i].buf.len);
  2891. @@ -674,8 +669,8 @@ static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
  2892. }
  2893. dmac_flush_range(rpra, (char *)rpra + used);
  2894. bail:
  2895. - *abufs = obufs;
  2896. - *nbufs = bufs;
  2897. + ctx->abufs = obufs;
  2898. + ctx->nbufs = bufs;
  2899. return err;
  2900. }
  2901.  
  2902. @@ -774,7 +769,7 @@ static int fastrpc_invoke_send(struct fastrpc_apps *me,
  2903. msg.invoke.page.addr = buf->phys;
  2904. msg.invoke.page.size = buf_page_size(buf->used);
  2905. spin_lock(&me->wrlock);
  2906. - len = smd_write(me->channel[ctx->cid].chan, &msg, sizeof(msg));
  2907. + len = smd_write(me->chan, &msg, sizeof(msg));
  2908. spin_unlock(&me->wrlock);
  2909. VERIFY(err, len == sizeof(msg));
  2910. return err;
  2911. @@ -783,28 +778,22 @@ static int fastrpc_invoke_send(struct fastrpc_apps *me,
  2912. static void fastrpc_deinit(void)
  2913. {
  2914. struct fastrpc_apps *me = &gfa;
  2915. - int i;
  2916.  
  2917. - for (i = 0; i < NUM_CHANNELS; i++) {
  2918. - if (me->channel[i].chan) {
  2919. - (void)smd_close(me->channel[i].chan);
  2920. - me->channel[i].chan = 0;
  2921. - }
  2922. - }
  2923. + smd_close(me->chan);
  2924. ion_client_destroy(me->iclient);
  2925. me->iclient = 0;
  2926. + me->chan = 0;
  2927. }
  2928.  
  2929. -static void fastrpc_read_handler(int cid)
  2930. +static void fastrpc_read_handler(void)
  2931. {
  2932. struct fastrpc_apps *me = &gfa;
  2933. struct smq_invoke_rsp rsp;
  2934. int err = 0;
  2935.  
  2936. do {
  2937. - VERIFY(err, sizeof(rsp) == smd_read_from_cb(
  2938. - me->channel[cid].chan,
  2939. - &rsp, sizeof(rsp)));
  2940. + VERIFY(err, sizeof(rsp) ==
  2941. + smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
  2942. if (err)
  2943. goto bail;
  2944. context_notify_user(rsp.ctx, rsp.retval);
  2945. @@ -815,76 +804,76 @@ static void fastrpc_read_handler(int cid)
  2946.  
  2947. static void smd_event_handler(void *priv, unsigned event)
  2948. {
  2949. - struct fastrpc_apps *me = &gfa;
  2950. - int cid = (int)priv;
  2951. + struct fastrpc_apps *me = (struct fastrpc_apps *)priv;
  2952.  
  2953. switch (event) {
  2954. case SMD_EVENT_OPEN:
  2955. - complete(&me->channel[cid].work);
  2956. + complete(&(me->work));
  2957. break;
  2958. case SMD_EVENT_CLOSE:
  2959. - context_notify_all_users(&me->clst, cid);
  2960. + context_notify_all_users(&me->clst);
  2961. break;
  2962. case SMD_EVENT_DATA:
  2963. - fastrpc_read_handler(cid);
  2964. + fastrpc_read_handler();
  2965. break;
  2966. }
  2967. }
  2968.  
  2969. static int fastrpc_init(void)
  2970. {
  2971. - int i, err = 0;
  2972. + int err = 0;
  2973. struct fastrpc_apps *me = &gfa;
  2974. struct device_node *node;
  2975. - struct fastrpc_smmu *smmu;
  2976. bool enabled = 0;
  2977.  
  2978. - spin_lock_init(&me->hlock);
  2979. - spin_lock_init(&me->wrlock);
  2980. - mutex_init(&me->smd_mutex);
  2981. - context_list_ctor(&me->clst);
  2982. - for (i = 0; i < RPC_HASH_SZ; ++i)
  2983. - INIT_HLIST_HEAD(&me->htbl[i]);
  2984. - me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
  2985. - DEVICE_NAME);
  2986. - VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
  2987. - if (err)
  2988. - goto bail;
  2989. - for (i = 0; i < NUM_CHANNELS; i++) {
  2990. - init_completion(&me->channel[i].work);
  2991. - if (!gcinfo[i].node)
  2992. - continue;
  2993. - smmu = &me->channel[i].smmu;
  2994. - node = of_find_compatible_node(NULL, NULL, gcinfo[i].node);
  2995. + if (me->chan == 0) {
  2996. + int i;
  2997. + spin_lock_init(&me->hlock);
  2998. + spin_lock_init(&me->wrlock);
  2999. + init_completion(&me->work);
  3000. + mutex_init(&me->smd_mutex);
  3001. + context_list_ctor(&me->clst);
  3002. + for (i = 0; i < RPC_HASH_SZ; ++i)
  3003. + INIT_HLIST_HEAD(&me->htbl[i]);
  3004. + me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
  3005. + DEVICE_NAME);
  3006. + VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
  3007. + if (err)
  3008. + goto bail;
  3009. + node = of_find_compatible_node(NULL, NULL,
  3010. + "qcom,msm-audio-ion");
  3011. if (node)
  3012. enabled = of_property_read_bool(node,
  3013. "qcom,smmu-enabled");
  3014. if (enabled)
  3015. - smmu->group = iommu_group_find(gcinfo[i].group);
  3016. - if (smmu->group)
  3017. - smmu->domain = iommu_group_get_iommudata(smmu->group);
  3018. - if (!IS_ERR_OR_NULL(smmu->domain)) {
  3019. - smmu->domain_id = msm_find_domain_no(smmu->domain);
  3020. - if (smmu->domain_id >= 0)
  3021. - smmu->enabled = enabled;
  3022. + me->smmu.group = iommu_group_find("lpass_audio");
  3023. + if (me->smmu.group)
  3024. + me->smmu.domain = iommu_group_get_iommudata(
  3025. + me->smmu.group);
  3026. + if (!IS_ERR_OR_NULL(me->smmu.domain)) {
  3027. + me->smmu.domain_id = msm_find_domain_no(
  3028. + me->smmu.domain);
  3029. + if (me->smmu.domain_id >= 0)
  3030. + me->smmu.enabled = enabled;
  3031. }
  3032. }
  3033. +
  3034. return 0;
  3035.  
  3036. bail:
  3037. return err;
  3038. }
  3039.  
  3040. -static void free_dev(struct fastrpc_device *dev, int cid)
  3041. +static void free_dev(struct fastrpc_device *dev)
  3042. {
  3043. if (dev) {
  3044. - free_mem(&dev->buf, cid);
  3045. + free_mem(&dev->buf);
  3046. kfree(dev);
  3047. module_put(THIS_MODULE);
  3048. }
  3049. }
  3050.  
  3051. -static int alloc_dev(struct fastrpc_device **dev, int cid)
  3052. +static int alloc_dev(struct fastrpc_device **dev)
  3053. {
  3054. int err = 0;
  3055. struct fastrpc_device *fd = 0;
  3056. @@ -899,7 +888,7 @@ static int alloc_dev(struct fastrpc_device **dev, int cid)
  3057. INIT_HLIST_NODE(&fd->hn);
  3058.  
  3059. fd->buf.size = PAGE_SIZE;
  3060. - VERIFY(err, 0 == alloc_mem(&fd->buf, cid));
  3061. + VERIFY(err, 0 == alloc_mem(&fd->buf));
  3062. if (err)
  3063. goto bail;
  3064. fd->tgid = current->tgid;
  3065. @@ -907,12 +896,11 @@ static int alloc_dev(struct fastrpc_device **dev, int cid)
  3066. *dev = fd;
  3067. bail:
  3068. if (err)
  3069. - free_dev(fd, cid);
  3070. + free_dev(fd);
  3071. return err;
  3072. }
  3073.  
  3074. -static int get_dev(struct fastrpc_apps *me, int cid,
  3075. - struct fastrpc_device **rdev)
  3076. +static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
  3077. {
  3078. struct hlist_head *head;
  3079. struct fastrpc_device *dev = 0, *devfree = 0;
  3080. @@ -936,8 +924,8 @@ static int get_dev(struct fastrpc_apps *me, int cid,
  3081. *rdev = devfree;
  3082. bail:
  3083. if (err) {
  3084. - free_dev(devfree, cid);
  3085. - err = alloc_dev(rdev, cid);
  3086. + free_dev(devfree);
  3087. + err = alloc_dev(rdev);
  3088. }
  3089. return err;
  3090. }
  3091. @@ -954,48 +942,46 @@ static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
  3092. return;
  3093. }
  3094.  
  3095. -static int fastrpc_release_current_dsp_process(int cid);
  3096. +static int fastrpc_release_current_dsp_process(void);
  3097.  
  3098. static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t mode,
  3099. - uint32_t kernel, struct fastrpc_ioctl_invoke_fd *invokefd,
  3100. - int cid)
  3101. + uint32_t kernel,
  3102. + struct fastrpc_ioctl_invoke_fd *invokefd)
  3103. {
  3104. struct smq_invoke_ctx *ctx = 0;
  3105. struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
  3106. int interrupted = 0;
  3107. int err = 0;
  3108.  
  3109. - if(!kernel) {
  3110. - VERIFY(err, 0 == context_restore_interrupted(me, invokefd, cid, &ctx));
  3111. + if (!kernel) {
  3112. + VERIFY(err, 0 == context_restore_interrupted(me, invokefd,
  3113. + &ctx));
  3114. if (err)
  3115. goto bail;
  3116. - if(ctx)
  3117. + if (ctx)
  3118. goto wait;
  3119. }
  3120.  
  3121. - VERIFY(err, 0 == context_alloc(me, kernel, invokefd, cid, &ctx));
  3122. + VERIFY(err, 0 == context_alloc(me, kernel, invokefd, &ctx));
  3123. if (err)
  3124. goto bail;
  3125.  
  3126. - if (me->channel[cid].smmu.enabled) {
  3127. - VERIFY(err, 0 == iommu_attach_group(
  3128. - me->channel[cid].smmu.domain,
  3129. - me->channel[cid].smmu.group));
  3130. + if (me->smmu.enabled) {
  3131. + VERIFY(err, 0 == iommu_attach_group(me->smmu.domain,
  3132. + me->smmu.group));
  3133. if (err)
  3134. goto bail;
  3135. ctx->smmu = 1;
  3136. }
  3137. if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
  3138. - VERIFY(err, 0 == get_dev(me, cid, &ctx->dev));
  3139. + VERIFY(err, 0 == get_dev(me, &ctx->dev));
  3140. if (err)
  3141. goto bail;
  3142. - VERIFY(err, 0 == get_page_list(kernel, ctx->sc, ctx->pra, &ctx->dev->buf,
  3143. - &ctx->obuf, cid));
  3144. + VERIFY(err, 0 == get_page_list(kernel, ctx));
  3145. if (err)
  3146. goto bail;
  3147. ctx->rpra = (remote_arg_t *)ctx->obuf.virt;
  3148. - VERIFY(err, 0 == get_args(kernel, ctx->sc, ctx->pra, ctx->rpra, invoke->pra,
  3149. - &ctx->obuf, &ctx->abufs, &ctx->nbufs, ctx->fds, ctx->handles, cid));
  3150. + VERIFY(err, 0 == get_args(kernel, ctx, invoke->pra));
  3151. if (err)
  3152. goto bail;
  3153. }
  3154. @@ -1003,15 +989,15 @@ static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t mode,
  3155. inv_args_pre(ctx->sc, ctx->rpra);
  3156. if (FASTRPC_MODE_SERIAL == mode)
  3157. inv_args(ctx->sc, ctx->rpra, ctx->obuf.used);
  3158. - VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle, ctx->sc,
  3159. - ctx, &ctx->obuf));
  3160. + VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle,
  3161. + ctx->sc, ctx, &ctx->obuf));
  3162. if (err)
  3163. goto bail;
  3164. if (FASTRPC_MODE_PARALLEL == mode)
  3165. inv_args(ctx->sc, ctx->rpra, ctx->obuf.used);
  3166. -wait:
  3167. - if(kernel)
  3168. - wait_for_completion(&ctx->work);
  3169. + wait:
  3170. + if (kernel)
  3171. + wait_for_completion(&ctx->work);
  3172. else {
  3173. interrupted = wait_for_completion_interruptible(&ctx->work);
  3174. VERIFY(err, 0 == (err = interrupted));
  3175. @@ -1021,20 +1007,19 @@ wait:
  3176. VERIFY(err, 0 == (err = ctx->retval));
  3177. if (err)
  3178. goto bail;
  3179. - VERIFY(err, 0 == put_args(kernel, ctx->sc, ctx->pra, ctx->rpra, invoke->pra));
  3180. + VERIFY(err, 0 == put_args(kernel, ctx->sc, ctx->pra, ctx->rpra,
  3181. + invoke->pra));
  3182. if (err)
  3183. goto bail;
  3184. bail:
  3185. - if (ctx && interrupted == -ERESTARTSYS) {
  3186. + if (ctx && interrupted == -ERESTARTSYS)
  3187. context_save_interrupted(ctx);
  3188. - err = -ERESTARTSYS;
  3189. - } else if(ctx) {
  3190. + else if (ctx)
  3191. context_free(ctx, 1);
  3192. - }
  3193. return err;
  3194. }
  3195.  
  3196. -static int fastrpc_create_current_dsp_process(int cid)
  3197. +static int fastrpc_create_current_dsp_process(void)
  3198. {
  3199. int err = 0;
  3200. struct fastrpc_ioctl_invoke_fd ioctl;
  3201. @@ -1050,11 +1035,11 @@ static int fastrpc_create_current_dsp_process(int cid)
  3202. ioctl.inv.pra = ra;
  3203. ioctl.fds = 0;
  3204. VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
  3205. - FASTRPC_MODE_PARALLEL, 1, &ioctl, cid)));
  3206. + FASTRPC_MODE_PARALLEL, 1, &ioctl)));
  3207. return err;
  3208. }
  3209.  
  3210. -static int fastrpc_release_current_dsp_process(int cid)
  3211. +static int fastrpc_release_current_dsp_process(void)
  3212. {
  3213. int err = 0;
  3214. struct fastrpc_apps *me = &gfa;
  3215. @@ -1070,14 +1055,14 @@ static int fastrpc_release_current_dsp_process(int cid)
  3216. ioctl.inv.pra = ra;
  3217. ioctl.fds = 0;
  3218. VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
  3219. - FASTRPC_MODE_PARALLEL, 1, &ioctl, cid)));
  3220. + FASTRPC_MODE_PARALLEL, 1, &ioctl)));
  3221. return err;
  3222. }
  3223.  
  3224. static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me,
  3225. struct fastrpc_ioctl_mmap *mmap,
  3226. struct smq_phy_page *pages,
  3227. - int cid, int num)
  3228. + int num)
  3229. {
  3230. struct fastrpc_ioctl_invoke_fd ioctl;
  3231. remote_arg_t ra[3];
  3232. @@ -1110,7 +1095,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me,
  3233. ioctl.inv.pra = ra;
  3234. ioctl.fds = 0;
  3235. VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
  3236. - FASTRPC_MODE_PARALLEL, 1, &ioctl, cid)));
  3237. + FASTRPC_MODE_PARALLEL, 1, &ioctl)));
  3238. mmap->vaddrout = routargs.vaddrout;
  3239. if (err)
  3240. goto bail;
  3241. @@ -1119,7 +1104,7 @@ bail:
  3242. }
  3243.  
  3244. static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me,
  3245. - struct fastrpc_ioctl_munmap *munmap, int cid)
  3246. + struct fastrpc_ioctl_munmap *munmap)
  3247. {
  3248. struct fastrpc_ioctl_invoke_fd ioctl;
  3249. remote_arg_t ra[1];
  3250. @@ -1141,7 +1126,7 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me,
  3251. ioctl.inv.pra = ra;
  3252. ioctl.fds = 0;
  3253. VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
  3254. - FASTRPC_MODE_PARALLEL, 1, &ioctl, cid)));
  3255. + FASTRPC_MODE_PARALLEL, 1, &ioctl)));
  3256. return err;
  3257. }
  3258.  
  3259. @@ -1152,13 +1137,13 @@ static int fastrpc_internal_munmap(struct fastrpc_apps *me,
  3260. int err = 0;
  3261. struct fastrpc_mmap *map = 0, *mapfree = 0;
  3262. struct hlist_node *pos, *n;
  3263. - VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap, fdata->cid)));
  3264. + VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap)));
  3265. if (err)
  3266. goto bail;
  3267. spin_lock(&fdata->hlock);
  3268. hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
  3269. if (map->vaddrout == munmap->vaddrout &&
  3270. - map->size == munmap->size) {
  3271. + map->size == munmap->size) {
  3272. hlist_del(&map->hn);
  3273. mapfree = map;
  3274. map = 0;
  3275. @@ -1168,7 +1153,7 @@ static int fastrpc_internal_munmap(struct fastrpc_apps *me,
  3276. spin_unlock(&fdata->hlock);
  3277. bail:
  3278. if (mapfree) {
  3279. - free_map(mapfree, fdata->cid);
  3280. + free_map(mapfree);
  3281. kfree(mapfree);
  3282. }
  3283. return err;
  3284. @@ -1182,7 +1167,6 @@ static int fastrpc_internal_mmap(struct fastrpc_apps *me,
  3285. struct ion_client *clnt = gfa.iclient;
  3286. struct fastrpc_mmap *map = 0;
  3287. struct smq_phy_page *pages = 0;
  3288. - struct ion_handle *handles;
  3289. void *buf;
  3290. unsigned long len;
  3291. int num;
  3292. @@ -1206,13 +1190,9 @@ static int fastrpc_internal_mmap(struct fastrpc_apps *me,
  3293. if (err)
  3294. goto bail;
  3295.  
  3296. - if (me->channel[fdata->cid].smmu.enabled) {
  3297. - handles = ion_import_dma_buf(clnt, mmap->fd);
  3298. - VERIFY(err, 0 == IS_ERR_OR_NULL(handles));
  3299. - if (err)
  3300. - goto bail;
  3301. - VERIFY(err, 0 == ion_map_iommu(clnt, handles,
  3302. - me->channel[fdata->cid].smmu.domain_id, 0,
  3303. + if (me->smmu.enabled) {
  3304. + VERIFY(err, 0 == ion_map_iommu(clnt, map->handle,
  3305. + me->smmu.domain_id, 0,
  3306. SZ_4K, 0, &map->phys, &len, 0, 0));
  3307. if (err)
  3308. goto bail;
  3309. @@ -1226,7 +1206,7 @@ static int fastrpc_internal_mmap(struct fastrpc_apps *me,
  3310. goto bail;
  3311. }
  3312.  
  3313. - VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, fdata->cid, num));
  3314. + VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, num));
  3315. if (err)
  3316. goto bail;
  3317. map->vaddrin = mmap->vaddrin;
  3318. @@ -1238,14 +1218,14 @@ static int fastrpc_internal_mmap(struct fastrpc_apps *me,
  3319. spin_unlock(&fdata->hlock);
  3320. bail:
  3321. if (err && map) {
  3322. - free_map(map, fdata->cid);
  3323. + free_map(map);
  3324. kfree(map);
  3325. }
  3326. kfree(pages);
  3327. return err;
  3328. }
  3329.  
  3330. -static void cleanup_current_dev(int cid)
  3331. +static void cleanup_current_dev(void)
  3332. {
  3333. struct fastrpc_apps *me = &gfa;
  3334. uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
  3335. @@ -1266,7 +1246,7 @@ static void cleanup_current_dev(int cid)
  3336. }
  3337. spin_unlock(&me->hlock);
  3338. if (devfree) {
  3339. - free_dev(devfree, cid);
  3340. + free_dev(devfree);
  3341. goto rnext;
  3342. }
  3343. return;
  3344. @@ -1275,37 +1255,32 @@ static void cleanup_current_dev(int cid)
  3345. static void fastrpc_channel_close(struct kref *kref)
  3346. {
  3347. struct fastrpc_apps *me = &gfa;
  3348. - struct fastrpc_channel_context *ctx;
  3349. - int cid;
  3350.  
  3351. - ctx = container_of(kref, struct fastrpc_channel_context, kref);
  3352. - smd_close(ctx->chan);
  3353. - ctx->chan = 0;
  3354. + smd_close(me->chan);
  3355. + me->chan = 0;
  3356. mutex_unlock(&me->smd_mutex);
  3357. - cid = ctx - &me->channel[0];
  3358. - pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
  3359. - MAJOR(me->dev_no), cid);
  3360. + pr_info("'closed /dev/%s c %d 0'\n", DEVICE_NAME,
  3361. + MAJOR(me->dev_no));
  3362. }
  3363.  
  3364. static int fastrpc_device_release(struct inode *inode, struct file *file)
  3365. {
  3366. struct file_data *fdata = (struct file_data *)file->private_data;
  3367. struct fastrpc_apps *me = &gfa;
  3368. - int cid = MINOR(inode->i_rdev);
  3369.  
  3370. - (void)fastrpc_release_current_dsp_process(cid);
  3371. - cleanup_current_dev(cid);
  3372. + (void)fastrpc_release_current_dsp_process();
  3373. + cleanup_current_dev();
  3374. if (fdata) {
  3375. struct fastrpc_mmap *map = 0;
  3376. - struct hlist_node *pos, *n;
  3377. + struct hlist_node *n, *pos;
  3378. file->private_data = 0;
  3379. hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
  3380. hlist_del(&map->hn);
  3381. - free_map(map, cid);
  3382. + free_map(map);
  3383. kfree(map);
  3384. }
  3385. kfree(fdata);
  3386. - kref_put_mutex(&me->channel[cid].kref, fastrpc_channel_close,
  3387. + kref_put_mutex(&me->kref, fastrpc_channel_close,
  3388. &me->smd_mutex);
  3389. }
  3390. return 0;
  3391. @@ -1313,27 +1288,23 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
  3392.  
  3393. static int fastrpc_device_open(struct inode *inode, struct file *filp)
  3394. {
  3395. - int cid = MINOR(inode->i_rdev);
  3396. int err = 0;
  3397. struct fastrpc_apps *me = &gfa;
  3398.  
  3399. mutex_lock(&me->smd_mutex);
  3400. - if (kref_get_unless_zero(&me->channel[cid].kref) == 0) {
  3401. - VERIFY(err, 0 == smd_named_open_on_edge(
  3402. - FASTRPC_SMD_GUID,
  3403. - gcinfo[cid].channel,
  3404. - &me->channel[cid].chan, (void*)cid,
  3405. - smd_event_handler));
  3406. + if (kref_get_unless_zero(&me->kref) == 0) {
  3407. + VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
  3408. + SMD_APPS_QDSP, &me->chan,
  3409. + me, smd_event_handler));
  3410. if (err)
  3411. goto smd_bail;
  3412. - VERIFY(err, 0 != wait_for_completion_timeout(
  3413. - &me->channel[cid].work,
  3414. + VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
  3415. RPC_TIMEOUT));
  3416. if (err)
  3417. goto completion_bail;
  3418. - kref_init(&me->channel[cid].kref);
  3419. - pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
  3420. - MAJOR(me->dev_no), cid);
  3421. + kref_init(&me->kref);
  3422. + pr_info("'opened /dev/%s c %d 0'\n", DEVICE_NAME,
  3423. + MAJOR(me->dev_no));
  3424. }
  3425. mutex_unlock(&me->smd_mutex);
  3426.  
  3427. @@ -1349,26 +1320,25 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
  3428.  
  3429. spin_lock_init(&fdata->hlock);
  3430. INIT_HLIST_HEAD(&fdata->hlst);
  3431. - fdata->cid = cid;
  3432.  
  3433. - VERIFY(err, 0 == fastrpc_create_current_dsp_process(cid));
  3434. + VERIFY(err, 0 == fastrpc_create_current_dsp_process());
  3435. if (err)
  3436. goto bail;
  3437. filp->private_data = fdata;
  3438. bail:
  3439. if (err) {
  3440. - cleanup_current_dev(cid);
  3441. + cleanup_current_dev();
  3442. kfree(fdata);
  3443. - kref_put_mutex(&me->channel[cid].kref,
  3444. - fastrpc_channel_close, &me->smd_mutex);
  3445. + kref_put_mutex(&me->kref, fastrpc_channel_close,
  3446. + &me->smd_mutex);
  3447. }
  3448. module_put(THIS_MODULE);
  3449. }
  3450. return err;
  3451.  
  3452. completion_bail:
  3453. - smd_close(me->channel[cid].chan);
  3454. - me->channel[cid].chan = 0;
  3455. + smd_close(me->chan);
  3456. + me->chan = 0;
  3457. smd_bail:
  3458. mutex_unlock(&me->smd_mutex);
  3459. return err;
  3460. @@ -1380,7 +1350,6 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
  3461. {
  3462. struct fastrpc_apps *me = &gfa;
  3463. struct fastrpc_ioctl_invoke_fd invokefd;
  3464. - struct fastrpc_ioctl_invoke *invoke = &invokefd.inv;
  3465. struct fastrpc_ioctl_mmap mmap;
  3466. struct fastrpc_ioctl_munmap munmap;
  3467. void *param = (char *)ioctl_param;
  3468. @@ -1392,12 +1361,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
  3469. case FASTRPC_IOCTL_INVOKE:
  3470. invokefd.fds = 0;
  3471. size = (ioctl_num == FASTRPC_IOCTL_INVOKE) ?
  3472. - sizeof(*invoke) : sizeof(invokefd);
  3473. + sizeof(invokefd.inv) : sizeof(invokefd);
  3474. VERIFY(err, 0 == copy_from_user(&invokefd, param, size));
  3475. if (err)
  3476. goto bail;
  3477. VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, fdata->mode,
  3478. - 0, &invokefd, fdata->cid)));
  3479. + 0, &invokefd)));
  3480. if (err)
  3481. goto bail;
  3482. break;
  3483. @@ -1407,7 +1376,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
  3484. if (err)
  3485. goto bail;
  3486. VERIFY(err, 0 == (err = fastrpc_internal_mmap(me, fdata,
  3487. - &mmap)));
  3488. + &mmap)));
  3489. if (err)
  3490. goto bail;
  3491. VERIFY(err, 0 == copy_to_user(param, &mmap, sizeof(mmap)));
  3492. @@ -1452,34 +1421,29 @@ static const struct file_operations fops = {
  3493. static int __init fastrpc_device_init(void)
  3494. {
  3495. struct fastrpc_apps *me = &gfa;
  3496. - int i, err = 0;
  3497. + int err = 0;
  3498.  
  3499. memset(me, 0, sizeof(*me));
  3500. VERIFY(err, 0 == fastrpc_init());
  3501. if (err)
  3502. goto fastrpc_bail;
  3503. - VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
  3504. - DEVICE_NAME));
  3505. + VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
  3506. if (err)
  3507. goto alloc_chrdev_bail;
  3508. cdev_init(&me->cdev, &fops);
  3509. me->cdev.owner = THIS_MODULE;
  3510. - VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
  3511. - NUM_CHANNELS));
  3512. + VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
  3513. if (err)
  3514. goto cdev_init_bail;
  3515. me->class = class_create(THIS_MODULE, "fastrpc");
  3516. VERIFY(err, !IS_ERR(me->class));
  3517. if (err)
  3518. goto class_create_bail;
  3519. - for (i = 0; i < NUM_CHANNELS; i++) {
  3520. - me->channel[i].dev = device_create(me->class, NULL,
  3521. - MKDEV(MAJOR(me->dev_no), i),
  3522. - NULL, gcinfo[i].name);
  3523. - VERIFY(err, !IS_ERR(me->channel[i].dev));
  3524. - if (err)
  3525. - goto device_create_bail;
  3526. - }
  3527. + me->dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), 0),
  3528. + NULL, DEVICE_NAME);
  3529. + VERIFY(err, !IS_ERR(me->dev));
  3530. + if (err)
  3531. + goto device_create_bail;
  3532.  
  3533. return 0;
  3534.  
  3535. @@ -1488,7 +1452,7 @@ device_create_bail:
  3536. class_create_bail:
  3537. cdev_del(&me->cdev);
  3538. cdev_init_bail:
  3539. - unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
  3540. + unregister_chrdev_region(me->dev_no, 1);
  3541. alloc_chrdev_bail:
  3542. fastrpc_deinit();
  3543. fastrpc_bail:
  3544. @@ -1498,17 +1462,14 @@ fastrpc_bail:
  3545. static void __exit fastrpc_device_exit(void)
  3546. {
  3547. struct fastrpc_apps *me = &gfa;
  3548. - int i;
  3549.  
  3550. context_list_dtor(me, &me->clst);
  3551. fastrpc_deinit();
  3552. - for (i = 0; i < NUM_CHANNELS; i++) {
  3553. - cleanup_current_dev(i);
  3554. - device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
  3555. - }
  3556. + cleanup_current_dev();
  3557. + device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
  3558. class_destroy(me->class);
  3559. cdev_del(&me->cdev);
  3560. - unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
  3561. + unregister_chrdev_region(me->dev_no, 1);
  3562. }
  3563.  
  3564. module_init(fastrpc_device_init);
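Note: the adsprpc changes above collapse the per-channel (cid) plumbing into a single SMD channel whose lifetime is tied to a kref: the first open brings the channel up, later opens only take a reference, and the last release tears it down. A minimal sketch of that open/release shape, reusing names from the diff (FASTRPC_SMD_GUID, SMD_APPS_QDSP, RPC_TIMEOUT, smd_event_handler) around an assumed, trimmed-down channel structure; this is an illustration, not the driver's actual code.

    /* Sketch of the kref-guarded single-channel lifetime used above.
     * Fields are trimmed to the ones involved; other error handling elided.
     */
    #include <linux/errno.h>
    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/completion.h>
    #include <mach/msm_smd.h>

    struct fastrpc_chan_sketch {
        struct kref kref;
        struct mutex smd_mutex;
        smd_channel_t *chan;
        struct completion work;
    };

    static void chan_close(struct kref *kref)
    {
        struct fastrpc_chan_sketch *me =
            container_of(kref, struct fastrpc_chan_sketch, kref);

        smd_close(me->chan);
        me->chan = NULL;
        /* kref_put_mutex() acquired smd_mutex for us; release it here */
        mutex_unlock(&me->smd_mutex);
    }

    static int chan_open(struct fastrpc_chan_sketch *me)
    {
        int err = 0;

        mutex_lock(&me->smd_mutex);
        /* only the first opener actually brings the SMD channel up */
        if (kref_get_unless_zero(&me->kref) == 0) {
            err = smd_named_open_on_edge(FASTRPC_SMD_GUID, SMD_APPS_QDSP,
                                         &me->chan, me, smd_event_handler);
            if (!err && !wait_for_completion_timeout(&me->work, RPC_TIMEOUT)) {
                smd_close(me->chan);
                me->chan = NULL;
                err = -ETIMEDOUT;
            }
            if (!err)
                kref_init(&me->kref);   /* refcount starts at 1 */
        }
        mutex_unlock(&me->smd_mutex);
        return err;
    }

    static void chan_release(struct fastrpc_chan_sketch *me)
    {
        /* the last reference closes the channel with smd_mutex held */
        kref_put_mutex(&me->kref, chan_close, &me->smd_mutex);
    }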
  3565. diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
  3566. index 28b066b..37e40c5 100644
  3567. --- a/drivers/char/diag/diag_dci.c
  3568. +++ b/drivers/char/diag/diag_dci.c
  3569. @@ -1,4 +1,4 @@
  3570. -/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  3571. +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  3572. *
  3573. * This program is free software; you can redistribute it and/or modify
  3574. * it under the terms of the GNU General Public License version 2 and
  3575. @@ -567,6 +567,8 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
  3576. struct diag_dci_buffer_t *rsp_buf = NULL;
  3577. struct dci_pkt_req_entry_t *req_entry = NULL;
  3578. unsigned char *temp = buf;
  3579. + int save_req_uid = 0;
  3580. + struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
  3581.  
  3582. if (!buf) {
  3583. pr_err("diag: Invalid pointer in %s\n", __func__);
  3584. @@ -608,6 +610,7 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
  3585. return;
  3586. }
  3587. curr_client_pid = req_entry->pid;
  3588. + save_req_uid = req_entry->uid;
  3589.  
  3590. /* Remove the headers and send only the response to this function */
  3591. mutex_lock(&driver->dci_mutex);
  3592. @@ -647,15 +650,14 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
  3593. }
  3594.  
  3595. /* Fill in packet response header information */
  3596. - *(int *)(rsp_buf->data + rsp_buf->data_len) = DCI_PKT_RSP_TYPE;
  3597. - rsp_buf->data_len += sizeof(int);
  3598. + pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
  3599. /* Packet Length = Response Length + Length of uid field (int) */
  3600. - *(int *)(rsp_buf->data + rsp_buf->data_len) = rsp_len + sizeof(int);
  3601. - rsp_buf->data_len += sizeof(int);
  3602. - *(uint8_t *)(rsp_buf->data + rsp_buf->data_len) = delete_flag;
  3603. - rsp_buf->data_len += sizeof(uint8_t);
  3604. - *(int *)(rsp_buf->data + rsp_buf->data_len) = req_entry->uid;
  3605. - rsp_buf->data_len += sizeof(int);
  3606. + pkt_rsp_header.length = rsp_len + sizeof(int);
  3607. + pkt_rsp_header.delete_flag = delete_flag;
  3608. + pkt_rsp_header.uid = save_req_uid;
  3609. + memcpy(rsp_buf->data, &pkt_rsp_header,
  3610. + sizeof(struct diag_dci_pkt_rsp_header_t));
  3611. + rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
  3612. memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
  3613. rsp_buf->data_len += rsp_len;
  3614. rsp_buf->data_source = data_source;
  3615. @@ -1289,9 +1291,11 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
  3616. * registered on the Apps Processor
  3617. */
  3618. if (entry.cmd_code_lo == MODE_CMD &&
  3619. - entry.cmd_code_hi == MODE_CMD)
  3620. + entry.cmd_code_hi == MODE_CMD &&
  3621. + header->subsys_id == RESET_ID) {
  3622. if (entry.client_id != APPS_DATA)
  3623. continue;
  3624. + }
  3625. ret = diag_send_dci_pkt(entry, buf, len,
  3626. req_entry->tag);
  3627. found = 1;
  3628. diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
  3629. index ccd1a71..5c90b60 100644
  3630. --- a/drivers/char/diag/diag_dci.h
  3631. +++ b/drivers/char/diag/diag_dci.h
  3632. @@ -1,4 +1,4 @@
  3633. -/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  3634. +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  3635. *
  3636. * This program is free software; you can redistribute it and/or modify
  3637. * it under the terms of the GNU General Public License version 2 and
  3638. @@ -128,6 +128,13 @@ struct diag_log_event_stats {
  3639. int is_set;
  3640. };
  3641.  
  3642. +struct diag_dci_pkt_rsp_header_t {
  3643. + int type;
  3644. + int length;
  3645. + uint8_t delete_flag;
  3646. + int uid;
  3647. +} __packed;
  3648. +
  3649. struct diag_dci_pkt_header_t {
  3650. uint8_t start;
  3651. uint8_t version;
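Note: the diag_dci change above swaps four open-coded writes into rsp_buf->data for the __packed diag_dci_pkt_rsp_header_t just added here, serialized with a single memcpy, which keeps the on-wire layout explicit and avoids unaligned pointer casts. A hedged illustration of the same serialization, with a stand-in destination buffer rather than the driver's real rsp_buf bookkeeping:

    /* One-shot serialization of the packed response header added above.
     * dst/dst_len are stand-ins for the driver's rsp_buf fields.
     */
    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int dci_write_rsp_header(unsigned char *dst, size_t dst_len,
                                    int rsp_len, uint8_t delete_flag, int uid)
    {
        struct diag_dci_pkt_rsp_header_t hdr;

        if (dst_len < sizeof(hdr))
            return -ENOSPC;

        hdr.type = DCI_PKT_RSP_TYPE;
        /* packet length = response length + length of the uid field */
        hdr.length = rsp_len + sizeof(int);
        hdr.delete_flag = delete_flag;
        hdr.uid = uid;

        /* one memcpy of the __packed struct replaces four unaligned stores */
        memcpy(dst, &hdr, sizeof(hdr));
        return sizeof(hdr);
    }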
  3652. diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
  3653. index de7d0af..3747895 100644
  3654. --- a/drivers/char/diag/diagchar_core.c
  3655. +++ b/drivers/char/diag/diagchar_core.c
  3656. @@ -302,10 +302,12 @@ static int diagchar_close(struct inode *inode, struct file *file)
  3657. #ifdef CONFIG_DIAG_OVER_USB
  3658. /* If the SD logging process exits, change logging to USB mode */
  3659. if (driver->logging_process_id == current->tgid) {
  3660. + mutex_lock(&driver->diagchar_mutex);
  3661. driver->logging_mode = USB_MODE;
  3662. + diag_ws_reset();
  3663. + mutex_unlock(&driver->diagchar_mutex);
  3664. diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_DOWN);
  3665. diagfwd_connect();
  3666. - diag_ws_reset();
  3667. #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
  3668. diag_clear_hsic_tbl();
  3669. diagfwd_cancel_hsic(REOPEN_HSIC);
  3670. @@ -911,6 +913,7 @@ int diag_switch_logging(unsigned long ioarg)
  3671. pr_err("socket process, status: %d\n",
  3672. status);
  3673. }
  3674. + driver->socket_process = NULL;
  3675. }
  3676. } else if (driver->logging_mode == SOCKET_MODE) {
  3677. driver->socket_process = current;
  3678. @@ -1293,10 +1296,16 @@ drop:
  3679. COPY_USER_SPACE_OR_EXIT(buf+ret,
  3680. *(data->buf_in_1),
  3681. data->write_ptr_1->length);
  3682. + diag_ws_on_copy();
  3683. + copy_data = 1;
  3684. data->in_busy_1 = 0;
  3685. }
  3686. }
  3687. }
  3688. + if (!copy_data) {
  3689. + diag_ws_on_copy();
  3690. + copy_data = 1;
  3691. + }
  3692. #ifdef CONFIG_DIAG_SDIO_PIPE
  3693. /* copy 9K data over SDIO */
  3694. if (driver->in_busy_sdio == 1) {
  3695. @@ -1460,6 +1469,7 @@ drop:
  3696. exit:
  3697. mutex_unlock(&driver->diagchar_mutex);
  3698. if (copy_data) {
  3699. + diag_ws_on_copy_complete();
  3700. /*
  3701. * Flush any work that is currently pending on the data
  3702. * channels. This will ensure that the next read is not
  3703. @@ -1468,7 +1478,6 @@ exit:
  3704. for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++)
  3705. flush_workqueue(driver->smd_data[i].wq);
  3706. wake_up(&driver->smd_wait_q);
  3707. - diag_ws_on_copy_complete();
  3708. }
  3709. return ret;
  3710. }
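Note: the diagchar_core hunks above are mostly about keeping the wakeup-source accounting balanced: a reference is effectively taken when data is noticed, dropped (diag_ws_on_copy_complete) only after the copy to userspace has finished, and reset under diagchar_mutex when logging falls back to USB mode. The generic notify/copy-complete pattern, sketched with illustrative ws_ref_* helpers (not the driver's diag_ws_* implementation):

    /* Hold a wakeup_source while any data is pending delivery to userspace. */
    #include <linux/pm_wakeup.h>
    #include <linux/spinlock.h>

    struct ws_ref {
        struct wakeup_source *ws;
        spinlock_t lock;
        int count;
    };

    static void ws_ref_get(struct ws_ref *w)    /* on notify/read */
    {
        unsigned long flags;

        spin_lock_irqsave(&w->lock, flags);
        if (w->count++ == 0)
            __pm_stay_awake(w->ws);
        spin_unlock_irqrestore(&w->lock, flags);
    }

    static void ws_ref_put(struct ws_ref *w)    /* on copy complete */
    {
        unsigned long flags;

        spin_lock_irqsave(&w->lock, flags);
        if (w->count > 0 && --w->count == 0)
            __pm_relax(w->ws);
        spin_unlock_irqrestore(&w->lock, flags);
    }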
  3711. diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
  3712. index edf0216..9f2d5d3 100644
  3713. --- a/drivers/char/diag/diagfwd.c
  3714. +++ b/drivers/char/diag/diagfwd.c
  3715. @@ -531,8 +531,11 @@ int diag_process_smd_read_data(struct diag_smd_info *smd_info, void *buf,
  3716.  
  3717. return 0;
  3718. err:
  3719. - if (driver->logging_mode == MEMORY_DEVICE_MODE)
  3720. + if ((smd_info->type == SMD_DATA_TYPE ||
  3721. + smd_info->type == SMD_CMD_TYPE) &&
  3722. + driver->logging_mode == MEMORY_DEVICE_MODE)
  3723. diag_ws_on_read(0);
  3724. +
  3725. return 0;
  3726. }
  3727.  
  3728. @@ -541,6 +544,12 @@ void diag_smd_queue_read(struct diag_smd_info *smd_info)
  3729. if (!smd_info || !smd_info->ch)
  3730. return;
  3731.  
  3732. + if ((smd_info->type == SMD_DATA_TYPE ||
  3733. + smd_info->type == SMD_CMD_TYPE) &&
  3734. + driver->logging_mode == MEMORY_DEVICE_MODE) {
  3735. + diag_ws_on_notify();
  3736. + }
  3737. +
  3738. switch (smd_info->type) {
  3739. case SMD_DCI_TYPE:
  3740. case SMD_DCI_CMD_TYPE:
  3741. @@ -559,12 +568,12 @@ void diag_smd_queue_read(struct diag_smd_info *smd_info)
  3742. default:
  3743. pr_err("diag: In %s, invalid type: %d\n", __func__,
  3744. smd_info->type);
  3745. + if ((smd_info->type == SMD_DATA_TYPE ||
  3746. + smd_info->type == SMD_CMD_TYPE) &&
  3747. + driver->logging_mode == MEMORY_DEVICE_MODE)
  3748. + diag_ws_on_read(0);
  3749. return;
  3750. }
  3751. -
  3752. - if (driver->logging_mode == MEMORY_DEVICE_MODE &&
  3753. - smd_info->type == SMD_DATA_TYPE)
  3754. - diag_ws_on_notify();
  3755. }
  3756.  
  3757. static int diag_smd_resize_buf(struct diag_smd_info *smd_info, void **buf,
  3758. @@ -787,9 +796,11 @@ void diag_smd_send_req(struct diag_smd_info *smd_info)
  3759. }
  3760. }
  3761. }
  3762. - if (smd_info->type == SMD_DATA_TYPE &&
  3763. - driver->logging_mode == MEMORY_DEVICE_MODE)
  3764. - diag_ws_on_read(pkt_len);
  3765. +
  3766. + if ((smd_info->type == SMD_DATA_TYPE ||
  3767. + smd_info->type == SMD_CMD_TYPE) &&
  3768. + driver->logging_mode == MEMORY_DEVICE_MODE)
  3769. + diag_ws_on_read(total_recd);
  3770.  
  3771. if (total_recd > 0) {
  3772. if (!buf) {
  3773. @@ -814,12 +825,19 @@ void diag_smd_send_req(struct diag_smd_info *smd_info)
  3774. } else if (smd_info->ch && !buf &&
  3775. (driver->logging_mode == MEMORY_DEVICE_MODE)) {
  3776. chk_logging_wakeup();
  3777. + } else {
  3778. + if ((smd_info->type == SMD_DATA_TYPE ||
  3779. + smd_info->type == SMD_CMD_TYPE) &&
  3780. + driver->logging_mode == MEMORY_DEVICE_MODE) {
  3781. + diag_ws_on_read(0);
  3782. + }
  3783. }
  3784. return;
  3785.  
  3786. fail_return:
  3787. - if (smd_info->type == SMD_DATA_TYPE &&
  3788. - driver->logging_mode == MEMORY_DEVICE_MODE)
  3789. + if ((smd_info->type == SMD_DATA_TYPE ||
  3790. + smd_info->type == SMD_CMD_TYPE) &&
  3791. + driver->logging_mode == MEMORY_DEVICE_MODE)
  3792. diag_ws_on_read(0);
  3793.  
  3794. if (smd_info->type == SMD_DCI_TYPE ||
  3795. @@ -2214,11 +2232,11 @@ void diag_smd_notify(void *ctxt, unsigned event)
  3796. diag_dci_notify_client(smd_info->peripheral_mask,
  3797. DIAG_STATUS_OPEN);
  3798. }
  3799. - wake_up(&driver->smd_wait_q);
  3800. diag_smd_queue_read(smd_info);
  3801. - } else if (event == SMD_EVENT_DATA) {
  3802. wake_up(&driver->smd_wait_q);
  3803. + } else if (event == SMD_EVENT_DATA) {
  3804. diag_smd_queue_read(smd_info);
  3805. + wake_up(&driver->smd_wait_q);
  3806. if (smd_info->type == SMD_DCI_TYPE ||
  3807. smd_info->type == SMD_DCI_CMD_TYPE) {
  3808. diag_dci_try_activate_wakeup_source();
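Note: every diag_ws_on_* call added in the diagfwd.c hunks above is gated by the same condition, (SMD_DATA_TYPE or SMD_CMD_TYPE) in MEMORY_DEVICE_MODE. If this were being refactored rather than minimally patched, that guard could live in one helper; diag_ws_needed() below is hypothetical, only the type and mode identifiers come from the driver.

    /* Hypothetical predicate consolidating the repeated guard above. */
    static inline bool diag_ws_needed(const struct diag_smd_info *smd_info)
    {
        return (smd_info->type == SMD_DATA_TYPE ||
                smd_info->type == SMD_CMD_TYPE) &&
               driver->logging_mode == MEMORY_DEVICE_MODE;
    }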
  3809. diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
  3810. index 3521452..f61759b 100644
  3811. --- a/drivers/crypto/msm/qcrypto.c
  3812. +++ b/drivers/crypto/msm/qcrypto.c
  3813. @@ -1,6 +1,6 @@
  3814. /* Qualcomm Crypto driver
  3815. *
  3816. - * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  3817. + * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
  3818. *
  3819. * This program is free software; you can redistribute it and/or modify
  3820. * it under the terms of the GNU General Public License version 2 and
  3821. @@ -68,6 +68,7 @@ enum qcrypto_bus_state {
  3822. BUS_BANDWIDTH_RELEASING,
  3823. BUS_BANDWIDTH_ALLOCATING,
  3824. BUS_SUSPENDED,
  3825. + BUS_SUSPENDING,
  3826. };
  3827.  
  3828. struct crypto_stat {
  3829. @@ -565,7 +566,7 @@ static void qcrypto_bw_reaper_work(struct work_struct *work)
  3830. /* check if engine is stuck */
  3831. if (pengine->req) {
  3832. if (pengine->check_flag)
  3833. - dev_err(&pengine->pdev->dev,
  3834. + dev_warn(&pengine->pdev->dev,
  3835. "The engine appears to be stuck seq %d req %p.\n",
  3836. active_seq, pengine->req);
  3837. pengine->check_flag = false;
  3838. @@ -963,6 +964,7 @@ static void _qcrypto_remove_engine(struct crypto_engine *pengine)
  3839. cancel_work_sync(&pengine->bw_reaper_ws);
  3840. cancel_work_sync(&pengine->bw_allocate_ws);
  3841. del_timer_sync(&pengine->bw_reaper_timer);
  3842. + device_init_wakeup(&pengine->pdev->dev, false);
  3843.  
  3844. if (pengine->bus_scale_handle != 0)
  3845. msm_bus_scale_unregister_client(pengine->bus_scale_handle);
  3846. @@ -1886,6 +1888,12 @@ again:
  3847.  
  3848. backlog_eng = crypto_get_backlog(&pengine->req_queue);
  3849.  
  3850. + /* make sure it is in high bandwidth state */
  3851. + if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
  3852. + spin_unlock_irqrestore(&cp->lock, flags);
  3853. + return 0;
  3854. + }
  3855. +
  3856. /* try to get request from request queue of the engine first */
  3857. async_req = crypto_dequeue_request(&pengine->req_queue);
  3858. if (!async_req) {
  3859. @@ -2037,6 +2045,7 @@ static int _qcrypto_queue_req(struct crypto_priv *cp,
  3860. pengine = NULL;
  3861. break;
  3862. case BUS_SUSPENDED:
  3863. + case BUS_SUSPENDING:
  3864. default:
  3865. pengine = NULL;
  3866. break;
  3867. @@ -4305,6 +4314,7 @@ static int _qcrypto_probe(struct platform_device *pdev)
  3868. pengine->active_seq = 0;
  3869. pengine->last_active_seq = 0;
  3870. pengine->check_flag = false;
  3871. + device_init_wakeup(&pengine->pdev->dev, true);
  3872.  
  3873. tasklet_init(&pengine->done_tasklet, req_done, (unsigned long)pengine);
  3874. crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
  3875. @@ -4657,6 +4667,25 @@ err:
  3876. return rc;
  3877. };
  3878.  
  3879. +static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
  3880. +{
  3881. + struct crypto_priv *cp = pengine->pcp;
  3882. +
  3883. + if (pengine->req || pengine->req_queue.qlen || cp->req_queue.qlen)
  3884. + return 1;
  3885. + return 0;
  3886. +}
  3887. +
  3888. +static void _qcrypto_do_suspending(struct crypto_engine *pengine)
  3889. +{
  3890. + struct crypto_priv *cp = pengine->pcp;
  3891. +
  3892. + if (cp->platform_support.bus_scale_table == NULL)
  3893. + return;
  3894. + del_timer_sync(&pengine->bw_reaper_timer);
  3895. + qcrypto_ce_set_bus(pengine, false);
  3896. +}
  3897. +
  3898. static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
  3899. {
  3900. int ret = 0;
  3901. @@ -4684,9 +4713,20 @@ static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
  3902. ret = -EBUSY;
  3903. break;
  3904. case BUS_HAS_BANDWIDTH:
  3905. + if (_qcrypto_engine_in_use(pengine)) {
  3906. + ret = -EBUSY;
  3907. + } else {
  3908. + pengine->bw_state = BUS_SUSPENDING;
  3909. + spin_unlock_irqrestore(&cp->lock, flags);
  3910. + _qcrypto_do_suspending(pengine);
  3911. + spin_lock_irqsave(&cp->lock, flags);
  3912. + pengine->bw_state = BUS_SUSPENDED;
  3913. + }
  3914. + break;
  3915. case BUS_BANDWIDTH_RELEASING:
  3916. case BUS_BANDWIDTH_ALLOCATING:
  3917. case BUS_SUSPENDED:
  3918. + case BUS_SUSPENDING:
  3919. default:
  3920. ret = -EBUSY;
  3921. break;
  3922. @@ -4707,6 +4747,7 @@ static int _qcrypto_resume(struct platform_device *pdev)
  3923. struct crypto_engine *pengine;
  3924. struct crypto_priv *cp;
  3925. unsigned long flags;
  3926. + int ret = 0;
  3927.  
  3928. pengine = platform_get_drvdata(pdev);
  3929.  
  3930. @@ -4731,9 +4772,11 @@ static int _qcrypto_resume(struct platform_device *pdev)
  3931. pengine->high_bw_req = true;
  3932. }
  3933. }
  3934. - }
  3935. + } else
  3936. + ret = -EBUSY;
  3937. +
  3938. spin_unlock_irqrestore(&cp->lock, flags);
  3939. - return 0;
  3940. + return ret;
  3941. }
  3942.  
  3943. static struct of_device_id qcrypto_match[] = {
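Note: the qcrypto suspend path above adds a transitional BUS_SUSPENDING state so the blocking teardown (del_timer_sync plus the bus de-vote) can run outside the spinlock while new requests are rejected. A condensed sketch of that flip-state/drop-lock/do-work/relock shape, using the field and helper names from the diff and omitting the other bw_state cases:

    /* Condensed view of the suspend transition added above. */
    static int qcrypto_suspend_engine_sketch(struct crypto_engine *pengine)
    {
        struct crypto_priv *cp = pengine->pcp;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cp->lock, flags);
        if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
            !_qcrypto_engine_in_use(pengine)) {
            pengine->bw_state = BUS_SUSPENDING;     /* block new requests */
            spin_unlock_irqrestore(&cp->lock, flags);

            _qcrypto_do_suspending(pengine);        /* may sleep */

            spin_lock_irqsave(&cp->lock, flags);
            pengine->bw_state = BUS_SUSPENDED;
        } else {
            ret = -EBUSY;
        }
        spin_unlock_irqrestore(&cp->lock, flags);
        return ret;
    }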
  3944. diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
  3945. index 5600c11..6f8919a 100644
  3946. --- a/drivers/gpu/msm/adreno.c
  3947. +++ b/drivers/gpu/msm/adreno.c
  3948. @@ -78,7 +78,6 @@
  3949.  
  3950. #define KGSL_LOG_LEVEL_DEFAULT 3
  3951.  
  3952. -static void adreno_start_work(struct work_struct *work);
  3953. static void adreno_input_work(struct work_struct *work);
  3954.  
  3955. /*
  3956. @@ -151,16 +150,12 @@ static struct adreno_device device_3d0 = {
  3957. .ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
  3958. .fast_hang_detect = 1,
  3959. .long_ib_detect = 1,
  3960. - .start_work = __WORK_INITIALIZER(device_3d0.start_work,
  3961. - adreno_start_work),
  3962. .input_work = __WORK_INITIALIZER(device_3d0.input_work,
  3963. adreno_input_work),
  3964. };
  3965.  
  3966. unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];
  3967.  
  3968. -static struct workqueue_struct *adreno_wq;
  3969. -
  3970. /*
  3971. * This is the master list of all GPU cores that are supported by this
  3972. * driver.
  3973. @@ -255,7 +250,7 @@ static const struct {
  3974. };
  3975.  
  3976. /* Nice level for the higher priority GPU start thread */
  3977. -static unsigned int _wake_nice = -7;
  3978. +static int _wake_nice = -7;
  3979.  
3980. /* Number of milliseconds to stay active after a wake on touch */
  3981. static unsigned int _wake_timeout = 100;
  3982. @@ -1937,9 +1932,6 @@ static int adreno_init(struct kgsl_device *device)
  3983. int i;
  3984. int ret;
  3985.  
  3986. - /* Make a high priority workqueue for starting the GPU */
  3987. - adreno_wq = alloc_workqueue("adreno", WQ_HIGHPRI | WQ_UNBOUND, 1);
  3988. -
  3989. kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
  3990. /*
  3991. * initialization only needs to be done once initially until
  3992. @@ -2125,74 +2117,30 @@ error_clk_off:
  3993. return status;
  3994. }
  3995.  
  3996. -static int _status;
  3997. -
  3998. -/**
  3999. - * _adreno_start_work() - Work handler for the low latency adreno_start
  4000. - * @work: Pointer to the work_struct for
  4001. - *
  4002. - * The work callbak for the low lantecy GPU start - this executes the core
  4003. - * _adreno_start function in the workqueue.
  4004. - */
  4005. -static void adreno_start_work(struct work_struct *work)
  4006. -{
  4007. - struct adreno_device *adreno_dev = container_of(work,
  4008. - struct adreno_device, start_work);
  4009. - struct kgsl_device *device = &adreno_dev->dev;
  4010. -
  4011. - /* Nice ourselves to be higher priority but not too high priority */
  4012. - set_user_nice(current, _wake_nice);
  4013. -
  4014. - kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4015. - /*
  4016. - * If adreno start is already called, no need to call it again
  4017. - * it can lead to unpredictable behavior if we try to start
  4018. - * the device that is already started.
  4019. - * Below is the sequence of events that can go bad without the check
  4020. - * 1) thread 1 calls adreno_start to be scheduled on high priority wq
  4021. - * 2) thread 2 calls adreno_start with normal priority
  4022. - * 3) thread 1 after checking the device to be in slumber state gives
  4023. - * up mutex to be scheduled on high priority wq
  4024. - * 4) thread 2 after checking the device to be in slumber state gets
  4025. - * the mutex and finishes adreno_start before thread 1 is scheduled
  4026. - * on high priority wq.
  4027. - * 5) thread 1 gets scheduled on high priority wq and executes
  4028. - * adreno_start again. This leads to unpredictable behavior.
  4029. - */
  4030. - if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
  4031. - _status = _adreno_start(adreno_dev);
  4032. - else
  4033. - _status = 0;
  4034. - kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4035. -}
  4036. -
  4037. /**
  4038. * adreno_start() - Power up and initialize the GPU
  4039. * @device: Pointer to the KGSL device to power up
4040. * @priority: Boolean flag to specify if the start should be scheduled in a low
  4041. * latency work queue
  4042. *
  4043. - * Power up the GPU and initialize it. If priority is specified then queue the
  4044. - * start function in a high priority queue for lower latency.
  4045. + * Power up the GPU and initialize it. If priority is specified then elevate
  4046. + * the thread priority for the duration of the start operation
  4047. */
  4048. static int adreno_start(struct kgsl_device *device, int priority)
  4049. {
  4050. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  4051. + int nice = task_nice(current);
  4052. + int ret;
  4053.  
  4054. - /* No priority (normal latency) call the core start function directly */
  4055. - if (!priority)
  4056. - return _adreno_start(adreno_dev);
  4057. + if (priority && (_wake_nice < nice))
  4058. + set_user_nice(current, _wake_nice);
  4059.  
  4060. - /*
  4061. - * If priority is specified (low latency) then queue the work in a
  4062. - * higher priority work queue and wait for it to finish
  4063. - */
  4064. - queue_work(adreno_wq, &adreno_dev->start_work);
  4065. - kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4066. - flush_work(&adreno_dev->start_work);
  4067. - kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4068. + ret = _adreno_start(adreno_dev);
  4069. +
  4070. + if (priority)
  4071. + set_user_nice(current, nice);
  4072.  
  4073. - return _status;
  4074. + return ret;
  4075. }
  4076.  
  4077. static int adreno_stop(struct kgsl_device *device)
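Note: adreno_start() above no longer bounces the start onto a dedicated high-priority workqueue; the calling thread simply lowers its own nice value for the duration of _adreno_start() and restores it afterwards, which also removes the double-start race the deleted comment described. The save/boost/restore pattern in isolation (do_low_latency_work() is a placeholder, not a KGSL function):

    /* Temporarily boost the calling thread around a latency-sensitive call,
     * then restore its original nice value.
     */
    #include <linux/sched.h>

    static int run_boosted(int boost_nice)
    {
        int saved = task_nice(current);
        int ret;

        if (boost_nice < saved)
            set_user_nice(current, boost_nice); /* lower nice = higher priority */

        ret = do_low_latency_work();

        if (boost_nice < saved)
            set_user_nice(current, saved);      /* undo the boost if applied */
        return ret;
    }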
  4078. diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
  4079. index aaf5935..c9335c3 100644
  4080. --- a/drivers/gpu/msm/adreno.h
  4081. +++ b/drivers/gpu/msm/adreno.h
  4082. @@ -201,7 +201,6 @@ struct adreno_device {
  4083. struct adreno_dispatcher dispatcher;
  4084. struct adreno_busy_data busy_data;
  4085.  
  4086. - struct work_struct start_work;
  4087. struct work_struct input_work;
  4088. unsigned int ram_cycles_lo;
  4089. };
  4090. diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
  4091. index 9f5765d..c8f0101 100644
  4092. --- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
  4093. +++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
  4094. @@ -463,14 +463,23 @@ void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
  4095. size = (adreno_is_a330(adreno_dev) ||
  4096. adreno_is_a305b(adreno_dev)) ? 0x2E : 0x14;
  4097.  
  4098. - snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
  4099. - remain, REG_CP_STATE_DEBUG_INDEX,
  4100. - REG_CP_STATE_DEBUG_DATA, 0x0, size);
  4101. -
  4102. - /* CP_ME indexed registers */
  4103. - snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
  4104. - remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
  4105. - 64, 44);
4106. + /* Skip indexed register dump for these chipsets: 8974, 8x26, 8x10 */
  4107. + if (adreno_is_a330(adreno_dev) ||
  4108. + adreno_is_a330v2(adreno_dev) ||
  4109. + adreno_is_a305b(adreno_dev) ||
4110. + adreno_is_a305c(adreno_dev)) {
  4111. + KGSL_DRV_ERR(device,
  4112. + "Skipping indexed register dump\n");
  4113. + } else {
  4114. + snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
  4115. + remain, REG_CP_STATE_DEBUG_INDEX,
  4116. + REG_CP_STATE_DEBUG_DATA, 0x0, size);
  4117. +
  4118. + /* CP_ME indexed registers */
  4119. + snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
  4120. + remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
  4121. + 64, 44);
  4122. + }
  4123.  
  4124. /* VPC memory */
  4125. snapshot = kgsl_snapshot_add_section(device,
  4126. @@ -482,10 +491,19 @@ void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
  4127. KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
  4128. a3xx_snapshot_cp_meq, NULL);
  4129.  
  4130. - /* Shader working/shadow memory */
  4131. - snapshot = kgsl_snapshot_add_section(device,
  4132. + /* Skip shader memory dump for these chipsets: 8974, 8x26, 8x10 */
  4133. + if (adreno_is_a330(adreno_dev) ||
  4134. + adreno_is_a330v2(adreno_dev) ||
  4135. + adreno_is_a305b(adreno_dev) ||
4136. + adreno_is_a305c(adreno_dev)) {
  4137. + KGSL_DRV_ERR(device,
  4138. + "Skipping shader memory dump\n");
  4139. + } else {
  4140. + /* Shader working/shadow memory */
  4141. + snapshot = kgsl_snapshot_add_section(device,
  4142. KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
  4143. a3xx_snapshot_shader_memory, NULL);
  4144. + }
  4145.  
  4146.  
  4147. /* CP PFP and PM4 */
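Note: both snapshot hunks above repeat the same four adreno_is_*() checks before skipping a dump. A hypothetical consolidation follows; a3xx_skip_unsafe_dumps() is not a function in this driver, only the adreno_is_* accessors are.

    /* Hypothetical consolidation of the repeated chipset checks above
     * (the 8974/8x26/8x10-class parts called out in the comments).
     */
    static bool a3xx_skip_unsafe_dumps(struct adreno_device *adreno_dev)
    {
        return adreno_is_a330(adreno_dev) ||
               adreno_is_a330v2(adreno_dev) ||
               adreno_is_a305b(adreno_dev) ||
               adreno_is_a305c(adreno_dev);
    }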
  4148. diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
  4149. index bcab791..1527d83 100644
  4150. --- a/drivers/gpu/msm/adreno_dispatch.c
  4151. +++ b/drivers/gpu/msm/adreno_dispatch.c
  4152. @@ -1,4 +1,4 @@
  4153. -/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  4154. +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
  4155. *
  4156. * This program is free software; you can redistribute it and/or modify
  4157. * it under the terms of the GNU General Public License version 2 and
  4158. @@ -145,6 +145,27 @@ static int fault_detect_read_compare(struct kgsl_device *device)
  4159. return ret;
  4160. }
  4161.  
  4162. +static int _check_context_queue(struct adreno_context *drawctxt)
  4163. +{
  4164. + int ret;
  4165. +
  4166. + spin_lock(&drawctxt->lock);
  4167. +
  4168. + /*
  4169. + * Wake up if there is room in the context or if the whole thing got
  4170. + * invalidated while we were asleep
  4171. + */
  4172. +
  4173. + if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
  4174. + ret = 1;
  4175. + else
  4176. + ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
  4177. +
  4178. + spin_unlock(&drawctxt->lock);
  4179. +
  4180. + return ret;
  4181. +}
  4182. +
  4183. /**
  4184. * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
  4185. * @drawctxt: Pointer to the adreno draw context
  4186. @@ -221,13 +242,17 @@ static inline int adreno_dispatcher_requeue_cmdbatch(
  4187. struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
  4188. {
  4189. unsigned int prev;
  4190. + struct kgsl_device *device;
  4191. spin_lock(&drawctxt->lock);
  4192.  
  4193. if (kgsl_context_detached(&drawctxt->base) ||
  4194. drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
  4195. spin_unlock(&drawctxt->lock);
  4196. + device = cmdbatch->device;
  4197. /* get rid of this cmdbatch since the context is bad */
  4198. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4199. kgsl_cmdbatch_destroy(cmdbatch);
  4200. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4201. return -EINVAL;
  4202. }
  4203.  
  4204. @@ -414,7 +439,10 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
  4205. */
  4206.  
  4207. if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
  4208. + struct kgsl_device *device = cmdbatch->device;
  4209. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4210. kgsl_cmdbatch_destroy(cmdbatch);
  4211. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4212. continue;
  4213. }
  4214.  
  4215. @@ -441,12 +469,11 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
  4216. }
  4217.  
  4218. /*
  4219. - * If the context successfully submitted commands there will be room
  4220. - * in the context queue so wake up any snoozing threads that want to
  4221. - * submit commands
  4222. + * Wake up any snoozing threads if we have consumed any real commands
  4223. + * or marker commands and we have room in the context queue.
  4224. */
  4225.  
  4226. - if (count)
  4227. + if (_check_context_queue(drawctxt))
  4228. wake_up_all(&drawctxt->wq);
  4229.  
  4230. /*
  4231. @@ -569,27 +596,6 @@ int adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
  4232. return ret;
  4233. }
  4234.  
  4235. -static int _check_context_queue(struct adreno_context *drawctxt)
  4236. -{
  4237. - int ret;
  4238. -
  4239. - spin_lock(&drawctxt->lock);
  4240. -
  4241. - /*
  4242. - * Wake up if there is room in the context or if the whole thing got
  4243. - * invalidated while we were asleep
  4244. - */
  4245. -
  4246. - if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
  4247. - ret = 1;
  4248. - else
  4249. - ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
  4250. -
  4251. - spin_unlock(&drawctxt->lock);
  4252. -
  4253. - return ret;
  4254. -}
  4255. -
  4256. /**
  4257. * get_timestamp() - Return the next timestamp for the context
  4258. * @drawctxt - Pointer to an adreno draw context struct
  4259. @@ -931,11 +937,8 @@ static void remove_invalidated_cmdbatches(struct kgsl_device *device,
  4260. drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
  4261. replay[i] = NULL;
  4262.  
  4263. - kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4264. kgsl_cancel_events_timestamp(device, cmd->context,
  4265. cmd->timestamp);
  4266. - kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4267. -
  4268. kgsl_cmdbatch_destroy(cmd);
  4269. }
  4270. }
  4271. @@ -1084,7 +1087,8 @@ static int dispatcher_do_fault(struct kgsl_device *device)
  4272. kgsl_device_snapshot(device, 1);
  4273. }
  4274.  
  4275. - kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4276. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4277. +
  4278.  
  4279. /* Allocate memory to store the inflight commands */
  4280. replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
  4281. @@ -1092,6 +1096,7 @@ static int dispatcher_do_fault(struct kgsl_device *device)
  4282. if (replay == NULL) {
  4283. unsigned int ptr = dispatcher->head;
  4284.  
  4285. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4286. /* Recovery failed - mark everybody guilty */
  4287. mark_guilty_context(device, 0);
  4288.  
  4289. @@ -1111,6 +1116,7 @@ static int dispatcher_do_fault(struct kgsl_device *device)
  4290. */
  4291.  
  4292. count = 0;
  4293. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4294. goto replay;
  4295. }
  4296.  
  4297. @@ -1168,7 +1174,9 @@ static int dispatcher_do_fault(struct kgsl_device *device)
  4298. cmdbatch->context->id, cmdbatch->timestamp);
  4299.  
  4300. mark_guilty_context(device, cmdbatch->context->id);
  4301. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4302. adreno_drawctxt_invalidate(device, cmdbatch->context);
  4303. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4304. }
  4305.  
  4306. /*
  4307. @@ -1277,7 +1285,9 @@ static int dispatcher_do_fault(struct kgsl_device *device)
  4308. mark_guilty_context(device, cmdbatch->context->id);
  4309.  
  4310. /* Invalidate the context */
  4311. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4312. adreno_drawctxt_invalidate(device, cmdbatch->context);
  4313. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4314.  
  4315.  
  4316. replay:
  4317. @@ -1296,8 +1306,10 @@ replay:
  4318. /* If adreno_reset() fails then what hope do we have for the future? */
  4319. BUG_ON(ret);
  4320.  
  4321. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4322. /* Remove any pending command batches that have been invalidated */
  4323. remove_invalidated_cmdbatches(device, replay, count);
  4324. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4325.  
  4326. /* Replay the pending command buffers */
  4327. for (i = 0; i < count; i++) {
  4328. @@ -1339,9 +1351,11 @@ replay:
  4329. /* Mark this context as guilty (failed recovery) */
  4330. mark_guilty_context(device, replay[i]->context->id);
  4331.  
  4332. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4333. adreno_drawctxt_invalidate(device, replay[i]->context);
  4334. remove_invalidated_cmdbatches(device, &replay[i],
  4335. count - i);
  4336. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4337. }
  4338. }
  4339.  
  4340. @@ -1449,8 +1463,10 @@ static void adreno_dispatcher_work(struct work_struct *work)
  4341. dispatcher->head = CMDQUEUE_NEXT(dispatcher->head,
  4342. ADRENO_DISPATCH_CMDQUEUE_SIZE);
  4343.  
  4344. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4345. /* Destroy the retired command batch */
  4346. kgsl_cmdbatch_destroy(cmdbatch);
  4347. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4348.  
  4349. /* Update the expire time for the next command batch */
  4350.  
  4351. @@ -1695,16 +1711,19 @@ void adreno_dispatcher_stop(struct adreno_device *adreno_dev)
  4352. void adreno_dispatcher_close(struct adreno_device *adreno_dev)
  4353. {
  4354. struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
  4355. + struct kgsl_device *device = &adreno_dev->dev;
  4356.  
  4357. mutex_lock(&dispatcher->mutex);
  4358. del_timer_sync(&dispatcher->timer);
  4359. del_timer_sync(&dispatcher->fault_timer);
  4360.  
  4361. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4362. while (dispatcher->head != dispatcher->tail) {
  4363. kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
  4364. dispatcher->head = (dispatcher->head + 1)
  4365. % ADRENO_DISPATCH_CMDQUEUE_SIZE;
  4366. }
  4367. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4368.  
  4369. mutex_unlock(&dispatcher->mutex);
  4370.  
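Note: a recurring shape in the dispatcher changes above is taking the device mutex just around kgsl_cmdbatch_destroy(), presumably because the destroy path now ends up cancelling events that expect the mutex held. Since the same three lines appear at several call sites, a wrapper like the hypothetical one below would state the locking rule once (kgsl_cmdbatch_destroy_locked() is not part of the driver; kgsl_mutex_lock/unlock and mutex_owner are).

    /* Hypothetical convenience wrapper for the repeated lock/destroy/unlock
     * sequence introduced above.
     */
    static void kgsl_cmdbatch_destroy_locked(struct kgsl_device *device,
                                             struct kgsl_cmdbatch *cmdbatch)
    {
        kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
        kgsl_cmdbatch_destroy(cmdbatch);
        kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
    }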
  4371. diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
  4372. index db4aa6a..df9d6ec 100644
  4373. --- a/drivers/gpu/msm/adreno_drawctxt.c
  4374. +++ b/drivers/gpu/msm/adreno_drawctxt.c
  4375. @@ -367,14 +367,10 @@ int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
  4376. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4377.  
  4378. if (timeout) {
  4379. - ret = (int) wait_event_timeout(drawctxt->waiting,
  4380. + if (0 == (int) wait_event_timeout(drawctxt->waiting,
  4381. _check_global_timestamp(device, drawctxt, timestamp),
  4382. - msecs_to_jiffies(timeout));
  4383. -
  4384. - if (ret == 0)
  4385. + msecs_to_jiffies(timeout)))
  4386. ret = -ETIMEDOUT;
  4387. - else if (ret > 0)
  4388. - ret = 0;
  4389. } else {
  4390. wait_event(drawctxt->waiting,
  4391. _check_global_timestamp(device, drawctxt, timestamp));
  4392. @@ -598,9 +594,14 @@ int adreno_drawctxt_detach(struct kgsl_context *context)
  4393. */
  4394. BUG_ON(!mutex_is_locked(&device->mutex));
  4395.  
  4396. - /* Wait for the last global timestamp to pass before continuing */
  4397. + /* Wait for the last global timestamp to pass before continuing.
4398. + * The maximum wait time is 30s; some large IBs can take longer
4399. + * than 10s, and if a hang happens the time for the context's
4400. + * commands to retire will be greater than 10s. 30s should be
4401. + * sufficient time to wait for the commands even if a hang happens.
  4402. + */
  4403. ret = adreno_drawctxt_wait_global(adreno_dev, context,
  4404. - drawctxt->internal_timestamp, 10 * 1000);
  4405. + drawctxt->internal_timestamp, 30 * 1000);
  4406.  
  4407. /*
  4408. * If the wait for global fails then nothing after this point is likely
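Note: the adreno_drawctxt_wait_global() change above leans on the wait_event_timeout() return convention, 0 only on timeout and the remaining jiffies otherwise, so the old three-way mapping collapses into a single branch while ret keeps its prior value on success. The convention in isolation (wq, done and timeout_ms are placeholders):

    /* wait_event_timeout() returns 0 on timeout and the remaining jiffies
     * (> 0) if the condition became true, so error mapping is one branch.
     */
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static int wait_done_sketch(wait_queue_head_t *wq, bool *done,
                                unsigned int timeout_ms)
    {
        if (wait_event_timeout(*wq, *done, msecs_to_jiffies(timeout_ms)) == 0)
            return -ETIMEDOUT;
        return 0;   /* condition observed before the timeout */
    }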
  4409. diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
  4410. index bc37bf2..a11c63d 100644
  4411. --- a/drivers/gpu/msm/adreno_snapshot.c
  4412. +++ b/drivers/gpu/msm/adreno_snapshot.c
  4413. @@ -599,11 +599,8 @@ static int ib_add_gpu_object(struct kgsl_device *device, phys_addr_t ptbase,
  4414. unsigned int gpuaddr = src[i + 1];
  4415. unsigned int size = src[i + 2];
  4416.  
  4417. - ret = parse_ib(device, ptbase, gpuaddr, size);
  4418. + parse_ib(device, ptbase, gpuaddr, size);
  4419.  
  4420. - /* If adding the IB failed then stop parsing */
  4421. - if (ret < 0)
  4422. - goto done;
  4423. } else {
  4424. ret = ib_parse_type3(device, &src[i], ptbase);
  4425. /*
  4426. @@ -929,15 +926,16 @@ static int snapshot_ib(struct kgsl_device *device, void *snapshot,
  4427. if ((obj->dwords - i) < type3_pkt_size(*src) + 1)
  4428. continue;
  4429.  
  4430. - if (adreno_cmd_is_ib(*src))
  4431. - ret = parse_ib(device, obj->ptbase, src[1],
  4432. + if (adreno_cmd_is_ib(*src)) {
  4433. + parse_ib(device, obj->ptbase, src[1],
  4434. src[2]);
  4435. - else
  4436. + } else {
  4437. ret = ib_parse_type3(device, src, obj->ptbase);
  4438.  
  4439. - /* Stop parsing if the type3 decode fails */
  4440. - if (ret < 0)
  4441. - break;
  4442. + /* Stop parsing if the type3 decode fails */
  4443. + if (ret < 0)
  4444. + break;
  4445. + }
  4446. }
  4447. }
  4448.  
  4449. diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
  4450. index e2ddde9..0b73419 100644
  4451. --- a/drivers/gpu/msm/kgsl.c
  4452. +++ b/drivers/gpu/msm/kgsl.c
  4453. @@ -2311,8 +2311,11 @@ free_cmdbatch:
  4454. * -EPROTO is a "success" error - it just tells the user that the
  4455. * context had previously faulted
  4456. */
  4457. - if (result && result != -EPROTO)
  4458. + if (result && result != -EPROTO) {
  4459. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4460. kgsl_cmdbatch_destroy(cmdbatch);
  4461. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4462. + }
  4463.  
  4464. done:
  4465. kgsl_context_put(context);
  4466. @@ -2363,8 +2366,11 @@ free_cmdbatch:
  4467. * -EPROTO is a "success" error - it just tells the user that the
  4468. * context had previously faulted
  4469. */
  4470. - if (result && result != -EPROTO)
  4471. + if (result && result != -EPROTO) {
  4472. + kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  4473. kgsl_cmdbatch_destroy(cmdbatch);
  4474. + kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  4475. + }
  4476.  
  4477. done:
  4478. kgsl_context_put(context);
  4479. diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
  4480. index cf03c7f..5763971 100644
  4481. --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
  4482. +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
  4483. @@ -1,4 +1,4 @@
  4484. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  4485. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  4486. *
  4487. * This program is free software; you can redistribute it and/or modify
  4488. * it under the terms of the GNU General Public License version 2 and
  4489. @@ -19,11 +19,11 @@
  4490. #include <linux/io.h>
  4491. #include <linux/list.h>
  4492. #include <linux/delay.h>
  4493. +#include <linux/avtimer_kernel.h>
  4494. #include <media/v4l2-subdev.h>
  4495. #include <media/msmb_isp.h>
  4496. #include <mach/msm_bus.h>
  4497. #include <mach/msm_bus_board.h>
  4498. -
  4499. #include "msm_buf_mgr.h"
  4500.  
  4501. #define MAX_IOMMU_CTX 2
  4502. @@ -49,6 +49,16 @@ struct vfe_subscribe_info {
  4503. uint32_t active;
  4504. };
  4505.  
  4506. +enum msm_isp_pack_fmt {
  4507. + QCOM,
  4508. + MIPI,
  4509. + DPCM6,
  4510. + DPCM8,
  4511. + PLAIN8,
  4512. + PLAIN16,
  4513. + MAX_ISP_PACK_FMT,
  4514. +};
  4515. +
  4516. enum msm_isp_camif_update_state {
  4517. NO_UPDATE,
  4518. ENABLE_CAMIF,
  4519. @@ -56,9 +66,17 @@ enum msm_isp_camif_update_state {
  4520. DISABLE_CAMIF_IMMEDIATELY
  4521. };
  4522.  
  4523. +enum msm_isp_reset_type {
  4524. + ISP_RST_HARD,
  4525. + ISP_RST_SOFT,
  4526. + ISP_RST_MAX
  4527. +};
  4528. +
  4529. struct msm_isp_timestamp {
  4530. /*Monotonic clock for v4l2 buffer*/
  4531. struct timeval buf_time;
  4532. + /*Monotonic clock for VT */
  4533. + struct timeval vt_time;
  4534. /*Wall clock for userspace event*/
  4535. struct timeval event_time;
  4536. };
  4537. @@ -89,8 +107,9 @@ struct msm_vfe_axi_ops {
  4538. uint32_t reload_mask);
  4539. void (*enable_wm) (struct vfe_device *vfe_dev,
  4540. uint8_t wm_idx, uint8_t enable);
  4541. - void (*cfg_io_format) (struct vfe_device *vfe_dev,
  4542. - struct msm_vfe_axi_stream *stream_info);
  4543. + int32_t (*cfg_io_format) (struct vfe_device *vfe_dev,
  4544. + enum msm_vfe_axi_stream_src stream_src,
  4545. + uint32_t io_format);
  4546. void (*cfg_framedrop) (struct vfe_device *vfe_dev,
  4547. struct msm_vfe_axi_stream *stream_info);
  4548. void (*clear_framedrop) (struct vfe_device *vfe_dev,
  4549. @@ -124,12 +143,13 @@ struct msm_vfe_axi_ops {
  4550. uint32_t (*get_wm_mask) (uint32_t irq_status0, uint32_t irq_status1);
  4551. uint32_t (*get_comp_mask) (uint32_t irq_status0, uint32_t irq_status1);
  4552. uint32_t (*get_pingpong_status) (struct vfe_device *vfe_dev);
  4553. - long (*halt) (struct vfe_device *vfe_dev);
  4554. + long (*halt) (struct vfe_device *vfe_dev, uint32_t blocking);
  4555. };
  4556.  
  4557. struct msm_vfe_core_ops {
  4558. void (*reg_update) (struct vfe_device *vfe_dev);
  4559. - long (*reset_hw) (struct vfe_device *vfe_dev);
  4560. + long (*reset_hw) (struct vfe_device *vfe_dev,
  4561. + enum msm_isp_reset_type reset_type, uint32_t blocking);
  4562. int (*init_hw) (struct vfe_device *vfe_dev);
  4563. void (*init_hw_reg) (struct vfe_device *vfe_dev);
  4564. void (*release_hw) (struct vfe_device *vfe_dev);
  4565. @@ -143,6 +163,12 @@ struct msm_vfe_core_ops {
  4566. int (*get_platform_data) (struct vfe_device *vfe_dev);
  4567. void (*get_error_mask) (uint32_t *error_mask0, uint32_t *error_mask1);
  4568. void (*process_error_status) (struct vfe_device *vfe_dev);
  4569. + void (*get_overflow_mask) (uint32_t *overflow_mask);
  4570. + void (*get_irq_mask) (struct vfe_device *vfe_dev,
  4571. + uint32_t *irq0_mask, uint32_t *irq1_mask);
  4572. + void (*restore_irq_mask) (struct vfe_device *vfe_dev);
  4573. + void (*get_halt_restart_mask) (uint32_t *irq0_mask,
  4574. + uint32_t *irq1_mask);
  4575. };
  4576. struct msm_vfe_stats_ops {
  4577. int (*get_stats_idx) (enum msm_isp_stats_type stats_type);
  4578. @@ -252,7 +278,7 @@ struct msm_vfe_axi_stream {
  4579. uint32_t stream_handle;
  4580. uint8_t buf_divert;
  4581. enum msm_vfe_axi_stream_type stream_type;
  4582. -
  4583. + uint32_t vt_enable;
  4584. uint32_t frame_based;
  4585. uint32_t framedrop_period;
  4586. uint32_t framedrop_pattern;
  4587. @@ -274,6 +300,15 @@ struct msm_vfe_axi_stream {
  4588. uint32_t runtime_num_burst_capture;
  4589. uint8_t runtime_framedrop_update;
  4590. uint32_t runtime_output_format;
  4591. + enum msm_vfe_frame_skip_pattern frame_skip_pattern;
  4592. +
  4593. +};
  4594. +
  4595. +enum msm_vfe_overflow_state {
  4596. + NO_OVERFLOW,
  4597. + OVERFLOW_DETECTED,
  4598. + HALT_REQUESTED,
  4599. + RESTART_REQUESTED,
  4600. };
  4601.  
  4602. struct msm_vfe_axi_composite_info {
  4603. @@ -289,6 +324,8 @@ struct msm_vfe_src_info {
  4604. enum msm_vfe_inputmux input_mux;
  4605. uint32_t width;
  4606. long pixel_clock;
  4607. + uint32_t input_format;/*V4L2 pix format with bayer pattern*/
  4608. + uint32_t last_updt_frm_id;
  4609. };
  4610.  
  4611. enum msm_wm_ub_cfg_type {
  4612. @@ -314,6 +351,7 @@ struct msm_vfe_axi_shared_data {
  4613. struct msm_vfe_src_info src_info[VFE_SRC_MAX];
  4614. uint16_t stream_handle_cnt;
  4615. unsigned long event_mask;
  4616. + uint32_t burst_len;
  4617. };
  4618.  
  4619. struct msm_vfe_stats_hardware_info {
  4620. @@ -355,6 +393,8 @@ struct msm_vfe_stats_shared_data {
  4621. atomic_t stats_comp_mask;
  4622. uint16_t stream_handle_cnt;
  4623. atomic_t stats_update;
  4624. + uint32_t stats_mask;
  4625. + uint32_t stats_burst_len;
  4626. };
  4627.  
  4628. struct msm_vfe_tasklet_queue_cmd {
  4629. @@ -368,6 +408,9 @@ struct msm_vfe_tasklet_queue_cmd {
  4630. #define MSM_VFE_TASKLETQ_SIZE 200
  4631.  
  4632. struct msm_vfe_error_info {
  4633. + atomic_t overflow_state;
  4634. + uint32_t overflow_recover_irq_mask0;
  4635. + uint32_t overflow_recover_irq_mask1;
  4636. uint32_t error_mask0;
  4637. uint32_t error_mask1;
  4638. uint32_t violation_status;
  4639. @@ -378,16 +421,41 @@ struct msm_vfe_error_info {
  4640. uint32_t error_count;
  4641. };
  4642.  
  4643. +struct msm_vfe_frame_ts {
  4644. + struct timeval buf_time;
  4645. + uint32_t frame_id;
  4646. +};
  4647. +
  4648. +struct msm_isp_statistics {
  4649. + int32_t imagemaster0_overflow;
  4650. + int32_t imagemaster1_overflow;
  4651. + int32_t imagemaster2_overflow;
  4652. + int32_t imagemaster3_overflow;
  4653. + int32_t imagemaster4_overflow;
  4654. + int32_t imagemaster5_overflow;
  4655. + int32_t imagemaster6_overflow;
  4656. + int32_t be_overflow;
  4657. + int32_t bg_overflow;
  4658. + int32_t bf_overflow;
  4659. + int32_t awb_overflow;
  4660. + int32_t rs_overflow;
  4661. + int32_t cs_overflow;
  4662. + int32_t ihist_overflow;
  4663. + int32_t skinbhist_overflow;
  4664. +};
  4665. +
  4666. struct vfe_device {
  4667. struct platform_device *pdev;
  4668. struct msm_sd_subdev subdev;
  4669. struct resource *vfe_irq;
  4670. struct resource *vfe_mem;
  4671. struct resource *vfe_vbif_mem;
  4672. + struct resource *tcsr_mem;
  4673. struct resource *vfe_io;
  4674. struct resource *vfe_vbif_io;
  4675. void __iomem *vfe_base;
  4676. void __iomem *vfe_vbif_base;
  4677. + void __iomem *tcsr_base;
  4678.  
  4679. struct device *iommu_ctx[MAX_IOMMU_CTX];
  4680.  
  4681. @@ -410,16 +478,22 @@ struct vfe_device {
  4682. struct list_head tasklet_q;
  4683. struct tasklet_struct vfe_tasklet;
  4684. struct msm_vfe_tasklet_queue_cmd
  4685. - tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
  4686. -
  4687. + tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
  4688. + uint32_t soc_hw_version;
  4689. uint32_t vfe_hw_version;
  4690. struct msm_vfe_hardware_info *hw_info;
  4691. struct msm_vfe_axi_shared_data axi_data;
  4692. struct msm_vfe_stats_shared_data stats_data;
  4693. struct msm_vfe_error_info error_info;
  4694. + struct msm_vfe_frame_ts frame_ts;
  4695. struct msm_isp_buf_mgr *buf_mgr;
  4696. int dump_reg;
  4697. + int vfe_clk_idx;
  4698. uint32_t vfe_open_cnt;
  4699. + uint8_t vt_enable;
  4700. + uint8_t ignore_error;
  4701. + struct msm_isp_statistics *stats;
  4702. + uint32_t vfe_ub_size;
  4703. };
  4704.  
  4705. #endif
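
The header changes above introduce enum msm_vfe_overflow_state together with an atomic overflow_state and two saved overflow_recover_irq_mask fields in msm_vfe_error_info, which implies a small bus-overflow recovery state machine: NO_OVERFLOW -> OVERFLOW_DETECTED -> HALT_REQUESTED -> RESTART_REQUESTED -> back to NO_OVERFLOW. The following is a minimal standalone C model of that transition order only; the helper name overflow_fsm_step and the trigger comments are assumptions for illustration, not the driver's actual IRQ/tasklet handler.

/* Minimal standalone model of the overflow-recovery states added above.
 * Hypothetical helper; the real driver drives this with atomics from
 * the interrupt path. Build with: gcc -o fsm fsm.c */
#include <stdio.h>

enum msm_vfe_overflow_state {
	NO_OVERFLOW,
	OVERFLOW_DETECTED,
	HALT_REQUESTED,
	RESTART_REQUESTED,
};

/* Advance one step; returns the new state. */
static enum msm_vfe_overflow_state
overflow_fsm_step(enum msm_vfe_overflow_state cur)
{
	switch (cur) {
	case NO_OVERFLOW:       return OVERFLOW_DETECTED; /* bus overflow IRQ seen */
	case OVERFLOW_DETECTED: return HALT_REQUESTED;    /* AXI halt issued */
	case HALT_REQUESTED:    return RESTART_REQUESTED; /* halt done, soft reset issued */
	case RESTART_REQUESTED: return NO_OVERFLOW;       /* IRQ masks restored, streaming resumes */
	}
	return NO_OVERFLOW;
}

int main(void)
{
	enum msm_vfe_overflow_state s = NO_OVERFLOW;
	int i;
	for (i = 0; i < 5; i++) {
		printf("state=%d\n", s);
		s = overflow_fsm_step(s);
	}
	return 0;
}
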
  4706. diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
  4707. index 73b4f4d..263d54d 100644
  4708. --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
  4709. +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
  4710. @@ -1,4 +1,4 @@
  4711. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  4712. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  4713. *
  4714. * This program is free software; you can redistribute it and/or modify
  4715. * it under the terms of the GNU General Public License version 2 and
  4716. @@ -22,9 +22,10 @@
  4717. #include "msm.h"
  4718. #include "msm_camera_io_util.h"
  4719.  
  4720. -#define VFE32_BURST_LEN 3
  4721. +#define VFE32_BURST_LEN 2
  4722. #define VFE32_UB_SIZE 1024
  4723. -#define VFE32_EQUAL_SLICE_UB 204
  4724. +#define VFE32_EQUAL_SLICE_UB 194
  4725. +#define VFE32_AXI_SLICE_UB 792
  4726. #define VFE32_WM_BASE(idx) (0x4C + 0x18 * idx)
  4727. #define VFE32_RDI_BASE(idx) (idx ? 0x734 + 0x4 * (idx - 1) : 0x06FC)
  4728. #define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
  4729. @@ -40,7 +41,19 @@
  4730. (~(ping_pong >> (idx + VFE32_STATS_PING_PONG_OFFSET)) & 0x1))
  4731.  
  4732. #define VFE32_CLK_IDX 0
  4733. -static struct msm_cam_clk_info msm_vfe32_clk_info[] = {
  4734. +#define MSM_ISP32_TOTAL_WM_UB 792
  4735. +
  4736. +static struct msm_cam_clk_info msm_vfe32_1_clk_info[] = {
  4737. + /*vfe32 clock info for B-family: 8610 */
  4738. + {"vfe_clk_src", 266670000},
  4739. + {"vfe_clk", -1},
  4740. + {"vfe_ahb_clk", -1},
  4741. + {"csi_vfe_clk", -1},
  4742. + {"bus_clk", -1},
  4743. +};
  4744. +
  4745. +static struct msm_cam_clk_info msm_vfe32_2_clk_info[] = {
  4746. + /*vfe32 clock info for A-family: 8960 */
  4747. {"vfe_clk", 266667000},
  4748. {"vfe_pclk", -1},
  4749. {"csi_vfe_clk", -1},
  4750. @@ -49,6 +62,7 @@ static struct msm_cam_clk_info msm_vfe32_clk_info[] = {
  4751. static int msm_vfe32_init_hardware(struct vfe_device *vfe_dev)
  4752. {
  4753. int rc = -1;
  4754. + vfe_dev->vfe_clk_idx = 0;
  4755. rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
  4756. if (rc < 0) {
  4757. pr_err("%s: Bandwidth registration Failed!\n", __func__);
  4758. @@ -62,11 +76,21 @@ static int msm_vfe32_init_hardware(struct vfe_device *vfe_dev)
  4759. goto fs_failed;
  4760. }
  4761. }
  4762. + else
  4763. + goto fs_failed;
  4764.  
  4765. - rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe32_clk_info,
  4766. - vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe32_clk_info), 1);
  4767. - if (rc < 0)
  4768. - goto clk_enable_failed;
  4769. + rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe32_1_clk_info,
  4770. + vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe32_1_clk_info), 1);
  4771. + if (rc < 0) {
  4772. + rc = msm_cam_clk_enable(&vfe_dev->pdev->dev,
  4773. + msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
  4774. + ARRAY_SIZE(msm_vfe32_2_clk_info), 1);
  4775. + if (rc < 0)
  4776. + goto clk_enable_failed;
  4777. + else
  4778. + vfe_dev->vfe_clk_idx = 2;
  4779. + } else
  4780. + vfe_dev->vfe_clk_idx = 1;
  4781.  
  4782. vfe_dev->vfe_base = ioremap(vfe_dev->vfe_mem->start,
  4783. resource_size(vfe_dev->vfe_mem));
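
The init_hardware hunk above probes two clock tables: it first tries the B-family (8610) names in msm_vfe32_1_clk_info and, if that fails, falls back to the A-family (8960) names in msm_vfe32_2_clk_info, recording the winner in vfe_clk_idx so the error and release paths can disable the matching table. Below is a standalone sketch of the same try-then-fallback pattern; enable_b_family()/enable_a_family() are made-up stand-ins for msm_cam_clk_enable() with the two tables.

/* Standalone sketch of the two-table clock bring-up used above. */
#include <stdio.h>

static int enable_b_family(void) { return -1; } /* pretend 8610 clock names are absent */
static int enable_a_family(void) { return 0; }  /* 8960 clock names present */

int main(void)
{
	int clk_idx = 0; /* mirrors vfe_dev->vfe_clk_idx */
	int rc;

	rc = enable_b_family();
	if (rc < 0) {
		rc = enable_a_family();
		if (rc < 0) {
			fprintf(stderr, "no usable clock table\n");
			return 1;
		}
		clk_idx = 2; /* A-family table enabled */
	} else {
		clk_idx = 1; /* B-family table enabled */
	}
	printf("vfe_clk_idx=%d\n", clk_idx);
	return 0; /* teardown must disable the table recorded in clk_idx */
}
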
  4784. @@ -87,8 +111,14 @@ static int msm_vfe32_init_hardware(struct vfe_device *vfe_dev)
  4785. irq_req_failed:
  4786. iounmap(vfe_dev->vfe_base);
  4787. vfe_remap_failed:
  4788. - msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe32_clk_info,
  4789. - vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe32_clk_info), 0);
  4790. + if (vfe_dev->vfe_clk_idx == 1)
  4791. + msm_cam_clk_enable(&vfe_dev->pdev->dev,
  4792. + msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
  4793. + ARRAY_SIZE(msm_vfe32_1_clk_info), 0);
  4794. + if (vfe_dev->vfe_clk_idx == 2)
  4795. + msm_cam_clk_enable(&vfe_dev->pdev->dev,
  4796. + msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
  4797. + ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
  4798. clk_enable_failed:
  4799. regulator_disable(vfe_dev->fs_vfe);
  4800. fs_failed:
  4801. @@ -102,8 +132,14 @@ static void msm_vfe32_release_hardware(struct vfe_device *vfe_dev)
  4802. free_irq(vfe_dev->vfe_irq->start, vfe_dev);
  4803. tasklet_kill(&vfe_dev->vfe_tasklet);
  4804. iounmap(vfe_dev->vfe_base);
  4805. - msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe32_clk_info,
  4806. - vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe32_clk_info), 0);
  4807. + if (vfe_dev->vfe_clk_idx == 1)
  4808. + msm_cam_clk_enable(&vfe_dev->pdev->dev,
  4809. + msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
  4810. + ARRAY_SIZE(msm_vfe32_1_clk_info), 0);
  4811. + if (vfe_dev->vfe_clk_idx == 2)
  4812. + msm_cam_clk_enable(&vfe_dev->pdev->dev,
  4813. + msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
  4814. + ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
  4815. regulator_disable(vfe_dev->fs_vfe);
  4816. msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
  4817. }
  4818. @@ -113,11 +149,17 @@ static void msm_vfe32_init_hardware_reg(struct vfe_device *vfe_dev)
  4819. /* CGC_OVERRIDE */
  4820. msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
  4821. /* BUS_CFG */
  4822. - msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x3C);
  4823. + msm_camera_io_w(0x00000009, vfe_dev->vfe_base + 0x3C);
  4824. msm_camera_io_w(0x01000025, vfe_dev->vfe_base + 0x1C);
  4825. - msm_camera_io_w_mb(0x1DFFFFFF, vfe_dev->vfe_base + 0x20);
  4826. + msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
  4827. msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
  4828. msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
  4829. + msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x6FC);
  4830. + msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(1));
  4831. + msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(2));
  4832. + msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(0));
  4833. + msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(4));
  4834. +
  4835. }
  4836.  
  4837. static void msm_vfe32_process_reset_irq(struct vfe_device *vfe_dev,
  4838. @@ -130,23 +172,21 @@ static void msm_vfe32_process_reset_irq(struct vfe_device *vfe_dev,
  4839. static void msm_vfe32_process_halt_irq(struct vfe_device *vfe_dev,
  4840. uint32_t irq_status0, uint32_t irq_status1)
  4841. {
  4842. - if (irq_status1 & BIT(24))
  4843. - complete(&vfe_dev->halt_complete);
  4844. }
  4845.  
  4846. static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
  4847. uint32_t irq_status0, uint32_t irq_status1,
  4848. struct msm_isp_timestamp *ts)
  4849. {
  4850. + uint32_t cnt;
  4851. if (!(irq_status0 & 0x1F))
  4852. return;
  4853.  
  4854. if (irq_status0 & BIT(0)) {
  4855. ISP_DBG("%s: SOF IRQ\n", __func__);
  4856. - if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
  4857. - && vfe_dev->axi_data.src_info[VFE_PIX_0].
  4858. - pix_stream_count == 0) {
  4859. - msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
  4860. + cnt = vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count;
  4861. + if (cnt > 0) {
  4862. + msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
  4863. if (vfe_dev->axi_data.stream_update)
  4864. msm_isp_axi_stream_update(vfe_dev);
  4865. msm_isp_update_framedrop_reg(vfe_dev);
  4866. @@ -215,7 +255,6 @@ static void msm_vfe32_process_violation_status(struct vfe_device *vfe_dev)
  4867. static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
  4868. {
  4869. uint32_t error_status1 = vfe_dev->error_info.error_mask1;
  4870. -
  4871. if (error_status1 & BIT(0))
  4872. pr_err("%s: camif error status: 0x%x\n",
  4873. __func__, vfe_dev->error_info.camif_status);
  4874. @@ -235,34 +274,62 @@ static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
  4875. pr_err("%s: violation\n", __func__);
  4876. msm_vfe32_process_violation_status(vfe_dev);
  4877. }
  4878. - if (error_status1 & BIT(8))
  4879. + if (error_status1 & BIT(8)) {
  4880. + vfe_dev->stats->imagemaster0_overflow++;
  4881. pr_err("%s: image master 0 bus overflow\n", __func__);
  4882. - if (error_status1 & BIT(9))
  4883. + }
  4884. + if (error_status1 & BIT(9)) {
  4885. + vfe_dev->stats->imagemaster1_overflow++;
  4886. pr_err("%s: image master 1 bus overflow\n", __func__);
  4887. - if (error_status1 & BIT(10))
  4888. + }
  4889. + if (error_status1 & BIT(10)) {
  4890. + vfe_dev->stats->imagemaster2_overflow++;
  4891. pr_err("%s: image master 2 bus overflow\n", __func__);
  4892. - if (error_status1 & BIT(11))
  4893. + }
  4894. + if (error_status1 & BIT(11)) {
  4895. + vfe_dev->stats->imagemaster3_overflow++;
  4896. pr_err("%s: image master 3 bus overflow\n", __func__);
  4897. - if (error_status1 & BIT(12))
  4898. + }
  4899. + if (error_status1 & BIT(12)) {
  4900. + vfe_dev->stats->imagemaster4_overflow++;
  4901. pr_err("%s: image master 4 bus overflow\n", __func__);
  4902. - if (error_status1 & BIT(13))
  4903. + }
  4904. + if (error_status1 & BIT(13)) {
  4905. + vfe_dev->stats->imagemaster5_overflow++;
  4906. pr_err("%s: image master 5 bus overflow\n", __func__);
  4907. - if (error_status1 & BIT(14))
  4908. + }
  4909. + if (error_status1 & BIT(14)) {
  4910. + vfe_dev->stats->imagemaster6_overflow++;
  4911. pr_err("%s: image master 6 bus overflow\n", __func__);
  4912. - if (error_status1 & BIT(15))
  4913. + }
  4914. + if (error_status1 & BIT(15)) {
  4915. + vfe_dev->stats->bg_overflow++;
  4916. pr_err("%s: status ae/bg bus overflow\n", __func__);
  4917. - if (error_status1 & BIT(16))
  4918. + }
  4919. + if (error_status1 & BIT(16)) {
  4920. + vfe_dev->stats->bf_overflow++;
  4921. pr_err("%s: status af/bf bus overflow\n", __func__);
  4922. - if (error_status1 & BIT(17))
  4923. + }
  4924. + if (error_status1 & BIT(17)) {
  4925. + vfe_dev->stats->awb_overflow++;
  4926. pr_err("%s: status awb bus overflow\n", __func__);
  4927. - if (error_status1 & BIT(18))
  4928. + }
  4929. + if (error_status1 & BIT(18)) {
  4930. + vfe_dev->stats->rs_overflow++;
  4931. pr_err("%s: status rs bus overflow\n", __func__);
  4932. - if (error_status1 & BIT(19))
  4933. + }
  4934. + if (error_status1 & BIT(19)) {
  4935. + vfe_dev->stats->cs_overflow++;
  4936. pr_err("%s: status cs bus overflow\n", __func__);
  4937. - if (error_status1 & BIT(20))
  4938. + }
  4939. + if (error_status1 & BIT(20)) {
  4940. + vfe_dev->stats->ihist_overflow++;
  4941. pr_err("%s: status ihist bus overflow\n", __func__);
  4942. - if (error_status1 & BIT(21))
  4943. + }
  4944. + if (error_status1 & BIT(21)) {
  4945. + vfe_dev->stats->skinbhist_overflow++;
  4946. pr_err("%s: status skin bhist bus overflow\n", __func__);
  4947. + }
  4948. if (error_status1 & BIT(22))
  4949. pr_err("%s: axi error\n", __func__);
  4950. }
  4951. @@ -273,7 +340,7 @@ static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
  4952. *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
  4953. *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
  4954. msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x24);
  4955. - msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x28);
  4956. + msm_camera_io_w_mb(*irq_status1, vfe_dev->vfe_base + 0x28);
  4957. msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x18);
  4958.  
  4959. if (*irq_status1 & BIT(0))
  4960. @@ -319,18 +386,48 @@ static void msm_vfe32_reg_update(
  4961. msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
  4962. }
  4963.  
  4964. -static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev)
  4965. +static uint32_t msm_vfe32_reset_values[ISP_RST_MAX] =
  4966. {
  4967. - init_completion(&vfe_dev->reset_complete);
  4968. - msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
  4969. - return wait_for_completion_interruptible_timeout(
  4970. - &vfe_dev->reset_complete, msecs_to_jiffies(50));
  4971. + 0x3FF, /* ISP_RST_HARD reset everything */
  4972. + 0x3EF /* ISP_RST_SOFT reset all modules except register config */
  4973. +};
  4974. +
  4975. +static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev,
  4976. + enum msm_isp_reset_type reset_type, uint32_t blocking)
  4977. +{
  4978. +
  4979. + uint32_t rst_val;
  4980. + long rc = 0;
  4981. + if (reset_type >= ISP_RST_MAX) {
  4982. + pr_err("%s: Error Invalid parameter\n", __func__);
  4983. + reset_type = ISP_RST_HARD;
  4984. + }
  4985. + rst_val = msm_vfe32_reset_values[reset_type];
  4986. + if (blocking) {
  4987. + init_completion(&vfe_dev->reset_complete);
  4988. + msm_camera_io_w_mb(rst_val, vfe_dev->vfe_base + 0x4);
  4989. + rc = wait_for_completion_timeout(
  4990. + &vfe_dev->reset_complete, msecs_to_jiffies(50));
  4991. + } else {
  4992. + msm_camera_io_w_mb(0x3EF, vfe_dev->vfe_base + 0x4);
  4993. + }
  4994. + return rc;
  4995. }
  4996.  
  4997. static void msm_vfe32_axi_reload_wm(
  4998. struct vfe_device *vfe_dev, uint32_t reload_mask)
  4999. {
  5000. - msm_camera_io_w_mb(reload_mask, vfe_dev->vfe_base + 0x38);
  5001. + if (!vfe_dev->pdev->dev.of_node) {
  5002. + /*vfe32 A-family: 8960*/
  5003. + msm_camera_io_w_mb(reload_mask, vfe_dev->vfe_base + 0x38);
  5004. + } else {
  5005. + /*vfe32 B-family: 8610*/
  5006. + msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x28);
  5007. + msm_camera_io_w(0x1C800000, vfe_dev->vfe_base + 0x20);
  5008. + msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x18);
  5009. + msm_camera_io_w(0x9AAAAAAA, vfe_dev->vfe_base + 0x600);
  5010. + msm_camera_io_w(reload_mask, vfe_dev->vfe_base + 0x38);
  5011. + }
  5012. }
  5013.  
  5014. static void msm_vfe32_axi_enable_wm(struct vfe_device *vfe_dev,
  5015. @@ -440,12 +537,17 @@ static void msm_vfe32_clear_framedrop(struct vfe_device *vfe_dev,
  5016. }
  5017. }
  5018.  
  5019. -static void msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
  5020. - struct msm_vfe_axi_stream *stream_info)
  5021. +static int32_t msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
  5022. + enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
  5023. {
  5024. - int bpp, bpp_reg = 0;
  5025. + int bpp, bpp_reg = 0, pack_fmt = 0, pack_reg = 0;
  5026. uint32_t io_format_reg;
  5027. - bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
  5028. + bpp = msm_isp_get_bit_per_pixel(io_format);
  5029. + if (bpp < 0) {
  5030. + pr_err("%s:%d invalid io_format %d bpp %d\n", __func__, __LINE__,
  5031. + io_format, bpp);
  5032. + return -EINVAL;
  5033. + }
  5034.  
  5035. switch (bpp) {
  5036. case 8:
  5037. @@ -457,27 +559,59 @@ static void msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
  5038. case 12:
  5039. bpp_reg = 1 << 1;
  5040. break;
  5041. + default:
  5042. + pr_err("%s:%d invalid bpp %d\n", __func__, __LINE__, bpp);
  5043. + return -EINVAL;
  5044. + }
  5045. +
  5046. + if (stream_src == IDEAL_RAW) {
  5047. + pack_fmt = msm_isp_get_pack_format(io_format);
  5048. + switch (pack_fmt) {
  5049. + case QCOM:
  5050. + pack_reg = 0x0;
  5051. + break;
  5052. + case MIPI:
  5053. + pack_reg = 0x1;
  5054. + break;
  5055. + case DPCM6:
  5056. + pack_reg = 0x2;
  5057. + break;
  5058. + case DPCM8:
  5059. + pack_reg = 0x3;
  5060. + break;
  5061. + case PLAIN8:
  5062. + pack_reg = 0x4;
  5063. + break;
  5064. + case PLAIN16:
  5065. + pack_reg = 0x5;
  5066. + break;
  5067. + default:
  5068. + pr_err("%s: invalid pack fmt!\n", __func__);
  5069. + return -EINVAL;
  5070. + }
  5071. }
  5072. +
  5073. io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x6F8);
  5074. - switch (stream_info->stream_src) {
  5075. + switch (stream_src) {
  5076. + case PIX_ENCODER:
  5077. + case PIX_VIEWFINDER:
  5078. case CAMIF_RAW:
  5079. io_format_reg &= 0xFFFFCFFF;
  5080. io_format_reg |= bpp_reg << 12;
  5081. break;
  5082. case IDEAL_RAW:
  5083. io_format_reg &= 0xFFFFFFC8;
  5084. - io_format_reg |= bpp_reg << 4;
  5085. + io_format_reg |= bpp_reg << 4 | pack_reg;
  5086. break;
  5087. - case PIX_ENCODER:
  5088. - case PIX_VIEWFINDER:
  5089. case RDI_INTF_0:
  5090. case RDI_INTF_1:
  5091. case RDI_INTF_2:
  5092. default:
  5093. pr_err("%s: Invalid stream source\n", __func__);
  5094. - return;
  5095. + return -EINVAL;
  5096. }
  5097. msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x6F8);
  5098. + return 0;
  5099. }
  5100.  
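
The reworked cfg_io_format above derives bits-per-pixel from the V4L2 format and, for IDEAL_RAW, a pack-format code, then read-modify-writes them into the IO_FORMAT register: bpp lands in bits 13:12 for the PIX/CAMIF path (mask 0xFFFFCFFF) and in bits 5:4 for IDEAL_RAW, with the pack code in bits 2:0 (mask 0xFFFFFFC8). A standalone sketch of the IDEAL_RAW field composition follows; the mask, shifts, and the 12-bpp code come from the hunk above, the starting register value is invented.

/* Standalone sketch of the IDEAL_RAW bit packing in cfg_io_format(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t io_format_reg = 0x00003035; /* hypothetical current register value */
	uint32_t bpp_reg = 1 << 1;           /* 12 bpp, as in the switch above */
	uint32_t pack_reg = 0x1;             /* MIPI packing */

	io_format_reg &= 0xFFFFFFC8;         /* clear bpp (bits 5:4) and pack (bits 2:0) */
	io_format_reg |= bpp_reg << 4 | pack_reg;

	printf("IO_FORMAT = 0x%08X\n", io_format_reg);
	return 0;
}
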
  5101. static void msm_vfe32_cfg_camif(struct vfe_device *vfe_dev,
  5102. @@ -531,13 +665,14 @@ static void msm_vfe32_update_camif_state(
  5103. val &= 0xFFFFFF3F;
  5104. val = val | bus_en << 7 | vfe_en << 6;
  5105. msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
  5106. + msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x1E0);
  5107. msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1E0);
  5108. vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
  5109. } else if (update_state == DISABLE_CAMIF) {
  5110. msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1E0);
  5111. vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
  5112. } else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
  5113. - msm_camera_io_w_mb(0x2, vfe_dev->vfe_base + 0x1E0);
  5114. + msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x1E0);
  5115. vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
  5116. }
  5117. }
  5118. @@ -611,6 +746,8 @@ static void msm_vfe32_axi_clear_wm_reg(
  5119. {
  5120. uint32_t val = 0;
  5121. uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
  5122. + /* FRAME BASED */
  5123. + msm_camera_io_w(val, vfe_dev->vfe_base + wm_base);
  5124. /*WR_IMAGE_SIZE*/
  5125. msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
  5126. /*WR_BUFFER_CFG*/
  5127. @@ -686,18 +823,74 @@ static void msm_vfe32_axi_clear_wm_xbar_reg(
  5128. msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
  5129. }
  5130.  
  5131. -static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
  5132. +static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
  5133. +{
  5134. + int i;
  5135. + uint32_t ub_offset = 0;
  5136. + struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  5137. + uint32_t total_image_size = 0;
  5138. + uint32_t num_used_wms = 0;
  5139. + uint32_t prop_size = 0;
  5140. + uint32_t wm_ub_size;
  5141. + uint64_t delta;
  5142. + for (i = 0; i < axi_data->hw_info->num_wm; i++) {
  5143. + if (axi_data->free_wm[i] > 0) {
  5144. + num_used_wms++;
  5145. + total_image_size += axi_data->wm_image_size[i];
  5146. + }
  5147. + }
  5148. + prop_size = MSM_ISP32_TOTAL_WM_UB -
  5149. + axi_data->hw_info->min_wm_ub * num_used_wms;
  5150. + for (i = 0; i < axi_data->hw_info->num_wm; i++) {
  5151. + if (axi_data->free_wm[i]) {
  5152. + delta =
  5153. + (uint64_t)(axi_data->wm_image_size[i] *
  5154. + prop_size);
  5155. + do_div(delta, total_image_size);
  5156. + wm_ub_size = axi_data->hw_info->min_wm_ub +
  5157. + (uint32_t)delta;
  5158. + msm_camera_io_w(ub_offset << 16 |
  5159. + (wm_ub_size - 1), vfe_dev->vfe_base +
  5160. + VFE32_WM_BASE(i) + 0xC);
  5161. + ub_offset += wm_ub_size;
  5162. + } else
  5163. + msm_camera_io_w(0,
  5164. + vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
  5165. + }
  5166. +}
  5167. +
  5168. +static void msm_vfe32_cfg_axi_ub_equal_slicing(struct vfe_device *vfe_dev)
  5169. {
  5170. int i;
  5171. uint32_t ub_offset = 0;
  5172. + uint32_t final_ub_slice_size;
  5173. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  5174. for (i = 0; i < axi_data->hw_info->num_wm; i++) {
  5175. - msm_camera_io_w(ub_offset << 16 | (VFE32_EQUAL_SLICE_UB - 1),
  5176. - vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
  5177. - ub_offset += VFE32_EQUAL_SLICE_UB;
  5178. + if (ub_offset + VFE32_EQUAL_SLICE_UB > VFE32_AXI_SLICE_UB) {
  5179. + final_ub_slice_size = VFE32_AXI_SLICE_UB - ub_offset;
  5180. + msm_camera_io_w(ub_offset << 16 |
  5181. + (final_ub_slice_size - 1), vfe_dev->vfe_base +
  5182. + VFE32_WM_BASE(i) + 0xC);
  5183. + ub_offset += final_ub_slice_size;
  5184. + } else {
  5185. + msm_camera_io_w(ub_offset << 16 |
  5186. + (VFE32_EQUAL_SLICE_UB - 1), vfe_dev->vfe_base +
  5187. + VFE32_WM_BASE(i) + 0xC);
  5188. + ub_offset += VFE32_EQUAL_SLICE_UB;
  5189. + }
  5190. }
  5191. }
  5192.  
  5193. +static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
  5194. +{
  5195. + struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  5196. + axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
  5197. + if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
  5198. + msm_vfe32_cfg_axi_ub_equal_slicing(vfe_dev);
  5199. + else
  5200. + msm_vfe32_cfg_axi_ub_equal_default(vfe_dev);
  5201. +}
  5202. +
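
The default UB policy added above gives every active write master min_wm_ub (64) words and splits the rest of MSM_ISP32_TOTAL_WM_UB (792) in proportion to each WM's image size, using do_div for the 64-bit multiply and divide. Below is a standalone worked example of the same arithmetic for two hypothetical write masters; the image sizes are invented, the constants are the ones defined in this patch.

/* Standalone worked example of the proportional UB split in
 * msm_vfe32_cfg_axi_ub_equal_default(). */
#include <stdint.h>
#include <stdio.h>

#define TOTAL_WM_UB 792
#define MIN_WM_UB    64

int main(void)
{
	uint32_t wm_image_size[2] = { 1920 * 1080, 1280 * 720 }; /* hypothetical */
	uint32_t total = wm_image_size[0] + wm_image_size[1];
	uint32_t prop_size = TOTAL_WM_UB - MIN_WM_UB * 2;
	uint32_t ub_offset = 0;
	int i;

	for (i = 0; i < 2; i++) {
		/* same math as the driver: minimum share + proportional share */
		uint64_t delta = (uint64_t)wm_image_size[i] * prop_size;
		uint32_t wm_ub_size = MIN_WM_UB + (uint32_t)(delta / total);

		printf("WM%d: offset=%u size=%u\n", i, ub_offset, wm_ub_size);
		ub_offset += wm_ub_size;
	}
	return 0;
}
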
  5203. static void msm_vfe32_update_ping_pong_addr(struct vfe_device *vfe_dev,
  5204. uint8_t wm_idx, uint32_t pingpong_status, unsigned long paddr)
  5205. {
  5206. @@ -705,17 +898,26 @@ static void msm_vfe32_update_ping_pong_addr(struct vfe_device *vfe_dev,
  5207. VFE32_PING_PONG_BASE(wm_idx, pingpong_status));
  5208. }
  5209.  
  5210. -static long msm_vfe32_axi_halt(struct vfe_device *vfe_dev)
  5211. +static long msm_vfe32_axi_halt(struct vfe_device *vfe_dev,
  5212. + uint32_t blocking)
  5213. {
  5214. uint32_t halt_mask;
  5215. + uint32_t axi_busy_flag = false;
  5216. + msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
  5217. + if (blocking) {
  5218. + axi_busy_flag = true;
  5219. + }
  5220. + while (axi_busy_flag) {
  5221. + if (msm_camera_io_r(
  5222. + vfe_dev->vfe_base + 0x1DC) & 0x1)
  5223. + axi_busy_flag = false;
  5224. + }
  5225. + msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
  5226. halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
  5227. - halt_mask |= BIT(24);
  5228. + halt_mask &= 0xFEFFFFFF;
  5229. + /* Disable AXI IRQ */
  5230. msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x20);
  5231. - init_completion(&vfe_dev->halt_complete);
  5232. - /*TD: Need to fix crashes with this*/
  5233. - /*msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);*/
  5234. - return wait_for_completion_interruptible_timeout(
  5235. - &vfe_dev->halt_complete, msecs_to_jiffies(500));
  5236. + return 0;
  5237. }
  5238.  
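
The new axi_halt above writes the halt command at 0x1D8 and, when blocking, polls the status register at 0x1DC until the halt-ack bit reads back, then clears the command and masks the AXI halt IRQ out of the register at 0x20. The patch's loop has no bound; the standalone sketch below shows the same poll-for-ack idea with an explicit retry cap, which is an addition of this sketch only. read_axi_status() is a placeholder for msm_camera_io_r(vfe_base + 0x1DC).

/* Standalone sketch of a bounded poll-for-halt-ack loop. */
#include <stdint.h>
#include <stdio.h>

static int fake_cycles;
static uint32_t read_axi_status(void)
{
	/* pretend the ack bit shows up after a few polls */
	return (++fake_cycles >= 3) ? 0x1 : 0x0;
}

int main(void)
{
	int retries = 1000; /* bound is an assumption of this sketch */
	while (retries-- > 0) {
		if (read_axi_status() & 0x1) {
			printf("AXI halted after %d polls\n", fake_cycles);
			return 0;
		}
	}
	fprintf(stderr, "halt ack never seen\n");
	return 1;
}
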
  5239. static uint32_t msm_vfe32_get_wm_mask(
  5240. @@ -806,13 +1008,13 @@ static void msm_vfe32_stats_cfg_ub(struct vfe_device *vfe_dev)
  5241. int i;
  5242. uint32_t ub_offset = VFE32_UB_SIZE;
  5243. uint32_t ub_size[VFE32_NUM_STATS_TYPE] = {
  5244. - 64, /*MSM_ISP_STATS_BG*/
  5245. - 64, /*MSM_ISP_STATS_BF*/
  5246. - 16, /*MSM_ISP_STATS_AWB*/
  5247. - 8, /*MSM_ISP_STATS_RS*/
  5248. + 107, /*MSM_ISP_STATS_BG*/
  5249. + 92, /*MSM_ISP_STATS_BF*/
  5250. + 2, /*MSM_ISP_STATS_AWB*/
  5251. + 7, /*MSM_ISP_STATS_RS*/
  5252. 16, /*MSM_ISP_STATS_CS*/
  5253. - 16, /*MSM_ISP_STATS_IHIST*/
  5254. - 16, /*MSM_ISP_STATS_BHIST*/
  5255. + 2, /*MSM_ISP_STATS_IHIST*/
  5256. + 7, /*MSM_ISP_STATS_BHIST*/
  5257. };
  5258.  
  5259. for (i = 0; i < VFE32_NUM_STATS_TYPE; i++) {
  5260. @@ -875,6 +1077,33 @@ static uint32_t msm_vfe32_stats_get_wm_mask(uint32_t irq_status0,
  5261. return (irq_status0 >> 13) & 0x7F;
  5262. }
  5263.  
  5264. +static void msm_vfe32_get_overflow_mask(uint32_t *overflow_mask)
  5265. +{
  5266. + *overflow_mask = 0x002FFF7E;
  5267. +}
  5268. +
  5269. +static void msm_vfe32_get_irq_mask(struct vfe_device *vfe_dev,
  5270. + uint32_t *irq0_mask, uint32_t *irq1_mask)
  5271. +{
  5272. + *irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
  5273. + *irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
  5274. +}
  5275. +
  5276. +static void msm_vfe32_restore_irq_mask(struct vfe_device *vfe_dev)
  5277. +{
  5278. + msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
  5279. + vfe_dev->vfe_base + 0x1C);
  5280. + msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
  5281. + vfe_dev->vfe_base + 0x20);
  5282. +}
  5283. +
  5284. +static void msm_vfe32_get_halt_restart_mask(uint32_t *irq0_mask,
  5285. + uint32_t *irq1_mask)
  5286. +{
  5287. + *irq0_mask = 0x0;
  5288. + *irq1_mask = 0x01800000;
  5289. +}
  5290. +
  5291. static uint32_t msm_vfe32_stats_get_comp_mask(uint32_t irq_status0,
  5292. uint32_t irq_status1)
  5293. {
  5294. @@ -914,14 +1143,22 @@ static int msm_vfe32_get_platform_data(struct vfe_device *vfe_dev)
  5295. goto vfe_no_resource;
  5296. }
  5297.  
  5298. - vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe_imgwr");
  5299. + if (!vfe_dev->pdev->dev.of_node)
  5300. + vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe_imgwr");
  5301. + else
  5302. + vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe0");
  5303. +
  5304. if (!vfe_dev->iommu_ctx[0]) {
  5305. pr_err("%s: no iommux ctx resource?\n", __func__);
  5306. rc = -ENODEV;
  5307. goto vfe_no_resource;
  5308. }
  5309.  
  5310. - vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe_misc");
  5311. + if (!vfe_dev->pdev->dev.of_node)
  5312. + vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe_misc");
  5313. + else
  5314. + vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe0");
  5315. +
  5316. if (!vfe_dev->iommu_ctx[1]) {
  5317. pr_err("%s: no iommux ctx resource?\n", __func__);
  5318. rc = -ENODEV;
  5319. @@ -940,10 +1177,11 @@ static void msm_vfe32_get_error_mask(uint32_t *error_mask0,
  5320. }
  5321.  
  5322. struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
  5323. - .num_wm = 4,
  5324. + .num_wm = 5,
  5325. .num_comp_mask = 3,
  5326. .num_rdi = 3,
  5327. .num_rdi_master = 3,
  5328. + .min_wm_ub = 64,
  5329. };
  5330.  
  5331. static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
  5332. @@ -1019,6 +1257,11 @@ struct msm_vfe_hardware_info vfe32_hw_info = {
  5333. .release_hw = msm_vfe32_release_hardware,
  5334. .get_platform_data = msm_vfe32_get_platform_data,
  5335. .get_error_mask = msm_vfe32_get_error_mask,
  5336. + .get_overflow_mask = msm_vfe32_get_overflow_mask,
  5337. + .get_irq_mask = msm_vfe32_get_irq_mask,
  5338. + .restore_irq_mask = msm_vfe32_restore_irq_mask,
  5339. + .get_halt_restart_mask =
  5340. + msm_vfe32_get_halt_restart_mask,
  5341. .process_error_status = msm_vfe32_process_error_status,
  5342. },
  5343. .stats_ops = {
  5344. diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
  5345. index ac56efa..76d2118 100644
  5346. --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
  5347. +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
  5348. @@ -1,4 +1,4 @@
  5349. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  5350. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  5351. *
  5352. * This program is free software; you can redistribute it and/or modify
  5353. * it under the terms of the GNU General Public License version 2 and
  5354. @@ -13,7 +13,7 @@
  5355. #include <linux/module.h>
  5356. #include <mach/iommu.h>
  5357. #include <linux/ratelimit.h>
  5358. -
  5359. +#include <asm/div64.h>
  5360. #include "msm_isp40.h"
  5361. #include "msm_isp_util.h"
  5362. #include "msm_isp_axi_util.h"
  5363. @@ -30,13 +30,16 @@
  5364. #define CDBG(fmt, args...) do { } while (0)
  5365. #endif
  5366.  
  5367. -#define VFE40_V1_VERSION 0x10000018
  5368. -#define VFE40_V2_VERSION 0x1001001A
  5369. +#define VFE40_8974V1_VERSION 0x10000018
  5370. +#define VFE40_8974V2_VERSION 0x1001001A
  5371. +#define VFE40_8974V3_VERSION 0x1001001B
  5372. +#define VFE40_8x26_VERSION 0x20000013
  5373. +#define VFE40_8x26V2_VERSION 0x20010014
  5374. +
  5375. +
  5376. +/* STATS_SIZE (BE + BG + BF + RS + CS + IHIST + BHIST) = 392 */
  5377. +#define VFE40_STATS_SIZE 392
  5378.  
  5379. -#define VFE40_BURST_LEN 3
  5380. -#define VFE40_STATS_BURST_LEN 2
  5381. -#define VFE40_UB_SIZE 1536
  5382. -#define VFE40_EQUAL_SLICE_UB 228
  5383. #define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
  5384. #define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
  5385. #define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
  5386. @@ -91,7 +94,10 @@ static struct msm_cam_clk_info msm_vfe40_clk_info[] = {
  5387. static void msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev)
  5388. {
  5389. void __iomem *vfebase = vfe_dev->vfe_base;
  5390. - if (vfe_dev->vfe_hw_version == VFE40_V1_VERSION) {
  5391. +
  5392. + if (vfe_dev->vfe_hw_version == VFE40_8974V1_VERSION ||
  5393. + vfe_dev->vfe_hw_version == VFE40_8x26_VERSION ||
  5394. + vfe_dev->vfe_hw_version == VFE40_8x26V2_VERSION) {
  5395. msm_camera_io_w(0xAAAAAAAA, vfebase + VFE40_BUS_BDG_QOS_CFG_0);
  5396. msm_camera_io_w(0xAAAAAAAA, vfebase + VFE40_BUS_BDG_QOS_CFG_1);
  5397. msm_camera_io_w(0xAAAAAAAA, vfebase + VFE40_BUS_BDG_QOS_CFG_2);
  5398. @@ -100,7 +106,8 @@ static void msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev)
  5399. msm_camera_io_w(0xAAAAAAAA, vfebase + VFE40_BUS_BDG_QOS_CFG_5);
  5400. msm_camera_io_w(0xAAAAAAAA, vfebase + VFE40_BUS_BDG_QOS_CFG_6);
  5401. msm_camera_io_w(0x0002AAAA, vfebase + VFE40_BUS_BDG_QOS_CFG_7);
  5402. - } else if (vfe_dev->vfe_hw_version == VFE40_V2_VERSION) {
  5403. + } else if (vfe_dev->vfe_hw_version == VFE40_8974V2_VERSION ||
  5404. + vfe_dev->vfe_hw_version == VFE40_8974V3_VERSION) {
  5405. msm_camera_io_w(0xAAA9AAA9, vfebase + VFE40_BUS_BDG_QOS_CFG_0);
  5406. msm_camera_io_w(0xAAA9AAA9, vfebase + VFE40_BUS_BDG_QOS_CFG_1);
  5407. msm_camera_io_w(0xAAA9AAA9, vfebase + VFE40_BUS_BDG_QOS_CFG_2);
  5408. @@ -109,81 +116,140 @@ static void msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev)
  5409. msm_camera_io_w(0xAAA9AAA9, vfebase + VFE40_BUS_BDG_QOS_CFG_5);
  5410. msm_camera_io_w(0xAAA9AAA9, vfebase + VFE40_BUS_BDG_QOS_CFG_6);
  5411. msm_camera_io_w(0x0001AAA9, vfebase + VFE40_BUS_BDG_QOS_CFG_7);
  5412. + } else {
  5413. + BUG();
  5414. + pr_err("%s: QOS is NOT configured for HW Version %x\n",
  5415. + __func__, vfe_dev->vfe_hw_version);
  5416. }
  5417. }
  5418.  
  5419. -static void msm_vfe40_init_vbif_parms(struct vfe_device *vfe_dev)
  5420. +static void msm_vfe40_init_vbif_parms_8974_v1(struct vfe_device *vfe_dev)
  5421. {
  5422. void __iomem *vfe_vbif_base = vfe_dev->vfe_vbif_base;
  5423. - if (vfe_dev->vfe_hw_version == VFE40_V1_VERSION) {
  5424. - msm_camera_io_w(0x1,
  5425. - vfe_vbif_base + VFE40_VBIF_CLKON);
  5426. - msm_camera_io_w(0x01010101,
  5427. - vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF0);
  5428. - msm_camera_io_w(0x01010101,
  5429. - vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF1);
  5430. - msm_camera_io_w(0x10010110,
  5431. - vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF2);
  5432. - msm_camera_io_w(0x10101010,
  5433. - vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF0);
  5434. - msm_camera_io_w(0x10101010,
  5435. - vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF1);
  5436. - msm_camera_io_w(0x10101010,
  5437. - vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF2);
  5438. - msm_camera_io_w(0x00001010,
  5439. - vfe_vbif_base + VFE40_VBIF_OUT_RD_LIM_CONF0);
  5440. - msm_camera_io_w(0x00001010,
  5441. - vfe_vbif_base + VFE40_VBIF_OUT_WR_LIM_CONF0);
  5442. - msm_camera_io_w(0x00000707,
  5443. - vfe_vbif_base + VFE40_VBIF_DDR_OUT_MAX_BURST);
  5444. - msm_camera_io_w(0x00000707,
  5445. - vfe_vbif_base + VFE40_VBIF_OCMEM_OUT_MAX_BURST);
  5446. - msm_camera_io_w(0x00000030,
  5447. - vfe_vbif_base + VFE40_VBIF_ARB_CTL);
  5448. - msm_camera_io_w(0x00000FFF,
  5449. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO_EN);
  5450. - msm_camera_io_w(0x0FFF0FFF,
  5451. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO);
  5452. - msm_camera_io_w(0x00000001,
  5453. - vfe_vbif_base + VFE40_VBIF_ROUND_ROBIN_QOS_ARB);
  5454. - msm_camera_io_w(0x22222222,
  5455. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF0);
  5456. - msm_camera_io_w(0x00002222,
  5457. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF1);
  5458. - } else if (vfe_dev->vfe_hw_version == VFE40_V2_VERSION) {
  5459. - msm_camera_io_w(0x1,
  5460. - vfe_vbif_base + VFE40_VBIF_CLKON);
  5461. - msm_camera_io_w(0x10101010,
  5462. - vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF0);
  5463. - msm_camera_io_w(0x10101010,
  5464. - vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF1);
  5465. - msm_camera_io_w(0x10101010,
  5466. - vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF2);
  5467. - msm_camera_io_w(0x10101010,
  5468. - vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF0);
  5469. - msm_camera_io_w(0x10101010,
  5470. - vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF1);
  5471. - msm_camera_io_w(0x10101010,
  5472. - vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF2);
  5473. - msm_camera_io_w(0x00000010,
  5474. - vfe_vbif_base + VFE40_VBIF_OUT_RD_LIM_CONF0);
  5475. - msm_camera_io_w(0x00000010,
  5476. - vfe_vbif_base + VFE40_VBIF_OUT_WR_LIM_CONF0);
  5477. - msm_camera_io_w(0x00000707,
  5478. - vfe_vbif_base + VFE40_VBIF_DDR_OUT_MAX_BURST);
  5479. - msm_camera_io_w(0x00000010,
  5480. - vfe_vbif_base + VFE40_VBIF_ARB_CTL);
  5481. - msm_camera_io_w(0x00000FFF,
  5482. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO_EN);
  5483. - msm_camera_io_w(0x0FFF0FFF,
  5484. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO);
  5485. - msm_camera_io_w(0x00000003,
  5486. - vfe_vbif_base + VFE40_VBIF_ROUND_ROBIN_QOS_ARB);
  5487. - msm_camera_io_w(0x22222222,
  5488. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF0);
  5489. - msm_camera_io_w(0x00002222,
  5490. - vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF1);
  5491. + msm_camera_io_w(0x1,
  5492. + vfe_vbif_base + VFE40_VBIF_CLKON);
  5493. + msm_camera_io_w(0x01010101,
  5494. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF0);
  5495. + msm_camera_io_w(0x01010101,
  5496. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF1);
  5497. + msm_camera_io_w(0x10010110,
  5498. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF2);
  5499. + msm_camera_io_w(0x10101010,
  5500. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF0);
  5501. + msm_camera_io_w(0x10101010,
  5502. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF1);
  5503. + msm_camera_io_w(0x10101010,
  5504. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF2);
  5505. + msm_camera_io_w(0x00001010,
  5506. + vfe_vbif_base + VFE40_VBIF_OUT_RD_LIM_CONF0);
  5507. + msm_camera_io_w(0x00001010,
  5508. + vfe_vbif_base + VFE40_VBIF_OUT_WR_LIM_CONF0);
  5509. + msm_camera_io_w(0x00000707,
  5510. + vfe_vbif_base + VFE40_VBIF_DDR_OUT_MAX_BURST);
  5511. + msm_camera_io_w(0x00000707,
  5512. + vfe_vbif_base + VFE40_VBIF_OCMEM_OUT_MAX_BURST);
  5513. + msm_camera_io_w(0x00000030,
  5514. + vfe_vbif_base + VFE40_VBIF_ARB_CTL);
  5515. + msm_camera_io_w(0x00000FFF,
  5516. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO_EN);
  5517. + msm_camera_io_w(0x0FFF0FFF,
  5518. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO);
  5519. + msm_camera_io_w(0x00000001,
  5520. + vfe_vbif_base + VFE40_VBIF_ROUND_ROBIN_QOS_ARB);
  5521. + msm_camera_io_w(0x22222222,
  5522. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF0);
  5523. + msm_camera_io_w(0x00002222,
  5524. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF1);
  5525. + return;
  5526. +}
  5527. +
  5528. +static void msm_vfe40_init_vbif_parms_8974_v2(struct vfe_device *vfe_dev)
  5529. +{
  5530. + void __iomem *vfe_vbif_base = vfe_dev->vfe_vbif_base;
  5531. + msm_camera_io_w(0x1,
  5532. + vfe_vbif_base + VFE40_VBIF_CLKON);
  5533. + msm_camera_io_w(0x10101010,
  5534. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF0);
  5535. + msm_camera_io_w(0x10101010,
  5536. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF1);
  5537. + msm_camera_io_w(0x10101010,
  5538. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF2);
  5539. + msm_camera_io_w(0x10101010,
  5540. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF0);
  5541. + msm_camera_io_w(0x10101010,
  5542. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF1);
  5543. + msm_camera_io_w(0x10101010,
  5544. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF2);
  5545. + msm_camera_io_w(0x00000010,
  5546. + vfe_vbif_base + VFE40_VBIF_OUT_RD_LIM_CONF0);
  5547. + msm_camera_io_w(0x00000010,
  5548. + vfe_vbif_base + VFE40_VBIF_OUT_WR_LIM_CONF0);
  5549. + msm_camera_io_w(0x00000707,
  5550. + vfe_vbif_base + VFE40_VBIF_DDR_OUT_MAX_BURST);
  5551. + msm_camera_io_w(0x00000010,
  5552. + vfe_vbif_base + VFE40_VBIF_ARB_CTL);
  5553. + msm_camera_io_w(0x00000FFF,
  5554. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO_EN);
  5555. + msm_camera_io_w(0x0FFF0FFF,
  5556. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO);
  5557. + msm_camera_io_w(0x00000003,
  5558. + vfe_vbif_base + VFE40_VBIF_ROUND_ROBIN_QOS_ARB);
  5559. + msm_camera_io_w(0x22222222,
  5560. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF0);
  5561. + msm_camera_io_w(0x00002222,
  5562. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF1);
  5563. + return;
  5564. +}
  5565. +
  5566. +static void msm_vfe40_init_vbif_parms_8x26(struct vfe_device *vfe_dev)
  5567. +{
  5568. + void __iomem *vfe_vbif_base = vfe_dev->vfe_vbif_base;
  5569. + msm_camera_io_w(0x10101010,
  5570. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF0);
  5571. + msm_camera_io_w(0x10101010,
  5572. + vfe_vbif_base + VFE40_VBIF_IN_RD_LIM_CONF1);
  5573. + msm_camera_io_w(0x10101010,
  5574. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF0);
  5575. + msm_camera_io_w(0x10101010,
  5576. + vfe_vbif_base + VFE40_VBIF_IN_WR_LIM_CONF1);
  5577. + msm_camera_io_w(0x00000010,
  5578. + vfe_vbif_base + VFE40_VBIF_OUT_RD_LIM_CONF0);
  5579. + msm_camera_io_w(0x00000010,
  5580. + vfe_vbif_base + VFE40_VBIF_OUT_WR_LIM_CONF0);
  5581. + msm_camera_io_w(0x00000707,
  5582. + vfe_vbif_base + VFE40_VBIF_DDR_OUT_MAX_BURST);
  5583. + msm_camera_io_w(0x000000FF,
  5584. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO_EN);
  5585. + msm_camera_io_w(0x00FF00FF,
  5586. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AOOO);
  5587. + msm_camera_io_w(0x00000003,
  5588. + vfe_vbif_base + VFE40_VBIF_ROUND_ROBIN_QOS_ARB);
  5589. + msm_camera_io_w(0x22222222,
  5590. + vfe_vbif_base + VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF0);
  5591. + return;
  5592. +}
  5593. +
  5594. +static void msm_vfe40_init_vbif_parms(struct vfe_device *vfe_dev)
  5595. +{
  5596. + switch (vfe_dev->vfe_hw_version) {
  5597. + case VFE40_8974V1_VERSION:
  5598. + msm_vfe40_init_vbif_parms_8974_v1(vfe_dev);
  5599. + break;
  5600. + case VFE40_8974V2_VERSION:
  5601. + case VFE40_8974V3_VERSION:
  5602. + msm_vfe40_init_vbif_parms_8974_v2(vfe_dev);
  5603. + break;
  5604. + case VFE40_8x26_VERSION:
  5605. + case VFE40_8x26V2_VERSION:
  5606. + msm_vfe40_init_vbif_parms_8x26(vfe_dev);
  5607. + break;
  5608. + default:
  5609. + BUG();
  5610. + pr_err("%s: VBIF is NOT configured for HW Version %x\n",
  5611. + __func__, vfe_dev->vfe_hw_version);
  5612. + break;
  5613. }
  5614. +
  5615. }
  5616.  
  5617. static int msm_vfe40_init_hardware(struct vfe_device *vfe_dev)
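
msm_vfe40_init_vbif_parms now switches on vfe_hw_version and calls a per-SoC programming routine (8974 v1, 8974 v2/v3, 8x26/8x26 v2), hitting BUG() on an unknown version. The standalone sketch below shows the same dispatch shape expressed as a lookup table; the version constants are the ones defined in this patch, while the handler names and table form are illustrative only, not what the driver does.

/* Standalone sketch: version -> handler dispatch, equivalent in spirit
 * to the switch in msm_vfe40_init_vbif_parms(). Handlers are stubs. */
#include <stdint.h>
#include <stdio.h>

#define VFE40_8974V1_VERSION 0x10000018
#define VFE40_8974V2_VERSION 0x1001001A
#define VFE40_8974V3_VERSION 0x1001001B
#define VFE40_8x26_VERSION   0x20000013
#define VFE40_8x26V2_VERSION 0x20010014

static void vbif_8974_v1(void) { puts("program 8974 v1 VBIF"); }
static void vbif_8974_v2(void) { puts("program 8974 v2/v3 VBIF"); }
static void vbif_8x26(void)    { puts("program 8x26 VBIF"); }

static const struct { uint32_t version; void (*fn)(void); } vbif_tbl[] = {
	{ VFE40_8974V1_VERSION, vbif_8974_v1 },
	{ VFE40_8974V2_VERSION, vbif_8974_v2 },
	{ VFE40_8974V3_VERSION, vbif_8974_v2 },
	{ VFE40_8x26_VERSION,   vbif_8x26 },
	{ VFE40_8x26V2_VERSION, vbif_8x26 },
};

int main(void)
{
	uint32_t hw = VFE40_8974V3_VERSION; /* pretend probed version */
	size_t i;
	for (i = 0; i < sizeof(vbif_tbl) / sizeof(vbif_tbl[0]); i++) {
		if (vbif_tbl[i].version == hw) {
			vbif_tbl[i].fn();
			return 0;
		}
	}
	fprintf(stderr, "unknown VFE HW version 0x%x\n", hw);
	return 1;
}
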
  5618. @@ -202,6 +268,8 @@ static int msm_vfe40_init_hardware(struct vfe_device *vfe_dev)
  5619. goto fs_failed;
  5620. }
  5621. }
  5622. + else
  5623. + goto fs_failed;
  5624.  
  5625. rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe40_clk_info,
  5626. vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe40_clk_info), 1);
  5627. @@ -224,6 +292,14 @@ static int msm_vfe40_init_hardware(struct vfe_device *vfe_dev)
  5628. goto vbif_remap_failed;
  5629. }
  5630.  
  5631. + vfe_dev->tcsr_base = ioremap(vfe_dev->tcsr_mem->start,
  5632. + resource_size(vfe_dev->tcsr_mem));
  5633. + if (!vfe_dev->tcsr_base) {
  5634. + rc = -ENOMEM;
  5635. + pr_err("%s: tcsr ioremap failed\n", __func__);
  5636. + goto tcsr_remap_failed;
  5637. + }
  5638. +
  5639. rc = request_irq(vfe_dev->vfe_irq->start, msm_isp_process_irq,
  5640. IRQF_TRIGGER_RISING, "vfe", vfe_dev);
  5641. if (rc < 0) {
  5642. @@ -232,6 +308,8 @@ static int msm_vfe40_init_hardware(struct vfe_device *vfe_dev)
  5643. }
  5644. return rc;
  5645. irq_req_failed:
  5646. + iounmap(vfe_dev->tcsr_base);
  5647. +tcsr_remap_failed:
  5648. iounmap(vfe_dev->vfe_vbif_base);
  5649. vbif_remap_failed:
  5650. iounmap(vfe_dev->vfe_base);
  5651. @@ -250,6 +328,7 @@ static void msm_vfe40_release_hardware(struct vfe_device *vfe_dev)
  5652. {
  5653. free_irq(vfe_dev->vfe_irq->start, vfe_dev);
  5654. tasklet_kill(&vfe_dev->vfe_tasklet);
  5655. + iounmap(vfe_dev->tcsr_base);
  5656. iounmap(vfe_dev->vfe_vbif_base);
  5657. iounmap(vfe_dev->vfe_base);
  5658. msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe40_clk_info,
  5659. @@ -271,6 +350,12 @@ static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
  5660. msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x2C);
  5661. msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
  5662. msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
  5663. + msm_camera_io_w(vfe_dev->stats_data.stats_mask,
  5664. + vfe_dev->vfe_base + 0x44);
  5665. + msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
  5666. + msm_camera_io_w(0, vfe_dev->vfe_base + 0x30);
  5667. + msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x34);
  5668. + msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
  5669. }
  5670.  
  5671. static void msm_vfe40_process_reset_irq(struct vfe_device *vfe_dev,
  5672. @@ -283,23 +368,22 @@ static void msm_vfe40_process_reset_irq(struct vfe_device *vfe_dev,
  5673. static void msm_vfe40_process_halt_irq(struct vfe_device *vfe_dev,
  5674. uint32_t irq_status0, uint32_t irq_status1)
  5675. {
  5676. - if (irq_status1 & (1 << 8))
  5677. - complete(&vfe_dev->halt_complete);
  5678. }
  5679.  
  5680. static void msm_vfe40_process_camif_irq(struct vfe_device *vfe_dev,
  5681. uint32_t irq_status0, uint32_t irq_status1,
  5682. struct msm_isp_timestamp *ts)
  5683. {
  5684. + int cnt;
  5685. +
  5686. if (!(irq_status0 & 0xF))
  5687. return;
  5688.  
  5689. if (irq_status0 & (1 << 0)) {
  5690. ISP_DBG("%s: SOF IRQ\n", __func__);
  5691. - if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
  5692. - && vfe_dev->axi_data.src_info[VFE_PIX_0].
  5693. - pix_stream_count == 0) {
  5694. - msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
  5695. + cnt = vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count;
  5696. + if (cnt > 0) {
  5697. + msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
  5698. if (vfe_dev->axi_data.stream_update)
  5699. msm_isp_axi_stream_update(vfe_dev);
  5700. msm_isp_update_framedrop_reg(vfe_dev);
  5701. @@ -380,65 +464,94 @@ static void msm_vfe40_process_violation_status(
  5702.  
  5703. static void msm_vfe40_process_error_status(struct vfe_device *vfe_dev)
  5704. {
  5705. - uint32_t halt_mask;
  5706. uint32_t error_status1 = vfe_dev->error_info.error_mask1;
  5707. - struct msm_isp_event_data error_event;
  5708. - if (error_status1 & (1 << 0)) {
  5709. - pr_err("%s: camif error status: 0x%x\n",
  5710. + if (error_status1 & (1 << 0))
  5711. + pr_err_ratelimited("%s: camif error status: 0x%x\n",
  5712. __func__, vfe_dev->error_info.camif_status);
  5713. - error_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
  5714. - msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR, &error_event);
  5715. - halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
  5716. - halt_mask &= ~(1 << 8);
  5717. - msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x2C);
  5718. - msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
  5719. - }
  5720. if (error_status1 & (1 << 1))
  5721. - pr_err("%s: stats bhist overwrite\n", __func__);
  5722. + pr_err_ratelimited("%s: stats bhist overwrite\n", __func__);
  5723. if (error_status1 & (1 << 2))
  5724. - pr_err("%s: stats cs overwrite\n", __func__);
  5725. + pr_err_ratelimited("%s: stats cs overwrite\n", __func__);
  5726. if (error_status1 & (1 << 3))
  5727. - pr_err("%s: stats ihist overwrite\n", __func__);
  5728. + pr_err_ratelimited("%s: stats ihist overwrite\n", __func__);
  5729. if (error_status1 & (1 << 4))
  5730. - pr_err("%s: realign buf y overflow\n", __func__);
  5731. + pr_err_ratelimited("%s: realign buf y overflow\n", __func__);
  5732. if (error_status1 & (1 << 5))
  5733. - pr_err("%s: realign buf cb overflow\n", __func__);
  5734. + pr_err_ratelimited("%s: realign buf cb overflow\n", __func__);
  5735. if (error_status1 & (1 << 6))
  5736. - pr_err("%s: realign buf cr overflow\n", __func__);
  5737. + pr_err_ratelimited("%s: realign buf cr overflow\n", __func__);
  5738. if (error_status1 & (1 << 7)) {
  5739. - pr_err("%s: violation\n", __func__);
  5740. + pr_err_ratelimited("%s: violation\n", __func__);
  5741. msm_vfe40_process_violation_status(vfe_dev);
  5742. }
  5743. - if (error_status1 & (1 << 9))
  5744. - pr_err("%s: image master 0 bus overflow\n", __func__);
  5745. - if (error_status1 & (1 << 10))
  5746. - pr_err("%s: image master 1 bus overflow\n", __func__);
  5747. - if (error_status1 & (1 << 11))
  5748. - pr_err("%s: image master 2 bus overflow\n", __func__);
  5749. - if (error_status1 & (1 << 12))
  5750. - pr_err("%s: image master 3 bus overflow\n", __func__);
  5751. - if (error_status1 & (1 << 13))
  5752. - pr_err("%s: image master 4 bus overflow\n", __func__);
  5753. - if (error_status1 & (1 << 14))
  5754. - pr_err("%s: image master 5 bus overflow\n", __func__);
  5755. - if (error_status1 & (1 << 15))
  5756. - pr_err("%s: image master 6 bus overflow\n", __func__);
  5757. - if (error_status1 & (1 << 16))
  5758. - pr_err("%s: status be bus overflow\n", __func__);
  5759. - if (error_status1 & (1 << 17))
  5760. - pr_err("%s: status bg bus overflow\n", __func__);
  5761. - if (error_status1 & (1 << 18))
  5762. - pr_err("%s: status bf bus overflow\n", __func__);
  5763. - if (error_status1 & (1 << 19))
  5764. - pr_err("%s: status awb bus overflow\n", __func__);
  5765. - if (error_status1 & (1 << 20))
  5766. - pr_err("%s: status rs bus overflow\n", __func__);
  5767. - if (error_status1 & (1 << 21))
  5768. - pr_err("%s: status cs bus overflow\n", __func__);
  5769. - if (error_status1 & (1 << 22))
  5770. - pr_err("%s: status ihist bus overflow\n", __func__);
  5771. - if (error_status1 & (1 << 23))
  5772. - pr_err("%s: status skin bhist bus overflow\n", __func__);
  5773. + if (error_status1 & (1 << 9)) {
  5774. + vfe_dev->stats->imagemaster0_overflow++;
  5775. + pr_err_ratelimited("%s: image master 0 bus overflow\n",
  5776. + __func__);
  5777. + }
  5778. + if (error_status1 & (1 << 10)) {
  5779. + vfe_dev->stats->imagemaster1_overflow++;
  5780. + pr_err_ratelimited("%s: image master 1 bus overflow\n",
  5781. + __func__);
  5782. + }
  5783. + if (error_status1 & (1 << 11)) {
  5784. + vfe_dev->stats->imagemaster2_overflow++;
  5785. + pr_err_ratelimited("%s: image master 2 bus overflow\n",
  5786. + __func__);
  5787. + }
  5788. + if (error_status1 & (1 << 12)) {
  5789. + vfe_dev->stats->imagemaster3_overflow++;
  5790. + pr_err_ratelimited("%s: image master 3 bus overflow\n",
  5791. + __func__);
  5792. + }
  5793. + if (error_status1 & (1 << 13)) {
  5794. + vfe_dev->stats->imagemaster4_overflow++;
  5795. + pr_err_ratelimited("%s: image master 4 bus overflow\n",
  5796. + __func__);
  5797. + }
  5798. + if (error_status1 & (1 << 14)) {
  5799. + vfe_dev->stats->imagemaster5_overflow++;
  5800. + pr_err_ratelimited("%s: image master 5 bus overflow\n",
  5801. + __func__);
  5802. + }
  5803. + if (error_status1 & (1 << 15)) {
  5804. + vfe_dev->stats->imagemaster6_overflow++;
  5805. + pr_err_ratelimited("%s: image master 6 bus overflow\n",
  5806. + __func__);
  5807. + }
  5808. + if (error_status1 & (1 << 16)) {
  5809. + vfe_dev->stats->be_overflow++;
  5810. + pr_err_ratelimited("%s: status be bus overflow\n", __func__);
  5811. + }
  5812. + if (error_status1 & (1 << 17)) {
  5813. + vfe_dev->stats->bg_overflow++;
  5814. + pr_err_ratelimited("%s: status bg bus overflow\n", __func__);
  5815. + }
  5816. + if (error_status1 & (1 << 18)) {
  5817. + vfe_dev->stats->bf_overflow++;
  5818. + pr_err_ratelimited("%s: status bf bus overflow\n", __func__);
  5819. + }
  5820. + if (error_status1 & (1 << 19)) {
  5821. + vfe_dev->stats->awb_overflow++;
  5822. + pr_err_ratelimited("%s: status awb bus overflow\n", __func__);
  5823. + }
  5824. + if (error_status1 & (1 << 20)) {
  5825. + vfe_dev->stats->imagemaster0_overflow++;
  5826. + pr_err_ratelimited("%s: status rs bus overflow\n", __func__);
  5827. + }
  5828. + if (error_status1 & (1 << 21)) {
  5829. + vfe_dev->stats->cs_overflow++;
  5830. + pr_err_ratelimited("%s: status cs bus overflow\n", __func__);
  5831. + }
  5832. + if (error_status1 & (1 << 22)) {
  5833. + vfe_dev->stats->ihist_overflow++;
  5834. + pr_err_ratelimited("%s: status ihist bus overflow\n", __func__);
  5835. + }
  5836. + if (error_status1 & (1 << 23)) {
  5837. + vfe_dev->stats->skinbhist_overflow++;
  5838. + pr_err_ratelimited("%s: status skin bhist bus overflow\n",
  5839. + __func__);
  5840. + }
  5841. }
  5842.  
  5843. static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
  5844. @@ -446,9 +559,11 @@ static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
  5845. {
  5846. *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x38);
  5847. *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
  5848. - if (*irq_status0 & 0x6000000)
  5849. - *irq_status0 &= ~(0x18000000);
  5850. -
  5851. + /*
  5852. + * Ignore composite 2/3 irq which is used for dual VFE only
  5853. + */
  5854. + if (*irq_status0 & 0x6000000)
  5855. + *irq_status0 &= ~(0x18000000);
  5856. msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x30);
  5857. msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x34);
  5858. msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x24);
  5859. @@ -456,6 +571,7 @@ static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
  5860. pr_err_ratelimited("%s: Protection triggered\n", __func__);
  5861. *irq_status0 &= ~(0x18000000);
  5862. }
  5863. +
  5864. if (*irq_status1 & (1 << 0))
  5865. vfe_dev->error_info.camif_status =
  5866. msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
  5867. @@ -500,12 +616,32 @@ static void msm_vfe40_reg_update(struct vfe_device *vfe_dev)
  5868. msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x378);
  5869. }
  5870.  
  5871. -static long msm_vfe40_reset_hardware(struct vfe_device *vfe_dev)
  5872. +static uint32_t msm_vfe40_reset_values[ISP_RST_MAX] =
  5873. +{
  5874. + 0x1FF, /* ISP_RST_HARD reset everything */
  5875. + 0x1EF /* ISP_RST_SOFT all modules without registers */
  5876. +};
  5877. +
  5878. +
  5879. +static long msm_vfe40_reset_hardware(struct vfe_device *vfe_dev ,
  5880. + enum msm_isp_reset_type reset_type, uint32_t blocking)
  5881. {
  5882. + uint32_t rst_val;
  5883. + long rc = 0;
  5884. + if (reset_type >= ISP_RST_MAX) {
  5885. + pr_err("%s: Error Invalid parameter\n", __func__);
  5886. + reset_type = ISP_RST_HARD;
  5887. + }
  5888. + rst_val = msm_vfe40_reset_values[reset_type];
  5889. init_completion(&vfe_dev->reset_complete);
  5890. - msm_camera_io_w_mb(0x1FF, vfe_dev->vfe_base + 0xC);
  5891. - return wait_for_completion_interruptible_timeout(
  5892. - &vfe_dev->reset_complete, msecs_to_jiffies(50));
  5893. + if (blocking) {
  5894. + msm_camera_io_w_mb(rst_val, vfe_dev->vfe_base + 0xC);
  5895. + rc = wait_for_completion_timeout(
  5896. + &vfe_dev->reset_complete, msecs_to_jiffies(50));
  5897. + } else {
  5898. + msm_camera_io_w_mb(0x1EF, vfe_dev->vfe_base + 0xC);
  5899. + }
  5900. + return rc;
  5901. }
  5902.  
  5903. static void msm_vfe40_axi_reload_wm(
  5904. @@ -539,17 +675,29 @@ static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
  5905. comp_mask &= ~(0x7F << (comp_mask_index * 8));
  5906. comp_mask |= (axi_data->composite_info[comp_mask_index].
  5907. stream_composite_mask << (comp_mask_index * 8));
  5908. +
  5909. + irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
  5910. + irq_mask |= 1 << (comp_mask_index + 25);
  5911. +
  5912. + /*
  5913. + * For dual VFE, composite 2/3 interrupt is used to trigger
  5914. + * microcontroller to update certain VFE registers
  5915. + */
  5916. if (stream_info->plane_cfg[0].plane_addr_offset &&
  5917. - stream_info->stream_type == CONTINUOUS_STREAM) {
  5918. + stream_info->stream_src == PIX_VIEWFINDER) {
  5919. + comp_mask |= (axi_data->composite_info[comp_mask_index].
  5920. + stream_composite_mask << 16);
  5921. + irq_mask |= BIT(27);
  5922. + }
  5923. +
  5924. + if (stream_info->plane_cfg[0].plane_addr_offset &&
  5925. + stream_info->stream_src == PIX_ENCODER) {
  5926. comp_mask |= (axi_data->composite_info[comp_mask_index].
  5927. stream_composite_mask << 24);
  5928. + irq_mask |= BIT(28);
  5929. }
  5930. +
  5931. msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
  5932. - printk("%s comp mask:0x%x\n", __func__, comp_mask);
  5933. - irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
  5934. - irq_mask |= 1 << (comp_mask_index + 25);
  5935. - if (stream_info->plane_cfg[0].plane_addr_offset && (comp_mask >> 24))
  5936. - irq_mask |= 0x10000000;
  5937. msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
  5938. }
  5939.  
  5940. @@ -558,24 +706,29 @@ static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
  5941. {
  5942. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  5943. uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
  5944. - uint32_t irq_mask, cur_comp_mask;
  5945. + uint32_t irq_mask;
  5946.  
  5947. comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
  5948. - cur_comp_mask = (comp_mask >> (comp_mask_index * 8)) & 0x7F;
  5949. comp_mask &= ~(0x7F << (comp_mask_index * 8));
  5950. - comp_mask &= ~(cur_comp_mask << 24);
  5951. +
  5952. + irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
  5953. + irq_mask &= ~(1 << (comp_mask_index + 25));
  5954. +
  5955. + if (stream_info->plane_cfg[0].plane_addr_offset &&
  5956. + stream_info->stream_src == PIX_VIEWFINDER) {
  5957. + comp_mask &= ~(axi_data->composite_info[comp_mask_index].
  5958. + stream_composite_mask << 16);
  5959. + irq_mask &= ~BIT(27);
  5960. + }
  5961. +
  5962. if (stream_info->plane_cfg[0].plane_addr_offset &&
  5963. - stream_info->stream_type == CONTINUOUS_STREAM) {
  5964. + stream_info->stream_src == PIX_ENCODER) {
  5965. comp_mask &= ~(axi_data->composite_info[comp_mask_index].
  5966. stream_composite_mask << 24);
  5967. + irq_mask &= ~BIT(28);
  5968. }
  5969. - msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
  5970. - printk("%s comp mask:0x%x\n", __func__, comp_mask);
  5971.  
  5972. - irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
  5973. - irq_mask &= ~(1 << (comp_mask_index + 25));
  5974. - if (stream_info->plane_cfg[0].plane_addr_offset && !(comp_mask >> 24))
  5975. - irq_mask &= ~(0x10000000);
  5976. + msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
  5977. msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
  5978. }
  5979.  
  5980. @@ -636,12 +789,18 @@ static void msm_vfe40_clear_framedrop(struct vfe_device *vfe_dev,
  5981. VFE40_WM_BASE(stream_info->wm[i]) + 0x1C);
  5982. }
  5983.  
  5984. -static void msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
  5985. - struct msm_vfe_axi_stream *stream_info)
  5986. +static int32_t msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
  5987. + enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
  5988. {
  5989. - int bpp, bpp_reg = 0;
  5990. - uint32_t io_format_reg;
  5991. - bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
  5992. + int bpp, bpp_reg = 0, pack_reg = 0;
  5993. + enum msm_isp_pack_fmt pack_fmt = 0;
  5994. + uint32_t io_format_reg; /*io format register bit*/
  5995. + bpp = msm_isp_get_bit_per_pixel(io_format);
  5996. + if (bpp < 0) {
  5997. + pr_err("%s:%d invalid io_format %d bpp %d\n", __func__, __LINE__,
  5998. + io_format, bpp);
  5999. + return -EINVAL;
  6000. + }
  6001.  
  6002. switch (bpp) {
  6003. case 8:
  6004. @@ -653,27 +812,60 @@ static void msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
  6005. case 12:
  6006. bpp_reg = 1 << 1;
  6007. break;
  6008. + default:
  6009. + pr_err("%s:%d invalid bpp %d\n", __func__, __LINE__, bpp);
  6010. + return -EINVAL;
  6011. }
  6012. +
  6013. + if (stream_src == IDEAL_RAW) {
  6014. + /*use io_format(v4l2_pix_fmt) to get pack format*/
  6015. + pack_fmt = msm_isp_get_pack_format(io_format);
  6016. + switch (pack_fmt) {
  6017. + case QCOM:
  6018. + pack_reg = 0x0;
  6019. + break;
  6020. + case MIPI:
  6021. + pack_reg = 0x1;
  6022. + break;
  6023. + case DPCM6:
  6024. + pack_reg = 0x2;
  6025. + break;
  6026. + case DPCM8:
  6027. + pack_reg = 0x3;
  6028. + break;
  6029. + case PLAIN8:
  6030. + pack_reg = 0x4;
  6031. + break;
  6032. + case PLAIN16:
  6033. + pack_reg = 0x5;
  6034. + break;
  6035. + default:
  6036. + pr_err("%s: invalid pack fmt!\n", __func__);
  6037. + return -EINVAL;
  6038. + }
  6039. + }
  6040. +
  6041. io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
  6042. - switch (stream_info->stream_src) {
  6043. + switch (stream_src) {
  6044. + case PIX_ENCODER:
  6045. + case PIX_VIEWFINDER:
  6046. case CAMIF_RAW:
  6047. io_format_reg &= 0xFFFFCFFF;
  6048. io_format_reg |= bpp_reg << 12;
  6049. break;
  6050. case IDEAL_RAW:
  6051. io_format_reg &= 0xFFFFFFC8;
  6052. - io_format_reg |= bpp_reg << 4;
  6053. + io_format_reg |= bpp_reg << 4 | pack_reg;
  6054. break;
  6055. - case PIX_ENCODER:
  6056. - case PIX_VIEWFINDER:
  6057. case RDI_INTF_0:
  6058. case RDI_INTF_1:
  6059. case RDI_INTF_2:
  6060. default:
  6061. pr_err("%s: Invalid stream source\n", __func__);
  6062. - return;
  6063. + return -EINVAL;
  6064. }
  6065. msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x54);
  6066. + return 0;
  6067. }
  6068.  
  6069. static void msm_vfe40_cfg_camif(struct vfe_device *vfe_dev,
  6070. @@ -742,13 +934,14 @@ static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
  6071. val &= 0xFFFFFF3F;
  6072. val = val | bus_en << 7 | vfe_en << 6;
  6073. msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
  6074. + msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x2F4);
  6075. msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2F4);
  6076. vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
  6077. } else if (update_state == DISABLE_CAMIF) {
  6078. msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2F4);
  6079. vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
  6080. } else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
  6081. - msm_camera_io_w_mb(0x2, vfe_dev->vfe_base + 0x2F4);
  6082. + msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x2F4);
  6083. vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
  6084. }
  6085. }
  6086. @@ -780,9 +973,15 @@ static void msm_vfe40_axi_cfg_wm_reg(
  6087. uint8_t plane_idx)
  6088. {
  6089. uint32_t val;
  6090. +
  6091. + struct msm_vfe_axi_shared_data *axi_data =
  6092. + &vfe_dev->axi_data;
  6093. + uint32_t burst_len = axi_data->burst_len;
  6094. +
  6095. uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
  6096.  
  6097. if (!stream_info->frame_based) {
  6098. + msm_camera_io_w(0x0, vfe_dev->vfe_base + wm_base);
  6099. /*WR_IMAGE_SIZE*/
  6100. val =
  6101. ((msm_isp_cal_word_per_line(
  6102. @@ -800,7 +999,7 @@ static void msm_vfe40_axi_cfg_wm_reg(
  6103. plane_idx].output_stride) << 16 |
  6104. (stream_info->plane_cfg[
  6105. plane_idx].output_height - 1) << 4 |
  6106. - VFE40_BURST_LEN;
  6107. + burst_len;
  6108. msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
  6109. } else {
  6110. msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
  6111. @@ -810,7 +1009,7 @@ static void msm_vfe40_axi_cfg_wm_reg(
  6112. plane_idx].output_width) << 16 |
  6113. (stream_info->plane_cfg[
  6114. plane_idx].output_height - 1) << 4 |
  6115. - VFE40_BURST_LEN;
  6116. + burst_len;
  6117. msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
  6118. }
  6119.  
  6120. @@ -925,7 +1124,7 @@ static void msm_vfe40_cfg_axi_ub_equal_default(
  6121. uint8_t num_used_wms = 0;
  6122. uint32_t prop_size = 0;
  6123. uint32_t wm_ub_size;
  6124. - uint32_t delta;
  6125. + uint32_t axi_wm_ub;
  6126.  
  6127. for (i = 0; i < axi_data->hw_info->num_wm; i++) {
  6128. if (axi_data->free_wm[i] > 0) {
  6129. @@ -933,13 +1132,17 @@ static void msm_vfe40_cfg_axi_ub_equal_default(
  6130. total_image_size += axi_data->wm_image_size[i];
  6131. }
  6132. }
  6133. - prop_size = MSM_ISP40_TOTAL_WM_UB -
  6134. + axi_wm_ub = vfe_dev->vfe_ub_size - VFE40_STATS_SIZE;
  6135. +
  6136. + prop_size = axi_wm_ub -
  6137. axi_data->hw_info->min_wm_ub * num_used_wms;
  6138. for (i = 0; i < axi_data->hw_info->num_wm; i++) {
  6139. if (axi_data->free_wm[i]) {
  6140. - delta =
  6141. - (axi_data->wm_image_size[i] *
  6142. - prop_size)/total_image_size;
  6143. + uint64_t delta = 0;
  6144. + uint64_t temp = (uint64_t)axi_data->wm_image_size[i] *
  6145. + (uint64_t)prop_size;
  6146. + do_div(temp, total_image_size);
  6147. + delta = temp;
  6148. wm_ub_size = axi_data->hw_info->min_wm_ub + delta;
  6149. msm_camera_io_w(ub_offset << 16 | (wm_ub_size - 1),
  6150. vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
  6151. @@ -956,17 +1159,21 @@ static void msm_vfe40_cfg_axi_ub_equal_slicing(
  6152. int i;
  6153. uint32_t ub_offset = 0;
  6154. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  6155. + uint32_t axi_equal_slice_ub =
  6156. + (vfe_dev->vfe_ub_size - VFE40_STATS_SIZE)/
  6157. + (axi_data->hw_info->num_wm - 1);
  6158. +
  6159. for (i = 0; i < axi_data->hw_info->num_wm; i++) {
  6160. - msm_camera_io_w(ub_offset << 16 | (VFE40_EQUAL_SLICE_UB - 1),
  6161. + msm_camera_io_w(ub_offset << 16 | (axi_equal_slice_ub - 1),
  6162. vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
  6163. - ub_offset += VFE40_EQUAL_SLICE_UB;
  6164. + ub_offset += axi_equal_slice_ub;
  6165. }
  6166. }
  6167.  
  6168. static void msm_vfe40_cfg_axi_ub(struct vfe_device *vfe_dev)
  6169. {
  6170. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  6171. - axi_data->wm_ub_cfg_policy = MSM_WM_UB_EQUAL_SLICING;
  6172. + axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
  6173. if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
  6174. msm_vfe40_cfg_axi_ub_equal_slicing(vfe_dev);
  6175. else
  6176. @@ -981,16 +1188,31 @@ static void msm_vfe40_update_ping_pong_addr(
  6177. VFE40_PING_PONG_BASE(wm_idx, pingpong_status));
  6178. }
  6179.  
  6180. -static long msm_vfe40_axi_halt(struct vfe_device *vfe_dev)
  6181. +static long msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
  6182. + uint32_t blocking)
  6183. {
  6184. - uint32_t halt_mask;
  6185. - halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
  6186. - halt_mask |= (1 << 8);
  6187. - msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x2C);
  6188. - init_completion(&vfe_dev->halt_complete);
  6189. - msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
  6190. - return wait_for_completion_interruptible_timeout(
  6191. - &vfe_dev->halt_complete, msecs_to_jiffies(500));
  6192. + long rc = 0;
  6193. + uint32_t axi_busy_flag = true;
  6194. + /* Keep only restart mask and halt mask*/
  6195. + msm_camera_io_w(BIT(31), vfe_dev->vfe_base + 0x28);
  6196. + msm_camera_io_w(BIT(8), vfe_dev->vfe_base + 0x2C);
  6197. + /* Clear IRQ Status*/
  6198. + msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
  6199. + msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
  6200. + msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
  6201. + if (blocking) {
  6202. + init_completion(&vfe_dev->halt_complete);
  6203. + /* Halt AXI Bus Bridge */
  6204. + msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
  6205. + atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
  6206. + while (axi_busy_flag) {
  6207. + if (msm_camera_io_r(
  6208. + vfe_dev->vfe_base + 0x2E4) & 0x1)
  6209. + axi_busy_flag = false;
  6210. + }
  6211. + }
  6212. + msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2C0);
  6213. + return rc;
  6214. }
  6215.  
  6216. static uint32_t msm_vfe40_get_wm_mask(
  6217. @@ -999,6 +1221,33 @@ static uint32_t msm_vfe40_get_wm_mask(
  6218. return (irq_status0 >> 8) & 0x7F;
  6219. }
  6220.  
  6221. +static void msm_vfe40_get_overflow_mask(uint32_t *overflow_mask)
  6222. +{
  6223. + *overflow_mask = 0x00FFFE7E;
  6224. +}
  6225. +
  6226. +static void msm_vfe40_get_irq_mask(struct vfe_device *vfe_dev,
  6227. + uint32_t *irq0_mask, uint32_t *irq1_mask)
  6228. +{
  6229. + *irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
  6230. + *irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
  6231. +}
  6232. +
  6233. +static void msm_vfe40_restore_irq_mask(struct vfe_device *vfe_dev)
  6234. +{
  6235. + msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
  6236. + vfe_dev->vfe_base + 0x28);
  6237. + msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
  6238. + vfe_dev->vfe_base + 0x2C);
  6239. +}
  6240. +
  6241. +static void msm_vfe40_get_halt_restart_mask(uint32_t *irq0_mask,
  6242. + uint32_t *irq1_mask)
  6243. +{
  6244. + *irq0_mask = BIT(31);
  6245. + *irq1_mask = BIT(8);
  6246. +}
  6247. +
  6248. static uint32_t msm_vfe40_get_comp_mask(
  6249. uint32_t irq_status0, uint32_t irq_status1)
  6250. {
  6251. @@ -1046,6 +1295,7 @@ static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
  6252. else
  6253. comp_mask &= ~stats_mask;
  6254. msm_camera_io_w(comp_mask << 16, vfe_dev->vfe_base + 0x44);
  6255. + vfe_dev->stats_data.stats_mask = (comp_mask << 16);
  6256. }
  6257.  
  6258. static void msm_vfe40_stats_cfg_wm_irq_mask(
  6259. @@ -1105,7 +1355,11 @@ static void msm_vfe40_stats_clear_wm_reg(
  6260. static void msm_vfe40_stats_cfg_ub(struct vfe_device *vfe_dev)
  6261. {
  6262. int i;
  6263. - uint32_t ub_offset = VFE40_UB_SIZE;
  6264. + struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
  6265. + uint32_t ub_offset = vfe_dev->vfe_ub_size;
  6266. + uint32_t stats_burst_len = stats_data->stats_burst_len;
  6267. +
  6268. +
  6269. uint32_t ub_size[VFE40_NUM_STATS_TYPE] = {
  6270. 64, /*MSM_ISP_STATS_BE*/
  6271. 128, /*MSM_ISP_STATS_BG*/
  6272. @@ -1119,7 +1373,7 @@ static void msm_vfe40_stats_cfg_ub(struct vfe_device *vfe_dev)
  6273.  
  6274. for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
  6275. ub_offset -= ub_size[i];
  6276. - msm_camera_io_w(VFE40_STATS_BURST_LEN << 30 |
  6277. + msm_camera_io_w(stats_burst_len << 30 |
  6278. ub_offset << 16 | (ub_size[i] - 1),
  6279. vfe_dev->vfe_base + VFE40_STATS_BASE(i) + 0xC);
  6280. }
  6281. @@ -1210,6 +1464,14 @@ static int msm_vfe40_get_platform_data(struct vfe_device *vfe_dev)
  6282. goto vfe_no_resource;
  6283. }
  6284.  
  6285. + vfe_dev->tcsr_mem = platform_get_resource_byname(vfe_dev->pdev,
  6286. + IORESOURCE_MEM, "tcsr");
  6287. + if (!vfe_dev->tcsr_mem) {
  6288. + pr_err("%s: no mem resource?\n", __func__);
  6289. + rc = -ENODEV;
  6290. + goto vfe_no_resource;
  6291. + }
  6292. +
  6293. vfe_dev->vfe_irq = platform_get_resource_byname(vfe_dev->pdev,
  6294. IORESOURCE_IRQ, "vfe");
  6295. if (!vfe_dev->vfe_irq) {
  6296. @@ -1247,8 +1509,8 @@ static void msm_vfe40_get_error_mask(
  6297. }
  6298.  
  6299. static struct msm_vfe_axi_hardware_info msm_vfe40_axi_hw_info = {
  6300. - .num_wm = 5,
  6301. - .num_comp_mask = 2,
  6302. + .num_wm = 7,
  6303. + .num_comp_mask = 3,
  6304. .num_rdi = 3,
  6305. .num_rdi_master = 3,
  6306. .min_wm_ub = 64,
  6307. @@ -1327,6 +1589,11 @@ struct msm_vfe_hardware_info vfe40_hw_info = {
  6308. .release_hw = msm_vfe40_release_hardware,
  6309. .get_platform_data = msm_vfe40_get_platform_data,
  6310. .get_error_mask = msm_vfe40_get_error_mask,
  6311. + .get_overflow_mask = msm_vfe40_get_overflow_mask,
  6312. + .get_irq_mask = msm_vfe40_get_irq_mask,
  6313. + .restore_irq_mask = msm_vfe40_restore_irq_mask,
  6314. + .get_halt_restart_mask =
  6315. + msm_vfe40_get_halt_restart_mask,
  6316. .process_error_status = msm_vfe40_process_error_status,
  6317. },
  6318. .stats_ops = {
  6319. diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
  6320. index 2fea23b..8e805ed 100644
  6321. --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
  6322. +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
  6323. @@ -1,4 +1,4 @@
  6324. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  6325. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  6326. *
  6327. * This program is free software; you can redistribute it and/or modify
  6328. * it under the terms of the GNU General Public License version 2 and
  6329. @@ -11,6 +11,7 @@
  6330. */
  6331. #include <linux/io.h>
  6332. #include <media/v4l2-subdev.h>
  6333. +#include <asm/div64.h>
  6334. #include "msm_isp_util.h"
  6335. #include "msm_isp_axi_util.h"
  6336.  
  6337. @@ -20,6 +21,9 @@
  6338.  
  6339. #define HANDLE_TO_IDX(handle) (handle & 0xFF)
  6340.  
  6341. +#define MSM_ISP_MIN_AB 450000000
  6342. +#define MSM_ISP_MIN_IB 900000000
  6343. +
  6344. int msm_isp_axi_create_stream(
  6345. struct msm_vfe_axi_shared_data *axi_data,
  6346. struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
  6347. @@ -68,11 +72,23 @@ int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
  6348. struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
  6349. {
  6350. int rc = -1, i;
  6351. - struct msm_vfe_axi_stream *stream_info =
  6352. - &axi_data->stream_info[
  6353. - HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
  6354. + struct msm_vfe_axi_stream *stream_info = NULL;
  6355. + uint32_t idx = 0;
  6356. +
  6357. + if (NULL == stream_cfg_cmd || NULL == axi_data)
  6358. + return rc;
  6359. +
  6360. + idx = HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle);
  6361. + if (idx < MAX_NUM_STREAM)
  6362. + stream_info = &axi_data->stream_info[idx];
  6363. + else
  6364. + return rc;
  6365.  
  6366. switch (stream_cfg_cmd->output_format) {
  6367. + case V4L2_PIX_FMT_YUYV:
  6368. + case V4L2_PIX_FMT_YVYU:
  6369. + case V4L2_PIX_FMT_UYVY:
  6370. + case V4L2_PIX_FMT_VYUY:
  6371. case V4L2_PIX_FMT_SBGGR8:
  6372. case V4L2_PIX_FMT_SGBRG8:
  6373. case V4L2_PIX_FMT_SGRBG8:
  6374. @@ -97,7 +113,7 @@ int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
  6375. case V4L2_PIX_FMT_QGBRG12:
  6376. case V4L2_PIX_FMT_QGRBG12:
  6377. case V4L2_PIX_FMT_QRGGB12:
  6378. - case V4L2_PIX_FMT_JPEG:
  6379. + case V4L2_PIX_FMT_JPEG:
  6380. case V4L2_PIX_FMT_META:
  6381. stream_info->num_planes = 1;
  6382. stream_info->format_factor = ISP_Q2;
  6383. @@ -159,6 +175,10 @@ static uint32_t msm_isp_axi_get_plane_size(
  6384. uint32_t size = 0;
  6385. struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
  6386. switch (stream_info->output_format) {
  6387. + case V4L2_PIX_FMT_YUYV:
  6388. + case V4L2_PIX_FMT_YVYU:
  6389. + case V4L2_PIX_FMT_UYVY:
  6390. + case V4L2_PIX_FMT_VYUY:
  6391. case V4L2_PIX_FMT_SBGGR8:
  6392. case V4L2_PIX_FMT_SGBRG8:
  6393. case V4L2_PIX_FMT_SGRBG8:
  6394. @@ -167,7 +187,7 @@ static uint32_t msm_isp_axi_get_plane_size(
  6395. case V4L2_PIX_FMT_QGBRG8:
  6396. case V4L2_PIX_FMT_QGRBG8:
  6397. case V4L2_PIX_FMT_QRGGB8:
  6398. - case V4L2_PIX_FMT_JPEG:
  6399. + case V4L2_PIX_FMT_JPEG:
  6400. case V4L2_PIX_FMT_META:
  6401. size = plane_cfg[plane_idx].output_height *
  6402. plane_cfg[plane_idx].output_width;
  6403. @@ -203,7 +223,7 @@ static uint32_t msm_isp_axi_get_plane_size(
  6404. plane_cfg[plane_idx].output_width;
  6405. else
  6406. size = plane_cfg[plane_idx].output_height *
  6407. - plane_cfg[plane_idx].output_width / 2;
  6408. + plane_cfg[plane_idx].output_width;
  6409. break;
  6410. case V4L2_PIX_FMT_NV14:
  6411. case V4L2_PIX_FMT_NV41:
  6412. @@ -212,7 +232,7 @@ static uint32_t msm_isp_axi_get_plane_size(
  6413. plane_cfg[plane_idx].output_width;
  6414. else
  6415. size = plane_cfg[plane_idx].output_height *
  6416. - plane_cfg[plane_idx].output_width / 8;
  6417. + plane_cfg[plane_idx].output_width;
  6418. break;
  6419. case V4L2_PIX_FMT_NV16:
  6420. case V4L2_PIX_FMT_NV61:
  6421. @@ -227,6 +247,21 @@ static uint32_t msm_isp_axi_get_plane_size(
  6422. return size;
  6423. }
  6424.  
  6425. +static void msm_isp_get_buffer_ts(struct vfe_device *vfe_dev,
  6426. + struct msm_isp_timestamp *irq_ts, struct msm_isp_timestamp *ts)
  6427. +{
  6428. + struct msm_vfe_frame_ts *frame_ts = &vfe_dev->frame_ts;
  6429. + uint32_t frame_count = vfe_dev->error_info.info_dump_frame_count;
  6430. +
  6431. + *ts = *irq_ts;
  6432. + if (frame_count == frame_ts->frame_id) {
  6433. + ts->buf_time = frame_ts->buf_time;
  6434. + } else {
  6435. + frame_ts->buf_time = irq_ts->buf_time;
  6436. + frame_ts->frame_id = frame_count;
  6437. + }
  6438. +}
  6439. +
  6440. void msm_isp_axi_reserve_wm(struct msm_vfe_axi_shared_data *axi_data,
  6441. struct msm_vfe_axi_stream *stream_info)
  6442. {
  6443. @@ -301,8 +336,13 @@ int msm_isp_axi_check_stream_state(
  6444. enum msm_vfe_axi_state valid_state =
  6445. (stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
  6446.  
  6447. + if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM) {
  6448. + return -EINVAL;
  6449. + }
  6450. +
  6451. for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  6452. - if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >= MAX_NUM_STREAM) {
  6453. + if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
  6454. + MAX_NUM_STREAM) {
  6455. return -EINVAL;
  6456. }
  6457. stream_info = &axi_data->stream_info[
  6458. @@ -313,12 +353,14 @@ int msm_isp_axi_check_stream_state(
  6459. stream_info->state == PAUSED ||
  6460. stream_info->state == RESUME_PENDING ||
  6461. stream_info->state == RESUMING) &&
  6462. - stream_cfg_cmd->cmd == STOP_STREAM) {
  6463. + (stream_cfg_cmd->cmd == STOP_STREAM ||
  6464. + stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
  6465. stream_info->state = ACTIVE;
  6466. } else {
  6467. + pr_err("%s: Invalid stream state: %d\n",
  6468. + __func__, stream_info->state);
  6469. spin_unlock_irqrestore(
  6470. &stream_info->lock, flags);
  6471. - pr_err("%s: Invalid stream state\n", __func__);
  6472. rc = -EINVAL;
  6473. break;
  6474. }
  6475. @@ -410,21 +452,35 @@ void msm_isp_sof_notify(struct vfe_device *vfe_dev,
  6476. break;
  6477. }
  6478.  
  6479. + sof_event.input_intf = frame_src;
  6480. sof_event.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
  6481. sof_event.timestamp = ts->event_time;
  6482. - msm_isp_send_event(vfe_dev, ISP_EVENT_SOF, &sof_event);
  6483. + sof_event.mono_timestamp = ts->buf_time;
  6484. + msm_isp_send_event(vfe_dev, ISP_EVENT_SOF + frame_src, &sof_event);
  6485. }
  6486.  
  6487. void msm_isp_calculate_framedrop(
  6488. struct msm_vfe_axi_shared_data *axi_data,
  6489. struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
  6490. {
  6491. - struct msm_vfe_axi_stream *stream_info =
  6492. - &axi_data->stream_info[
  6493. - HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
  6494. - uint32_t framedrop_period = msm_isp_get_framedrop_period(
  6495. - stream_cfg_cmd->frame_skip_pattern);
  6496. + struct msm_vfe_axi_stream *stream_info = NULL;
  6497. + uint32_t framedrop_period = 0;
  6498. + uint8_t idx = 0;
  6499. +
  6500. + if (NULL == axi_data || NULL == stream_cfg_cmd)
  6501. + return;
  6502.  
  6503. + idx = HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle);
  6504. +
  6505. + if (idx < MAX_NUM_STREAM)
  6506. + stream_info = &axi_data->stream_info[idx];
  6507. + else
  6508. + return;
  6509. +
  6510. + framedrop_period = msm_isp_get_framedrop_period(
  6511. + stream_cfg_cmd->frame_skip_pattern);
  6512. + stream_info->frame_skip_pattern =
  6513. + stream_cfg_cmd->frame_skip_pattern;
  6514. if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
  6515. stream_info->framedrop_pattern = 0x0;
  6516. else
  6517. @@ -469,15 +525,64 @@ void msm_isp_calculate_bandwidth(
  6518. stream_info->format_factor / ISP_Q2;
  6519. } else {
  6520. int rdi = SRC_TO_INTF(stream_info->stream_src);
  6521. - stream_info->bandwidth = axi_data->src_info[rdi].pixel_clock;
  6522. + if (rdi < VFE_SRC_MAX)
  6523. + stream_info->bandwidth =
  6524. + axi_data->src_info[rdi].pixel_clock;
  6525. }
  6526. }
  6527.  
  6528. +#ifdef CONFIG_MSM_AVTIMER
  6529. +void msm_isp_start_avtimer(void)
  6530. +{
  6531. + avcs_core_open();
  6532. + avcs_core_disable_power_collapse(1);
  6533. +}
  6534. +static inline void msm_isp_get_avtimer_ts(
  6535. + struct msm_isp_timestamp *time_stamp)
  6536. +{
  6537. + int rc = 0;
  6538. + uint32_t avtimer_usec = 0;
  6539. + uint64_t avtimer_tick = 0;
  6540. +
  6541. + rc = avcs_core_query_timer(&avtimer_tick);
  6542. + if (rc < 0) {
  6543. + pr_err("%s: Error: Invalid AVTimer Tick, rc=%d\n",
  6544. + __func__, rc);
  6545. + /* In case of error return zero AVTimer Tick Value */
  6546. + time_stamp->vt_time.tv_sec = 0;
  6547. + time_stamp->vt_time.tv_usec = 0;
  6548. + } else {
  6549. + avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC);
  6550. + time_stamp->vt_time.tv_sec = (uint32_t)(avtimer_tick);
  6551. + time_stamp->vt_time.tv_usec = avtimer_usec;
  6552. + pr_debug("%s: AVTimer TS = %u:%u\n", __func__,
  6553. + (uint32_t)(avtimer_tick), avtimer_usec);
  6554. + }
  6555. +}
  6556. +#else
  6557. +void msm_isp_start_avtimer(void)
  6558. +{
  6559. + pr_err("AV Timer is not supported\n");
  6560. +}
  6561. +
  6562. +static inline void msm_isp_get_avtimer_ts(
  6563. + struct msm_isp_timestamp *time_stamp)
  6564. +{
  6565. + pr_err("%s: Error: AVTimer driver not available\n", __func__);
  6566. + time_stamp->vt_time.tv_sec = 0;
  6567. + time_stamp->vt_time.tv_usec = 0;
  6568. +}
  6569. +
  6570. +#endif
  6571. +
  6572. +
  6573. int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
  6574. {
  6575. int rc = 0, i;
  6576. + uint32_t io_format = 0;
  6577. struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
  6578. struct msm_vfe_axi_stream *stream_info;
  6579. + struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  6580.  
  6581. rc = msm_isp_axi_create_stream(
  6582. &vfe_dev->axi_data, stream_cfg_cmd);
  6583. @@ -490,8 +595,11 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
  6584. &vfe_dev->axi_data, stream_cfg_cmd);
  6585. if (rc) {
  6586. pr_err("%s: Request validation failed\n", __func__);
  6587. - if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) < MAX_NUM_STREAM)
  6588. - msm_isp_axi_destroy_stream(&vfe_dev->axi_data,HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
  6589. + if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
  6590. + MAX_NUM_STREAM) {
  6591. + msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
  6592. + HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
  6593. + }
  6594. return rc;
  6595. }
  6596.  
  6597. @@ -499,13 +607,38 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
  6598. stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
  6599. msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_info);
  6600.  
  6601. - if (stream_cfg_cmd->stream_src == CAMIF_RAW ||
  6602. - stream_cfg_cmd->stream_src == IDEAL_RAW)
  6603. - vfe_dev->hw_info->vfe_ops.axi_ops.
  6604. - cfg_io_format(vfe_dev, stream_info);
  6605. + if (stream_info->stream_src < RDI_INTF_0) {
  6606. + io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
  6607. + if (stream_info->stream_src == CAMIF_RAW ||
  6608. + stream_info->stream_src == IDEAL_RAW) {
  6609. + if (stream_info->stream_src == CAMIF_RAW &&
  6610. + io_format != stream_info->output_format)
  6611. + pr_warn("%s: Overriding input format\n",
  6612. + __func__);
  6613. +
  6614. + io_format = stream_info->output_format;
  6615. + }
  6616. + rc = vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
  6617. + vfe_dev, stream_info->stream_src, io_format);
  6618. + if (rc) {
  6619. + pr_err("%s: cfg io format failed\n", __func__);
  6620. + msm_isp_axi_free_wm(&vfe_dev->axi_data,
  6621. + stream_info);
  6622. + msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
  6623. + HANDLE_TO_IDX(
  6624. + stream_cfg_cmd->axi_stream_handle));
  6625. + return rc;
  6626. + }
  6627. + }
  6628.  
  6629. msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
  6630. + stream_info->vt_enable = stream_cfg_cmd->vt_enable;
  6631. + axi_data->burst_len = stream_cfg_cmd->burst_len;
  6632.  
  6633. + if (stream_info->vt_enable) {
  6634. + vfe_dev->vt_enable = stream_info->vt_enable;
  6635. + msm_isp_start_avtimer();
  6636. + }
  6637. if (stream_info->num_planes > 1) {
  6638. msm_isp_axi_reserve_comp_mask(
  6639. &vfe_dev->axi_data, stream_info);
  6640. @@ -534,12 +667,14 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
  6641. struct msm_vfe_axi_stream *stream_info;
  6642. struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
  6643.  
  6644. - if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >= MAX_NUM_STREAM) {
  6645. - pr_err("%s: Invalid stream handle\n", __func__);
  6646. - return -EINVAL;
  6647. + if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
  6648. + MAX_NUM_STREAM) {
  6649. + pr_err("%s: Invalid stream handle\n", __func__);
  6650. + return -EINVAL;
  6651. }
  6652. - stream_info = &axi_data->stream_info[HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
  6653.  
  6654. + stream_info = &axi_data->stream_info[
  6655. + HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
  6656. if (stream_info->state == AVALIABLE) {
  6657. pr_err("%s: Stream already released\n", __func__);
  6658. return -EINVAL;
  6659. @@ -589,9 +724,18 @@ static void msm_isp_axi_stream_enable_cfg(
  6660. stream_info->state == RESUME_PENDING)
  6661. vfe_dev->hw_info->vfe_ops.axi_ops.
  6662. enable_wm(vfe_dev, stream_info->wm[i], 1);
  6663. - else
  6664. + else {
  6665. vfe_dev->hw_info->vfe_ops.axi_ops.
  6666. enable_wm(vfe_dev, stream_info->wm[i], 0);
  6667. + /* Issue a reg update for the raw snapshot case,
  6668. + * since we don't have a reg update ack.
  6669. + */
  6670. + if (stream_info->stream_src == CAMIF_RAW ||
  6671. + stream_info->stream_src == IDEAL_RAW) {
  6672. + vfe_dev->hw_info->vfe_ops.core_ops.
  6673. + reg_update(vfe_dev);
  6674. + }
  6675. + }
  6676. }
  6677.  
  6678. if (stream_info->state == START_PENDING)
  6679. @@ -621,7 +765,9 @@ void msm_isp_axi_stream_update(struct vfe_device *vfe_dev)
  6680. }
  6681. }
  6682.  
  6683. - if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF) {
  6684. + if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
  6685. + (vfe_dev->axi_data.pipeline_update ==
  6686. + DISABLE_CAMIF_IMMEDIATELY)) {
  6687. vfe_dev->hw_info->vfe_ops.stats_ops.
  6688. enable_module(vfe_dev, 0xFF, 0);
  6689. vfe_dev->axi_data.pipeline_update = NO_UPDATE;
  6690. @@ -636,15 +782,17 @@ static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
  6691. struct msm_vfe_axi_stream *stream_info)
  6692. {
  6693. int i, j;
  6694. + uint32_t flag;
  6695. struct msm_isp_buffer *buf;
  6696. - uint32_t pingpong_flags[2]= {VFE_PING_FLAG, VFE_PONG_FLAG};
  6697. for (i = 0; i < 2; i++) {
  6698. buf = stream_info->buf[i];
  6699. - for (j = 0; j < stream_info->num_planes; j++)
  6700. + flag = i ? VFE_PONG_FLAG : VFE_PING_FLAG;
  6701. + for (j = 0; j < stream_info->num_planes; j++) {
  6702. vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
  6703. - vfe_dev, stream_info->wm[j],
  6704. - pingpong_flags[i], buf->mapped_info[j].paddr +
  6705. + vfe_dev, stream_info->wm[j], flag,
  6706. + buf->mapped_info[j].paddr +
  6707. stream_info->plane_cfg[j].plane_addr_offset);
  6708. + }
  6709. }
  6710. }
  6711.  
  6712. @@ -725,8 +873,9 @@ static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
  6713. rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
  6714. vfe_dev->pdev->id, bufq_handle, &buf);
  6715. if (rc < 0) {
  6716. - vfe_dev->error_info.
  6717. - stream_framedrop_count[stream_idx]++;
  6718. + if (stream_idx < MAX_NUM_STREAM)
  6719. + vfe_dev->error_info.
  6720. + stream_framedrop_count[stream_idx]++;
  6721. return rc;
  6722. }
  6723.  
  6724. @@ -736,12 +885,12 @@ static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
  6725. goto buf_error;
  6726. }
  6727.  
  6728. - for (i = 0; i < stream_info->num_planes; i++) {
  6729. + for (i = 0; i < stream_info->num_planes; i++)
  6730. vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
  6731. vfe_dev, stream_info->wm[i],
  6732. pingpong_status, buf->mapped_info[i].paddr +
  6733. stream_info->plane_cfg[i].plane_addr_offset);
  6734. - }
  6735. +
  6736. pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
  6737. stream_info->buf[pingpong_bit] = buf;
  6738. return 0;
  6739. @@ -757,25 +906,41 @@ static void msm_isp_process_done_buf(struct vfe_device *vfe_dev,
  6740. {
  6741. int rc;
  6742. struct msm_isp_event_data buf_event;
  6743. + struct timeval *time_stamp;
  6744. uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
  6745. - uint32_t frame_id = vfe_dev->axi_data.
  6746. - src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
  6747. - if (stream_idx >= MAX_NUM_STREAM) {
  6748. - pr_err("%s: Invalid stream_idx", __func__);
  6749. + uint32_t src_intf = SRC_TO_INTF(stream_info->stream_src);
  6750. + uint32_t frame_id = 0;
  6751. + memset(&buf_event, 0, sizeof(buf_event));
  6752. +
  6753. + if (stream_idx >= MAX_NUM_STREAM) {
  6754. + pr_err("%s: Invalid stream_idx\n", __func__);
  6755. return;
  6756. }
  6757. +
  6758. + if (src_intf < VFE_SRC_MAX) {
  6759. + frame_id = vfe_dev->axi_data.src_info[src_intf].frame_id;
  6760. + }
  6761. +
  6762. if (buf && ts) {
  6763. + if (vfe_dev->vt_enable) {
  6764. + msm_isp_get_avtimer_ts(ts);
  6765. + time_stamp = &ts->vt_time;
  6766. + } else {
  6767. + time_stamp = &ts->buf_time;
  6768. + }
  6769. if (stream_info->buf_divert) {
  6770. rc = vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
  6771. buf->bufq_handle, buf->buf_idx,
  6772. - &ts->buf_time, frame_id);
  6773. + time_stamp, frame_id);
  6774. /* Buf divert return value represent whether the buf
  6775. * can be diverted. A positive return value means
  6776. * other ISP hardware is still processing the frame.
  6777. */
  6778. if (rc == 0) {
  6779. + buf_event.input_intf =
  6780. + SRC_TO_INTF(stream_info->stream_src);
  6781. buf_event.frame_id = frame_id;
  6782. - buf_event.timestamp = ts->buf_time;
  6783. + buf_event.timestamp = *time_stamp;
  6784. buf_event.u.buf_done.session_id =
  6785. stream_info->session_id;
  6786. buf_event.u.buf_done.stream_id =
  6787. @@ -792,13 +957,13 @@ static void msm_isp_process_done_buf(struct vfe_device *vfe_dev,
  6788. } else {
  6789. vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
  6790. buf->bufq_handle, buf->buf_idx,
  6791. - &ts->buf_time, frame_id,
  6792. + time_stamp, frame_id,
  6793. stream_info->runtime_output_format);
  6794. }
  6795. }
  6796. }
  6797.  
  6798. -enum msm_isp_camif_update_state
  6799. +static enum msm_isp_camif_update_state
  6800. msm_isp_get_camif_update_state(struct vfe_device *vfe_dev,
  6801. struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
  6802. {
  6803. @@ -825,26 +990,36 @@ enum msm_isp_camif_update_state
  6804. (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
  6805. stream_cfg_cmd->cmd == STOP_STREAM)
  6806. return DISABLE_CAMIF;
  6807. + else if (cur_pix_stream_cnt &&
  6808. + (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
  6809. + stream_cfg_cmd->cmd == STOP_IMMEDIATELY)
  6810. + return DISABLE_CAMIF_IMMEDIATELY;
  6811. }
  6812. return NO_UPDATE;
  6813. }
  6814.  
  6815. -void msm_isp_update_camif_output_count(
  6816. +static void msm_isp_update_camif_output_count(
  6817. struct vfe_device *vfe_dev,
  6818. struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
  6819. {
  6820. int i;
  6821. struct msm_vfe_axi_stream *stream_info;
  6822. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  6823. +
  6824. + if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM) {
  6825. + return;
  6826. + }
  6827. +
  6828. for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  6829. - if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >= MAX_NUM_STREAM) {
  6830. + if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])
  6831. + >= MAX_NUM_STREAM) {
  6832. return;
  6833. }
  6834. stream_info =
  6835. &axi_data->stream_info[
  6836. HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
  6837. if (stream_info->stream_src >= RDI_INTF_0)
  6838. - return;
  6839. + continue;
  6840. if (stream_info->stream_src == PIX_ENCODER ||
  6841. stream_info->stream_src == PIX_VIEWFINDER ||
  6842. stream_info->stream_src == IDEAL_RAW) {
  6843. @@ -865,6 +1040,68 @@ void msm_isp_update_camif_output_count(
  6844. }
  6845. }
  6846.  
  6847. +static void msm_isp_update_rdi_output_count(
  6848. + struct vfe_device *vfe_dev,
  6849. + struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
  6850. +{
  6851. + int i;
  6852. + struct msm_vfe_axi_stream *stream_info;
  6853. + struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  6854. +
  6855. + if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM) {
  6856. + return;
  6857. + }
  6858. +
  6859. + for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  6860. + if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])
  6861. + >= MAX_NUM_STREAM) {
  6862. + return;
  6863. + }
  6864. + stream_info =
  6865. + &axi_data->stream_info[
  6866. + HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
  6867. + if (stream_info->stream_src < RDI_INTF_0)
  6868. + continue;
  6869. + if (stream_info->stream_src == RDI_INTF_0) {
  6870. + if (stream_cfg_cmd->cmd == START_STREAM)
  6871. + vfe_dev->axi_data.src_info[VFE_RAW_0].
  6872. + raw_stream_count++;
  6873. + else
  6874. + vfe_dev->axi_data.src_info[VFE_RAW_0].
  6875. + raw_stream_count--;
  6876. + } else if (stream_info->stream_src == RDI_INTF_1) {
  6877. + if (stream_cfg_cmd->cmd == START_STREAM)
  6878. + vfe_dev->axi_data.src_info[VFE_RAW_1].
  6879. + raw_stream_count++;
  6880. + else
  6881. + vfe_dev->axi_data.src_info[VFE_RAW_1].
  6882. + raw_stream_count--;
  6883. + } else if (stream_info->stream_src == RDI_INTF_2) {
  6884. + if (stream_cfg_cmd->cmd == START_STREAM)
  6885. + vfe_dev->axi_data.src_info[VFE_RAW_2].
  6886. + raw_stream_count++;
  6887. + else
  6888. + vfe_dev->axi_data.src_info[VFE_RAW_2].
  6889. + raw_stream_count--;
  6890. + }
  6891. +
  6892. + }
  6893. +}
  6894. +
  6895. +static uint8_t msm_isp_get_curr_stream_cnt(
  6896. + struct vfe_device *vfe_dev)
  6897. +{
  6898. + uint8_t curr_stream_cnt = 0;
  6899. + curr_stream_cnt = vfe_dev->axi_data.src_info[VFE_RAW_0].
  6900. + raw_stream_count + vfe_dev->axi_data.src_info[VFE_RAW_1].
  6901. + raw_stream_count + vfe_dev->axi_data.src_info[VFE_RAW_2].
  6902. + raw_stream_count + vfe_dev->axi_data.src_info[VFE_PIX_0].
  6903. + pix_stream_count + vfe_dev->axi_data.src_info[VFE_PIX_0].
  6904. + raw_stream_count;
  6905. +
  6906. + return curr_stream_cnt;
  6907. +}
  6908. +
  6909. void msm_camera_io_dump_2(void __iomem *addr, int size)
  6910. {
  6911. char line_str[128], *p_str;
  6912. @@ -902,6 +1139,8 @@ static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
  6913. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  6914. uint32_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
  6915. uint32_t num_pix_streams = 0;
  6916. + uint32_t num_rdi_streams = 0;
  6917. + uint32_t total_streams = 0;
  6918. uint64_t total_bandwidth = 0;
  6919.  
  6920. for (i = 0; i < MAX_NUM_STREAM; i++) {
  6921. @@ -913,19 +1152,27 @@ static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
  6922. num_pix_streams++;
  6923. } else {
  6924. total_rdi_bandwidth += stream_info->bandwidth;
  6925. + num_rdi_streams++;
  6926. }
  6927. }
  6928. }
  6929. if (num_pix_streams > 0)
  6930. total_pix_bandwidth = total_pix_bandwidth /
  6931. num_pix_streams * (num_pix_streams - 1) +
  6932. - axi_data->src_info[VFE_PIX_0].pixel_clock *
  6933. - ISP_DEFAULT_FORMAT_FACTOR / ISP_Q2;
  6934. + ((unsigned long)axi_data->src_info[VFE_PIX_0].
  6935. + pixel_clock) * ISP_DEFAULT_FORMAT_FACTOR / ISP_Q2;
  6936. total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
  6937. -
  6938. + total_streams = num_pix_streams + num_rdi_streams;
  6939. + if (total_streams == 1) {
  6940. + rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
  6941. + (total_bandwidth - MSM_ISP_MIN_AB) , (total_bandwidth *
  6942. + ISP_BUS_UTILIZATION_FACTOR / ISP_Q2 - MSM_ISP_MIN_IB));
  6943. + }
  6944. + else {
  6945. rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
  6946. total_bandwidth, total_bandwidth *
  6947. ISP_BUS_UTILIZATION_FACTOR / ISP_Q2);
  6948. + }
  6949. if (rc < 0)
  6950. pr_err("%s: update failed\n", __func__);
  6951.  
  6952. @@ -942,7 +1189,7 @@ static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
  6953. vfe_dev->axi_data.pipeline_update = camif_update;
  6954. vfe_dev->axi_data.stream_update = 2;
  6955. spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
  6956. - rc = wait_for_completion_interruptible_timeout(
  6957. + rc = wait_for_completion_timeout(
  6958. &vfe_dev->stream_config_complete,
  6959. msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
  6960. if (rc == 0) {
  6961. @@ -1015,18 +1262,25 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
  6962. enum msm_isp_camif_update_state camif_update)
  6963. {
  6964. int i, rc = 0;
  6965. - uint8_t src_state, wait_for_complete = 0;
  6966. + uint8_t src_state = 0, wait_for_complete = 0;
  6967. uint32_t wm_reload_mask = 0x0;
  6968. struct msm_vfe_axi_stream *stream_info;
  6969. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  6970. +
  6971. + if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM) {
  6972. + return -EINVAL;
  6973. + }
  6974. +
  6975. for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  6976. - if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >= MAX_NUM_STREAM) {
  6977. + if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])
  6978. + >= MAX_NUM_STREAM) {
  6979. return -EINVAL;
  6980. }
  6981. stream_info = &axi_data->stream_info[
  6982. HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
  6983. - src_state = axi_data->src_info[
  6984. - SRC_TO_INTF(stream_info->stream_src)].active;
  6985. + if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
  6986. + src_state = axi_data->src_info[
  6987. + SRC_TO_INTF(stream_info->stream_src)].active;
  6988.  
  6989. msm_isp_calculate_bandwidth(axi_data, stream_info);
  6990. msm_isp_reset_framedrop(vfe_dev, stream_info);
  6991. @@ -1050,17 +1304,21 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
  6992. msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
  6993. stream_info->state = ACTIVE;
  6994. }
  6995. + vfe_dev->axi_data.src_info[
  6996. + SRC_TO_INTF(stream_info->stream_src)].frame_id = 0;
  6997. }
  6998. msm_isp_update_stream_bandwidth(vfe_dev);
  6999. vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
  7000. vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev);
  7001.  
  7002. msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
  7003. + msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
  7004. if (camif_update == ENABLE_CAMIF) {
  7005. vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
  7006. vfe_dev->hw_info->vfe_ops.core_ops.
  7007. update_camif_state(vfe_dev, camif_update);
  7008. }
  7009. +
  7010. if (wait_for_complete)
  7011. rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
  7012.  
  7013. @@ -1072,35 +1330,81 @@ static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
  7014. enum msm_isp_camif_update_state camif_update)
  7015. {
  7016. int i, rc = 0;
  7017. + uint8_t wait_for_complete = 0, cur_stream_cnt = 0;
  7018. struct msm_vfe_axi_stream *stream_info;
  7019. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  7020. +
  7021. + if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM) {
  7022. + return -EINVAL;
  7023. + }
  7024. +
  7025. for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  7026. - if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >= MAX_NUM_STREAM) {
  7027. + if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])
  7028. + >= MAX_NUM_STREAM) {
  7029. return -EINVAL;
  7030. }
  7031. stream_info = &axi_data->stream_info[
  7032. HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
  7033. - stream_info->state = STOP_PENDING;
  7034. - }
  7035.  
  7036. - rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
  7037. - if (rc < 0) {
  7038. - pr_err("%s: wait for config done failed\n", __func__);
  7039. - pr_err("%s:<DEBUG00>timeout:no frame from sensor\n", __func__);
  7040. - for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  7041. - stream_info = &axi_data->stream_info[
  7042. - HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
  7043. - stream_info->state = STOP_PENDING;
  7044. - msm_isp_axi_stream_enable_cfg(
  7045. - vfe_dev, stream_info);
  7046. + stream_info->state = STOP_PENDING;
  7047. + if (stream_info->stream_src == CAMIF_RAW ||
  7048. + stream_info->stream_src == IDEAL_RAW) {
  7049. + /* We don't get a reg update IRQ for raw snapshot,
  7050. + * so frame skip can't be configured.
  7051. + */
  7052. + wait_for_complete = 1;
  7053. + } else if (stream_info->stream_type == BURST_STREAM &&
  7054. + stream_info->runtime_num_burst_capture == 0) {
  7055. + /* Configure AXI write masters to stop immediately,
  7056. + * since in the burst case the write masters already skip
  7057. + * all frames.
  7058. + */
  7059. + if (stream_info->stream_src == RDI_INTF_0 ||
  7060. + stream_info->stream_src == RDI_INTF_1 ||
  7061. + stream_info->stream_src == RDI_INTF_2)
  7062. + wait_for_complete = 1;
  7063. + else {
  7064. + msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
  7065. stream_info->state = INACTIVE;
  7066. + }
  7067. + } else {
  7068. + wait_for_complete = 1;
  7069. + }
  7070. + }
  7071. + if (wait_for_complete) {
  7072. + rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
  7073. + if (rc < 0) {
  7074. + pr_err("%s: wait for config done failed\n", __func__);
  7075. + for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  7076. + stream_info = &axi_data->stream_info[
  7077. + HANDLE_TO_IDX(
  7078. + stream_cfg_cmd->stream_handle[i])];
  7079. + stream_info->state = STOP_PENDING;
  7080. + msm_isp_axi_stream_enable_cfg(
  7081. + vfe_dev, stream_info);
  7082. + stream_info->state = INACTIVE;
  7083. + }
  7084. }
  7085. }
  7086. msm_isp_update_stream_bandwidth(vfe_dev);
  7087. if (camif_update == DISABLE_CAMIF)
  7088. vfe_dev->hw_info->vfe_ops.core_ops.
  7089. update_camif_state(vfe_dev, DISABLE_CAMIF);
  7090. + else if (camif_update == DISABLE_CAMIF_IMMEDIATELY)
  7091. + vfe_dev->hw_info->vfe_ops.core_ops.
  7092. + update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
  7093. msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
  7094. + msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
  7095. + cur_stream_cnt = msm_isp_get_curr_stream_cnt(vfe_dev);
  7096. + if (cur_stream_cnt == 0) {
  7097. + vfe_dev->ignore_error = 1;
  7098. + if (camif_update == DISABLE_CAMIF_IMMEDIATELY) {
  7099. + vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
  7100. + }
  7101. + vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, ISP_RST_HARD, 1);
  7102. + vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
  7103. + vfe_dev->ignore_error = 0;
  7104. + }
  7105.  
  7106. for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
  7107. stream_info = &axi_data->stream_info[
  7108. @@ -1156,9 +1460,16 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
  7109. return -EBUSY;
  7110. }
  7111.  
  7112. + /* num_streams is uint32 and update_info[] is bounded by MAX_NUM_STREAM */
  7113. + if (update_cmd->num_streams > MAX_NUM_STREAM) {
  7114. + return -EINVAL;
  7115. + }
  7116. +
  7117. for (i = 0; i < update_cmd->num_streams; i++) {
  7118. update_info = &update_cmd->update_info[i];
  7119. - if (HANDLE_TO_IDX(update_info->stream_handle) >= MAX_NUM_STREAM) {
  7120. + /* check array reference bounds */
  7121. + if (HANDLE_TO_IDX(update_info->stream_handle)
  7122. + >= MAX_NUM_STREAM) {
  7123. return -EINVAL;
  7124. }
  7125. stream_info = &axi_data->stream_info[
  7126. @@ -1169,7 +1480,10 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
  7127. return -EINVAL;
  7128. }
  7129. if (stream_info->state == ACTIVE &&
  7130. - stream_info->stream_type == BURST_STREAM) {
  7131. + stream_info->stream_type == BURST_STREAM &&
  7132. + (1 != update_cmd->num_streams ||
  7133. + UPDATE_STREAM_FRAMEDROP_PATTERN !=
  7134. + update_cmd->update_type)) {
  7135. pr_err("%s: Cannot update active burst stream\n",
  7136. __func__);
  7137. return -EINVAL;
  7138. @@ -1196,7 +1510,10 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
  7139. msm_isp_get_framedrop_period(
  7140. update_info->skip_pattern);
  7141. stream_info->runtime_init_frame_drop = 0;
  7142. - stream_info->framedrop_pattern = 0x1;
  7143. + if (update_info->skip_pattern == SKIP_ALL)
  7144. + stream_info->framedrop_pattern = 0x0;
  7145. + else
  7146. + stream_info->framedrop_pattern = 0x1;
  7147. stream_info->framedrop_period = framedrop_period - 1;
  7148. vfe_dev->hw_info->vfe_ops.axi_ops.
  7149. cfg_framedrop(vfe_dev, stream_info);
  7150. @@ -1243,6 +1560,7 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
  7151. struct msm_vfe_axi_stream *stream_info;
  7152. struct msm_vfe_axi_composite_info *comp_info;
  7153. struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  7154. + struct msm_isp_timestamp buf_ts;
  7155.  
  7156. comp_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
  7157. get_comp_mask(irq_status0, irq_status1);
  7158. @@ -1255,11 +1573,14 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
  7159. pingpong_status =
  7160. vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
  7161.  
  7162. + msm_isp_get_buffer_ts(vfe_dev, ts, &buf_ts);
  7163. +
  7164. for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
  7165. comp_info = &axi_data->composite_info[i];
  7166. if (comp_mask & (1 << i)) {
  7167. stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
  7168. - if ((!comp_info->stream_handle) || (stream_idx >= MAX_NUM_STREAM)) {
  7169. + if ((!comp_info->stream_handle) ||
  7170. + (stream_idx >= MAX_NUM_STREAM)) {
  7171. pr_err("%s: Invalid handle for composite irq\n",
  7172. __func__);
  7173. } else {
  7174. @@ -1288,7 +1609,7 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
  7175. }
  7176. if (done_buf && !rc)
  7177. msm_isp_process_done_buf(vfe_dev,
  7178. - stream_info, done_buf, ts);
  7179. + stream_info, done_buf, &buf_ts);
  7180. }
  7181. }
  7182. wm_mask &= ~(comp_info->stream_composite_mask);
  7183. @@ -1297,7 +1618,8 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
  7184. for (i = 0; i < axi_data->hw_info->num_wm; i++) {
  7185. if (wm_mask & (1 << i)) {
  7186. stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
  7187. - if ((!axi_data->free_wm[i]) || (stream_idx >= MAX_NUM_STREAM)) {
  7188. + if ((!axi_data->free_wm[i]) ||
  7189. + (stream_idx >= MAX_NUM_STREAM)) {
  7190. pr_err("%s: Invalid handle for wm irq\n",
  7191. __func__);
  7192. continue;
  7193. @@ -1320,7 +1642,7 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
  7194. }
  7195. if (done_buf && !rc)
  7196. msm_isp_process_done_buf(vfe_dev,
  7197. - stream_info, done_buf, ts);
  7198. + stream_info, done_buf, &buf_ts);
  7199. }
  7200. }
  7201. return;
  7202. diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
  7203. index 34ebf62..dbc27ad 100644
  7204. --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
  7205. +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
  7206. @@ -1,4 +1,4 @@
  7207. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  7208. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  7209. *
  7210. * This program is free software; you can redistribute it and/or modify
  7211. * it under the terms of the GNU General Public License version 2 and
  7212. @@ -27,7 +27,7 @@ static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
  7213. #define MSM_ISP_MIN_AB 300000000
  7214. #define MSM_ISP_MIN_IB 450000000
  7215.  
  7216. -
  7217. +#define VFE40_8974V2_VERSION 0x1001001A
  7218. static struct msm_bus_vectors msm_isp_init_vectors[] = {
  7219. {
  7220. .src = MSM_BUS_MASTER_VFE,
  7221. @@ -76,6 +76,25 @@ static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
  7222. .name = "msm_camera_isp",
  7223. };
  7224.  
  7225. +static void msm_isp_print_fourcc_error(const char *origin,
  7226. + uint32_t fourcc_format)
  7227. +{
  7228. + int i;
  7229. + char text[5];
  7230. + text[4] = '\0';
  7231. + for (i = 0; i < 4; i++) {
  7232. + text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
  7233. + if ((text[i] < '0') || (text[i] > 'z')) {
  7234. + pr_err("%s: Invalid output format %d (unprintable)\n",
  7235. + origin, fourcc_format);
  7236. + return;
  7237. + }
  7238. + }
  7239. + pr_err("%s: Invalid output format %s\n",
  7240. + origin, text);
  7241. + return;
  7242. +}
  7243. +
  7244. int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
  7245. {
  7246. int rc = 0;
  7247. @@ -113,6 +132,7 @@ int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
  7248. pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
  7249. __func__, isp_bandwidth_mgr.use_count,
  7250. isp_bandwidth_mgr.bus_client);
  7251. + mutex_unlock(&bandwidth_mgr_mutex);
  7252. return -EINVAL;
  7253. }
  7254.  
  7255. @@ -236,6 +256,43 @@ int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
  7256. return rc;
  7257. }
  7258.  
  7259. +static int msm_isp_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate)
  7260. +{
  7261. + int clk_idx = 0;
  7262. + unsigned long max_value = ~0;
  7263. + long round_rate = 0;
  7264. +
  7265. + if (!vfe_dev || !rate) {
  7266. + pr_err("%s:%d failed: vfe_dev %p rate %p\n", __func__, __LINE__,
  7267. + vfe_dev, rate);
  7268. + return -EINVAL;
  7269. + }
  7270. +
  7271. + *rate = 0;
  7272. + if (!vfe_dev->hw_info) {
  7273. + pr_err("%s:%d failed: vfe_dev->hw_info %p\n", __func__,
  7274. + __LINE__, vfe_dev->hw_info);
  7275. + return -EINVAL;
  7276. + }
  7277. +
  7278. + clk_idx = vfe_dev->hw_info->vfe_clk_idx;
  7279. + if (clk_idx >= ARRAY_SIZE(vfe_dev->vfe_clk)) {
  7280. + pr_err("%s:%d failed: clk_idx %d max array size %d\n",
  7281. + __func__, __LINE__, clk_idx,
  7282. + ARRAY_SIZE(vfe_dev->vfe_clk));
  7283. + return -EINVAL;
  7284. + }
  7285. +
  7286. + round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], max_value);
  7287. + if (round_rate < 0) {
  7288. + pr_err("%s: Invalid vfe clock rate\n", __func__);
  7289. + return -EINVAL;
  7290. + }
  7291. +
  7292. + *rate = round_rate;
  7293. + return 0;
  7294. +}
  7295. +
  7296. static int msm_isp_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
  7297. {
  7298. int rc = 0;
  7299. @@ -279,6 +336,9 @@ int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
  7300. return rc;
  7301. }
  7302.  
  7303. + vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
  7304. + input_cfg->d.pix_cfg.input_format;
  7305. +
  7306. vfe_dev->hw_info->vfe_ops.core_ops.cfg_camif(
  7307. vfe_dev, &input_cfg->d.pix_cfg);
  7308. return rc;
  7309. @@ -298,13 +358,6 @@ int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
  7310. input_cfg->input_pix_clk;
  7311. vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
  7312. vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
  7313. -
  7314. - rc = msm_isp_set_clk_rate(vfe_dev,
  7315. - &vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock);
  7316. - if (rc < 0) {
  7317. - pr_err("%s: clock set rate failed\n", __func__);
  7318. - return rc;
  7319. - }
  7320. return rc;
  7321. }
  7322.  
  7323. @@ -335,10 +388,6 @@ long msm_isp_ioctl(struct v4l2_subdev *sd,
  7324. long rc = 0;
  7325. struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
  7326.  
  7327. - if (!vfe_dev) {
  7328. - pr_err("%s: vfe_dev NULL\n", __func__);
  7329. - return -EINVAL;
  7330. - }
  7331. /* Use real time mutex for hard real-time ioctls such as
  7332. * buffer operations and register updates.
  7333. * Use core mutex for other ioctls that could take
  7334. @@ -383,7 +432,7 @@ long msm_isp_ioctl(struct v4l2_subdev *sd,
  7335. break;
  7336. case VIDIOC_MSM_ISP_SET_SRC_STATE:
  7337. mutex_lock(&vfe_dev->core_mutex);
  7338. - msm_isp_set_src_state(vfe_dev, arg);
  7339. + rc = msm_isp_set_src_state(vfe_dev, arg);
  7340. mutex_unlock(&vfe_dev->core_mutex);
  7341. break;
  7342. case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
  7343. @@ -409,11 +458,10 @@ long msm_isp_ioctl(struct v4l2_subdev *sd,
  7344. case MSM_SD_SHUTDOWN:
  7345. while (vfe_dev->vfe_open_cnt != 0)
  7346. msm_isp_close_node(sd, NULL);
  7347. - rc = 0;
  7348. break;
  7349.  
  7350. default:
  7351. - pr_err("%s: Invalid ISP command\n", __func__);
  7352. + pr_err_ratelimited("%s: Invalid ISP command\n", __func__);
  7353. rc = -EINVAL;
  7354. }
  7355. return rc;
  7356. @@ -489,8 +537,8 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
  7357. pr_err("%s:%d len %d\n",
  7358. __func__, __LINE__,
  7359. reg_cfg_cmd->u.dmi_info.len);
  7360. - return -EINVAL;
  7361. - }
  7362. + return -EINVAL;
  7363. + }
  7364. if (((UINT_MAX -
  7365. reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
  7366. (reg_cfg_cmd->u.dmi_info.len -
  7367. @@ -549,6 +597,7 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
  7368. }
  7369. temp = msm_camera_io_r(vfe_dev->vfe_base +
  7370. reg_cfg_cmd->u.mask_info.reg_offset);
  7371. +
  7372. temp &= ~reg_cfg_cmd->u.mask_info.mask;
  7373. temp |= reg_cfg_cmd->u.mask_info.val;
  7374. msm_camera_io_w(temp, vfe_dev->vfe_base +
  7375. @@ -565,10 +614,11 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
  7376. hi_tbl_ptr = cfg_data +
  7377. reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
  7378. }
  7379. -
  7380. lo_tbl_ptr = cfg_data +
  7381. reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
  7382. -
  7383. + if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
  7384. + reg_cfg_cmd->u.dmi_info.len =
  7385. + reg_cfg_cmd->u.dmi_info.len / 2;
  7386. for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
  7387. lo_val = *lo_tbl_ptr++;
  7388. if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
  7389. @@ -607,7 +657,7 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
  7390. reg_cfg_cmd->u.dmi_info.len =
  7391. reg_cfg_cmd->u.dmi_info.len / 2;
  7392.  
  7393. - for (i = 0; i < reg_cfg_cmd->u.dmi_info.len / 4; i++) {
  7394. + for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
  7395. lo_val = msm_camera_io_r(vfe_dev->vfe_base +
  7396. vfe_dev->hw_info->dmi_reg_offset + 0x4);
  7397.  
  7398. @@ -627,17 +677,87 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
  7399. }
  7400. break;
  7401. }
  7402. + case VFE_HW_UPDATE_LOCK: {
  7403. + uint32_t update_id =
  7404. + vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
  7405. + if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
  7406. + || update_id == *cfg_data) {
  7407. + ISP_DBG("hw update lock failed, acquire id %u\n",
  7408. + *cfg_data);
  7409. + ISP_DBG("hw update lock failed, current id %lu\n",
  7410. + vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
  7411. + ISP_DBG("hw update lock failed, last id %u\n",
  7412. + update_id);
  7413. + return -EINVAL;
  7414. + }
  7415. + break;
  7416. + }
  7417. + case VFE_HW_UPDATE_UNLOCK: {
  7418. + if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
  7419. + != *cfg_data) {
  7420. + ISP_DBG("hw update across frame boundary, begin id %u\n",
  7421. + *cfg_data);
  7422. + ISP_DBG("hw update across frame boundary, end id %lu\n",
  7423. + vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
  7424. + }
  7425. + vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
  7426. + vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
  7427. + break;
  7428. + }
  7429. case VFE_READ: {
  7430. int i;
  7431. uint32_t *data_ptr = cfg_data +
  7432. reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
  7433. for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
  7434. + if ((data_ptr < cfg_data) ||
  7435. + (UINT_MAX / sizeof(*data_ptr) <
  7436. + (data_ptr - cfg_data)) ||
  7437. + (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
  7438. + cmd_len))
  7439. + return -EINVAL;
  7440. *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
  7441. reg_cfg_cmd->u.rw_info.reg_offset);
  7442. reg_cfg_cmd->u.rw_info.reg_offset += 4;
  7443. }
  7444. break;
  7445. }
  7446. + case GET_SOC_HW_VER: {
  7447. + if (cmd_len < sizeof(uint32_t)) {
  7448. + pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
  7449. + __func__, __LINE__, cmd_len,
  7450. + sizeof(uint32_t));
  7451. + return -EINVAL;
  7452. + }
  7453. + *cfg_data = vfe_dev->soc_hw_version;
  7454. + break;
  7455. + }
  7456. + case GET_MAX_CLK_RATE: {
  7457. + int rc = 0;
  7458. +
  7459. + if (cmd_len < sizeof(unsigned long)) {
  7460. + pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
  7461. + __func__, __LINE__, cmd_len,
  7462. + sizeof(unsigned long));
  7463. + return -EINVAL;
  7464. + }
  7465. + rc = msm_isp_get_max_clk_rate(vfe_dev,
  7466. + (unsigned long *)cfg_data);
  7467. + if (rc < 0) {
  7468. + pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
  7469. + return -EINVAL;
  7470. + }
  7471. + break;
  7472. + }
  7473. + case SET_WM_UB_SIZE: {
  7474. + if (cmd_len < sizeof(uint32_t)) {
  7475. + pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
  7476. + __func__, __LINE__, cmd_len,
  7477. + sizeof(uint32_t));
  7478. + return -EINVAL;
  7479. + }
  7480. + vfe_dev->vfe_ub_size = *cfg_data;
  7481. + break;
  7482. + }
  7483. }
  7484. return 0;
  7485. }
  7486. @@ -649,6 +769,11 @@ int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
  7487. struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
  7488. uint32_t *cfg_data;
  7489.  
  7490. + if (!proc_cmd->num_cfg) {
  7491. + pr_err("%s: Passed num_cfg as 0\n", __func__);
  7492. + return -EINVAL;
  7493. + }
  7494. +
  7495. reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
  7496. proc_cmd->num_cfg, GFP_KERNEL);
  7497. if (!reg_cfg_cmd) {
  7498. @@ -657,6 +782,12 @@ int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
  7499. goto reg_cfg_failed;
  7500. }
  7501.  
  7502. + if (!proc_cmd->cmd_len) {
  7503. + pr_err("%s: Passed cmd_len as 0\n", __func__);
  7504. + rc = -EINVAL;
  7505. + goto cfg_data_failed;
  7506. + }
  7507. +
  7508. cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
  7509. if (!cfg_data) {
  7510. pr_err("%s: cfg_data alloc failed\n", __func__);
  7511. @@ -679,7 +810,7 @@ int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
  7512. }
  7513.  
  7514. for (i = 0; i < proc_cmd->num_cfg; i++)
  7515. - msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
  7516. + rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
  7517. cfg_data, proc_cmd->cmd_len);
  7518.  
  7519. if (copy_to_user(proc_cmd->cfg_data,
  7520. @@ -725,7 +856,7 @@ int msm_isp_cal_word_per_line(uint32_t output_format,
  7521. case V4L2_PIX_FMT_QGBRG8:
  7522. case V4L2_PIX_FMT_QGRBG8:
  7523. case V4L2_PIX_FMT_QRGGB8:
  7524. - case V4L2_PIX_FMT_JPEG:
  7525. + case V4L2_PIX_FMT_JPEG:
  7526. case V4L2_PIX_FMT_META:
  7527. val = CAL_WORD(pixel_per_line, 1, 8);
  7528. break;
  7529. @@ -761,17 +892,63 @@ int msm_isp_cal_word_per_line(uint32_t output_format,
  7530. case V4L2_PIX_FMT_NV61:
  7531. val = CAL_WORD(pixel_per_line, 1, 8);
  7532. break;
  7533. + case V4L2_PIX_FMT_YUYV:
  7534. + case V4L2_PIX_FMT_YVYU:
  7535. + case V4L2_PIX_FMT_UYVY:
  7536. + case V4L2_PIX_FMT_VYUY:
  7537. + val = CAL_WORD(pixel_per_line, 2, 8);
  7538. + break;
  7539. /*TD: Add more image format*/
  7540. default:
  7541. - pr_err("%s: Invalid output format\n", __func__);
  7542. + msm_isp_print_fourcc_error(__func__, output_format);
  7543. break;
  7544. }
  7545. return val;
  7546. }
  7547.  
  7548. +enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
  7549. +{
  7550. + switch (output_format) {
  7551. + case V4L2_PIX_FMT_SBGGR8:
  7552. + case V4L2_PIX_FMT_SGBRG8:
  7553. + case V4L2_PIX_FMT_SGRBG8:
  7554. + case V4L2_PIX_FMT_SRGGB8:
  7555. + case V4L2_PIX_FMT_SBGGR10:
  7556. + case V4L2_PIX_FMT_SGBRG10:
  7557. + case V4L2_PIX_FMT_SGRBG10:
  7558. + case V4L2_PIX_FMT_SRGGB10:
  7559. + case V4L2_PIX_FMT_SBGGR12:
  7560. + case V4L2_PIX_FMT_SGBRG12:
  7561. + case V4L2_PIX_FMT_SGRBG12:
  7562. + case V4L2_PIX_FMT_SRGGB12:
  7563. + return MIPI;
  7564. + case V4L2_PIX_FMT_QBGGR8:
  7565. + case V4L2_PIX_FMT_QGBRG8:
  7566. + case V4L2_PIX_FMT_QGRBG8:
  7567. + case V4L2_PIX_FMT_QRGGB8:
  7568. + case V4L2_PIX_FMT_QBGGR10:
  7569. + case V4L2_PIX_FMT_QGBRG10:
  7570. + case V4L2_PIX_FMT_QGRBG10:
  7571. + case V4L2_PIX_FMT_QRGGB10:
  7572. + case V4L2_PIX_FMT_QBGGR12:
  7573. + case V4L2_PIX_FMT_QGBRG12:
  7574. + case V4L2_PIX_FMT_QGRBG12:
  7575. + case V4L2_PIX_FMT_QRGGB12:
  7576. + return QCOM;
  7577. + default:
  7578. + msm_isp_print_fourcc_error(__func__, output_format);
  7579. + break;
  7580. + }
  7581. + return -EINVAL;
  7582. +}
  7583. +
  7584. int msm_isp_get_bit_per_pixel(uint32_t output_format)
  7585. {
  7586. switch (output_format) {
  7587. + case V4L2_PIX_FMT_Y4:
  7588. + return 4;
  7589. + case V4L2_PIX_FMT_Y6:
  7590. + return 6;
  7591. case V4L2_PIX_FMT_SBGGR8:
  7592. case V4L2_PIX_FMT_SGBRG8:
  7593. case V4L2_PIX_FMT_SGRBG8:
  7594. @@ -780,8 +957,31 @@ int msm_isp_get_bit_per_pixel(uint32_t output_format)
  7595. case V4L2_PIX_FMT_QGBRG8:
  7596. case V4L2_PIX_FMT_QGRBG8:
  7597. case V4L2_PIX_FMT_QRGGB8:
  7598. - case V4L2_PIX_FMT_JPEG:
  7599. + case V4L2_PIX_FMT_JPEG:
  7600. case V4L2_PIX_FMT_META:
  7601. + case V4L2_PIX_FMT_NV12:
  7602. + case V4L2_PIX_FMT_NV21:
  7603. + case V4L2_PIX_FMT_NV14:
  7604. + case V4L2_PIX_FMT_NV41:
  7605. + case V4L2_PIX_FMT_YVU410:
  7606. + case V4L2_PIX_FMT_YVU420:
  7607. + case V4L2_PIX_FMT_YUYV:
  7608. + case V4L2_PIX_FMT_YYUV:
  7609. + case V4L2_PIX_FMT_YVYU:
  7610. + case V4L2_PIX_FMT_UYVY:
  7611. + case V4L2_PIX_FMT_VYUY:
  7612. + case V4L2_PIX_FMT_YUV422P:
  7613. + case V4L2_PIX_FMT_YUV411P:
  7614. + case V4L2_PIX_FMT_Y41P:
  7615. + case V4L2_PIX_FMT_YUV444:
  7616. + case V4L2_PIX_FMT_YUV555:
  7617. + case V4L2_PIX_FMT_YUV565:
  7618. + case V4L2_PIX_FMT_YUV32:
  7619. + case V4L2_PIX_FMT_YUV410:
  7620. + case V4L2_PIX_FMT_YUV420:
  7621. + case V4L2_PIX_FMT_GREY:
  7622. + case V4L2_PIX_FMT_PAL8:
  7623. + case MSM_V4L2_PIX_FMT_META:
  7624. return 8;
  7625. case V4L2_PIX_FMT_SBGGR10:
  7626. case V4L2_PIX_FMT_SGBRG10:
  7627. @@ -791,6 +991,8 @@ int msm_isp_get_bit_per_pixel(uint32_t output_format)
  7628. case V4L2_PIX_FMT_QGBRG10:
  7629. case V4L2_PIX_FMT_QGRBG10:
  7630. case V4L2_PIX_FMT_QRGGB10:
  7631. + case V4L2_PIX_FMT_Y10:
  7632. + case V4L2_PIX_FMT_Y10BPACK:
  7633. return 10;
  7634. case V4L2_PIX_FMT_SBGGR12:
  7635. case V4L2_PIX_FMT_SGBRG12:
  7636. @@ -800,19 +1002,16 @@ int msm_isp_get_bit_per_pixel(uint32_t output_format)
  7637. case V4L2_PIX_FMT_QGBRG12:
  7638. case V4L2_PIX_FMT_QGRBG12:
  7639. case V4L2_PIX_FMT_QRGGB12:
  7640. + case V4L2_PIX_FMT_Y12:
  7641. return 12;
  7642. - case V4L2_PIX_FMT_NV12:
  7643. - case V4L2_PIX_FMT_NV21:
  7644. - case V4L2_PIX_FMT_NV14:
  7645. - case V4L2_PIX_FMT_NV41:
  7646. - return 8;
  7647. case V4L2_PIX_FMT_NV16:
  7648. case V4L2_PIX_FMT_NV61:
  7649. + case V4L2_PIX_FMT_Y16:
  7650. return 16;
  7651. /*TD: Add more image format*/
  7652. default:
  7653. - pr_err("%s: Invalid output format\n", __func__);
  7654. - return 10;
  7655. + msm_isp_print_fourcc_error(__func__, output_format);
  7656. + return -EINVAL;
  7657. }
  7658. }
  7659.  
  7660. @@ -827,15 +1026,16 @@ void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
  7661. void msm_isp_process_error_info(struct vfe_device *vfe_dev)
  7662. {
  7663. int i;
  7664. + uint8_t num_stats_type =
  7665. + vfe_dev->hw_info->stats_hw_info->num_stats_type;
  7666. struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
  7667. static DEFINE_RATELIMIT_STATE(rs,
  7668. DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
  7669. static DEFINE_RATELIMIT_STATE(rs_stats,
  7670. DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
  7671.  
  7672. -// if (error_info->error_count == 1 ||
  7673. -// !(error_info->info_dump_frame_count % 100)) {
  7674. - if (1) {
  7675. + if (error_info->error_count == 1 ||
  7676. + !(error_info->info_dump_frame_count % 100)) {
  7677. vfe_dev->hw_info->vfe_ops.core_ops.
  7678. process_error_status(vfe_dev);
  7679. error_info->error_mask0 = 0;
  7680. @@ -851,7 +1051,7 @@ void msm_isp_process_error_info(struct vfe_device *vfe_dev)
  7681. error_info->stream_framedrop_count[i] = 0;
  7682. }
  7683. }
  7684. - for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
  7685. + for (i = 0; i < num_stats_type; i++) {
  7686. if (error_info->stats_framedrop_count[i] != 0 &&
  7687. __ratelimit(&rs_stats)) {
  7688. pr_err("%s: Stats stream[%d]: dropped %d frames\n",
  7689. @@ -871,6 +1071,125 @@ static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
  7690. vfe_dev->error_info.error_count++;
  7691. }
  7692.  
  7693. +static inline void msm_isp_process_overflow_irq(
  7694. + struct vfe_device *vfe_dev,
  7695. + uint32_t *irq_status0, uint32_t *irq_status1)
  7696. +{
  7697. + uint32_t overflow_mask;
  7698. + uint32_t halt_restart_mask0, halt_restart_mask1;
  7699. + /*Mask out all other irqs if recovery is started*/
  7700. + if (atomic_read(&vfe_dev->error_info.overflow_state) !=
  7701. + NO_OVERFLOW) {
  7702. + vfe_dev->hw_info->vfe_ops.core_ops.
  7703. + get_halt_restart_mask(&halt_restart_mask0,
  7704. + &halt_restart_mask1);
  7705. + *irq_status0 &= halt_restart_mask0;
  7706. + *irq_status1 &= halt_restart_mask1;
  7707. + return;
  7708. + }
  7709. +
  7710. + /*Check if any overflow bit is set*/
  7711. + vfe_dev->hw_info->vfe_ops.core_ops.
  7712. + get_overflow_mask(&overflow_mask);
  7713. + overflow_mask &= *irq_status1;
  7714. + if (overflow_mask) {
  7715. + pr_warning("%s: Bus overflow detected: 0x%x\n",
  7716. + __func__, overflow_mask);
  7717. + atomic_set(&vfe_dev->error_info.overflow_state,
  7718. + OVERFLOW_DETECTED);
  7719. + pr_warning("%s: Start bus overflow recovery\n", __func__);
  7720. + /*Store current IRQ mask*/
  7721. + vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
  7722. + &vfe_dev->error_info.overflow_recover_irq_mask0,
  7723. + &vfe_dev->error_info.overflow_recover_irq_mask1);
  7724. + /*Stop CAMIF Immediately*/
  7725. + vfe_dev->hw_info->vfe_ops.core_ops.
  7726. + update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
  7727. + /*Halt the hardware & Clear all other IRQ mask*/
  7728. + vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
  7729. + /*Update overflow state*/
  7730. + atomic_set(&vfe_dev->error_info.overflow_state, HALT_REQUESTED);
  7731. + *irq_status0 = 0;
  7732. + *irq_status1 = 0;
  7733. + }
  7734. +}
  7735. +
  7736. +static inline void msm_isp_reset_burst_count(
  7737. + struct vfe_device *vfe_dev)
  7738. +{
  7739. + int i;
  7740. + struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
  7741. + struct msm_vfe_axi_stream *stream_info;
  7742. + struct msm_vfe_axi_stream_request_cmd framedrop_info;
  7743. + for (i = 0; i < MAX_NUM_STREAM; i++) {
  7744. + stream_info = &axi_data->stream_info[i];
  7745. + if (stream_info->state != ACTIVE)
  7746. + continue;
  7747. + if (stream_info->stream_type == BURST_STREAM &&
  7748. + stream_info->num_burst_capture != 0) {
  7749. + framedrop_info.burst_count =
  7750. + stream_info->num_burst_capture;
  7751. + framedrop_info.frame_skip_pattern =
  7752. + stream_info->frame_skip_pattern;
  7753. + framedrop_info.init_frame_drop = 0;
  7754. + msm_isp_calculate_framedrop(&vfe_dev->axi_data,
  7755. + &framedrop_info);
  7756. + }
  7757. + }
  7758. +}
  7759. +
  7760. +static void msm_isp_process_overflow_recovery(
  7761. + struct vfe_device *vfe_dev,
  7762. + uint32_t irq_status0, uint32_t irq_status1)
  7763. +{
  7764. + uint32_t halt_restart_mask0, halt_restart_mask1;
  7765. + vfe_dev->hw_info->vfe_ops.core_ops.
  7766. + get_halt_restart_mask(&halt_restart_mask0,
  7767. + &halt_restart_mask1);
  7768. + irq_status0 &= halt_restart_mask0;
  7769. + irq_status1 &= halt_restart_mask1;
  7770. + if (irq_status0 == 0 && irq_status1 == 0)
  7771. + return;
  7772. +
  7773. + switch (atomic_read(&vfe_dev->error_info.overflow_state)) {
  7774. + case HALT_REQUESTED: {
  7775. + pr_err("%s: Halt done, Restart Pending\n", __func__);
  7776. + /*Reset the hardware*/
  7777. + vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
  7778. + ISP_RST_SOFT, 0);
  7779. + /*Update overflow state*/
  7780. + atomic_set(&vfe_dev->error_info.overflow_state,
  7781. + RESTART_REQUESTED);
  7782. + }
  7783. + break;
  7784. + case RESTART_REQUESTED: {
  7785. + pr_err("%s: Restart done, Resuming\n", __func__);
  7786. + /*Reset the burst stream frame drop pattern. In the
  7787. + *case where a bus overflow happens during the burst shot,
  7788. + *the framedrop pattern might be updated after reg update
  7789. + *to skip all the frames after the burst shot. The burst shot
  7790. + *might not be completed due to the overflow, so the framedrop
  7791. + *pattern needs to change back to the original settings in order
  7792. + *to recover from the overflow.
  7793. + */
  7794. + msm_isp_reset_burst_count(vfe_dev);
  7795. + vfe_dev->hw_info->vfe_ops.axi_ops.
  7796. + reload_wm(vfe_dev, 0xFFFFFFFF);
  7797. + vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
  7798. + vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev);
  7799. + memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
  7800. + atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
  7801. + vfe_dev->hw_info->vfe_ops.core_ops.
  7802. + update_camif_state(vfe_dev, ENABLE_CAMIF);
  7803. + }
  7804. + break;
  7805. + case NO_OVERFLOW:
  7806. + case OVERFLOW_DETECTED:
  7807. + default:
  7808. + break;
  7809. + }
  7810. +}
  7811. +
  7812. irqreturn_t msm_isp_process_irq(int irq_num, void *data)
  7813. {
  7814. unsigned long flags;
  7815. @@ -881,19 +1200,27 @@ irqreturn_t msm_isp_process_irq(int irq_num, void *data)
  7816.  
  7817. vfe_dev->hw_info->vfe_ops.irq_ops.
  7818. read_irq_status(vfe_dev, &irq_status0, &irq_status1);
  7819. + if ((irq_status0 == 0) && (irq_status1 == 0)) {
  7820. + pr_err_ratelimited("%s: irq_status0 & 1 are both 0\n",
  7821. + __func__);
  7822. + return IRQ_HANDLED;
  7823. + }
  7824. + msm_isp_process_overflow_irq(vfe_dev,
  7825. + &irq_status0, &irq_status1);
  7826. vfe_dev->hw_info->vfe_ops.core_ops.
  7827. get_error_mask(&error_mask0, &error_mask1);
  7828. error_mask0 &= irq_status0;
  7829. error_mask1 &= irq_status1;
  7830. irq_status0 &= ~error_mask0;
  7831. irq_status1 &= ~error_mask1;
  7832. - if ((error_mask0 != 0) || (error_mask1 != 0))
  7833. + if (!vfe_dev->ignore_error &&
  7834. + ((error_mask0 != 0) || (error_mask1 != 0)))
  7835. msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
  7836.  
  7837. if ((irq_status0 == 0) && (irq_status1 == 0) &&
  7838. (!((error_mask0 != 0) || (error_mask1 != 0)) &&
  7839. vfe_dev->error_info.error_count == 1)) {
  7840. - ISP_DBG("%s: irq_status0 & 1 are both 0!\n", __func__);
  7841. + ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
  7842. return IRQ_HANDLED;
  7843. }
  7844.  
  7845. @@ -942,29 +1269,39 @@ void msm_isp_do_tasklet(unsigned long data)
  7846. irq_status1 = queue_cmd->vfeInterruptStatus1;
  7847. ts = queue_cmd->ts;
  7848. spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
  7849. + if (atomic_read(&vfe_dev->error_info.overflow_state) !=
  7850. + NO_OVERFLOW) {
  7851. + pr_err_ratelimited("Overflow detected, starting recovery\n");
  7852. + msm_isp_process_overflow_recovery(vfe_dev,
  7853. + irq_status0, irq_status1);
  7854. + continue;
  7855. + }
  7856. ISP_DBG("%s: status0: 0x%x status1: 0x%x\n",
  7857. __func__, irq_status0, irq_status1);
  7858. irq_ops->process_reset_irq(vfe_dev,
  7859. irq_status0, irq_status1);
  7860. irq_ops->process_halt_irq(vfe_dev,
  7861. irq_status0, irq_status1);
  7862. + irq_ops->process_camif_irq(vfe_dev,
  7863. + irq_status0, irq_status1, &ts);
  7864. irq_ops->process_axi_irq(vfe_dev,
  7865. irq_status0, irq_status1, &ts);
  7866. irq_ops->process_stats_irq(vfe_dev,
  7867. irq_status0, irq_status1, &ts);
  7868. irq_ops->process_reg_update(vfe_dev,
  7869. irq_status0, irq_status1, &ts);
  7870. - irq_ops->process_camif_irq(vfe_dev,
  7871. - irq_status0, irq_status1, &ts);
  7872. msm_isp_process_error_info(vfe_dev);
  7873. }
  7874. }
  7875.  
  7876. -void msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
  7877. +int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
  7878. {
  7879. struct msm_vfe_axi_src_state *src_state = arg;
  7880. + if (src_state->input_src >= VFE_SRC_MAX)
  7881. + return -EINVAL;
  7882. vfe_dev->axi_data.src_info[src_state->input_src].active =
  7883. src_state->src_active;
  7884. + return 0;
  7885. }
  7886.  
  7887. int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  7888. @@ -989,7 +1326,10 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  7889. return -EBUSY;
  7890. }
  7891.  
  7892. - rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev);
  7893. + memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
  7894. + atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
  7895. + rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
  7896. + ISP_RST_HARD, 1);
  7897. if (rc <= 0) {
  7898. pr_err("%s: reset timeout\n", __func__);
  7899. mutex_unlock(&vfe_dev->core_mutex);
  7900. @@ -1003,6 +1343,15 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  7901.  
  7902. vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp", 28);
  7903.  
  7904. + switch (vfe_dev->vfe_hw_version) {
  7905. + case VFE40_8974V2_VERSION:
  7906. + vfe_dev->soc_hw_version = msm_camera_io_r(vfe_dev->tcsr_base);
  7907. + break;
  7908. + default:
  7909. + /* SOC HARDWARE VERSION NOT SUPPORTED */
  7910. + vfe_dev->soc_hw_version = 0x00;
  7911. + }
  7912. +
  7913. memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
  7914. memset(&vfe_dev->stats_data, 0,
  7915. sizeof(struct msm_vfe_stats_shared_data));
  7916. @@ -1010,18 +1359,30 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  7917. vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
  7918. vfe_dev->vfe_open_cnt++;
  7919. vfe_dev->taskletq_idx = 0;
  7920. + vfe_dev->vt_enable = 0;
  7921. mutex_unlock(&vfe_dev->core_mutex);
  7922. mutex_unlock(&vfe_dev->realtime_mutex);
  7923. return 0;
  7924. }
  7925.  
  7926. +#ifdef CONFIG_MSM_AVTIMER
  7927. +void msm_isp_end_avtimer(void)
  7928. +{
  7929. + avcs_core_disable_power_collapse(0);
  7930. +}
  7931. +#else
  7932. +void msm_isp_end_avtimer(void)
  7933. +{
  7934. + pr_err("AV Timer is not supported\n");
  7935. +}
  7936. +#endif
  7937. +
  7938. int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  7939. {
  7940. - long rc;
  7941. struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
  7942. - ISP_DBG("%s\n", __func__);
  7943. mutex_lock(&vfe_dev->realtime_mutex);
  7944. mutex_lock(&vfe_dev->core_mutex);
  7945. +
  7946. if (vfe_dev->vfe_open_cnt == 0) {
  7947. pr_err("%s: Invalid close\n", __func__);
  7948. mutex_unlock(&vfe_dev->core_mutex);
  7949. @@ -1029,13 +1390,14 @@ int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  7950. return -ENODEV;
  7951. }
  7952.  
  7953. - rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev);
  7954. - if (rc <= 0)
  7955. - pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
  7956. -
  7957. + vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
  7958. vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
  7959. vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
  7960. vfe_dev->vfe_open_cnt--;
  7961. + if (vfe_dev->vt_enable) {
  7962. + msm_isp_end_avtimer();
  7963. + vfe_dev->vt_enable = 0;
  7964. + }
  7965. mutex_unlock(&vfe_dev->core_mutex);
  7966. mutex_unlock(&vfe_dev->realtime_mutex);
  7967. return 0;
  7968. diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
  7969. index ba85831..a38fd7a 100755
  7970. --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
  7971. +++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
  7972. @@ -1,4 +1,4 @@
  7973. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  7974. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  7975. *
  7976. * This program is free software; you can redistribute it and/or modify
  7977. * it under the terms of the GNU General Public License version 2 and
  7978. @@ -19,6 +19,7 @@
  7979. #include <linux/platform_device.h>
  7980. #include <linux/gpio.h>
  7981. #include <linux/iopoll.h>
  7982. +#include <linux/ratelimit.h>
  7983. #include <media/msmb_isp.h>
  7984.  
  7985. #include "msm_ispif.h"
  7986. @@ -40,9 +41,7 @@
  7987. #define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY 0x02
  7988.  
  7989. #define ISPIF_TIMEOUT_SLEEP_US 1000
  7990. -#define ISPIF_TIMEOUT_ALL_US 500000
  7991. -
  7992. -#define CSID_VERSION_V30 0x30000000
  7993. +#define ISPIF_TIMEOUT_ALL_US 1000000
  7994.  
  7995. #undef CDBG
  7996. #ifdef CONFIG_MSMB_CAMERA_DEBUG
  7997. @@ -51,33 +50,6 @@
  7998. #define CDBG(fmt, args...) do { } while (0)
  7999. #endif
  8000.  
  8001. -static void msm_camera_io_dump_3(void __iomem *addr, int size)
  8002. -{
  8003. - char line_str[128], *p_str;
  8004. - int i;
  8005. - u32 *p = (u32 *) addr;
  8006. - u32 data;
  8007. - printk("%s: %p %d\n", __func__, addr, size);
  8008. - line_str[0] = '\0';
  8009. - p_str = line_str;
  8010. - for (i = 0; i < size/4; i++) {
  8011. - if (i % 4 == 0) {
  8012. - snprintf(p_str, 12, "%08x: ", (u32) p);
  8013. - p_str += 10;
  8014. - }
  8015. - data = readl_relaxed(p++);
  8016. - snprintf(p_str, 12, "%08x ", data);
  8017. - p_str += 9;
  8018. - if ((i + 1) % 4 == 0) {
  8019. - printk("%s\n", line_str);
  8020. - line_str[0] = '\0';
  8021. - p_str = line_str;
  8022. - }
  8023. - }
  8024. - if (line_str[0] != '\0')
  8025. - printk("%s\n", line_str);
  8026. -}
  8027. -
  8028. static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
  8029. {
  8030. if (!ispif->enb_dump_reg)
  8031. @@ -85,20 +57,32 @@ static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
  8032. msm_camera_io_dump(ispif->base, 0x250);
  8033. }
  8034.  
  8035. -static void msm_ispif_io_dump_start_reg(struct ispif_device *ispif)
  8036. -{
  8037. - if (!ispif->enb_dump_reg)
  8038. - return;
  8039. - msm_camera_io_dump_3(ispif->base, 0x270);
  8040. -}
  8041. -
  8042. static inline int msm_ispif_is_intf_valid(uint32_t csid_version,
  8043. uint8_t intf_type)
  8044. {
  8045. - return ((csid_version <= CSID_VERSION_V2 && intf_type != VFE0) ||
  8046. - (intf_type >= VFE_MAX)) ? false : true;
  8047. + return ((csid_version <= CSID_VERSION_V22 && intf_type != VFE0) ||
  8048. + (intf_type >= VFE_MAX)) ? false : true;
  8049. }
  8050.  
  8051. +static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
  8052. + {"ispif_ahb_clk", NO_SET_RATE},
  8053. + {"camss_top_ahb_clk", NO_SET_RATE},
  8054. + {"csi0_ahb_clk", NO_SET_RATE},
  8055. + {"csi0_src_clk", NO_SET_RATE},
  8056. + {"csi0_phy_clk", NO_SET_RATE},
  8057. + {"csi0_clk", NO_SET_RATE},
  8058. + {"csi0_pix_clk", NO_SET_RATE},
  8059. + {"csi0_rdi_clk", NO_SET_RATE},
  8060. + {"csi1_ahb_clk", NO_SET_RATE},
  8061. + {"csi1_src_clk", NO_SET_RATE},
  8062. + {"csi1_phy_clk", NO_SET_RATE},
  8063. + {"csi1_clk", NO_SET_RATE},
  8064. + {"csi1_pix_clk", NO_SET_RATE},
  8065. + {"csi1_rdi_clk", NO_SET_RATE},
  8066. + {"camss_vfe_vfe_clk", NO_SET_RATE},
  8067. + {"camss_csi_vfe_clk", NO_SET_RATE},
  8068. +};
  8069. +
  8070. static struct msm_cam_clk_info ispif_8974_ahb_clk_info[] = {
  8071. {"ispif_ahb_clk", -1},
  8072. };
  8073. @@ -133,19 +117,26 @@ static int msm_ispif_reset_hw(struct ispif_device *ispif)
  8074. int rc = 0;
  8075. long timeout = 0;
  8076. struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
  8077. -
  8078. - if (ispif->csid_version < CSID_VERSION_V30) {
  8079. - /* currently reset is done only for 8974 */
  8080. - return 0;
  8081. -
  8082. - }
  8083. + struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
  8084. + ispif->clk_idx = 0;
  8085.  
  8086. rc = msm_cam_clk_enable(&ispif->pdev->dev,
  8087. ispif_8974_reset_clk_info, reset_clk,
  8088. ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
  8089. if (rc < 0) {
  8090. - pr_err("%s: cannot enable clock, error = %d",
  8091. - __func__, rc);
  8092. + rc = msm_cam_clk_enable(&ispif->pdev->dev,
  8093. + ispif_8626_reset_clk_info, reset_clk1,
  8094. + ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
  8095. + if (rc < 0){
  8096. + pr_err("%s: cannot enable clock, error = %d",
  8097. + __func__, rc);
  8098. + } else {
  8099. + /* This is set if device is 8x26 */
  8100. + ispif->clk_idx = 2;
  8101. + }
  8102. + } else {
  8103. + /* This is set if device is 8974 */
  8104. + ispif->clk_idx = 1;
  8105. }
  8106.  
  8107. init_completion(&ispif->reset_complete[VFE0]);
  8108. @@ -159,13 +150,23 @@ static int msm_ispif_reset_hw(struct ispif_device *ispif)
  8109. msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
  8110. ispif->base + ISPIF_RST_CMD_1_ADDR);
  8111.  
  8112. - timeout = wait_for_completion_interruptible_timeout(
  8113. + timeout = wait_for_completion_timeout(
  8114. &ispif->reset_complete[VFE0], msecs_to_jiffies(500));
  8115. CDBG("%s: VFE0 done\n", __func__);
  8116. +
  8117. if (timeout <= 0) {
  8118. pr_err("%s: VFE0 reset wait timeout\n", __func__);
  8119. - rc = -ETIMEDOUT;
  8120. - goto end;
  8121. + rc = msm_cam_clk_enable(&ispif->pdev->dev,
  8122. + ispif_8974_reset_clk_info, reset_clk,
  8123. + ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
  8124. + if (rc < 0){
  8125. + rc = msm_cam_clk_enable(&ispif->pdev->dev,
  8126. + ispif_8626_reset_clk_info, reset_clk1,
  8127. + ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
  8128. + if (rc < 0)
  8129. + pr_err("%s: VFE0 reset wait timeout\n", __func__);
  8130. + }
  8131. + return -ETIMEDOUT;
  8132. }
  8133.  
  8134. if (ispif->hw_num_isps > 1) {
  8135. @@ -175,19 +176,33 @@ static int msm_ispif_reset_hw(struct ispif_device *ispif)
  8136. CDBG("%s: VFE1 done\n", __func__);
  8137. if (timeout <= 0) {
  8138. pr_err("%s: VFE1 reset wait timeout\n", __func__);
  8139. - rc = -ETIMEDOUT;
  8140. - goto end;
  8141. + msm_cam_clk_enable(&ispif->pdev->dev,
  8142. + ispif_8974_reset_clk_info, reset_clk,
  8143. + ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
  8144. + return -ETIMEDOUT;
  8145. }
  8146. }
  8147. - pr_info("%s: ISPIF reset hw done", __func__);
  8148. -end:
  8149. - rc = msm_cam_clk_enable(&ispif->pdev->dev,
  8150. - ispif_8974_reset_clk_info, reset_clk,
  8151. - ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
  8152. - if (rc < 0) {
  8153. - pr_err("%s: cannot disable clock, error = %d",
  8154. - __func__, rc);
  8155. +
  8156. + if (ispif->clk_idx == 1){
  8157. + rc = msm_cam_clk_enable(&ispif->pdev->dev,
  8158. + ispif_8974_reset_clk_info, reset_clk,
  8159. + ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
  8160. + if (rc < 0) {
  8161. + pr_err("%s: cannot disable clock, error = %d",
  8162. + __func__, rc);
  8163. + }
  8164. }
  8165. +
  8166. + if (ispif->clk_idx == 2){
  8167. + rc = msm_cam_clk_enable(&ispif->pdev->dev,
  8168. + ispif_8626_reset_clk_info, reset_clk1,
  8169. + ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
  8170. + if (rc < 0) {
  8171. + pr_err("%s: cannot disable clock, error = %d",
  8172. + __func__, rc);
  8173. + }
  8174. + }
  8175. +
  8176. return rc;
  8177. }
  8178.  
  8179. @@ -195,7 +210,7 @@ static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
  8180. {
  8181. int rc = 0;
  8182.  
  8183. - if (ispif->csid_version < CSID_VERSION_V3) {
  8184. + if (ispif->csid_version < CSID_VERSION_V30) {
  8185. /* Older ISPIF versions don't need ahb clock */
  8186. return 0;
  8187. }
  8188. @@ -221,7 +236,8 @@ static int msm_ispif_reset(struct ispif_device *ispif)
  8189. memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
  8190. for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
  8191.  
  8192. - msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT, ispif->base + ISPIF_VFE_m_CTRL_0(i));
  8193. + msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT,
  8194. + ispif->base + ISPIF_VFE_m_CTRL_0(i));
  8195. msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
  8196. msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
  8197. msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
  8198. @@ -231,24 +247,24 @@ static int msm_ispif_reset(struct ispif_device *ispif)
  8199. ISPIF_VFE_m_IRQ_CLEAR_1(i));
  8200. msm_camera_io_w(0xFFFFFFFF, ispif->base +
  8201. ISPIF_VFE_m_IRQ_CLEAR_2(i));
  8202. +
  8203. msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i));
  8204.  
  8205. - msm_camera_io_w(0xAAAAAAAA,
  8206. + msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
  8207. ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
  8208. -
  8209. - msm_camera_io_w(0xAAAAAAAA,
  8210. + msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
  8211. ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
  8212. -
  8213. + pr_debug("%s: base %lx", __func__, (unsigned long)ispif->base);
  8214. msm_camera_io_w(0, ispif->base +
  8215. ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
  8216. msm_camera_io_w(0, ispif->base +
  8217. ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
  8218. msm_camera_io_w(0, ispif->base +
  8219. - ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
  8220. + ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 0));
  8221. msm_camera_io_w(0, ispif->base +
  8222. - ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
  8223. + ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 1));
  8224. msm_camera_io_w(0, ispif->base +
  8225. - ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 2));
  8226. + ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 2));
  8227.  
  8228. msm_camera_io_w(0, ispif->base +
  8229. ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0));
  8230. @@ -508,9 +524,20 @@ static int msm_ispif_config(struct ispif_device *ispif,
  8231. rc = -EPERM;
  8232. return rc;
  8233. }
  8234. + if (params->num > MAX_PARAM_ENTRIES) {
  8235. + pr_err("%s: invalid param entries %d\n", __func__,
  8236. + params->num);
  8237. + rc = -EINVAL;
  8238. + return rc;
  8239. + }
  8240.  
  8241. for (i = 0; i < params->num; i++) {
  8242. vfe_intf = params->entries[i].vfe_intf;
  8243. + if (vfe_intf >= VFE_MAX) {
  8244. + pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
  8245. + __LINE__, i, vfe_intf);
  8246. + return -EINVAL;
  8247. + }
  8248. if (!msm_ispif_is_intf_valid(ispif->csid_version,
  8249. vfe_intf)) {
  8250. pr_err("%s: invalid interface type\n", __func__);
  8251. @@ -534,14 +561,14 @@ static int msm_ispif_config(struct ispif_device *ispif,
  8252.  
  8253. if ((intftype >= INTF_MAX) ||
  8254. (vfe_intf >= ispif->vfe_info.num_vfe) ||
  8255. - (ispif->csid_version <= CSID_VERSION_V2 &&
  8256. + (ispif->csid_version <= CSID_VERSION_V22 &&
  8257. (vfe_intf > VFE0))) {
  8258. pr_err("%s: VFEID %d and CSID version %d mismatch\n",
  8259. __func__, vfe_intf, ispif->csid_version);
  8260. return -EINVAL;
  8261. }
  8262.  
  8263. - if (ispif->csid_version >= CSID_VERSION_V3)
  8264. + if (ispif->csid_version >= CSID_VERSION_V30)
  8265. msm_ispif_select_clk_mux(ispif, intftype,
  8266. params->entries[i].csid, vfe_intf);
  8267.  
  8268. @@ -602,12 +629,27 @@ static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
  8269. BUG_ON(!ispif);
  8270. BUG_ON(!params);
  8271.  
  8272. + if (params->num > MAX_PARAM_ENTRIES) {
  8273. + pr_err("%s: invalid param entries %d\n", __func__,
  8274. + params->num);
  8275. + return;
  8276. + }
  8277. for (i = 0; i < params->num; i++) {
  8278. vfe_intf = params->entries[i].vfe_intf;
  8279. + if (vfe_intf >= VFE_MAX) {
  8280. + pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
  8281. + __LINE__, i, vfe_intf);
  8282. + return;
  8283. + }
  8284. if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
  8285. pr_err("%s: invalid interface type\n", __func__);
  8286. return;
  8287. }
  8288. + if (params->entries[i].num_cids > MAX_CID_CH) {
  8289. + pr_err("%s: out of range of cid_num %d\n",
  8290. + __func__, params->entries[i].num_cids);
  8291. + return;
  8292. + }
  8293. }
  8294.  
  8295. for (i = 0; i < params->num; i++) {
  8296. @@ -634,11 +676,10 @@ static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
  8297. }
  8298.  
  8299. /* cmd for PIX0, PIX1, RDI0, RDI1 */
  8300. - if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF) {
  8301. + if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF)
  8302. msm_camera_io_w_mb(
  8303. ispif->applied_intf_cmd[vfe_intf].intf_cmd,
  8304. ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
  8305. - }
  8306.  
  8307. /* cmd for RDI2 */
  8308. if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
  8309. @@ -664,6 +705,12 @@ static int msm_ispif_stop_immediately(struct ispif_device *ispif,
  8310. return rc;
  8311. }
  8312.  
  8313. + if (params->num > MAX_PARAM_ENTRIES) {
  8314. + pr_err("%s: invalid param entries %d\n", __func__,
  8315. + params->num);
  8316. + rc = -EINVAL;
  8317. + return rc;
  8318. + }
  8319. msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_IMMEDIATELY, params);
  8320.  
  8321. /* after stop the interface we need to unmask the CID enable bits */
  8322. @@ -688,6 +735,12 @@ static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
  8323. rc = -EPERM;
  8324. return rc;
  8325. }
  8326. + if (params->num > MAX_PARAM_ENTRIES) {
  8327. + pr_err("%s: invalid param entries %d\n", __func__,
  8328. + params->num);
  8329. + rc = -EINVAL;
  8330. + return rc;
  8331. + }
  8332.  
  8333. msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
  8334.  
  8335. @@ -714,6 +767,13 @@ static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
  8336. return rc;
  8337. }
  8338.  
  8339. + if (params->num > MAX_PARAM_ENTRIES) {
  8340. + pr_err("%s: invalid param entries %d\n", __func__,
  8341. + params->num);
  8342. + rc = -EINVAL;
  8343. + return rc;
  8344. + }
  8345. +
  8346. for (i = 0; i < params->num; i++) {
  8347. if (!msm_ispif_is_intf_valid(ispif->csid_version,
  8348. params->entries[i].vfe_intf)) {
  8349. @@ -754,19 +814,13 @@ static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
  8350. goto end;
  8351. }
  8352.  
  8353. -#if 0
  8354. - /* todo_bug_fix? very bad. use readl_poll_timeout */
  8355. - while ((msm_camera_io_r(ispif->base + intf_addr) & 0xF) != 0xF)
  8356. - CDBG("%s: Wait for %d Idle\n", __func__,
  8357. - params->entries[i].intftype);
  8358. -#else
  8359. rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
  8360. - (stop_flag & 0xF) == 0xF,
  8361. - ISPIF_TIMEOUT_SLEEP_US,
  8362. - ISPIF_TIMEOUT_ALL_US);
  8363. + (stop_flag & 0xF) == 0xF,
  8364. + ISPIF_TIMEOUT_SLEEP_US,
  8365. + ISPIF_TIMEOUT_ALL_US);
  8366. if (rc < 0)
  8367. goto end;
  8368. -#endif
  8369. +
  8370. /* disable CIDs in CID_MASK register */
  8371. msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
  8372. cid_mask, vfe_intf, 0);
  8373. @@ -860,7 +914,7 @@ static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
  8374.  
  8375. ispif_process_irq(ispif, out, VFE0);
  8376. }
  8377. - if (ispif->vfe_info.num_vfe > 1) {
  8378. + if (ispif->hw_num_isps > 1) {
  8379. if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ)
  8380. complete(&ispif->reset_complete[VFE1]);
  8381.  
  8382. @@ -919,7 +973,7 @@ static int msm_ispif_init(struct ispif_device *ispif,
  8383.  
  8384. ispif->csid_version = csid_version;
  8385.  
  8386. - if (ispif->csid_version >= CSID_VERSION_V3) {
  8387. + if (ispif->csid_version >= CSID_VERSION_V30) {
  8388. if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
  8389. pr_err("%s csi clk mux mem %p io %p\n", __func__,
  8390. ispif->clk_mux_mem, ispif->clk_mux_io);
  8391. @@ -955,16 +1009,12 @@ static int msm_ispif_init(struct ispif_device *ispif,
  8392. goto error_ahb;
  8393. }
  8394.  
  8395. - if(of_device_is_compatible(ispif->pdev->dev.of_node,
  8396. - "qcom,ispif-v3.0")) {
  8397. - /*Currently HW reset is implemented for 8974 only*/
  8398. - msm_ispif_reset_hw(ispif);
  8399. - }
  8400. + msm_ispif_reset_hw(ispif);
  8401.  
  8402. rc = msm_ispif_reset(ispif);
  8403. if (rc == 0) {
  8404. ispif->ispif_state = ISPIF_POWER_UP;
  8405. - pr_info("%s: power up done\n", __func__);
  8406. + CDBG("%s: power up done\n", __func__);
  8407. goto end;
  8408. }
  8409.  
  8410. @@ -987,12 +1037,6 @@ static void msm_ispif_release(struct ispif_device *ispif)
  8411. return;
  8412. }
  8413.  
  8414. - if(of_device_is_compatible(ispif->pdev->dev.of_node,
  8415. - "qcom,ispif-v3.0")) {
  8416. - /*Currently HW reset is implemented for 8974 only*/
  8417. - msm_ispif_reset_hw(ispif);
  8418. - }
  8419. -
  8420. /* make sure no streaming going on */
  8421. msm_ispif_reset(ispif);
  8422.  
  8423. @@ -1005,7 +1049,6 @@ static void msm_ispif_release(struct ispif_device *ispif)
  8424. iounmap(ispif->clk_mux_base);
  8425.  
  8426. ispif->ispif_state = ISPIF_POWER_DOWN;
  8427. - pr_info("%s: power down done", __func__);
  8428. }
  8429.  
  8430. static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
  8431. @@ -1033,7 +1076,7 @@ static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
  8432. break;
  8433. case ISPIF_START_FRAME_BOUNDARY:
  8434. rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params);
  8435. - msm_ispif_io_dump_start_reg(ispif);
  8436. + msm_ispif_io_dump_reg(ispif);
  8437. break;
  8438. case ISPIF_STOP_FRAME_BOUNDARY:
  8439. rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params);
  8440. @@ -1067,11 +1110,13 @@ static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
  8441. case MSM_SD_SHUTDOWN: {
  8442. struct ispif_device *ispif =
  8443. (struct ispif_device *)v4l2_get_subdevdata(sd);
  8444. - msm_ispif_release(ispif);
  8445. + if (ispif && ispif->base)
  8446. + msm_ispif_release(ispif);
  8447. return 0;
  8448. }
  8449. default:
  8450. - pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd);
  8451. + pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
  8452. + __func__, cmd);
  8453. return -ENOIOCTLCMD;
  8454. }
  8455. }
  8456. @@ -1158,6 +1203,7 @@ static int __devinit ispif_probe(struct platform_device *pdev)
  8457. goto error_sd_register;
  8458. }
  8459.  
  8460. +
  8461. if (pdev->dev.of_node) {
  8462. of_property_read_u32((&pdev->dev)->of_node,
  8463. "cell-index", &pdev->id);
  8464. @@ -1218,6 +1264,7 @@ error_sd_register:
  8465.  
  8466. static const struct of_device_id msm_ispif_dt_match[] = {
  8467. {.compatible = "qcom,ispif"},
  8468. + {}
  8469. };
  8470.  
  8471. MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
  8472. diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
  8473. index 652ee89..4dd8e5f 100644
  8474. --- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
  8475. +++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
  8476. @@ -1,4 +1,4 @@
  8477. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  8478. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  8479. *
  8480. * This program is free software; you can redistribute it and/or modify
  8481. * it under the terms of the GNU General Public License version 2 and
  8482. @@ -35,15 +35,14 @@
  8483. #include <media/v4l2-event.h>
  8484. #include <media/v4l2-ioctl.h>
  8485. #include <media/msmb_camera.h>
  8486. -#include <media/msmb_pproc.h>
  8487. #include <media/msmb_generic_buf_mgr.h>
  8488. +#include <media/msmb_pproc.h>
  8489. +#include <mach/clk-provider.h>
  8490. #include "msm_cpp.h"
  8491. #include "msm_isp_util.h"
  8492. #include "msm_camera_io_util.h"
  8493. #include <linux/debugfs.h>
  8494.  
  8495. -#include <mach/clk-provider.h>
  8496. -
  8497. #define MSM_CPP_DRV_NAME "msm_cpp"
  8498.  
  8499. #define MSM_CPP_MAX_BUFF_QUEUE 16
  8500. @@ -51,33 +50,40 @@
  8501. #define CONFIG_MSM_CPP_DBG 0
  8502.  
  8503. #define CPP_CMD_TIMEOUT_MS 300
  8504. -#define MSM_CPP_MAX_TIMEOUT_TRIAL 10
  8505. +#define MSM_CPP_CORE_CLK_IDX 4
  8506. +#define MSM_MICRO_IFACE_CLK_IDX 7
  8507.  
  8508. #define MSM_CPP_NOMINAL_CLOCK 266670000
  8509. #define MSM_CPP_TURBO_CLOCK 320000000
  8510.  
  8511. -extern int poweroff_charging;
  8512. +#define CPP_FW_VERSION_1_2_0 0x10020000
  8513. +#define CPP_FW_VERSION_1_4_0 0x10040000
  8514. +#define CPP_FW_VERSION_1_6_0 0x10060000
  8515. +#define CPP_FW_VERSION_1_8_0 0x10080000
  8516.  
  8517. +/* stripe information offsets in frame command */
  8518. +#define STRIPE_BASE_FW_1_2_0 130
  8519. +#define STRIPE_BASE_FW_1_4_0 140
  8520. +#define STRIPE_BASE_FW_1_6_0 464
  8521.  
  8522. -typedef struct _msm_cpp_timer_data_t {
  8523. +struct msm_cpp_timer_data_t {
  8524. struct cpp_device *cpp_dev;
  8525. struct msm_cpp_frame_info_t *processed_frame;
  8526. -} msm_cpp_timer_data_t;
  8527. +};
  8528.  
  8529. -typedef struct _msm_cpp_timer_t {
  8530. - uint8_t used;
  8531. - msm_cpp_timer_data_t data;
  8532. +struct msm_cpp_timer_t {
  8533. + atomic_t used;
  8534. + struct msm_cpp_timer_data_t data;
  8535. struct timer_list cpp_timer;
  8536. -} msm_cpp_timer_t;
  8537. +};
  8538.  
  8539. -msm_cpp_timer_t cpp_timers[2];
  8540. -static int del_timer_idx=0;
  8541. -static int set_timer_idx=0;
  8542. +struct msm_cpp_timer_t cpp_timer;
  8543.  
  8544. /* dump the frame command before writing to the hardware */
  8545. #define MSM_CPP_DUMP_FRM_CMD 0
  8546.  
  8547. -static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info);
  8548. +static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
  8549. + uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info);
  8550.  
  8551. #if CONFIG_MSM_CPP_DBG
  8552. #define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
  8553. @@ -104,6 +110,8 @@ static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,uint32_t buff_mgr_ops,
  8554. qcmd; \
  8555. })
  8556.  
  8557. +#define MSM_CPP_MAX_TIMEOUT_TRIAL 3
  8558. +
  8559. static void msm_queue_init(struct msm_device_queue *queue, const char *name)
  8560. {
  8561. CPP_DBG("E\n");
  8562. @@ -131,27 +139,6 @@ static void msm_enqueue(struct msm_device_queue *queue,
  8563. spin_unlock_irqrestore(&queue->lock, flags);
  8564. }
  8565.  
  8566. -static void msm_cpp_empty_list_eventdata(struct msm_device_queue *queue)
  8567. -{
  8568. - unsigned long flags;
  8569. - struct msm_queue_cmd *qcmd = NULL;
  8570. - if (!queue)
  8571. - return;
  8572. -
  8573. - spin_lock_irqsave(&queue->lock, flags);
  8574. - while (!list_empty(&queue->list)) {
  8575. - queue->len--;
  8576. - qcmd = list_first_entry(&queue->list,
  8577. - struct msm_queue_cmd, list_eventdata);
  8578. - list_del_init(&qcmd->list_eventdata);
  8579. - kfree(qcmd);
  8580. - }
  8581. - spin_unlock_irqrestore(&queue->lock, flags);
  8582. -
  8583. - return;
  8584. -}
  8585. -
  8586. -
  8587. static struct msm_cam_clk_info cpp_clk_info[] = {
  8588. {"camss_top_ahb_clk", -1},
  8589. {"vfe_clk_src", 266670000},
  8590. @@ -162,11 +149,29 @@ static struct msm_cam_clk_info cpp_clk_info[] = {
  8591. {"cpp_bus_clk", -1},
  8592. {"micro_iface_clk", -1},
  8593. };
  8594. -static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev);
  8595. +
  8596. +#define msm_cpp_empty_list(queue, member) { \
  8597. + unsigned long flags; \
  8598. + struct msm_queue_cmd *qcmd = NULL; \
  8599. + if (queue) { \
  8600. + spin_lock_irqsave(&queue->lock, flags); \
  8601. + while (!list_empty(&queue->list)) { \
  8602. + queue->len--; \
  8603. + qcmd = list_first_entry(&queue->list, \
  8604. + struct msm_queue_cmd, member); \
  8605. + list_del_init(&qcmd->member); \
  8606. + kfree(qcmd); \
  8607. + } \
  8608. + spin_unlock_irqrestore(&queue->lock, flags); \
  8609. + } \
  8610. +}
  8611. +
  8612. +static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
  8613. + uint32_t buff_mgr_ops);
  8614. static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin);
  8615. -void cpp_timer_callback(unsigned long data);
  8616. +static void cpp_timer_callback(unsigned long data);
  8617.  
  8618. -uint8_t induce_error = 0;
  8619. +uint8_t induce_error;
  8620. static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev);
  8621.  
  8622. static void msm_cpp_write(u32 data, void __iomem *cpp_base)
  8623. @@ -174,6 +179,14 @@ static void msm_cpp_write(u32 data, void __iomem *cpp_base)
  8624. writel_relaxed((data), cpp_base + MSM_CPP_MICRO_FIFO_RX_DATA);
  8625. }
  8626.  
  8627. +static void msm_cpp_clear_timer(struct cpp_device *cpp_dev)
  8628. +{
  8629. + atomic_set(&cpp_timer.used, 0);
  8630. + del_timer(&cpp_timer.cpp_timer);
  8631. + cpp_timer.data.processed_frame = NULL;
  8632. + cpp_dev->timeout_trial_cnt = 0;
  8633. +}
  8634. +
  8635. static uint32_t msm_cpp_read(void __iomem *cpp_base)
  8636. {
  8637. uint32_t tmp, retry = 0;
  8638. @@ -215,7 +228,7 @@ static struct msm_cpp_buff_queue_info_t *msm_cpp_get_buff_queue_entry(
  8639.  
  8640. static unsigned long msm_cpp_get_phy_addr(struct cpp_device *cpp_dev,
  8641. struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
  8642. - uint8_t native_buff)
  8643. + uint8_t native_buff, int *fd)
  8644. {
  8645. unsigned long phy_add = 0;
  8646. struct list_head *buff_head;
  8647. @@ -229,6 +242,7 @@ static unsigned long msm_cpp_get_phy_addr(struct cpp_device *cpp_dev,
  8648. list_for_each_entry_safe(buff, save, buff_head, entry) {
  8649. if (buff->map_info.buff_info.index == buff_index) {
  8650. phy_add = buff->map_info.phy_addr;
  8651. + *fd = buff->map_info.buff_info.fd;
  8652. break;
  8653. }
  8654. }
  8655. @@ -285,8 +299,6 @@ static unsigned long msm_cpp_queue_buffer_info(struct cpp_device *cpp_dev,
  8656. return buff->map_info.phy_addr;
  8657.  
  8658. QUEUE_BUFF_ERROR2:
  8659. - ion_unmap_iommu(cpp_dev->client, buff->map_info.ion_handle,
  8660. - cpp_dev->domain_num, 0);
  8661. ion_free(cpp_dev->client, buff->map_info.ion_handle);
  8662. QUEUE_BUFF_ERROR1:
  8663. buff->map_info.ion_handle = NULL;
  8664. @@ -312,7 +324,7 @@ static void msm_cpp_dequeue_buffer_info(struct cpp_device *cpp_dev,
  8665.  
  8666. static unsigned long msm_cpp_fetch_buffer_info(struct cpp_device *cpp_dev,
  8667. struct msm_cpp_buffer_info_t *buffer_info, uint32_t session_id,
  8668. - uint32_t stream_id)
  8669. + uint32_t stream_id, int *fd)
  8670. {
  8671. unsigned long phy_addr = 0;
  8672. struct msm_cpp_buff_queue_info_t *buff_queue_info;
  8673. @@ -327,10 +339,11 @@ static unsigned long msm_cpp_fetch_buffer_info(struct cpp_device *cpp_dev,
  8674. }
  8675.  
  8676. phy_addr = msm_cpp_get_phy_addr(cpp_dev, buff_queue_info,
  8677. - buffer_info->index, native_buff);
  8678. + buffer_info->index, native_buff, fd);
  8679. if ((phy_addr == 0) && (native_buff)) {
  8680. phy_addr = msm_cpp_queue_buffer_info(cpp_dev, buff_queue_info,
  8681. buffer_info);
  8682. + *fd = buffer_info->fd;
  8683. }
  8684. return phy_addr;
  8685. }
  8686. @@ -624,23 +637,17 @@ void msm_cpp_do_tasklet(unsigned long data)
  8687. if (msg_id == MSM_CPP_MSG_ID_FRAME_ACK) {
  8688. CPP_DBG("Frame done!!\n");
  8689. /* delete CPP timer */
  8690. - CPP_DBG("deleting cpp_timer %d.\n", del_timer_idx);
  8691. - del_timer(&cpp_timers[del_timer_idx].cpp_timer);
  8692. - cpp_timers[del_timer_idx].used = 0;
  8693. - cpp_timers[del_timer_idx].data.processed_frame = NULL;
  8694. - del_timer_idx = 1 - del_timer_idx;
  8695. - cpp_dev->timeout_trial_cnt = 0;
  8696. - msm_cpp_notify_frame_done(cpp_dev);
  8697. + CPP_DBG("delete timer.\n");
  8698. + msm_cpp_clear_timer(cpp_dev);
  8699. + msm_cpp_notify_frame_done(cpp_dev,
  8700. + VIDIOC_MSM_BUF_MNGR_BUF_DONE);
  8701. } else if (msg_id ==
  8702. MSM_CPP_MSG_ID_FRAME_NACK) {
  8703. pr_err("NACK error from hw!!\n");
  8704. - CPP_DBG("deleting cpp_timer %d.\n", del_timer_idx);
  8705. - del_timer(&cpp_timers[del_timer_idx].cpp_timer);
  8706. - cpp_timers[del_timer_idx].used = 0;
  8707. - cpp_timers[del_timer_idx].data.processed_frame = NULL;
  8708. - del_timer_idx = 1 - del_timer_idx;
  8709. - cpp_dev->timeout_trial_cnt = 0;
  8710. - msm_cpp_notify_frame_done(cpp_dev);
  8711. + CPP_DBG("delete timer.\n");
  8712. + msm_cpp_clear_timer(cpp_dev);
  8713. + msm_cpp_notify_frame_done(cpp_dev,
  8714. + VIDIOC_MSM_BUF_MNGR_PUT_BUF);
  8715. }
  8716. i += cmd_len + 2;
  8717. }
  8718. @@ -648,43 +655,27 @@ void msm_cpp_do_tasklet(unsigned long data)
  8719. }
  8720. }
  8721.  
  8722. -#if 0
  8723. -static void msm_cpp_boot_hw(struct cpp_device *cpp_dev)
  8724. +static void cpp_get_clk_freq_tbl(struct clk *clk, struct cpp_hw_info *hw_info)
  8725. {
  8726. - msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
  8727. - msm_camera_io_w(0x1, cpp_dev->base +
  8728. - MSM_CPP_MICRO_BOOT_START);
  8729. - msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  8730. + uint32_t count;
  8731. + signed long freq_tbl_entry = 0;
  8732.  
  8733. - /*Trigger MC to jump to start address*/
  8734. - msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
  8735. - msm_cpp_write(MSM_CPP_JUMP_ADDRESS, cpp_dev->base);
  8736. -
  8737. - msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  8738. - msm_cpp_poll(cpp_dev->base, 0x1);
  8739. - msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
  8740. - msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
  8741. -
  8742. - /*Get Bootloader Version*/
  8743. - msm_cpp_write(MSM_CPP_CMD_GET_BOOTLOADER_VER, cpp_dev->base);
  8744. - pr_info("MC Bootloader Version: 0x%x\n",
  8745. - msm_cpp_read(cpp_dev->base));
  8746. + if ((clk == NULL) || (hw_info == NULL) || (clk->ops == NULL) ||
  8747. + (clk->ops->list_rate == NULL)) {
  8748. + pr_err("Bad parameter\n");
  8749. + return;
  8750. + }
  8751.  
  8752. - /*Get Firmware Version*/
  8753. - msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
  8754. - msm_cpp_write(MSM_CPP_MSG_ID_CMD, cpp_dev->base);
  8755. - msm_cpp_write(0x1, cpp_dev->base);
  8756. - msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
  8757. - msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
  8758. + for (count = 0; count < MAX_FREQ_TBL; count++) {
  8759. + freq_tbl_entry = clk->ops->list_rate(clk, count);
  8760. + if (freq_tbl_entry >= 0)
  8761. + hw_info->freq_tbl[count] = freq_tbl_entry;
  8762. + else
  8763. + break;
  8764. + }
  8765.  
  8766. - msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  8767. - msm_cpp_poll(cpp_dev->base, 0x2);
  8768. - msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_FW_VER);
  8769. - pr_info("CPP FW Version: 0x%x\n", msm_cpp_read(cpp_dev->base));
  8770. - msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
  8771. + hw_info->freq_tbl_count = count;
  8772. }
  8773. -#endif
  8774. -
  8775. static int cpp_init_hardware(struct cpp_device *cpp_dev)
  8776. {
  8777. int rc = 0;
  8778. @@ -710,33 +701,37 @@ static int cpp_init_hardware(struct cpp_device *cpp_dev)
  8779. }
  8780. }
  8781.  
  8782. - cpp_dev->cpp_clk[7] = clk_get(&cpp_dev->pdev->dev,
  8783. - cpp_clk_info[7].clk_name);
  8784. - if (IS_ERR(cpp_dev->cpp_clk[7])) {
  8785. - pr_err("%s get failed\n", cpp_clk_info[7].clk_name);
  8786. - rc = PTR_ERR(cpp_dev->cpp_clk[7]);
  8787. + cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX] =
  8788. + clk_get(&cpp_dev->pdev->dev,
  8789. + cpp_clk_info[MSM_MICRO_IFACE_CLK_IDX].clk_name);
  8790. + if (IS_ERR(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX])) {
  8791. + pr_err("%s get failed\n",
  8792. + cpp_clk_info[MSM_MICRO_IFACE_CLK_IDX].clk_name);
  8793. + rc = PTR_ERR(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
  8794. goto remap_failed;
  8795. }
  8796.  
  8797. - rc = clk_reset(cpp_dev->cpp_clk[7], CLK_RESET_ASSERT);
  8798. + rc = clk_reset(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX],
  8799. + CLK_RESET_ASSERT);
  8800. if (rc) {
  8801. - pr_err("%s:micro_iface_clk assert failed\n", __func__);
  8802. - clk_put(cpp_dev->cpp_clk[7]);
  8803. - goto remap_failed;
  8804. + pr_err("%s:micro_iface_clk assert failed\n", __func__);
  8805. + clk_put(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
  8806. + goto remap_failed;
  8807. }
  8808. -
  8809. +
  8810. usleep_range(10000, 12000);
  8811. -
  8812. - rc = clk_reset(cpp_dev->cpp_clk[7], CLK_RESET_DEASSERT);
  8813. +
  8814. + rc = clk_reset(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX],
  8815. + CLK_RESET_DEASSERT);
  8816. if (rc) {
  8817. pr_err("%s:micro_iface_clk assert failed\n", __func__);
  8818. - clk_put(cpp_dev->cpp_clk[7]);
  8819. + clk_put(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
  8820. goto remap_failed;
  8821. }
  8822.  
  8823. usleep_range(1000, 1200);
  8824.  
  8825. - clk_put(cpp_dev->cpp_clk[7]);
  8826. + clk_put(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
  8827.  
  8828. rc = msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
  8829. cpp_dev->cpp_clk, ARRAY_SIZE(cpp_clk_info), 1);
  8830. @@ -778,8 +773,9 @@ static int cpp_init_hardware(struct cpp_device *cpp_dev)
  8831. goto req_irq_fail;
  8832. }
  8833. cpp_dev->buf_mgr_subdev = msm_buf_mngr_get_subdev();
  8834. -
  8835. - rc = msm_cpp_buffer_ops(cpp_dev,VIDIOC_MSM_BUF_MNGR_INIT, NULL);
  8836. +
  8837. + rc = msm_cpp_buffer_ops(cpp_dev,
  8838. + VIDIOC_MSM_BUF_MNGR_INIT, NULL);
  8839. if (rc < 0) {
  8840. pr_err("buf mngr init failed\n");
  8841. free_irq(cpp_dev->irq->start, cpp_dev);
  8842. @@ -792,35 +788,24 @@ static int cpp_init_hardware(struct cpp_device *cpp_dev)
  8843. pr_debug("CPP HW Version: 0x%x\n", cpp_dev->hw_info.cpp_hw_version);
  8844. cpp_dev->hw_info.cpp_hw_caps =
  8845. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4);
  8846. + cpp_get_clk_freq_tbl(cpp_dev->cpp_clk[MSM_CPP_CORE_CLK_IDX],
  8847. + &cpp_dev->hw_info);
  8848. pr_debug("CPP HW Caps: 0x%x\n", cpp_dev->hw_info.cpp_hw_caps);
  8849. msm_camera_io_w(0x1, cpp_dev->vbif_base + 0x4);
  8850. cpp_dev->taskletq_idx = 0;
  8851. atomic_set(&cpp_dev->irq_cnt, 0);
  8852. msm_cpp_create_buff_queue(cpp_dev, MSM_CPP_MAX_BUFF_QUEUE);
  8853. - pr_err("stream_cnt:%d\n", cpp_dev->stream_cnt);
  8854. + pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
  8855. cpp_dev->stream_cnt = 0;
  8856. if (cpp_dev->is_firmware_loaded == 1) {
  8857. - pr_err("cpp_dbg: is_firmware_loaded==1\n");
  8858. disable_irq(cpp_dev->irq->start);
  8859. - pr_err("cpp_dbg: disable_irq e\n");
  8860. - //msm_cpp_boot_hw(cpp_dev);
  8861. -
  8862. - pr_err("cpp_dbg: cpp_load_fw e\n");
  8863. - cpp_load_fw(cpp_dev, NULL);
  8864. - pr_err("cpp_dbg: cpp_load_fw x\n");
  8865. -
  8866. + cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
  8867. enable_irq(cpp_dev->irq->start);
  8868. - pr_err("cpp_dbg: enable_irq e\n");
  8869. -
  8870. msm_camera_io_w_mb(0x7C8, cpp_dev->base +
  8871. MSM_CPP_MICRO_IRQGEN_MASK);
  8872. - pr_err("cpp_dbg: MSM_CPP_MICRO_IRQGEN_MASK\n");
  8873. -
  8874. msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
  8875. MSM_CPP_MICRO_IRQGEN_CLR);
  8876. - pr_err("cpp_dbg: MSM_CPP_MICRO_IRQGEN_CLR\n");
  8877. }
  8878. - pr_err("cpp_dbg: end of cpp_init_hardware\n");
  8879. return rc;
  8880. req_irq_fail:
  8881. iounmap(cpp_dev->cpp_hw_base);
  8882. @@ -844,9 +829,10 @@ static void cpp_release_hardware(struct cpp_device *cpp_dev)
  8883. {
  8884. int32_t rc;
  8885. if (cpp_dev->state != CPP_STATE_BOOT) {
  8886. - rc = msm_cpp_buffer_ops(cpp_dev,VIDIOC_MSM_BUF_MNGR_DEINIT, NULL);
  8887. - if (rc < 0)
  8888. - pr_err("error in buf mngr deinit rc=%d\n", rc);
  8889. + rc = msm_cpp_buffer_ops(cpp_dev,
  8890. + VIDIOC_MSM_BUF_MNGR_DEINIT, NULL);
  8891. + if (rc < 0)
  8892. + pr_err("error in buf mngr deinit rc=%d\n", rc);
  8893. free_irq(cpp_dev->irq->start, cpp_dev);
  8894. tasklet_kill(&cpp_dev->cpp_tasklet);
  8895. atomic_set(&cpp_dev->irq_cnt, 0);
  8896. @@ -857,37 +843,17 @@ static void cpp_release_hardware(struct cpp_device *cpp_dev)
  8897. iounmap(cpp_dev->cpp_hw_base);
  8898. msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
  8899. cpp_dev->cpp_clk, ARRAY_SIZE(cpp_clk_info), 0);
  8900. - if (0) {
  8901. - regulator_disable(cpp_dev->fs_cpp);
  8902. - regulator_put(cpp_dev->fs_cpp);
  8903. - cpp_dev->fs_cpp = NULL;
  8904. + regulator_disable(cpp_dev->fs_cpp);
  8905. + regulator_put(cpp_dev->fs_cpp);
  8906. + cpp_dev->fs_cpp = NULL;
  8907. + if (cpp_dev->stream_cnt > 0) {
  8908. + pr_debug("error: stream count active\n");
  8909. + msm_isp_update_bandwidth(ISP_CPP, 0, 0);
  8910. }
  8911. - if (cpp_dev->stream_cnt > 0)
  8912. - pr_err("error: stream count active\n");
  8913. cpp_dev->stream_cnt = 0;
  8914. msm_isp_deinit_bandwidth_mgr(ISP_CPP);
  8915. }
  8916.  
  8917. -int check_clocks(struct cpp_device *cpp_dev)
  8918. -{
  8919. - struct clk** clkp;
  8920. - int i, j, ret;
  8921. -
  8922. - ret = 0;
  8923. - clkp = cpp_dev->cpp_clk;
  8924. - for (i=0;i<8;i++) {
  8925. - j = 0;
  8926. - if (clkp[i]) {
  8927. - j=(!!clkp[i]->prepare_count) | (!!clkp[i]->count);
  8928. - if (!j) {
  8929. - pr_err ("%s, %d clock : [%d:%d]\n", __func__, i, clkp[i]->prepare_count, clkp[i]->count);
  8930. - ret = -1;
  8931. - }
  8932. - }
  8933. - }
  8934. - return ret;
  8935. -}
  8936. -
  8937. static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
  8938. {
  8939. uint32_t i;
  8940. @@ -896,26 +862,9 @@ static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
  8941. const struct firmware *fw = NULL;
  8942. struct device *dev = &cpp_dev->pdev->dev;
  8943.  
  8944. - if (check_clocks(cpp_dev) < 0)
  8945. - {
  8946. - pr_err ("QCTKD: some clocks were off\n");
  8947. - dump_stack();
  8948. - //BUG();
  8949. - pr_err ("QCTKD: emergency clock for Samsung H\n");
  8950. - rc = msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
  8951. - cpp_dev->cpp_clk, ARRAY_SIZE(cpp_clk_info), 1);
  8952. - }
  8953. -
  8954. - pr_err("cpp_dbg: MSM_CPP_MICRO_CLKEN_CTL\n");
  8955. msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
  8956. -
  8957. - usleep(2000);
  8958. -
  8959. - pr_err("cpp_dbg: MSM_CPP_MICRO_BOOT_START\n");
  8960. msm_camera_io_w(0x1, cpp_dev->base +
  8961. MSM_CPP_MICRO_BOOT_START);
  8962. -
  8963. - pr_err("cpp_dbg: MSM_CPP_MSG_ID_CMD\n");
  8964. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  8965.  
  8966. if (fw_name_bin) {
  8967. @@ -923,7 +872,7 @@ static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
  8968. rc = request_firmware(&fw, fw_name_bin, dev);
  8969. if (rc) {
  8970. dev_err(dev,
  8971. - "Failed to locate blob %s from device %p, Error: %d\n",
  8972. + "Fail to loc blob %s from dev %p, Error: %d\n",
  8973. fw_name_bin, dev, rc);
  8974. }
  8975. if (NULL != fw)
  8976. @@ -932,11 +881,15 @@ static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
  8977. msm_camera_io_w(0x1, cpp_dev->base +
  8978. MSM_CPP_MICRO_BOOT_START);
  8979. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  8980. - msm_camera_io_w(0xFFFFFFFF, cpp_dev->base + MSM_CPP_MICRO_IRQGEN_CLR);
  8981. + msm_camera_io_w(0xFFFFFFFF, cpp_dev->base +
  8982. + MSM_CPP_MICRO_IRQGEN_CLR);
  8983.  
  8984. /*Start firmware loading*/
  8985. msm_cpp_write(MSM_CPP_CMD_FW_LOAD, cpp_dev->base);
  8986. - msm_cpp_write(MSM_CPP_END_ADDRESS, cpp_dev->base);
  8987. + if (fw)
  8988. + msm_cpp_write(fw->size, cpp_dev->base);
  8989. + else
  8990. + msm_cpp_write(MSM_CPP_END_ADDRESS, cpp_dev->base);
  8991. msm_cpp_write(MSM_CPP_START_ADDRESS, cpp_dev->base);
  8992.  
  8993. if (ptr_bin) {
  8994. @@ -947,22 +900,18 @@ static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
  8995. }
  8996. if (fw)
  8997. release_firmware(fw);
  8998. + msm_camera_io_w_mb(0x00, cpp_dev->cpp_hw_base + 0xC);
  8999. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_OK);
  9000. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  9001. }
  9002.  
  9003. - pr_err("cpp_dbg: Trigger MC to jump to start address\n");
  9004. /*Trigger MC to jump to start address*/
  9005. msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
  9006. msm_cpp_write(MSM_CPP_JUMP_ADDRESS, cpp_dev->base);
  9007.  
  9008. - pr_err("cpp_dbg: msm_cpp_poll MSM_CPP_MSG_ID_CMD\n");
  9009. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  9010. - pr_err("cpp_dbg: msm_cpp_poll 0x1\n");
  9011. msm_cpp_poll(cpp_dev->base, 0x1);
  9012. - pr_err("cpp_dbg: msm_cpp_poll MSM_CPP_MSG_ID_JUMP_ACK\n");
  9013. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
  9014. - pr_err("cpp_dbg: msm_cpp_poll MSM_CPP_MSG_ID_TRAILER\n");
  9015. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
  9016.  
  9017. /*Get Bootloader Version*/
  9018. @@ -980,7 +929,8 @@ static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
  9019. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
  9020. msm_cpp_poll(cpp_dev->base, 0x2);
  9021. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_FW_VER);
  9022. - pr_info("CPP FW Version: 0x%x\n", msm_cpp_read(cpp_dev->base));
  9023. + cpp_dev->fw_version = msm_cpp_read(cpp_dev->base);
  9024. + pr_info("CPP FW Version: 0x%08x\n", cpp_dev->fw_version);
  9025. msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
  9026.  
  9027. /*Disable MC clock*/
  9028. @@ -1018,6 +968,7 @@ static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  9029. cpp_dev->cpp_open_cnt++;
  9030. if (cpp_dev->cpp_open_cnt == 1) {
  9031. cpp_init_hardware(cpp_dev);
  9032. + iommu_attach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
  9033. cpp_init_mem(cpp_dev);
  9034. cpp_dev->state = CPP_STATE_IDLE;
  9035. }
  9036. @@ -1029,9 +980,19 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  9037. {
  9038. uint32_t i;
  9039. struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
  9040. + struct msm_device_queue *processing_q = NULL;
  9041. + struct msm_device_queue *eventData_q = NULL;
  9042. +
  9043. + if (!cpp_dev) {
  9044. + pr_err("failed: cpp_dev %p\n", cpp_dev);
  9045. + return -EINVAL;
  9046. + }
  9047.  
  9048. mutex_lock(&cpp_dev->mutex);
  9049.  
  9050. + processing_q = &cpp_dev->processing_q;
  9051. + eventData_q = &cpp_dev->eventData_q;
  9052. +
  9053. if (cpp_dev->cpp_open_cnt == 0) {
  9054. mutex_unlock(&cpp_dev->mutex);
  9055. return 0;
  9056. @@ -1052,39 +1013,43 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  9057.  
  9058. cpp_dev->cpp_open_cnt--;
  9059. if (cpp_dev->cpp_open_cnt == 0) {
  9060. - pr_err("%s: irq_status: 0x%x\n", __func__,
  9061. + pr_debug("irq_status: 0x%x\n",
  9062. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4));
  9063. - pr_err("%s: DEBUG_SP: 0x%x\n", __func__,
  9064. + pr_debug("DEBUG_SP: 0x%x\n",
  9065. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x40));
  9066. - pr_err("%s: DEBUG_T: 0x%x\n", __func__,
  9067. + pr_debug("DEBUG_T: 0x%x\n",
  9068. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x44));
  9069. - pr_err("%s: DEBUG_N: 0x%x\n", __func__,
  9070. + pr_debug("DEBUG_N: 0x%x\n",
  9071. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x48));
  9072. - pr_err("%s: DEBUG_R: 0x%x\n", __func__,
  9073. + pr_debug("DEBUG_R: 0x%x\n",
  9074. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4C));
  9075. - pr_err("%s: DEBUG_OPPC: 0x%x\n", __func__,
  9076. + pr_debug("DEBUG_OPPC: 0x%x\n",
  9077. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x50));
  9078. - pr_err("%s: DEBUG_MO: 0x%x\n", __func__,
  9079. + pr_debug("DEBUG_MO: 0x%x\n",
  9080. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x54));
  9081. - pr_err("%s: DEBUG_TIMER0: 0x%x\n", __func__,
  9082. + pr_debug("DEBUG_TIMER0: 0x%x\n",
  9083. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x60));
  9084. - pr_err("%s: DEBUG_TIMER1: 0x%x\n", __func__,
  9085. + pr_debug("DEBUG_TIMER1: 0x%x\n",
  9086. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x64));
  9087. - pr_err("%s: DEBUG_GPI: 0x%x\n", __func__,
  9088. + pr_debug("DEBUG_GPI: 0x%x\n",
  9089. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x70));
  9090. - pr_err("%s: DEBUG_GPO: 0x%x\n", __func__,
  9091. + pr_debug("DEBUG_GPO: 0x%x\n",
  9092. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x74));
  9093. - pr_err("%s: DEBUG_T0: 0x%x\n", __func__,
  9094. + pr_debug("DEBUG_T0: 0x%x\n",
  9095. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x80));
  9096. - pr_err("%s: DEBUG_R0: 0x%x\n", __func__,
  9097. + pr_debug("DEBUG_R0: 0x%x\n",
  9098. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x84));
  9099. - pr_err("%s: DEBUG_T1: 0x%x\n", __func__,
  9100. + pr_debug("DEBUG_T1: 0x%x\n",
  9101. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x88));
  9102. - pr_err("%s: DEBUG_R1: 0x%x\n", __func__,
  9103. + pr_debug("DEBUG_R1: 0x%x\n",
  9104. msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
  9105. msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
  9106. + msm_cpp_clear_timer(cpp_dev);
  9107. cpp_deinit_mem(cpp_dev);
  9108. + iommu_detach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
  9109. cpp_release_hardware(cpp_dev);
  9110. + msm_cpp_empty_list(processing_q, list_frame);
  9111. + msm_cpp_empty_list(eventData_q, list_eventdata);
  9112. cpp_dev->state = CPP_STATE_OFF;
  9113. }
  9114.  
  9115. @@ -1109,18 +1074,19 @@ static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
  9116. return rc;
  9117. }
  9118.  
  9119. -static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
  9120. +static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
  9121. + uint32_t buff_mgr_ops)
  9122. {
  9123. struct v4l2_event v4l2_evt;
  9124. - struct msm_queue_cmd *frame_qcmd;
  9125. - struct msm_queue_cmd *event_qcmd;
  9126. - struct msm_cpp_frame_info_t *processed_frame;
  9127. + struct msm_queue_cmd *frame_qcmd = NULL;
  9128. + struct msm_queue_cmd *event_qcmd = NULL;
  9129. + struct msm_cpp_frame_info_t *processed_frame = NULL;
  9130. struct msm_device_queue *queue = &cpp_dev->processing_q;
  9131. struct msm_buf_mngr_info buff_mgr_info;
  9132. int rc = 0;
  9133.  
  9134. - if (queue->len > 0) {
  9135. - frame_qcmd = msm_dequeue(queue, list_frame);
  9136. + frame_qcmd = msm_dequeue(queue, list_frame);
  9137. + if (frame_qcmd) {
  9138. processed_frame = frame_qcmd->command;
  9139. do_gettimeofday(&(processed_frame->out_time));
  9140. kfree(frame_qcmd);
  9141. @@ -1145,7 +1111,7 @@ static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
  9142. buff_mgr_info.index =
  9143. processed_frame->output_buffer_info[0].index;
  9144. rc = msm_cpp_buffer_ops(cpp_dev,
  9145. - VIDIOC_MSM_BUF_MNGR_BUF_DONE,
  9146. + buff_mgr_ops,
  9147. &buff_mgr_info);
  9148. if (rc < 0) {
  9149. pr_err("error putting buffer\n");
  9150. @@ -1153,26 +1119,26 @@ static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
  9151. }
  9152. }
  9153.  
  9154. - if (!processed_frame->output_buffer_info[1].processed_divert) {
  9155. - if (processed_frame->duplicate_output) {
  9156. - memset(&buff_mgr_info, 0 ,
  9157. - sizeof(struct msm_buf_mngr_info));
  9158. - buff_mgr_info.session_id =
  9159. - ((processed_frame->duplicate_identity >> 16) & 0xFFFF);
  9160. - buff_mgr_info.stream_id =
  9161. - (processed_frame->duplicate_identity & 0xFFFF);
  9162. - buff_mgr_info.frame_id = processed_frame->frame_id;
  9163. - buff_mgr_info.timestamp = processed_frame->timestamp;
  9164. - buff_mgr_info.index =
  9165. - processed_frame->output_buffer_info[1].index;
  9166. + if (processed_frame->duplicate_output &&
  9167. + !processed_frame->
  9168. + output_buffer_info[1].processed_divert) {
  9169. + memset(&buff_mgr_info, 0 ,
  9170. + sizeof(struct msm_buf_mngr_info));
  9171. + buff_mgr_info.session_id =
  9172. + ((processed_frame->duplicate_identity >> 16) & 0xFFFF);
  9173. + buff_mgr_info.stream_id =
  9174. + (processed_frame->duplicate_identity & 0xFFFF);
  9175. + buff_mgr_info.frame_id = processed_frame->frame_id;
  9176. + buff_mgr_info.timestamp = processed_frame->timestamp;
  9177. + buff_mgr_info.index =
  9178. + processed_frame->output_buffer_info[1].index;
  9179. rc = msm_cpp_buffer_ops(cpp_dev,
  9180. - VIDIOC_MSM_BUF_MNGR_BUF_DONE,
  9181. + buff_mgr_ops,
  9182. &buff_mgr_info);
  9183. if (rc < 0) {
  9184. pr_err("error putting buffer\n");
  9185. rc = -EINVAL;
  9186. }
  9187. - }
  9188. }
  9189. v4l2_evt.id = processed_frame->inst_id;
  9190. v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
  9191. @@ -1181,127 +1147,92 @@ static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
  9192. return rc;
  9193. }
  9194.  
  9195. +#if MSM_CPP_DUMP_FRM_CMD
  9196. +static int msm_cpp_dump_frame_cmd(uint32_t *cmd, int32_t len)
  9197. +{
  9198. + int i;
  9199. + pr_err("%s: -------- cpp frame cmd msg start --------", __func__);
  9200. + for (i = 0; i < len; i++)
  9201. + pr_err("%s: msg[%03d] = 0x%08x", __func__, i, cmd[i]);
  9202. + pr_err("%s: --------- cpp frame cmd msg end ---------", __func__);
  9203. + return 0;
  9204. +}
  9205. +#else
  9206. +static int msm_cpp_dump_frame_cmd(uint32_t *cmd, int32_t len)
  9207. +{
  9208. + return 0;
  9209. +}
  9210. +#endif
  9211. +
  9212. static void msm_cpp_do_timeout_work(struct work_struct *work)
  9213. {
  9214. int ret;
  9215. uint32_t i = 0;
  9216. - struct msm_cpp_frame_info_t *this_frame =
  9217. - cpp_timers[del_timer_idx].data.processed_frame;
  9218. - struct msm_cpp_frame_info_t *second_frame = NULL;
  9219. - struct msm_queue_cmd *frame_qcmd = NULL;
  9220. - struct msm_cpp_frame_info_t *processed_frame = NULL;
  9221. - struct msm_device_queue *queue = NULL;
  9222. -
  9223. - mutex_lock(&cpp_timers[0].data.cpp_dev->mutex);
  9224. -
  9225. - pr_err("cpp_timer_callback called idx:%d. (jiffies=%lu)\n",
  9226. - del_timer_idx, jiffies);
  9227. - cpp_timers[del_timer_idx].used = 0;
  9228. - cpp_timers[del_timer_idx].data.processed_frame = NULL;
  9229. - del_timer_idx = 1 - del_timer_idx;
  9230. + struct msm_cpp_frame_info_t *this_frame = NULL;
  9231.  
  9232. - if (!work || !this_frame) {
  9233. - pr_err("Invalid work:%p, this_frame:%p, del_idx:%d\n",
  9234. - work, this_frame, del_timer_idx);
  9235. - mutex_unlock(&cpp_timers[0].data.cpp_dev->mutex);
  9236. + pr_err("cpp_timer_callback called. (jiffies=%lu)\n",
  9237. + jiffies);
  9238. + if (!work || cpp_timer.data.cpp_dev->state != CPP_STATE_ACTIVE) {
  9239. + pr_err("Invalid work:%p or state:%d\n", work,
  9240. + cpp_timer.data.cpp_dev->state);
  9241. return;
  9242. }
  9243. -
  9244. -
  9245. - /* If cpp_dev state is off we can safely clear the pending frame or
  9246. - If the trial count exceed max attempts then clean the pending frame */
  9247. - if ((cpp_timers[0].data.cpp_dev->state != CPP_STATE_ACTIVE) ||
  9248. - (cpp_timers[0].data.cpp_dev->timeout_trial_cnt >
  9249. - MSM_CPP_MAX_TIMEOUT_TRIAL)) {
  9250. - pr_err("State:%d\n, timeout_trial_cnt:%d\n",
  9251. - cpp_timers[0].data.cpp_dev->state,
  9252. - cpp_timers[0].data.cpp_dev->timeout_trial_cnt);
  9253. -
  9254. - queue = &cpp_timers[0].data.cpp_dev->processing_q;
  9255. - frame_qcmd = msm_dequeue(queue, list_frame);
  9256. - if (frame_qcmd) {
  9257. - processed_frame = frame_qcmd->command;
  9258. - kfree(frame_qcmd);
  9259. - if (processed_frame)
  9260. - kfree(processed_frame->cpp_cmd_msg);
  9261. - kfree(processed_frame);
  9262. - }
  9263. - mutex_unlock(&cpp_timers[0].data.cpp_dev->mutex);
  9264. + if (!atomic_read(&cpp_timer.used)) {
  9265. + pr_err("Delayed trigger, IRQ serviced\n");
  9266. return;
  9267. }
  9268.  
  9269. - pr_err("fatal: cpp_timer expired for identity=0x%x, frame_id=%03d",
  9270. - this_frame->identity, this_frame->frame_id);
  9271. -
  9272. - if (cpp_timers[del_timer_idx].used == 1) {
  9273. - pr_err("deleting cpp_timer %d.\n", del_timer_idx);
  9274. - del_timer(&cpp_timers[del_timer_idx].cpp_timer);
  9275. - cpp_timers[del_timer_idx].used = 0;
  9276. - second_frame = cpp_timers[del_timer_idx].data.processed_frame;
  9277. - cpp_timers[del_timer_idx].data.processed_frame = NULL;
  9278. - del_timer_idx = 1 - del_timer_idx;
  9279. - }
  9280. -
  9281. - disable_irq(cpp_timers[del_timer_idx].data.cpp_dev->irq->start);
  9282. + disable_irq(cpp_timer.data.cpp_dev->irq->start);
  9283. pr_err("Reloading firmware\n");
  9284. - cpp_load_fw(cpp_timers[del_timer_idx].data.cpp_dev, NULL);
  9285. + cpp_load_fw(cpp_timer.data.cpp_dev, NULL);
  9286. pr_err("Firmware loading done\n");
  9287. - enable_irq(cpp_timers[del_timer_idx].data.cpp_dev->irq->start);
  9288. - msm_camera_io_w_mb(0x8,cpp_timers[del_timer_idx].data.cpp_dev->base +
  9289. + enable_irq(cpp_timer.data.cpp_dev->irq->start);
  9290. + msm_camera_io_w_mb(0x8, cpp_timer.data.cpp_dev->base +
  9291. MSM_CPP_MICRO_IRQGEN_MASK);
  9292. - msm_camera_io_w_mb(0xFFFF, cpp_timers[del_timer_idx].data.cpp_dev->base +
  9293. + msm_camera_io_w_mb(0xFFFF,
  9294. + cpp_timer.data.cpp_dev->base +
  9295. MSM_CPP_MICRO_IRQGEN_CLR);
  9296.  
  9297. - cpp_timers[set_timer_idx].data.processed_frame = this_frame;
  9298. - cpp_timers[set_timer_idx].used = 1;
  9299. - pr_err("ReInstalling cpp_timer %d\n", set_timer_idx);
  9300. - setup_timer(&cpp_timers[set_timer_idx].cpp_timer, cpp_timer_callback,
  9301. - (unsigned long)&cpp_timers[0]);
  9302. + if (!atomic_read(&cpp_timer.used)) {
  9303. + pr_err("Delayed trigger, IRQ serviced\n");
  9304. + return;
  9305. + }
  9306. +
  9307. + if (cpp_timer.data.cpp_dev->timeout_trial_cnt >=
  9308. + MSM_CPP_MAX_TIMEOUT_TRIAL) {
  9309. + pr_info("Max trial reached\n");
  9310. + msm_cpp_notify_frame_done(cpp_timer.data.cpp_dev,
  9311. + VIDIOC_MSM_BUF_MNGR_PUT_BUF);
  9312. + cpp_timer.data.cpp_dev->timeout_trial_cnt = 0;
  9313. + return;
  9314. + }
  9315. +
  9316. + this_frame = cpp_timer.data.processed_frame;
  9317. pr_err("Starting timer to fire in %d ms. (jiffies=%lu)\n",
  9318. CPP_CMD_TIMEOUT_MS, jiffies);
  9319. - ret = mod_timer(&cpp_timers[set_timer_idx].cpp_timer,
  9320. + ret = mod_timer(&cpp_timer.cpp_timer,
  9321. jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
  9322. if (ret)
  9323. pr_err("error in mod_timer\n");
  9324.  
  9325. - set_timer_idx = 1 - set_timer_idx;
  9326. - pr_err("Rescheduling for identity=0x%x, frame_id=%03d",
  9327. + pr_err("Rescheduling for identity=0x%x, frame_id=%03d\n",
  9328. this_frame->identity, this_frame->frame_id);
  9329. - msm_cpp_write(0x6, cpp_timers[set_timer_idx].data.cpp_dev->base);
  9330. + msm_cpp_write(0x6, cpp_timer.data.cpp_dev->base);
  9331. + msm_cpp_dump_frame_cmd(this_frame->cpp_cmd_msg,
  9332. + this_frame->msg_len);
  9333. for (i = 0; i < this_frame->msg_len; i++)
  9334. msm_cpp_write(this_frame->cpp_cmd_msg[i],
  9335. - cpp_timers[set_timer_idx].data.cpp_dev->base);
  9336. -
  9337. -
  9338. - if (second_frame != NULL) {
  9339. - cpp_timers[set_timer_idx].data.processed_frame = second_frame;
  9340. - cpp_timers[set_timer_idx].used = 1;
  9341. - pr_err("ReInstalling cpp_timer %d\n", set_timer_idx);
  9342. - setup_timer(&cpp_timers[set_timer_idx].cpp_timer, cpp_timer_callback,
  9343. - (unsigned long)&cpp_timers[0]);
  9344. - pr_err("Starting timer to fire in %d ms. (jiffies=%lu)\n",
  9345. - CPP_CMD_TIMEOUT_MS, jiffies);
  9346. - ret = mod_timer(&cpp_timers[set_timer_idx].cpp_timer,
  9347. - jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
  9348. - if (ret)
  9349. - pr_err("error in mod_timer\n");
  9350. -
  9351. - set_timer_idx = 1 - set_timer_idx;
  9352. - pr_err("Rescheduling for identity=0x%x, frame_id=%03d",
  9353. - second_frame->identity, second_frame->frame_id);
  9354. - msm_cpp_write(0x6, cpp_timers[set_timer_idx].data.cpp_dev->base);
  9355. - for (i = 0; i < second_frame->msg_len; i++)
  9356. - msm_cpp_write(second_frame->cpp_cmd_msg[i],
  9357. - cpp_timers[set_timer_idx].data.cpp_dev->base);
  9358. - }
  9359. - cpp_timers[1 - set_timer_idx].data.cpp_dev->timeout_trial_cnt++;
  9360. - mutex_unlock(&cpp_timers[0].data.cpp_dev->mutex);
  9361. + cpp_timer.data.cpp_dev->base);
  9362. + cpp_timer.data.cpp_dev->timeout_trial_cnt++;
  9363. + return;
  9364. }
  9365.  
  9366. void cpp_timer_callback(unsigned long data)
  9367. {
  9368. - queue_work(cpp_timers[set_timer_idx].data.cpp_dev->timer_wq,
  9369. - (struct work_struct *)cpp_timers[set_timer_idx].data.cpp_dev->work);
  9370. + struct msm_cpp_work_t *work =
  9371. + cpp_timer.data.cpp_dev->work;
  9372. + queue_work(cpp_timer.data.cpp_dev->timer_wq,
  9373. + (struct work_struct *)work);
  9374. }
  9375.  
  9376. static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
  9377. @@ -1317,22 +1248,22 @@ static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
  9378. msm_enqueue(&cpp_dev->processing_q,
  9379. &frame_qcmd->list_frame);
  9380.  
  9381. - cpp_timers[set_timer_idx].data.processed_frame = process_frame;
  9382. - cpp_timers[set_timer_idx].used = 1;
  9383. + cpp_timer.data.processed_frame = process_frame;
  9384. + atomic_set(&cpp_timer.used, 1);
  9385. /* install timer for cpp timeout */
  9386. - CPP_DBG("Installing cpp_timer %d\n", set_timer_idx);
  9387. - setup_timer(&cpp_timers[set_timer_idx].cpp_timer, cpp_timer_callback,
  9388. - (unsigned long)&cpp_timers[0]);
  9389. - CPP_DBG( "Starting timer to fire in %d ms. (jiffies=%lu)\n",
  9390. + CPP_DBG("Installing cpp_timer\n");
  9391. + setup_timer(&cpp_timer.cpp_timer,
  9392. + cpp_timer_callback, (unsigned long)&cpp_timer);
  9393. + CPP_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
  9394. CPP_CMD_TIMEOUT_MS, jiffies);
  9395. - ret = mod_timer(&cpp_timers[set_timer_idx].cpp_timer,
  9396. + ret = mod_timer(&cpp_timer.cpp_timer,
  9397. jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
  9398. if (ret)
  9399. pr_err("error in mod_timer\n");
  9400.  
  9401. - set_timer_idx = 1 - set_timer_idx;
  9402. -
  9403. msm_cpp_write(0x6, cpp_dev->base);
  9404. + msm_cpp_dump_frame_cmd(process_frame->cpp_cmd_msg,
  9405. + process_frame->msg_len);
  9406. for (i = 0; i < process_frame->msg_len; i++) {
  9407. if ((induce_error) && (i == 1)) {
  9408. pr_err("Induce error\n");
  9409. @@ -1368,9 +1299,8 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9410. uint16_t num_stripes = 0;
  9411. struct msm_buf_mngr_info buff_mgr_info, dup_buff_mgr_info;
  9412. int32_t status = 0;
  9413. - uint8_t fw_version_1_2_x = 0;
  9414. - int32_t *ret_status = 0;
  9415. -
  9416. + int in_fd;
  9417. + int32_t stripe_base = 0;
  9418. int i = 0;
  9419. if (!new_frame) {
  9420. pr_err("Insufficient memory. return\n");
  9421. @@ -1382,9 +1312,16 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9422. if (rc) {
  9423. ERR_COPY_FROM_USER();
  9424. rc = -EINVAL;
  9425. - goto ERROR0;
  9426. + goto ERROR1;
  9427. + }
  9428. +
  9429. + if ((new_frame->msg_len == 0) ||
  9430. + (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
  9431. + pr_err("%s:%d: Invalid frame len:%d\n", __func__,
  9432. + __LINE__, new_frame->msg_len);
  9433. + rc = -EINVAL;
  9434. + goto ERROR1;
  9435. }
  9436. - ret_status = new_frame->status;
  9437.  
  9438. cpp_frame_msg = kzalloc(sizeof(uint32_t)*new_frame->msg_len,
  9439. GFP_KERNEL);
  9440. @@ -1404,19 +1341,26 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9441. }
  9442.  
  9443. new_frame->cpp_cmd_msg = cpp_frame_msg;
  9444. -
  9445. + if (cpp_frame_msg == NULL ||
  9446. + (new_frame->msg_len < MSM_CPP_MIN_FRAME_LENGTH)) {
  9447. + pr_err("%s %d Length is not correct or frame message is missing\n",
  9448. + __func__, __LINE__);
  9449. + return -EINVAL;
  9450. + }
  9451. + if (cpp_frame_msg[new_frame->msg_len - 1] != MSM_CPP_MSG_ID_TRAILER) {
  9452. + pr_err("%s %d Invalid frame message\n", __func__, __LINE__);
  9453. + return -EINVAL;
  9454. + }
  9455. in_phyaddr = msm_cpp_fetch_buffer_info(cpp_dev,
  9456. &new_frame->input_buffer_info,
  9457. - ((new_frame->identity >> 16) & 0xFFFF),
  9458. - (new_frame->identity & 0xFFFF));
  9459. + ((new_frame->input_buffer_info.identity >> 16) & 0xFFFF),
  9460. + (new_frame->input_buffer_info.identity & 0xFFFF), &in_fd);
  9461. if (!in_phyaddr) {
  9462. pr_err("error gettting input physical address\n");
  9463. rc = -EINVAL;
  9464. goto ERROR2;
  9465. }
  9466.  
  9467. - memset(&new_frame->output_buffer_info[0], 0,
  9468. - sizeof(struct msm_cpp_buffer_info_t));
  9469. memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
  9470. buff_mgr_info.session_id = ((new_frame->identity >> 16) & 0xFFFF);
  9471. buff_mgr_info.stream_id = (new_frame->identity & 0xFFFF);
  9472. @@ -1431,7 +1375,8 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9473. out_phyaddr0 = msm_cpp_fetch_buffer_info(cpp_dev,
  9474. &new_frame->output_buffer_info[0],
  9475. ((new_frame->identity >> 16) & 0xFFFF),
  9476. - (new_frame->identity & 0xFFFF));
  9477. + (new_frame->identity & 0xFFFF),
  9478. + &new_frame->output_buffer_info[0].fd);
  9479. if (!out_phyaddr0) {
  9480. pr_err("error gettting output physical address\n");
  9481. rc = -EINVAL;
  9482. @@ -1441,12 +1386,15 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9483.  
  9484. /* get buffer for duplicate output */
  9485. if (new_frame->duplicate_output) {
  9486. - pr_debug("duplication enabled, dup_id=0x%x", new_frame->duplicate_identity);
  9487. + CPP_DBG("duplication enabled, dup_id=0x%x",
  9488. + new_frame->duplicate_identity);
  9489. memset(&new_frame->output_buffer_info[1], 0,
  9490. sizeof(struct msm_cpp_buffer_info_t));
  9491. memset(&dup_buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
  9492. - dup_buff_mgr_info.session_id = ((new_frame->duplicate_identity >> 16) & 0xFFFF);
  9493. - dup_buff_mgr_info.stream_id = (new_frame->duplicate_identity & 0xFFFF);
  9494. + dup_buff_mgr_info.session_id =
  9495. + ((new_frame->duplicate_identity >> 16) & 0xFFFF);
  9496. + dup_buff_mgr_info.stream_id =
  9497. + (new_frame->duplicate_identity & 0xFFFF);
  9498. rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
  9499. &dup_buff_mgr_info);
  9500. if (rc < 0) {
  9501. @@ -1454,11 +1402,13 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9502. pr_debug("error getting buffer rc:%d\n", rc);
  9503. goto ERROR3;
  9504. }
  9505. - new_frame->output_buffer_info[1].index = dup_buff_mgr_info.index;
  9506. + new_frame->output_buffer_info[1].index =
  9507. + dup_buff_mgr_info.index;
  9508. out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
  9509. &new_frame->output_buffer_info[1],
  9510. ((new_frame->duplicate_identity >> 16) & 0xFFFF),
  9511. - (new_frame->duplicate_identity & 0xFFFF));
  9512. + (new_frame->duplicate_identity & 0xFFFF),
  9513. + &new_frame->output_buffer_info[1].fd);
  9514. if (!out_phyaddr1) {
  9515. pr_err("error gettting output physical address\n");
  9516. rc = -EINVAL;
  9517. @@ -1469,25 +1419,41 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9518. /* set duplicate enable bit */
  9519. cpp_frame_msg[5] |= 0x1;
  9520. }
  9521. -
  9522. +
  9523. num_stripes = ((cpp_frame_msg[12] >> 20) & 0x3FF) +
  9524. ((cpp_frame_msg[12] >> 10) & 0x3FF) +
  9525. (cpp_frame_msg[12] & 0x3FF);
  9526.  
  9527. - fw_version_1_2_x = 0;
  9528. - if (cpp_dev->hw_info.cpp_hw_version == 0x10010000) {
  9529. - fw_version_1_2_x = 2;
  9530. + if ((cpp_dev->fw_version & 0xffff0000) ==
  9531. + CPP_FW_VERSION_1_2_0) {
  9532. + stripe_base = STRIPE_BASE_FW_1_2_0;
  9533. + } else if ((cpp_dev->fw_version & 0xffff0000) ==
  9534. + CPP_FW_VERSION_1_4_0) {
  9535. + stripe_base = STRIPE_BASE_FW_1_4_0;
  9536. + } else if ((cpp_dev->fw_version & 0xffff0000) ==
  9537. + CPP_FW_VERSION_1_6_0) {
  9538. + stripe_base = STRIPE_BASE_FW_1_6_0;
  9539. + } else {
  9540. + pr_err("invalid fw version %08x", cpp_dev->fw_version);
  9541. }
  9542. +
  9543. + if ((stripe_base + num_stripes*27 + 1) != new_frame->msg_len) {
  9544. + pr_err("Invalid frame message\n");
  9545. + rc = -EINVAL;
  9546. + goto ERROR3;
  9547. + }
  9548. +
  9549. +
  9550. for (i = 0; i < num_stripes; i++) {
  9551. - cpp_frame_msg[(133 + fw_version_1_2_x) + i * 27] +=
  9552. + cpp_frame_msg[stripe_base + 5 + i*27] +=
  9553. (uint32_t) in_phyaddr;
  9554. - cpp_frame_msg[(139 + fw_version_1_2_x) + i * 27] +=
  9555. + cpp_frame_msg[stripe_base + 11 + i * 27] +=
  9556. (uint32_t) out_phyaddr0;
  9557. - cpp_frame_msg[(140 + fw_version_1_2_x) + i * 27] +=
  9558. + cpp_frame_msg[stripe_base + 12 + i * 27] +=
  9559. (uint32_t) out_phyaddr1;
  9560. - cpp_frame_msg[(141 + fw_version_1_2_x) + i * 27] +=
  9561. + cpp_frame_msg[stripe_base + 13 + i * 27] +=
  9562. (uint32_t) out_phyaddr0;
  9563. - cpp_frame_msg[(142 + fw_version_1_2_x) + i * 27] +=
  9564. + cpp_frame_msg[stripe_base + 14 + i * 27] +=
  9565. (uint32_t) out_phyaddr1;
  9566. }
  9567.  
  9568. @@ -1509,7 +1475,7 @@ static int msm_cpp_cfg(struct cpp_device *cpp_dev,
  9569.  
  9570. ioctl_ptr->trans_code = rc;
  9571. status = rc;
  9572. - rc = (copy_to_user((void __user *)ret_status, &status,
  9573. + rc = (copy_to_user((void __user *)new_frame->status, &status,
  9574. sizeof(int32_t)) ? -EFAULT : 0);
  9575. if (rc) {
  9576. ERR_COPY_FROM_USER();
  9577. @@ -1527,14 +1493,33 @@ ERROR2:
  9578. ERROR1:
  9579. ioctl_ptr->trans_code = rc;
  9580. status = rc;
  9581. - if (copy_to_user((void __user *)ret_status, &status,
  9582. + if (copy_to_user((void __user *)new_frame->status, &status,
  9583. sizeof(int32_t)))
  9584. pr_err("error cannot copy error\n");
  9585. -ERROR0:
  9586. kfree(new_frame);
  9587. return rc;
  9588. }
  9589.  
  9590. +void msm_cpp_clean_queue(struct cpp_device *cpp_dev)
  9591. +{
  9592. + struct msm_queue_cmd *frame_qcmd = NULL;
  9593. + struct msm_cpp_frame_info_t *processed_frame = NULL;
  9594. + struct msm_device_queue *queue = NULL;
  9595. +
  9596. + while (cpp_dev->processing_q.len) {
  9597. + pr_info("queue len:%d\n", cpp_dev->processing_q.len);
  9598. + queue = &cpp_dev->processing_q;
  9599. + frame_qcmd = msm_dequeue(queue, list_frame);
  9600. + if (frame_qcmd) {
  9601. + processed_frame = frame_qcmd->command;
  9602. + kfree(frame_qcmd);
  9603. + if (processed_frame)
  9604. + kfree(processed_frame->cpp_cmd_msg);
  9605. + kfree(processed_frame);
  9606. + }
  9607. + }
  9608. +}
  9609. +
  9610. long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9611. unsigned int cmd, void *arg)
  9612. {
  9613. @@ -1550,17 +1535,17 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9614. pr_err("cpp_dev is null\n");
  9615. return -EINVAL;
  9616. }
  9617. +
  9618. + if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
  9619. + pr_err("ioctl_ptr->ioctl_ptr %p is NULL or ioctl_ptr->len %d is 0\n",
  9620. + ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
  9621. + return -EINVAL;
  9622. + }
  9623. +
  9624. mutex_lock(&cpp_dev->mutex);
  9625. CPP_DBG("E cmd: %d\n", cmd);
  9626. switch (cmd) {
  9627. case VIDIOC_MSM_CPP_GET_HW_INFO: {
  9628. -
  9629. - if (ioctl_ptr->ioctl_ptr == NULL) {
  9630. - pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  9631. - mutex_unlock(&cpp_dev->mutex);
  9632. - return -EINVAL;
  9633. - }
  9634. -
  9635. if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
  9636. &cpp_dev->hw_info,
  9637. sizeof(struct cpp_hw_info))) {
  9638. @@ -1572,31 +1557,31 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9639.  
  9640. case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
  9641. if (cpp_dev->is_firmware_loaded == 0) {
  9642. - if (cpp_dev->fw_name_bin) {
  9643. + if (cpp_dev->fw_name_bin != NULL) {
  9644. kfree(cpp_dev->fw_name_bin);
  9645. cpp_dev->fw_name_bin = NULL;
  9646. }
  9647. -
  9648. - if (ioctl_ptr->len == 0) {
  9649. - pr_err("ioctl_ptr->len is 0\n");
  9650. + if (ioctl_ptr->len >= MSM_CPP_MAX_FW_NAME_LEN) {
  9651. + pr_err("Error: ioctl_ptr->len = %d \n",
  9652. + ioctl_ptr->len);
  9653. mutex_unlock(&cpp_dev->mutex);
  9654. return -EINVAL;
  9655. }
  9656. -
  9657. - if (ioctl_ptr->ioctl_ptr == NULL) {
  9658. - pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  9659. - mutex_unlock(&cpp_dev->mutex);
  9660. - return -EINVAL;
  9661. - }
  9662. -
  9663. - cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len, GFP_KERNEL);
  9664. + cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
  9665. + GFP_KERNEL);
  9666. if (!cpp_dev->fw_name_bin) {
  9667. pr_err("%s:%d: malloc error\n", __func__,
  9668. __LINE__);
  9669. mutex_unlock(&cpp_dev->mutex);
  9670. return -EINVAL;
  9671. }
  9672. -
  9673. + if (ioctl_ptr->ioctl_ptr == NULL) {
  9674. + pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  9675. + kfree(cpp_dev->fw_name_bin);
  9676. + cpp_dev->fw_name_bin = NULL;
  9677. + mutex_unlock(&cpp_dev->mutex);
  9678. + return -EINVAL;
  9679. + }
  9680. rc = (copy_from_user(cpp_dev->fw_name_bin,
  9681. (void __user *)ioctl_ptr->ioctl_ptr,
  9682. ioctl_ptr->len) ? -EFAULT : 0);
  9683. @@ -1607,7 +1592,7 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9684. mutex_unlock(&cpp_dev->mutex);
  9685. return -EINVAL;
  9686. }
  9687. -
  9688. + *(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
  9689. disable_irq(cpp_dev->irq->start);
  9690. cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
  9691. enable_irq(cpp_dev->irq->start);
  9692. @@ -1616,13 +1601,6 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9693. break;
  9694. }
  9695. case VIDIOC_MSM_CPP_CFG:
  9696. -
  9697. - if (ioctl_ptr->ioctl_ptr == NULL) {
  9698. - pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  9699. - mutex_unlock(&cpp_dev->mutex);
  9700. - return -EINVAL;
  9701. - }
  9702. -
  9703. rc = msm_cpp_cfg(cpp_dev, ioctl_ptr);
  9704. break;
  9705. case VIDIOC_MSM_CPP_FLUSH_QUEUE:
  9706. @@ -1639,12 +1617,6 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9707. return -EINVAL;
  9708. }
  9709.  
  9710. - if (ioctl_ptr->ioctl_ptr == NULL) {
  9711. - pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  9712. - mutex_unlock(&cpp_dev->mutex);
  9713. - return -EINVAL;
  9714. - }
  9715. -
  9716. u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
  9717. if (!u_stream_buff_info) {
  9718. pr_err("%s:%d: malloc error\n", __func__, __LINE__);
  9719. @@ -1662,21 +1634,28 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9720. return -EINVAL;
  9721. }
  9722.  
  9723. + if (u_stream_buff_info->num_buffs == 0) {
  9724. + pr_err("%s:%d: Invalid number of buffers\n", __func__,
  9725. + __LINE__);
  9726. + kfree(u_stream_buff_info);
  9727. + mutex_unlock(&cpp_dev->mutex);
  9728. + return -EINVAL;
  9729. + }
  9730. k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
  9731. k_stream_buff_info.identity = u_stream_buff_info->identity;
  9732. - k_stream_buff_info.buffer_info =
  9733. - kzalloc(k_stream_buff_info.num_buffs *
  9734. - sizeof(struct msm_cpp_buffer_info_t), GFP_KERNEL);
  9735. - if (!k_stream_buff_info.buffer_info) {
  9736. - pr_err("%s:%d: malloc error\n", __func__, __LINE__);
  9737. +
  9738. + if (k_stream_buff_info.num_buffs > MSM_CAMERA_MAX_STREAM_BUF) {
  9739. + pr_err("%s:%d: unexpected large num buff requested\n",
  9740. + __func__, __LINE__);
  9741. kfree(u_stream_buff_info);
  9742. mutex_unlock(&cpp_dev->mutex);
  9743. return -EINVAL;
  9744. }
  9745. -
  9746. - if (u_stream_buff_info->buffer_info == NULL) {
  9747. - pr_err("u_stream_buff_info->buffer_info is NULL\n");
  9748. - kfree(k_stream_buff_info.buffer_info);
  9749. + k_stream_buff_info.buffer_info =
  9750. + kzalloc(k_stream_buff_info.num_buffs *
  9751. + sizeof(struct msm_cpp_buffer_info_t), GFP_KERNEL);
  9752. + if (ZERO_OR_NULL_PTR(k_stream_buff_info.buffer_info)) {
  9753. + pr_err("%s:%d: malloc error\n", __func__, __LINE__);
  9754. kfree(u_stream_buff_info);
  9755. mutex_unlock(&cpp_dev->mutex);
  9756. return -EINVAL;
  9757. @@ -1700,6 +1679,7 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9758. ((k_stream_buff_info.identity >> 16) & 0xFFFF),
  9759. (k_stream_buff_info.identity & 0xFFFF));
  9760. }
  9761. +
  9762. if (!rc)
  9763. rc = msm_cpp_enqueue_buff_info_list(cpp_dev,
  9764. &k_stream_buff_info);
  9765. @@ -1707,65 +1687,23 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9766. kfree(k_stream_buff_info.buffer_info);
  9767. kfree(u_stream_buff_info);
  9768. if (cpp_dev->stream_cnt == 0) {
  9769. - struct msm_queue_cmd *frame_qcmd = NULL;
  9770. - struct msm_cpp_frame_info_t *processed_frame = NULL;
  9771. - struct msm_device_queue *queue = NULL;
  9772. - rc = msm_isp_update_bandwidth(ISP_CPP, 981345600, 1066680000);
  9773. - if (rc < 0) {
  9774. - pr_err("Bandwidth Set Failed!\n");
  9775. - msm_isp_update_bandwidth(ISP_CPP, 0, 0);
  9776. - mutex_unlock(&cpp_dev->mutex);
  9777. - return -EINVAL;
  9778. - }
  9779. cpp_dev->state = CPP_STATE_ACTIVE;
  9780. - cpp_dev->timeout_trial_cnt = 0;
  9781. - if (cpp_timers[0].used == 1) {
  9782. - del_timer(&cpp_timers[0].cpp_timer);
  9783. - cpp_timers[0].used = 0;
  9784. - cpp_timers[0].data.processed_frame = NULL;
  9785. - }
  9786. - if (cpp_timers[1].used == 1) {
  9787. - del_timer(&cpp_timers[1].cpp_timer);
  9788. - cpp_timers[1].used = 0;
  9789. - cpp_timers[1].data.processed_frame = NULL;
  9790. - }
  9791. - if (cpp_dev->processing_q.len) {
  9792. - queue = &cpp_dev->processing_q;
  9793. - frame_qcmd = msm_dequeue(queue, list_frame);
  9794. - if (frame_qcmd) {
  9795. - processed_frame = frame_qcmd->command;
  9796. - kfree(frame_qcmd);
  9797. - if (processed_frame)
  9798. - kfree(processed_frame->cpp_cmd_msg);
  9799. - kfree(processed_frame);
  9800. - }
  9801. - }
  9802. - del_timer_idx = 0;
  9803. - set_timer_idx = 0;
  9804. + msm_cpp_clear_timer(cpp_dev);
  9805. + msm_cpp_clean_queue(cpp_dev);
  9806. + }
  9807. + if (cmd != VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO) {
  9808. + cpp_dev->stream_cnt++;
  9809. + pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
  9810. }
  9811. - cpp_dev->stream_cnt++;
  9812. - pr_err("stream_cnt:%d\n", cpp_dev->stream_cnt);
  9813. break;
  9814. }
  9815. case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO: {
  9816. uint32_t identity;
  9817. struct msm_cpp_buff_queue_info_t *buff_queue_info;
  9818. - struct msm_queue_cmd *frame_qcmd = NULL;
  9819. - struct msm_cpp_frame_info_t *processed_frame = NULL;
  9820. - struct msm_device_queue *queue = NULL;
  9821. -
  9822. - if (ioctl_ptr->ioctl_ptr == NULL) {
  9823. - pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  9824. - mutex_unlock(&cpp_dev->mutex);
  9825. - return -EINVAL;
  9826. - }
  9827.  
  9828. if ((ioctl_ptr->len == 0) ||
  9829. - (ioctl_ptr->len > sizeof(uint32_t))) {
  9830. - pr_err("ioctl_ptr->len is wrong : %d\n", ioctl_ptr->len);
  9831. - mutex_unlock(&cpp_dev->mutex);
  9832. + (ioctl_ptr->len > sizeof(uint32_t)))
  9833. return -EINVAL;
  9834. - }
  9835.  
  9836. rc = (copy_from_user(&identity,
  9837. (void __user *)ioctl_ptr->ioctl_ptr,
  9838. @@ -1791,40 +1729,18 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9839. buff_queue_info->stream_id);
  9840. if (cpp_dev->stream_cnt > 0) {
  9841. cpp_dev->stream_cnt--;
  9842. - pr_err("stream_cnt:%d\n", cpp_dev->stream_cnt);
  9843. + pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
  9844. if (cpp_dev->stream_cnt == 0) {
  9845. rc = msm_isp_update_bandwidth(ISP_CPP, 0, 0);
  9846. if (rc < 0)
  9847. pr_err("Bandwidth Reset Failed!\n");
  9848. - cpp_dev->state = CPP_STATE_IDLE;
  9849. - cpp_dev->timeout_trial_cnt = 0;
  9850. - if (cpp_timers[0].used == 1) {
  9851. - del_timer(&cpp_timers[0].cpp_timer);
  9852. - cpp_timers[0].used = 0;
  9853. - cpp_timers[0].data.processed_frame = NULL;
  9854. - }
  9855. - if (cpp_timers[1].used == 1) {
  9856. - del_timer(&cpp_timers[1].cpp_timer);
  9857. - cpp_timers[1].used = 0;
  9858. - cpp_timers[1].data.processed_frame = NULL;
  9859. - }
  9860. -
  9861. - if (cpp_dev->processing_q.len) {
  9862. - queue = &cpp_dev->processing_q;
  9863. - frame_qcmd = msm_dequeue(queue, list_frame);
  9864. - if (frame_qcmd) {
  9865. - processed_frame = frame_qcmd->command;
  9866. - kfree(frame_qcmd);
  9867. - if (processed_frame)
  9868. - kfree(processed_frame->cpp_cmd_msg);
  9869. - kfree(processed_frame);
  9870. - }
  9871. - }
  9872. - del_timer_idx = 0;
  9873. - set_timer_idx = 0;
  9874. + cpp_dev->state = CPP_STATE_IDLE;
  9875. + msm_cpp_clear_timer(cpp_dev);
  9876. + msm_cpp_clean_queue(cpp_dev);
  9877. }
  9878. } else {
  9879. - pr_err("error: stream count underflow %d\n", cpp_dev->stream_cnt);
  9880. + pr_err("error: stream count underflow %d\n",
  9881. + cpp_dev->stream_cnt);
  9882. }
  9883. break;
  9884. }
  9885. @@ -1833,31 +1749,36 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9886. struct msm_queue_cmd *event_qcmd;
  9887. struct msm_cpp_frame_info_t *process_frame;
  9888. event_qcmd = msm_dequeue(queue, list_eventdata);
  9889. - process_frame = event_qcmd->command;
  9890. - CPP_DBG("fid %d\n", process_frame->frame_id);
  9891. -
  9892. - if (ioctl_ptr->ioctl_ptr == NULL) {
  9893. - pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  9894. - mutex_unlock(&cpp_dev->mutex);
  9895. - return -EINVAL;
  9896. - }
  9897. + if (event_qcmd) {
  9898. + process_frame = event_qcmd->command;
  9899. + CPP_DBG("fid %d\n", process_frame->frame_id);
  9900. + if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
  9901. + process_frame,
  9902. + sizeof(struct msm_cpp_frame_info_t))) {
  9903. + kfree(process_frame->cpp_cmd_msg);
  9904. + process_frame->cpp_cmd_msg = NULL;
  9905. + kfree(process_frame);
  9906. + process_frame = NULL;
  9907. + kfree(event_qcmd);
  9908. + event_qcmd = NULL;
  9909. + mutex_unlock(&cpp_dev->mutex);
  9910. + return -EINVAL;
  9911. + }
  9912.  
  9913. - if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
  9914. - process_frame,
  9915. - sizeof(struct msm_cpp_frame_info_t))) {
  9916. - mutex_unlock(&cpp_dev->mutex);
  9917. - return -EINVAL;
  9918. + kfree(process_frame->cpp_cmd_msg);
  9919. + kfree(process_frame);
  9920. + kfree(event_qcmd);
  9921. + } else {
  9922. + pr_err("Empty command list\n");
  9923. + return -EFAULT;
  9924. }
  9925. -
  9926. - kfree(process_frame->cpp_cmd_msg);
  9927. - kfree(process_frame);
  9928. - kfree(event_qcmd);
  9929. break;
  9930. }
  9931. -
  9932. case VIDIOC_MSM_CPP_SET_CLOCK: {
  9933. - long clock_rate = 0;
  9934. - if (ioctl_ptr->len == 0 || (ioctl_ptr->len > sizeof(long))) {
  9935. + struct msm_cpp_clock_settings_t clock_settings;
  9936. + unsigned long clock_rate = 0;
  9937. + CPP_DBG("VIDIOC_MSM_CPP_SET_CLOCK\n");
  9938. + if (ioctl_ptr->len == 0) {
  9939. pr_err("ioctl_ptr->len is 0\n");
  9940. mutex_unlock(&cpp_dev->mutex);
  9941. return -EINVAL;
  9942. @@ -1869,7 +1790,13 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9943. return -EINVAL;
  9944. }
  9945.  
  9946. - rc = (copy_from_user(&clock_rate,
  9947. + if (ioctl_ptr->len != sizeof(struct msm_cpp_clock_settings_t)) {
  9948. + pr_err("Not valid ioctl_ptr->len\n");
  9949. + mutex_unlock(&cpp_dev->mutex);
  9950. + return -EINVAL;
  9951. + }
  9952. +
  9953. + rc = (copy_from_user(&clock_settings,
  9954. (void __user *)ioctl_ptr->ioctl_ptr,
  9955. ioctl_ptr->len) ? -EFAULT : 0);
  9956. if (rc) {
  9957. @@ -1878,22 +1805,67 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
  9958. return -EINVAL;
  9959. }
  9960.  
  9961. - if ((clock_rate == MSM_CPP_NOMINAL_CLOCK) ||
  9962. - (clock_rate == MSM_CPP_TURBO_CLOCK)) {
  9963. - pr_err("clk:%ld\n", clock_rate);
  9964. - clk_set_rate(cpp_dev->cpp_clk[4], clock_rate);
  9965. + if (clock_settings.clock_rate > 0) {
  9966. + rc = msm_isp_update_bandwidth(ISP_CPP,
  9967. + clock_settings.avg,
  9968. + clock_settings.inst);
  9969. + if (rc < 0) {
  9970. + pr_err("Bandwidth Set Failed!\n");
  9971. + msm_isp_update_bandwidth(ISP_CPP, 0, 0);
  9972. + mutex_unlock(&cpp_dev->mutex);
  9973. + return -EINVAL;
  9974. + }
  9975. + clock_rate = clk_round_rate(
  9976. + cpp_dev->cpp_clk[MSM_CPP_CORE_CLK_IDX],
  9977. + clock_settings.clock_rate);
  9978. + if (clock_rate != clock_settings.clock_rate)
  9979. + pr_err("clock rate differ from settings\n");
  9980. + clk_set_rate(cpp_dev->cpp_clk[MSM_CPP_CORE_CLK_IDX],
  9981. + clock_rate);
  9982. }
  9983. -
  9984. break;
  9985. }
  9986. case MSM_SD_SHUTDOWN: {
  9987. - msm_cpp_empty_list_eventdata(&cpp_dev->eventData_q);
  9988. mutex_unlock(&cpp_dev->mutex);
  9989. + pr_info("shutdown cpp node. open cnt:%d\n",
  9990. + cpp_dev->cpp_open_cnt);
  9991. +
  9992. + if (atomic_read(&cpp_timer.used))
  9993. + pr_info("Timer state not cleared\n");
  9994. +
  9995. while (cpp_dev->cpp_open_cnt != 0)
  9996. cpp_close_node(sd, NULL);
  9997. + mutex_lock(&cpp_dev->mutex);
  9998. rc = 0;
  9999. break;
  10000. }
  10001. + case VIDIOC_MSM_CPP_QUEUE_BUF: {
  10002. + struct msm_pproc_queue_buf_info queue_buf_info;
  10003. + rc = (copy_from_user(&queue_buf_info,
  10004. + (void __user *)ioctl_ptr->ioctl_ptr,
  10005. + sizeof(struct msm_pproc_queue_buf_info)) ?
  10006. + -EFAULT : 0);
  10007. + if (rc) {
  10008. + ERR_COPY_FROM_USER();
  10009. + break;
  10010. + }
  10011. +
  10012. + if (queue_buf_info.is_buf_dirty) {
  10013. + rc = msm_cpp_buffer_ops(cpp_dev,
  10014. + VIDIOC_MSM_BUF_MNGR_PUT_BUF,
  10015. + &queue_buf_info.buff_mgr_info);
  10016. + } else {
  10017. + rc = msm_cpp_buffer_ops(cpp_dev,
  10018. + VIDIOC_MSM_BUF_MNGR_BUF_DONE,
  10019. + &queue_buf_info.buff_mgr_info);
  10020. + }
  10021. + if (rc < 0) {
  10022. + pr_err("error in buf done\n");
  10023. + rc = -EINVAL;
  10024. + }
  10025. +
  10026. + break;
  10027. + }
  10028. }
  10029. mutex_unlock(&cpp_dev->mutex);
  10030. CPP_DBG("X\n");
  10031. @@ -1951,23 +1923,13 @@ static long msm_cpp_subdev_do_ioctl(
  10032. struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
  10033. struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
  10034. struct msm_cpp_frame_info_t inst_info;
  10035. + memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info_t));
  10036. for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
  10037. if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
  10038. inst_info.inst_id = i;
  10039. break;
  10040. }
  10041. }
  10042. -
  10043. - if (ioctl_ptr == NULL) {
  10044. - pr_err("ioctl_ptr is null\n");
  10045. - return -EINVAL;
  10046. - }
  10047. -
  10048. - if (ioctl_ptr->ioctl_ptr == NULL) {
  10049. - pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
  10050. - return -EINVAL;
  10051. - }
  10052. -
  10053. if (copy_to_user(
  10054. (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
  10055. sizeof(struct msm_cpp_frame_info_t))) {
  10056. @@ -2004,17 +1966,12 @@ static int cpp_register_domain(void)
  10057. return msm_register_domain(&cpp_fw_layout);
  10058. }
  10059.  
  10060. +
  10061. static int __devinit cpp_probe(struct platform_device *pdev)
  10062. {
  10063. struct cpp_device *cpp_dev;
  10064. int rc = 0;
  10065.  
  10066. - if (poweroff_charging == 1)
  10067. - {
  10068. - pr_err("forced return cpp_probe at lpm mode\n");
  10069. - return rc;
  10070. - }
  10071. -
  10072. cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
  10073. if (!cpp_dev) {
  10074. pr_err("no enough memory\n");
  10075. @@ -2102,9 +2059,9 @@ static int __devinit cpp_probe(struct platform_device *pdev)
  10076. }
  10077.  
  10078. cpp_dev->iommu_ctx = msm_iommu_get_ctx("cpp");
  10079. - if (!cpp_dev->iommu_ctx) {
  10080. + if (IS_ERR(cpp_dev->iommu_ctx)) {
  10081. pr_err("%s: cannot get iommu_ctx\n", __func__);
  10082. - rc = -ENODEV;
  10083. + rc = -EPROBE_DEFER;
  10084. goto ERROR3;
  10085. }
  10086.  
  10087. @@ -2124,7 +2081,6 @@ static int __devinit cpp_probe(struct platform_device *pdev)
  10088. cpp_dev->msm_sd.sd.entity.revision = cpp_dev->msm_sd.sd.devnode->num;
  10089. cpp_dev->state = CPP_STATE_BOOT;
  10090. cpp_init_hardware(cpp_dev);
  10091. - iommu_attach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
  10092.  
  10093. msm_camera_io_w(0x0, cpp_dev->base +
  10094. MSM_CPP_MICRO_IRQGEN_MASK);
  10095. @@ -2141,24 +2097,21 @@ static int __devinit cpp_probe(struct platform_device *pdev)
  10096. tasklet_init(&cpp_dev->cpp_tasklet, msm_cpp_do_tasklet,
  10097. (unsigned long)cpp_dev);
  10098. cpp_dev->timer_wq = create_workqueue("msm_cpp_workqueue");
  10099. - if(!cpp_dev->timer_wq) {
  10100. - pr_err("%s: cannot create msm_cpp_workqueue\n", __func__);
  10101. - rc = -EINVAL;
  10102. + cpp_dev->work = kmalloc(sizeof(struct msm_cpp_work_t),
  10103. + GFP_KERNEL);
  10104. + if (!cpp_dev->work) {
  10105. + pr_err("cpp_dev->work is NULL\n");
  10106. + rc = -ENOMEM;
  10107. goto ERROR3;
  10108. }
  10109. - cpp_dev->work =
  10110. - (struct msm_cpp_work_t *)kmalloc(sizeof(struct msm_cpp_work_t),
  10111. - GFP_KERNEL);
  10112. +
  10113. INIT_WORK((struct work_struct *)cpp_dev->work, msm_cpp_do_timeout_work);
  10114. cpp_dev->cpp_open_cnt = 0;
  10115. cpp_dev->is_firmware_loaded = 0;
  10116. - cpp_timers[0].data.cpp_dev = cpp_dev;
  10117. - cpp_timers[1].data.cpp_dev = cpp_dev;
  10118. - cpp_timers[0].used = 0;
  10119. - cpp_timers[1].used = 0;
  10120. + cpp_timer.data.cpp_dev = cpp_dev;
  10121. + atomic_set(&cpp_timer.used, 0);
  10122. cpp_dev->fw_name_bin = NULL;
  10123. return rc;
  10124. -
  10125. ERROR3:
  10126. release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
  10127. ERROR2:
  10128. @@ -2188,7 +2141,6 @@ static int cpp_device_remove(struct platform_device *dev)
  10129. return 0;
  10130. }
  10131.  
  10132. - iommu_detach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
  10133. msm_sd_unregister(&cpp_dev->msm_sd);
  10134. release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
  10135. release_mem_region(cpp_dev->vbif_mem->start,
  10136. diff --git a/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c b/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c
  10137. new file mode 100644
  10138. index 0000000..717771d
  10139. --- /dev/null
  10140. +++ b/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c
  10141. @@ -0,0 +1,1666 @@
  10142. +/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  10143. + *
  10144. + * This program is free software; you can redistribute it and/or modify
  10145. + * it under the terms of the GNU General Public License version 2 and
  10146. + * only version 2 as published by the Free Software Foundation.
  10147. + *
  10148. + * This program is distributed in the hope that it will be useful,
  10149. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10150. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10151. + * GNU General Public License for more details.
  10152. + */
  10153. +
  10154. +#define pr_fmt(fmt) "MSM-VPE %s:%d " fmt, __func__, __LINE__
  10155. +
  10156. +#include <linux/module.h>
  10157. +#include <linux/mutex.h>
  10158. +#include <linux/videodev2.h>
  10159. +#include <linux/msm_ion.h>
  10160. +#include <linux/iommu.h>
  10161. +#include <mach/iommu_domains.h>
  10162. +#include <mach/iommu.h>
  10163. +#include <media/v4l2-dev.h>
  10164. +#include <media/v4l2-event.h>
  10165. +#include <media/v4l2-fh.h>
  10166. +#include <media/v4l2-ioctl.h>
  10167. +#include <media/v4l2-subdev.h>
  10168. +#include <media/media-entity.h>
  10169. +#include <media/msmb_generic_buf_mgr.h>
  10170. +#include <media/msmb_pproc.h>
  10171. +#include "msm_vpe.h"
  10172. +#include "msm_camera_io_util.h"
  10173. +
  10174. +#define MSM_VPE_IDENT_TO_SESSION_ID(identity) ((identity >> 16) & 0xFFFF)
  10175. +#define MSM_VPE_IDENT_TO_STREAM_ID(identity) (identity & 0xFFFF)
  10176. +
  10177. +#define MSM_VPE_DRV_NAME "msm_vpe"
  10178. +
  10179. +#define MSM_VPE_MAX_BUFF_QUEUE 16
  10180. +
  10181. +#define CONFIG_MSM_VPE_DBG 0
  10182. +
  10183. +#if CONFIG_MSM_VPE_DBG
  10184. +#define VPE_DBG(fmt, args...) pr_err(fmt, ##args)
  10185. +#else
  10186. +#define VPE_DBG(fmt, args...) pr_debug(fmt, ##args)
  10187. +#endif
  10188. +
  10189. +static void vpe_mem_dump(const char * const name, const void * const addr,
  10190. + int size)
  10191. +{
  10192. + char line_str[128], *p_str;
  10193. + int i;
  10194. + u32 *p = (u32 *) addr;
  10195. + u32 data;
  10196. + VPE_DBG("%s: (%s) %p %d\n", __func__, name, addr, size);
  10197. + line_str[0] = '\0';
  10198. + p_str = line_str;
  10199. + for (i = 0; i < size/4; i++) {
  10200. + if (i % 4 == 0) {
  10201. + snprintf(p_str, 12, "%08x: ", (u32) p);
  10202. + p_str += 10;
  10203. + }
  10204. + data = *p++;
  10205. + snprintf(p_str, 12, "%08x ", data);
  10206. + p_str += 9;
  10207. + if ((i + 1) % 4 == 0) {
  10208. + VPE_DBG("%s\n", line_str);
  10209. + line_str[0] = '\0';
  10210. + p_str = line_str;
  10211. + }
  10212. + }
  10213. + if (line_str[0] != '\0')
  10214. + VPE_DBG("%s\n", line_str);
  10215. +}
  10216. +
  10217. +static inline long long vpe_do_div(long long num, long long den)
  10218. +{
  10219. + do_div(num, den);
  10220. + return num;
  10221. +}
  10222. +
  10223. +#define msm_dequeue(queue, member) ({ \
  10224. + unsigned long flags; \
  10225. + struct msm_device_queue *__q = (queue); \
  10226. + struct msm_queue_cmd *qcmd = 0; \
  10227. + spin_lock_irqsave(&__q->lock, flags); \
  10228. + if (!list_empty(&__q->list)) { \
  10229. + __q->len--; \
  10230. + qcmd = list_first_entry(&__q->list, \
  10231. + struct msm_queue_cmd, \
  10232. + member); \
  10233. + list_del_init(&qcmd->member); \
  10234. + } \
  10235. + spin_unlock_irqrestore(&__q->lock, flags); \
  10236. + qcmd; \
  10237. + })
  10238. +
  10239. +static void msm_queue_init(struct msm_device_queue *queue, const char *name)
  10240. +{
  10241. + spin_lock_init(&queue->lock);
  10242. + queue->len = 0;
  10243. + queue->max = 0;
  10244. + queue->name = name;
  10245. + INIT_LIST_HEAD(&queue->list);
  10246. + init_waitqueue_head(&queue->wait);
  10247. +}
  10248. +
  10249. +static struct msm_cam_clk_info vpe_clk_info[] = {
  10250. + {"vpe_clk", 160000000},
  10251. + {"vpe_pclk", -1},
  10252. +};
  10253. +
  10254. +static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev);
  10255. +
  10256. +static void msm_enqueue(struct msm_device_queue *queue,
  10257. + struct list_head *entry)
  10258. +{
  10259. + unsigned long flags;
  10260. + spin_lock_irqsave(&queue->lock, flags);
  10261. + queue->len++;
  10262. + if (queue->len > queue->max) {
  10263. + queue->max = queue->len;
  10264. + pr_debug("queue %s new max is %d\n", queue->name, queue->max);
  10265. + }
  10266. + list_add_tail(entry, &queue->list);
  10267. + wake_up(&queue->wait);
  10268. + VPE_DBG("woke up %s\n", queue->name);
  10269. + spin_unlock_irqrestore(&queue->lock, flags);
  10270. +}
  10271. +
  10272. +static struct msm_vpe_buff_queue_info_t *msm_vpe_get_buff_queue_entry(
  10273. + struct vpe_device *vpe_dev, uint32_t session_id, uint32_t stream_id)
  10274. +{
  10275. + uint32_t i = 0;
  10276. + struct msm_vpe_buff_queue_info_t *buff_queue_info = NULL;
  10277. +
  10278. + for (i = 0; i < vpe_dev->num_buffq; i++) {
  10279. + if ((vpe_dev->buff_queue[i].used == 1) &&
  10280. + (vpe_dev->buff_queue[i].session_id == session_id) &&
  10281. + (vpe_dev->buff_queue[i].stream_id == stream_id)) {
  10282. + buff_queue_info = &vpe_dev->buff_queue[i];
  10283. + break;
  10284. + }
  10285. + }
  10286. +
  10287. + if (buff_queue_info == NULL) {
  10288. + pr_err("error buffer queue entry for sess:%d strm:%d not found\n",
  10289. + session_id, stream_id);
  10290. + }
  10291. + return buff_queue_info;
  10292. +}
  10293. +
  10294. +static unsigned long msm_vpe_get_phy_addr(struct vpe_device *vpe_dev,
  10295. + struct msm_vpe_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
  10296. + uint8_t native_buff)
  10297. +{
  10298. + unsigned long phy_add = 0;
  10299. + struct list_head *buff_head;
  10300. + struct msm_vpe_buffer_map_list_t *buff, *save;
  10301. +
  10302. + if (native_buff)
  10303. + buff_head = &buff_queue_info->native_buff_head;
  10304. + else
  10305. + buff_head = &buff_queue_info->vb2_buff_head;
  10306. +
  10307. + list_for_each_entry_safe(buff, save, buff_head, entry) {
  10308. + if (buff->map_info.buff_info.index == buff_index) {
  10309. + phy_add = buff->map_info.phy_addr;
  10310. + break;
  10311. + }
  10312. + }
  10313. +
  10314. + return phy_add;
  10315. +}
  10316. +
  10317. +static unsigned long msm_vpe_queue_buffer_info(struct vpe_device *vpe_dev,
  10318. + struct msm_vpe_buff_queue_info_t *buff_queue,
  10319. + struct msm_vpe_buffer_info_t *buffer_info)
  10320. +{
  10321. + struct list_head *buff_head;
  10322. + struct msm_vpe_buffer_map_list_t *buff, *save;
  10323. + int rc = 0;
  10324. +
  10325. + if (buffer_info->native_buff)
  10326. + buff_head = &buff_queue->native_buff_head;
  10327. + else
  10328. + buff_head = &buff_queue->vb2_buff_head;
  10329. +
  10330. + list_for_each_entry_safe(buff, save, buff_head, entry) {
  10331. + if (buff->map_info.buff_info.index == buffer_info->index) {
  10332. + pr_err("error buffer index already queued\n");
  10333. + return -EINVAL;
  10334. + }
  10335. + }
  10336. +
  10337. + buff = kzalloc(
  10338. + sizeof(struct msm_vpe_buffer_map_list_t), GFP_KERNEL);
  10339. + if (!buff) {
  10340. + pr_err("error allocating memory\n");
  10341. + return -EINVAL;
  10342. + }
  10343. +
  10344. + buff->map_info.buff_info = *buffer_info;
  10345. + buff->map_info.ion_handle = ion_import_dma_buf(vpe_dev->client,
  10346. + buffer_info->fd);
  10347. + if (IS_ERR_OR_NULL(buff->map_info.ion_handle)) {
  10348. + pr_err("ION import failed\n");
  10349. + goto queue_buff_error1;
  10350. + }
  10351. +
  10352. + rc = ion_map_iommu(vpe_dev->client, buff->map_info.ion_handle,
  10353. + vpe_dev->domain_num, 0, SZ_4K, 0,
  10354. + (unsigned long *)&buff->map_info.phy_addr,
  10355. + &buff->map_info.len, 0, 0);
  10356. + if (rc < 0) {
  10357. + pr_err("ION mmap failed\n");
  10358. + goto queue_buff_error2;
  10359. + }
  10360. +
  10361. + INIT_LIST_HEAD(&buff->entry);
  10362. + list_add_tail(&buff->entry, buff_head);
  10363. +
  10364. + return buff->map_info.phy_addr;
  10365. +
  10366. +queue_buff_error2:
  10367. + ion_unmap_iommu(vpe_dev->client, buff->map_info.ion_handle,
  10368. + vpe_dev->domain_num, 0);
  10369. +queue_buff_error1:
  10370. + ion_free(vpe_dev->client, buff->map_info.ion_handle);
  10371. + buff->map_info.ion_handle = NULL;
  10372. + kzfree(buff);
  10373. +
  10374. + return 0;
  10375. +}
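A note on naming in msm_vpe_queue_buffer_info() above: despite the field name, map_info.phy_addr holds the device-visible address returned by ion_map_iommu() for the imported dma-buf (an IOVA in the "camera_vpe" IOMMU domain registered later in this file), not a raw physical address; it is the value that later gets programmed into the VPE source and output address registers.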
  10376. +
  10377. +static void msm_vpe_dequeue_buffer_info(struct vpe_device *vpe_dev,
  10378. + struct msm_vpe_buffer_map_list_t *buff)
  10379. +{
  10380. + ion_unmap_iommu(vpe_dev->client, buff->map_info.ion_handle,
  10381. + vpe_dev->domain_num, 0);
  10382. + ion_free(vpe_dev->client, buff->map_info.ion_handle);
  10383. + buff->map_info.ion_handle = NULL;
  10384. +
  10385. + list_del_init(&buff->entry);
  10386. + kzfree(buff);
  10387. +
  10388. + return;
  10389. +}
  10390. +
  10391. +static unsigned long msm_vpe_fetch_buffer_info(struct vpe_device *vpe_dev,
  10392. + struct msm_vpe_buffer_info_t *buffer_info, uint32_t session_id,
  10393. + uint32_t stream_id)
  10394. +{
  10395. + unsigned long phy_addr = 0;
  10396. + struct msm_vpe_buff_queue_info_t *buff_queue_info;
  10397. + uint8_t native_buff = buffer_info->native_buff;
  10398. +
  10399. + buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev, session_id,
  10400. + stream_id);
  10401. + if (buff_queue_info == NULL) {
  10402. + pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
  10403. + session_id, stream_id);
  10404. + return phy_addr;
  10405. + }
  10406. +
  10407. + phy_addr = msm_vpe_get_phy_addr(vpe_dev, buff_queue_info,
  10408. + buffer_info->index, native_buff);
  10409. + if ((phy_addr == 0) && (native_buff)) {
  10410. + phy_addr = msm_vpe_queue_buffer_info(vpe_dev, buff_queue_info,
  10411. + buffer_info);
  10412. + }
  10413. + return phy_addr;
  10414. +}
  10415. +
  10416. +static int32_t msm_vpe_enqueue_buff_info_list(struct vpe_device *vpe_dev,
  10417. + struct msm_vpe_stream_buff_info_t *stream_buff_info)
  10418. +{
  10419. + uint32_t j;
  10420. + struct msm_vpe_buff_queue_info_t *buff_queue_info;
  10421. +
  10422. + buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
  10423. + (stream_buff_info->identity >> 16) & 0xFFFF,
  10424. + stream_buff_info->identity & 0xFFFF);
  10425. + if (buff_queue_info == NULL) {
  10426. + pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
  10427. + (stream_buff_info->identity >> 16) & 0xFFFF,
  10428. + stream_buff_info->identity & 0xFFFF);
  10429. + return -EINVAL;
  10430. + }
  10431. +
  10432. + for (j = 0; j < stream_buff_info->num_buffs; j++) {
  10433. + msm_vpe_queue_buffer_info(vpe_dev, buff_queue_info,
  10434. + &stream_buff_info->buffer_info[j]);
  10435. + }
  10436. + return 0;
  10437. +}
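For clarity, a small standalone sketch (not from the patch) of the 32-bit "identity" convention used above and throughout the driver: the session id lives in the upper 16 bits and the stream id in the lower 16 bits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t session_id = 2, stream_id = 7;   /* hypothetical example values */
	uint32_t identity = ((uint32_t)session_id << 16) | stream_id;

	printf("identity=0x%08x sess=%u strm=%u\n", identity,
	       (identity >> 16) & 0xFFFF, identity & 0xFFFF);
	return 0;
}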
  10438. +
  10439. +static int32_t msm_vpe_dequeue_buff_info_list(struct vpe_device *vpe_dev,
  10440. + struct msm_vpe_buff_queue_info_t *buff_queue_info)
  10441. +{
  10442. + struct msm_vpe_buffer_map_list_t *buff, *save;
  10443. + struct list_head *buff_head;
  10444. +
  10445. + buff_head = &buff_queue_info->native_buff_head;
  10446. + list_for_each_entry_safe(buff, save, buff_head, entry) {
  10447. + msm_vpe_dequeue_buffer_info(vpe_dev, buff);
  10448. + }
  10449. +
  10450. + buff_head = &buff_queue_info->vb2_buff_head;
  10451. + list_for_each_entry_safe(buff, save, buff_head, entry) {
  10452. + msm_vpe_dequeue_buffer_info(vpe_dev, buff);
  10453. + }
  10454. +
  10455. + return 0;
  10456. +}
  10457. +
  10458. +static int32_t msm_vpe_add_buff_queue_entry(struct vpe_device *vpe_dev,
  10459. + uint16_t session_id, uint16_t stream_id)
  10460. +{
  10461. + uint32_t i;
  10462. + struct msm_vpe_buff_queue_info_t *buff_queue_info;
  10463. +
  10464. + for (i = 0; i < vpe_dev->num_buffq; i++) {
  10465. + if (vpe_dev->buff_queue[i].used == 0) {
  10466. + buff_queue_info = &vpe_dev->buff_queue[i];
  10467. + buff_queue_info->used = 1;
  10468. + buff_queue_info->session_id = session_id;
  10469. + buff_queue_info->stream_id = stream_id;
  10470. + INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
  10471. + INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
  10472. + return 0;
  10473. + }
  10474. + }
  10475. + pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
  10476. + session_id, stream_id);
  10477. + return -EINVAL;
  10478. +}
  10479. +
  10480. +static int32_t msm_vpe_free_buff_queue_entry(struct vpe_device *vpe_dev,
  10481. + uint32_t session_id, uint32_t stream_id)
  10482. +{
  10483. + struct msm_vpe_buff_queue_info_t *buff_queue_info;
  10484. +
  10485. + buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev, session_id,
  10486. + stream_id);
  10487. + if (buff_queue_info == NULL) {
  10488. + pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
  10489. + session_id, stream_id);
  10490. + return -EINVAL;
  10491. + }
  10492. +
  10493. + buff_queue_info->used = 0;
  10494. + buff_queue_info->session_id = 0;
  10495. + buff_queue_info->stream_id = 0;
  10496. + INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
  10497. + INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
  10498. + return 0;
  10499. +}
  10500. +
  10501. +static int32_t msm_vpe_create_buff_queue(struct vpe_device *vpe_dev,
  10502. + uint32_t num_buffq)
  10503. +{
  10504. + struct msm_vpe_buff_queue_info_t *buff_queue;
  10505. + buff_queue = kzalloc(
  10506. + sizeof(struct msm_vpe_buff_queue_info_t) * num_buffq,
  10507. + GFP_KERNEL);
  10508. + if (!buff_queue) {
  10509. + pr_err("Buff queue allocation failure\n");
  10510. + return -ENOMEM;
  10511. + }
  10512. +
  10513. + if (vpe_dev->buff_queue) {
  10514. + pr_err("Buff queue not empty\n");
  10515. + kzfree(buff_queue);
  10516. + return -EINVAL;
  10517. + } else {
  10518. + vpe_dev->buff_queue = buff_queue;
  10519. + vpe_dev->num_buffq = num_buffq;
  10520. + }
  10521. + return 0;
  10522. +}
  10523. +
  10524. +static void msm_vpe_delete_buff_queue(struct vpe_device *vpe_dev)
  10525. +{
  10526. + uint32_t i;
  10527. +
  10528. + for (i = 0; i < vpe_dev->num_buffq; i++) {
  10529. + if (vpe_dev->buff_queue[i].used == 1) {
  10530. + pr_err("Queue not free sessionid: %d, streamid: %d\n",
  10531. + vpe_dev->buff_queue[i].session_id,
  10532. + vpe_dev->buff_queue[i].stream_id);
  10533. + msm_vpe_free_buff_queue_entry(vpe_dev,
  10534. + vpe_dev->buff_queue[i].session_id,
  10535. + vpe_dev->buff_queue[i].stream_id);
  10536. + }
  10537. + }
  10538. + kzfree(vpe_dev->buff_queue);
  10539. + vpe_dev->buff_queue = NULL;
  10540. + vpe_dev->num_buffq = 0;
  10541. + return;
  10542. +}
  10543. +
  10544. +void vpe_release_ion_client(struct kref *ref)
  10545. +{
  10546. + struct vpe_device *vpe_dev = container_of(ref,
  10547. + struct vpe_device, refcount);
  10548. + ion_client_destroy(vpe_dev->client);
  10549. +}
  10550. +
  10551. +static int vpe_init_mem(struct vpe_device *vpe_dev)
  10552. +{
  10553. + kref_init(&vpe_dev->refcount);
  10554. + kref_get(&vpe_dev->refcount);
  10555. + vpe_dev->client = msm_ion_client_create(-1, "vpe");
  10556. +
  10557. + if (!vpe_dev->client) {
  10558. + pr_err("couldn't create ion client\n");
  10559. + return -ENODEV;
  10560. + }
  10561. +
  10562. + return 0;
  10563. +}
  10564. +
  10565. +static void vpe_deinit_mem(struct vpe_device *vpe_dev)
  10566. +{
  10567. + kref_put(&vpe_dev->refcount, vpe_release_ion_client);
  10568. +}
  10569. +
  10570. +static irqreturn_t msm_vpe_irq(int irq_num, void *data)
  10571. +{
  10572. + unsigned long flags;
  10573. + uint32_t irq_status;
  10574. + struct msm_vpe_tasklet_queue_cmd *queue_cmd;
  10575. + struct vpe_device *vpe_dev = (struct vpe_device *) data;
  10576. +
  10577. + irq_status = msm_camera_io_r_mb(vpe_dev->base +
  10578. + VPE_INTR_STATUS_OFFSET);
  10579. +
  10580. + spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
  10581. + queue_cmd = &vpe_dev->tasklet_queue_cmd[vpe_dev->taskletq_idx];
  10582. + if (queue_cmd->cmd_used) {
  10583. + VPE_DBG("%s: vpe tasklet queue overflow\n", __func__);
  10584. + list_del(&queue_cmd->list);
  10585. + } else {
  10586. + atomic_add(1, &vpe_dev->irq_cnt);
  10587. + }
  10588. + queue_cmd->irq_status = irq_status;
  10589. +
  10590. + queue_cmd->cmd_used = 1;
  10591. + vpe_dev->taskletq_idx =
  10592. + (vpe_dev->taskletq_idx + 1) % MSM_VPE_TASKLETQ_SIZE;
  10593. + list_add_tail(&queue_cmd->list, &vpe_dev->tasklet_q);
  10594. + spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
  10595. +
  10596. + tasklet_schedule(&vpe_dev->vpe_tasklet);
  10597. +
  10598. + msm_camera_io_w_mb(irq_status, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
  10599. + msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
  10600. + VPE_DBG("%s: irq_status=0x%x.\n", __func__, irq_status);
  10601. +
  10602. + return IRQ_HANDLED;
  10603. +}
  10604. +
  10605. +static void msm_vpe_do_tasklet(unsigned long data)
  10606. +{
  10607. + unsigned long flags;
  10608. + struct vpe_device *vpe_dev = (struct vpe_device *)data;
  10609. + struct msm_vpe_tasklet_queue_cmd *queue_cmd;
  10610. +
  10611. + while (atomic_read(&vpe_dev->irq_cnt)) {
  10612. + spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
  10613. + queue_cmd = list_first_entry(&vpe_dev->tasklet_q,
  10614. + struct msm_vpe_tasklet_queue_cmd, list);
  10615. + if (!queue_cmd) {
  10616. + atomic_set(&vpe_dev->irq_cnt, 0);
  10617. + spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
  10618. + return;
  10619. + }
  10620. + atomic_sub(1, &vpe_dev->irq_cnt);
  10621. + list_del(&queue_cmd->list);
  10622. + queue_cmd->cmd_used = 0;
  10623. +
  10624. + spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
  10625. +
  10626. + VPE_DBG("Frame done!!\n");
  10627. + msm_vpe_notify_frame_done(vpe_dev);
  10628. + }
  10629. +}
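The IRQ handler above runs as the top half: it snapshots VPE_INTR_STATUS into a slot of the fixed tasklet_queue_cmd ring, bumps irq_cnt, and schedules the tasklet, which later drains the ring and calls msm_vpe_notify_frame_done(). A trivial standalone sketch of the wrap-around index math follows (the real ring depth MSM_VPE_TASKLETQ_SIZE is defined in the driver headers; 8 here is only an assumption):

#include <stdio.h>

#define MSM_VPE_TASKLETQ_SIZE 8   /* assumed depth, for illustration only */

int main(void)
{
	unsigned int idx = 0;
	int i;

	for (i = 0; i < 10; i++) {
		printf("irq %d -> ring slot %u\n", i, idx);
		idx = (idx + 1) % MSM_VPE_TASKLETQ_SIZE;  /* same wrap as taskletq_idx */
	}
	return 0;
}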
  10630. +
  10631. +static int vpe_init_hardware(struct vpe_device *vpe_dev)
  10632. +{
  10633. + int rc = 0;
  10634. +
  10635. + if (vpe_dev->fs_vpe == NULL) {
  10636. + vpe_dev->fs_vpe =
  10637. + regulator_get(&vpe_dev->pdev->dev, "vdd");
  10638. + if (IS_ERR(vpe_dev->fs_vpe)) {
  10639. + pr_err("Regulator vpe vdd get failed %ld\n",
  10640. + PTR_ERR(vpe_dev->fs_vpe));
  10641. + vpe_dev->fs_vpe = NULL;
  10642. + rc = -ENODEV;
  10643. + goto fail;
  10644. + } else if (regulator_enable(vpe_dev->fs_vpe)) {
  10645. + pr_err("Regulator vpe vdd enable failed\n");
  10646. + regulator_put(vpe_dev->fs_vpe);
  10647. + vpe_dev->fs_vpe = NULL;
  10648. + rc = -ENODEV;
  10649. + goto fail;
  10650. + }
  10651. + }
  10652. +
  10653. + rc = msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
  10654. + vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
  10655. + if (rc < 0) {
  10656. + rc = -ENODEV;
  10657. + pr_err("clk enable failed\n");
  10658. + goto disable_and_put_regulator;
  10659. + }
  10660. +
  10661. + vpe_dev->base = ioremap(vpe_dev->mem->start,
  10662. + resource_size(vpe_dev->mem));
  10663. + if (!vpe_dev->base) {
  10664. + rc = -ENOMEM;
  10665. + pr_err("ioremap failed\n");
  10666. + goto disable_and_put_regulator;
  10667. + }
  10668. +
  10669. + if (vpe_dev->state != VPE_STATE_BOOT) {
  10670. + rc = request_irq(vpe_dev->irq->start, msm_vpe_irq,
  10671. + IRQF_TRIGGER_RISING,
  10672. + "vpe", vpe_dev);
  10673. + if (rc < 0) {
  10674. + pr_err("irq request fail! start=%u\n",
  10675. + vpe_dev->irq->start);
  10676. + rc = -EBUSY;
  10677. + goto unmap_base;
  10678. + } else {
  10679. + VPE_DBG("Got irq! %d\n", vpe_dev->irq->start);
  10680. + }
  10681. + } else {
  10682. + VPE_DBG("Skip requesting the irq since device is booting\n");
  10683. + }
  10684. + vpe_dev->buf_mgr_subdev = msm_buf_mngr_get_subdev();
  10685. +
  10686. + msm_vpe_create_buff_queue(vpe_dev, MSM_VPE_MAX_BUFF_QUEUE);
  10687. + return rc;
  10688. +
  10689. +unmap_base:
  10690. + iounmap(vpe_dev->base);
  10691. +disable_and_put_regulator:
  10692. + regulator_disable(vpe_dev->fs_vpe);
  10693. + regulator_put(vpe_dev->fs_vpe);
  10694. +fail:
  10695. + return rc;
  10696. +}
  10697. +
  10698. +static int vpe_release_hardware(struct vpe_device *vpe_dev)
  10699. +{
  10700. + if (vpe_dev->state != VPE_STATE_BOOT) {
  10701. + free_irq(vpe_dev->irq->start, vpe_dev);
  10702. + tasklet_kill(&vpe_dev->vpe_tasklet);
  10703. + atomic_set(&vpe_dev->irq_cnt, 0);
  10704. + }
  10705. +
  10706. + msm_vpe_delete_buff_queue(vpe_dev);
  10707. + iounmap(vpe_dev->base);
  10708. + msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
  10709. + vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
  10710. + return 0;
  10711. +}
  10712. +
  10713. +static int vpe_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  10714. +{
  10715. + int rc = 0;
  10716. + uint32_t i;
  10717. + struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
  10718. +
  10719. + mutex_lock(&vpe_dev->mutex);
  10720. + if (vpe_dev->vpe_open_cnt == MAX_ACTIVE_VPE_INSTANCE) {
  10721. + pr_err("No free VPE instance\n");
  10722. + rc = -ENODEV;
  10723. + goto err_mutex_unlock;
  10724. + }
  10725. +
  10726. + for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
  10727. + if (vpe_dev->vpe_subscribe_list[i].active == 0) {
  10728. + vpe_dev->vpe_subscribe_list[i].active = 1;
  10729. + vpe_dev->vpe_subscribe_list[i].vfh = &fh->vfh;
  10730. + break;
  10731. + }
  10732. + }
  10733. + if (i == MAX_ACTIVE_VPE_INSTANCE) {
  10734. + pr_err("No free instance\n");
  10735. + rc = -ENODEV;
  10736. + goto err_mutex_unlock;
  10737. + }
  10738. +
  10739. + VPE_DBG("open %d %p\n", i, &fh->vfh);
  10740. + vpe_dev->vpe_open_cnt++;
  10741. + if (vpe_dev->vpe_open_cnt == 1) {
  10742. + rc = vpe_init_hardware(vpe_dev);
  10743. + if (rc < 0) {
  10744. + pr_err("%s: Couldn't init vpe hardware\n", __func__);
  10745. + vpe_dev->vpe_open_cnt--;
  10746. + rc = -ENODEV;
  10747. + goto err_fixup_sub_list;
  10748. + }
  10749. + rc = vpe_init_mem(vpe_dev);
  10750. + if (rc < 0) {
  10751. + pr_err("%s: Couldn't init mem\n", __func__);
  10752. + vpe_dev->vpe_open_cnt--;
  10753. + rc = -ENODEV;
  10754. + goto err_release_hardware;
  10755. + }
  10756. + vpe_dev->state = VPE_STATE_IDLE;
  10757. + }
  10758. + mutex_unlock(&vpe_dev->mutex);
  10759. +
  10760. + return rc;
  10761. +
  10762. +err_release_hardware:
  10763. + vpe_release_hardware(vpe_dev);
  10764. +err_fixup_sub_list:
  10765. + for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
  10766. + if (vpe_dev->vpe_subscribe_list[i].vfh == &fh->vfh) {
  10767. + vpe_dev->vpe_subscribe_list[i].active = 0;
  10768. + vpe_dev->vpe_subscribe_list[i].vfh = NULL;
  10769. + break;
  10770. + }
  10771. + }
  10772. +err_mutex_unlock:
  10773. + mutex_unlock(&vpe_dev->mutex);
  10774. + return rc;
  10775. +}
  10776. +
  10777. +static int vpe_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  10778. +{
  10779. + uint32_t i;
  10780. + struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
  10781. + mutex_lock(&vpe_dev->mutex);
  10782. + for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
  10783. + if (vpe_dev->vpe_subscribe_list[i].vfh == &fh->vfh) {
  10784. + vpe_dev->vpe_subscribe_list[i].active = 0;
  10785. + vpe_dev->vpe_subscribe_list[i].vfh = NULL;
  10786. + break;
  10787. + }
  10788. + }
  10789. + if (i == MAX_ACTIVE_VPE_INSTANCE) {
  10790. + pr_err("Invalid close\n");
  10791. + mutex_unlock(&vpe_dev->mutex);
  10792. + return -ENODEV;
  10793. + }
  10794. +
  10795. + VPE_DBG("close %d %p\n", i, &fh->vfh);
  10796. + vpe_dev->vpe_open_cnt--;
  10797. + if (vpe_dev->vpe_open_cnt == 0) {
  10798. + vpe_deinit_mem(vpe_dev);
  10799. + vpe_release_hardware(vpe_dev);
  10800. + vpe_dev->state = VPE_STATE_OFF;
  10801. + }
  10802. + mutex_unlock(&vpe_dev->mutex);
  10803. + return 0;
  10804. +}
  10805. +
  10806. +static const struct v4l2_subdev_internal_ops msm_vpe_internal_ops = {
  10807. + .open = vpe_open_node,
  10808. + .close = vpe_close_node,
  10809. +};
  10810. +
  10811. +static int msm_vpe_buffer_ops(struct vpe_device *vpe_dev,
  10812. + uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info)
  10813. +{
  10814. + int rc = -EINVAL;
  10815. +
  10816. + rc = v4l2_subdev_call(vpe_dev->buf_mgr_subdev, core, ioctl,
  10817. + buff_mgr_ops, buff_mgr_info);
  10818. + if (rc < 0)
  10819. + pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
  10820. + return rc;
  10821. +}
  10822. +
  10823. +static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev)
  10824. +{
  10825. + struct v4l2_event v4l2_evt;
  10826. + struct msm_queue_cmd *frame_qcmd;
  10827. + struct msm_queue_cmd *event_qcmd;
  10828. + struct msm_vpe_frame_info_t *processed_frame;
  10829. + struct msm_device_queue *queue = &vpe_dev->processing_q;
  10830. + struct msm_buf_mngr_info buff_mgr_info;
  10831. + int rc = 0;
  10832. +
  10833. + if (queue->len > 0) {
  10834. + frame_qcmd = msm_dequeue(queue, list_frame);
  10835. + if (frame_qcmd) {
  10836. + processed_frame = frame_qcmd->command;
  10837. + do_gettimeofday(&(processed_frame->out_time));
  10838. + kfree(frame_qcmd);
  10839. + event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
  10840. + if (!event_qcmd) {
  10841. + pr_err("%s: Insufficient memory\n", __func__);
  10842. + return -ENOMEM;
  10843. + }
  10844. + atomic_set(&event_qcmd->on_heap, 1);
  10845. + event_qcmd->command = processed_frame;
  10846. + VPE_DBG("fid %d\n", processed_frame->frame_id);
  10847. + msm_enqueue(&vpe_dev->eventData_q, &event_qcmd->list_eventdata);
  10848. +
  10849. + if (!processed_frame->output_buffer_info.processed_divert) {
  10850. + memset(&buff_mgr_info, 0,
  10851. + sizeof(buff_mgr_info));
  10852. + buff_mgr_info.session_id =
  10853. + ((processed_frame->identity >> 16) & 0xFFFF);
  10854. + buff_mgr_info.stream_id =
  10855. + (processed_frame->identity & 0xFFFF);
  10856. + buff_mgr_info.frame_id = processed_frame->frame_id;
  10857. + buff_mgr_info.timestamp = processed_frame->timestamp;
  10858. + buff_mgr_info.index =
  10859. + processed_frame->output_buffer_info.index;
  10860. + rc = msm_vpe_buffer_ops(vpe_dev,
  10861. + VIDIOC_MSM_BUF_MNGR_BUF_DONE,
  10862. + &buff_mgr_info);
  10863. + if (rc < 0) {
  10864. + pr_err("%s: error doing VIDIOC_MSM_BUF_MNGR_BUF_DONE\n",
  10865. + __func__);
  10866. + rc = -EINVAL;
  10867. + }
  10868. + }
  10869. +
  10870. + v4l2_evt.id = processed_frame->inst_id;
  10871. + v4l2_evt.type = V4L2_EVENT_VPE_FRAME_DONE;
  10872. + v4l2_event_queue(vpe_dev->msm_sd.sd.devnode, &v4l2_evt);
  10873. + }
  10874. + else
  10875. + rc = -EFAULT;
  10876. + }
  10877. + return rc;
  10878. +}
  10879. +
  10880. +static void vpe_update_scaler_params(struct vpe_device *vpe_dev,
  10881. + struct msm_vpe_frame_strip_info strip_info)
  10882. +{
  10883. + uint32_t out_ROI_width, out_ROI_height;
  10884. + uint32_t src_ROI_width, src_ROI_height;
  10885. +
  10886. + /*
  10887. + * phase_step_x, phase_step_y, phase_init_x and phase_init_y
  10888. + * are represented in fixed-point, unsigned 3.29 format
  10889. + */
  10890. + uint32_t phase_step_x = 0;
  10891. + uint32_t phase_step_y = 0;
  10892. + uint32_t phase_init_x = 0;
  10893. + uint32_t phase_init_y = 0;
  10894. +
  10895. + uint32_t src_roi, src_x, src_y, src_xy, temp;
  10896. + uint32_t yscale_filter_sel, xscale_filter_sel;
  10897. + uint32_t scale_unit_sel_x, scale_unit_sel_y;
  10898. + uint64_t numerator, denominator;
  10899. +
  10900. + /*
  10901. + * assumption is both direction need zoom. this can be
  10902. + * improved.
  10903. + */
  10904. + temp = msm_camera_io_r(vpe_dev->base + VPE_OP_MODE_OFFSET) | 0x3;
  10905. + msm_camera_io_w(temp, vpe_dev->base + VPE_OP_MODE_OFFSET);
  10906. +
  10907. + src_ROI_width = strip_info.src_w;
  10908. + src_ROI_height = strip_info.src_h;
  10909. + out_ROI_width = strip_info.dst_w;
  10910. + out_ROI_height = strip_info.dst_h;
  10911. +
  10912. + VPE_DBG("src w = %u, h=%u, dst w = %u, h =%u.\n",
  10913. + src_ROI_width, src_ROI_height, out_ROI_width,
  10914. + out_ROI_height);
  10915. + src_roi = (src_ROI_height << 16) + src_ROI_width;
  10916. +
  10917. + msm_camera_io_w(src_roi, vpe_dev->base + VPE_SRC_SIZE_OFFSET);
  10918. +
  10919. + src_x = strip_info.src_x;
  10920. + src_y = strip_info.src_y;
  10921. +
  10922. + VPE_DBG("src_x = %d, src_y=%d.\n", src_x, src_y);
  10923. +
  10924. + src_xy = src_y*(1<<16) + src_x;
  10925. + msm_camera_io_w(src_xy, vpe_dev->base +
  10926. + VPE_SRC_XY_OFFSET);
  10927. + VPE_DBG("src_xy = 0x%x, src_roi=0x%x.\n", src_xy, src_roi);
  10928. +
  10929. + /* decide whether to use FIR or M/N for scaling */
  10930. + if ((out_ROI_width == 1 && src_ROI_width < 4) ||
  10931. + (src_ROI_width < 4 * out_ROI_width - 3))
  10932. + scale_unit_sel_x = 0;/* use FIR scalar */
  10933. + else
  10934. + scale_unit_sel_x = 1;/* use M/N scalar */
  10935. +
  10936. + if ((out_ROI_height == 1 && src_ROI_height < 4) ||
  10937. + (src_ROI_height < 4 * out_ROI_height - 3))
  10938. + scale_unit_sel_y = 0;/* use FIR scalar */
  10939. + else
  10940. + scale_unit_sel_y = 1;/* use M/N scalar */
  10941. +
  10942. + /* calculate phase step for the x direction */
  10943. +
  10944. + /*
  10945. + * if destination is only 1 pixel wide, the value of
  10946. + * phase_step_x is unimportant. Assigning phase_step_x to src
  10947. + * ROI width as an arbitrary value.
  10948. + */
  10949. + if (out_ROI_width == 1)
  10950. + phase_step_x = (uint32_t) ((src_ROI_width) <<
  10951. + SCALER_PHASE_BITS);
  10952. +
  10953. + /* if using FIR scalar */
  10954. + else if (scale_unit_sel_x == 0) {
  10955. +
  10956. + /*
  10957. + * Calculate the quotient ( src_ROI_width - 1 ) / (
  10958. + * out_ROI_width - 1) with u3.29 precision. Quotient
  10959. + * is rounded up to the larger 29th decimal point
  10960. + */
  10961. + numerator = (uint64_t)(src_ROI_width - 1) <<
  10962. + SCALER_PHASE_BITS;
  10963. + /*
  10964. + * never equals to 0 because of the "(out_ROI_width ==
  10965. + * 1 )"
  10966. + */
  10967. + denominator = (uint64_t)(out_ROI_width - 1);
  10968. + /*
  10969. + * divide and round up to the larger 29th decimal
  10970. + * point.
  10971. + */
  10972. + phase_step_x = (uint32_t) vpe_do_div((numerator +
  10973. + denominator - 1), denominator);
  10974. + } else if (scale_unit_sel_x == 1) { /* if M/N scalar */
  10975. + /*
  10976. + * Calculate the quotient ( src_ROI_width ) / (
  10977. + * out_ROI_width) with u3.29 precision. Quotient is
  10978. + * rounded down to the smaller 29th decimal point.
  10979. + */
  10980. + numerator = (uint64_t)(src_ROI_width) <<
  10981. + SCALER_PHASE_BITS;
  10982. + denominator = (uint64_t)(out_ROI_width);
  10983. + phase_step_x =
  10984. + (uint32_t) vpe_do_div(numerator, denominator);
  10985. + }
  10986. + /* calculate phase step for the y direction */
  10987. +
  10988. + /*
  10989. + * if destination is only 1 pixel tall, the value of
  10990. + * phase_step_y is unimportant. Assigning phase_step_y to src
  10991. + * ROI height as an arbitrary value.
  10992. + */
  10993. + if (out_ROI_height == 1)
  10994. + phase_step_y =
  10995. + (uint32_t) ((src_ROI_height) << SCALER_PHASE_BITS);
  10996. +
  10997. + /* if FIR scalar */
  10998. + else if (scale_unit_sel_y == 0) {
  10999. + /*
  11000. + * Calculate the quotient ( src_ROI_height - 1 ) / (
  11001. + * out_ROI_height - 1) with u3.29 precision. Quotient
  11002. + * is rounded up to the larger 29th decimal point.
  11003. + */
  11004. + numerator = (uint64_t)(src_ROI_height - 1) <<
  11005. + SCALER_PHASE_BITS;
  11006. + /*
  11007. + * never equals to 0 because of the " ( out_ROI_height
  11008. + * == 1 )" case
  11009. + */
  11010. + denominator = (uint64_t)(out_ROI_height - 1);
  11011. + /*
  11012. + * Quotient is rounded up to the larger 29th decimal
  11013. + * point.
  11014. + */
  11015. + phase_step_y =
  11016. + (uint32_t) vpe_do_div(
  11017. + (numerator + denominator - 1), denominator);
  11018. + } else if (scale_unit_sel_y == 1) { /* if M/N scalar */
  11019. + /*
  11020. + * Calculate the quotient ( src_ROI_height ) / (
  11021. + * out_ROI_height) with u3.29 precision. Quotient is
  11022. + * rounded down to the smaller 29th decimal point.
  11023. + */
  11024. + numerator = (uint64_t)(src_ROI_height) <<
  11025. + SCALER_PHASE_BITS;
  11026. + denominator = (uint64_t)(out_ROI_height);
  11027. + phase_step_y = (uint32_t) vpe_do_div(
  11028. + numerator, denominator);
  11029. + }
  11030. +
  11031. + /* decide which set of FIR coefficients to use */
  11032. + if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
  11033. + xscale_filter_sel = 0;
  11034. + else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
  11035. + xscale_filter_sel = 1;
  11036. + else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
  11037. + xscale_filter_sel = 2;
  11038. + else
  11039. + xscale_filter_sel = 3;
  11040. +
  11041. + if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
  11042. + yscale_filter_sel = 0;
  11043. + else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
  11044. + yscale_filter_sel = 1;
  11045. + else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
  11046. + yscale_filter_sel = 2;
  11047. + else
  11048. + yscale_filter_sel = 3;
  11049. +
  11050. + /* calculate phase init for the x direction */
  11051. +
  11052. + /* if using FIR scalar */
  11053. + if (scale_unit_sel_x == 0) {
  11054. + if (out_ROI_width == 1)
  11055. + phase_init_x =
  11056. + (uint32_t) ((src_ROI_width - 1) <<
  11057. + SCALER_PHASE_BITS);
  11058. + else
  11059. + phase_init_x = 0;
  11060. + } else if (scale_unit_sel_x == 1) /* M over N scalar */
  11061. + phase_init_x = 0;
  11062. +
  11063. + /*
  11064. + * calculate phase init for the y direction if using FIR
  11065. + * scalar
  11066. + */
  11067. + if (scale_unit_sel_y == 0) {
  11068. + if (out_ROI_height == 1)
  11069. + phase_init_y =
  11070. + (uint32_t) ((src_ROI_height -
  11071. + 1) << SCALER_PHASE_BITS);
  11072. + else
  11073. + phase_init_y = 0;
  11074. + } else if (scale_unit_sel_y == 1) /* M over N scalar */
  11075. + phase_init_y = 0;
  11076. +
  11077. + strip_info.phase_step_x = phase_step_x;
  11078. + strip_info.phase_step_y = phase_step_y;
  11079. + strip_info.phase_init_x = phase_init_x;
  11080. + strip_info.phase_init_y = phase_init_y;
  11081. + VPE_DBG("phase step x = %d, step y = %d.\n",
  11082. + strip_info.phase_step_x, strip_info.phase_step_y);
  11083. + VPE_DBG("phase init x = %d, init y = %d.\n",
  11084. + strip_info.phase_init_x, strip_info.phase_init_y);
  11085. +
  11086. + msm_camera_io_w(strip_info.phase_step_x, vpe_dev->base +
  11087. + VPE_SCALE_PHASEX_STEP_OFFSET);
  11088. + msm_camera_io_w(strip_info.phase_step_y, vpe_dev->base +
  11089. + VPE_SCALE_PHASEY_STEP_OFFSET);
  11090. +
  11091. + msm_camera_io_w(strip_info.phase_init_x, vpe_dev->base +
  11092. + VPE_SCALE_PHASEX_INIT_OFFSET);
  11093. + msm_camera_io_w(strip_info.phase_init_y, vpe_dev->base +
  11094. + VPE_SCALE_PHASEY_INIT_OFFSET);
  11095. +}
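To make the u3.29 fixed-point math in vpe_update_scaler_params() concrete, here is a small standalone sketch (not part of the patch) of the FIR phase-step computation; SCALER_PHASE_BITS is assumed to be 29, matching the "unsigned 3.29 format" comment above.

#include <stdint.h>
#include <stdio.h>

#define SCALER_PHASE_BITS 29   /* assumed, per the u3.29 comment */

/* round-up division of ((src - 1) << 29) by (dst - 1), as in the FIR branch */
static uint32_t fir_phase_step(uint32_t src, uint32_t dst)
{
	uint64_t num = (uint64_t)(src - 1) << SCALER_PHASE_BITS;
	uint64_t den = dst - 1;

	return (uint32_t)((num + den - 1) / den);
}

int main(void)
{
	/* e.g. an 8-pixel ROI scaled down to 4 pixels: 7/3 ~= 2.333 in u3.29 */
	uint32_t step = fir_phase_step(8, 4);

	printf("phase_step = 0x%08x (~%.3f)\n", step,
	       (double)step / (double)(1u << SCALER_PHASE_BITS));
	return 0;
}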
  11096. +
  11097. +static void vpe_program_buffer_addresses(
  11098. + struct vpe_device *vpe_dev,
  11099. + unsigned long srcP0,
  11100. + unsigned long srcP1,
  11101. + unsigned long outP0,
  11102. + unsigned long outP1)
  11103. +{
  11104. + VPE_DBG("%s VPE Configured with:\n"
  11105. + "Src %x, %x Dest %x, %x",
  11106. + __func__, (uint32_t)srcP0, (uint32_t)srcP1,
  11107. + (uint32_t)outP0, (uint32_t)outP1);
  11108. +
  11109. + msm_camera_io_w(srcP0, vpe_dev->base + VPE_SRCP0_ADDR_OFFSET);
  11110. + msm_camera_io_w(srcP1, vpe_dev->base + VPE_SRCP1_ADDR_OFFSET);
  11111. + msm_camera_io_w(outP0, vpe_dev->base + VPE_OUTP0_ADDR_OFFSET);
  11112. + msm_camera_io_w(outP1, vpe_dev->base + VPE_OUTP1_ADDR_OFFSET);
  11113. +}
  11114. +
  11115. +static int vpe_start(struct vpe_device *vpe_dev)
  11116. +{
  11117. + /* enable the frame irq, bit 0 = Display list 0 ROI done */
  11118. + msm_camera_io_w_mb(1, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
  11119. + msm_camera_io_dump(vpe_dev->base, 0x120);
  11120. + msm_camera_io_dump(vpe_dev->base + 0x00400, 0x18);
  11121. + msm_camera_io_dump(vpe_dev->base + 0x10000, 0x250);
  11122. + msm_camera_io_dump(vpe_dev->base + 0x30000, 0x20);
  11123. + msm_camera_io_dump(vpe_dev->base + 0x50000, 0x30);
  11124. + msm_camera_io_dump(vpe_dev->base + 0x50400, 0x10);
  11125. +
  11126. + /*
  11127. + * This triggers the operation. When the VPE is done,
  11128. + * msm_vpe_irq will fire.
  11129. + */
  11130. + msm_camera_io_w_mb(1, vpe_dev->base + VPE_DL0_START_OFFSET);
  11131. + return 0;
  11132. +}
  11133. +
  11134. +static void vpe_config_axi_default(struct vpe_device *vpe_dev)
  11135. +{
  11136. + msm_camera_io_w(0x25, vpe_dev->base + VPE_AXI_ARB_2_OFFSET);
  11137. +}
  11138. +
  11139. +static int vpe_reset(struct vpe_device *vpe_dev)
  11140. +{
  11141. + uint32_t vpe_version;
  11142. + uint32_t rc = 0;
  11143. +
  11144. + vpe_version = msm_camera_io_r(
  11145. + vpe_dev->base + VPE_HW_VERSION_OFFSET);
  11146. + VPE_DBG("vpe_version = 0x%x\n", vpe_version);
  11147. + /* disable all interrupts.*/
  11148. + msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
  11149. + /* clear all pending interrupts*/
  11150. + msm_camera_io_w(0x1fffff, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
  11151. + /* write sw_reset to reset the core. */
  11152. + msm_camera_io_w(0x10, vpe_dev->base + VPE_SW_RESET_OFFSET);
  11153. + /* then poll the reset bit, it should be self-cleared. */
  11154. + while (1) {
  11155. + rc = msm_camera_io_r(vpe_dev->base + VPE_SW_RESET_OFFSET) \
  11156. + & 0x10;
  11157. + if (rc == 0)
  11158. + break;
  11159. + cpu_relax();
  11160. + }
  11161. + /*
  11162. + * at this point, hardware is reset. Then program to default
  11163. + * values.
  11164. + */
  11165. + msm_camera_io_w(VPE_AXI_RD_ARB_CONFIG_VALUE,
  11166. + vpe_dev->base + VPE_AXI_RD_ARB_CONFIG_OFFSET);
  11167. +
  11168. + msm_camera_io_w(VPE_CGC_ENABLE_VALUE,
  11169. + vpe_dev->base + VPE_CGC_EN_OFFSET);
  11170. + msm_camera_io_w(1, vpe_dev->base + VPE_CMD_MODE_OFFSET);
  11171. + msm_camera_io_w(VPE_DEFAULT_OP_MODE_VALUE,
  11172. + vpe_dev->base + VPE_OP_MODE_OFFSET);
  11173. + msm_camera_io_w(VPE_DEFAULT_SCALE_CONFIG,
  11174. + vpe_dev->base + VPE_SCALE_CONFIG_OFFSET);
  11175. + vpe_config_axi_default(vpe_dev);
  11176. + return rc;
  11177. +}
  11178. +
  11179. +static void vpe_update_scale_coef(struct vpe_device *vpe_dev, uint32_t *p)
  11180. +{
  11181. + uint32_t i, offset;
  11182. + offset = *p;
  11183. + for (i = offset; i < (VPE_SCALE_COEFF_NUM + offset); i++) {
  11184. + VPE_DBG("Setting scale table %d\n", i);
  11185. + msm_camera_io_w(*(++p),
  11186. + vpe_dev->base + VPE_SCALE_COEFF_LSBn(i));
  11187. + msm_camera_io_w(*(++p),
  11188. + vpe_dev->base + VPE_SCALE_COEFF_MSBn(i));
  11189. + }
  11190. +}
  11191. +
  11192. +static void vpe_input_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
  11193. +{
  11194. + msm_camera_io_w(*p, vpe_dev->base + VPE_SRC_FORMAT_OFFSET);
  11195. + msm_camera_io_w(*(++p),
  11196. + vpe_dev->base + VPE_SRC_UNPACK_PATTERN1_OFFSET);
  11197. + msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_IMAGE_SIZE_OFFSET);
  11198. + msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_YSTRIDE1_OFFSET);
  11199. + msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_SIZE_OFFSET);
  11200. + msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_XY_OFFSET);
  11201. +}
  11202. +
  11203. +static void vpe_output_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
  11204. +{
  11205. + msm_camera_io_w(*p, vpe_dev->base + VPE_OUT_FORMAT_OFFSET);
  11206. + msm_camera_io_w(*(++p),
  11207. + vpe_dev->base + VPE_OUT_PACK_PATTERN1_OFFSET);
  11208. + msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_YSTRIDE1_OFFSET);
  11209. + msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_SIZE_OFFSET);
  11210. + msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_XY_OFFSET);
  11211. +}
  11212. +
  11213. +static void vpe_operation_config(struct vpe_device *vpe_dev, uint32_t *p)
  11214. +{
  11215. + msm_camera_io_w(*p, vpe_dev->base + VPE_OP_MODE_OFFSET);
  11216. +}
  11217. +
  11218. +/**
  11219. + * msm_vpe_transaction_setup() - send setup for one frame to VPE
  11220. + * @vpe_dev: vpe device
  11221. + * @data: packed setup commands
  11222. + *
  11223. + * See msm_vpe.h for the expected format of `data'
  11224. + */
  11225. +static void msm_vpe_transaction_setup(struct vpe_device *vpe_dev, void *data)
  11226. +{
  11227. + int i;
  11228. + void *iter = data;
  11229. +
  11230. + vpe_mem_dump("vpe_transaction", data, VPE_TRANSACTION_SETUP_CONFIG_LEN);
  11231. +
  11232. + for (i = 0; i < VPE_NUM_SCALER_TABLES; ++i) {
  11233. + vpe_update_scale_coef(vpe_dev, (uint32_t *)iter);
  11234. + iter += VPE_SCALER_CONFIG_LEN;
  11235. + }
  11236. + vpe_input_plane_config(vpe_dev, (uint32_t *)iter);
  11237. + iter += VPE_INPUT_PLANE_CFG_LEN;
  11238. + vpe_output_plane_config(vpe_dev, (uint32_t *)iter);
  11239. + iter += VPE_OUTPUT_PLANE_CFG_LEN;
  11240. + vpe_operation_config(vpe_dev, (uint32_t *)iter);
  11241. +}
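In other words, the packed buffer walked by msm_vpe_transaction_setup() above consists of VPE_NUM_SCALER_TABLES scaler-coefficient tables (VPE_SCALER_CONFIG_LEN bytes each), followed by one input-plane block (VPE_INPUT_PLANE_CFG_LEN bytes), one output-plane block (VPE_OUTPUT_PLANE_CFG_LEN bytes), and finally the operation-mode word; the authoritative layout lives in msm_vpe.h, as the comment notes.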
  11242. +
  11243. +static int msm_vpe_send_frame_to_hardware(struct vpe_device *vpe_dev,
  11244. + struct msm_queue_cmd *frame_qcmd)
  11245. +{
  11246. + struct msm_vpe_frame_info_t *process_frame;
  11247. +
  11248. + if (vpe_dev->processing_q.len < MAX_VPE_PROCESSING_FRAME) {
  11249. + process_frame = frame_qcmd->command;
  11250. + msm_enqueue(&vpe_dev->processing_q,
  11251. + &frame_qcmd->list_frame);
  11252. +
  11253. + vpe_update_scaler_params(vpe_dev, process_frame->strip_info);
  11254. + vpe_program_buffer_addresses(
  11255. + vpe_dev,
  11256. + process_frame->src_phyaddr,
  11257. + process_frame->src_phyaddr
  11258. + + process_frame->src_chroma_plane_offset,
  11259. + process_frame->dest_phyaddr,
  11260. + process_frame->dest_phyaddr
  11261. + + process_frame->dest_chroma_plane_offset);
  11262. + vpe_start(vpe_dev);
  11263. + do_gettimeofday(&(process_frame->in_time));
  11264. + }
  11265. + return 0;
  11266. +}
  11267. +
  11268. +static int msm_vpe_cfg(struct vpe_device *vpe_dev,
  11269. + struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
  11270. +{
  11271. + int rc = 0;
  11272. + struct msm_queue_cmd *frame_qcmd = NULL;
  11273. + struct msm_vpe_frame_info_t *new_frame =
  11274. + kzalloc(sizeof(struct msm_vpe_frame_info_t), GFP_KERNEL);
  11275. + unsigned long in_phyaddr, out_phyaddr;
  11276. + struct msm_buf_mngr_info buff_mgr_info;
  11277. +
  11278. + if (!new_frame) {
  11279. + pr_err("Insufficient memory. return\n");
  11280. + return -ENOMEM;
  11281. + }
  11282. +
  11283. + rc = copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
  11284. + sizeof(struct msm_vpe_frame_info_t));
  11285. + if (rc) {
  11286. + pr_err("%s:%d copy from user\n", __func__, __LINE__);
  11287. + rc = -EINVAL;
  11288. + goto err_free_new_frame;
  11289. + }
  11290. +
  11291. + in_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
  11292. + &new_frame->input_buffer_info,
  11293. + ((new_frame->identity >> 16) & 0xFFFF),
  11294. + (new_frame->identity & 0xFFFF));
  11295. + if (!in_phyaddr) {
  11296. + pr_err("error getting input physical address\n");
  11297. + rc = -EINVAL;
  11298. + goto err_free_new_frame;
  11299. + }
  11300. +
  11301. + memset(&new_frame->output_buffer_info, 0,
  11302. + sizeof(struct msm_vpe_buffer_info_t));
  11303. + memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
  11304. + buff_mgr_info.session_id = ((new_frame->identity >> 16) & 0xFFFF);
  11305. + buff_mgr_info.stream_id = (new_frame->identity & 0xFFFF);
  11306. + rc = msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
  11307. + &buff_mgr_info);
  11308. + if (rc < 0) {
  11309. + pr_err("error getting buffer\n");
  11310. + rc = -EINVAL;
  11311. + goto err_free_new_frame;
  11312. + }
  11313. +
  11314. + new_frame->output_buffer_info.index = buff_mgr_info.index;
  11315. + out_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
  11316. + &new_frame->output_buffer_info,
  11317. + ((new_frame->identity >> 16) & 0xFFFF),
  11318. + (new_frame->identity & 0xFFFF));
  11319. + if (!out_phyaddr) {
  11320. + pr_err("error getting output physical address\n");
  11321. + rc = -EINVAL;
  11322. + goto err_put_buf;
  11323. + }
  11324. +
  11325. + new_frame->src_phyaddr = in_phyaddr;
  11326. + new_frame->dest_phyaddr = out_phyaddr;
  11327. +
  11328. + frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
  11329. + if (!frame_qcmd) {
  11330. + pr_err("Insufficient memory. return\n");
  11331. + rc = -ENOMEM;
  11332. + goto err_put_buf;
  11333. + }
  11334. +
  11335. + atomic_set(&frame_qcmd->on_heap, 1);
  11336. + frame_qcmd->command = new_frame;
  11337. + rc = msm_vpe_send_frame_to_hardware(vpe_dev, frame_qcmd);
  11338. + if (rc < 0) {
  11339. + pr_err("error cannot send frame to hardware\n");
  11340. + rc = -EINVAL;
  11341. + goto err_free_frame_qcmd;
  11342. + }
  11343. +
  11344. + return rc;
  11345. +
  11346. +err_free_frame_qcmd:
  11347. + kfree(frame_qcmd);
  11348. +err_put_buf:
  11349. + msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
  11350. + &buff_mgr_info);
  11351. +err_free_new_frame:
  11352. + kfree(new_frame);
  11353. + return rc;
  11354. +}
  11355. +
  11356. +static long msm_vpe_subdev_ioctl(struct v4l2_subdev *sd,
  11357. + unsigned int cmd, void *arg)
  11358. +{
  11359. + struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
  11360. + struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
  11361. + int rc = 0;
  11362. +
  11363. + mutex_lock(&vpe_dev->mutex);
  11364. + switch (cmd) {
  11365. + case VIDIOC_MSM_VPE_TRANSACTION_SETUP: {
  11366. + struct msm_vpe_transaction_setup_cfg *cfg;
  11367. + VPE_DBG("VIDIOC_MSM_VPE_TRANSACTION_SETUP\n");
  11368. + if (sizeof(*cfg) != ioctl_ptr->len) {
  11369. + pr_err("%s: size mismatch cmd=%d, len=%d, expected=%zu\n",
  11370. + __func__, cmd, ioctl_ptr->len,
  11371. + sizeof(*cfg));
  11372. + rc = -EINVAL;
  11373. + break;
  11374. + }
  11375. +
  11376. + cfg = kzalloc(ioctl_ptr->len, GFP_KERNEL);
  11377. + if (!cfg) {
  11378. + pr_err("%s:%d: malloc error\n", __func__, __LINE__);
  11379. + mutex_unlock(&vpe_dev->mutex);
  11380. + return -EINVAL;
  11381. + }
  11382. +
  11383. + rc = copy_from_user(cfg, (void __user *)ioctl_ptr->ioctl_ptr,
  11384. + ioctl_ptr->len);
  11385. + if (rc) {
  11386. + pr_err("%s:%d copy from user\n", __func__, __LINE__);
  11387. + kfree(cfg);
  11388. + break;
  11389. + }
  11390. +
  11391. + msm_vpe_transaction_setup(vpe_dev, (void *)cfg);
  11392. + kfree(cfg);
  11393. + break;
  11394. + }
  11395. + case VIDIOC_MSM_VPE_CFG: {
  11396. + VPE_DBG("VIDIOC_MSM_VPE_CFG\n");
  11397. + rc = msm_vpe_cfg(vpe_dev, ioctl_ptr);
  11398. + break;
  11399. + }
  11400. + case VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO: {
  11401. + struct msm_vpe_stream_buff_info_t *u_stream_buff_info;
  11402. + struct msm_vpe_stream_buff_info_t k_stream_buff_info;
  11403. +
  11404. + VPE_DBG("VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO\n");
  11405. +
  11406. + if (sizeof(struct msm_vpe_stream_buff_info_t) !=
  11407. + ioctl_ptr->len) {
  11408. + pr_err("%s:%d: invalid length\n", __func__, __LINE__);
  11409. + mutex_unlock(&vpe_dev->mutex);
  11410. + return -EINVAL;
  11411. + }
  11412. +
  11413. + u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
  11414. + if (!u_stream_buff_info) {
  11415. + pr_err("%s:%d: malloc error\n", __func__, __LINE__);
  11416. + mutex_unlock(&vpe_dev->mutex);
  11417. + return -EINVAL;
  11418. + }
  11419. +
  11420. + rc = (copy_from_user(u_stream_buff_info,
  11421. + (void __user *)ioctl_ptr->ioctl_ptr,
  11422. + ioctl_ptr->len) ? -EFAULT : 0);
  11423. + if (rc) {
  11424. + pr_err("%s:%d copy from user\n", __func__, __LINE__);
  11425. + kfree(u_stream_buff_info);
  11426. + mutex_unlock(&vpe_dev->mutex);
  11427. + return -EINVAL;
  11428. + }
  11429. +
  11430. + if ((u_stream_buff_info->num_buffs == 0) ||
  11431. + (u_stream_buff_info->num_buffs >
  11432. + MSM_CAMERA_MAX_STREAM_BUF)) {
  11433. + pr_err("%s:%d: Invalid number of buffers\n", __func__,
  11434. + __LINE__);
  11435. + kfree(u_stream_buff_info);
  11436. + mutex_unlock(&vpe_dev->mutex);
  11437. + return -EINVAL;
  11438. + }
  11439. + k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
  11440. + k_stream_buff_info.identity = u_stream_buff_info->identity;
  11441. + k_stream_buff_info.buffer_info =
  11442. + kzalloc(k_stream_buff_info.num_buffs *
  11443. + sizeof(struct msm_vpe_buffer_info_t), GFP_KERNEL);
  11444. + if (!k_stream_buff_info.buffer_info) {
  11445. + pr_err("%s:%d: malloc error\n", __func__, __LINE__);
  11446. + kfree(u_stream_buff_info);
  11447. + mutex_unlock(&vpe_dev->mutex);
  11448. + return -EINVAL;
  11449. + }
  11450. +
  11451. + rc = (copy_from_user(k_stream_buff_info.buffer_info,
  11452. + (void __user *)u_stream_buff_info->buffer_info,
  11453. + k_stream_buff_info.num_buffs *
  11454. + sizeof(struct msm_vpe_buffer_info_t)) ?
  11455. + -EFAULT : 0);
  11456. + if (rc) {
  11457. + pr_err("%s:%d copy from user\n", __func__, __LINE__);
  11458. + kfree(k_stream_buff_info.buffer_info);
  11459. + kfree(u_stream_buff_info);
  11460. + mutex_unlock(&vpe_dev->mutex);
  11461. + return -EINVAL;
  11462. + }
  11463. +
  11464. + rc = msm_vpe_add_buff_queue_entry(vpe_dev,
  11465. + ((k_stream_buff_info.identity >> 16) & 0xFFFF),
  11466. + (k_stream_buff_info.identity & 0xFFFF));
  11467. + if (!rc)
  11468. + rc = msm_vpe_enqueue_buff_info_list(vpe_dev,
  11469. + &k_stream_buff_info);
  11470. +
  11471. + kfree(k_stream_buff_info.buffer_info);
  11472. + kfree(u_stream_buff_info);
  11473. + break;
  11474. + }
  11475. + case VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO: {
  11476. + uint32_t identity;
  11477. + struct msm_vpe_buff_queue_info_t *buff_queue_info;
  11478. +
  11479. + VPE_DBG("VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO\n");
  11480. + if (ioctl_ptr->len != sizeof(uint32_t)) {
  11481. + pr_err("%s:%d Invalid len\n", __func__, __LINE__);
  11482. + mutex_unlock(&vpe_dev->mutex);
  11483. + return -EINVAL;
  11484. + }
  11485. +
  11486. + rc = (copy_from_user(&identity,
  11487. + (void __user *)ioctl_ptr->ioctl_ptr,
  11488. + ioctl_ptr->len) ? -EFAULT : 0);
  11489. + if (rc) {
  11490. + pr_err("%s:%d copy from user\n", __func__, __LINE__);
  11491. + mutex_unlock(&vpe_dev->mutex);
  11492. + return -EINVAL;
  11493. + }
  11494. +
  11495. + buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
  11496. + ((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
  11497. + if (buff_queue_info == NULL) {
  11498. + pr_err("error finding buffer queue entry for identity:%d\n",
  11499. + identity);
  11500. + mutex_unlock(&vpe_dev->mutex);
  11501. + return -EINVAL;
  11502. + }
  11503. +
  11504. + msm_vpe_dequeue_buff_info_list(vpe_dev, buff_queue_info);
  11505. + rc = msm_vpe_free_buff_queue_entry(vpe_dev,
  11506. + buff_queue_info->session_id,
  11507. + buff_queue_info->stream_id);
  11508. + break;
  11509. + }
  11510. + case VIDIOC_MSM_VPE_GET_EVENTPAYLOAD: {
  11511. + struct msm_device_queue *queue = &vpe_dev->eventData_q;
  11512. + struct msm_queue_cmd *event_qcmd;
  11513. + struct msm_vpe_frame_info_t *process_frame;
  11514. + VPE_DBG("VIDIOC_MSM_VPE_GET_EVENTPAYLOAD\n");
  11515. + event_qcmd = msm_dequeue(queue, list_eventdata);
  11516. + if (NULL == event_qcmd)
  11517. + break;
  11518. + process_frame = event_qcmd->command;
  11519. + VPE_DBG("fid %d\n", process_frame->frame_id);
  11520. + if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
  11521. + process_frame,
  11522. + sizeof(struct msm_vpe_frame_info_t))) {
  11523. + mutex_unlock(&vpe_dev->mutex);
  11524. + kfree(process_frame);
  11525. + kfree(event_qcmd);
  11526. + return -EINVAL;
  11527. + }
  11528. +
  11529. + kfree(process_frame);
  11530. + kfree(event_qcmd);
  11531. + break;
  11532. + }
  11533. + }
  11534. + mutex_unlock(&vpe_dev->mutex);
  11535. + return rc;
  11536. +}
  11537. +
  11538. +static int msm_vpe_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
  11539. + struct v4l2_event_subscription *sub)
  11540. +{
  11541. + return v4l2_event_subscribe(fh, sub, MAX_VPE_V4l2_EVENTS);
  11542. +}
  11543. +
  11544. +static int msm_vpe_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
  11545. + struct v4l2_event_subscription *sub)
  11546. +{
  11547. + return v4l2_event_unsubscribe(fh, sub);
  11548. +}
  11549. +
  11550. +static struct v4l2_subdev_core_ops msm_vpe_subdev_core_ops = {
  11551. + .ioctl = msm_vpe_subdev_ioctl,
  11552. + .subscribe_event = msm_vpe_subscribe_event,
  11553. + .unsubscribe_event = msm_vpe_unsubscribe_event,
  11554. +};
  11555. +
  11556. +static const struct v4l2_subdev_ops msm_vpe_subdev_ops = {
  11557. + .core = &msm_vpe_subdev_core_ops,
  11558. +};
  11559. +
  11560. +static struct v4l2_file_operations msm_vpe_v4l2_subdev_fops;
  11561. +
  11562. +static long msm_vpe_subdev_do_ioctl(
  11563. + struct file *file, unsigned int cmd, void *arg)
  11564. +{
  11565. + struct video_device *vdev = video_devdata(file);
  11566. + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
  11567. + struct v4l2_fh *vfh = file->private_data;
  11568. +
  11569. + switch (cmd) {
  11570. + case VIDIOC_DQEVENT:
  11571. + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
  11572. + return -ENOIOCTLCMD;
  11573. +
  11574. + return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
  11575. +
  11576. + case VIDIOC_SUBSCRIBE_EVENT:
  11577. + return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
  11578. +
  11579. + case VIDIOC_UNSUBSCRIBE_EVENT:
  11580. + return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
  11581. + case VIDIOC_MSM_VPE_GET_INST_INFO: {
  11582. + uint32_t i;
  11583. + struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
  11584. + struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
  11585. + struct msm_vpe_frame_info_t inst_info;
  11586. + memset(&inst_info, 0, sizeof(struct msm_vpe_frame_info_t));
  11587. + for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
  11588. + if (vpe_dev->vpe_subscribe_list[i].vfh == vfh) {
  11589. + inst_info.inst_id = i;
  11590. + break;
  11591. + }
  11592. + }
  11593. + if (copy_to_user(
  11594. + (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
  11595. + sizeof(struct msm_vpe_frame_info_t))) {
  11596. + return -EINVAL;
  11597. + }
  11598. + }
  11599. + default:
  11600. + return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
  11601. + }
  11602. +
  11603. + return 0;
  11604. +}
  11605. +
  11606. +static long msm_vpe_subdev_fops_ioctl(struct file *file, unsigned int cmd,
  11607. + unsigned long arg)
  11608. +{
  11609. + return video_usercopy(file, cmd, arg, msm_vpe_subdev_do_ioctl);
  11610. +}
  11611. +
  11612. +static int vpe_register_domain(void)
  11613. +{
  11614. + struct msm_iova_partition vpe_iommu_partition = {
  11615. + /* TODO: verify that these are correct? */
  11616. + .start = SZ_128K,
  11617. + .size = SZ_2G - SZ_128K,
  11618. + };
  11619. + struct msm_iova_layout vpe_iommu_layout = {
  11620. + .partitions = &vpe_iommu_partition,
  11621. + .npartitions = 1,
  11622. + .client_name = "camera_vpe",
  11623. + .domain_flags = 0,
  11624. + };
  11625. +
  11626. + return msm_register_domain(&vpe_iommu_layout);
  11627. +}
  11628. +
  11629. +static int __devinit vpe_probe(struct platform_device *pdev)
  11630. +{
  11631. + struct vpe_device *vpe_dev;
  11632. + int rc = 0;
  11633. +
  11634. + vpe_dev = kzalloc(sizeof(struct vpe_device), GFP_KERNEL);
  11635. + if (!vpe_dev) {
  11636. + pr_err("not enough memory\n");
  11637. + return -ENOMEM;
  11638. + }
  11639. +
  11640. + vpe_dev->vpe_clk = kzalloc(sizeof(struct clk *) *
  11641. + ARRAY_SIZE(vpe_clk_info), GFP_KERNEL);
  11642. + if (!vpe_dev->vpe_clk) {
  11643. + pr_err("not enough memory\n");
  11644. + rc = -ENOMEM;
  11645. + goto err_free_vpe_dev;
  11646. + }
  11647. +
  11648. + v4l2_subdev_init(&vpe_dev->msm_sd.sd, &msm_vpe_subdev_ops);
  11649. + vpe_dev->msm_sd.sd.internal_ops = &msm_vpe_internal_ops;
  11650. + snprintf(vpe_dev->msm_sd.sd.name, ARRAY_SIZE(vpe_dev->msm_sd.sd.name),
  11651. + "vpe");
  11652. + vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
  11653. + vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
  11654. + v4l2_set_subdevdata(&vpe_dev->msm_sd.sd, vpe_dev);
  11655. + platform_set_drvdata(pdev, &vpe_dev->msm_sd.sd);
  11656. + mutex_init(&vpe_dev->mutex);
  11657. + spin_lock_init(&vpe_dev->tasklet_lock);
  11658. +
  11659. + vpe_dev->pdev = pdev;
  11660. +
  11661. + vpe_dev->mem = platform_get_resource_byname(pdev,
  11662. + IORESOURCE_MEM, "vpe");
  11663. + if (!vpe_dev->mem) {
  11664. + pr_err("no mem resource?\n");
  11665. + rc = -ENODEV;
  11666. + goto err_free_vpe_clk;
  11667. + }
  11668. +
  11669. + vpe_dev->irq = platform_get_resource_byname(pdev,
  11670. + IORESOURCE_IRQ, "vpe");
  11671. + if (!vpe_dev->irq) {
  11672. + pr_err("%s: no irq resource?\n", __func__);
  11673. + rc = -ENODEV;
  11674. + goto err_release_mem;
  11675. + }
  11676. +
  11677. + vpe_dev->domain_num = vpe_register_domain();
  11678. + if (vpe_dev->domain_num < 0) {
  11679. + pr_err("%s: could not register domain\n", __func__);
  11680. + rc = -ENODEV;
  11681. + goto err_release_mem;
  11682. + }
  11683. +
  11684. + vpe_dev->domain =
  11685. + msm_get_iommu_domain(vpe_dev->domain_num);
  11686. + if (!vpe_dev->domain) {
  11687. + pr_err("%s: cannot find domain\n", __func__);
  11688. + rc = -ENODEV;
  11689. + goto err_release_mem;
  11690. + }
  11691. +
  11692. + vpe_dev->iommu_ctx_src = msm_iommu_get_ctx("vpe_src");
  11693. + vpe_dev->iommu_ctx_dst = msm_iommu_get_ctx("vpe_dst");
  11694. + if (!vpe_dev->iommu_ctx_src || !vpe_dev->iommu_ctx_dst) {
  11695. + pr_err("%s: cannot get iommu_ctx\n", __func__);
  11696. + rc = -ENODEV;
  11697. + goto err_release_mem;
  11698. + }
  11699. +
  11700. + media_entity_init(&vpe_dev->msm_sd.sd.entity, 0, NULL, 0);
  11701. + vpe_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
  11702. + vpe_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_VPE;
  11703. + vpe_dev->msm_sd.sd.entity.name = pdev->name;
  11704. + msm_sd_register(&vpe_dev->msm_sd);
  11705. + msm_vpe_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
  11706. + msm_vpe_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
  11707. + msm_vpe_v4l2_subdev_fops.unlocked_ioctl = msm_vpe_subdev_fops_ioctl;
  11708. + msm_vpe_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
  11709. + msm_vpe_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
  11710. +
  11711. + vpe_dev->msm_sd.sd.devnode->fops = &msm_vpe_v4l2_subdev_fops;
  11712. + vpe_dev->msm_sd.sd.entity.revision = vpe_dev->msm_sd.sd.devnode->num;
  11713. + vpe_dev->state = VPE_STATE_BOOT;
  11714. + rc = vpe_init_hardware(vpe_dev);
  11715. + if (rc < 0) {
  11716. + pr_err("%s: Couldn't init vpe hardware\n", __func__);
  11717. + rc = -ENODEV;
  11718. + goto err_unregister_sd;
  11719. + }
  11720. + vpe_reset(vpe_dev);
  11721. + vpe_release_hardware(vpe_dev);
  11722. + vpe_dev->state = VPE_STATE_OFF;
  11723. +
  11724. + rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
  11725. + if (rc < 0) {
  11726. + pr_err("Couldn't attach to vpe_src context bank\n");
  11727. + rc = -ENODEV;
  11728. + goto err_unregister_sd;
  11729. + }
  11730. + rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
  11731. + if (rc < 0) {
  11732. + pr_err("Couldn't attach to vpe_dst context bank\n");
  11733. + rc = -ENODEV;
  11734. + goto err_detach_src;
  11735. + }
  11736. +
  11737. + vpe_dev->state = VPE_STATE_OFF;
  11738. +
  11739. + msm_queue_init(&vpe_dev->eventData_q, "vpe-eventdata");
  11740. + msm_queue_init(&vpe_dev->processing_q, "vpe-frame");
  11741. + INIT_LIST_HEAD(&vpe_dev->tasklet_q);
  11742. + tasklet_init(&vpe_dev->vpe_tasklet, msm_vpe_do_tasklet,
  11743. + (unsigned long)vpe_dev);
  11744. + vpe_dev->vpe_open_cnt = 0;
  11745. +
  11746. + return rc;
  11747. +
  11748. +err_detach_src:
  11749. + iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
  11750. +err_unregister_sd:
  11751. + msm_sd_unregister(&vpe_dev->msm_sd);
  11752. +err_release_mem:
  11753. + release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
  11754. +err_free_vpe_clk:
  11755. + kfree(vpe_dev->vpe_clk);
  11756. +err_free_vpe_dev:
  11757. + kfree(vpe_dev);
  11758. + return rc;
  11759. +}
  11760. +
  11761. +static int vpe_device_remove(struct platform_device *dev)
  11762. +{
  11763. + struct v4l2_subdev *sd = platform_get_drvdata(dev);
  11764. + struct vpe_device *vpe_dev;
  11765. + if (!sd) {
  11766. + pr_err("%s: Subdevice is NULL\n", __func__);
  11767. + return 0;
  11768. + }
  11769. +
  11770. + vpe_dev = (struct vpe_device *)v4l2_get_subdevdata(sd);
  11771. + if (!vpe_dev) {
  11772. + pr_err("%s: vpe device is NULL\n", __func__);
  11773. + return 0;
  11774. + }
  11775. +
  11776. + iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
  11777. + iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
  11778. + msm_sd_unregister(&vpe_dev->msm_sd);
  11779. + release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
  11780. + mutex_destroy(&vpe_dev->mutex);
  11781. + kfree(vpe_dev);
  11782. + return 0;
  11783. +}
  11784. +
  11785. +static struct platform_driver vpe_driver = {
  11786. + .probe = vpe_probe,
  11787. + .remove = __devexit_p(vpe_device_remove),
  11788. + .driver = {
  11789. + .name = MSM_VPE_DRV_NAME,
  11790. + .owner = THIS_MODULE,
  11791. + },
  11792. +};
  11793. +
  11794. +static int __init msm_vpe_init_module(void)
  11795. +{
  11796. + return platform_driver_register(&vpe_driver);
  11797. +}
  11798. +
  11799. +static void __exit msm_vpe_exit_module(void)
  11800. +{
  11801. + platform_driver_unregister(&vpe_driver);
  11802. +}
  11803. +
  11804. +module_init(msm_vpe_init_module);
  11805. +module_exit(msm_vpe_exit_module);
  11806. +MODULE_DESCRIPTION("MSM VPE driver");
  11807. +MODULE_LICENSE("GPL v2");
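The probe path above acquires its resources in order (VPE clock table, register region, subdev registration, IOMMU context banks) and the err_* labels at the bottom release them in reverse, so each failure jumps to the label that undoes only what has already succeeded. A minimal, self-contained sketch of that unwind idiom follows; the resource names are hypothetical and only stand in for the calls used above.

#include <stdio.h>

/* Hypothetical acquire/release pairs standing in for
 * request_mem_region()/msm_sd_register()/iommu_attach_device() etc. */
static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe_sketch(void)
{
    int rc;

    rc = acquire("mem");
    if (rc < 0)
        return rc;
    rc = acquire("subdev");
    if (rc < 0)
        goto err_release_mem;
    rc = acquire("iommu_src");
    if (rc < 0)
        goto err_unregister_sd;
    rc = acquire("iommu_dst");
    if (rc < 0)
        goto err_detach_src;
    return 0;                   /* everything held */

err_detach_src:
    release("iommu_src");
err_unregister_sd:
    release("subdev");
err_release_mem:
    release("mem");
    return rc;                  /* only what succeeded is undone */
}

int main(void) { return probe_sketch(); }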
  11808. diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
  11809. index 7c6e4f0..e5d82fd 100644
  11810. --- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
  11811. +++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
  11812. @@ -43,96 +43,87 @@
  11813.  
  11814. /* Max bytes that can be read per CCI read transaction */
  11815. #define CCI_READ_MAX 12
  11816. +#define CCI_I2C_READ_MAX_RETRIES 3
  11817. +#define CCI_I2C_MAX_READ 8192
  11818. +#define CCI_I2C_MAX_WRITE 8192
  11819.  
  11820. static struct v4l2_subdev *g_cci_subdev;
  11821.  
  11822. static void msm_cci_set_clk_param(struct cci_device *cci_dev)
  11823. {
  11824. - struct msm_cci_clk_params_t *clk_params = &cci_dev->cci_clk_params;
  11825. -
  11826. - msm_camera_io_w(clk_params->hw_thigh << 16 | clk_params->hw_tlow,
  11827. - cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
  11828. - msm_camera_io_w(clk_params->hw_tsu_sto << 16 | clk_params->hw_tsu_sta,
  11829. - cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
  11830. - msm_camera_io_w(clk_params->hw_thd_dat << 16 | clk_params->hw_thd_sta,
  11831. - cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
  11832. - msm_camera_io_w(clk_params->hw_tbuf,
  11833. - cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
  11834. - msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
  11835. - clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
  11836. - cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
  11837. - msm_camera_io_w(clk_params->hw_thigh << 16 | clk_params->hw_tlow,
  11838. - cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
  11839. - msm_camera_io_w(clk_params->hw_tsu_sto << 16 | clk_params->hw_tsu_sta,
  11840. - cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
  11841. - msm_camera_io_w(clk_params->hw_thd_dat << 16 | clk_params->hw_thd_sta,
  11842. - cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
  11843. - msm_camera_io_w(clk_params->hw_tbuf,
  11844. - cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
  11845. - msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
  11846. - clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
  11847. - cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
  11848. + struct msm_cci_clk_params_t *clk_params = NULL;
  11849. + uint8_t count = 0;
  11850. +
  11851. + for (count = 0; count < MASTER_MAX; count++) {
  11852. + if (MASTER_0 == count) {
  11853. + clk_params = &cci_dev->cci_clk_params[count];
  11854. + msm_camera_io_w(clk_params->hw_thigh << 16 |
  11855. + clk_params->hw_tlow,
  11856. + cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
  11857. + msm_camera_io_w(clk_params->hw_tsu_sto << 16 |
  11858. + clk_params->hw_tsu_sta,
  11859. + cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
  11860. + msm_camera_io_w(clk_params->hw_thd_dat << 16 |
  11861. + clk_params->hw_thd_sta,
  11862. + cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
  11863. + msm_camera_io_w(clk_params->hw_tbuf,
  11864. + cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
  11865. + msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
  11866. + clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
  11867. + cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
  11868. + } else if (MASTER_1 == count) {
  11869. + clk_params = &cci_dev->cci_clk_params[count];
  11870. + msm_camera_io_w(clk_params->hw_thigh << 16 |
  11871. + clk_params->hw_tlow,
  11872. + cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
  11873. + msm_camera_io_w(clk_params->hw_tsu_sto << 16 |
  11874. + clk_params->hw_tsu_sta,
  11875. + cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
  11876. + msm_camera_io_w(clk_params->hw_thd_dat << 16 |
  11877. + clk_params->hw_thd_sta,
  11878. + cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
  11879. + msm_camera_io_w(clk_params->hw_tbuf,
  11880. + cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
  11881. + msm_camera_io_w(clk_params->hw_scl_stretch_en << 8 |
  11882. + clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
  11883. + cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
  11884. + }
  11885. + }
  11886. return;
  11887. }
  11888.  
  11889. -static int32_t msm_cci_i2c_config_sync_timer(struct v4l2_subdev *sd,
  11890. - struct msm_camera_cci_ctrl *c_ctrl)
  11891. -{
  11892. - struct cci_device *cci_dev;
  11893. - cci_dev = v4l2_get_subdevdata(sd);
  11894. - msm_camera_io_w(c_ctrl->cci_info->cid, cci_dev->base +
  11895. - CCI_SET_CID_SYNC_TIMER_0_ADDR + (c_ctrl->cci_info->cid * 0x4));
  11896. - return 0;
  11897. -}
  11898. -
  11899. -static int32_t msm_cci_i2c_set_freq(struct v4l2_subdev *sd,
  11900. - struct msm_camera_cci_ctrl *c_ctrl)
  11901. -{
  11902. - struct cci_device *cci_dev;
  11903. - uint32_t val;
  11904. - cci_dev = v4l2_get_subdevdata(sd);
  11905. - val = c_ctrl->cci_info->freq;
  11906. - msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR +
  11907. - c_ctrl->cci_info->cci_i2c_master*0x100);
  11908. - msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR +
  11909. - c_ctrl->cci_info->cci_i2c_master*0x100);
  11910. - msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR +
  11911. - c_ctrl->cci_info->cci_i2c_master*0x100);
  11912. - msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR +
  11913. - c_ctrl->cci_info->cci_i2c_master*0x100);
  11914. - msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR +
  11915. - c_ctrl->cci_info->cci_i2c_master*0x100);
  11916. - return 0;
  11917. -}
  11918. -
  11919. static void msm_cci_flush_queue(struct cci_device *cci_dev,
  11920. enum cci_i2c_master_t master)
  11921. {
  11922. - uint32_t rc = 0;
  11923. + int32_t rc = 0;
  11924.  
  11925. msm_camera_io_w(1 << master, cci_dev->base + CCI_HALT_REQ_ADDR);
  11926. - rc = wait_for_completion_interruptible_timeout(
  11927. + rc = wait_for_completion_timeout(
  11928. &cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
  11929. if (rc < 0) {
  11930. - pr_err("%s: wait failed %d\n", __func__, __LINE__);
  11931. - } else if (rc == 0) {
  11932. - pr_err("%s:%d wait timeout\n", __func__, __LINE__);
  11933. - /* Set reset pending flag to TRUE */
  11934. - cci_dev->cci_master_info[master].reset_pending = TRUE;
  11935. - /* Set proper mask to RESET CMD address based on MASTER */
  11936. - if (master == MASTER_0)
  11937. - msm_camera_io_w(CCI_M0_RESET_RMSK,
  11938. - cci_dev->base + CCI_RESET_CMD_ADDR);
  11939. - else
  11940. - msm_camera_io_w(CCI_M1_RESET_RMSK,
  11941. - cci_dev->base + CCI_RESET_CMD_ADDR);
  11942. - /* wait for reset done irq */
  11943. - rc = wait_for_completion_interruptible_timeout(
  11944. - &cci_dev->cci_master_info[master].reset_complete,
  11945. - CCI_TIMEOUT);
  11946. - if (rc <= 0)
  11947. - pr_err("%s:%d wait failed %d\n", __func__, __LINE__, rc);
  11948. - }
  11949. + pr_err("%s:%d wait failed\n", __func__, __LINE__);
  11950. + } else if (rc == 0) {
  11951. + pr_err("%s:%d wait timeout\n", __func__, __LINE__);
  11952. +
  11953. + /* Set reset pending flag to TRUE */
  11954. + cci_dev->cci_master_info[master].reset_pending = TRUE;
  11955. +
  11956. + /* Set proper mask to RESET CMD address based on MASTER */
  11957. + if (master == MASTER_0)
  11958. + msm_camera_io_w(CCI_M0_RESET_RMSK,
  11959. + cci_dev->base + CCI_RESET_CMD_ADDR);
  11960. + else
  11961. + msm_camera_io_w(CCI_M1_RESET_RMSK,
  11962. + cci_dev->base + CCI_RESET_CMD_ADDR);
  11963. +
  11964. + /* wait for reset done irq */
  11965. + rc = wait_for_completion_timeout(
  11966. + &cci_dev->cci_master_info[master].reset_complete,
  11967. + CCI_TIMEOUT);
  11968. + if (rc <= 0)
  11969. + pr_err("%s:%d wait failed %d\n", __func__, __LINE__,
  11970. + rc);
  11971. + }
  11972. return;
  11973. }
  11974.  
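The flush/reset path above (and the other hunks in this file) switches from wait_for_completion_interruptible_timeout() to wait_for_completion_timeout(). The interruptible variant returns a signed value and can come back early with -ERESTARTSYS when the waiting task is signalled; the plain variant returns 0 on timeout or the number of jiffies remaining on success, never a negative number, so after the change the rc < 0 branch is effectively dead and the recovery work hangs off the rc == 0 (timeout) case. A kernel-style sketch of the convention, with a placeholder completion and timeout:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Sketch only: 'done' stands in for the per-master reset_complete and
 * the 100 ms value for CCI_TIMEOUT. */
static int wait_sketch(struct completion *done)
{
    unsigned long left;

    /* 0 => timed out; > 0 => jiffies left; never negative. */
    left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
    if (!left)
        return -ETIMEDOUT;

    return 0;
}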
  11975. @@ -167,10 +158,10 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
  11976. msm_camera_io_w(reg_val, cci_dev->base + CCI_QUEUE_START_ADDR);
  11977. CDBG("%s line %d wait_for_completion_interruptible\n",
  11978. __func__, __LINE__);
  11979. - rc = wait_for_completion_interruptible_timeout(&cci_dev->
  11980. + rc = wait_for_completion_timeout(&cci_dev->
  11981. cci_master_info[master].reset_complete, CCI_TIMEOUT);
  11982. if (rc <= 0) {
  11983. - pr_err("%s: wait_for_completion_interruptible_timeout %d\n",
  11984. + pr_err("%s: wait_for_completion_timeout %d\n",
  11985. __func__, __LINE__);
  11986. if (rc == 0)
  11987. rc = -ETIMEDOUT;
  11988. @@ -185,8 +176,7 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
  11989. }
  11990.  
  11991. static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
  11992. - struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
  11993. - uint8_t is_burst)
  11994. + struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue)
  11995. {
  11996. uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
  11997. int32_t rc = 0;
  11998. @@ -198,17 +188,46 @@ static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
  11999. uint16_t cmd_size = i2c_msg->size;
  12000. struct msm_camera_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting;
  12001. enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
  12002. +
  12003. + if (i2c_cmd == NULL) {
  12004. + pr_err("%s:%d Failed line\n", __func__,
  12005. + __LINE__);
  12006. + return -EINVAL;
  12007. + }
  12008. +
  12009. + if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
  12010. + pr_err("%s:%d Failed line\n", __func__, __LINE__);
  12011. + return -EINVAL;
  12012. + }
  12013. +
  12014. CDBG("%s addr type %d data type %d\n", __func__,
  12015. i2c_msg->addr_type, i2c_msg->data_type);
  12016.  
  12017. - /* assume total size within the max queue */
  12018. + if (i2c_msg->addr_type >= MSM_CAMERA_I2C_ADDR_TYPE_MAX) {
  12019. + pr_err("%s failed line %d\n", __func__, __LINE__);
  12020. + return -EINVAL;
  12021. + }
  12022. + if (i2c_msg->data_type >= MSM_CAMERA_I2C_DATA_TYPE_MAX) {
  12023. + pr_err("%s failed line %d\n", __func__, __LINE__);
  12024. + return -EINVAL;
  12025. + }
  12026. +
  12027. + reg_addr = i2c_cmd->reg_addr;
  12028. while (cmd_size) {
  12029. - CDBG("%s cmd_size %d addr 0x%x data 0x%x", __func__,
  12030. + CDBG("%s cmd_size %d addr 0x%x data 0x%x\n", __func__,
  12031. cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
  12032. delay = i2c_cmd->delay;
  12033. data[i++] = CCI_I2C_WRITE_CMD;
  12034. - if (i2c_cmd->reg_addr)
  12035. +
12036. + /* In case of multiple commands:
12037. + * MSM_CCI_I2C_WRITE : the address is not contiguous, so reload
12038. + * the address for each new packet.
12039. + * MSM_CCI_I2C_WRITE_SEQ : the address is contiguous, so keep
12040. + * the incremented address for the
12041. + * next packet */
  12042. + if (c_ctrl->cmd == MSM_CCI_I2C_WRITE)
  12043. reg_addr = i2c_cmd->reg_addr;
  12044. +
  12045. /* either byte or word addr */
  12046. if (i2c_msg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
  12047. data[i++] = reg_addr;
  12048. @@ -218,24 +237,24 @@ static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
  12049. }
  12050. /* max of 10 data bytes */
  12051. do {
  12052. - if ((i2c_msg->data_type == MSM_CAMERA_I2C_BYTE_DATA) ||
  12053. - (i2c_msg->data_type == MSM_CAMERA_I2C_BURST_DATA)) {
  12054. + if (i2c_msg->data_type == MSM_CAMERA_I2C_BYTE_DATA) {
  12055. data[i++] = i2c_cmd->reg_data;
  12056. - if (!is_burst)
  12057. - reg_addr++;
  12058. + reg_addr++;
  12059. } else {
  12060. if ((i + 1) <= 10) {
  12061. data[i++] = (i2c_cmd->reg_data &
  12062. 0xFF00) >> 8; /* MSB */
  12063. data[i++] = i2c_cmd->reg_data &
  12064. 0x00FF; /* LSB */
  12065. - if (!is_burst)
  12066. - reg_addr += 2;
  12067. + reg_addr += 2;
  12068. } else
  12069. break;
  12070. }
  12071. i2c_cmd++;
  12072. - } while (--cmd_size && !i2c_cmd->reg_addr && (i <= 10));
  12073. + --cmd_size;
  12074. + } while ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) &&
  12075. + (cmd_size > 0) && (i <= 10));
  12076. +
  12077. data[0] |= ((i-1) << 4);
  12078. len = ((i-1)/4) + 1;
  12079. rc = msm_cci_validate_queue(cci_dev, len, master, queue);
  12080. @@ -249,14 +268,10 @@ static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
  12081. cmd |= (data[k++] << (j * 8));
  12082. CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
  12083. __func__, cmd);
  12084. -
  12085. msm_camera_io_w(cmd, cci_dev->base +
  12086. CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
  12087. master * 0x200 + queue * 0x100);
  12088. }
  12089. - if (delay > CCI_MAX_DELAY) {
  12090. - pr_err("%s:%d invalid delay %d\n", __func__, __LINE__, delay);
  12091. - }
  12092. if ((delay > 0) && (delay < CCI_MAX_DELAY)) {
  12093. cmd = (uint32_t)((delay * CYCLES_PER_MICRO_SEC) /
  12094. 0x100);
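For context on the queue write just above: the loop assembles the transaction as a byte stream in data[], stores the payload length in the upper nibble of the first command byte (data[0] |= ((i-1) << 4)), and then writes the bytes to the LOAD_DATA register four at a time, least-significant byte first, which is why len is ((i-1)/4) + 1 words. A small, compilable sketch of that packing with made-up bytes:

#include <stdint.h>
#include <stdio.h>

/* Pack n command bytes into 32-bit words, LSB first, the same way the
 * queue loop above builds values for CCI_I2C_M0_Q0_LOAD_DATA_ADDR. */
static void pack_words(const uint8_t *data, int n)
{
    int i = 0, j;

    while (i < n) {
        uint32_t cmd = 0;

        for (j = 0; j < 4 && i < n; j++)
            cmd |= (uint32_t)data[i++] << (j * 8);
        printf("word 0x%08x\n", cmd);
    }
}

int main(void)
{
    /* opcode-with-length byte, 16-bit address, two data bytes (made up) */
    uint8_t data[] = { 0x49, 0x30, 0x12, 0xab, 0xcd };

    pack_words(data, (int)sizeof(data));
    return 0;
}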
  12095. @@ -297,7 +312,7 @@ static int32_t msm_cci_write_i2c_queue(struct cci_device *cci_dev,
  12096. static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
  12097. struct msm_camera_cci_ctrl *c_ctrl)
  12098. {
  12099. - uint32_t rc = 0;
  12100. + int32_t rc = 0;
  12101. uint32_t val = 0;
  12102. int32_t read_words = 0, exp_words = 0;
  12103. int32_t index = 0, first_byte = 0;
  12104. @@ -310,7 +325,7 @@ static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
  12105. cci_dev = v4l2_get_subdevdata(sd);
  12106. master = c_ctrl->cci_info->cci_i2c_master;
  12107. read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
  12108. - mutex_lock(&cci_dev->mutex);
  12109. + mutex_lock(&cci_dev->cci_master_info[master].mutex);
  12110.  
  12111. /*
  12112. * Call validate queue to make sure queue is empty before starting.
  12113. @@ -326,6 +341,18 @@ static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
  12114. goto ERROR;
  12115. }
  12116.  
  12117. + if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
  12118. + pr_err("%s:%d More than max retries\n", __func__,
  12119. + __LINE__);
  12120. + goto ERROR;
  12121. + }
  12122. +
  12123. + if (read_cfg->data == NULL) {
  12124. + pr_err("%s:%d Data ptr is NULL\n", __func__,
  12125. + __LINE__);
  12126. + goto ERROR;
  12127. + }
  12128. +
  12129. CDBG("%s master %d, queue %d\n", __func__, master, queue);
  12130. CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
  12131. c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
  12132. @@ -346,11 +373,16 @@ static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
  12133. goto ERROR;
  12134. }
  12135.  
  12136. + if (read_cfg->addr_type >= MSM_CAMERA_I2C_ADDR_TYPE_MAX) {
  12137. + CDBG("%s failed line %d\n", __func__, __LINE__);
  12138. + goto ERROR;
  12139. + }
  12140. +
  12141. if (read_cfg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
  12142. - val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
  12143. + val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4) |
  12144. ((read_cfg->addr & 0xFF) << 8);
  12145. if (read_cfg->addr_type == MSM_CAMERA_I2C_WORD_ADDR)
  12146. - val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
  12147. + val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4) |
  12148. (((read_cfg->addr & 0xFF00) >> 8) << 8) |
  12149. ((read_cfg->addr & 0xFF) << 16);
  12150. rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
  12151. @@ -381,12 +413,12 @@ static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
  12152.  
  12153. val = 1 << ((master * 2) + queue);
  12154. msm_camera_io_w(val, cci_dev->base + CCI_QUEUE_START_ADDR);
  12155. - CDBG("%s:%d E wait_for_completion_interruptible_timeout\n", __func__,
  12156. + CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
  12157. __LINE__);
  12158. - rc = wait_for_completion_interruptible_timeout(&cci_dev->
  12159. + rc = wait_for_completion_timeout(&cci_dev->
  12160. cci_master_info[master].reset_complete, CCI_TIMEOUT);
  12161. if (rc <= 0) {
  12162. - pr_err("%s: wait_for_completion_interruptible_timeout %d\n",
  12163. + pr_err("%s: wait_for_completion_timeout %d\n",
  12164. __func__, __LINE__);
  12165. if (rc == 0)
  12166. rc = -ETIMEDOUT;
  12167. @@ -395,7 +427,7 @@ static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
  12168. } else {
  12169. rc = 0;
  12170. }
  12171. - CDBG("%s:%d E wait_for_completion_interruptible_timeout\n", __func__,
  12172. + CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
  12173. __LINE__);
  12174.  
  12175. read_words = msm_camera_io_r(cci_dev->base +
  12176. @@ -431,7 +463,7 @@ static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
  12177. }
  12178. } while (--read_words > 0);
  12179. ERROR:
  12180. - mutex_unlock(&cci_dev->mutex);
  12181. + mutex_unlock(&cci_dev->cci_master_info[master].mutex);
  12182. return rc;
  12183. }
  12184.  
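The read path above now serializes on the per-master lock, cci_dev->cci_master_info[master].mutex, instead of the single device-wide cci_dev->mutex, so a transaction on MASTER_0 no longer blocks one on MASTER_1. A sketch of the per-master bookkeeping this implies; the field names follow the code above, the exact layout is assumed:

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/types.h>

struct cci_master_info_sketch {
    u32 status;                        /* set by the IRQ handler */
    struct mutex mutex;                /* serializes one master only */
    struct completion reset_complete;  /* completed from msm_cci_irq() */
};

/* Each transaction then brackets only its own master:
 *     mutex_lock(&cci_dev->cci_master_info[master].mutex);
 *     ... queue commands, wait_for_completion_timeout(...) ...
 *     mutex_unlock(&cci_dev->cci_master_info[master].mutex);
 */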
  12185. @@ -459,9 +491,15 @@ static int32_t msm_cci_i2c_read_bytes(struct v4l2_subdev *sd,
  12186. return -EINVAL;
  12187. }
  12188.  
  12189. + if (c_ctrl->cci_info->cci_i2c_master > MASTER_MAX
  12190. + || c_ctrl->cci_info->cci_i2c_master < 0) {
  12191. + pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
  12192. + return -EINVAL;
  12193. + }
  12194. +
  12195. master = c_ctrl->cci_info->cci_i2c_master;
  12196. read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
  12197. - if (!read_cfg->num_byte) {
  12198. + if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
  12199. pr_err("%s:%d read num bytes 0\n", __func__, __LINE__);
  12200. rc = -EINVAL;
  12201. goto ERROR;
  12202. @@ -491,7 +529,7 @@ ERROR:
  12203. }
  12204.  
  12205. static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
  12206. - struct msm_camera_cci_ctrl *c_ctrl, uint8_t is_burst)
  12207. + struct msm_camera_cci_ctrl *c_ctrl)
  12208. {
  12209. int32_t rc = 0;
  12210. struct cci_device *cci_dev;
  12211. @@ -499,12 +537,17 @@ static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
  12212. enum cci_i2c_master_t master;
  12213. enum cci_i2c_queue_t queue = QUEUE_0;
  12214. cci_dev = v4l2_get_subdevdata(sd);
  12215. + if (c_ctrl->cci_info->cci_i2c_master > MASTER_MAX
  12216. + || c_ctrl->cci_info->cci_i2c_master < 0) {
  12217. + pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
  12218. + return -EINVAL;
  12219. + }
  12220. master = c_ctrl->cci_info->cci_i2c_master;
  12221. CDBG("%s master %d, queue %d\n", __func__, master, queue);
  12222. CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
  12223. c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
  12224. c_ctrl->cci_info->id_map);
  12225. - mutex_lock(&cci_dev->mutex);
  12226. + mutex_lock(&cci_dev->cci_master_info[master].mutex);
  12227.  
  12228. /*
  12229. * Call validate queue to make sure queue is empty before starting.
  12230. @@ -519,6 +562,11 @@ static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
  12231. __LINE__, rc);
  12232. goto ERROR;
  12233. }
  12234. + if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
  12235. + pr_err("%s:%d More than max retries\n", __func__,
  12236. + __LINE__);
  12237. + goto ERROR;
  12238. + }
  12239.  
  12240. val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
  12241. c_ctrl->cci_info->retries << 16 |
  12242. @@ -538,7 +586,11 @@ static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
  12243. goto ERROR;
  12244. }
  12245.  
  12246. - msm_cci_data_queue(cci_dev, c_ctrl, queue, is_burst);
  12247. + rc = msm_cci_data_queue(cci_dev, c_ctrl, queue);
  12248. + if (rc < 0) {
  12249. + CDBG("%s failed line %d\n", __func__, __LINE__);
  12250. + goto ERROR;
  12251. + }
  12252. val = CCI_I2C_UNLOCK_CMD;
  12253. CDBG("%s:%d CCI_I2C_UNLOCK_CMD\n", __func__, __LINE__);
  12254. rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
  12255. @@ -568,10 +620,10 @@ static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
  12256.  
  12257. CDBG("%s:%d E wait_for_completion_interruptible\n",
  12258. __func__, __LINE__);
  12259. - rc = wait_for_completion_interruptible_timeout(&cci_dev->
  12260. + rc = wait_for_completion_timeout(&cci_dev->
  12261. cci_master_info[master].reset_complete, CCI_TIMEOUT);
  12262. if (rc <= 0) {
  12263. - pr_err("%s: wait_for_completion_interruptible_timeout %d\n",
  12264. + pr_err("%s: wait_for_completion_timeout %d\n",
  12265. __func__, __LINE__);
  12266. if (rc == 0)
  12267. rc = -ETIMEDOUT;
  12268. @@ -584,14 +636,18 @@ static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
  12269. __LINE__);
  12270.  
  12271. ERROR:
  12272. - mutex_unlock(&cci_dev->mutex);
  12273. + mutex_unlock(&cci_dev->cci_master_info[master].mutex);
  12274. return rc;
  12275. }
  12276.  
  12277. static int msm_cci_subdev_g_chip_ident(struct v4l2_subdev *sd,
  12278. struct v4l2_dbg_chip_ident *chip)
  12279. {
  12280. - BUG_ON(!chip);
  12281. + if (!chip) {
  12282. + pr_err("%s:%d: NULL pointer supplied for chip ident\n",
  12283. + __func__, __LINE__);
  12284. + return -EINVAL;
  12285. + }
  12286. chip->ident = V4L2_IDENT_CCI;
  12287. chip->revision = 0;
  12288. return 0;
  12289. @@ -605,34 +661,29 @@ static struct msm_cam_clk_info cci_clk_info[] = {
  12290. };
  12291.  
  12292. static int32_t msm_cci_init(struct v4l2_subdev *sd,
  12293. - struct msm_camera_cci_ctrl *c_ctrl){
  12294. - int rc = 0;
  12295. + struct msm_camera_cci_ctrl *c_ctrl)
  12296. +{
  12297. + int32_t rc = 0;
  12298. struct cci_device *cci_dev;
  12299. enum cci_i2c_master_t master;
  12300. cci_dev = v4l2_get_subdevdata(sd);
  12301. - CDBG("%s line %d\n", __func__, __LINE__);
  12302. -
  12303.  
  12304. if (!cci_dev || !c_ctrl) {
  12305. pr_err("%s:%d failed: invalid params %p %p\n", __func__,
  12306. __LINE__, cci_dev, c_ctrl);
  12307. -
  12308. rc = -ENOMEM;
  12309. return rc;
  12310. }
  12311. -
  12312. - mutex_lock(&cci_dev->mutex);
  12313.  
  12314. if (cci_dev->ref_count++) {
  12315. CDBG("%s ref_count %d\n", __func__, cci_dev->ref_count);
  12316. -
  12317. master = c_ctrl->cci_info->cci_i2c_master;
  12318. - CDBG("%s:%d master %d\n", __func__, __LINE__,
  12319. - master);
  12320. - if (master < MASTER_MAX) {
  12321. + CDBG("%s:%d master %d\n", __func__, __LINE__, master);
  12322. + if (master < MASTER_MAX && master >= 0) {
  12323. + mutex_lock(&cci_dev->cci_master_info[master].mutex);
  12324. /* Set reset pending flag to TRUE */
  12325. cci_dev->cci_master_info[master].reset_pending = TRUE;
  12326. - /* Set proper mask to RESET CMD address based on MASTER */
  12327. + /* Set proper mask to RESET CMD address */
  12328. if (master == MASTER_0)
  12329. msm_camera_io_w(CCI_M0_RESET_RMSK,
  12330. cci_dev->base + CCI_RESET_CMD_ADDR);
  12331. @@ -640,30 +691,32 @@ static int32_t msm_cci_init(struct v4l2_subdev *sd,
  12332. msm_camera_io_w(CCI_M1_RESET_RMSK,
  12333. cci_dev->base + CCI_RESET_CMD_ADDR);
  12334. /* wait for reset done irq */
  12335. - rc = wait_for_completion_interruptible_timeout(
  12336. - &cci_dev->cci_master_info[master].reset_complete,
  12337. + rc = wait_for_completion_timeout(
  12338. + &cci_dev->cci_master_info[master].
  12339. + reset_complete,
  12340. CCI_TIMEOUT);
  12341. if (rc <= 0)
  12342. - pr_err("%s:%d wait failed %d\n", __func__, __LINE__, rc);
  12343. + pr_err("%s:%d wait failed %d\n", __func__,
  12344. + __LINE__, rc);
  12345. + mutex_unlock(&cci_dev->cci_master_info[master].mutex);
  12346. }
  12347. - mutex_unlock(&cci_dev->mutex);
  12348. -
  12349. return 0;
  12350. }
  12351.  
  12352. rc = msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
  12353. cci_dev->cci_gpio_tbl_size, 1);
  12354. if (rc < 0) {
  12355. + cci_dev->ref_count--;
  12356. CDBG("%s: request gpio failed\n", __func__);
  12357. - goto ERROR;
  12358. + goto request_gpio_failed;
  12359. }
  12360.  
  12361. rc = msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
  12362. cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 1);
  12363. if (rc < 0) {
  12364. -
  12365. + cci_dev->ref_count--;
  12366. CDBG("%s: clk enable failed\n", __func__);
  12367. - goto ERROR;
  12368. + goto clk_enable_failed;
  12369. }
  12370.  
  12371. enable_irq(cci_dev->irq->start);
  12372. @@ -672,15 +725,15 @@ static int32_t msm_cci_init(struct v4l2_subdev *sd,
  12373. cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
  12374. msm_camera_io_w(CCI_RESET_CMD_RMSK, cci_dev->base + CCI_RESET_CMD_ADDR);
  12375. msm_camera_io_w(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
  12376. - rc = wait_for_completion_interruptible_timeout(
  12377. + rc = wait_for_completion_timeout(
  12378. &cci_dev->cci_master_info[MASTER_0].reset_complete,
  12379. CCI_TIMEOUT);
  12380. if (rc <= 0) {
  12381. - pr_err("%s: wait_for_completion_interruptible_timeout %d\n",
  12382. + pr_err("%s: wait_for_completion_timeout %d\n",
  12383. __func__, __LINE__);
  12384. if (rc == 0)
  12385. rc = -ETIMEDOUT;
  12386. - goto ERROR;
  12387. + goto reset_complete_failed;
  12388. }
  12389. msm_cci_set_clk_param(cci_dev);
  12390. msm_camera_io_w(CCI_IRQ_MASK_0_RMSK,
  12391. @@ -689,13 +742,18 @@ static int32_t msm_cci_init(struct v4l2_subdev *sd,
  12392. cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
  12393. msm_camera_io_w(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
  12394. cci_dev->cci_state = CCI_STATE_ENABLED;
  12395. - CDBG("%s:%d Exit\n", __func__, __LINE__);
  12396. - mutex_unlock(&cci_dev->mutex);
  12397. +
  12398. return 0;
  12399.  
  12400. -ERROR:
  12401. +reset_complete_failed:
  12402. + disable_irq(cci_dev->irq->start);
  12403. + msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
  12404. + cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 0);
  12405. +clk_enable_failed:
  12406. + msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
  12407. + cci_dev->cci_gpio_tbl_size, 0);
  12408. +request_gpio_failed:
  12409. cci_dev->ref_count--;
  12410. - mutex_unlock(&cci_dev->mutex);
  12411. return rc;
  12412. }
  12413.  
  12414. @@ -703,7 +761,6 @@ static int32_t msm_cci_release(struct v4l2_subdev *sd)
  12415. {
  12416. struct cci_device *cci_dev;
  12417. cci_dev = v4l2_get_subdevdata(sd);
  12418. - CDBG("%s:%d Enter\n", __func__, __LINE__);
  12419.  
  12420. if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
  12421. pr_err("%s invalid ref count %d / cci state %d\n",
  12422. @@ -711,11 +768,8 @@ static int32_t msm_cci_release(struct v4l2_subdev *sd)
  12423. return -EINVAL;
  12424. }
  12425.  
  12426. - mutex_lock(&cci_dev->mutex);
  12427. -
  12428. if (--cci_dev->ref_count) {
  12429. CDBG("%s ref_count Exit %d\n", __func__, cci_dev->ref_count);
  12430. - mutex_unlock(&cci_dev->mutex);
  12431. return 0;
  12432. }
  12433.  
  12434. @@ -728,8 +782,7 @@ static int32_t msm_cci_release(struct v4l2_subdev *sd)
  12435. cci_dev->cci_gpio_tbl_size, 0);
  12436.  
  12437. cci_dev->cci_state = CCI_STATE_DISABLED;
  12438. - CDBG("%s:%d Exit\n", __func__, __LINE__);
  12439. - mutex_unlock(&cci_dev->mutex);
  12440. +
  12441. return 0;
  12442. }
  12443.  
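msm_cci_init()/msm_cci_release() above implement a first-open/last-close scheme: the full bring-up (GPIOs, clocks, IRQ, block reset) runs only when ref_count goes from 0 to 1, later callers merely reset their own master, and the hardware is powered down only when the count drops back to 0; the added ref_count-- on each failure path keeps the count balanced. A compact, runnable sketch of the pattern with hypothetical hw_up()/hw_down() helpers:

#include <stdio.h>

static int ref_count;

static int hw_up(void)    { printf("power up\n");   return 0; }
static void hw_down(void) { printf("power down\n"); }

static int init_sketch(void)
{
    int rc;

    if (ref_count++)        /* not the first user: hardware already up */
        return 0;

    rc = hw_up();
    if (rc < 0)
        ref_count--;        /* undo the count if bring-up failed */
    return rc;
}

static int release_sketch(void)
{
    if (!ref_count)
        return -1;          /* unbalanced release */
    if (--ref_count)
        return 0;           /* other users remain */
    hw_down();              /* last user powers the block down */
    return 0;
}

int main(void)
{
    init_sketch();
    init_sketch();
    release_sketch();
    release_sketch();
    return 0;
}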
  12444. @@ -746,22 +799,12 @@ static int32_t msm_cci_config(struct v4l2_subdev *sd,
  12445. case MSM_CCI_RELEASE:
  12446. rc = msm_cci_release(sd);
  12447. break;
  12448. - case MSM_CCI_SET_SID:
  12449. - break;
  12450. - case MSM_CCI_SET_FREQ:
  12451. - rc = msm_cci_i2c_set_freq(sd, cci_ctrl);
  12452. - break;
  12453. - case MSM_CCI_SET_SYNC_CID:
  12454. - rc = msm_cci_i2c_config_sync_timer(sd, cci_ctrl);
  12455. - break;
  12456. case MSM_CCI_I2C_READ:
  12457. rc = msm_cci_i2c_read_bytes(sd, cci_ctrl);
  12458. break;
  12459. case MSM_CCI_I2C_WRITE:
  12460. - rc = msm_cci_i2c_write(sd, cci_ctrl, 0);
  12461. - break;
  12462. - case MSM_CCI_I2C_WRITE_BURST:
  12463. - rc = msm_cci_i2c_write(sd, cci_ctrl, 1);
  12464. + case MSM_CCI_I2C_WRITE_SEQ:
  12465. + rc = msm_cci_i2c_write(sd, cci_ctrl);
  12466. break;
  12467. case MSM_CCI_GPIO_WRITE:
  12468. break;
  12469. @@ -795,36 +838,36 @@ static irqreturn_t msm_cci_irq(int irq_num, void *data)
  12470. complete(&cci_dev->cci_master_info[MASTER_1].
  12471. reset_complete);
  12472. }
  12473. - }
  12474. - if ((irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) ||
  12475. + }
  12476. + if ((irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) ||
  12477. (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) ||
  12478. (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK)) {
  12479. cci_dev->cci_master_info[MASTER_0].status = 0;
  12480. complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
  12481. - }
  12482. - if ((irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) ||
  12483. + }
  12484. + if ((irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) ||
  12485. (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) ||
  12486. (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK)) {
  12487. cci_dev->cci_master_info[MASTER_1].status = 0;
  12488. complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
  12489. - }
  12490. - if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
  12491. + }
  12492. + if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
  12493. cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
  12494. msm_camera_io_w(CCI_M0_RESET_RMSK,
  12495. cci_dev->base + CCI_RESET_CMD_ADDR);
  12496. - }
  12497. - if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
  12498. + }
  12499. + if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
  12500. cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
  12501. msm_camera_io_w(CCI_M1_RESET_RMSK,
  12502. cci_dev->base + CCI_RESET_CMD_ADDR);
  12503. - }
  12504. - if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
  12505. + }
  12506. + if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
  12507. pr_err("%s:%d MASTER_0 error %x\n", __func__, __LINE__, irq);
  12508. cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
  12509. msm_camera_io_w(CCI_M0_HALT_REQ_RMSK,
  12510. cci_dev->base + CCI_HALT_REQ_ADDR);
  12511. - }
  12512. - if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
  12513. + }
  12514. + if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
  12515. pr_err("%s:%d MASTER_1 error %x\n", __func__, __LINE__, irq);
  12516. cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
  12517. msm_camera_io_w(CCI_M1_HALT_REQ_RMSK,
  12518. @@ -855,7 +898,7 @@ static long msm_cci_subdev_ioctl(struct v4l2_subdev *sd,
  12519. rc = msm_cci_config(sd, arg);
  12520. break;
  12521. case MSM_SD_SHUTDOWN: {
  12522. - break;
  12523. + return rc;
  12524. }
  12525. default:
  12526. rc = -ENOIOCTLCMD;
  12527. @@ -881,7 +924,7 @@ static void msm_cci_init_cci_params(struct cci_device *new_cci_dev)
  12528. uint8_t i = 0, j = 0;
  12529. for (i = 0; i < NUM_MASTERS; i++) {
  12530. new_cci_dev->cci_master_info[i].status = 0;
  12531. - mutex_init(&new_cci_dev->mutex);
  12532. + mutex_init(&new_cci_dev->cci_master_info[i].mutex);
  12533. init_completion(&new_cci_dev->
  12534. cci_master_info[i].reset_complete);
  12535. for (j = 0; j < NUM_QUEUES; j++) {
  12536. @@ -970,58 +1013,96 @@ static void msm_cci_init_clk_params(struct cci_device *cci_dev)
  12537. {
  12538. int32_t rc = 0;
  12539. uint32_t val = 0;
  12540. + uint8_t count = 0;
  12541. struct device_node *of_node = cci_dev->pdev->dev.of_node;
  12542. + struct device_node *src_node = NULL;
  12543. +
  12544. + for (count = 0; count < MASTER_MAX; count++) {
  12545. +
  12546. + if (MASTER_0 == count)
  12547. + src_node = of_find_node_by_name(of_node,
  12548. + "qcom,cci-master0");
  12549. + else if (MASTER_1 == count)
  12550. + src_node = of_find_node_by_name(of_node,
  12551. + "qcom,cci-master1");
  12552. + else
  12553. + return;
  12554. +
  12555. + rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
  12556. + CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
  12557. + if (!rc)
  12558. + cci_dev->cci_clk_params[count].hw_thigh = val;
  12559. + else
  12560. + cci_dev->cci_clk_params[count].hw_thigh = 78;
  12561. +
  12562. + rc = of_property_read_u32(src_node, "qcom,hw-tlow", &val);
  12563. + CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
  12564. + if (!rc)
  12565. + cci_dev->cci_clk_params[count].hw_tlow = val;
  12566. + else
  12567. + cci_dev->cci_clk_params[count].hw_tlow = 114;
  12568. +
  12569. + rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto", &val);
  12570. + CDBG("%s qcom,hw-tsu-sto %d, rc %d\n", __func__, val, rc);
  12571. + if (!rc)
  12572. + cci_dev->cci_clk_params[count].hw_tsu_sto = val;
  12573. + else
  12574. + cci_dev->cci_clk_params[count].hw_tsu_sto = 28;
  12575. +
  12576. + rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta", &val);
  12577. + CDBG("%s qcom,hw-tsu-sta %d, rc %d\n", __func__, val, rc);
  12578. + if (!rc)
  12579. + cci_dev->cci_clk_params[count].hw_tsu_sta = val;
  12580. + else
  12581. + cci_dev->cci_clk_params[count].hw_tsu_sta = 28;
  12582. +
  12583. + rc = of_property_read_u32(src_node, "qcom,hw-thd-dat", &val);
  12584. + CDBG("%s qcom,hw-thd-dat %d, rc %d\n", __func__, val, rc);
  12585. + if (!rc)
  12586. + cci_dev->cci_clk_params[count].hw_thd_dat = val;
  12587. + else
  12588. + cci_dev->cci_clk_params[count].hw_thd_dat = 10;
  12589. +
  12590. + rc = of_property_read_u32(src_node, "qcom,hw-thd-sta", &val);
12591. + CDBG("%s qcom,hw-thd-sta %d, rc %d\n", __func__, val, rc);
  12592. + if (!rc)
  12593. + cci_dev->cci_clk_params[count].hw_thd_sta = val;
  12594. + else
  12595. + cci_dev->cci_clk_params[count].hw_thd_sta = 77;
  12596.  
  12597. - rc = of_property_read_u32(of_node, "qcom,hw-thigh", &val);
  12598. - CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
  12599. - if (!rc)
  12600. - cci_dev->cci_clk_params.hw_thigh = val;
  12601. -
  12602. - rc = of_property_read_u32(of_node, "qcom,hw-tlow", &val);
  12603. - CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
  12604. - if (!rc)
  12605. - cci_dev->cci_clk_params.hw_tlow = val;
  12606. -
  12607. - rc = of_property_read_u32(of_node, "qcom,hw-tsu-sto", &val);
  12608. - CDBG("%s qcom,hw-tsu-sto %d, rc %d\n", __func__, val, rc);
  12609. - if (!rc)
  12610. - cci_dev->cci_clk_params.hw_tsu_sto = val;
  12611. -
  12612. - rc = of_property_read_u32(of_node, "qcom,hw-tsu-sta", &val);
  12613. - CDBG("%s qcom,hw-tsu-sta %d, rc %d\n", __func__, val, rc);
  12614. - if (!rc)
  12615. - cci_dev->cci_clk_params.hw_tsu_sta = val;
  12616. -
  12617. - rc = of_property_read_u32(of_node, "qcom,hw-thd-dat", &val);
  12618. - CDBG("%s qcom,hw-thd-dat %d, rc %d\n", __func__, val, rc);
  12619. - if (!rc)
  12620. - cci_dev->cci_clk_params.hw_thd_dat = val;
  12621. -
  12622. - rc = of_property_read_u32(of_node, "qcom,hw-thd-sta", &val);
  12623. - CDBG("%s qcom,hwthd-sta %d, rc %d\n", __func__, val, rc);
  12624. - if (!rc)
  12625. - cci_dev->cci_clk_params.hw_thd_sta = val;
  12626. -
  12627. - rc = of_property_read_u32(of_node, "qcom,hw-tbuf", &val);
  12628. - CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
  12629. - if (!rc)
  12630. - cci_dev->cci_clk_params.hw_tbuf = val;
  12631. -
  12632. - rc = of_property_read_u32(of_node, "qcom,hw-scl-stretch-en", &val);
  12633. - CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n", __func__, val, rc);
  12634. - if (!rc)
  12635. - cci_dev->cci_clk_params.hw_scl_stretch_en = val;
  12636. -
  12637. - rc = of_property_read_u32(of_node, "qcom,hw-trdhld", &val);
  12638. - CDBG("%s qcom,hw-trdhld %d, rc %d\n", __func__, val, rc);
  12639. - if (!rc)
  12640. - cci_dev->cci_clk_params.hw_trdhld = val;
  12641. -
  12642. - rc = of_property_read_u32(of_node, "qcom,hw-tsp", &val);
  12643. - CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
  12644. - if (!rc)
  12645. - cci_dev->cci_clk_params.hw_tsp = val;
  12646. + rc = of_property_read_u32(src_node, "qcom,hw-tbuf", &val);
  12647. + CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
  12648. + if (!rc)
  12649. + cci_dev->cci_clk_params[count].hw_tbuf = val;
  12650. + else
  12651. + cci_dev->cci_clk_params[count].hw_tbuf = 118;
  12652. +
  12653. + rc = of_property_read_u32(src_node,
  12654. + "qcom,hw-scl-stretch-en", &val);
  12655. + CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
  12656. + __func__, val, rc);
  12657. + if (!rc)
  12658. + cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
  12659. + else
  12660. + cci_dev->cci_clk_params[count].hw_scl_stretch_en = 0;
  12661.  
  12662. + rc = of_property_read_u32(src_node, "qcom,hw-trdhld", &val);
  12663. + CDBG("%s qcom,hw-trdhld %d, rc %d\n", __func__, val, rc);
  12664. + if (!rc)
  12665. + cci_dev->cci_clk_params[count].hw_trdhld = val;
  12666. + else
  12667. + cci_dev->cci_clk_params[count].hw_trdhld = 6;
  12668. +
  12669. + rc = of_property_read_u32(src_node, "qcom,hw-tsp", &val);
  12670. + CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
  12671. + if (!rc)
  12672. + cci_dev->cci_clk_params[count].hw_tsp = val;
  12673. + else
  12674. + cci_dev->cci_clk_params[count].hw_tsp = 1;
  12675. +
  12676. + of_node_put(src_node);
  12677. + src_node = NULL;
  12678. + }
  12679. return;
  12680. }
  12681.  
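The clock-parameter hunk above now looks up a per-master child node (qcom,cci-master0 / qcom,cci-master1) and falls back to a hard-coded default whenever a property is missing, so device trees that omit the new nodes still get working timings. A kernel-style sketch of the read-or-default pattern it repeats; the helper name is made up:

#include <linux/of.h>
#include <linux/types.h>

/* Read a u32 property or fall back to a default, as each block above does. */
static u32 of_read_u32_default(struct device_node *node,
                               const char *prop, u32 def)
{
    u32 val;

    if (of_property_read_u32(node, prop, &val))
        return def;     /* property absent or malformed */
    return val;
}

/* Usage against the per-master child node:
 *     src_node = of_find_node_by_name(of_node, "qcom,cci-master0");
 *     clk_params->hw_thigh = of_read_u32_default(src_node, "qcom,hw-thigh", 78);
 *     ...
 *     of_node_put(src_node);
 */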
  12682. @@ -1037,7 +1118,7 @@ static int __devinit msm_cci_probe(struct platform_device *pdev)
  12683. CDBG("%s: pdev %p device id = %d\n", __func__, pdev, pdev->id);
  12684. new_cci_dev = kzalloc(sizeof(struct cci_device), GFP_KERNEL);
  12685. if (!new_cci_dev) {
  12686. - pr_err("%s: no enough memory\n", __func__);
  12687. + CDBG("%s: no enough memory\n", __func__);
  12688. return -ENOMEM;
  12689. }
  12690. v4l2_subdev_init(&new_cci_dev->msm_sd.sd, &msm_cci_subdev_ops);
  12691. @@ -1054,7 +1135,7 @@ static int __devinit msm_cci_probe(struct platform_device *pdev)
  12692. new_cci_dev->mem = platform_get_resource_byname(pdev,
  12693. IORESOURCE_MEM, "cci");
  12694. if (!new_cci_dev->mem) {
  12695. - pr_err("%s: no mem resource?\n", __func__);
  12696. + CDBG("%s: no mem resource?\n", __func__);
  12697. rc = -ENODEV;
  12698. goto cci_no_resource;
  12699. }
  12700. @@ -1065,14 +1146,14 @@ static int __devinit msm_cci_probe(struct platform_device *pdev)
  12701. new_cci_dev->irq->start,
  12702. new_cci_dev->irq->end);
  12703. if (!new_cci_dev->irq) {
  12704. - pr_err("%s: no irq resource?\n", __func__);
  12705. + CDBG("%s: no irq resource?\n", __func__);
  12706. rc = -ENODEV;
  12707. goto cci_no_resource;
  12708. }
  12709. new_cci_dev->io = request_mem_region(new_cci_dev->mem->start,
  12710. resource_size(new_cci_dev->mem), pdev->name);
  12711. if (!new_cci_dev->io) {
  12712. - pr_err("%s: no valid mem region\n", __func__);
  12713. + CDBG("%s: no valid mem region\n", __func__);
  12714. rc = -EBUSY;
  12715. goto cci_no_resource;
  12716. }
  12717. @@ -1080,19 +1161,18 @@ static int __devinit msm_cci_probe(struct platform_device *pdev)
  12718. new_cci_dev->base = ioremap(new_cci_dev->mem->start,
  12719. resource_size(new_cci_dev->mem));
  12720. if (!new_cci_dev->base) {
  12721. - pr_err("%s : cci_dev base is NULL", __func__);
  12722. rc = -ENOMEM;
  12723. goto cci_release_mem;
  12724. }
  12725. rc = request_irq(new_cci_dev->irq->start, msm_cci_irq,
  12726. IRQF_TRIGGER_RISING, "cci", new_cci_dev);
  12727. if (rc < 0) {
  12728. - pr_err("%s: irq request fail\n", __func__);
  12729. + CDBG("%s: irq request fail\n", __func__);
  12730. rc = -EBUSY;
  12731. - goto cci_ioremap_mem;
  12732. + goto cci_release_mem;
  12733. }
  12734. disable_irq(new_cci_dev->irq->start);
  12735. - new_cci_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x7;
  12736. + new_cci_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x6;
  12737. msm_sd_register(&new_cci_dev->msm_sd);
  12738. new_cci_dev->pdev = pdev;
  12739. msm_cci_init_cci_params(new_cci_dev);
  12740. @@ -1105,17 +1185,14 @@ static int __devinit msm_cci_probe(struct platform_device *pdev)
  12741. g_cci_subdev = &new_cci_dev->msm_sd.sd;
  12742. CDBG("%s cci subdev %p\n", __func__, &new_cci_dev->msm_sd.sd);
  12743. CDBG("%s line %d\n", __func__, __LINE__);
  12744. - pr_warn("%s : Succeed!", __func__);
  12745. return 0;
  12746.  
  12747. -cci_ioremap_mem:
  12748. - iounmap(new_cci_dev->base);
  12749. cci_release_mem:
  12750. release_mem_region(new_cci_dev->mem->start,
  12751. resource_size(new_cci_dev->mem));
  12752. cci_no_resource:
  12753. kfree(new_cci_dev);
  12754. - return rc;
  12755. + return 0;
  12756. }
  12757.  
  12758. static int __exit msm_cci_exit(struct platform_device *pdev)
  12759. diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
  12760. index 81293f2..e1012c6 100644
  12761. --- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
  12762. +++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
  12763. @@ -51,6 +51,7 @@ enum msm_cci_cmd_type {
  12764. MSM_CCI_SET_SYNC_CID,
  12765. MSM_CCI_I2C_READ,
  12766. MSM_CCI_I2C_WRITE,
  12767. + MSM_CCI_I2C_WRITE_SEQ,
  12768. MSM_CCI_GPIO_WRITE,
  12769. MSM_CCI_I2C_WRITE_BURST,
  12770. };
  12771. diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
  12772. index e0659d6..53a5ed3 100644
  12773. --- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
  12774. +++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
  12775. @@ -13,6 +13,7 @@
  12776. #include <linux/delay.h>
  12777. #include <linux/module.h>
  12778. #include <linux/of.h>
  12779. +#include <linux/ratelimit.h>
  12780. #include <linux/irqreturn.h>
  12781. #include "msm_csid.h"
  12782. #include "msm_csid_hwreg.h"
  12783. @@ -20,8 +21,9 @@
  12784. #include "msm_camera_io_util.h"
  12785.  
  12786. #define V4L2_IDENT_CSID 50002
  12787. -#define CSID_VERSION_V2 0x02000011
  12788. -#define CSID_VERSION_V3 0x30000000
  12789. +#define CSID_VERSION_V20 0x02000011
  12790. +#define CSID_VERSION_V22 0x02001000
  12791. +#define CSID_VERSION_V30 0x30000000
  12792. #define MSM_CSID_DRV_NAME "msm_csid"
  12793.  
  12794. #define DBG_CSID 0
  12795. @@ -51,9 +53,9 @@ static int msm_csid_cid_lut(
  12796. if (csid_lut_params->vc_cfg[i]->cid >=
  12797. csid_lut_params->num_cid ||
  12798. csid_lut_params->vc_cfg[i]->cid < 0) {
  12799. - pr_err("%s: cid outside range %d\n",
  12800. - __func__, csid_lut_params->vc_cfg[i]->cid);
  12801. - return -EINVAL;
  12802. + pr_err("%s: cid outside range %d\n",
  12803. + __func__, csid_lut_params->vc_cfg[i]->cid);
  12804. + return -EINVAL;
  12805. }
  12806. CDBG("%s lut params num_cid = %d, cid = %d, dt = %x, df = %d\n",
  12807. __func__,
  12808. @@ -100,7 +102,7 @@ static void msm_csid_set_debug_reg(void __iomem *csidbase,
  12809. static void msm_csid_reset(struct csid_device *csid_dev)
  12810. {
  12811. msm_camera_io_w(CSID_RST_STB_ALL, csid_dev->base + CSID_RST_CMD_ADDR);
  12812. - wait_for_completion_interruptible(&csid_dev->reset_complete);
  12813. + wait_for_completion(&csid_dev->reset_complete);
  12814. return;
  12815. }
  12816.  
  12817. @@ -148,23 +150,14 @@ static int msm_csid_config(struct csid_device *csid_dev,
  12818. static irqreturn_t msm_csid_irq(int irq_num, void *data)
  12819. {
  12820. uint32_t irq;
  12821. - struct csid_device *csid_dev ;//prevent
  12822. + struct csid_device *csid_dev = data;
  12823. void __iomem *csidbase;
  12824. + csidbase = csid_dev->base;
  12825.  
  12826. - if (!data) {
  12827. - pr_err("%s:%d data NULL\n", __func__, __LINE__);
  12828. - return IRQ_HANDLED;
  12829. - }//prevent
  12830. -
  12831. - csid_dev = data;
  12832. -
  12833. - if (!csid_dev || ! csid_dev->base) {
  12834. + if (!csid_dev) {
  12835. pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
  12836. return IRQ_HANDLED;
  12837. }
  12838. -
  12839. - csidbase = csid_dev->base;
  12840. -
  12841. irq = msm_camera_io_r(csid_dev->base + CSID_IRQ_STATUS_ADDR);
  12842. CDBG("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
  12843. __func__, csid_dev->pdev->id, irq);
  12844. @@ -212,6 +205,27 @@ static struct msm_cam_clk_info csid_8974_clk_info[] = {
  12845. {"csi_rdi_clk", -1},
  12846. };
  12847.  
  12848. +static struct msm_cam_clk_info csid_8610_clk_info[] = {
  12849. + {"csi_ahb_clk", -1},
  12850. + {"csi_src_clk", 200000000},
  12851. + {"csi_clk", -1},
  12852. + {"csi0phy_mux_clk", -1},
  12853. + {"csi1phy_mux_clk", -1},
  12854. + {"csi0pix_mux_clk", -1},
  12855. + {"csi0rdi_mux_clk", -1},
  12856. + {"csi1rdi_mux_clk", -1},
  12857. + {"csi2rdi_mux_clk", -1},
  12858. +};
  12859. +
  12860. +static struct msm_cam_clk_info csid_8610_clk_src_info[] = {
  12861. + {"csi_phy_src_clk", 0},
  12862. + {"csi_phy_src_clk", 0},
  12863. + {"csi_pix_src_clk", 0},
  12864. + {"csi_rdi_src_clk", 0},
  12865. + {"csi_rdi_src_clk", 0},
  12866. + {"csi_rdi_src_clk", 0},
  12867. +};
  12868. +
  12869. static struct camera_vreg_t csid_8960_vreg_info[] = {
  12870. {"mipi_csi_vdd", REG_LDO, 1200000, 1200000, 20000},
  12871. };
  12872. @@ -223,6 +237,7 @@ static struct camera_vreg_t csid_vreg_info[] = {
  12873. static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
  12874. {
  12875. int rc = 0;
  12876. + struct camera_vreg_t *cam_vreg;
  12877.  
  12878. if (!csid_version) {
  12879. pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
  12880. @@ -245,31 +260,50 @@ static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
  12881. return rc;
  12882. }
  12883.  
  12884. - if (CSID_VERSION <= CSID_VERSION_V2) {
  12885. + if (CSID_VERSION == CSID_VERSION_V20)
  12886. + cam_vreg = csid_8960_vreg_info;
  12887. + else
  12888. + cam_vreg = csid_vreg_info;
  12889. +
  12890. + if (CSID_VERSION < CSID_VERSION_V30) {
  12891. rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
  12892. - csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
  12893. + csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12894. NULL, 0, &csid_dev->csi_vdd, 1);
  12895. if (rc < 0) {
  12896. pr_err("%s: regulator on failed\n", __func__);
  12897. goto vreg_config_failed;
  12898. }
  12899. -
  12900. rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
  12901. - csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
  12902. + csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12903. NULL, 0, &csid_dev->csi_vdd, 1);
  12904. if (rc < 0) {
  12905. pr_err("%s: regulator enable failed\n", __func__);
  12906. goto vreg_enable_failed;
  12907. }
  12908.  
  12909. - rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
  12910. - csid_8960_clk_info, csid_dev->csid_clk,
  12911. - ARRAY_SIZE(csid_8960_clk_info), 1);
  12912. - if (rc < 0) {
  12913. - pr_err("%s: clock enable failed\n", __func__);
  12914. - goto clk_enable_failed;
  12915. + if (CSID_VERSION == CSID_VERSION_V20) {
  12916. + rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
  12917. + csid_8960_clk_info, csid_dev->csid_clk,
  12918. + ARRAY_SIZE(csid_8960_clk_info), 1);
  12919. + if (rc < 0) {
  12920. + pr_err("%s: 8960: clock enable failed\n",
  12921. + __func__);
  12922. + goto clk_enable_failed;
  12923. + }
  12924. + } else {
  12925. + msm_cam_clk_sel_src(&csid_dev->pdev->dev,
  12926. + &csid_8610_clk_info[3], csid_8610_clk_src_info,
  12927. + ARRAY_SIZE(csid_8610_clk_src_info));
  12928. + rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
  12929. + csid_8610_clk_info, csid_dev->csid_clk,
  12930. + ARRAY_SIZE(csid_8610_clk_info), 1);
  12931. + if (rc < 0) {
  12932. + pr_err("%s: 8610: clock enable failed\n",
  12933. + __func__);
  12934. + goto clk_enable_failed;
  12935. + }
  12936. }
  12937. - } else if (CSID_VERSION >= CSID_VERSION_V3) {
  12938. + } else if (CSID_VERSION >= CSID_VERSION_V30) {
  12939. rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
  12940. csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12941. NULL, 0, &csid_dev->csi_vdd, 1);
  12942. @@ -310,21 +344,21 @@ static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
  12943. return rc;
  12944.  
  12945. clk_enable_failed:
  12946. - if (CSID_VERSION <= CSID_VERSION_V2) {
  12947. + if (CSID_VERSION < CSID_VERSION_V30) {
  12948. msm_camera_enable_vreg(&csid_dev->pdev->dev,
  12949. - csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
  12950. + csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12951. NULL, 0, &csid_dev->csi_vdd, 0);
  12952. - } else if (CSID_VERSION >= CSID_VERSION_V3) {
  12953. + } else if (CSID_VERSION >= CSID_VERSION_V30) {
  12954. msm_camera_enable_vreg(&csid_dev->pdev->dev,
  12955. csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12956. NULL, 0, &csid_dev->csi_vdd, 0);
  12957. }
  12958. vreg_enable_failed:
  12959. - if (CSID_VERSION <= CSID_VERSION_V2) {
  12960. + if (CSID_VERSION < CSID_VERSION_V30) {
  12961. msm_camera_config_vreg(&csid_dev->pdev->dev,
  12962. - csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
  12963. + csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12964. NULL, 0, &csid_dev->csi_vdd, 0);
  12965. - } else if (CSID_VERSION >= CSID_VERSION_V3) {
  12966. + } else if (CSID_VERSION >= CSID_VERSION_V30) {
  12967. msm_camera_config_vreg(&csid_dev->pdev->dev,
  12968. csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12969. NULL, 0, &csid_dev->csi_vdd, 0);
  12970. @@ -351,7 +385,7 @@ static int msm_csid_release(struct csid_device *csid_dev)
  12971.  
  12972. disable_irq(csid_dev->irq->start);
  12973.  
  12974. - if (csid_dev->hw_version <= CSID_VERSION_V2) {
  12975. + if (csid_dev->hw_version == CSID_VERSION_V20) {
  12976. msm_cam_clk_enable(&csid_dev->pdev->dev, csid_8960_clk_info,
  12977. csid_dev->csid_clk, ARRAY_SIZE(csid_8960_clk_info), 0);
  12978.  
  12979. @@ -362,7 +396,20 @@ static int msm_csid_release(struct csid_device *csid_dev)
  12980. msm_camera_config_vreg(&csid_dev->pdev->dev,
  12981. csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
  12982. NULL, 0, &csid_dev->csi_vdd, 0);
  12983. - } else if (csid_dev->hw_version >= CSID_VERSION_V3) {
  12984. + } else if (csid_dev->hw_version == CSID_VERSION_V22) {
  12985. + msm_cam_clk_enable(&csid_dev->pdev->dev,
  12986. + csid_8610_clk_info,
  12987. + csid_dev->csid_clk,
  12988. + ARRAY_SIZE(csid_8610_clk_info), 0);
  12989. +
  12990. + msm_camera_enable_vreg(&csid_dev->pdev->dev,
  12991. + csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12992. + NULL, 0, &csid_dev->csi_vdd, 0);
  12993. +
  12994. + msm_camera_config_vreg(&csid_dev->pdev->dev,
  12995. + csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
  12996. + NULL, 0, &csid_dev->csi_vdd, 0);
  12997. + } else if (csid_dev->hw_version >= CSID_VERSION_V30) {
  12998. msm_cam_clk_enable(&csid_dev->pdev->dev, csid_8974_clk_info,
  12999. csid_dev->csid_clk, ARRAY_SIZE(csid_8974_clk_info), 0);
  13000.  
  13001. @@ -401,7 +448,7 @@ static long msm_csid_cmd(struct csid_device *csid_dev, void *arg)
  13002. case CSID_CFG: {
  13003. struct msm_camera_csid_params csid_params;
  13004. struct msm_camera_csid_vc_cfg *vc_cfg = NULL;
  13005. - int32_t i = 0;
  13006. + int8_t i = 0;
  13007. if (copy_from_user(&csid_params,
  13008. (void *)cdata->cfg.csid_params,
  13009. sizeof(struct msm_camera_csid_params))) {
  13010. @@ -409,10 +456,16 @@ static long msm_csid_cmd(struct csid_device *csid_dev, void *arg)
  13011. rc = -EFAULT;
  13012. break;
  13013. }
  13014. + if (csid_params.lut_params.num_cid < 1 ||
  13015. + csid_params.lut_params.num_cid > 16) {
  13016. + pr_err("%s: %d num_cid outside range\n",
  13017. + __func__, __LINE__);
  13018. + rc = -EINVAL;
  13019. + break;
  13020. + }
  13021. for (i = 0; i < csid_params.lut_params.num_cid; i++) {
  13022. - vc_cfg = kzalloc(csid_params.lut_params.num_cid *
  13023. - sizeof(struct msm_camera_csid_vc_cfg),
  13024. - GFP_KERNEL);
  13025. + vc_cfg = kzalloc(sizeof(struct msm_camera_csid_vc_cfg),
  13026. + GFP_KERNEL);
  13027. if (!vc_cfg) {
  13028. pr_err("%s: %d failed\n", __func__, __LINE__);
  13029. for (i--; i >= 0; i--)
  13030. @@ -422,8 +475,7 @@ static long msm_csid_cmd(struct csid_device *csid_dev, void *arg)
  13031. }
  13032. if (copy_from_user(vc_cfg,
  13033. (void *)csid_params.lut_params.vc_cfg[i],
  13034. - (csid_params.lut_params.num_cid *
  13035. - sizeof(struct msm_camera_csid_vc_cfg)))) {
  13036. + sizeof(struct msm_camera_csid_vc_cfg))) {
  13037. pr_err("%s: %d failed\n", __func__, __LINE__);
  13038. kfree(vc_cfg);
  13039. for (i--; i >= 0; i--)
  13040. @@ -442,7 +494,7 @@ static long msm_csid_cmd(struct csid_device *csid_dev, void *arg)
  13041. rc = msm_csid_release(csid_dev);
  13042. break;
  13043. default:
  13044. - pr_err("%s: %d failed\n", __func__, __LINE__);
  13045. + pr_err_ratelimited("%s: %d failed\n", __func__, __LINE__);
  13046. rc = -ENOIOCTLCMD;
  13047. break;
  13048. }
  13049. @@ -582,13 +634,20 @@ static int __devinit csid_probe(struct platform_device *pdev)
  13050. goto csid_no_resource;
  13051. }
  13052. disable_irq(new_csid_dev->irq->start);
  13053. + if (rc < 0) {
  13054. + release_mem_region(new_csid_dev->mem->start,
  13055. + resource_size(new_csid_dev->mem));
  13056. + pr_err("%s Error registering irq ", __func__);
  13057. + goto csid_no_resource;
  13058. + }
  13059. +
  13060. new_csid_dev->csid_state = CSID_POWER_DOWN;
  13061. return 0;
  13062.  
  13063. csid_no_resource:
  13064. mutex_destroy(&new_csid_dev->mutex);
  13065. kfree(new_csid_dev);
  13066. - return rc;
  13067. + return 0;
  13068. }
  13069.  
  13070. static const struct of_device_id msm_csid_dt_match[] = {
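Two things change in the CSID_CFG handler above: num_cid is bounded to 1..16 before the loop, and each iteration now allocates and copies a single msm_camera_csid_vc_cfg instead of num_cid entries at a time, freeing whatever was already allocated when a copy fails. A kernel-style sketch of that allocate/copy/unwind loop; the struct and parameter names here are placeholders:

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct vc_cfg_sketch { int cid; int dt; int decode_format; };

static int copy_vc_cfgs(struct vc_cfg_sketch **dst,
                        struct vc_cfg_sketch __user **src, int num_cid)
{
    int i, rc = 0;

    if (num_cid < 1 || num_cid > 16)
        return -EINVAL;

    for (i = 0; i < num_cid; i++) {
        dst[i] = kzalloc(sizeof(*dst[i]), GFP_KERNEL);  /* one entry */
        if (!dst[i]) {
            rc = -ENOMEM;
            goto unwind;
        }
        if (copy_from_user(dst[i], src[i], sizeof(*dst[i]))) {
            kfree(dst[i]);
            rc = -EFAULT;
            goto unwind;
        }
    }
    return 0;

unwind:
    while (--i >= 0)            /* free only what was allocated */
        kfree(dst[i]);
    return rc;
}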
  13071. diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
  13072. index d844593..0ca94f9 100644
  13073. --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
  13074. +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
  13075. @@ -161,7 +161,7 @@ int32_t msm_camera_cci_i2c_write_seq(struct msm_camera_i2c_client *client,
  13076. reg_conf_tbl[i].reg_data = data[i];
  13077. reg_conf_tbl[i].delay = 0;
  13078. }
  13079. - cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
  13080. + cci_ctrl.cmd = MSM_CCI_I2C_WRITE_SEQ;
  13081. cci_ctrl.cci_info = client->cci_client;
  13082. cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting = reg_conf_tbl;
  13083. cci_ctrl.cfg.cci_i2c_write_cfg.data_type = MSM_CAMERA_I2C_BYTE_DATA;
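The one-line change above makes the sequential-write helper issue MSM_CCI_I2C_WRITE_SEQ, which msm_cci_data_queue() treats as one auto-incrementing burst from the first register address, whereas MSM_CCI_I2C_WRITE reloads reg_addr for every entry. A small illustrative program; the struct layout is a stand-in and the addresses are made up:

#include <stdio.h>

struct reg_array_sketch { unsigned short reg_addr; unsigned short reg_data; };

int main(void)
{
    struct reg_array_sketch tbl[4];
    int i;

    /* Mirror of the helper above: one table entry per byte, delay 0.
     * With MSM_CCI_I2C_WRITE_SEQ only tbl[0].reg_addr is honoured; the
     * slave address auto-increments, so bytes land at 0x3000..0x3003. */
    for (i = 0; i < 4; i++) {
        tbl[i].reg_addr = 0x3000;
        tbl[i].reg_data = (unsigned short)i;
    }

    for (i = 0; i < 4; i++)
        printf("0x%04x <- 0x%02x\n", tbl[0].reg_addr + i, tbl[i].reg_data);
    return 0;
}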
  13084. diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
  13085. index 5e4805c..7d369ff 100644
  13086. --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
  13087. +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.c
  13088. @@ -21,8 +21,6 @@
  13089. #include <mach/msm_bus.h>
  13090. #include "msm_camera_io_util.h"
  13091.  
  13092. -#include <mach/clk-provider.h>
  13093. -
  13094. #define BUFF_SIZE_128 128
  13095.  
  13096. #undef CDBG
  13097. @@ -120,17 +118,40 @@ void msm_camera_io_memcpy_mb(void __iomem *dest_addr,
  13098. msm_camera_io_w_mb(*s++, d++);
  13099. }
  13100.  
  13101. +int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
  13102. + struct msm_cam_clk_info *clk_src_info, int num_clk)
  13103. +{
  13104. + int i;
  13105. + int rc = 0;
  13106. + struct clk *mux_clk = NULL;
  13107. + struct clk *src_clk = NULL;
  13108. +
  13109. + for (i = 0; i < num_clk; i++) {
  13110. + if (clk_src_info[i].clk_name) {
  13111. + mux_clk = clk_get(dev, clk_info[i].clk_name);
  13112. + if (IS_ERR(mux_clk)) {
  13113. + pr_err("%s get failed\n",
  13114. + clk_info[i].clk_name);
  13115. + continue;
  13116. + }
  13117. + src_clk = clk_get(dev, clk_src_info[i].clk_name);
  13118. + if (IS_ERR(src_clk)) {
  13119. + pr_err("%s get failed\n",
  13120. + clk_src_info[i].clk_name);
  13121. + continue;
  13122. + }
  13123. + clk_set_parent(mux_clk, src_clk);
  13124. + }
  13125. + }
  13126. + return rc;
  13127. +}
  13128. +
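msm_cam_clk_sel_src() above re-parents each *_mux_clk onto its source clock with clk_set_parent() before the clocks are enabled; the 8610 CSID path earlier in this patch uses it to route the csi phy/pix/rdi sources through the mux clocks listed in csid_8610_clk_info. A kernel-style sketch of one such re-parenting, using the first mux/source pair from those tables:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch only: one mux/source pair; the real helper walks both tables. */
static void sel_src_sketch(struct device *dev)
{
    struct clk *mux = clk_get(dev, "csi0phy_mux_clk");
    struct clk *src = clk_get(dev, "csi_phy_src_clk");

    if (!IS_ERR(mux) && !IS_ERR(src))
        clk_set_parent(mux, src);   /* route the source through the mux */

    if (!IS_ERR(src))
        clk_put(src);
    if (!IS_ERR(mux))
        clk_put(mux);
}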
  13129. int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
  13130. struct clk **clk_ptr, int num_clk, int enable)
  13131. {
  13132. int i;
  13133. int rc = 0;
  13134. - int qctkd = 0;
  13135. long clk_rate;
  13136. -
  13137. - if (num_clk == 8) //CPP use case
  13138. - qctkd = 1;
  13139. -
  13140. if (enable) {
  13141. for (i = 0; i < num_clk; i++) {
  13142. CDBG("%s enable %s\n", __func__,
  13143. @@ -141,7 +162,7 @@ int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
  13144. rc = PTR_ERR(clk_ptr[i]);
  13145. goto cam_clk_get_err;
  13146. }
  13147. - if (clk_info[i].clk_rate >= 0) {
  13148. + if (clk_info[i].clk_rate > 0) {
  13149. rc = clk_set_rate(clk_ptr[i],
  13150. clk_info[i].clk_rate);
  13151. if (rc < 0) {
  13152. @@ -187,7 +208,6 @@ int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
  13153. usleep_range(clk_info[i].delay * 1000,
  13154. (clk_info[i].delay * 1000) + 1000);
  13155. }
  13156. - if (qctkd) printk (KERN_ERR "QCTKD: %s[%d:%d] Enable \n", clk_info[i].clk_name, clk_ptr[i]->prepare_count, clk_ptr[i]->count);
  13157. }
  13158. } else {
  13159. for (i = num_clk - 1; i >= 0; i--) {
  13160. @@ -198,7 +218,6 @@ int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
  13161. clk_unprepare(clk_ptr[i]);
  13162. clk_put(clk_ptr[i]);
  13163. }
  13164. - if (qctkd) printk (KERN_ERR "QCTKD: %s[%d:%d] Disable\n", clk_info[i].clk_name, clk_ptr[i]->prepare_count, clk_ptr[i]->count);
  13165. }
  13166. }
  13167. return rc;
  13168. @@ -466,13 +485,8 @@ int msm_camera_config_single_vreg(struct device *dev,
  13169. struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config)
  13170. {
  13171. int rc = 0;
  13172. -
  13173. if (config) {
  13174. - if (cam_vreg->reg_name == NULL) {
  13175. - pr_err("%s : can't find reg name", __func__);
  13176. - goto vreg_get_fail;
  13177. - }
  13178. - pr_info("%s enable %s\n", __func__, cam_vreg->reg_name);
  13179. + CDBG("%s enable %s\n", __func__, cam_vreg->reg_name);
  13180. *reg_ptr = regulator_get(dev, cam_vreg->reg_name);
  13181. if (IS_ERR(*reg_ptr)) {
  13182. pr_err("%s: %s get failed\n", __func__,
  13183. @@ -508,7 +522,7 @@ int msm_camera_config_single_vreg(struct device *dev,
  13184. }
  13185. } else {
  13186. if (*reg_ptr) {
  13187. - pr_info("%s disable %s\n", __func__, cam_vreg->reg_name);
  13188. + CDBG("%s disable %s\n", __func__, cam_vreg->reg_name);
  13189. regulator_disable(*reg_ptr);
  13190. if (cam_vreg->type == REG_LDO) {
  13191. if (cam_vreg->op_mode >= 0)
  13192. @@ -518,8 +532,7 @@ int msm_camera_config_single_vreg(struct device *dev,
  13193. }
  13194. regulator_put(*reg_ptr);
  13195. *reg_ptr = NULL;
  13196. - } else
  13197. - pr_err("%s can't disable %s\n", __func__, cam_vreg->reg_name);
  13198. + }
  13199. }
  13200. return 0;
  13201.  
  13202. @@ -542,23 +555,31 @@ vreg_get_fail:
  13203. int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
  13204. int gpio_en)
  13205. {
  13206. - int rc = 0, i = 0;
  13207. + int rc = 0, i = 0, err = 0;
  13208.  
  13209. if (!gpio_tbl || !size) {
  13210. pr_err("%s:%d invalid gpio_tbl %p / size %d\n", __func__,
  13211. - __LINE__, gpio_tbl, size);
  13212. - return -EINVAL;
  13213. + __LINE__, gpio_tbl, size);
  13214. + return -EINVAL;
  13215. }
  13216. for (i = 0; i < size; i++) {
  13217. CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
  13218. gpio_tbl[i].gpio, gpio_tbl[i].flags);
  13219. }
  13220. if (gpio_en) {
  13221. - rc = gpio_request_array(gpio_tbl, size);
  13222. - if (rc < 0) {
  13223. - pr_err("%s:%d camera gpio request failed\n", __func__,
  13224. - __LINE__);
  13225. - return rc;
  13226. + for (i = 0; i < size; i++) {
  13227. + err = gpio_request_one(gpio_tbl[i].gpio,
  13228. + gpio_tbl[i].flags, gpio_tbl[i].label);
  13229. + if (err) {
  13230. + /*
  13231. + * After a GPIO request fails, continue to
  13232. + * apply the remaining gpios, and output an error
  13233. + * message for driver bringup debug
  13234. + */
  13235. + pr_err("%s:%d gpio %d:%s request fails\n",
  13236. + __func__, __LINE__,
  13237. + gpio_tbl[i].gpio, gpio_tbl[i].label);
  13238. + }
  13239. }
  13240. } else {
  13241. gpio_free_array(gpio_tbl, size);
  13242. diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
  13243. index a5286bb..90925a9 100644
  13244. --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
  13245. +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_io_util.h
  13246. @@ -30,6 +30,8 @@ void msm_camera_io_memcpy(void __iomem *dest_addr,
  13247. void __iomem *src_addr, u32 len);
  13248. void msm_camera_io_memcpy_mb(void __iomem *dest_addr,
  13249. void __iomem *src_addr, u32 len);
  13250. +int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
  13251. + struct msm_cam_clk_info *clk_src_info, int num_clk);
  13252. int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
  13253. struct clk **clk_ptr, int num_clk, int enable);
  13254.  
  13255. diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
  13256. index c0358e9..06c1adc 100644
  13257. --- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
  13258. +++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
  13259. @@ -1,4 +1,4 @@
  13260. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  13261. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  13262. *
  13263. * This program is free software; you can redistribute it and/or modify
  13264. * it under the terms of the GNU General Public License version 2 and
  13265. @@ -10,8 +10,7 @@
  13266. * GNU General Public License for more details.
  13267. */
  13268.  
  13269. -#define pr_fmt(fmt) "MSM-SENSOR-DRIVER %s:%d " fmt "\n", __func__, __LINE__
  13270. -
  13271. +#define SENSOR_DRIVER_I2C "camera"
  13272. /* Header file declaration */
  13273. #include "msm_sensor.h"
  13274. #include "msm_sd.h"
  13275. @@ -19,8 +18,8 @@
  13276. #include "msm_cci.h"
  13277. #include "msm_camera_dt_util.h"
  13278.  
  13279. -//#define MSM_SENSOR_DRIVER_DEBUG
  13280. /* Logging macro */
  13281. +/*#define MSM_SENSOR_DRIVER_DEBUG*/
  13282. #undef CDBG
  13283. #ifdef MSM_SENSOR_DRIVER_DEBUG
  13284. #define CDBG(fmt, args...) pr_err(fmt, ##args)
  13285. @@ -28,9 +27,33 @@
  13286. #define CDBG(fmt, args...) pr_debug(fmt, ##args)
  13287. #endif
  13288.  
  13289. +#define SENSOR_MAX_MOUNTANGLE (360)
  13290. +
  13291. /* Static declaration */
  13292. static struct msm_sensor_ctrl_t *g_sctrl[MAX_CAMERAS];
  13293.  
  13294. +static int msm_sensor_platform_remove(struct platform_device *pdev)
  13295. +{
  13296. + struct msm_sensor_ctrl_t *s_ctrl;
  13297. +
  13298. + pr_err("%s: sensor FREE\n", __func__);
  13299. +
  13300. + s_ctrl = g_sctrl[pdev->id];
  13301. + if (!s_ctrl) {
  13302. + pr_err("%s: sensor device is NULL\n", __func__);
  13303. + return 0;
  13304. + }
  13305. +
  13306. + msm_sensor_free_sensor_data(s_ctrl);
  13307. + kfree(s_ctrl->msm_sensor_mutex);
  13308. + kfree(s_ctrl->sensor_i2c_client);
  13309. + kfree(s_ctrl);
  13310. + g_sctrl[pdev->id] = NULL;
  13311. +
  13312. + return 0;
  13313. +}
  13314. +
  13315. +
  13316. static const struct of_device_id msm_sensor_driver_dt_match[] = {
  13317. {.compatible = "qcom,camera"},
  13318. {}
  13319. @@ -44,6 +67,7 @@ static struct platform_driver msm_sensor_platform_driver = {
  13320. .owner = THIS_MODULE,
  13321. .of_match_table = msm_sensor_driver_dt_match,
  13322. },
  13323. + .remove = msm_sensor_platform_remove,
  13324. };
  13325.  
  13326. static struct v4l2_subdev_info msm_sensor_driver_subdev_info[] = {
  13327. @@ -55,316 +79,574 @@ static struct v4l2_subdev_info msm_sensor_driver_subdev_info[] = {
  13328. },
  13329. };
  13330.  
  13331. +static int32_t msm_sensor_driver_create_i2c_v4l_subdev
  13332. + (struct msm_sensor_ctrl_t *s_ctrl)
  13333. +{
  13334. + int32_t rc = 0;
  13335. + uint32_t session_id = 0;
  13336. + struct i2c_client *client = s_ctrl->sensor_i2c_client->client;
  13337. +
  13338. + CDBG("%s %s I2c probe succeeded\n", __func__, client->name);
  13339. + rc = camera_init_v4l2(&client->dev, &session_id);
  13340. + if (rc < 0) {
  13341. + pr_err("failed: camera_init_i2c_v4l2 rc %d", rc);
  13342. + return rc;
  13343. + }
  13344. + CDBG("%s rc %d session_id %d\n", __func__, rc, session_id);
  13345. + snprintf(s_ctrl->msm_sd.sd.name,
  13346. + sizeof(s_ctrl->msm_sd.sd.name), "%s",
  13347. + s_ctrl->sensordata->sensor_name);
  13348. + v4l2_i2c_subdev_init(&s_ctrl->msm_sd.sd, client,
  13349. + s_ctrl->sensor_v4l2_subdev_ops);
  13350. + v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, client);
  13351. + s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
  13352. + media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0);
  13353. + s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
  13354. + s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
  13355. + s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
  13356. + s_ctrl->sensordata->sensor_info->session_id = session_id;
  13357. + s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
  13358. + msm_sd_register(&s_ctrl->msm_sd);
  13359. + CDBG("%s:%d\n", __func__, __LINE__);
  13360. + return rc;
  13361. +}
  13362. +
  13363. +static int32_t msm_sensor_driver_create_v4l_subdev
  13364. + (struct msm_sensor_ctrl_t *s_ctrl)
  13365. +{
  13366. + int32_t rc = 0;
  13367. + uint32_t session_id = 0;
  13368. +
  13369. + rc = camera_init_v4l2(&s_ctrl->pdev->dev, &session_id);
  13370. + if (rc < 0) {
  13371. + pr_err("failed: camera_init_v4l2 rc %d", rc);
  13372. + return rc;
  13373. + }
  13374. + CDBG("rc %d session_id %d", rc, session_id);
  13375. + s_ctrl->sensordata->sensor_info->session_id = session_id;
  13376. +
  13377. + /* Create /dev/v4l-subdevX device */
  13378. + v4l2_subdev_init(&s_ctrl->msm_sd.sd, s_ctrl->sensor_v4l2_subdev_ops);
  13379. + snprintf(s_ctrl->msm_sd.sd.name, sizeof(s_ctrl->msm_sd.sd.name), "%s",
  13380. + s_ctrl->sensordata->sensor_name);
  13381. + v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, s_ctrl->pdev);
  13382. + s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
  13383. + media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0);
  13384. + s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
  13385. + s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
  13386. + s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
  13387. + s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
  13388. + msm_sd_register(&s_ctrl->msm_sd);
  13389. + return rc;
  13390. +}
  13391. +
  13392. +static int32_t msm_sensor_fill_eeprom_subdevid_by_name(
  13393. + struct msm_sensor_ctrl_t *s_ctrl)
  13394. +{
  13395. + int32_t rc = 0;
  13396. + const char *eeprom_name;
  13397. + struct device_node *src_node = NULL;
  13398. + uint32_t val = 0, count = 0, eeprom_name_len;
  13399. + int i;
  13400. + int32_t *eeprom_subdev_id;
  13401. + struct msm_sensor_info_t *sensor_info;
  13402. + struct device_node *of_node = s_ctrl->of_node;
  13403. + const void *p;
  13404. +
  13405. + if (!s_ctrl->sensordata->eeprom_name || !of_node)
  13406. + return -EINVAL;
  13407. +
  13408. + eeprom_name_len = strlen(s_ctrl->sensordata->eeprom_name);
  13409. + if (eeprom_name_len >= MAX_SENSOR_NAME)
  13410. + return -EINVAL;
  13411. +
  13412. + sensor_info = s_ctrl->sensordata->sensor_info;
  13413. + eeprom_subdev_id = &sensor_info->subdev_id[SUB_MODULE_EEPROM];
  13414. + /*
  13415. + * string for eeprom name is valid, set subdev id to -1
  13416. + * and try to find a new id
  13417. + */
  13418. + *eeprom_subdev_id = -1;
  13419. +
  13420. + if (0 == eeprom_name_len)
  13421. + return 0;
  13422. +
  13423. + CDBG("Try to find eeprom subdev for %s\n",
  13424. + s_ctrl->sensordata->eeprom_name);
  13425. + p = of_get_property(of_node, "qcom,eeprom-src", &count);
  13426. + if (!p || !count)
  13427. + return 0;
  13428. +
  13429. + count /= sizeof(uint32_t);
  13430. + for (i = 0; i < count; i++) {
  13431. + eeprom_name = NULL;
  13432. + src_node = of_parse_phandle(of_node, "qcom,eeprom-src", i);
  13433. + if (!src_node) {
  13434. + pr_err("eeprom src node NULL\n");
  13435. + continue;
  13436. + }
  13437. + rc = of_property_read_string(src_node, "qcom,eeprom-name",
  13438. + &eeprom_name);
  13439. + if (rc < 0) {
  13440. + pr_err("failed\n");
  13441. + of_node_put(src_node);
  13442. + continue;
  13443. + }
  13444. + if (strcmp(eeprom_name, s_ctrl->sensordata->eeprom_name))
  13445. + continue;
  13446. +
  13447. + rc = of_property_read_u32(src_node, "cell-index", &val);
  13448. +
  13449. + CDBG("%s qcom,eeprom cell index %d, rc %d\n", __func__,
  13450. + val, rc);
  13451. + if (rc < 0) {
  13452. + pr_err("failed\n");
  13453. + of_node_put(src_node);
  13454. + continue;
  13455. + }
  13456. +
  13457. + *eeprom_subdev_id = val;
  13458. + CDBG("Done. Eeprom subdevice id is %d\n", val);
  13459. + of_node_put(src_node);
  13460. + src_node = NULL;
  13461. + break;
  13462. + }
  13463. +
  13464. + return rc;
  13465. +}
  13466. +
  13467. +static int32_t msm_sensor_fill_actuator_subdevid_by_name(
  13468. + struct msm_sensor_ctrl_t *s_ctrl)
  13469. +{
  13470. + int32_t rc = 0;
  13471. + struct device_node *src_node = NULL;
  13472. + uint32_t val = 0, actuator_name_len;
  13473. + int32_t *actuator_subdev_id;
  13474. + struct msm_sensor_info_t *sensor_info;
  13475. + struct device_node *of_node = s_ctrl->of_node;
  13476. +
  13477. + if (!s_ctrl->sensordata->actuator_name || !of_node)
  13478. + return -EINVAL;
  13479. +
  13480. + actuator_name_len = strlen(s_ctrl->sensordata->actuator_name);
  13481. + if (actuator_name_len >= MAX_SENSOR_NAME)
  13482. + return -EINVAL;
  13483. +
  13484. + sensor_info = s_ctrl->sensordata->sensor_info;
  13485. + actuator_subdev_id = &sensor_info->subdev_id[SUB_MODULE_ACTUATOR];
  13486. + /*
  13487. + * string for actuator name is valid, set subdev id to -1
  13488. + * and try to find a new id
  13489. + */
  13490. + *actuator_subdev_id = -1;
  13491. +
  13492. + if (0 == actuator_name_len)
  13493. + return 0;
  13494. +
  13495. + src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
  13496. + if (!src_node) {
  13497. + CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
  13498. + } else {
  13499. + rc = of_property_read_u32(src_node, "cell-index", &val);
  13500. + CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
  13501. + val, rc);
  13502. + if (rc < 0) {
  13503. + pr_err("%s failed %d\n", __func__, __LINE__);
  13504. + return -EINVAL;
  13505. + }
  13506. + *actuator_subdev_id = val;
  13507. + of_node_put(src_node);
  13508. + src_node = NULL;
  13509. + }
  13510. +
  13511. + return rc;
  13512. +}
  13513. +
  13514. +static int32_t msm_sensor_fill_slave_info_init_params(
  13515. + struct msm_camera_sensor_slave_info *slave_info,
  13516. + struct msm_sensor_info_t *sensor_info)
  13517. +{
  13518. + struct msm_sensor_init_params *sensor_init_params;
  13519. + if (!slave_info || !sensor_info)
  13520. + return -EINVAL;
  13521. +
  13522. + if (!slave_info->is_init_params_valid)
  13523. + return 0;
  13524. +
  13525. + sensor_init_params = &slave_info->sensor_init_params;
  13526. + if (INVALID_CAMERA_B != sensor_init_params->position)
  13527. + sensor_info->position =
  13528. + sensor_init_params->position;
  13529. +
  13530. + if (SENSOR_MAX_MOUNTANGLE > sensor_init_params->sensor_mount_angle) {
  13531. + sensor_info->sensor_mount_angle =
  13532. + sensor_init_params->sensor_mount_angle;
  13533. + sensor_info->is_mount_angle_valid = 1;
  13534. + }
  13535. +
  13536. + if (CAMERA_MODE_INVALID != sensor_init_params->modes_supported)
  13537. + sensor_info->modes_supported =
  13538. + sensor_init_params->modes_supported;
  13539. +
  13540. + return 0;
  13541. +}
  13542. +
  13543. +
  13544. +static int32_t msm_sensor_validate_slave_info(
  13545. + struct msm_sensor_info_t *sensor_info)
  13546. +{
  13547. + if (INVALID_CAMERA_B == sensor_info->position) {
  13548. + sensor_info->position = BACK_CAMERA_B;
  13549. + pr_err("%s Set dafault sensor position%d\n",
  13550. + __func__, __LINE__);
  13551. + }
  13552. + if (CAMERA_MODE_INVALID == sensor_info->modes_supported) {
  13553. + sensor_info->modes_supported = CAMERA_MODE_2D_B;
  13554. + pr_err("%s Set dafault sensor modes_supported%d\n",
  13555. + __func__, __LINE__);
  13556. + }
  13557. + if (SENSOR_MAX_MOUNTANGLE < sensor_info->sensor_mount_angle) {
  13558. + sensor_info->sensor_mount_angle = 0;
  13559. + pr_err("%s Set dafault sensor mount angle%d\n",
  13560. + __func__, __LINE__);
  13561. + sensor_info->is_mount_angle_valid = 1;
  13562. + }
  13563. + return 0;
  13564. +}
  13565. +
  13566. /* static function definition */
  13567. int32_t msm_sensor_driver_probe(void *setting)
  13568. {
  13569. - int32_t rc = 0;
  13570. - int32_t is_power_off = 0;
  13571. - uint16_t i = 0, size = 0, off_size = 0;
  13572. - uint32_t session_id = 0;
  13573. - struct msm_sensor_ctrl_t *s_ctrl = NULL;
  13574. - struct msm_camera_cci_client *cci_client = NULL;
  13575. - struct msm_camera_sensor_slave_info *slave_info = NULL;
  13576. - struct msm_sensor_power_setting *power_setting = NULL;
  13577. - struct msm_sensor_power_setting *power_off_setting = NULL;
  13578. - struct msm_camera_slave_info *camera_info = NULL;
  13579. - struct msm_camera_power_ctrl_t *power_info = NULL;
  13580. -
  13581. - /* Validate input parameters */
  13582. - if (!setting) {
  13583. - pr_err("failed: slave_info %p", setting);
  13584. - return -EINVAL;
  13585. - }
  13586. -
  13587. - /* Allocate memory for slave info */
  13588. - slave_info = kzalloc(sizeof(*slave_info), GFP_KERNEL);
  13589. - if (!slave_info) {
  13590. - pr_err("failed: no memory slave_info %p", slave_info);
  13591. - return -ENOMEM;
  13592. - }
  13593. -
  13594. - if (copy_from_user(slave_info, (void *)setting, sizeof(*slave_info))) {
  13595. - pr_err("failed: copy_from_user");
  13596. - rc = -EFAULT;
  13597. - goto FREE_SLAVE_INFO;
  13598. - }
  13599. -
  13600. - /* Print slave info */
  13601. - CDBG("camera id %d", slave_info->camera_id);
  13602. - CDBG("slave_addr %x", slave_info->slave_addr);
  13603. - CDBG("addr_type %d", slave_info->addr_type);
  13604. - CDBG("sensor_id_reg_addr %x",
  13605. - slave_info->sensor_id_info.sensor_id_reg_addr);
  13606. - CDBG("sensor_id %x", slave_info->sensor_id_info.sensor_id);
  13607. - CDBG("size %x", slave_info->power_setting_array.size);
  13608. -
  13609. - /* Validate camera id */
  13610. - if (slave_info->camera_id >= MAX_CAMERAS) {
  13611. - pr_err("failed: invalid camera id %d max %d",
  13612. - slave_info->camera_id, MAX_CAMERAS);
  13613. - rc = -EINVAL;
  13614. - goto FREE_SLAVE_INFO;
  13615. - }
  13616. -
  13617. - /* Extract s_ctrl from camera id */
  13618. - s_ctrl = g_sctrl[slave_info->camera_id];
  13619. - if (!s_ctrl) {
  13620. - pr_err("failed: s_ctrl %p for camera_id %d", s_ctrl,
  13621. - slave_info->camera_id);
  13622. - rc = -EINVAL;
  13623. - goto FREE_SLAVE_INFO;
  13624. - }
  13625. -
  13626. - CDBG("s_ctrl[%d] %p", slave_info->camera_id, s_ctrl);
  13627. -
  13628. - if (s_ctrl->is_probe_succeed == 1) {
  13629. - /*
  13630. - * Different sensor on this camera slot has been connected
  13631. - * and probe already succeeded for that sensor. Ignore this
  13632. - * probe
  13633. - */
  13634. - pr_err("slot %d has some other sensor", slave_info->camera_id);
  13635. - kfree(slave_info);
  13636. - return 0;
  13637. - }
  13638. -
  13639. - size = slave_info->power_setting_array.size;
  13640. - /* Allocate memory for power setting */
  13641. - power_setting = kzalloc(sizeof(*power_setting) * size, GFP_KERNEL);
  13642. - if (!power_setting) {
  13643. - pr_err("failed: no memory power_setting %p", power_setting);
  13644. - rc = -ENOMEM;
  13645. - goto FREE_SLAVE_INFO;
  13646. - }
  13647. -
  13648. - if (copy_from_user(power_setting,
  13649. - (void *)slave_info->power_setting_array.power_setting,
  13650. - sizeof(*power_setting) * size)) {
  13651. - pr_err("failed: copy_from_user");
  13652. - rc = -EFAULT;
  13653. - goto FREE_POWER_SETTING;
  13654. - }
  13655. -
  13656. - /* Print power setting */
  13657. - for (i = 0; i < size; i++) {
  13658. - CDBG("seq_type %d seq_val %d config_val %ld delay %d",
  13659. - power_setting[i].seq_type, power_setting[i].seq_val,
  13660. - power_setting[i].config_val, power_setting[i].delay);
  13661. - }
  13662. -
  13663. - off_size = slave_info->power_setting_array.off_size;
  13664. - if (off_size > 0) {
  13665. - /* Allocate memory for power setting */
  13666. - power_off_setting = kzalloc(sizeof(*power_off_setting) * off_size, GFP_KERNEL);
  13667. - if (!power_off_setting) {
  13668. - pr_err("failed: no memory power_setting %p", power_off_setting);
  13669. - rc = -ENOMEM;
  13670. - goto FREE_POWER_SETTING;
  13671. - }
  13672. -
  13673. - if (copy_from_user(power_off_setting,
  13674. - (void *)slave_info->power_setting_array.power_off_setting,
  13675. - sizeof(*power_off_setting) * off_size)) {
  13676. - pr_err("failed: copy_from_user");
  13677. - rc = -EFAULT;
  13678. - goto FREE_POWER_OFF_SETTING;
  13679. - }
  13680. -
  13681. - /* Print power setting */
  13682. - for (i = 0; i < off_size; i++) {
  13683. - CDBG("seq_type %d seq_val %d config_val %ld delay %d",
  13684. - power_off_setting[i].seq_type, power_off_setting[i].seq_val,
  13685. - power_off_setting[i].config_val, power_off_setting[i].delay);
  13686. - }
  13687. - is_power_off = 1;
  13688. - }
  13689. -
  13690. - camera_info = kzalloc(sizeof(struct msm_camera_slave_info), GFP_KERNEL);
  13691. - if (!camera_info) {
  13692. - pr_err("failed: no memory slave_info %p", camera_info);
  13693. - if (is_power_off)
  13694. - goto FREE_POWER_OFF_SETTING;
  13695. - else
  13696. - goto FREE_POWER_SETTING;
  13697. - }
  13698. -
  13699. - /* Fill power up setting and power up setting size */
  13700. - power_info = &s_ctrl->sensordata->power_info;
  13701. - power_info->power_setting = power_setting;
  13702. - power_info->power_setting_size = size;
  13703. - power_info->power_off_setting = power_off_setting;
  13704. - power_info->power_off_setting_size = off_size;
  13705. -
  13706. - s_ctrl->sensordata->slave_info = camera_info;
  13707. -
  13708. - /* Fill sensor slave info */
  13709. - camera_info->sensor_slave_addr = slave_info->slave_addr;
  13710. - camera_info->sensor_id_reg_addr =
  13711. - slave_info->sensor_id_info.sensor_id_reg_addr;
  13712. - camera_info->sensor_id = slave_info->sensor_id_info.sensor_id;
  13713. -
  13714. - /* Fill CCI master, slave address and CCI default params */
  13715. - if (!s_ctrl->sensor_i2c_client) {
  13716. - pr_err("failed: sensor_i2c_client %p",
  13717. - s_ctrl->sensor_i2c_client);
  13718. - rc = -EINVAL;
  13719. - if (is_power_off)
  13720. - goto FREE_POWER_OFF_SETTING;
  13721. - else
  13722. - goto FREE_POWER_SETTING;
  13723. - }
  13724. - /* Fill sensor address type */
  13725. - s_ctrl->sensor_i2c_client->addr_type = slave_info->addr_type;
  13726. -
  13727. - cci_client = s_ctrl->sensor_i2c_client->cci_client;
  13728. - if (!cci_client) {
  13729. - pr_err("failed: cci_client %p", cci_client);
  13730. - if (is_power_off)
  13731. - goto FREE_POWER_OFF_SETTING;
  13732. - else
  13733. - goto FREE_POWER_SETTING;
  13734. - }
  13735. - cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
  13736. - cci_client->sid = slave_info->slave_addr >> 1;
  13737. - cci_client->retries = 3;
  13738. - cci_client->id_map = 0;
  13739. -
  13740. - /* Parse and fill vreg params */
  13741. - rc = msm_camera_fill_vreg_params(
  13742. - power_info->cam_vreg,
  13743. - power_info->num_vreg,
  13744. - power_info->power_setting,
  13745. - power_info->power_setting_size);
  13746. - if (rc < 0) {
  13747. - pr_err("failed: msm_camera_get_dt_power_setting_data rc %d",
  13748. - rc);
  13749. - if (is_power_off)
  13750. - goto FREE_POWER_OFF_SETTING;
  13751. - else
  13752. - goto FREE_POWER_SETTING;
  13753. - }
  13754. -
  13755. - if (power_info->power_off_setting && (power_info->power_off_setting_size > 0)) {
  13756. - /* Parse and fill vreg params */
  13757. - rc = msm_camera_fill_vreg_params(
  13758. - power_info->cam_vreg,
  13759. - power_info->num_vreg,
  13760. - power_info->power_off_setting,
  13761. - power_info->power_off_setting_size);
  13762. - if (rc < 0) {
  13763. - pr_err("failed: msm_camera_get_dt_power_setting_data rc %d",
  13764. - rc);
  13765. - if (is_power_off)
  13766. - goto FREE_POWER_OFF_SETTING;
  13767. - else
  13768. - goto FREE_POWER_SETTING;
  13769. - }
  13770. - }
  13771. - /* remove this code for DFMS test */
  13772. -#if 0
  13773. - /* Power up and probe sensor */
  13774. - rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl,
  13775. - &s_ctrl->sensordata->power_info,
  13776. - s_ctrl->sensor_i2c_client,
  13777. - s_ctrl->sensordata->slave_info,
  13778. - slave_info->sensor_name);
  13779. - if (rc < 0) {
  13780. - pr_err("%s power up failed", slave_info->sensor_name);
  13781. - if (is_power_off)
  13782. - goto FREE_POWER_OFF_SETTING;
  13783. - else
  13784. - goto FREE_POWER_SETTING;
  13785. - }
  13786. -#endif
  13787. + int32_t rc = 0;
  13788. + uint16_t i = 0, size = 0, size_down = 0;
  13789. + struct msm_sensor_ctrl_t *s_ctrl = NULL;
  13790. + struct msm_camera_cci_client *cci_client = NULL;
  13791. + struct msm_camera_sensor_slave_info *slave_info = NULL;
  13792. + struct msm_sensor_power_setting *power_setting = NULL;
  13793. + struct msm_sensor_power_setting *power_down_setting = NULL;
  13794. + struct msm_camera_slave_info *camera_info = NULL;
  13795. + struct msm_camera_power_ctrl_t *power_info = NULL;
  13796. + int c, end;
  13797. + struct msm_sensor_power_setting power_down_setting_t;
  13798. + unsigned long mount_pos = 0;
  13799.  
  13800. - /* Update sensor name in sensor control structure */
  13801. - s_ctrl->sensordata->sensor_name = slave_info->sensor_name;
  13802. -
  13803. - /*
  13804. - Set probe succeeded flag to 1 so that no other camera shall
  13805. - * probed on this slot
  13806. - */
  13807. - s_ctrl->is_probe_succeed = 1;
  13808. -
  13809. - /*
  13810. - * Create /dev/videoX node, comment for now until dummy /dev/videoX
  13811. - * node is created and used by HAL
  13812. - */
  13813. - rc = camera_init_v4l2(&s_ctrl->pdev->dev, &session_id);
  13814. - if (rc < 0) {
  13815. - pr_err("failed: camera_init_v4l2 rc %d", rc);
  13816. - if (is_power_off)
  13817. - goto FREE_POWER_OFF_SETTING;
  13818. - else
  13819. - goto FREE_POWER_SETTING;
  13820. - }
  13821. - s_ctrl->sensordata->sensor_info->session_id = session_id;
  13822. -
  13823. - /* Create /dev/v4l-subdevX device */
  13824. - v4l2_subdev_init(&s_ctrl->msm_sd.sd, s_ctrl->sensor_v4l2_subdev_ops);
  13825. - snprintf(s_ctrl->msm_sd.sd.name, sizeof(s_ctrl->msm_sd.sd.name), "%s",
  13826. - s_ctrl->sensordata->sensor_name);
  13827. - v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, s_ctrl->pdev);
  13828. - s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
  13829. - media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0);
  13830. - s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
  13831. - s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
  13832. - s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
  13833. - s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
  13834. - msm_sd_register(&s_ctrl->msm_sd);
  13835. -
  13836. - memcpy(slave_info->subdev_name, s_ctrl->msm_sd.sd.entity.name,
  13837. - sizeof(slave_info->subdev_name));
  13838. - slave_info->is_probe_succeed = 1;
  13839. -
  13840. - slave_info->sensor_info.session_id =
  13841. - s_ctrl->sensordata->sensor_info->session_id;
  13842. - for (i = 0; i < SUB_MODULE_MAX; i++)
  13843. - slave_info->sensor_info.subdev_id[i] =
  13844. - s_ctrl->sensordata->sensor_info->subdev_id[i];
  13845. - slave_info->sensor_info.is_mount_angle_valid =
  13846. - s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
  13847. - slave_info->sensor_info.sensor_mount_angle =
  13848. - s_ctrl->sensordata->sensor_info->sensor_mount_angle;
  13849. - CDBG("%s:%d sensor name %s\n", __func__, __LINE__,
  13850. - slave_info->sensor_info.sensor_name);
  13851. - CDBG("%s:%d session id %d\n", __func__, __LINE__,
  13852. - slave_info->sensor_info.session_id);
  13853. - for (i = 0; i < SUB_MODULE_MAX; i++)
  13854. - CDBG("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i,
  13855. - slave_info->sensor_info.subdev_id[i]);
  13856. - CDBG("%s:%d mount angle valid %d value %d\n", __func__,
  13857. - __LINE__, slave_info->sensor_info.is_mount_angle_valid,
  13858. - slave_info->sensor_info.sensor_mount_angle);
  13859. -
  13860. - if (copy_to_user((void __user *)setting,
  13861. - (void *)slave_info, sizeof(*slave_info))) {
  13862. - pr_err("%s:%d copy failed\n", __func__, __LINE__);
  13863. - rc = -EFAULT;
  13864. - }
  13865. -
  13866. - pr_warn("rc %d session_id %d", rc, session_id);
  13867. - pr_warn("%s probe succeeded", slave_info->sensor_name);
  13868. -
  13869. - /* remove this code for DFMS test */
  13870. -#if 0
  13871. - /* Power down */
  13872. - s_ctrl->func_tbl->sensor_power_down(
  13873. - s_ctrl,
  13874. - &s_ctrl->sensordata->power_info,
  13875. - s_ctrl->sensor_device_type,
  13876. - s_ctrl->sensor_i2c_client);
  13877. -#endif
  13878. + /* Validate input parameters */
  13879. + if (!setting) {
  13880. + pr_err("failed: slave_info %p", setting);
  13881. + return -EINVAL;
  13882. + }
  13883. +
  13884. + /* Allocate memory for slave info */
  13885. + slave_info = kzalloc(sizeof(*slave_info), GFP_KERNEL);
  13886. + if (!slave_info) {
  13887. + pr_err("failed: no memory slave_info %p", slave_info);
  13888. + return -ENOMEM;
  13889. + }
  13890. +
  13891. + if (copy_from_user(slave_info, (void *)setting, sizeof(*slave_info))) {
  13892. + pr_err("failed: copy_from_user");
  13893. + rc = -EFAULT;
  13894. + goto FREE_SLAVE_INFO;
  13895. + }
  13896. +
  13897. + /* Print slave info */
  13898. + CDBG("camera id %d", slave_info->camera_id);
  13899. + CDBG("slave_addr %x", slave_info->slave_addr);
  13900. + CDBG("addr_type %d", slave_info->addr_type);
  13901. + CDBG("sensor_id_reg_addr %x",
  13902. + slave_info->sensor_id_info.sensor_id_reg_addr);
  13903. + CDBG("sensor_id %x", slave_info->sensor_id_info.sensor_id);
  13904. + CDBG("size %d", slave_info->power_setting_array.size);
  13905. + CDBG("size down %d", slave_info->power_setting_array.size_down);
  13906. +
  13907. + if (slave_info->is_init_params_valid) {
  13908. + CDBG("position %d",
  13909. + slave_info->sensor_init_params.position);
  13910. + CDBG("mount %d",
  13911. + slave_info->sensor_init_params.sensor_mount_angle);
  13912. + }
  13913. +
  13914. + /* Validate camera id */
  13915. + if (slave_info->camera_id >= MAX_CAMERAS) {
  13916. + pr_err("failed: invalid camera id %d max %d",
  13917. + slave_info->camera_id, MAX_CAMERAS);
  13918. + rc = -EINVAL;
  13919. + goto FREE_POWER_SETTING;
  13920. + }
  13921. +
  13922. + /* Extract s_ctrl from camera id */
  13923. + s_ctrl = g_sctrl[slave_info->camera_id];
  13924. + if (!s_ctrl) {
  13925. + pr_err("failed: s_ctrl %p for camera_id %d", s_ctrl,
  13926. + slave_info->camera_id);
  13927. + rc = -EINVAL;
  13928. + goto FREE_POWER_SETTING;
  13929. + }
  13930. +
  13931. + CDBG("s_ctrl[%d] %p", slave_info->camera_id, s_ctrl);
  13932. +
  13933. + if (s_ctrl->is_probe_succeed == 1) {
  13934. + /*
  13935. + * A different sensor on this camera slot has been connected
  13936. + * and its probe already succeeded. Ignore this
  13937. + * probe
  13938. + */
  13939. + pr_err("slot %d has some other sensor", slave_info->camera_id);
  13940. + kfree(slave_info);
  13941. + return 0;
  13942. + }
  13943. +
  13944. + size = slave_info->power_setting_array.size;
  13945. + /* Validate size */
  13946. + if (size > MAX_POWER_CONFIG) {
  13947. + pr_err("failed: invalid number of power_up_setting %d\n", size);
  13948. + rc = -EINVAL;
  13949. + goto FREE_SLAVE_INFO;
  13950. + }
  13951. +
  13952. + /* Allocate memory for power up setting */
  13953. + power_setting = kzalloc(sizeof(*power_setting) * size, GFP_KERNEL);
  13954. + if (!power_setting) {
  13955. + pr_err("failed: no memory power_setting %p", power_setting);
  13956. + rc = -ENOMEM;
  13957. + goto FREE_SLAVE_INFO;
  13958. + }
  13959. +
  13960. + if (copy_from_user(power_setting,
  13961. + (void *)slave_info->power_setting_array.power_setting,
  13962. + sizeof(*power_setting) * size)) {
  13963. + pr_err("failed: copy_from_user");
  13964. + rc = -EFAULT;
  13965. + goto FREE_POWER_SETTING;
  13966. + }
  13967. +
  13968. + /* Print power setting */
  13969. + for (i = 0; i < size; i++) {
  13970. + CDBG("UP seq_type %d seq_val %d config_val %ld delay %d",
  13971. + power_setting[i].seq_type, power_setting[i].seq_val,
  13972. + power_setting[i].config_val, power_setting[i].delay);
  13973. + }
  13974. + /* Power-down settings */
  13975. + size_down = slave_info->power_setting_array.size_down;
  13976. + if (!size_down)
  13977. + size_down = size;
  13978. + /* Validate size_down */
  13979. + if (size_down > MAX_POWER_CONFIG) {
  13980. + pr_err("failed: invalid size_down %d", size_down);
  13981. + rc = -EINVAL;
  13982. + goto FREE_POWER_SETTING;
  13983. + }
  13984. + /* Allocate memory for power down setting */
  13985. + power_down_setting =
  13986. + kzalloc(sizeof(*power_setting) * size_down, GFP_KERNEL);
  13987. + if (!power_down_setting) {
  13988. + pr_err("failed: no memory power_setting %p",
  13989. + power_down_setting);
  13990. + rc = -ENOMEM;
  13991. + goto FREE_POWER_SETTING;
  13992. + }
  13993. +
  13994. + if (slave_info->power_setting_array.power_down_setting) {
  13995. + if (copy_from_user(power_down_setting,
  13996. + (void *)slave_info->power_setting_array.
  13997. + power_down_setting,
  13998. + sizeof(*power_down_setting) * size_down)) {
  13999. + pr_err("failed: copy_from_user");
  14000. + rc = -EFAULT;
  14001. + goto FREE_POWER_DOWN_SETTING;
  14002. + }
  14003. + } else {
  14004. + pr_err("failed: no power_down_setting");
  14005. + if (copy_from_user(power_down_setting,
  14006. + (void *)slave_info->power_setting_array.
  14007. + power_setting,
  14008. + sizeof(*power_down_setting) * size_down)) {
  14009. + pr_err("failed: copy_from_user");
  14010. + rc = -EFAULT;
  14011. + goto FREE_POWER_DOWN_SETTING;
  14012. + }
  14013. +
  14014. + /* reverse the power-up sequence to use it for power down */
  14015. + end = size_down - 1;
  14016. + for (c = 0; c < size_down/2; c++) {
  14017. + power_down_setting_t = power_down_setting[c];
  14018. + power_down_setting[c] = power_down_setting[end];
  14019. + power_down_setting[end] = power_down_setting_t;
  14020. + end--;
  14021. + }
  14022. +
  14023. + }
  14024. +
  14025. + /* Print power setting */
  14026. + for (i = 0; i < size_down; i++) {
  14027. + CDBG("DOWN seq_type %d seq_val %d config_val %ld delay %d",
  14028. + power_down_setting[i].seq_type,
  14029. + power_down_setting[i].seq_val,
  14030. + power_down_setting[i].config_val,
  14031. + power_down_setting[i].delay);
  14032. + }
  14033. +
  14034. + camera_info = kzalloc(sizeof(struct msm_camera_slave_info), GFP_KERNEL);
  14035. + if (!camera_info) {
  14036. + pr_err("failed: no memory slave_info %p", camera_info);
  14037. + goto FREE_POWER_DOWN_SETTING;
  14038. +
  14039. + }
  14040. +
  14041. + /* Fill power up setting and power up setting size */
  14042. + power_info = &s_ctrl->sensordata->power_info;
  14043. + power_info->power_setting = power_setting;
  14044. + power_info->power_setting_size = size;
  14045. + power_info->power_down_setting = power_down_setting;
  14046. + power_info->power_down_setting_size = size_down;
  14047. +
  14048. + s_ctrl->sensordata->slave_info = camera_info;
  14049. +
  14050. + /* Fill sensor slave info */
  14051. + camera_info->sensor_slave_addr = slave_info->slave_addr;
  14052. + camera_info->sensor_id_reg_addr =
  14053. + slave_info->sensor_id_info.sensor_id_reg_addr;
  14054. + camera_info->sensor_id = slave_info->sensor_id_info.sensor_id;
  14055. +
  14056. + /* Fill CCI master, slave address and CCI default params */
  14057. + if (!s_ctrl->sensor_i2c_client) {
  14058. + pr_err("failed: sensor_i2c_client %p",
  14059. + s_ctrl->sensor_i2c_client);
  14060. + rc = -EINVAL;
  14061. + goto FREE_CAMERA_INFO;
  14062. + }
  14063. + /* Fill sensor address type */
  14064. + s_ctrl->sensor_i2c_client->addr_type = slave_info->addr_type;
  14065. + if (s_ctrl->sensor_i2c_client->client)
  14066. + s_ctrl->sensor_i2c_client->client->addr =
  14067. + camera_info->sensor_slave_addr;
  14068. +
  14069. + cci_client = s_ctrl->sensor_i2c_client->cci_client;
  14070. + if (!cci_client) {
  14071. + pr_err("failed: cci_client %p", cci_client);
  14072. + goto FREE_CAMERA_INFO;
  14073. + }
  14074. + cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
  14075. + cci_client->sid = slave_info->slave_addr >> 1;
  14076. + cci_client->retries = 3;
  14077. + cci_client->id_map = 0;
  14078. +
  14079. + /* Parse and fill vreg params for powerup settings */
  14080. + rc = msm_camera_fill_vreg_params(
  14081. + power_info->cam_vreg,
  14082. + power_info->num_vreg,
  14083. + power_info->power_setting,
  14084. + power_info->power_setting_size);
  14085. + if (rc < 0) {
  14086. + pr_err("failed: msm_camera_get_dt_power_setting_data rc %d",
  14087. + rc);
  14088. + goto FREE_CAMERA_INFO;
  14089. + }
  14090. +
  14091. + /* Parse and fill vreg params for powerdown settings*/
  14092. + rc = msm_camera_fill_vreg_params(
  14093. + power_info->cam_vreg,
  14094. + power_info->num_vreg,
  14095. + power_info->power_down_setting,
  14096. + power_info->power_down_setting_size);
  14097. + if (rc < 0) {
  14098. + pr_err("failed: msm_camera_fill_vreg_params for PDOWN rc %d",
  14099. + rc);
  14100. + goto FREE_CAMERA_INFO;
  14101. + }
  14102. +
  14103. + /*
  14104. + * Update sensor, actuator and eeprom name in
  14105. + * sensor control structure.
  14106. + */
  14107. + s_ctrl->sensordata->sensor_name = slave_info->sensor_name;
  14108. + s_ctrl->sensordata->eeprom_name = slave_info->eeprom_name;
  14109. + s_ctrl->sensordata->actuator_name = slave_info->actuator_name;
  14110. +
  14111. + /*
  14112. + * Update eeprom subdevice Id by input eeprom name
  14113. + */
  14114. + rc = msm_sensor_fill_eeprom_subdevid_by_name(s_ctrl);
  14115. + if (rc < 0) {
  14116. + pr_err("%s failed %d\n", __func__, __LINE__);
  14117. + goto FREE_POWER_SETTING;
  14118. + }
  14119. + /*
  14120. + * Update actuator subdevice Id by input actuator name
  14121. + */
  14122. + rc = msm_sensor_fill_actuator_subdevid_by_name(s_ctrl);
  14123. + if (rc < 0) {
  14124. + pr_err("%s failed %d\n", __func__, __LINE__);
  14125. + goto FREE_POWER_SETTING;
  14126. + }
  14127. +
  14128. + /* Power up and probe sensor */
  14129. + rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
  14130. + if (rc < 0) {
  14131. + pr_err("%s power up failed", slave_info->sensor_name);
  14132. + goto FREE_CAMERA_INFO;
  14133. + }
  14134. +
  14135. + pr_err("%s probe succeeded", slave_info->sensor_name);
  14136. +
  14137. + /*
  14138. + * Set probe succeeded flag to 1 so that no other camera shall
  14139. + * be probed on this slot
  14140. + */
  14141. + s_ctrl->is_probe_succeed = 1;
  14142. +
  14143. + /*
  14144. + * Create /dev/videoX node, comment for now until dummy /dev/videoX
  14145. + * node is created and used by HAL
  14146. + */
  14147. +
  14148. + if (s_ctrl->sensor_device_type == MSM_CAMERA_PLATFORM_DEVICE)
  14149. + rc = msm_sensor_driver_create_v4l_subdev(s_ctrl);
  14150. + else
  14151. + rc = msm_sensor_driver_create_i2c_v4l_subdev(s_ctrl);
  14152. + if (rc < 0) {
  14153. + pr_err("failed: camera creat v4l2 rc %d", rc);
  14154. + goto CAMERA_POWER_DOWN;
  14155. + }
  14156. +
  14157. + /* Power down */
  14158. + s_ctrl->func_tbl->sensor_power_down(s_ctrl);
  14159. +
  14160. + rc = msm_sensor_fill_slave_info_init_params(
  14161. + slave_info,
  14162. + s_ctrl->sensordata->sensor_info);
  14163. + if (rc < 0) {
  14164. + pr_err("%s Fill slave info failed", slave_info->sensor_name);
  14165. + goto FREE_CAMERA_INFO;
  14166. + }
  14167. + rc = msm_sensor_validate_slave_info(s_ctrl->sensordata->sensor_info);
  14168. + if (rc < 0) {
  14169. + pr_err("%s Validate slave info failed",
  14170. + slave_info->sensor_name);
  14171. + goto FREE_CAMERA_INFO;
  14172. + }
  14173. + /* Update sensor mount angle and position in media entity flag */
  14174. + mount_pos = s_ctrl->sensordata->sensor_info->position << 16;
  14175. + mount_pos = mount_pos | ((s_ctrl->sensordata->sensor_info->
  14176. + sensor_mount_angle / 90) << 8);
  14177. + s_ctrl->msm_sd.sd.entity.flags = mount_pos | MEDIA_ENT_FL_DEFAULT;
  14178.  
  14179. - return rc;
  14180. + /* Save sensor info */
  14181. + s_ctrl->sensordata->cam_slave_info = slave_info;
  14182.  
  14183. -FREE_POWER_OFF_SETTING:
  14184. - kfree(power_off_setting);
  14185. + return rc;
  14186. +
  14187. +CAMERA_POWER_DOWN:
  14188. + s_ctrl->func_tbl->sensor_power_down(s_ctrl);
  14189. +FREE_CAMERA_INFO:
  14190. + kfree(camera_info);
  14191. +FREE_POWER_DOWN_SETTING:
  14192. + kfree(power_down_setting);
  14193. FREE_POWER_SETTING:
  14194. - kfree(power_setting);
  14195. + kfree(power_setting);
  14196. FREE_SLAVE_INFO:
  14197. - kfree(slave_info);
  14198. - return rc;
  14199. + kfree(slave_info);
  14200. + return rc;
  14201. }
  14202.  
  14203. static int32_t msm_sensor_driver_get_gpio_data(
  14204. @@ -431,12 +713,12 @@ FREE_GPIO_CONF:
  14205. return rc;
  14206. }
  14207.  
  14208. -static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl,
  14209. - struct platform_device *pdev)
  14210. +static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl)
  14211. {
  14212. int32_t rc = 0;
  14213. struct msm_camera_sensor_board_info *sensordata = NULL;
  14214. - struct device_node *of_node = pdev->dev.of_node;
  14215. + struct device_node *of_node = s_ctrl->of_node;
  14216. + uint32_t cell_id;
  14217.  
  14218. s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
  14219. if (!s_ctrl->sensordata) {
  14220. @@ -446,27 +728,27 @@ static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl,
  14221.  
  14222. sensordata = s_ctrl->sensordata;
  14223.  
  14224. -
  14225. /*
  14226. * Read cell index - this cell index will be the camera slot where
  14227. * this camera will be mounted
  14228. */
  14229. - rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
  14230. + rc = of_property_read_u32(of_node, "cell-index", &cell_id);
  14231. if (rc < 0) {
  14232. pr_err("failed: cell-index rc %d", rc);
  14233. goto FREE_SENSOR_DATA;
  14234. }
  14235. + s_ctrl->id = cell_id;
  14236.  
  14237. - /* Validate pdev->id */
  14238. - if (pdev->id >= MAX_CAMERAS) {
  14239. - pr_err("failed: invalid pdev->id %d", pdev->id);
  14240. + /* Validate cell_id */
  14241. + if (cell_id >= MAX_CAMERAS) {
  14242. + pr_err("failed: invalid cell_id %d", cell_id);
  14243. rc = -EINVAL;
  14244. goto FREE_SENSOR_DATA;
  14245. }
  14246.  
  14247. - /* Check whether g_sctrl is already filled for this pdev id */
  14248. - if (g_sctrl[pdev->id]) {
  14249. - pr_err("failed: sctrl already filled for id %d", pdev->id);
  14250. + /* Check whether g_sctrl is already filled for this cell_id */
  14251. + if (g_sctrl[cell_id]) {
  14252. + pr_err("failed: sctrl already filled for cell_id %d", cell_id);
  14253. rc = -EINVAL;
  14254. goto FREE_SENSOR_DATA;
  14255. }
  14256. @@ -505,11 +787,13 @@ static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl,
  14257. }
  14258.  
  14259. /* Get mount angle */
  14260. +
  14261. rc = of_property_read_u32(of_node, "qcom,mount-angle",
  14262. &sensordata->sensor_info->sensor_mount_angle);
  14263. CDBG("%s qcom,mount-angle %d, rc %d\n", __func__,
  14264. sensordata->sensor_info->sensor_mount_angle, rc);
  14265. if (rc < 0) {
  14266. + /* Invalidate mount angle flag */
  14267. sensordata->sensor_info->is_mount_angle_valid = 0;
  14268. sensordata->sensor_info->sensor_mount_angle = 0;
  14269. rc = 0;
  14270. @@ -517,6 +801,22 @@ static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl,
  14271. sensordata->sensor_info->is_mount_angle_valid = 1;
  14272. }
  14273.  
  14274. + rc = of_property_read_u32(of_node, "qcom,sensor-position",
  14275. + &sensordata->sensor_info->position);
  14276. + if (rc < 0) {
  14277. + pr_err("%s:%d Invalid sensor position\n", __func__, __LINE__);
  14278. + sensordata->sensor_info->position = INVALID_CAMERA_B;
  14279. + }
  14280. +
  14281. + rc = of_property_read_u32(of_node, "qcom,sensor-mode",
  14282. + &sensordata->sensor_info->modes_supported);
  14283. + if (rc < 0) {
  14284. + pr_err("%s:%d Invalid sensor mode supported\n",
  14285. + __func__, __LINE__);
  14286. + sensordata->sensor_info->modes_supported = CAMERA_MODE_INVALID;
  14287. + rc = 0;
  14288. + }
  14289. +
  14290. /* Get vdd-cx regulator */
  14291. /*Optional property, don't return error if absent */
  14292. of_property_read_string(of_node, "qcom,vdd-cx-name",
  14293. @@ -534,27 +834,13 @@ FREE_SENSOR_DATA:
  14294. return rc;
  14295. }
  14296.  
  14297. -static int32_t msm_sensor_driver_parse(struct platform_device *pdev)
  14298. +static int32_t msm_sensor_driver_parse(struct msm_sensor_ctrl_t *s_ctrl)
  14299. {
  14300. int32_t rc = 0;
  14301. - struct msm_sensor_ctrl_t *s_ctrl = NULL;
  14302.  
  14303. CDBG("Enter");
  14304. /* Validate input parameters */
  14305. - if (!pdev || !pdev->dev.of_node) {
  14306. - pr_err("failed: invalid params");
  14307. - return -EINVAL;
  14308. - }
  14309.  
  14310. - /* Create sensor control structure */
  14311. - s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
  14312. - if (!s_ctrl) {
  14313. - pr_err("failed: no memory s_ctrl %p", s_ctrl);
  14314. - return -ENOMEM;
  14315. - }
  14316. -
  14317. - /* Fill platform device */
  14318. - s_ctrl->pdev = pdev;
  14319.  
  14320. /* Allocate memory for sensor_i2c_client */
  14321. s_ctrl->sensor_i2c_client = kzalloc(sizeof(*s_ctrl->sensor_i2c_client),
  14322. @@ -562,7 +848,7 @@ static int32_t msm_sensor_driver_parse(struct platform_device *pdev)
  14323. if (!s_ctrl->sensor_i2c_client) {
  14324. pr_err("failed: no memory sensor_i2c_client %p",
  14325. s_ctrl->sensor_i2c_client);
  14326. - goto FREE_SCTRL;
  14327. + return -ENOMEM;
  14328. }
  14329.  
  14330. /* Allocate memory for mutex */
  14331. @@ -575,15 +861,12 @@ static int32_t msm_sensor_driver_parse(struct platform_device *pdev)
  14332. }
  14333.  
  14334. /* Parse dt information and store in sensor control structure */
  14335. - rc = msm_sensor_driver_get_dt_data(s_ctrl, pdev);
  14336. + rc = msm_sensor_driver_get_dt_data(s_ctrl);
  14337. if (rc < 0) {
  14338. pr_err("failed: rc %d", rc);
  14339. goto FREE_MUTEX;
  14340. }
  14341.  
  14342. - /* Fill device in power info */
  14343. - s_ctrl->sensordata->power_info.dev = &pdev->dev;
  14344. -
  14345. /* Initialize mutex */
  14346. mutex_init(s_ctrl->msm_sensor_mutex);
  14347.  
  14348. @@ -600,8 +883,8 @@ static int32_t msm_sensor_driver_parse(struct platform_device *pdev)
  14349. }
  14350.  
  14351. /* Store sensor control structure in static database */
  14352. - g_sctrl[pdev->id] = s_ctrl;
  14353. - pr_warn("g_sctrl[%d] %p", pdev->id, g_sctrl[pdev->id]);
  14354. + g_sctrl[s_ctrl->id] = s_ctrl;
  14355. + pr_err("g_sctrl[%d] %p", s_ctrl->id, g_sctrl[s_ctrl->id]);
  14356.  
  14357. return rc;
  14358.  
  14359. @@ -615,20 +898,141 @@ FREE_MUTEX:
  14360. kfree(s_ctrl->msm_sensor_mutex);
  14361. FREE_SENSOR_I2C_CLIENT:
  14362. kfree(s_ctrl->sensor_i2c_client);
  14363. -FREE_SCTRL:
  14364. + return rc;
  14365. +}
  14366. +
  14367. +static int32_t msm_sensor_driver_platform_probe(struct platform_device *pdev)
  14368. +{
  14369. + int32_t rc = 0;
  14370. + struct msm_sensor_ctrl_t *s_ctrl = NULL;
  14371. +
  14372. +
  14373. + /* Create sensor control structure */
  14374. + s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
  14375. + if (!s_ctrl) {
  14376. + pr_err("failed: no memory s_ctrl %p", s_ctrl);
  14377. + return -ENOMEM;
  14378. + }
  14379. +
  14380. + platform_set_drvdata(pdev, s_ctrl);
  14381. +
  14382. + /* Initialize sensor device type */
  14383. + s_ctrl->sensor_device_type = MSM_CAMERA_PLATFORM_DEVICE;
  14384. + s_ctrl->of_node = pdev->dev.of_node;
  14385. +
  14386. + rc = msm_sensor_driver_parse(s_ctrl);
  14387. + if (rc < 0) {
  14388. + pr_err("failed: msm_sensor_driver_parse rc %d", rc);
  14389. + goto FREE_S_CTRL;
  14390. + }
  14391. +
  14392. + /* Fill platform device */
  14393. + pdev->id = s_ctrl->id;
  14394. + s_ctrl->pdev = pdev;
  14395. +
  14396. + /* Fill device in power info */
  14397. + s_ctrl->sensordata->power_info.dev = &pdev->dev;
  14398. +
  14399. + return rc;
  14400. +FREE_S_CTRL:
  14401. kfree(s_ctrl);
  14402. return rc;
  14403. }
  14404.  
  14405. +static int32_t msm_sensor_driver_i2c_probe(struct i2c_client *client,
  14406. + const struct i2c_device_id *id)
  14407. +{
  14408. + int32_t rc = 0;
  14409. + struct msm_sensor_ctrl_t *s_ctrl;
  14410. +
  14411. + CDBG("\n\nEnter: msm_sensor_driver_i2c_probe");
  14412. + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
  14413. + pr_err("%s %s i2c_check_functionality failed\n",
  14414. + __func__, client->name);
  14415. + rc = -EFAULT;
  14416. + return rc;
  14417. + }
  14418. +
  14419. + /* Create sensor control structure */
  14420. + s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
  14421. + if (!s_ctrl) {
  14422. + pr_err("failed: no memory s_ctrl %p", s_ctrl);
  14423. + return -ENOMEM;
  14424. + }
  14425. +
  14426. + i2c_set_clientdata(client, s_ctrl);
  14427. +
  14428. + /* Initialize sensor device type */
  14429. + s_ctrl->sensor_device_type = MSM_CAMERA_I2C_DEVICE;
  14430. + s_ctrl->of_node = client->dev.of_node;
  14431. +
  14432. + rc = msm_sensor_driver_parse(s_ctrl);
  14433. + if (rc < 0) {
  14434. + pr_err("failed: msm_sensor_driver_parse rc %d", rc);
  14435. + goto FREE_S_CTRL;
  14436. + }
  14437. +
  14438. + if (s_ctrl->sensor_i2c_client != NULL) {
  14439. + s_ctrl->sensor_i2c_client->client = client;
  14440. + s_ctrl->sensordata->power_info.dev = &client->dev;
  14441. +
  14442. + }
  14443. +
  14444. + return rc;
  14445. +FREE_S_CTRL:
  14446. + kfree(s_ctrl);
  14447. + return rc;
  14448. +}
  14449. +
  14450. +static int msm_sensor_driver_i2c_remove(struct i2c_client *client)
  14451. +{
  14452. + struct msm_sensor_ctrl_t *s_ctrl = i2c_get_clientdata(client);
  14453. +
  14454. + pr_err("%s: sensor FREE\n", __func__);
  14455. +
  14456. + if (!s_ctrl) {
  14457. + pr_err("%s: sensor device is NULL\n", __func__);
  14458. + return 0;
  14459. + }
  14460. +
  14461. + g_sctrl[s_ctrl->id] = NULL;
  14462. + msm_sensor_free_sensor_data(s_ctrl);
  14463. + kfree(s_ctrl->msm_sensor_mutex);
  14464. + kfree(s_ctrl->sensor_i2c_client);
  14465. + kfree(s_ctrl);
  14466. +
  14467. + return 0;
  14468. +}
  14469. +
  14470. +static const struct i2c_device_id i2c_id[] = {
  14471. + {SENSOR_DRIVER_I2C, (kernel_ulong_t)NULL},
  14472. + { }
  14473. +};
  14474. +
  14475. +static struct i2c_driver msm_sensor_driver_i2c = {
  14476. + .id_table = i2c_id,
  14477. + .probe = msm_sensor_driver_i2c_probe,
  14478. + .remove = msm_sensor_driver_i2c_remove,
  14479. + .driver = {
  14480. + .name = SENSOR_DRIVER_I2C,
  14481. + },
  14482. +};
  14483. +
  14484. static int __init msm_sensor_driver_init(void)
  14485. {
  14486. int32_t rc = 0;
  14487.  
  14488. - pr_warn("%s : Enter", __func__);
  14489. + CDBG("Enter");
  14490. rc = platform_driver_probe(&msm_sensor_platform_driver,
  14491. - msm_sensor_driver_parse);
  14492. - if (!rc)
  14493. - pr_warn("probe success");
  14494. + msm_sensor_driver_platform_probe);
  14495. + if (!rc) {
  14496. + CDBG("probe success");
  14497. + return rc;
  14498. + } else {
  14499. + CDBG("probe i2c");
  14500. + rc = i2c_add_driver(&msm_sensor_driver_i2c);
  14501. + }
  14502. +
  14503. return rc;
  14504. }
  14505.  
  14506. @@ -636,6 +1040,8 @@ static int __init msm_sensor_driver_init(void)
  14507. static void __exit msm_sensor_driver_exit(void)
  14508. {
  14509. CDBG("Enter");
  14510. + platform_driver_unregister(&msm_sensor_platform_driver);
  14511. + i2c_del_driver(&msm_sensor_driver_i2c);
  14512. return;
  14513. }
  14514.  
  14515. diff --git a/drivers/media/platform/msm/camera_v2/sensor/mt9m114.c b/drivers/media/platform/msm/camera_v2/sensor/mt9m114.c
  14516. index 02ff7a2..367bdbe 100644
  14517. --- a/drivers/media/platform/msm/camera_v2/sensor/mt9m114.c
  14518. +++ b/drivers/media/platform/msm/camera_v2/sensor/mt9m114.c
  14519. @@ -1,4 +1,4 @@
  14520. -/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  14521. +/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
  14522. *
  14523. * This program is free software; you can redistribute it and/or modify
  14524. * it under the terms of the GNU General Public License version 2 and
  14525. @@ -19,7 +19,7 @@
  14526.  
  14527. /*#define CONFIG_MSMB_CAMERA_DEBUG*/
  14528. #undef CDBG
  14529. -#ifdef MT9M114_DEBUG
  14530. +#ifdef CONFIG_MSMB_CAMERA_DEBUG
  14531. #define CDBG(fmt, args...) pr_err(fmt, ##args)
  14532. #else
  14533. #define CDBG(fmt, args...) do { } while (0)
  14534. @@ -36,6 +36,50 @@
  14535. DEFINE_MSM_MUTEX(mt9m114_mut);
  14536. static struct msm_sensor_ctrl_t mt9m114_s_ctrl;
  14537.  
  14538. +static struct msm_sensor_power_setting mt9m114_power_setting[] = {
  14539. + {
  14540. + .seq_type = SENSOR_VREG,
  14541. + .seq_val = CAM_VIO,
  14542. + .config_val = 0,
  14543. + .delay = 0,
  14544. + },
  14545. + {
  14546. + .seq_type = SENSOR_VREG,
  14547. + .seq_val = CAM_VDIG,
  14548. + .config_val = 0,
  14549. + .delay = 0,
  14550. + },
  14551. + {
  14552. + .seq_type = SENSOR_VREG,
  14553. + .seq_val = CAM_VANA,
  14554. + .config_val = 0,
  14555. + .delay = 0,
  14556. + },
  14557. + {
  14558. + .seq_type = SENSOR_GPIO,
  14559. + .seq_val = SENSOR_GPIO_RESET,
  14560. + .config_val = GPIO_OUT_LOW,
  14561. + .delay = 1,
  14562. + },
  14563. + {
  14564. + .seq_type = SENSOR_GPIO,
  14565. + .seq_val = SENSOR_GPIO_RESET,
  14566. + .config_val = GPIO_OUT_HIGH,
  14567. + .delay = 30,
  14568. + },
  14569. + {
  14570. + .seq_type = SENSOR_CLK,
  14571. + .seq_val = SENSOR_CAM_MCLK,
  14572. + .config_val = 0,
  14573. + .delay = 100,
  14574. + },
  14575. + {
  14576. + .seq_type = SENSOR_I2C_MUX,
  14577. + .seq_val = 0,
  14578. + .config_val = 0,
  14579. + .delay = 0,
  14580. + },
  14581. +};
  14582.  
  14583. static struct msm_camera_i2c_reg_conf mt9m114_720p_settings[] = {
  14584. {0xdc00, 0x50, MSM_CAMERA_I2C_BYTE_DATA, MSM_CAMERA_I2C_CMD_WRITE},
  14585. @@ -1068,6 +1112,7 @@ static int32_t msm_mt9m114_i2c_probe(struct i2c_client *client,
  14586. {
  14587. return msm_sensor_i2c_probe(client, id, &mt9m114_s_ctrl);
  14588. }
  14589. +
  14590. static struct i2c_driver mt9m114_i2c_driver = {
  14591. .id_table = mt9m114_i2c_id,
  14592. .probe = msm_mt9m114_i2c_probe,
  14593. @@ -1107,14 +1152,12 @@ static int32_t mt9m114_platform_probe(struct platform_device *pdev)
  14594. static int __init mt9m114_init_module(void)
  14595. {
  14596. int32_t rc;
  14597. - CDBG("%s:%d\n", __func__, __LINE__);
  14598. + pr_info("%s:%d\n", __func__, __LINE__);
  14599. rc = platform_driver_probe(&mt9m114_platform_driver,
  14600. mt9m114_platform_probe);
  14601. - if (!rc) {
  14602. - pr_info("%s: probe success\n", __func__);
  14603. + if (!rc)
  14604. return rc;
  14605. - }
  14606. - CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
  14607. + pr_err("%s:%d rc %d\n", __func__, __LINE__, rc);
  14608. return i2c_add_driver(&mt9m114_i2c_driver);
  14609. }
  14610.  
  14611. @@ -1148,6 +1191,10 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14612. for (i = 0; i < SUB_MODULE_MAX; i++)
  14613. cdata->cfg.sensor_info.subdev_id[i] =
  14614. s_ctrl->sensordata->sensor_info->subdev_id[i];
  14615. + cdata->cfg.sensor_info.is_mount_angle_valid =
  14616. + s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
  14617. + cdata->cfg.sensor_info.sensor_mount_angle =
  14618. + s_ctrl->sensordata->sensor_info->sensor_mount_angle;
  14619. CDBG("%s:%d sensor name %s\n", __func__, __LINE__,
  14620. cdata->cfg.sensor_info.sensor_name);
  14621. CDBG("%s:%d session id %d\n", __func__, __LINE__,
  14622. @@ -1155,6 +1202,9 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14623. for (i = 0; i < SUB_MODULE_MAX; i++)
  14624. CDBG("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i,
  14625. cdata->cfg.sensor_info.subdev_id[i]);
  14626. + CDBG("%s:%d mount angle valid %d value %d\n", __func__,
  14627. + __LINE__, cdata->cfg.sensor_info.is_mount_angle_valid,
  14628. + cdata->cfg.sensor_info.sensor_mount_angle);
  14629.  
  14630. break;
  14631. case CFG_SET_INIT_SETTING:
  14632. @@ -1190,8 +1240,12 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14633. MSM_CAMERA_I2C_WORD_DATA);
  14634. break;
  14635. case CFG_GET_SENSOR_INIT_PARAMS:
  14636. - cdata->cfg.sensor_init_params =
  14637. - *s_ctrl->sensordata->sensor_init_params;
  14638. + cdata->cfg.sensor_init_params.modes_supported =
  14639. + s_ctrl->sensordata->sensor_info->modes_supported;
  14640. + cdata->cfg.sensor_init_params.position =
  14641. + s_ctrl->sensordata->sensor_info->position;
  14642. + cdata->cfg.sensor_init_params.sensor_mount_angle =
  14643. + s_ctrl->sensordata->sensor_info->sensor_mount_angle;
  14644. CDBG("%s:%d init params mode %d pos %d mount %d\n", __func__,
  14645. __LINE__,
  14646. cdata->cfg.sensor_init_params.modes_supported,
  14647. @@ -1204,8 +1258,8 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14648. uint16_t size;
  14649. int slave_index = 0;
  14650. if (copy_from_user(&sensor_slave_info,
  14651. - (void *)cdata->cfg.setting,
  14652. - sizeof(struct msm_camera_sensor_slave_info))) {
  14653. + (void *)cdata->cfg.setting,
  14654. + sizeof(struct msm_camera_sensor_slave_info))) {
  14655. pr_err("%s:%d failed\n", __func__, __LINE__);
  14656. rc = -EFAULT;
  14657. break;
  14658. @@ -1229,13 +1283,14 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14659. * size, GFP_KERNEL);
  14660. if (!tmp) {
  14661. pr_err("%s: failed to alloc mem\n", __func__);
  14662. - rc = -ENOMEM;
  14663. - break;
  14664. + rc = -ENOMEM;
  14665. + break;
  14666. }
  14667. kfree(p_ctrl->power_setting);
  14668. p_ctrl->power_setting = tmp;
  14669. }
  14670. p_ctrl->power_setting_size = size;
  14671. +
  14672. rc = copy_from_user(p_ctrl->power_setting, (void *)
  14673. sensor_slave_info.power_setting_array.power_setting,
  14674. size * sizeof(struct msm_sensor_power_setting));
  14675. @@ -1275,6 +1330,14 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14676. break;
  14677. }
  14678.  
  14679. + if (!conf_array.size ||
  14680. + conf_array.size > I2C_SEQ_REG_DATA_MAX) {
  14681. +
  14682. + pr_err("%s:%d failed\n", __func__, __LINE__);
  14683. + rc = -EFAULT;
  14684. + break;
  14685. + }
  14686. +
  14687. reg_setting = kzalloc(conf_array.size *
  14688. (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
  14689. if (!reg_setting) {
  14690. @@ -1336,22 +1399,14 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14691.  
  14692. case CFG_POWER_UP:
  14693. if (s_ctrl->func_tbl->sensor_power_up)
  14694. - rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl,
  14695. - &s_ctrl->sensordata->power_info,
  14696. - s_ctrl->sensor_i2c_client,
  14697. - s_ctrl->sensordata->slave_info,
  14698. - s_ctrl->sensordata->sensor_name);
  14699. + rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
  14700. else
  14701. rc = -EFAULT;
  14702. break;
  14703.  
  14704. case CFG_POWER_DOWN:
  14705. if (s_ctrl->func_tbl->sensor_power_down)
  14706. - rc = s_ctrl->func_tbl->sensor_power_down(
  14707. - s_ctrl,
  14708. - &s_ctrl->sensordata->power_info,
  14709. - s_ctrl->sensor_device_type,
  14710. - s_ctrl->sensor_i2c_client);
  14711. + rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl);
  14712. else
  14713. rc = -EFAULT;
  14714. break;
  14715. @@ -1386,8 +1441,51 @@ int32_t mt9m114_sensor_config(struct msm_sensor_ctrl_t *s_ctrl,
  14716. break;
  14717. }
  14718. break;
  14719. - }
  14720. - default:
  14721. + }
  14722. + case CFG_SET_SATURATION: {
  14723. + int32_t sat_lev;
  14724. + if (copy_from_user(&sat_lev, (void *)cdata->cfg.setting,
  14725. + sizeof(int32_t))) {
  14726. + pr_err("%s:%d failed\n", __func__, __LINE__);
  14727. + rc = -EFAULT;
  14728. + break;
  14729. + }
  14730. + pr_debug("%s: Saturation Value is %d\n", __func__, sat_lev);
  14731. + break;
  14732. + }
  14733. + case CFG_SET_CONTRAST: {
  14734. + int32_t con_lev;
  14735. + if (copy_from_user(&con_lev, (void *)cdata->cfg.setting,
  14736. + sizeof(int32_t))) {
  14737. + pr_err("%s:%d failed\n", __func__, __LINE__);
  14738. + rc = -EFAULT;
  14739. + break;
  14740. + }
  14741. + pr_debug("%s: Contrast Value is %d\n", __func__, con_lev);
  14742. + break;
  14743. + }
  14744. + case CFG_SET_SHARPNESS: {
  14745. + int32_t shp_lev;
  14746. + if (copy_from_user(&shp_lev, (void *)cdata->cfg.setting,
  14747. + sizeof(int32_t))) {
  14748. + pr_err("%s:%d failed\n", __func__, __LINE__);
  14749. + rc = -EFAULT;
  14750. + break;
  14751. + }
  14752. + pr_debug("%s: Sharpness Value is %d\n", __func__, shp_lev);
  14753. + break;
  14754. + }
  14755. + case CFG_SET_AUTOFOCUS: {
  14756. + /* TO-DO: set the Auto Focus */
  14757. + pr_debug("%s: Setting Auto Focus\n", __func__);
  14758. + break;
  14759. + }
  14760. + case CFG_CANCEL_AUTOFOCUS: {
  14761. + /* TO-DO: Cancel the Auto Focus */
  14762. + pr_debug("%s: Cancelling Auto Focus\n", __func__);
  14763. + break;
  14764. + }
  14765. + default:
  14766. rc = -EFAULT;
  14767. break;
  14768. }
  14769. @@ -1406,6 +1504,8 @@ static struct msm_sensor_fn_t mt9m114_sensor_func_tbl = {
  14770.  
  14771. static struct msm_sensor_ctrl_t mt9m114_s_ctrl = {
  14772. .sensor_i2c_client = &mt9m114_sensor_i2c_client,
  14773. + .power_setting_array.power_setting = mt9m114_power_setting,
  14774. + .power_setting_array.size = ARRAY_SIZE(mt9m114_power_setting),
  14775. .msm_sensor_mutex = &mt9m114_mut,
  14776. .sensor_v4l2_subdev_info = mt9m114_subdev_info,
  14777. .sensor_v4l2_subdev_info_size = ARRAY_SIZE(mt9m114_subdev_info),
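
A rough, standalone C sketch of the table-driven power sequence the hunks above introduce (mt9m114_power_setting[] wired into power_setting_array, with sensor_power_up/sensor_power_down reduced to single-argument calls). The types and the executor below are stand-ins, not the real msm_sensor framework:

#include <stdio.h>

enum seq_type { SENSOR_VREG, SENSOR_GPIO, SENSOR_CLK, SENSOR_I2C_MUX };

struct power_setting {
    enum seq_type seq_type;
    int seq_val;       /* which regulator / GPIO / clock */
    long config_val;   /* e.g. GPIO level or clock rate */
    int delay_ms;      /* settle time after the step */
};

/* Same ordering as the table added in the patch: rails, reset GPIO, MCLK, I2C mux. */
static const struct power_setting seq[] = {
    { SENSOR_VREG, 0 /* VIO  */, 0, 0 },
    { SENSOR_VREG, 1 /* VDIG */, 0, 0 },
    { SENSOR_VREG, 2 /* VANA */, 0, 0 },
    { SENSOR_GPIO, 0 /* RESET */, 0 /* low  */, 1 },
    { SENSOR_GPIO, 0 /* RESET */, 1 /* high */, 30 },
    { SENSOR_CLK,  0 /* MCLK  */, 0, 100 },
    { SENSOR_I2C_MUX, 0, 0, 0 },
};

/* Stub executor: the common sensor code walks the table instead of open-coded steps. */
static int apply_power_sequence(const struct power_setting *s, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        printf("step %zu: type=%d val=%d cfg=%ld delay=%dms\n",
               i, s[i].seq_type, s[i].seq_val, s[i].config_val, s[i].delay_ms);
        /* real code would enable the regulator/GPIO/clock here, then msleep() */
    }
    return 0;
}

int main(void)
{
    return apply_power_sequence(seq, sizeof(seq) / sizeof(seq[0]));
}
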
  14778. diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
  14779. index 258009c..c0b3531 100644
  14780. --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
  14781. +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
  14782. @@ -35,7 +35,7 @@
  14783. V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE
  14784.  
  14785. #define NUM_MBS_PER_SEC(__height, __width, __fps) ({\
  14786. - (__height / 16) * (__width / 16) * __fps; \
  14787. + (__height >> 4) * (__width >> 4) * __fps; \
  14788. })
  14789.  
  14790. #define VIDC_BUS_LOAD(__height, __width, __fps, __br) ({\
  14791. @@ -50,9 +50,30 @@ static void msm_comm_generate_session_error(struct msm_vidc_inst *inst);
  14792. static void msm_comm_generate_sys_error(struct msm_vidc_inst *inst);
  14793. static void handle_session_error(enum command_response cmd, void *data);
  14794.  
  14795. -static inline bool is_turbo_session(struct msm_vidc_inst *inst)
  14796. +static bool is_turbo_requested(struct msm_vidc_core *core,
  14797. + enum session_type type)
  14798. {
  14799. - return !!(inst->flags & VIDC_TURBO);
  14800. + struct msm_vidc_inst *inst = NULL;
  14801. + bool wants_turbo = false;
  14802. +
  14803. + mutex_lock(&core->lock);
  14804. + list_for_each_entry(inst, &core->instances, list) {
  14805. +
  14806. + mutex_lock(&inst->lock);
  14807. + if (inst->session_type == type &&
  14808. + inst->state >= MSM_VIDC_OPEN_DONE &&
  14809. + inst->state < MSM_VIDC_STOP_DONE) {
  14810. + wants_turbo = inst->flags & VIDC_TURBO;
  14811. + }
  14812. + mutex_unlock(&inst->lock);
  14813. +
  14814. + if (wants_turbo)
  14815. + break;
  14816. + }
  14817. +
  14818. + mutex_unlock(&core->lock);
  14819. +
  14820. + return wants_turbo;
  14821. }
  14822.  
  14823. static bool is_thumbnail_session(struct msm_vidc_inst *inst)
  14824. @@ -68,17 +89,6 @@ static bool is_thumbnail_session(struct msm_vidc_inst *inst)
  14825. }
  14826. return false;
  14827. }
  14828. -
  14829. -static inline bool is_non_realtime_session(struct msm_vidc_inst *inst)
  14830. -{
  14831. - int rc = 0;
  14832. - struct v4l2_control ctrl = {
  14833. - .id = V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY
  14834. - };
  14835. - rc = v4l2_g_ctrl(&inst->ctrl_handler, &ctrl);
  14836. - return (!rc && ctrl.value);
  14837. -}
  14838. -
  14839. enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst)
  14840. {
  14841. if (inst->session_type == MSM_VIDC_DECODER) {
  14842. @@ -97,57 +107,15 @@ enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst)
  14843. static int msm_comm_get_mbs_per_sec(struct msm_vidc_inst *inst)
  14844. {
  14845. int height, width;
  14846. - int fps, rc;
  14847. - struct v4l2_control ctrl;
  14848. height = max(inst->prop.height[CAPTURE_PORT],
  14849. inst->prop.height[OUTPUT_PORT]);
  14850. width = max(inst->prop.width[CAPTURE_PORT],
  14851. inst->prop.width[OUTPUT_PORT]);
  14852. - ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE;
  14853. - rc = v4l2_g_ctrl(&inst->ctrl_handler, &ctrl);
  14854. - if (!rc && ctrl.value) {
  14855. - fps = (ctrl.value >> 16)? ctrl.value >> 16: 1;
  14856. - return NUM_MBS_PER_SEC(height, width, fps);
  14857. - } else
  14858. - return NUM_MBS_PER_SEC(height, width, inst->prop.fps);
  14859. -}
  14860. -enum load_calc_quirks {
  14861. - LOAD_CALC_NO_QUIRKS = 0,
  14862. - LOAD_CALC_IGNORE_TURBO_LOAD = 1 << 0,
  14863. - LOAD_CALC_IGNORE_THUMBNAIL_LOAD = 1 << 1,
  14864. - LOAD_CALC_IGNORE_NON_REALTIME_LOAD = 1 << 2,
  14865. -};
  14866. -
  14867. -static int msm_comm_get_inst_load(struct msm_vidc_inst *inst,
  14868. - enum load_calc_quirks quirks)
  14869. -{
  14870. - int load = 0;
  14871. -
  14872. - if (!(inst->state >= MSM_VIDC_OPEN_DONE &&
  14873. - inst->state < MSM_VIDC_STOP_DONE))
  14874. - return 0;
  14875. -
  14876. - load = msm_comm_get_mbs_per_sec(inst);
  14877. -
  14878. - if (is_thumbnail_session(inst)) {
  14879. - if (quirks & LOAD_CALC_IGNORE_THUMBNAIL_LOAD)
  14880. - load = 0;
  14881. - }
  14882. -
  14883. - if (is_turbo_session(inst)) {
  14884. - if (!(quirks & LOAD_CALC_IGNORE_TURBO_LOAD))
  14885. - load = inst->core->resources.max_load;
  14886. - }
  14887. -
  14888. - if (is_non_realtime_session(inst) &&
  14889. - (quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD))
  14890. - load = msm_comm_get_mbs_per_sec(inst) / inst->prop.fps;
  14891. -
  14892. - return load;
  14893. + return NUM_MBS_PER_SEC(height, width, inst->prop.fps);
  14894. }
  14895.  
  14896. static int msm_comm_get_load(struct msm_vidc_core *core,
  14897. - enum session_type type, enum load_calc_quirks quirks)
  14898. + enum session_type type)
  14899. {
  14900. struct msm_vidc_inst *inst = NULL;
  14901. int num_mbs_per_sec = 0;
  14902. @@ -157,11 +125,14 @@ static int msm_comm_get_load(struct msm_vidc_core *core,
  14903. }
  14904. mutex_lock(&core->lock);
  14905. list_for_each_entry(inst, &core->instances, list) {
  14906. - if (inst->session_type != type)
  14907. - continue;
  14908. -
  14909. mutex_lock(&inst->lock);
  14910. - num_mbs_per_sec += msm_comm_get_inst_load(inst, quirks);
  14911. + if (inst->session_type == type &&
  14912. + inst->state >= MSM_VIDC_OPEN_DONE &&
  14913. + inst->state < MSM_VIDC_STOP_DONE) {
  14914. + if (!is_thumbnail_session(inst))
  14915. + num_mbs_per_sec +=
  14916. + msm_comm_get_mbs_per_sec(inst);
  14917. + }
  14918. mutex_unlock(&inst->lock);
  14919. }
  14920. mutex_unlock(&core->lock);
  14921. @@ -186,7 +157,10 @@ static int msm_comm_scale_bus(struct msm_vidc_core *core,
  14922. return -EINVAL;
  14923. }
  14924.  
  14925. - load = msm_comm_get_load(core, type, LOAD_CALC_NO_QUIRKS);
  14926. + if (is_turbo_requested(core, type))
  14927. + load = core->resources.max_load;
  14928. + else
  14929. + load = msm_comm_get_load(core, type);
  14930.  
  14931. rc = call_hfi_op(hdev, scale_bus, hdev->hfi_device_data,
  14932. load, type, mtype);
  14933. @@ -649,11 +623,7 @@ static void handle_event_change(enum command_response cmd, void *data)
  14934. rc = msm_vidc_check_session_supported(inst);
  14935. if (!rc) {
  14936. msm_vidc_queue_v4l2_event(inst, event);
  14937. - } else if (rc) {
  14938. - msm_vidc_queue_v4l2_event(inst,
  14939. - V4L2_EVENT_MSM_VIDC_HW_OVERLOAD);
  14940. }
  14941. - wake_up(&inst->kernel_event_queue);
  14942.  
  14943. return;
  14944. } else {
  14945. @@ -1544,7 +1514,6 @@ static int msm_comm_scale_clocks(struct msm_vidc_core *core)
  14946. int num_mbs_per_sec;
  14947. int rc = 0;
  14948. struct hfi_device *hdev;
  14949. -
  14950. if (!core) {
  14951. dprintk(VIDC_ERR, "%s Invalid args: %p\n", __func__, core);
  14952. return -EINVAL;
  14953. @@ -1557,10 +1526,13 @@ static int msm_comm_scale_clocks(struct msm_vidc_core *core)
  14954. return -EINVAL;
  14955. }
  14956.  
  14957. - num_mbs_per_sec =
  14958. - msm_comm_get_load(core, MSM_VIDC_ENCODER, LOAD_CALC_NO_QUIRKS) +
  14959. - msm_comm_get_load(core, MSM_VIDC_DECODER, LOAD_CALC_NO_QUIRKS);
  14960. -
  14961. + if (is_turbo_requested(core, MSM_VIDC_ENCODER) ||
  14962. + is_turbo_requested(core, MSM_VIDC_DECODER)) {
  14963. + num_mbs_per_sec = core->resources.max_load;
  14964. + } else {
  14965. + num_mbs_per_sec = msm_comm_get_load(core, MSM_VIDC_ENCODER);
  14966. + num_mbs_per_sec += msm_comm_get_load(core, MSM_VIDC_DECODER);
  14967. + }
  14968.  
  14969. dprintk(VIDC_INFO, "num_mbs_per_sec = %d\n", num_mbs_per_sec);
  14970. rc = call_hfi_op(hdev, scale_clocks,
  14971. @@ -1894,9 +1866,6 @@ static int msm_vidc_load_resources(int flipped_state,
  14972. int rc = 0;
  14973. struct hfi_device *hdev;
  14974. int num_mbs_per_sec = 0;
  14975. - enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
  14976. - LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
  14977. - LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
  14978.  
  14979. if (!inst || !inst->core || !inst->core->device) {
  14980. dprintk(VIDC_ERR, "%s invalid parameters", __func__);
  14981. @@ -1915,19 +1884,16 @@ static int msm_vidc_load_resources(int flipped_state,
  14982. return -EINVAL;
  14983. }
  14984.  
  14985. - num_mbs_per_sec =
  14986. - msm_comm_get_load(inst->core, MSM_VIDC_DECODER, quirks) +
  14987. - msm_comm_get_load(inst->core, MSM_VIDC_ENCODER, quirks);
  14988. + num_mbs_per_sec = msm_comm_get_load(inst->core, MSM_VIDC_DECODER);
  14989. + num_mbs_per_sec += msm_comm_get_load(inst->core, MSM_VIDC_ENCODER);
  14990.  
  14991. if (num_mbs_per_sec > inst->core->resources.max_load) {
  14992. dprintk(VIDC_ERR, "HW is overloaded, needed: %d max: %d\n",
  14993. num_mbs_per_sec, inst->core->resources.max_load);
  14994. msm_vidc_print_running_insts(inst->core);
  14995. -#if 0 /* Samsung skips the overloaded error return */
  14996. inst->state = MSM_VIDC_CORE_INVALID;
  14997. msm_comm_kill_session(inst);
  14998. return -EBUSY;
  14999. -#endif
  15000. }
  15001.  
  15002. hdev = inst->core->device;
  15003. @@ -2459,8 +2425,10 @@ int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
  15004. if (rc || state <= get_flipped_state(inst->state, state))
  15005. break;
  15006. case MSM_VIDC_OPEN_DONE:
  15007. + mutex_unlock(&inst->sync_lock);
  15008. rc = wait_for_state(inst, flipped_state, MSM_VIDC_OPEN_DONE,
  15009. SESSION_INIT_DONE);
  15010. + mutex_lock(&inst->sync_lock);
  15011. if (rc || state <= get_flipped_state(inst->state, state))
  15012. break;
  15013. case MSM_VIDC_LOAD_RESOURCES:
  15014. @@ -2473,8 +2441,10 @@ int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
  15015. if (rc || state <= get_flipped_state(inst->state, state))
  15016. break;
  15017. case MSM_VIDC_START_DONE:
  15018. + mutex_unlock(&inst->sync_lock);
  15019. rc = wait_for_state(inst, flipped_state, MSM_VIDC_START_DONE,
  15020. SESSION_START_DONE);
  15021. + mutex_lock(&inst->sync_lock);
  15022. if (rc || state <= get_flipped_state(inst->state, state))
  15023. break;
  15024. case MSM_VIDC_STOP:
  15025. @@ -2482,8 +2452,10 @@ int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
  15026. if (rc || state <= get_flipped_state(inst->state, state))
  15027. break;
  15028. case MSM_VIDC_STOP_DONE:
  15029. + mutex_unlock(&inst->sync_lock);
  15030. rc = wait_for_state(inst, flipped_state, MSM_VIDC_STOP_DONE,
  15031. SESSION_STOP_DONE);
  15032. + mutex_lock(&inst->sync_lock);
  15033. if (rc || state <= get_flipped_state(inst->state, state))
  15034. break;
  15035. dprintk(VIDC_DBG, "Moving to Stop Done state\n");
  15036. @@ -2492,9 +2464,11 @@ int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
  15037. if (rc || state <= get_flipped_state(inst->state, state))
  15038. break;
  15039. case MSM_VIDC_RELEASE_RESOURCES_DONE:
  15040. + mutex_unlock(&inst->sync_lock);
  15041. rc = wait_for_state(inst, flipped_state,
  15042. MSM_VIDC_RELEASE_RESOURCES_DONE,
  15043. SESSION_RELEASE_RESOURCE_DONE);
  15044. + mutex_lock(&inst->sync_lock);
  15045. if (rc || state <= get_flipped_state(inst->state, state))
  15046. break;
  15047. dprintk(VIDC_DBG,
  15048. @@ -2504,8 +2478,10 @@ int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
  15049. if (rc || state <= get_flipped_state(inst->state, state))
  15050. break;
  15051. case MSM_VIDC_CLOSE_DONE:
  15052. + mutex_unlock(&inst->sync_lock);
  15053. rc = wait_for_state(inst, flipped_state, MSM_VIDC_CLOSE_DONE,
  15054. SESSION_END_DONE);
  15055. + mutex_lock(&inst->sync_lock);
  15056. if (rc || state <= get_flipped_state(inst->state, state))
  15057. break;
  15058. case MSM_VIDC_CORE_UNINIT:
  15059. @@ -2595,15 +2571,6 @@ int msm_comm_qbuf(struct vb2_buffer *vb)
  15060. dprintk(VIDC_DBG,
  15061. "Received EOS on output capability\n");
  15062. }
  15063. - /*Start : Qualcomm Local Patch - 20131226 */
  15064. - if (vb->v4l2_buf.flags &
  15065. - V4L2_MSM_BUF_FLAG_YUV_601_709_CLAMP) {
  15066. - frame_data.flags |=
  15067. - HAL_BUFFERFLAG_YUV_601_709_CSC_CLAMP;
  15068. - dprintk(VIDC_DBG,
  15069. - "Received buff with 601to709 clamp\n");
  15070. - }
  15071. - /*End : Qualcomm Local Patch - 20131226 */
  15072.  
  15073. if (vb->v4l2_buf.flags &
  15074. V4L2_QCOM_BUF_FLAG_CODECCONFIG) {
  15075. @@ -2704,7 +2671,6 @@ int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst)
  15076. return -EINVAL;
  15077. }
  15078. hdev = inst->core->device;
  15079. - mutex_lock(&inst->sync_lock);
  15080. if (inst->state < MSM_VIDC_OPEN_DONE || inst->state >= MSM_VIDC_CLOSE) {
  15081. dprintk(VIDC_ERR,
  15082. "Not in proper state to query buffer requirements\n");
  15083. @@ -2733,7 +2699,6 @@ int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst)
  15084. }
  15085. rc = 0;
  15086. exit:
  15087. - mutex_unlock(&inst->sync_lock);
  15088. return rc;
  15089. }
  15090. int msm_comm_release_output_buffers(struct msm_vidc_inst *inst)
  15091. @@ -2947,7 +2912,6 @@ int msm_comm_try_set_prop(struct msm_vidc_inst *inst,
  15092. }
  15093. hdev = inst->core->device;
  15094.  
  15095. - mutex_lock(&inst->sync_lock);
  15096. if (inst->state < MSM_VIDC_OPEN_DONE || inst->state >= MSM_VIDC_CLOSE) {
  15097. dprintk(VIDC_ERR, "Not in proper state to set property\n");
  15098. rc = -EAGAIN;
  15099. @@ -2958,7 +2922,6 @@ int msm_comm_try_set_prop(struct msm_vidc_inst *inst,
  15100. if (rc)
  15101. dprintk(VIDC_ERR, "Failed to set hal property for framesize\n");
  15102. exit:
  15103. - mutex_unlock(&inst->sync_lock);
  15104. return rc;
  15105. }
  15106.  
  15107. @@ -3394,26 +3357,19 @@ int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
  15108. static int msm_vidc_load_supported(struct msm_vidc_inst *inst)
  15109. {
  15110. int num_mbs_per_sec = 0;
  15111. - enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
  15112. - LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
  15113. - LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
  15114.  
  15115. if (inst->state == MSM_VIDC_OPEN_DONE) {
  15116. num_mbs_per_sec = msm_comm_get_load(inst->core,
  15117. - MSM_VIDC_DECODER, quirks);
  15118. + MSM_VIDC_DECODER);
  15119. num_mbs_per_sec += msm_comm_get_load(inst->core,
  15120. - MSM_VIDC_ENCODER, quirks);
  15121. + MSM_VIDC_ENCODER);
  15122. if (num_mbs_per_sec > inst->core->resources.max_load) {
  15123. dprintk(VIDC_ERR,
  15124. "H/w is overloaded. needed: %d max: %d\n",
  15125. num_mbs_per_sec,
  15126. inst->core->resources.max_load);
  15127. msm_vidc_print_running_insts(inst->core);
  15128. -/* MMRND_AVRC. Start */
  15129. -#if 0 // Samsung skips the overloaded error return
  15130. return -EINVAL;
  15131. -#endif
  15132. -/* MMRND_AVRC. End */
  15133. }
  15134. }
  15135. return 0;
  15136. @@ -3532,8 +3488,11 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
  15137. if (rc) {
  15138. change_inst_state(inst, MSM_VIDC_CORE_INVALID);
  15139. msm_comm_kill_session(inst);
  15140. + msm_vidc_queue_v4l2_event(inst,
  15141. + V4L2_EVENT_MSM_VIDC_HW_OVERLOAD);
  15142. dprintk(VIDC_WARN,
  15143. "%s: Hardware is overloaded\n", __func__);
  15144. + wake_up(&inst->kernel_event_queue);
  15145. }
  15146. return rc;
  15147. }
  15148. @@ -3729,6 +3688,7 @@ void msm_vidc_fw_unload_handler(struct work_struct *work)
  15149. dprintk(VIDC_ERR,
  15150. "Failed to release core, id = %d\n",
  15151. core->id);
  15152. + mutex_unlock(&core->lock);
  15153. return;
  15154. }
  15155. }
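
The msm_vidc_common.c hunks above drop the load_calc_quirks path: load is the sum of macroblocks per second over active, non-thumbnail instances (NUM_MBS_PER_SEC now shifts by 4 instead of dividing by 16), and bus/clock scaling jumps straight to resources.max_load whenever any active instance has VIDC_TURBO set. A self-contained sketch of that decision, with hypothetical instances and a made-up max_load:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the macro in the hunk: (height / 16) * (width / 16) * fps, via shifts. */
static int num_mbs_per_sec(int height, int width, int fps)
{
    return (height >> 4) * (width >> 4) * fps;
}

struct instance {
    int height, width, fps;
    bool active;     /* OPEN_DONE <= state < STOP_DONE */
    bool thumbnail;  /* thumbnail sessions do not count toward load */
    bool turbo;      /* VIDC_TURBO flag */
};

int main(void)
{
    const struct instance insts[] = {
        { 1080, 1920, 30, true, false, false },
        {  720, 1280, 30, true, false, true },
    };
    const int max_load = 244800;   /* hypothetical platform limit */
    int load = 0;
    bool turbo = false;
    size_t i;

    for (i = 0; i < sizeof(insts) / sizeof(insts[0]); i++) {
        if (!insts[i].active)
            continue;
        if (insts[i].turbo)
            turbo = true;
        if (!insts[i].thumbnail)
            load += num_mbs_per_sec(insts[i].height,
                                    insts[i].width, insts[i].fps);
    }

    /* If any active session asked for turbo, scale for the maximum load instead. */
    if (turbo)
        load = max_load;

    printf("load = %d mbs/sec (turbo=%d)\n", load, turbo);
    return 0;
}
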
  15156. diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
  15157. index 59fb8f6..2371f3b 100644
  15158. --- a/drivers/media/platform/msm/vidc/venus_hfi.c
  15159. +++ b/drivers/media/platform/msm/vidc/venus_hfi.c
  15160. @@ -1159,6 +1159,8 @@ static inline int venus_hfi_clk_enable(struct venus_hfi_device *device)
  15161. }
  15162.  
  15163. for (i = VCODEC_CLK; i <= device->clk_gating_level; i++) {
  15164. + if (i == VCODEC_OCMEM_CLK && !device->res->ocmem_size)
  15165. + continue;
  15166. cl = &device->resources.clock[i];
  15167. rc = clk_enable(cl->clk);
  15168. if (rc) {
  15169. @@ -1175,6 +1177,8 @@ static inline int venus_hfi_clk_enable(struct venus_hfi_device *device)
  15170. return 0;
  15171. fail_clk_enable:
  15172. for (i--; i >= VCODEC_CLK; i--) {
  15173. + if (i == VCODEC_OCMEM_CLK && !device->res->ocmem_size)
  15174. + continue;
  15175. cl = &device->resources.clock[i];
  15176. usleep(100);
  15177. clk_disable(cl->clk);
  15178. @@ -1209,15 +1213,17 @@ static inline void venus_hfi_clk_disable(struct venus_hfi_device *device)
  15179. if (rc)
  15180. dprintk(VIDC_WARN, "Failed to set clock rate to min: %d\n", rc);
  15181.  
  15182. + device->clk_state = DISABLED_PREPARED;
  15183. + --device->clk_cnt;
  15184. for (i = VCODEC_CLK; i <= device->clk_gating_level; i++) {
  15185. + if (i == VCODEC_OCMEM_CLK && !device->res->ocmem_size)
  15186. + continue;
  15187. cl = &device->resources.clock[i];
  15188. usleep(100);
  15189. clk_disable(cl->clk);
  15190. dprintk(VIDC_DBG, "%s: Clock: %s disabled\n",
  15191. __func__, cl->name);
  15192. }
  15193. - device->clk_state = DISABLED_PREPARED;
  15194. - --device->clk_cnt;
  15195. }
  15196.  
  15197. static int venus_hfi_halt_axi(struct venus_hfi_device *device)
  15198. @@ -1334,13 +1340,6 @@ static inline int venus_hfi_power_on(struct venus_hfi_device *device)
  15199. goto err_iommu_attach;
  15200. }
  15201.  
  15202. - /* Reboot the firmware */
  15203. - rc = venus_hfi_tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
  15204. - if (rc) {
  15205. - dprintk(VIDC_ERR, "Failed to resume video core %d\n", rc);
  15206. - goto err_set_video_state;
  15207. - }
  15208. -
  15209. /*
  15210. * Re-program all of the registers that get reset as a result of
  15211. * regulator_disable() and _enable()
  15212. @@ -1362,6 +1361,13 @@ static inline int venus_hfi_power_on(struct venus_hfi_device *device)
  15213. venus_hfi_write_register(device, VIDC_MMAP_ADDR,
  15214. (u32)device->qdss.align_device_addr, 0);
  15215.  
  15216. + /* Reboot the firmware */
  15217. + rc = venus_hfi_tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
  15218. + if (rc) {
  15219. + dprintk(VIDC_ERR, "Failed to resume video core %d\n", rc);
  15220. + goto err_set_video_state;
  15221. + }
  15222. +
  15223. /* Wait for boot completion */
  15224. rc = venus_hfi_reset_core(device);
  15225. if (rc) {
  15226. @@ -2024,6 +2030,7 @@ static int venus_hfi_core_release(void *device)
  15227. return -ENODEV;
  15228. }
  15229. if (dev->hal_client) {
  15230. + cancel_delayed_work_sync(&venus_hfi_pm_work);
  15231. mutex_lock(&dev->clk_pwr_lock);
  15232. rc = venus_hfi_clk_gating_off(device);
  15233. if (rc) {
  15234. @@ -2872,7 +2879,6 @@ err_pc_prep:
  15235. static void venus_hfi_pm_hndlr(struct work_struct *work)
  15236. {
  15237. int rc = 0;
  15238. - u32 ctrl_status = 0;
  15239. struct venus_hfi_device *device = list_first_entry(
  15240. &hal_ctxt.dev_head, struct venus_hfi_device, list);
  15241.  
  15242. @@ -2934,13 +2940,12 @@ static void venus_hfi_pm_hndlr(struct work_struct *work)
  15243. err_power_off:
  15244. skip_power_off:
  15245.  
  15246. - /* Reset PC_READY bit as power_off is skipped, if set by Venus */
  15247. - ctrl_status = venus_hfi_read_register(device, VIDC_CPU_CS_SCIACMDARG0);
  15248. - if (ctrl_status & VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY) {
  15249. - ctrl_status &= ~(VIDC_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY);
  15250. - venus_hfi_write_register(device, VIDC_CPU_CS_SCIACMDARG0,
  15251. - ctrl_status, 0);
  15252. - }
  15253. + /*
  15254. + * When power collapse is skipped, the driver does not need to inform
  15255. + * Venus. Venus can come out of power collapse on its own at any
  15256. + * stage, so the driver can simply skip the power collapse and
  15257. + * continue with normal execution.
  15258. + */
  15259.  
  15260. /* Cancel pending delayed works if any */
  15261. cancel_delayed_work(&venus_hfi_pm_work);
  15262. @@ -3250,7 +3255,18 @@ static inline void venus_hfi_disable_unprepare_clks(
  15263. }
  15264.  
  15265. WARN_ON(!mutex_is_locked(&device->clk_pwr_lock));
  15266. + /*
  15267. + * Mark the clock state as unprepared before actually unpreparing
  15268. + * the clocks. This makes sure that whenever the state is checked,
  15269. + * it reflects the right clock state. No action is taken on
  15270. + * unprepare failures, so it is safe to update the state before
  15271. + * the call. This also matches the state update done in
  15272. + * prepare_enable.
  15273. + */
  15274. +
  15275. + --device->clk_cnt;
  15276. if (device->clk_state == ENABLED_PREPARED) {
  15277. + device->clk_state = DISABLED_PREPARED;
  15278. for (i = VCODEC_CLK; i < VCODEC_MAX_CLKS; i++) {
  15279. if (i == VCODEC_OCMEM_CLK && !device->res->ocmem_size)
  15280. continue;
  15281. @@ -3261,8 +3277,11 @@ static inline void venus_hfi_disable_unprepare_clks(
  15282. __func__, cl->name);
  15283. }
  15284. } else {
  15285. + device->clk_state = DISABLED_PREPARED;
  15286. for (i = device->clk_gating_level + 1;
  15287. i < VCODEC_MAX_CLKS; i++) {
  15288. + if (i == VCODEC_OCMEM_CLK && !device->res->ocmem_size)
  15289. + continue;
  15290. cl = &device->resources.clock[i];
  15291. usleep(100);
  15292. clk_disable(cl->clk);
  15293. @@ -3270,6 +3289,7 @@ static inline void venus_hfi_disable_unprepare_clks(
  15294. __func__, cl->name);
  15295. }
  15296. }
  15297. + device->clk_state = DISABLED_UNPREPARED;
  15298. for (i = VCODEC_CLK; i < VCODEC_MAX_CLKS; i++) {
  15299. if (i == VCODEC_OCMEM_CLK && !device->res->ocmem_size)
  15300. continue;
  15301. @@ -3278,8 +3298,6 @@ static inline void venus_hfi_disable_unprepare_clks(
  15302. dprintk(VIDC_DBG, "%s: Clock: %s unprepared\n",
  15303. __func__, cl->name);
  15304. }
  15305. - device->clk_state = DISABLED_UNPREPARED;
  15306. - --device->clk_cnt;
  15307. }
  15308.  
  15309. static inline int venus_hfi_prepare_enable_clks(struct venus_hfi_device *device)
  15310. @@ -3316,6 +3334,8 @@ static inline int venus_hfi_prepare_enable_clks(struct venus_hfi_device *device)
  15311. return rc;
  15312. fail_clk_enable:
  15313. for (; i >= VCODEC_CLK; i--) {
  15314. + if (i == VCODEC_OCMEM_CLK && !device->res->ocmem_size)
  15315. + continue;
  15316. cl = &device->resources.clock[i];
  15317. usleep(100);
  15318. clk_disable_unprepare(cl->clk);
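
Two patterns repeat through the venus_hfi.c hunks above: every clock loop now skips VCODEC_OCMEM_CLK when the platform reports no OCMEM, and clk_state/clk_cnt are updated before the clocks are actually disabled or unprepared, per the comment added in the patch. A small sketch of that ordering, using stand-in types rather than the real venus_hfi_device:

#include <stdio.h>
#include <stdbool.h>

enum clk_id { VCODEC_CLK, VCODEC_AHB_CLK, VCODEC_OCMEM_CLK, VCODEC_MAX_CLKS };
enum clk_state { ENABLED_PREPARED, DISABLED_PREPARED, DISABLED_UNPREPARED };

struct device_ctx {
    bool has_ocmem;            /* res->ocmem_size != 0 in the driver */
    enum clk_state clk_state;
    int clk_cnt;
};

static void clk_disable_stub(int id)
{
    printf("disable clk %d\n", id);
}

static void disable_clks(struct device_ctx *dev)
{
    int i;

    /* Record the new state first, then touch the hardware clocks. */
    dev->clk_state = DISABLED_PREPARED;
    dev->clk_cnt--;

    for (i = VCODEC_CLK; i < VCODEC_MAX_CLKS; i++) {
        /* The OCMEM clock only exists when the platform actually has OCMEM. */
        if (i == VCODEC_OCMEM_CLK && !dev->has_ocmem)
            continue;
        clk_disable_stub(i);
    }
}

int main(void)
{
    struct device_ctx dev = { false, ENABLED_PREPARED, 1 };

    disable_clks(&dev);
    printf("state=%d cnt=%d\n", dev.clk_state, dev.clk_cnt);
    return 0;
}
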
  15319. diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
  15320. index 8f0557a..92bfadc 100644
  15321. --- a/drivers/misc/qseecom.c
  15322. +++ b/drivers/misc/qseecom.c
  15323. @@ -42,9 +42,6 @@
  15324. #include <mach/socinfo.h>
  15325. #include <mach/qseecomi.h>
  15326. #include <asm/cacheflush.h>
  15327. -#ifdef CONFIG_SEC_DEBUG
  15328. -#include <mach/sec_debug.h>
  15329. -#endif
  15330. #include "qseecom_legacy.h"
  15331. #include "qseecom_kernel.h"
  15332.  
  15333. @@ -76,7 +73,6 @@
  15334.  
  15335. #define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
  15336. #define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
  15337. -#define TWO 2
  15338.  
  15339. enum qseecom_clk_definitions {
  15340. CLK_DFAB = 0,
  15341. @@ -1043,9 +1039,8 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
  15342. }
  15343. entry->app_id = app_id;
  15344. entry->ref_cnt = 1;
  15345. - memset((void *)entry->app_name, 0, MAX_APP_NAME_SIZE);
  15346. - memcpy((void *)entry->app_name,
  15347. - (void *)load_img_req.img_name, MAX_APP_NAME_SIZE);
  15348. + memcpy(entry->app_name, load_img_req.img_name,
  15349. + MAX_APP_NAME_SIZE);
  15350. /* Deallocate the handle */
  15351. if (!IS_ERR_OR_NULL(ihandle))
  15352. ion_free(qseecom.ion_clnt, ihandle);
  15353. @@ -1059,9 +1054,8 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
  15354. (char *)(load_img_req.img_name));
  15355. }
  15356. data->client.app_id = app_id;
  15357. - memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
  15358. - memcpy((void *)data->client.app_name,
  15359. - (void *)load_img_req.img_name, MAX_APP_NAME_SIZE);
  15360. + memcpy(data->client.app_name, load_img_req.img_name,
  15361. + MAX_APP_NAME_SIZE);
  15362. load_img_req.app_id = app_id;
  15363. if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
  15364. pr_err("copy_to_user failed\n");
  15365. @@ -1119,8 +1113,8 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
  15366. bool found_dead_app = false;
  15367.  
  15368. if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
  15369. - pr_warn("Do not unload keymaster app from tz\n");
  15370. - return 0;
  15371. + pr_debug("Do not unload keymaster app from tz\n");
  15372. + goto unload_exit;
  15373. }
  15374.  
  15375. if (data->client.app_id > 0) {
  15376. @@ -1215,6 +1209,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
  15377. spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
  15378. flags1);
  15379. }
  15380. +unload_exit:
  15381. qseecom_unmap_ion_allocated_memory(data);
  15382. data->released = true;
  15383. return ret;
  15384. @@ -1403,9 +1398,8 @@ static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
  15385. pr_err("Client or client handle is not initialized\n");
  15386. return -EINVAL;
  15387. }
  15388. -
  15389. - if (((req->cmd_req_buf == NULL) && (req->resp_len != 0)) ||
  15390. - (req->resp_buf == NULL)) {
  15391. + if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
  15392. + (req->cmd_req_buf == NULL)) {
  15393. pr_err("cmd buffer or response buffer is null\n");
  15394. return -EINVAL;
  15395. }
  15396. @@ -1415,23 +1409,19 @@ static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
  15397. pr_err("cmd buffer address not within shared bufffer\n");
  15398. return -EINVAL;
  15399. }
  15400. -
  15401. -
  15402. - if (((uintptr_t)req->cmd_req_buf <
  15403. - data->client.user_virt_sb_base) ||
  15404. - ((uintptr_t)req->cmd_req_buf >=
  15405. - (data->client.user_virt_sb_base + data->client.sb_length))) {
  15406. + if (((uintptr_t)req->resp_buf <
  15407. + data->client.user_virt_sb_base) ||
  15408. + ((uintptr_t)req->resp_buf >=
  15409. + (data->client.user_virt_sb_base + data->client.sb_length))) {
  15410. pr_err("response buffer address not within shared bufffer\n");
  15411. return -EINVAL;
  15412. }
  15413. -
  15414. if ((req->cmd_req_len == 0) ||
  15415. (req->cmd_req_len > data->client.sb_length) ||
  15416. (req->resp_len > data->client.sb_length)) {
  15417. - pr_err("cmd buffer length or response buffer length not valid\n");
  15418. + pr_err("cmd buf length or response buf length not valid\n");
  15419. return -EINVAL;
  15420. }
  15421. -
  15422. if (req->cmd_req_len > UINT_MAX - req->resp_len) {
  15423. pr_err("Integer overflow detected in req_len & rsp_len\n");
  15424. return -EINVAL;
  15425. @@ -1441,7 +1431,7 @@ static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
  15426. pr_debug("Not enough memory to fit cmd_buf.\n");
  15427. pr_debug("resp_buf. Required: %u, Available: %zu\n",
  15428. (req->cmd_req_len + req->resp_len),
  15429. - data->client.sb_length);
  15430. + data->client.sb_length);
  15431. return -ENOMEM;
  15432. }
  15433. if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
  15434. @@ -1468,7 +1458,7 @@ static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
  15435. }
  15436. return 0;
  15437. }
  15438. -
  15439. +
  15440. static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
  15441. struct qseecom_send_cmd_req *req)
  15442. {
  15443. @@ -1489,8 +1479,8 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
  15444. name_len = min(strlen(data->client.app_name),
  15445. strlen(ptr_app->app_name));
  15446. if ((ptr_app->app_id == data->client.app_id) &&
  15447. - (!memcmp((void *)ptr_app->app_name,
  15448. - (void *)data->client.app_name, name_len))) {
  15449. + (!memcmp(ptr_app->app_name,
  15450. + data->client.app_name, name_len))) {
  15451. found_app = true;
  15452. break;
  15453. }
  15454. @@ -1567,55 +1557,32 @@ static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
  15455. return ret;
  15456. }
  15457.  
  15458. -int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
  15459. +int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *cmd_req,
  15460. struct qseecom_send_modfd_listener_resp *lstnr_resp,
  15461. - struct qseecom_dev_handle *data, bool qteec,
  15462. + struct qseecom_dev_handle *data, bool listener_svc,
  15463. int i) {
  15464.  
  15465. - if ((data->type != QSEECOM_LISTENER_SERVICE) &&
  15466. - (req->ifd_data[i].fd > 0)) {
  15467. - if (qteec) {
  15468. - if ((req->cmd_req_len < (TWO * sizeof(uint32_t))) ||
  15469. - (req->ifd_data[i].cmd_buf_offset >
  15470. - req->cmd_req_len - (TWO * sizeof(uint32_t)))) {
  15471. - pr_err("Invalid offset (QTEEC req len) 0x%x\n",
  15472. - req->ifd_data[i].cmd_buf_offset);
  15473. - return -EINVAL;
  15474. - }
  15475. - } else {
  15476. - if ((req->cmd_req_len < sizeof(uint32_t)) ||
  15477. - (req->ifd_data[i].cmd_buf_offset >
  15478. - req->cmd_req_len - sizeof(uint32_t))) {
  15479. - pr_err("Invalid offset (req len) 0x%x\n",
  15480. - req->ifd_data[i].cmd_buf_offset);
  15481. - return -EINVAL;
  15482. - }
  15483. + if ((!listener_svc) && (cmd_req->ifd_data[i].fd > 0)) {
  15484. + if ((cmd_req->cmd_req_len < sizeof(uint32_t)) ||
  15485. + (cmd_req->ifd_data[i].cmd_buf_offset >
  15486. + cmd_req->cmd_req_len - sizeof(uint32_t))) {
  15487. + pr_err("Invalid offset 0x%x\n",
  15488. + cmd_req->ifd_data[i].cmd_buf_offset);
  15489. + return -EINVAL;
  15490. }
  15491. - } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
  15492. - (lstnr_resp->ifd_data[i].fd > 0)) {
  15493. - if (qteec) {
  15494. - if ((lstnr_resp->resp_len < TWO * sizeof(uint32_t)) ||
  15495. - (lstnr_resp->ifd_data[i].cmd_buf_offset >
  15496. - lstnr_resp->resp_len - TWO*sizeof(uint32_t))) {
  15497. - pr_err("Invalid offset (QTEEC resp len) 0x%x\n",
  15498. - lstnr_resp->ifd_data[i].cmd_buf_offset);
  15499. - return -EINVAL;
  15500. - }
  15501. - } else {
  15502. - if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
  15503. + } else if ((listener_svc) && (lstnr_resp->ifd_data[i].fd > 0)) {
  15504. + if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
  15505. (lstnr_resp->ifd_data[i].cmd_buf_offset >
  15506. lstnr_resp->resp_len - sizeof(uint32_t))) {
  15507. - pr_err("Invalid offset (lstnr resp len) 0x%x\n",
  15508. + pr_err("Invalid offset 0x%x\n",
  15509. lstnr_resp->ifd_data[i].cmd_buf_offset);
  15510. - return -EINVAL;
  15511. - }
  15512. + return -EINVAL;
  15513. }
  15514. }
  15515. return 0;
  15516. }
  15517.  
  15518. #define SG_ENTRY_SZ sizeof(struct qseecom_sg_entry)
  15519. -
  15520. static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
  15521. struct qseecom_dev_handle *data,
  15522. bool listener_svc)
  15523. @@ -1691,7 +1658,7 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
  15524. update = (uint32_t *) field;
  15525.  
  15526. if (__boundary_checks_offset(cmd_req, lstnr_resp, data,
  15527. - false, i))
  15528. + listener_svc, i))
  15529. goto err;
  15530. if (cleanup)
  15531. *update = 0;
  15532. @@ -1703,21 +1670,24 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
  15533. struct qseecom_sg_entry *update;
  15534. int j = 0;
  15535.  
  15536. - if ((data->type != QSEECOM_LISTENER_SERVICE) &&
  15537. - (cmd_req->ifd_data[i].fd > 0)) {
  15538. - if (cmd_req->ifd_data[i].cmd_buf_offset >
  15539. - cmd_req->cmd_req_len -
  15540. - sizeof(struct qseecom_sg_entry)) {
  15541. + if ((!listener_svc) && (cmd_req->ifd_data[i].fd > 0)) {
  15542. + if ((cmd_req->cmd_req_len <
  15543. + SG_ENTRY_SZ * sg_ptr->nents) ||
  15544. + (cmd_req->ifd_data[i].cmd_buf_offset >
  15545. + (cmd_req->cmd_req_len -
  15546. + SG_ENTRY_SZ * sg_ptr->nents))) {
  15547. pr_err("Invalid offset = 0x%x\n",
  15548. cmd_req->ifd_data[i].
  15549. cmd_buf_offset);
  15550. goto err;
  15551. }
  15552. - } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
  15553. + } else if ((listener_svc) &&
  15554. (lstnr_resp->ifd_data[i].fd > 0)) {
  15555. - if (lstnr_resp->ifd_data[i].cmd_buf_offset >
  15556. - lstnr_resp->resp_len -
  15557. - sizeof(struct qseecom_sg_entry)) {
  15558. + if ((lstnr_resp->resp_len <
  15559. + SG_ENTRY_SZ * sg_ptr->nents) ||
  15560. + (lstnr_resp->ifd_data[i].cmd_buf_offset >
  15561. + (lstnr_resp->resp_len -
  15562. + SG_ENTRY_SZ * sg_ptr->nents))) {
  15563. pr_err("Invalid offset = 0x%x\n",
  15564. lstnr_resp->ifd_data[i].
  15565. cmd_buf_offset);
  15566. @@ -2214,6 +2184,11 @@ int qseecom_start_app(struct qseecom_handle **handle,
  15567. uint32_t len;
  15568. ion_phys_addr_t pa;
  15569.  
  15570. + if (!app_name || strlen(app_name) >= MAX_APP_NAME_SIZE) {
  15571. + pr_err("The app_name (%s) is not valid\n", app_name);
  15572. + return -EINVAL;
  15573. + }
  15574. +
  15575. *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
  15576. if (!(*handle)) {
  15577. pr_err("failed to allocate memory for kernel client handle\n");
  15578. @@ -2294,6 +2269,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
  15579. if (ret < 0)
  15580. goto err;
  15581. data->client.app_id = ret;
  15582. + memcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
  15583. }
  15584. if (!found_app) {
  15585. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  15586. @@ -2304,6 +2280,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
  15587. }
  15588. entry->app_id = ret;
  15589. entry->ref_cnt = 1;
  15590. + memcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
  15591.  
  15592. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  15593. list_add_tail(&entry->list, &qseecom.registered_app_list_head);
  15594. @@ -2367,6 +2344,9 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
  15595. return -EINVAL;
  15596. }
  15597. data = (struct qseecom_dev_handle *) ((*handle)->dev);
  15598. + mutex_lock(&app_access_lock);
  15599. + atomic_inc(&data->ioctl_count);
  15600. +
  15601. spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
  15602. list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
  15603. list) {
  15604. @@ -2399,12 +2379,16 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
  15605. if (data->perf_enabled == true)
  15606. qsee_disable_clock_vote(data, CLK_DFAB);
  15607. }
  15608. +
  15609. + atomic_dec(&data->ioctl_count);
  15610. + mutex_unlock(&app_access_lock);
  15611. if (ret == 0) {
  15612. kzfree(data);
  15613. kzfree(*handle);
  15614. kzfree(kclient);
  15615. *handle = NULL;
  15616. }
  15617. +
  15618. return ret;
  15619. }
  15620. EXPORT_SYMBOL(qseecom_shutdown_app);
  15621. @@ -3056,9 +3040,8 @@ static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
  15622. &qseecom.registered_app_list_lock, flags);
  15623. data->client.app_id = ret;
  15624. query_req.app_id = ret;
  15625. - memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
  15626. - memcpy((void *)data->client.app_name,
  15627. - (void *)query_req.app_name, MAX_APP_NAME_SIZE);
  15628. + memcpy(data->client.app_name, query_req.app_name,
  15629. + MAX_APP_NAME_SIZE);
  15630. if (copy_to_user(argp, &query_req, sizeof(query_req))) {
  15631. pr_err("copy_to_user failed\n");
  15632. return -EFAULT;
  15633. @@ -3111,7 +3094,6 @@ static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
  15634. enum qseecom_key_management_usage_type usage,
  15635. struct qseecom_key_generate_ireq *ireq)
  15636. {
  15637. -
  15638. struct qseecom_command_scm_resp resp;
  15639. int ret;
  15640.  
  15641. @@ -3120,7 +3102,6 @@ static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
  15642. pr_err("Error:: unsupported usage %d\n", usage);
  15643. return -EFAULT;
  15644. }
  15645. -
  15646. __qseecom_enable_clk(CLK_QSEE);
  15647.  
  15648. ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
  15649. @@ -3173,7 +3154,6 @@ static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
  15650. return -EFAULT;
  15651. }
  15652.  
  15653. -
  15654. __qseecom_enable_clk(CLK_QSEE);
  15655.  
  15656. ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
  15657. @@ -3856,8 +3836,6 @@ static long qseecom_ioctl(struct file *file, unsigned cmd,
  15658. ret = -EINVAL;
  15659. break;
  15660. }
  15661. - pr_debug("%s : Perf Enable ioctl (Process:%s PID:%d)\n", __func__, \
  15662. - current->comm, current->pid);
  15663. atomic_inc(&data->ioctl_count);
  15664. if (qseecom.support_bus_scaling) {
  15665. mutex_lock(&qsee_bw_mutex);
  15666. @@ -3886,8 +3864,6 @@ static long qseecom_ioctl(struct file *file, unsigned cmd,
  15667. ret = -EINVAL;
  15668. break;
  15669. }
  15670. - pr_debug("%s : Perf Disable ioctl (Process:%s PID:%d)\n", __func__, \
  15671. - current->comm, current->pid);
  15672. atomic_inc(&data->ioctl_count);
  15673. if (!qseecom.support_bus_scaling) {
  15674. qsee_disable_clock_vote(data, CLK_DFAB);
  15675. @@ -4505,9 +4481,6 @@ static int __devinit qseecom_probe(struct platform_device *pdev)
  15676. req.size = resource_size(resource);
  15677. pr_warn("secure app region addr=0x%x size=0x%x",
  15678. req.addr, req.size);
  15679. -#ifdef CONFIG_SEC_DEBUG
  15680. - sec_debug_secure_app_addr_size(req.addr, req.size);
  15681. -#endif
  15682. } else {
  15683. pr_err("Fail to get secure app region info\n");
  15684. rc = -EINVAL;
  15685. @@ -4634,8 +4607,7 @@ static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
  15686. mutex_lock(&qsee_bw_mutex);
  15687. mutex_lock(&clk_access_lock);
  15688.  
  15689. - if (qseecom.cumulative_mode != INACTIVE &&
  15690. - qseecom.current_mode != INACTIVE) {
  15691. + if (qseecom.current_mode != INACTIVE) {
  15692. ret = msm_bus_scale_client_update_request(
  15693. qseecom.qsee_perf_client, INACTIVE);
  15694. if (ret)
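
The qseecom.c hunks above return __boundary_checks_offset() to the plain listener/non-listener form and tighten the scatter-gather path so an offset must leave room for SG_ENTRY_SZ * nents entries. A minimal sketch of the basic single-entry offset check, with made-up buffer sizes:

#include <stdio.h>
#include <stdint.h>

/*
 * The offset must leave room for at least one uint32_t to be patched into
 * the request buffer, mirroring the restored check in the hunk.
 */
static int check_offset(uint32_t cmd_req_len, uint32_t cmd_buf_offset)
{
    if (cmd_req_len < sizeof(uint32_t) ||
        cmd_buf_offset > cmd_req_len - sizeof(uint32_t)) {
        fprintf(stderr, "Invalid offset 0x%x\n", (unsigned)cmd_buf_offset);
        return -1;
    }
    return 0;
}

int main(void)
{
    /* Hypothetical values: a 64-byte request with the patched field at 60 / 62. */
    printf("ok case:  %d\n", check_offset(64, 60));
    printf("bad case: %d\n", check_offset(64, 62));
    return 0;
}
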
  15695. diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
  15696. index 8fab5aa..1cbc66f 100644
  15697. --- a/drivers/mmc/card/block.c
  15698. +++ b/drivers/mmc/card/block.c
  15699. @@ -3501,7 +3501,7 @@ static void mmc_blk_shutdown(struct mmc_card *card)
  15700. mmc_claim_host(card->host);
  15701. mmc_stop_bkops(card);
  15702. mmc_release_host(card->host);
  15703. - mmc_send_long_pon(card);
  15704. + mmc_send_pon(card);
  15705. mmc_rpm_release(card->host, &card->dev);
  15706. }
  15707. return;
  15708. diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
  15709. index da6f043..76c4fbe 100644
  15710. --- a/drivers/mmc/core/mmc.c
  15711. +++ b/drivers/mmc/core/mmc.c
  15712. @@ -1378,10 +1378,7 @@ static int mmc_reboot_notify(struct notifier_block *notify_block,
  15713. struct mmc_card *card = container_of(
  15714. notify_block, struct mmc_card, reboot_notify);
  15715.  
  15716. - if (event != SYS_RESTART)
  15717. - card->issue_long_pon = true;
  15718. - else
  15719. - card->issue_long_pon = false;
  15720. + card->pon_type = (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
  15721.  
  15722. return NOTIFY_OK;
  15723. }
  15724. @@ -1788,19 +1785,24 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
  15725. return err;
  15726. }
  15727.  
  15728. -int mmc_send_long_pon(struct mmc_card *card)
  15729. +int mmc_send_pon(struct mmc_card *card)
  15730. {
  15731. int err = 0;
  15732. struct mmc_host *host = card->host;
  15733.  
  15734. + if (!mmc_can_poweroff_notify(card))
  15735. + goto out;
  15736. +
  15737. mmc_claim_host(host);
  15738. - if (card->issue_long_pon && mmc_can_poweroff_notify(card)) {
  15739. + if (card->pon_type & MMC_LONG_PON)
  15740. err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_LONG);
  15741. - if (err)
  15742. - pr_warning("%s: error %d sending Long PON",
  15743. - mmc_hostname(host), err);
  15744. - }
  15745. + else if (card->pon_type & MMC_SHRT_PON)
  15746. + err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
  15747. + if (err)
  15748. + pr_warn("%s: error %d sending PON type %u",
  15749. + mmc_hostname(host), err, card->pon_type);
  15750. mmc_release_host(host);
  15751. +out:
  15752. return err;
  15753. }
  15754.  
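
The mmc changes above replace issue_long_pon with a pon_type chosen in the reboot notifier (long PON unless the event is SYS_RESTART), and mmc_send_pon() now bails out early when the card cannot do poweroff notify before sending the matching notification. A toy sketch of the selection logic, with stand-in values for the MMC constants:

#include <stdio.h>

/* Stand-in values; the kernel headers define the real MMC_*_PON and EXT_CSD_* macros. */
#define MMC_LONG_PON            0x1
#define MMC_SHRT_PON            0x2
#define EXT_CSD_POWER_OFF_LONG  1
#define EXT_CSD_POWER_OFF_SHORT 2
#define SYS_RESTART             0x1
#define SYS_POWER_OFF           0x3

static unsigned int pon_type_for_event(unsigned long event)
{
    /* Reboot keeps the short notification; power-off / halt get the long one. */
    return (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
}

static void send_pon(unsigned int pon_type)
{
    if (pon_type & MMC_LONG_PON)
        printf("notify EXT_CSD_POWER_OFF_LONG (%d)\n", EXT_CSD_POWER_OFF_LONG);
    else if (pon_type & MMC_SHRT_PON)
        printf("notify EXT_CSD_POWER_OFF_SHORT (%d)\n", EXT_CSD_POWER_OFF_SHORT);
}

int main(void)
{
    send_pon(pon_type_for_event(SYS_RESTART));    /* short PON */
    send_pon(pon_type_for_event(SYS_POWER_OFF));  /* long PON  */
    return 0;
}
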
  15755. diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
  15756. index 9c65a63..6cfd8cb 100644
  15757. --- a/drivers/net/wireless/wcnss/wcnss_vreg.c
  15758. +++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
  15759. @@ -1,4 +1,4 @@
  15760. -/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  15761. +/* Copyright (c) 2011-2013,2015 The Linux Foundation. All rights reserved.
  15762. *
  15763. * This program is free software; you can redistribute it and/or modify
  15764. * it under the terms of the GNU General Public License version 2 and
  15765. @@ -37,7 +37,6 @@ static int auto_detect;
  15766. #define MSM_PRONTO_PHYS 0xfb21b000
  15767.  
  15768. #define RIVA_PMU_OFFSET 0x28
  15769. -#define PRONTO_PMU_OFFSET 0x1004
  15770.  
  15771. #define RIVA_SPARE_OFFSET 0x0b4
  15772. #define PRONTO_SPARE_OFFSET 0x1088
  15773. @@ -48,7 +47,6 @@ static int auto_detect;
  15774.  
  15775. #define WCNSS_PMU_CFG_IRIS_XO_CFG BIT(3)
  15776. #define WCNSS_PMU_CFG_IRIS_XO_EN BIT(4)
  15777. -#define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP BIT(5)
  15778. #define WCNSS_PMU_CFG_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
  15779.  
  15780. #define WCNSS_PMU_CFG_IRIS_RESET BIT(7)
  15781. @@ -100,7 +98,7 @@ static struct vregs_info iris_vregs_pronto[] = {
  15782. {"qcom,iris-vddrfa", VREG_NULL_CONFIG, 1300000, 0,
  15783. 1300000, 100000, NULL},
  15784. {"qcom,iris-vddpa", VREG_NULL_CONFIG, 2900000, 0,
  15785. - 3000000, 515000, NULL},
  15786. + 3350000, 515000, NULL},
  15787. {"qcom,iris-vdddig", VREG_NULL_CONFIG, 1225000, 0,
  15788. 1800000, 10000, NULL},
  15789. };
  15790. diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
  15791. index 985fb1f..07f4d13 100644
  15792. --- a/drivers/net/wireless/wcnss/wcnss_wlan.c
  15793. +++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
  15794. @@ -1,4 +1,4 @@
  15795. -/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  15796. +/* Copyright (c) 2011-2013,2015 The Linux Foundation. All rights reserved.
  15797. *
  15798. * This program is free software; you can redistribute it and/or modify
  15799. * it under the terms of the GNU General Public License version 2 and
  15800. @@ -898,6 +898,22 @@ static void wcnss_log_iris_regs(void)
  15801. }
  15802. }
  15803.  
  15804. +int wcnss_get_mux_control(void)
  15805. +{
  15806. + void __iomem *pmu_conf_reg;
  15807. + u32 reg = 0;
  15808. +
  15809. + if (NULL == penv)
  15810. + return 0;
  15811. +
  15812. + pmu_conf_reg = penv->msm_wcnss_base + PRONTO_PMU_OFFSET;
  15813. + writel_relaxed(0, pmu_conf_reg);
  15814. + reg = readl_relaxed(pmu_conf_reg);
  15815. + reg |= WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP;
  15816. + writel_relaxed(reg, pmu_conf_reg);
  15817. + return 1;
  15818. +}
  15819. +
  15820. void wcnss_log_debug_regs_on_bite(void)
  15821. {
  15822. struct platform_device *pdev = wcnss_get_platform_device();
  15823. @@ -920,6 +936,8 @@ void wcnss_log_debug_regs_on_bite(void)
  15824.  
  15825. if (clk_rate) {
  15826. wcnss_pronto_log_debug_regs();
  15827. + if (wcnss_get_mux_control())
  15828. + wcnss_log_iris_regs();
  15829. } else {
  15830. pr_err("clock frequency is zero, cannot access PMU or other registers\n");
  15831. wcnss_log_iris_regs();
  15832. @@ -933,6 +951,8 @@ void wcnss_reset_intr(void)
  15833. {
  15834. if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
  15835. wcnss_pronto_log_debug_regs();
  15836. + if (wcnss_get_mux_control())
  15837. + wcnss_log_iris_regs();
  15838. wmb();
  15839. __raw_writel(1 << 16, penv->fiq_reg);
  15840. } else {
  15841. @@ -990,20 +1010,20 @@ static void wcnss_remove_sysfs(struct device *dev)
  15842.  
  15843. static void wcnss_pm_qos_add_request(void)
  15844. {
  15845. - pr_info("%s: add request", __func__);
  15846. + pr_info("%s: add request\n", __func__);
  15847. pm_qos_add_request(&penv->wcnss_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
  15848. PM_QOS_DEFAULT_VALUE);
  15849. }
  15850.  
  15851. static void wcnss_pm_qos_remove_request(void)
  15852. {
  15853. - pr_info("%s: remove request", __func__);
  15854. + pr_info("%s: remove request\n", __func__);
  15855. pm_qos_remove_request(&penv->wcnss_pm_qos_request);
  15856. }
  15857.  
  15858. void wcnss_pm_qos_update_request(int val)
  15859. {
  15860. - pr_info("%s: update request %d", __func__, val);
  15861. + pr_info("%s: update request %d\n", __func__, val);
  15862. pm_qos_update_request(&penv->wcnss_pm_qos_request, val);
  15863. }
  15864.  
  15865. @@ -1810,8 +1830,10 @@ static void wcnssctrl_rx_handler(struct work_struct *worker)
  15866. smd_read(penv->smd_ch, NULL, len);
  15867. return;
  15868. }
  15869. - if (len <= 0)
  15870. + if (len < sizeof(struct smd_msg_hdr)) {
  15871. + pr_err("wcnss: incomplete header available len = %d\n", len);
  15872. return;
  15873. + }
  15874.  
  15875. rc = smd_read(penv->smd_ch, buf, sizeof(struct smd_msg_hdr));
  15876. if (rc < sizeof(struct smd_msg_hdr)) {
  15877. @@ -1961,13 +1983,13 @@ static void wcnss_send_pm_config(struct work_struct *worker)
  15878. return;
  15879. }
  15880.  
  15881. - pr_debug("%s:size=%d: <%d, %d, %d, %d, %d>\n", __func__,
  15882. + pr_debug("%s:size=%d: <%d, %d, %d, %d, %d %d>\n", __func__,
  15883. prop_len, *payload, *(payload+1), *(payload+2),
  15884. - *(payload+3), *(payload+4));
  15885. + *(payload+3), *(payload+4), *(payload+5));
  15886.  
  15887. hdr = (struct smd_msg_hdr *)msg;
  15888. hdr->msg_type = WCNSS_PM_CONFIG_REQ;
  15889. - hdr->msg_len = sizeof(struct smd_msg_hdr) + prop_len;
  15890. + hdr->msg_len = sizeof(struct smd_msg_hdr) + (prop_len * sizeof(int));
  15891.  
  15892. rc = wcnss_smd_tx(msg, hdr->msg_len);
  15893. if (rc < 0)
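
In wcnss_send_pm_config() above, msg_len becomes the header size plus prop_len * sizeof(int); the change implies prop_len counts 32-bit elements (now six of them, with the GPIO strength value added to qcom,wcnss-pm), so the multiplication converts an element count into the byte length the SMD header expects. A quick sketch of the difference, using a made-up six-element payload:

#include <stdio.h>
#include <stdint.h>

struct smd_msg_hdr {
    uint32_t msg_type;
    uint32_t msg_len;
};

int main(void)
{
    /* Matches the binding example: <11 21 1200 1 1 6>. */
    int payload[] = { 11, 21, 1200, 1, 1, 6 };
    int prop_len = (int)(sizeof(payload) / sizeof(payload[0]));

    /* Old length counted elements, not bytes. */
    unsigned old_len = (unsigned)(sizeof(struct smd_msg_hdr) + prop_len);
    /* Fixed length from the hunk: header plus prop_len * sizeof(int) bytes. */
    unsigned new_len = (unsigned)(sizeof(struct smd_msg_hdr) +
                                  prop_len * sizeof(int));

    printf("old msg_len=%u, fixed msg_len=%u\n", old_len, new_len);
    return 0;
}
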
  15894. diff --git a/drivers/nfc/nfc-nci.c b/drivers/nfc/nfc-nci.c
  15895. index 776673f..b1681f1 100644
  15896. --- a/drivers/nfc/nfc-nci.c
  15897. +++ b/drivers/nfc/nfc-nci.c
  15898. @@ -28,6 +28,7 @@
  15899. #include <linux/regulator/consumer.h>
  15900. #include "nfc-nci.h"
  15901. #include <mach/gpiomux.h>
  15902. +#include <linux/pm_runtime.h>
  15903.  
  15904. struct qca199x_platform_data {
  15905. unsigned int irq_gpio;
  15906. @@ -60,8 +61,10 @@ MODULE_DEVICE_TABLE(of, msm_match_table);
  15907. #define CORE_RESET_RSP_GID (0x60)
  15908. #define CORE_RESET_OID (0x00)
  15909. #define CORE_RST_NTF_LENGTH (0x02)
  15910. -#define WAKE_TIMEOUT (10)
  15911. +#define WAKE_TIMEOUT (1000)
  15912. #define WAKE_REG (0x10)
  15913. +#define EFUSE_REG (0xA0)
  15914. +#define WAKEUP_SRC_TIMEOUT (2000)
  15915.  
  15916. static void clk_req_update(struct work_struct *work);
  15917.  
  15918. @@ -98,6 +101,8 @@ struct qca199x_dev {
  15919. struct workqueue_struct *my_wq;
  15920. };
  15921.  
  15922. +static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
  15923. + void *v);
  15924. static int nfc_i2c_write(struct i2c_client *client, u8 *buf, int len);
  15925. static int nfcc_hw_check(struct i2c_client *client, unsigned short curr_addr);
  15926. static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr,
  15927. @@ -121,7 +126,7 @@ static int ftm_werr_code;
  15928.  
  15929.  
  15930. unsigned int disable_ctrl;
  15931. -bool region2_sent;
  15932. +bool region2_sent;
  15933.  
  15934. static void qca199x_init_stat(struct qca199x_dev *qca199x_dev)
  15935. {
  15936. @@ -157,6 +162,21 @@ static irqreturn_t qca199x_dev_irq_handler(int irq, void *dev_id)
  15937. struct qca199x_dev *qca199x_dev = dev_id;
  15938. unsigned long flags;
  15939.  
  15940. + if (device_may_wakeup(&qca199x_dev->client->dev) &&
  15941. + (qca199x_dev->client->dev.power.is_suspended == true)) {
  15942. + dev_dbg(&qca199x_dev->client->dev,
  15943. + "%s: NFC:Processor in suspend state device_may_wakeup\n",
  15944. + __func__);
  15945. + /*
  15946. + * Keep system awake long enough to allow userspace
  15947. + * to process the packet.
  15948. + */
  15949. + pm_wakeup_event(&qca199x_dev->client->dev, WAKEUP_SRC_TIMEOUT);
  15950. + } else {
  15951. + dev_dbg(&qca199x_dev->client->dev,
  15952. + "%s: NFC:Processor not in suspend state\n", __func__);
  15953. + }
  15954. +
  15955. spin_lock_irqsave(&qca199x_dev->irq_enabled_lock, flags);
  15956. qca199x_dev->count_irq++;
  15957. spin_unlock_irqrestore(&qca199x_dev->irq_enabled_lock, flags);
  15958. @@ -312,7 +332,12 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
  15959. /* READ */
  15960. if ((ftm_raw_write_mode == 0) && (ftm_werr_code == 0)) {
  15961. ftm_rerr_code = i2c_master_recv(qca199x_dev->client,
  15962. - &rd_byte, 1);
  15963. + &rd_byte, sizeof(rd_byte));
  15964. +
  15965. + if (ftm_rerr_code != sizeof(rd_byte)) {
  15966. + total = -EMSGSIZE;
  15967. + goto err;
  15968. + }
  15969. if (ftm_rerr_code == 0x1)
  15970. ftm_rerr_code = 0;
  15971. tmp[0] = (unsigned char)ftm_rerr_code;
  15972. @@ -371,8 +396,8 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
  15973. if (total > 0) {
  15974. if ((total > count) || copy_to_user(buf, tmp, total)) {
  15975. dev_err(&qca199x_dev->client->dev,
  15976. - "failed to copy to user space, total = %d\n",
  15977. - total);
  15978. + "%s: failed to copy to user space, total = %d\n",
  15979. + __func__, total);
  15980. total = -EFAULT;
  15981. }
  15982. }
  15983. @@ -421,6 +446,9 @@ int nfcc_read_buff_svc(struct qca199x_dev *qca199x_dev)
  15984. ret = i2c_master_recv(qca199x_dev->client, tmp, (length +
  15985. PAYLOAD_HEADER_LENGTH));
  15986. total = ret;
  15987. + if (ret != (length + PAYLOAD_HEADER_LENGTH))
  15988. + goto leave;
  15989. +
  15990. }
  15991. dev_dbg(&qca199x_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
  15992. __func__, tmp[0], tmp[1], tmp[2]);
  15993. @@ -439,12 +467,13 @@ static ssize_t nfc_write(struct file *filp, const char __user *buf,
  15994. int nfcc_buffer = 0;
  15995.  
  15996. if (count > MAX_BUFFER_SIZE) {
  15997. - dev_err(&qca199x_dev->client->dev, "out of memory\n");
  15998. + dev_err(&qca199x_dev->client->dev, "%s: out of memory\n",
  15999. + __func__);
  16000. return -ENOMEM;
  16001. }
  16002. if (copy_from_user(tmp, buf, count)) {
  16003. dev_err(&qca199x_dev->client->dev,
  16004. - "nfc-nci write: failed to copy from user space\n");
  16005. + "%s: failed to copy from user space\n", __func__);
  16006. return -EFAULT;
  16007. }
  16008. /*
  16009. @@ -460,7 +489,8 @@ static ssize_t nfc_write(struct file *filp, const char __user *buf,
  16010. /* There has been an error while reading from nfcc */
  16011. if (nfcc_buffer < 0) {
  16012. dev_err(&qca199x_dev->client->dev,
  16013. - "nfc-nci write: error while servicing nfcc read buffer\n");
  16014. + "%s: error while servicing nfcc read buffer\n"
  16015. + , __func__);
  16016. }
  16017. qca199x_dev->sent_first_nci_write = true;
  16018. qca199x_enable_irq(qca199x_dev);
  16019. @@ -475,7 +505,7 @@ static ssize_t nfc_write(struct file *filp, const char __user *buf,
  16020. if (count == 1) {
  16021. ftm_raw_write_mode = 0;
  16022. ret = i2c_master_send(qca199x_dev->client, tmp, count);
  16023. - if (ret == 1)
  16024. + if (ret == count)
  16025. ftm_werr_code = 0;
  16026. else
  16027. ftm_werr_code = ret;
  16028. @@ -485,7 +515,7 @@ static ssize_t nfc_write(struct file *filp, const char __user *buf,
  16029. if (count == 2) {
  16030. ftm_raw_write_mode = 1;
  16031. ret = i2c_master_send(qca199x_dev->client, tmp, count);
  16032. - if (ret == 2)
  16033. + if (ret == count)
  16034. ftm_werr_code = 0;
  16035. else
  16036. ftm_werr_code = ret;
  16037. @@ -498,7 +528,7 @@ static ssize_t nfc_write(struct file *filp, const char __user *buf,
  16038. }
  16039. if (ret != count) {
  16040. dev_err(&qca199x_dev->client->dev,
  16041. - "NFC: failed to write %d\n", ret);
  16042. + "%s: failed to write %d\n", __func__, ret);
  16043. ret = -EIO;
  16044. }
  16045. mutex_unlock(&qca199x_dev->read_mutex);
  16046. @@ -532,7 +562,7 @@ static int nfc_open(struct inode *inode, struct file *filp)
  16047. qca199x_enable_irq_clk_req(qca199x_dev);
  16048. }
  16049. dev_dbg(&qca199x_dev->client->dev,
  16050. - "%d,%d\n", imajor(inode), iminor(inode));
  16051. + "%s: %d,%d\n", __func__, imajor(inode), iminor(inode));
  16052. return ret;
  16053. }
  16054.  
  16055. @@ -544,17 +574,21 @@ int nfcc_wake(int level, struct file *filp)
  16056. int r = 0;
  16057. int time_taken = 0;
  16058. unsigned char raw_nci_sleep[] = {0x2F, 0x03, 0x00};
  16059. - /* Change slave address to 0xE */
  16060. unsigned char raw_nci_wake[] = {0x10, 0x0F};
  16061. - unsigned short slave_addr = 0xE;
  16062. + /* Change slave address to 0xE */
  16063. + unsigned short slave_addr = 0xE;
  16064. unsigned short curr_addr;
  16065. - unsigned char wake_status = WAKE_REG;
  16066. + unsigned char wake_status = WAKE_REG;
  16067. struct qca199x_dev *qca199x_dev = filp->private_data;
  16068.  
  16069. - dev_dbg(&qca199x_dev->client->dev, "nfcc_wake: %s: info: %p\n",
  16070. + dev_dbg(&qca199x_dev->client->dev, "%s: info: %p\n",
  16071. __func__, qca199x_dev);
  16072.  
  16073. + curr_addr = qca199x_dev->client->addr;
  16074. if (level == NFCC_SLEEP) {
  16075. + /*
  16076. + * Normal NCI write
  16077. + */
  16078. r = i2c_master_send(qca199x_dev->client, &raw_nci_sleep[0],
  16079. sizeof(raw_nci_sleep));
  16080.  
  16081. @@ -562,54 +596,79 @@ int nfcc_wake(int level, struct file *filp)
  16082. return -EMSGSIZE;
  16083. qca199x_dev->state = NFCC_STATE_NORMAL_SLEEP;
  16084. } else {
  16085. - curr_addr = qca199x_dev->client->addr;
  16086. qca199x_dev->client->addr = slave_addr;
  16087. r = nfc_i2c_write(qca199x_dev->client, &raw_nci_wake[0],
  16088. sizeof(raw_nci_wake));
  16089. + if (r != sizeof(raw_nci_wake)) {
  16090. + r = -EMSGSIZE;
  16091. + dev_err(&qca199x_dev->client->dev,
  16092. + "%s: nci wake write failed. Check hardware\n",
  16093. + __func__);
  16094. + goto leave;
  16095. + }
  16096. do {
  16097. wake_status = WAKE_REG;
  16098. - r = nfc_i2c_write(qca199x_dev->client, &wake_status, 1);
  16099. + r = nfc_i2c_write(qca199x_dev->client, &wake_status,
  16100. + sizeof(wake_status));
  16101. + if (r != sizeof(wake_status)) {
  16102. + r = -EMSGSIZE;
  16103. + dev_err(&qca199x_dev->client->dev,
  16104. + "%s: wake status write fail.Check hardware\n",
  16105. + __func__);
  16106. + goto leave;
  16107. + }
  16108. /*
  16109. - * NFCC chip needs to be at least
  16110. - * 10usec high before make it low
  16111. + * I2C line is low after ~10 usec
  16112. */
  16113. usleep_range(10, 15);
  16114. r = i2c_master_recv(qca199x_dev->client, &wake_status,
  16115. sizeof(wake_status));
  16116. + if (r != sizeof(wake_status)) {
  16117. + r = -EMSGSIZE;
  16118. + dev_err(&qca199x_dev->client->dev,
  16119. + "%s: wake status read fail.Check hardware\n",
  16120. + __func__);
  16121. + goto leave;
  16122. + }
  16123.  
  16124. time_taken++;
  16125. + /*
  16126. + * Each NFCC wakeup cycle
  16127. + * takes about 0.5 ms
  16128. + */
  16129. if ((wake_status & NCI_WAKE) != 0)
  16130. /* NFCC wakeup time is between 0.5 and .52 ms */
  16131. - usleep_range(500, 520);
  16132. + usleep_range(500, 550);
  16133.  
  16134. } while ((wake_status & NCI_WAKE)
  16135. && (time_taken < WAKE_TIMEOUT));
  16136. - /* Restore original NFCC slave I2C address */
  16137. - if (time_taken >= WAKE_TIMEOUT)
  16138. + if (time_taken >= WAKE_TIMEOUT) {
  16139. dev_err(&qca199x_dev->client->dev,
  16140. - "nfc_ioctl_nfcc_version : TIMED OUT to get WAKEUP bit\n");
  16141. -
  16142. - qca199x_dev->client->addr = curr_addr;
  16143. - if (r != sizeof(wake_status))
  16144. - return -EMSGSIZE;
  16145. + "%s: timed out to get wakeup bit\n", __func__);
  16146. + r = -EIO;
  16147. + goto leave;
  16148. + }
  16149. + r = 0;
  16150. qca199x_dev->state = NFCC_STATE_NORMAL_WAKE;
  16151. }
  16152. -
  16153. +leave:
  16154. + /* Restore original NFCC slave I2C address */
  16155. + qca199x_dev->client->addr = curr_addr;
  16156. return r;
  16157. }
  16158.  
  16159. /*
  16160. * Inside nfc_ioctl_power_states
  16161. *
  16162. - * @brief ioctl functions
  16163. + * @brief ioctl functions
  16164. *
  16165. *
  16166. * Device control
  16167. * remove control via ioctl
  16168. - * (arg = 0): NFC_DISABLE GPIO = 0
  16169. - * (arg = 1): NFC_DISABLE GPIO = 1
  16170. - * NOT USED (arg = 2): FW_DL GPIO = 0
  16171. - * NOT USED (arg = 3): FW_DL GPIO = 1
  16172. + * (arg = 0): NFC_DISABLE GPIO = 0
  16173. + * (arg = 1): NFC_DISABLE GPIO = 1
  16174. + * NOT USED (arg = 2): FW_DL GPIO = 0
  16175. + * NOT USED (arg = 3): FW_DL GPIO = 1
  16176. * (arg = 4): NFCC_WAKE = 1
  16177. * (arg = 5): NFCC_WAKE = 0
  16178. *
  16179. @@ -628,7 +687,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned int cmd,
  16180. dev_dbg(&qca199x_dev->client->dev, "gpio_set_value disable: %s: info: %p\n",
  16181. __func__, qca199x_dev);
  16182. gpio_set_value(qca199x_dev->dis_gpio, 0);
  16183. - usleep(1000);
  16184. + usleep_range(1000, 1100);
  16185. } else if (arg == 1) {
  16186. /*
  16187. * We are attempting a hardware reset so let us disable
  16188. @@ -650,7 +709,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned int cmd,
  16189. dev_dbg(&qca199x_dev->client->dev, "gpio_set_value enable: %s: info: %p\n",
  16190. __func__, qca199x_dev);
  16191. gpio_set_value(qca199x_dev->dis_gpio, 1);
  16192. - /*nfcc needs atleast 100ms for the chip to power cycle*/
  16193. + /* NFCC needs at least 100 ms to power cycle */
  16194. msleep(100);
  16195. } else if (arg == 2) {
  16196. mutex_lock(&qca199x_dev->read_mutex);
  16197. @@ -671,12 +730,12 @@ int nfc_ioctl_power_states(struct file *filp, unsigned int cmd,
  16198. msleep(20);
  16199. } else if (arg == 4) {
  16200. mutex_lock(&qca199x_dev->read_mutex);
  16201. - nfcc_wake(NFCC_WAKE, filp);
  16202. + r = nfcc_wake(NFCC_WAKE, filp);
  16203. dev_dbg(&qca199x_dev->client->dev, "nfcc wake: %s: info: %p\n",
  16204. __func__, qca199x_dev);
  16205. mutex_unlock(&qca199x_dev->read_mutex);
  16206. } else if (arg == 5) {
  16207. - nfcc_wake(NFCC_SLEEP, filp);
  16208. + r = nfcc_wake(NFCC_SLEEP, filp);
  16209. } else {
  16210. r = -ENOIOCTLCMD;
  16211. }
  16212. @@ -688,14 +747,14 @@ err_req:
  16213. /*
  16214. * Inside nfc_ioctl_nfcc_mode
  16215. *
  16216. - * @brief nfc_ioctl_nfcc_mode
  16217. + * @brief nfc_ioctl_nfcc_mode
  16218. *
  16219. * (arg = 0) ; NORMAL_MODE - Standard mode, unsolicited read behaviour
  16220. * (arg = 1) ; SOLICITED_MODE - As above but reads are solicited from User Land
  16221. * (arg = 2) ; UNSOLICITED_FTM_RAW MODE - NORMAL_MODE but messages from FTM and
  16222. - * not NCI Host.
  16223. + * not NCI Host.
  16224. * (arg = 2) ; SOLICITED_FTM_RAW_MODE - As SOLICITED_MODE but messages from FTM
  16225. - * and not NCI Host.
  16226. + * and not NCI Host.
  16227. *
  16228. *
  16229. *
  16230. @@ -742,6 +801,61 @@ int nfc_ioctl_nfcc_mode(struct file *filp, unsigned int cmd, unsigned long arg)
  16231. }
  16232.  
  16233. /*
  16234. + * Inside nfc_ioctl_nfcc_efuse
  16235. + *
  16236. + * @brief nfc_ioctl_nfcc_efuse
  16237. + *
  16238. + *
  16239. + */
  16240. +int nfc_ioctl_nfcc_efuse(struct file *filp, unsigned int cmd,
  16241. + unsigned long arg)
  16242. +{
  16243. + int r = 0;
  16244. + unsigned short slave_addr = 0xE;
  16245. + unsigned short curr_addr;
  16246. + unsigned char efuse_addr = EFUSE_REG;
  16247. + unsigned char efuse_value = 0xFF;
  16248. +
  16249. + struct qca199x_dev *qca199x_dev = filp->private_data;
  16250. +
  16251. + curr_addr = qca199x_dev->client->addr;
  16252. + qca199x_dev->client->addr = slave_addr;
  16253. +
  16254. + r = nfc_i2c_write(qca199x_dev->client,
  16255. + &efuse_addr, 1);
  16256. + if (r < 0) {
  16257. + /* Restore original NFCC slave I2C address */
  16258. + qca199x_dev->client->addr = curr_addr;
  16259. + dev_err(&qca199x_dev->client->dev,
  16260. + "ERROR_WRITE_FAIL : i2c write fail\n");
  16261. + return -EIO;
  16262. + }
  16263. +
  16264. + /*
  16265. + * NFCC chip needs the line high for at
  16266. + * least 10 usec before pulling it low
  16267. + */
  16268. + usleep_range(10, 15);
  16269. +
  16270. + r = i2c_master_recv(qca199x_dev->client, &efuse_value,
  16271. + sizeof(efuse_value));
  16272. + if (r < 0) {
  16273. + /* Restore original NFCC slave I2C address */
  16274. + qca199x_dev->client->addr = curr_addr;
  16275. + dev_err(&qca199x_dev->client->dev,
  16276. + "ERROR_I2C_RCV_FAIL : i2c recv fail\n");
  16277. + return -EIO;
  16278. + }
  16279. +
  16280. + dev_dbg(&qca199x_dev->client->dev, "%s: EFUSE_VALUE %02x\n",
  16281. + __func__, efuse_value);
  16282. +
  16283. + /* Restore original NFCC slave I2C address */
  16284. + qca199x_dev->client->addr = curr_addr;
  16285. + return efuse_value;
  16286. +}
  16287. +
  16288. +/*
  16289. * Inside nfc_ioctl_nfcc_version
  16290. *
  16291. * @brief nfc_ioctl_nfcc_version
  16292. @@ -752,11 +866,8 @@ int nfc_ioctl_nfcc_version(struct file *filp, unsigned int cmd,
  16293. unsigned long arg)
  16294. {
  16295. int r = 0;
  16296. - int time_taken = 0;
  16297. - unsigned short slave_addr = 0xE;
  16298. + unsigned short slave_addr = 0xE;
  16299. unsigned short curr_addr;
  16300. - unsigned char raw_nci_wake[] = {0x10, 0x0F};
  16301. - unsigned char raw_nci_read;
  16302. unsigned char raw_chip_version_addr = 0x00;
  16303. unsigned char raw_chip_rev_id_addr = 0x9C;
  16304. unsigned char raw_chip_version = 0xFF;
  16305. @@ -770,88 +881,56 @@ int nfc_ioctl_nfcc_version(struct file *filp, unsigned int cmd,
  16306. * Always wake up chip when reading 0x9C, otherwise this
  16307. * register is not updated
  16308. */
  16309. + r = nfcc_wake(NFCC_WAKE, filp);
  16310. curr_addr = qca199x_dev->client->addr;
  16311. qca199x_dev->client->addr = slave_addr;
  16312. - r = nfc_i2c_write(qca199x_dev->client, &raw_nci_wake[0],
  16313. - sizeof(raw_nci_wake));
  16314. -
  16315. - if (r != sizeof(raw_nci_wake))
  16316. - dev_err(&qca199x_dev->client->dev,
  16317. - "nfc_ioctl_nfcc_version : failed to send wake command\n");
  16318. -
  16319. - /*
  16320. - * After placing the NFCC to sleep by a PROP
  16321. - * SLEEP NCI msg (2F 03) and we need to wake
  16322. - * it back up to obtain some information (by
  16323. - * setting the wake bit).We need to determine
  16324. - * when it has in actual fact woken before we
  16325. - * can read the required data. We do that by
  16326. - * reading back & testing if that wake bit has
  16327. - * been cleared.
  16328. - */
  16329. - do {
  16330. - raw_nci_read = 0x10;
  16331. - r = nfc_i2c_write(qca199x_dev->client, &raw_nci_read, 1);
  16332. - /*
  16333. - * NFCC chip needs to be at least
  16334. - * 10usec high before make it low
  16335. - */
  16336. - usleep_range(10, 15);
  16337. -
  16338. - r = i2c_master_recv(qca199x_dev->client, &raw_nci_read,
  16339. - sizeof(raw_nci_read));
  16340. -
  16341. - if ((raw_nci_read & NCI_WAKE) != 0)
  16342. - /* NFCC wakeup time is between 0.5 and .52 ms */
  16343. - usleep_range(500, 520);
  16344. -
  16345. - time_taken++;
  16346. -
  16347. - } while ((raw_nci_read & NCI_WAKE)
  16348. - && (time_taken < WAKE_TIMEOUT));
  16349. -
  16350. - if (time_taken < WAKE_TIMEOUT)
  16351. - qca199x_dev->state = NFCC_STATE_NORMAL_WAKE;
  16352. - else
  16353. - dev_err(&qca199x_dev->client->dev,
  16354. - "nfc_ioctl_nfcc_version : TIMED OUT to get WAKEUP bit\n");
  16355.  
  16356.  
  16357. - if (r != 1) {
  16358. - /*
  16359. - * r < 0 indicates an error, maybe chip isn't
  16360. - * up yet.What should we do??? r = 0 indicates
  16361. - * nothing read, maybe chip isn't up yet. (should
  16362. - * not happen) r > 1 indicates too many bytes read,
  16363. - * maybe ?(should not happen)
  16364. - */
  16365. + if (r) {
  16366. dev_err(&qca199x_dev->client->dev,
  16367. - "nfc_ioctl_nfcc_version : i2c error %d\n", r);
  16368. + "%s: nfcc wake failed: %d\n", __func__, r);
  16369. + r = -EIO;
  16370. + goto leave;
  16371. }
  16372.  
  16373. if (arg == 0) {
  16374. r = nfc_i2c_write(qca199x_dev->client,
  16375. - &raw_chip_version_addr, 1);
  16376. + &raw_chip_version_addr, sizeof(raw_chip_version_addr));
  16377. + if (r != sizeof(raw_chip_version_addr)) {
  16378. + r = -EMSGSIZE;
  16379. + goto err;
  16380. + }
  16381. } else if (arg == 1) {
  16382. r = nfc_i2c_write(qca199x_dev->client,
  16383. - &raw_chip_rev_id_addr, 1);
  16384. + &raw_chip_rev_id_addr, sizeof(raw_chip_rev_id_addr));
  16385. + if (r != sizeof(raw_chip_rev_id_addr)) {
  16386. + r = -EMSGSIZE;
  16387. + goto err;
  16388. + }
  16389. } else {
  16390. - /* Restore original NFCC slave I2C address */
  16391. - qca199x_dev->client->addr = curr_addr;
  16392. - return -EINVAL;
  16393. + r = -EINVAL;
  16394. + goto err;
  16395. }
  16396.  
  16397. if (r < 0) {
  16398. - /* Restore original NFCC slave I2C address */
  16399. - qca199x_dev->client->addr = curr_addr;
  16400. - dev_err(&qca199x_dev->client->dev,
  16401. - "NFCC_INVALID_CHIP_VERSION : i2c write fail\n");
  16402. - return -EIO;
  16403. + r = -EIO;
  16404. + goto err;
  16405. }
  16406. -
  16407. - usleep(10);
  16408. - r = i2c_master_recv(qca199x_dev->client, &raw_chip_version, 1);
  16409. -
  16410. + /*
  16411. + * I2C line is low after ~10 usec
  16412. + */
  16413. + usleep_range(10, 15);
  16414. + r = i2c_master_recv(qca199x_dev->client, &raw_chip_version,
  16415. + sizeof(raw_chip_version));
  16416. + if (r != sizeof(raw_chip_version)) {
  16417. + r = -EMSGSIZE;
  16418. + goto err;
  16419. + }
  16420. + goto leave;
  16421. +err:
  16422. + dev_err(&qca199x_dev->client->dev,
  16423. + "%s: i2c access failed\n", __func__);
  16424. +leave:
  16425. /* Restore original NFCC slave I2C address */
  16426. qca199x_dev->client->addr = curr_addr;
  16427. return raw_chip_version;
  16428. @@ -860,12 +939,12 @@ int nfc_ioctl_nfcc_version(struct file *filp, unsigned int cmd,
  16429. /*
  16430. * Inside nfc_ioctl_kernel_logging
  16431. *
  16432. - * @brief nfc_ioctl_kernel_logging
  16433. + * @brief nfc_ioctl_kernel_logging
  16434. *
  16435. * (arg = 0) ; NO_LOGGING
  16436. * (arg = 1) ; COMMS_LOGGING - BASIC LOGGING - Mainly just comms over I2C
  16437. * (arg = 2) ; FULL_LOGGING - ENABLE ALL - DBG messages for handlers etc.
  16438. - * ; ! Be aware as amount of logging could impact behaviour !
  16439. + * ; ! Be aware as amount of logging could impact behaviour !
  16440. *
  16441. *
  16442. */
  16443. @@ -873,19 +952,19 @@ int nfc_ioctl_kernel_logging(unsigned long arg, struct file *filp)
  16444. {
  16445. int retval = 0;
  16446. struct qca199x_dev *qca199x_dev = container_of(filp->private_data,
  16447. - struct qca199x_dev,
  16448. - qca199x_device);
  16449. + struct qca199x_dev,
  16450. + qca199x_device);
  16451. if (arg == 0) {
  16452. dev_dbg(&qca199x_dev->client->dev,
  16453. - "nfc_ioctl_kernel_logging : level = NO_LOGGING\n");
  16454. + "%s : level = NO_LOGGING\n", __func__);
  16455. logging_level = 0;
  16456. } else if (arg == 1) {
  16457. dev_dbg(&qca199x_dev->client->dev,
  16458. - "nfc_ioctl_kernel_logging: level = COMMS_LOGGING only\n");
  16459. + "%s: level = COMMS_LOGGING only\n", __func__);
  16460. logging_level = 1;
  16461. } else if (arg == 2) {
  16462. dev_dbg(&qca199x_dev->client->dev,
  16463. - "nfc_ioctl_kernel_logging: level = FULL_LOGGING\n");
  16464. + "%s: level = FULL_LOGGING\n", __func__);
  16465. logging_level = 2;
  16466. }
  16467. return retval;
  16468. @@ -894,7 +973,7 @@ int nfc_ioctl_kernel_logging(unsigned long arg, struct file *filp)
  16469. /*
  16470. * Inside nfc_ioctl_core_reset_ntf
  16471. *
  16472. - * @brief nfc_ioctl_core_reset_ntf
  16473. + * @brief nfc_ioctl_core_reset_ntf
  16474. *
  16475. * Allows callers to determine if a CORE_RESET_NTF has arrived
  16476. *
  16477. @@ -906,22 +985,23 @@ int nfc_ioctl_core_reset_ntf(struct file *filp, unsigned int cmd,
  16478. {
  16479. struct qca199x_dev *qca199x_dev = filp->private_data;
  16480. dev_dbg(&qca199x_dev->client->dev,
  16481. - "nfc_ioctl_core_reset_ntf: returning = %d\n",
  16482. + "%s: returning = %d\n",
  16483. + __func__,
  16484. qca199x_dev->core_reset_ntf);
  16485. return qca199x_dev->core_reset_ntf;
  16486. }
  16487.  
  16488. -static long nfc_ioctl(struct file *pfile, unsigned int cmd, unsigned long arg)
  16489. +static long nfc_ioctl(struct file *pfile, unsigned int cmd,
  16490. + unsigned long arg)
  16491. {
  16492. int r = 0;
  16493. -
  16494. + struct qca199x_dev *qca199x_dev = pfile->private_data;
  16495. switch (cmd) {
  16496. -
  16497. case NFC_SET_PWR:
  16498. - nfc_ioctl_power_states(pfile, cmd, arg);
  16499. + r = nfc_ioctl_power_states(pfile, cmd, arg);
  16500. break;
  16501. case NFCC_MODE:
  16502. - nfc_ioctl_nfcc_mode(pfile, cmd, arg);
  16503. + r = nfc_ioctl_nfcc_mode(pfile, cmd, arg);
  16504. break;
  16505. case NFCC_VERSION:
  16506. r = nfc_ioctl_nfcc_version(pfile, cmd, arg);
  16507. @@ -936,6 +1016,14 @@ static long nfc_ioctl(struct file *pfile, unsigned int cmd, unsigned long arg)
  16508. case NFCC_INITIAL_CORE_RESET_NTF:
  16509. r = nfc_ioctl_core_reset_ntf(pfile, cmd, arg);
  16510. break;
  16511. + case NFC_GET_EFUSE:
  16512. + r = nfc_ioctl_nfcc_efuse(pfile, cmd, arg);
  16513. + if (r < 0) {
  16514. + r = 0xFF;
  16515. + dev_err(&qca199x_dev->client->dev,
  16516. + "nfc_ioctl : FAILED TO READ EFUSE TYPE\n");
  16517. + }
  16518. + break;
  16519. default:
  16520. r = -ENOIOCTLCMD;
  16521. }
  16522. @@ -968,9 +1056,15 @@ void dumpqca1990(struct i2c_client *client)
  16523. ((i > 0xF) && (i < 0x12)) || ((i > 0x39) && (i < 0x4d)) ||
  16524. ((i > 0x69) && (i < 0x74)) || (i == 0x18) || (i == 0x30) ||
  16525. (i == 0x58)) {
  16526. - r = nfc_i2c_write(client, &raw_reg_rd, 1);
  16527. + r = nfc_i2c_write(client, &raw_reg_rd,
  16528. + sizeof(raw_reg_rd));
  16529. + if (r != sizeof(raw_reg_rd))
  16530. + break;
  16531. msleep(20);
  16532. - r = i2c_master_recv(client, &raw_reg_rd, 1);
  16533. + r = i2c_master_recv(client, &raw_reg_rd,
  16534. + sizeof(raw_reg_rd));
  16535. + if (r != sizeof(raw_reg_rd))
  16536. + break;
  16537. }
  16538. }
  16539. client->addr = temp_addr;
  16540. @@ -981,11 +1075,11 @@ static int nfc_i2c_write(struct i2c_client *client, u8 *buf, int len)
  16541. int r;
  16542.  
  16543. r = i2c_master_send(client, buf, len);
  16544. - dev_dbg(&client->dev, "send: %d\n", r);
  16545. + dev_dbg(&client->dev, "%s: send: %d\n", __func__, r);
  16546. if (r == -EREMOTEIO) { /* Retry, chip was in standby */
  16547. usleep_range(6000, 10000);
  16548. r = i2c_master_send(client, buf, len);
  16549. - dev_dbg(&client->dev, "send2: %d\n", r);
  16550. + dev_dbg(&client->dev, "%s: send attempt 2: %d\n", __func__, r);
  16551. }
  16552. if (r != len)
  16553. return -EREMOTEIO;
  16554. @@ -1001,22 +1095,23 @@ static int nfcc_hw_check(struct i2c_client *client, unsigned short curr_addr)
  16555.  
  16556. client->addr = curr_addr;
  16557. /* Set-up Addr 0. No data written */
  16558. - r = i2c_master_send(client, &buf, 1);
  16559. + r = i2c_master_send(client, &buf, sizeof(buf));
  16560. if (r < 0)
  16561. goto err_presence_check;
  16562. buf = 0;
  16563. /* Read back from Addr 0 */
  16564. - r = i2c_master_recv(client, &buf, 1);
  16565. + r = i2c_master_recv(client, &buf, sizeof(buf));
  16566. if (r < 0)
  16567. goto err_presence_check;
  16568.  
  16569. r = 0;
  16570. - return r;
  16571. + goto leave;
  16572.  
  16573. err_presence_check:
  16574. r = -ENXIO;
  16575. dev_err(&client->dev,
  16576. - "nfc-nci nfcc_presence check - no NFCC available\n");
  16577. + "%s: - no NFCC available\n", __func__);
  16578. +leave:
  16579. return r;
  16580. }
  16581. /* Initialise qca199x_ NFC controller hardware */
  16582. @@ -1024,7 +1119,7 @@ static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr,
  16583. struct qca199x_dev *qca199x_dev)
  16584. {
  16585. int r = 0;
  16586. - unsigned char raw_1p8_CONTROL_011[] = {0x11, XTAL_CLOCK};
  16587. + unsigned char raw_1P8_CONTROL_011[] = {0x11, XTAL_CLOCK};
  16588. unsigned char raw_1P8_CONTROL_010[] = {0x10, PWR_EN};
  16589. unsigned char raw_1P8_X0_0B0[] = {0xB0, (FREQ_SEL)};
  16590. unsigned char raw_slave1[] = {0x09, NCI_I2C_SLAVE};
  16591. @@ -1041,12 +1136,17 @@ static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr,
  16592.  
  16593. client->addr = curr_addr;
  16594. qca199x_dev->core_reset_ntf = DEFAULT_INITIAL_CORE_RESET_NTF;
  16595. - r = i2c_master_send(client, &buf, 1);
  16596. + r = i2c_master_send(client, &buf, sizeof(buf));
  16597. if (r < 0)
  16598. goto err_init;
  16599.  
  16600. + /*
  16601. + * I2C line is low after ~10 usec
  16602. + */
  16603. + usleep_range(10, 15);
  16604. +
  16605. buf = 0;
  16606. - r = i2c_master_recv(client, &buf, 1);
  16607. + r = i2c_master_recv(client, &buf, sizeof(buf));
  16608. if (r < 0)
  16609. goto err_init;
  16610.  
  16611. @@ -1056,36 +1156,37 @@ static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr,
  16612. if (r < 0)
  16613. goto err_init;
  16614.  
  16615. - usleep(1000);
  16616. - RAW(1p8_CONTROL_011, XTAL_CLOCK | 0x01);
  16617. + usleep_range(1000, 1100);
  16618.  
  16619. - r = nfc_i2c_write(client, &raw_1p8_CONTROL_011[0],
  16620. - sizeof(raw_1p8_CONTROL_011));
  16621. + RAW(1P8_CONTROL_011, XTAL_CLOCK | 0x01);
  16622. +
  16623. + r = nfc_i2c_write(client, &raw_1P8_CONTROL_011[0],
  16624. + sizeof(raw_1P8_CONTROL_011));
  16625. if (r < 0)
  16626. goto err_init;
  16627.  
  16628. - usleep(1000);
  16629. + usleep_range(1000, 1100); /* 1 ms wait */
  16630. RAW(1P8_CONTROL_010, (0x8));
  16631. r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
  16632. sizeof(raw_1P8_CONTROL_010));
  16633. if (r < 0)
  16634. goto err_init;
  16635.  
  16636. - usleep(10000); /* 10ms wait */
  16637. + usleep_range(10000, 11000); /* 10 ms wait */
  16638. RAW(1P8_CONTROL_010, (0xC));
  16639. r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
  16640. sizeof(raw_1P8_CONTROL_010));
  16641. if (r < 0)
  16642. goto err_init;
  16643.  
  16644. - usleep(100); /* 100uS wait */
  16645. + usleep_range(100, 110); /* 100 us wait */
  16646. RAW(1P8_X0_0B0, (FREQ_SEL_19));
  16647. r = nfc_i2c_write(client, &raw_1P8_X0_0B0[0],
  16648. sizeof(raw_1P8_X0_0B0));
  16649. if (r < 0)
  16650. goto err_init;
  16651.  
  16652. - usleep(1000);
  16653. + usleep_range(1000, 1100); /* 1 ms wait */
  16654.  
  16655. /* PWR_EN = 1 */
  16656. RAW(1P8_CONTROL_010, (0xd));
  16657. @@ -1095,7 +1196,7 @@ static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr,
  16658. goto err_init;
  16659.  
  16660.  
  16661. - usleep(20000); /* 20ms wait */
  16662. + msleep(20); /* 20ms wait */
  16663. /* LS_EN = 1 */
  16664. RAW(1P8_CONTROL_010, 0xF);
  16665. r = nfc_i2c_write(client, &raw_1P8_CONTROL_010[0],
  16666. @@ -1103,41 +1204,43 @@ static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr,
  16667. if (r < 0)
  16668. goto err_init;
  16669.  
  16670. - usleep(20000); /* 20ms wait */
  16671. + msleep(20); /* 20ms wait */
  16672.  
  16673. /* Enable the PMIC clock */
  16674. RAW(1P8_PAD_CFG_CLK_REQ, (0x1));
  16675. r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_CLK_REQ[0],
  16676. - sizeof(raw_1P8_PAD_CFG_CLK_REQ));
  16677. + sizeof(raw_1P8_PAD_CFG_CLK_REQ));
  16678. if (r < 0)
  16679. goto err_init;
  16680.  
  16681. - usleep(1000);
  16682. + usleep_range(1000, 1100); /* 1 ms wait */
  16683.  
  16684. RAW(1P8_PAD_CFG_PWR_REQ, (0x1));
  16685. r = nfc_i2c_write(client, &raw_1P8_PAD_CFG_PWR_REQ[0],
  16686. - sizeof(raw_1P8_PAD_CFG_PWR_REQ));
  16687. + sizeof(raw_1P8_PAD_CFG_PWR_REQ));
  16688. if (r < 0)
  16689. goto err_init;
  16690.  
  16691. - usleep(1000);
  16692. + usleep_range(1000, 1100); /* 1 ms wait */
  16693.  
  16694. RAW(slave2, 0x10);
  16695. r = nfc_i2c_write(client, &raw_slave2[0], sizeof(raw_slave2));
  16696. if (r < 0)
  16697. goto err_init;
  16698.  
  16699. - usleep(1000);
  16700. + usleep_range(1000, 1100); /* 1 ms wait */
  16701.  
  16702. RAW(slave1, NCI_I2C_SLAVE);
  16703. r = nfc_i2c_write(client, &raw_slave1[0], sizeof(raw_slave1));
  16704. if (r < 0)
  16705. goto err_init;
  16706.  
  16707. - usleep(1000);
  16708. + usleep_range(1000, 1100); /* 1 ms wait */
  16709.  
  16710. /* QCA199x NFCC CPU should now boot... */
  16711. - r = i2c_master_recv(client, &raw_slave1_rd, 1);
  16712. + r = i2c_master_recv(client, &raw_slave1_rd, sizeof(raw_slave1_rd));
  16713. + if (r < 0)
  16714. + goto err_init;
  16715. /* Talk on NCI slave address NCI_I2C_SLAVE 0x2C*/
  16716. client->addr = NCI_I2C_SLAVE;
  16717.  
  16718. @@ -1146,35 +1249,37 @@ static int nfcc_initialise(struct i2c_client *client, unsigned short curr_addr,
  16719. * get a core reset notification - This is time for chip
  16720. * & NFCC controller to come-up.
  16721. */
  16722. - usleep(15000); /* 15 ms */
  16723. + usleep_range(15000, 16500); /* 15 ms */
  16724.  
  16725. do {
  16726. - ret = i2c_master_recv(client, rsp, 5);
  16727. + ret = i2c_master_recv(client, rsp, sizeof(rsp));
  16728. + if (ret < 0)
  16729. + goto err_init;
  16730. /* Found core reset notification */
  16731. - if (((rsp[0] == CORE_RESET_RSP_GID) &&
  16732. + if ((rsp[0] == CORE_RESET_RSP_GID) &&
  16733. (rsp[1] == CORE_RESET_OID) &&
  16734. - (rsp[2] == CORE_RST_NTF_LENGTH))
  16735. - || time_taken == NTF_TIMEOUT) {
  16736. + (rsp[2] == CORE_RST_NTF_LENGTH)) {
  16737. dev_dbg(&client->dev,
  16738. - "NFC core reset recevd: %s: info: %p\n",
  16739. + "NFC core reset recvd: %s: info: %p\n",
  16740. __func__, client);
  16741. core_reset_completed = true;
  16742. } else {
  16743. - usleep(2000); /* 2ms sleep before retry */
  16744. + usleep_range(2000, 2200); /* 2 ms wait before retry */
  16745. }
  16746. time_taken++;
  16747. - } while (!core_reset_completed);
  16748. - if (time_taken == NTF_TIMEOUT)
  16749. + } while (!core_reset_completed && (time_taken < NTF_TIMEOUT));
  16750. + if (time_taken >= NTF_TIMEOUT) {
  16751. qca199x_dev->core_reset_ntf = TIMEDOUT_INITIAL_CORE_RESET_NTF;
  16752. - else
  16753. - qca199x_dev->core_reset_ntf = ARRIVED_INITIAL_CORE_RESET_NTF;
  16754. + goto err_init;
  16755. + }
  16756. + qca199x_dev->core_reset_ntf = ARRIVED_INITIAL_CORE_RESET_NTF;
  16757.  
  16758. r = 0;
  16759. return r;
  16760. err_init:
  16761. r = 1;
  16762. dev_err(&client->dev,
  16763. - "nfc-nci nfcc_initialise: failed. Check Hardware\n");
  16764. + "%s: failed. Check Hardware\n", __func__);
  16765. return r;
  16766. }
  16767. /*
  16768. @@ -1185,19 +1290,20 @@ static int qca199x_clock_select(struct qca199x_dev *qca199x_dev)
  16769. int r = 0;
  16770.  
  16771. if (!strcmp(qca199x_dev->clk_src_name, "BBCLK2")) {
  16772. - qca199x_dev->s_clk =
  16773. + qca199x_dev->s_clk =
  16774. clk_get(&qca199x_dev->client->dev, "ref_clk");
  16775. if (qca199x_dev->s_clk == NULL)
  16776. goto err_invalid_dis_gpio;
  16777. } else if (!strcmp(qca199x_dev->clk_src_name, "RFCLK3")) {
  16778. - qca199x_dev->s_clk =
  16779. + qca199x_dev->s_clk =
  16780. clk_get(&qca199x_dev->client->dev, "ref_clk_rf");
  16781. if (qca199x_dev->s_clk == NULL)
  16782. goto err_invalid_dis_gpio;
  16783. } else if (!strcmp(qca199x_dev->clk_src_name, "GPCLK")) {
  16784. if (gpio_is_valid(qca199x_dev->clk_src_gpio)) {
  16785. - qca199x_dev->s_clk =
  16786. - clk_get(&qca199x_dev->client->dev, "core_clk");
  16787. + qca199x_dev->s_clk =
  16788. + clk_get(&qca199x_dev->client->dev,
  16789. + "core_clk");
  16790. if (qca199x_dev->s_clk == NULL)
  16791. goto err_invalid_dis_gpio;
  16792. } else {
  16793. @@ -1205,8 +1311,9 @@ static int qca199x_clock_select(struct qca199x_dev *qca199x_dev)
  16794. }
  16795. } else if (!strcmp(qca199x_dev->clk_src_name, "GPCLK2")) {
  16796. if (gpio_is_valid(qca199x_dev->clk_src_gpio)) {
  16797. - qca199x_dev->s_clk =
  16798. - clk_get(&qca199x_dev->client->dev, "core_clk_pvt");
  16799. + qca199x_dev->s_clk =
  16800. + clk_get(&qca199x_dev->client->dev,
  16801. + "core_clk_pvt");
  16802. if (qca199x_dev->s_clk == NULL)
  16803. goto err_invalid_dis_gpio;
  16804. } else {
  16805. @@ -1282,7 +1389,7 @@ static int nfc_parse_dt(struct device *dev, struct qca199x_platform_data *pdata)
  16806. }
  16807.  
  16808. if ((!strcmp(pdata->clk_src_name, "GPCLK")) ||
  16809. - (!strcmp(pdata->clk_src_name, "GPCLK2"))) {
  16810. + (!strcmp(pdata->clk_src_name, "GPCLK2"))) {
  16811. pdata->clk_src_gpio = of_get_named_gpio(np,
  16812. "qcom,clk-src-gpio", 0);
  16813. if ((!gpio_is_valid(pdata->clk_src_gpio)))
  16814. @@ -1311,7 +1418,7 @@ static int qca199x_probe(struct i2c_client *client,
  16815. sizeof(struct qca199x_platform_data), GFP_KERNEL);
  16816. if (!platform_data) {
  16817. dev_err(&client->dev,
  16818. - "nfc-nci probe: Failed to allocate memory\n");
  16819. + "%s: Failed to allocate memory\n", __func__);
  16820. return -ENOMEM;
  16821. }
  16822. r = nfc_parse_dt(&client->dev, platform_data);
  16823. @@ -1323,20 +1430,20 @@ static int qca199x_probe(struct i2c_client *client,
  16824. if (!platform_data)
  16825. return -EINVAL;
  16826. dev_dbg(&client->dev,
  16827. - "nfc-nci probe: %s, inside nfc-nci flags = %x\n",
  16828. + "%s, inside nfc-nci flags = %x\n",
  16829. __func__, client->flags);
  16830. if (platform_data == NULL) {
  16831. - dev_err(&client->dev, "nfc-nci probe: failed\n");
  16832. + dev_err(&client->dev, "%s: failed\n", __func__);
  16833. return -ENODEV;
  16834. }
  16835. if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
  16836. - dev_err(&client->dev, "nfc-nci probe: need I2C_FUNC_I2C\n");
  16837. + dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__);
  16838. return -ENODEV;
  16839. }
  16840. qca199x_dev = kzalloc(sizeof(*qca199x_dev), GFP_KERNEL);
  16841. if (qca199x_dev == NULL) {
  16842. dev_err(&client->dev,
  16843. - "nfc-nci probe: failed to allocate memory for module data\n");
  16844. + "%s: failed to allocate memory for module data\n", __func__);
  16845. return -ENOMEM;
  16846. }
  16847. qca199x_dev->client = client;
  16848. @@ -1354,27 +1461,29 @@ static int qca199x_probe(struct i2c_client *client,
  16849. r = gpio_request(platform_data->dis_gpio, "nfc_reset_gpio");
  16850. if (r) {
  16851. dev_err(&client->dev,
  16852. - "NFC: unable to request gpio [%d]\n",
  16853. + "%s: unable to request gpio [%d]\n",
  16854. + __func__,
  16855. platform_data->dis_gpio);
  16856. goto err_free_dev;
  16857. }
  16858. r = gpio_direction_output(platform_data->dis_gpio, 1);
  16859. if (r) {
  16860. dev_err(&client->dev,
  16861. - "NFC: unable to set direction for gpio [%d]\n",
  16862. + "%s: unable to set direction for gpio [%d]\n",
  16863. + __func__,
  16864. platform_data->dis_gpio);
  16865. goto err_dis_gpio;
  16866. }
  16867. } else {
  16868. - dev_err(&client->dev, "dis gpio not provided\n");
  16869. + dev_err(&client->dev, "%s: dis gpio not provided\n", __func__);
  16870. goto err_free_dev;
  16871. }
  16872.  
  16873. /* Guarantee that the NFCC starts in a clean state. */
  16874. gpio_set_value(platform_data->dis_gpio, 1);/* HPD */
  16875. - usleep(200);
  16876. + usleep_range(200, 220);
  16877. gpio_set_value(platform_data->dis_gpio, 0);/* ULPM */
  16878. - usleep(200);
  16879. + usleep_range(200, 220);
  16880.  
  16881. r = nfcc_hw_check(client, platform_data->reg);
  16882. if (r) {
  16883. @@ -1386,7 +1495,8 @@ static int qca199x_probe(struct i2c_client *client,
  16884. if (gpio_is_valid(platform_data->irq_gpio)) {
  16885. r = gpio_request(platform_data->irq_gpio, "nfc_irq_gpio");
  16886. if (r) {
  16887. - dev_err(&client->dev, "unable to request gpio [%d]\n",
  16888. + dev_err(&client->dev, "%s: unable to request gpio [%d]\n",
  16889. + __func__,
  16890. platform_data->irq_gpio);
  16891. goto err_dis_gpio;
  16892. }
  16893. @@ -1394,7 +1504,8 @@ static int qca199x_probe(struct i2c_client *client,
  16894. if (r) {
  16895.  
  16896. dev_err(&client->dev,
  16897. - "unable to set direction for gpio [%d]\n",
  16898. + "%s: unable to set direction for gpio [%d]\n",
  16899. + __func__,
  16900. platform_data->irq_gpio);
  16901. goto err_irq;
  16902. }
  16903. @@ -1406,7 +1517,7 @@ static int qca199x_probe(struct i2c_client *client,
  16904. client->irq = irqn;
  16905.  
  16906. } else {
  16907. - dev_err(&client->dev, "irq gpio not provided\n");
  16908. + dev_err(&client->dev, "%s: irq gpio not provided\n", __func__);
  16909. goto err_dis_gpio;
  16910. }
  16911. /* Interrupt from NFCC CLK_REQ to handle REF_CLK
  16912. @@ -1417,7 +1528,9 @@ static int qca199x_probe(struct i2c_client *client,
  16913. r = gpio_request(platform_data->irq_gpio_clk_req,
  16914. "nfc_irq_gpio_clk_en");
  16915. if (r) {
  16916. - dev_err(&client->dev, "unable to request CLK_EN gpio [%d]\n",
  16917. + dev_err(&client->dev,
  16918. + "%s: unable to request CLK_EN gpio [%d]\n",
  16919. + __func__,
  16920. platform_data->irq_gpio_clk_req);
  16921. goto err_irq;
  16922. }
  16923. @@ -1425,8 +1538,8 @@ static int qca199x_probe(struct i2c_client *client,
  16924. platform_data->irq_gpio_clk_req);
  16925. if (r) {
  16926. dev_err(&client->dev,
  16927. - "unable to set direction for CLK_EN gpio [%d]\n",
  16928. - platform_data->irq_gpio_clk_req);
  16929. + "%s: cannot set direction CLK_EN gpio [%d]\n",
  16930. + __func__, platform_data->irq_gpio_clk_req);
  16931. goto err_irq_clk;
  16932. }
  16933. gpio_to_irq(0);
  16934. @@ -1437,7 +1550,8 @@ static int qca199x_probe(struct i2c_client *client,
  16935. }
  16936. platform_data->clk_req_irq_num = irqn;
  16937. } else {
  16938. - dev_err(&client->dev, "irq CLK_EN gpio not provided\n");
  16939. + dev_err(&client->dev,
  16940. + "%s: irq CLK_EN gpio not provided\n", __func__);
  16941. goto err_irq;
  16942. }
  16943. }
  16944. @@ -1458,19 +1572,21 @@ static int qca199x_probe(struct i2c_client *client,
  16945. r = gpio_request(platform_data->clkreq_gpio,
  16946. "nfc_clkreq_gpio");
  16947. if (r) {
  16948. - dev_err(&client->dev, "unable to request gpio [%d]\n",
  16949. - platform_data->clkreq_gpio);
  16950. + dev_err(&client->dev,
  16951. + "%s: unable to request gpio [%d]\n",
  16952. + __func__, platform_data->clkreq_gpio);
  16953. goto err_clkreq_gpio;
  16954. }
  16955. r = gpio_direction_input(platform_data->clkreq_gpio);
  16956. if (r) {
  16957. dev_err(&client->dev,
  16958. - "unable to set direction for gpio [%d]\n",
  16959. - platform_data->clkreq_gpio);
  16960. + "%s: cannot set direction for gpio [%d]\n",
  16961. + __func__, platform_data->clkreq_gpio);
  16962. goto err_clkreq_gpio;
  16963. }
  16964. } else {
  16965. - dev_err(&client->dev, "clkreq gpio not provided\n");
  16966. + dev_err(&client->dev,
  16967. + "%s: clkreq gpio not provided\n", __func__);
  16968. goto err_clk;
  16969. }
  16970. qca199x_dev->clkreq_gpio = platform_data->clkreq_gpio;
  16971. @@ -1497,7 +1613,7 @@ static int qca199x_probe(struct i2c_client *client,
  16972.  
  16973. r = misc_register(&qca199x_dev->qca199x_device);
  16974. if (r) {
  16975. - dev_err(&client->dev, "misc_register failed\n");
  16976. + dev_err(&client->dev, "%s: misc_register failed\n", __func__);
  16977. goto err_misc_register;
  16978. }
  16979.  
  16980. @@ -1534,7 +1650,7 @@ static int qca199x_probe(struct i2c_client *client,
  16981. r = request_irq(client->irq, qca199x_dev_irq_handler,
  16982. IRQF_TRIGGER_RISING, client->name, qca199x_dev);
  16983. if (r) {
  16984. - dev_err(&client->dev, "nfc-nci probe: request_irq failed\n");
  16985. + dev_err(&client->dev, "%s: request_irq failed\n", __func__);
  16986. goto err_request_irq_failed;
  16987. }
  16988. qca199x_disable_irq(qca199x_dev);
  16989. @@ -1547,7 +1663,8 @@ static int qca199x_probe(struct i2c_client *client,
  16990. client->name, qca199x_dev);
  16991. if (r) {
  16992. dev_err(&client->dev,
  16993. - "nfc-nci probe: request_irq failed. irq no = %d\n, main irq = %d",
  16994. + "%s: request_irq failed. irq no = %d\n, main irq = %d",
  16995. + __func__,
  16996. qca199x_dev->clk_req_irq_num, client->irq);
  16997. goto err_request_irq_failed;
  16998. }
  16999. @@ -1563,6 +1680,8 @@ static int qca199x_probe(struct i2c_client *client,
  17000. INIT_WORK(&qca199x_dev->msm_clock_controll_work,
  17001. clk_req_update);
  17002. }
  17003. + device_init_wakeup(&client->dev, true);
  17004. + device_set_wakeup_capable(&client->dev, true);
  17005. i2c_set_clientdata(client, qca199x_dev);
  17006. gpio_set_value(platform_data->dis_gpio, 1);
  17007.  
  17008. @@ -1570,13 +1689,13 @@ static int qca199x_probe(struct i2c_client *client,
  17009. region2_sent = false;
  17010.  
  17011. dev_dbg(&client->dev,
  17012. - "nfc-nci probe: %s, probing qca1990 exited successfully\n",
  17013. + "%s: probing qca1990 exited successfully\n",
  17014. __func__);
  17015. return 0;
  17016.  
  17017. err_create_workq:
  17018. dev_err(&client->dev,
  17019. - "nfc-nci probe: %s, work_queue creation failure\n",
  17020. + "%s: work_queue creation failure\n",
  17021. __func__);
  17022. free_irq(client->irq, qca199x_dev);
  17023. err_nfcc_not_present:
  17024. @@ -1594,7 +1713,8 @@ err_irq_clk:
  17025. (!strcmp(platform_data->clk_src_name, "GPCLK2"))) {
  17026. r = gpio_direction_input(platform_data->irq_gpio_clk_req);
  17027. if (r)
  17028. - dev_err(&client->dev, "nfc-nci probe: Unable to set direction\n");
  17029. + dev_err(&client->dev,
  17030. + "%s: Unable to set direction\n", __func__);
  17031. gpio_free(platform_data->irq_gpio_clk_req);
  17032. }
  17033. err_irq:
  17034. @@ -1627,11 +1747,33 @@ static int qca199x_remove(struct i2c_client *client)
  17035. return 0;
  17036. }
  17037.  
  17038. +static int qca199x_suspend(struct device *device)
  17039. +{
  17040. + struct i2c_client *client = to_i2c_client(device);
  17041. +
  17042. + if (device_may_wakeup(&client->dev))
  17043. + enable_irq_wake(client->irq);
  17044. + return 0;
  17045. +}
  17046. +
  17047. +static int qca199x_resume(struct device *device)
  17048. +{
  17049. + struct i2c_client *client = to_i2c_client(device);
  17050. +
  17051. + if (device_may_wakeup(&client->dev))
  17052. + disable_irq_wake(client->irq);
  17053. + return 0;
  17054. +}
  17055. +
  17056. static const struct i2c_device_id qca199x_id[] = {
  17057. {"qca199x-i2c", 0},
  17058. {}
  17059. };
  17060.  
  17061. +static const struct dev_pm_ops nfc_pm_ops = {
  17062. + SET_SYSTEM_SLEEP_PM_OPS(qca199x_suspend, qca199x_resume)
  17063. +};
  17064. +
  17065. static struct i2c_driver qca199x = {
  17066. .id_table = qca199x_id,
  17067. .probe = qca199x_probe,
  17068. @@ -1640,12 +1782,13 @@ static struct i2c_driver qca199x = {
  17069. .owner = THIS_MODULE,
  17070. .name = "nfc-nci",
  17071. .of_match_table = msm_match_table,
  17072. + .pm = &nfc_pm_ops,
  17073. },
  17074. };
  17075.  
  17076.  
  17077. static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
  17078. - void *v)
  17079. + void *v)
  17080. {
  17081. /*
  17082. * Set DISABLE=1 *ONLY* if the NFC service has been disabled.
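
The nfc-nci hunks above add three cooperating pieces: the IRQ handler holds a wakeup event while the system is suspended, probe marks the device wakeup-capable, and new system-sleep PM ops arm the interrupt as a wake source. A minimal self-contained sketch of that flow, assuming a generic I2C client, is given below for reference; the my_* names and the 2000 ms timeout are placeholders, not identifiers from the driver itself.

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

#define MY_WAKEUP_SRC_TIMEOUT	2000	/* ms to keep the system awake; placeholder value */

struct my_chip {
	struct i2c_client *client;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_chip *chip = dev_id;
	struct device *dev = &chip->client->dev;

	/* If this IRQ woke us from suspend, give userspace time to read the packet. */
	if (device_may_wakeup(dev) && dev->power.is_suspended)
		pm_wakeup_event(dev, MY_WAKEUP_SRC_TIMEOUT);

	return IRQ_HANDLED;
}

static int my_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(client->irq);	/* IRQ may now wake the SoC */
	return 0;
}

static int my_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(client->irq);	/* back to a normal interrupt */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

/* In probe, before the IRQ is requested:
 *	device_init_wakeup(&client->dev, true);
 * and hook &my_pm_ops into the i2c_driver's .driver.pm field.
 */
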
  17083. diff --git a/drivers/nfc/nfc-nci.h b/drivers/nfc/nfc-nci.h
  17084. index 8186861..398fa3f 100644
  17085. --- a/drivers/nfc/nfc-nci.h
  17086. +++ b/drivers/nfc/nfc-nci.h
  17087. @@ -60,6 +60,7 @@ struct devicemode {
  17088. #define SET_RX_BLOCK _IOW(0xE9, 0x04, unsigned int)
  17089. #define SET_EMULATOR_TEST_POINT _IOW(0xE9, 0x05, unsigned int)
  17090. #define NFCC_VERSION _IOW(0xE9, 0x08, unsigned int)
  17091. +#define NFC_GET_EFUSE _IOW(0xE9, 0x09, unsigned int)
  17092. #define NFCC_INITIAL_CORE_RESET_NTF _IOW(0xE9, 0x10, unsigned int)
  17093.  
  17094. #define NFC_MAX_I2C_TRANSFER (0x0400)
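
The new NFC_GET_EFUSE ioctl defined just above can be exercised from userspace with a few lines of C. The sketch below assumes the misc device is exposed as /dev/nfc-nci; the actual node name depends on the platform's misc registration and is an assumption, not something stated in the patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define NFC_GET_EFUSE	_IOW(0xE9, 0x09, unsigned int)	/* mirrors nfc-nci.h above */

int main(void)
{
	int fd, efuse;

	fd = open("/dev/nfc-nci", O_RDWR);	/* assumed node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The driver returns the raw efuse byte, or 0xFF if the read failed. */
	efuse = ioctl(fd, NFC_GET_EFUSE, 0);
	printf("efuse value: 0x%02x\n", efuse);
	close(fd);
	return 0;
}

Built with any recent gcc (for example, gcc -o efuse efuse.c), this simply prints whichever efuse byte the NFCC reports.
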
  17095. diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
  17096. index 211e5cc..4b36555 100644
  17097. --- a/drivers/power/qpnp-bms.c
  17098. +++ b/drivers/power/qpnp-bms.c
  17099. @@ -142,7 +142,9 @@ struct fcc_sample {
  17100. struct bms_irq {
  17101. unsigned int irq;
  17102. unsigned long disabled;
  17103. + unsigned long wake_enabled;
  17104. bool ready;
  17105. + bool is_wake;
  17106. };
  17107.  
  17108. struct bms_wakeup_source {
  17109. @@ -455,6 +457,9 @@ static void enable_bms_irq(struct bms_irq *irq)
  17110. if (irq->ready && __test_and_clear_bit(0, &irq->disabled)) {
  17111. enable_irq(irq->irq);
  17112. pr_debug("enabled irq %d\n", irq->irq);
  17113. + if ((irq->is_wake) &&
  17114. + !__test_and_set_bit(0, &irq->wake_enabled))
  17115. + enable_irq_wake(irq->irq);
  17116. }
  17117. }
  17118.  
  17119. @@ -463,6 +468,9 @@ static void disable_bms_irq(struct bms_irq *irq)
  17120. if (irq->ready && !__test_and_set_bit(0, &irq->disabled)) {
  17121. disable_irq(irq->irq);
  17122. pr_debug("disabled irq %d\n", irq->irq);
  17123. + if ((irq->is_wake) &&
  17124. + __test_and_clear_bit(0, &irq->wake_enabled))
  17125. + disable_irq_wake(irq->irq);
  17126. }
  17127. }
  17128.  
  17129. @@ -471,6 +479,9 @@ static void disable_bms_irq_nosync(struct bms_irq *irq)
  17130. if (irq->ready && !__test_and_set_bit(0, &irq->disabled)) {
  17131. disable_irq_nosync(irq->irq);
  17132. pr_debug("disabled irq %d\n", irq->irq);
  17133. + if ((irq->is_wake) &&
  17134. + __test_and_clear_bit(0, &irq->wake_enabled))
  17135. + disable_irq_wake(irq->irq);
  17136. }
  17137. }
  17138.  
  17139. @@ -2369,7 +2380,6 @@ skip_limits:
  17140. rc_new_uah = (params->fcc_uah * pc_new) / 100;
  17141. soc_new = (rc_new_uah - params->cc_uah - params->uuc_uah)*100
  17142. / (params->fcc_uah - params->uuc_uah);
  17143. - soc_new = bound_soc(soc_new);
  17144.  
  17145. /*
  17146. * if soc_new is ZERO force it higher so that phone doesnt report soc=0
  17147. @@ -2684,6 +2694,8 @@ static int calculate_state_of_charge(struct qpnp_bms_chip *chip,
  17148. /* always clamp soc due to BMS hw/sw immaturities */
  17149. new_calculated_soc = clamp_soc_based_on_voltage(chip,
  17150. new_calculated_soc);
  17151. +
  17152. + new_calculated_soc = bound_soc(new_calculated_soc);
  17153. /*
  17154. * If the battery is full, configure the cc threshold so the system
  17155. * wakes up after SoC changes
  17156. @@ -4507,11 +4519,11 @@ static int bms_request_irqs(struct qpnp_bms_chip *chip)
  17157. int rc;
  17158.  
  17159. SPMI_REQUEST_IRQ(chip, rc, sw_cc_thr);
  17160. + chip->sw_cc_thr_irq.is_wake = true;
  17161. disable_bms_irq(&chip->sw_cc_thr_irq);
  17162. - enable_irq_wake(chip->sw_cc_thr_irq.irq);
  17163. SPMI_REQUEST_IRQ(chip, rc, ocv_thr);
  17164. + chip->ocv_thr_irq.is_wake = true;
  17165. disable_bms_irq(&chip->ocv_thr_irq);
  17166. - enable_irq_wake(chip->ocv_thr_irq.irq);
  17167. return 0;
  17168. }
  17169.  
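
Both qpnp-bms above and qpnp-charger below replace the unconditional enable_irq_wake() calls with a per-IRQ is_wake flag and a wake_enabled bit, so enable_irq_wake()/disable_irq_wake() stay balanced with enable_irq()/disable_irq(). A stripped-down sketch of that bookkeeping follows; the wake_irq struct and helper names are illustrative only, as the real drivers use their own bms_irq and qpnp_chg_irq types.

#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/types.h>

struct wake_irq {
	unsigned int irq;
	unsigned long disabled;		/* bit 0 set while the IRQ is masked */
	unsigned long wake_enabled;	/* bit 0 set while it is a wake source */
	bool is_wake;			/* should this IRQ wake the system? */
};

static void wake_irq_enable(struct wake_irq *w)
{
	if (__test_and_clear_bit(0, &w->disabled)) {
		enable_irq(w->irq);
		/* Arm the wake source at most once per enable. */
		if (w->is_wake && !__test_and_set_bit(0, &w->wake_enabled))
			enable_irq_wake(w->irq);
	}
}

static void wake_irq_disable(struct wake_irq *w)
{
	if (!__test_and_set_bit(0, &w->disabled)) {
		disable_irq(w->irq);
		/* Keep the irq_wake reference count balanced. */
		if (w->is_wake && __test_and_clear_bit(0, &w->wake_enabled))
			disable_irq_wake(w->irq);
	}
}

Tracking wake_enabled separately from disabled is what prevents the wake reference count from being bumped twice for a single logical enable, which is the imbalance the two patches are guarding against.
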
  17170. diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
  17171. index 8b084e4..3029744 100644
  17172. --- a/drivers/power/qpnp-charger.c
  17173. +++ b/drivers/power/qpnp-charger.c
  17174. @@ -10,11 +10,7 @@
  17175. * GNU General Public License for more details.
  17176. *
  17177. */
  17178. -#if defined(CONFIG_BATTERY_SAMSUNG)
  17179. -#define pr_fmt(fmt) "qpnp-chg: %s: " fmt, __func__
  17180. -#else
  17181. #define pr_fmt(fmt) "%s: " fmt, __func__
  17182. -#endif
  17183.  
  17184. #include <linux/module.h>
  17185. #include <linux/slab.h>
  17186. @@ -39,16 +35,6 @@
  17187. #include <linux/gpio.h>
  17188. #include <linux/of_gpio.h>
  17189. #include <linux/qpnp/pin.h>
  17190. -#if defined(CONFIG_BATTERY_SAMSUNG)
  17191. -#include <linux/battery/sec_charger.h>
  17192. -#endif
  17193. -#if defined(CONFIG_USB_SWITCH_RT8973)
  17194. -#include <linux/platform_data/rt8973.h>
  17195. -#endif
  17196. -
  17197. -#if defined(CONFIG_USB_SWITCH_RT8973)
  17198. -extern int rt_uart_connecting;
  17199. -#endif
  17200.  
  17201. /* Interrupt offsets */
  17202. #define INT_RT_STS(base) (base + 0x10)
  17203. @@ -243,6 +229,7 @@ struct qpnp_chg_irq {
  17204. int irq;
  17205. unsigned long disabled;
  17206. unsigned long wake_enable;
  17207. + bool is_wake;
  17208. };
  17209.  
  17210. struct qpnp_chg_regulator {
  17211. @@ -320,9 +307,7 @@ struct qpnp_chg_chip {
  17212. struct qpnp_chg_irq chg_fastchg;
  17213. struct qpnp_chg_irq chg_trklchg;
  17214. struct qpnp_chg_irq chg_failed;
  17215. - #ifndef CONFIG_BATTERY_SAMSUNG
  17216. struct qpnp_chg_irq chg_vbatdet_lo;
  17217. - #endif
  17218. struct qpnp_chg_irq batt_pres;
  17219. struct qpnp_chg_irq batt_temp_ok;
  17220. struct qpnp_chg_irq coarse_det_usb;
  17221. @@ -383,37 +368,24 @@ struct qpnp_chg_chip {
  17222. struct power_supply dc_psy;
  17223. struct power_supply *usb_psy;
  17224. struct power_supply *bms_psy;
  17225. - #ifndef CONFIG_BATTERY_SAMSUNG
  17226. struct power_supply batt_psy;
  17227. - #endif
  17228. uint32_t flags;
  17229. struct qpnp_adc_tm_btm_param adc_param;
  17230. struct work_struct adc_measure_work;
  17231. struct work_struct adc_disable_work;
  17232. struct delayed_work arb_stop_work;
  17233. - #ifdef CONFIG_BATTERY_SAMSUNG
  17234. - struct delayed_work usbin_valid_work;
  17235. - #endif
  17236. - #ifndef CONFIG_BATTERY_SAMSUNG
  17237. struct delayed_work eoc_work;
  17238. - #endif
  17239. struct delayed_work usbin_health_check;
  17240. - #ifndef CONFIG_BATTERY_SAMSUNG
  17241. struct work_struct soc_check_work;
  17242. - #endif
  17243. struct delayed_work aicl_check_work;
  17244. struct work_struct insertion_ocv_work;
  17245. struct work_struct ocp_clear_work;
  17246. struct qpnp_chg_regulator flash_wa_vreg;
  17247. struct qpnp_chg_regulator otg_vreg;
  17248. struct qpnp_chg_regulator boost_vreg;
  17249. - #ifndef CONFIG_BATTERY_SAMSUNG
  17250. struct qpnp_chg_regulator batfet_vreg;
  17251. - #endif
  17252. bool batfet_ext_en;
  17253. - #ifndef CONFIG_BATTERY_SAMSUNG
  17254. struct work_struct batfet_lcl_work;
  17255. - #endif
  17256. struct qpnp_vadc_chip *vadc_dev;
  17257. struct qpnp_iadc_chip *iadc_dev;
  17258. struct qpnp_adc_tm_chip *adc_tm_dev;
  17259. @@ -476,7 +448,6 @@ module_param(ext_ovp_isns_present, int, 0444);
  17260. static int ext_ovp_isns_r;
  17261. module_param(ext_ovp_isns_r, int, 0444);
  17262.  
  17263. -#ifndef CONFIG_BATTERY_SAMSUNG
  17264. static bool ext_ovp_isns_online;
  17265. static long ext_ovp_isns_ua;
  17266. #define MAX_CURRENT_LENGTH_9A 10
  17267. @@ -546,7 +517,6 @@ static struct kernel_param_ops ext_ovp_en_ops = {
  17268. };
  17269. module_param_cb(ext_ovp_isns_online, &ext_ovp_en_ops,
  17270. &ext_ovp_isns_online, 0664);
  17271. -#endif
  17272.  
  17273. static inline int
  17274. get_bpd(const char *name)
  17275. @@ -642,7 +612,6 @@ qpnp_chg_masked_write(struct qpnp_chg_chip *chip, u16 base,
  17276. return 0;
  17277. }
  17278.  
  17279. -#ifndef CONFIG_BATTERY_SAMSUNG
  17280. static void
  17281. qpnp_chg_enable_irq(struct qpnp_chg_irq *irq)
  17282. {
  17283. @@ -650,6 +619,10 @@ qpnp_chg_enable_irq(struct qpnp_chg_irq *irq)
  17284. pr_debug("number = %d\n", irq->irq);
  17285. enable_irq(irq->irq);
  17286. }
  17287. + if ((irq->is_wake) && (!__test_and_set_bit(0, &irq->wake_enable))) {
  17288. + pr_debug("enable wake, number = %d\n", irq->irq);
  17289. + enable_irq_wake(irq->irq);
  17290. + }
  17291. }
  17292.  
  17293. static void
  17294. @@ -659,8 +632,11 @@ qpnp_chg_disable_irq(struct qpnp_chg_irq *irq)
  17295. pr_debug("number = %d\n", irq->irq);
  17296. disable_irq_nosync(irq->irq);
  17297. }
  17298. + if ((irq->is_wake) && (__test_and_clear_bit(0, &irq->wake_enable))) {
  17299. + pr_debug("disable wake, number = %d\n", irq->irq);
  17300. + disable_irq_wake(irq->irq);
  17301. + }
  17302. }
  17303. -#endif
  17304.  
  17305. static void
  17306. qpnp_chg_irq_wake_enable(struct qpnp_chg_irq *irq)
  17307. @@ -669,6 +645,7 @@ qpnp_chg_irq_wake_enable(struct qpnp_chg_irq *irq)
  17308. pr_debug("number = %d\n", irq->irq);
  17309. enable_irq_wake(irq->irq);
  17310. }
  17311. + irq->is_wake = true;
  17312. }
  17313.  
  17314. static void
  17315. @@ -678,6 +655,7 @@ qpnp_chg_irq_wake_disable(struct qpnp_chg_irq *irq)
  17316. pr_debug("number = %d\n", irq->irq);
  17317. disable_irq_wake(irq->irq);
  17318. }
  17319. + irq->is_wake = false;
  17320. }
  17321.  
  17322. #define USB_OTG_EN_BIT BIT(0)
  17323. @@ -869,13 +847,8 @@ qpnp_chg_check_usbin_health(struct qpnp_chg_chip *chip)
  17324. return rc;
  17325. }
  17326.  
  17327. - #if defined(CONFIG_BATTERY_SAMSUNG)
  17328. - pr_err("chgr usb sts 0x%x, chgpth rt sts 0x%x\n",
  17329. - usbin_chg_rt_sts, usb_chgpth_rt_sts);
  17330. - #else
  17331. pr_debug("chgr usb sts 0x%x, chgpth rt sts 0x%x\n",
  17332. usbin_chg_rt_sts, usb_chgpth_rt_sts);
  17333. - #endif
  17334. if ((usbin_chg_rt_sts & USB_COARSE_DET) == USB_COARSE_DET) {
  17335. if ((usbin_chg_rt_sts & USB_VALID_MASK)
  17336. == USB_VALID_OVP_VALUE) {
  17337. @@ -1017,9 +990,6 @@ qpnp_chg_iusbmax_set(struct qpnp_chg_chip *chip, int mA)
  17338. {
  17339. int rc = 0;
  17340. u8 usb_reg = 0, temp = 8;
  17341. - #ifdef CONFIG_BATTERY_SAMSUNG
  17342. - union power_supply_propval val;
  17343. - #endif
  17344.  
  17345. if (mA < 0 || mA > QPNP_CHG_I_MAX_MAX_MA) {
  17346. pr_err("bad mA=%d asked to set\n", mA);
  17347. @@ -1041,14 +1011,6 @@ qpnp_chg_iusbmax_set(struct qpnp_chg_chip *chip, int mA)
  17348. /* Impose input current limit */
  17349. if (chip->maxinput_usb_ma)
  17350. mA = (chip->maxinput_usb_ma) <= mA ? chip->maxinput_usb_ma : mA;
  17351. - #ifdef CONFIG_BATTERY_SAMSUNG
  17352. - psy_do_property("qpnp-chg", get,
  17353. - POWER_SUPPLY_PROP_CURRENT_MAX, val);
  17354. - if (mA > val.intval && val.intval) {
  17355. - pr_err("force set to %d mA (<= %d)\n", val.intval, mA);
  17356. - mA = val.intval;
  17357. - }
  17358. - #endif
  17359.  
  17360. usb_reg = mA / QPNP_CHG_I_MAXSTEP_MA;
  17361.  
  17362. @@ -1061,11 +1023,7 @@ qpnp_chg_iusbmax_set(struct qpnp_chg_chip *chip, int mA)
  17363. 0x0C, 0x0C, 1);
  17364. }
  17365.  
  17366. - #ifdef CONFIG_BATTERY_SAMSUNG
  17367. - pr_err("current=%d setting 0x%x\n", mA, usb_reg);
  17368. - #else
  17369. pr_debug("current=%d setting 0x%x\n", mA, usb_reg);
  17370. - #endif
  17371. rc = qpnp_chg_write(chip, &usb_reg,
  17372. chip->usb_chgpth_base + CHGR_I_MAX_REG, 1);
  17373.  
  17374. @@ -1276,16 +1234,10 @@ qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
  17375. /* Don't run on battery for batteryless hardware */
  17376. if (chip->use_default_batt_values)
  17377. return 0;
  17378. -
  17379. - #ifdef CONFIG_BATTERY_SAMSUNG
  17380. - /* Don't force on battery and allow charge if battery is not present*/
  17381. - if (!disable && !qpnp_chg_is_batt_present(chip))
  17382. - return 0;
  17383. - #else
  17384. /* Don't force on battery if battery is not present */
  17385. if (!qpnp_chg_is_batt_present(chip))
  17386. return 0;
  17387. - #endif
  17388. +
  17389. /* This bit forces the charger to run off of the battery rather
  17390. * than a connected charger */
  17391. return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
  17392. @@ -1415,7 +1367,6 @@ qpnp_bat_if_adc_disable_work(struct work_struct *work)
  17393. }
  17394.  
  17395. #define EOC_CHECK_PERIOD_MS 10000
  17396. -#ifndef CONFIG_BATTERY_SAMSUNG
  17397. static irqreturn_t
  17398. qpnp_chg_vbatdet_lo_irq_handler(int irq, void *_chip)
  17399. {
  17400. @@ -1449,7 +1400,6 @@ qpnp_chg_vbatdet_lo_irq_handler(int irq, void *_chip)
  17401. }
  17402. return IRQ_HANDLED;
  17403. }
  17404. -#endif
  17405.  
  17406. #define ARB_STOP_WORK_MS 1000
  17407. static irqreturn_t
  17408. @@ -1608,7 +1558,6 @@ qpnp_chg_vddmax_and_trim_set(struct qpnp_chg_chip *chip,
  17409. return 0;
  17410. }
  17411.  
  17412. -#ifndef CONFIG_BATTERY_SAMSUNG
  17413. static int
  17414. qpnp_chg_vddmax_get(struct qpnp_chg_chip *chip)
  17415. {
  17416. @@ -1623,7 +1572,6 @@ qpnp_chg_vddmax_get(struct qpnp_chg_chip *chip)
  17417.  
  17418. return QPNP_CHG_V_MIN_MV + (int)vddmax * QPNP_CHG_V_STEP_MV;
  17419. }
  17420. -#endif
  17421.  
  17422. /* JEITA compliance logic */
  17423. static void
  17424. @@ -1640,32 +1588,6 @@ qpnp_chg_set_appropriate_vddmax(struct qpnp_chg_chip *chip)
  17425. chip->delta_vddmax_mv);
  17426. }
  17427.  
  17428. -#define MIN_DELTA_MV_TO_INCREASE_VDD_MAX 8
  17429. -#define MAX_DELTA_VDD_MAX_MV 80
  17430. -#define VDD_MAX_CENTER_OFFSET 4
  17431. -static void
  17432. -qpnp_chg_adjust_vddmax(struct qpnp_chg_chip *chip, int vbat_mv)
  17433. -{
  17434. - int delta_mv, closest_delta_mv, sign;
  17435. -
  17436. - delta_mv = chip->max_voltage_mv - VDD_MAX_CENTER_OFFSET - vbat_mv;
  17437. - if (delta_mv > 0 && delta_mv < MIN_DELTA_MV_TO_INCREASE_VDD_MAX) {
  17438. - pr_debug("vbat is not low enough to increase vdd\n");
  17439. - return;
  17440. - }
  17441. -
  17442. - sign = delta_mv > 0 ? 1 : -1;
  17443. - closest_delta_mv = ((delta_mv + sign * QPNP_CHG_BUCK_TRIM1_STEP / 2)
  17444. - / QPNP_CHG_BUCK_TRIM1_STEP) * QPNP_CHG_BUCK_TRIM1_STEP;
  17445. - pr_debug("max_voltage = %d, vbat_mv = %d, delta_mv = %d, closest = %d\n",
  17446. - chip->max_voltage_mv, vbat_mv,
  17447. - delta_mv, closest_delta_mv);
  17448. - chip->delta_vddmax_mv = clamp(chip->delta_vddmax_mv + closest_delta_mv,
  17449. - -MAX_DELTA_VDD_MAX_MV, MAX_DELTA_VDD_MAX_MV);
  17450. - pr_debug("using delta_vddmax_mv = %d\n", chip->delta_vddmax_mv);
  17451. - qpnp_chg_set_appropriate_vddmax(chip);
  17452. -}
  17453. -
  17454. static void
  17455. qpnp_usbin_health_check_work(struct work_struct *work)
  17456. {
  17457. @@ -1685,10 +1607,8 @@ qpnp_usbin_health_check_work(struct work_struct *work)
  17458. psy_health_sts = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
  17459. else if (usbin_health == USBIN_OK)
  17460. psy_health_sts = POWER_SUPPLY_HEALTH_GOOD;
  17461. - #ifndef CONFIG_BATTERY_SAMSUNG
  17462. power_supply_set_health_state(chip->usb_psy, psy_health_sts);
  17463. power_supply_changed(chip->usb_psy);
  17464. - #endif
  17465. }
  17466. /* enable OVP monitor in usb valid after coarse-det complete */
  17467. chip->usb_valid_check_ovp = true;
  17468. @@ -1696,123 +1616,6 @@ qpnp_usbin_health_check_work(struct work_struct *work)
  17469. return;
  17470. }
  17471.  
  17472. -#ifdef CONFIG_BATTERY_SAMSUNG
  17473. -int wait_muic_event = 0;
  17474. -
  17475. -static void
  17476. -sec_qpnp_usbin_valid_work(struct work_struct *work)
  17477. -{
  17478. - struct delayed_work *dwork = to_delayed_work(work);
  17479. - struct qpnp_chg_chip *chip = container_of(dwork,
  17480. - struct qpnp_chg_chip, usbin_valid_work);
  17481. -
  17482. - int usb_present, host_mode, usbin_health;
  17483. - u8 psy_health_sts;
  17484. -#ifdef CONFIG_BATTERY_SAMSUNG
  17485. - union power_supply_propval value;
  17486. -#endif
  17487. -
  17488. - usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
  17489. - host_mode = qpnp_chg_is_otg_en_set(chip);
  17490. -#ifdef CONFIG_BATTERY_SAMSUNG
  17491. - pr_err("usbin-valid triggered: %d->%d host_mode: %d\n",
  17492. - chip->usb_present, usb_present, host_mode);
  17493. -#else
  17494. - pr_debug("usbin-valid triggered: %d host_mode: %d\n",
  17495. - usb_present, host_mode);
  17496. -#endif
  17497. -
  17498. - if (chip->usb_present ^ usb_present) {
  17499. - chip->usb_present = usb_present;
  17500. - if (!usb_present) {
  17501. - /* when a valid charger inserted, and increase the
  17502. - * charger voltage to OVP threshold, then
  17503. - * usb_in_valid falling edge interrupt triggers.
  17504. - * So we handle the OVP monitor here, and ignore
  17505. - * other health state changes */
  17506. - if (chip->ovp_monitor_enable &&
  17507. - (chip->usb_valid_check_ovp)) {
  17508. - usbin_health =
  17509. - qpnp_chg_check_usbin_health(chip);
  17510. - if ((chip->usbin_health != usbin_health)
  17511. - && (usbin_health == USBIN_OVP)) {
  17512. - chip->usbin_health = usbin_health;
  17513. - psy_health_sts =
  17514. - POWER_SUPPLY_HEALTH_OVERVOLTAGE;
  17515. - #ifdef CONFIG_BATTERY_SAMSUNG
  17516. - value.intval = psy_health_sts;
  17517. - psy_do_property("battery", set,
  17518. - POWER_SUPPLY_PROP_HEALTH, value);
  17519. - pr_info("%s overvoltage detected \n",__func__);
  17520. - #else
  17521. - power_supply_set_health_state(
  17522. - chip->usb_psy,
  17523. - psy_health_sts);
  17524. - power_supply_changed(chip->usb_psy);
  17525. - #endif
  17526. - }
  17527. - }
  17528. - if (!qpnp_chg_is_dc_chg_plugged_in(chip)) {
  17529. - chip->delta_vddmax_mv = 0;
  17530. - qpnp_chg_set_appropriate_vddmax(chip);
  17531. - chip->chg_done = false;
  17532. - }
  17533. - qpnp_chg_usb_suspend_enable(chip, 0);
  17534. - qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
  17535. - chip->prev_usb_max_ma = -EINVAL;
  17536. - chip->aicl_settled = false;
  17537. - wait_muic_event = 1;
  17538. - pr_info("%s disconnected vbus \n",__func__);
  17539. - } else {
  17540. - /* when OVP clamped usbin, and then decrease
  17541. - * the charger voltage to lower than the OVP
  17542. - * threshold, a usbin_valid rising edge
  17543. - * interrupt triggered. So we change the usb
  17544. - * psy health state back to good */
  17545. - if (chip->ovp_monitor_enable &&
  17546. - (chip->usb_valid_check_ovp)) {
  17547. - usbin_health =
  17548. - qpnp_chg_check_usbin_health(chip);
  17549. - if ((chip->usbin_health != usbin_health)
  17550. - && (usbin_health == USBIN_OK)) {
  17551. - chip->usbin_health = usbin_health;
  17552. - psy_health_sts =
  17553. - POWER_SUPPLY_HEALTH_GOOD;
  17554. -
  17555. - #ifdef CONFIG_BATTERY_SAMSUNG
  17556. - value.intval = psy_health_sts;
  17557. - psy_do_property("battery", set,
  17558. - POWER_SUPPLY_PROP_HEALTH, value);
  17559. - #else
  17560. - power_supply_set_health_state(
  17561. - chip->usb_psy,
  17562. - psy_health_sts);
  17563. - power_supply_changed(chip->usb_psy);
  17564. - #endif
  17565. - }
  17566. - }
  17567. -
  17568. - if (!qpnp_chg_is_dc_chg_plugged_in(chip)) {
  17569. - chip->delta_vddmax_mv = 0;
  17570. - qpnp_chg_set_appropriate_vddmax(chip);
  17571. - }
  17572. - wait_muic_event = 0;
  17573. - pr_info("%s connected vbus \n",__func__);
  17574. - #ifndef CONFIG_BATTERY_SAMSUNG
  17575. - schedule_delayed_work(&chip->eoc_work,
  17576. - msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
  17577. - schedule_work(&chip->soc_check_work);
  17578. - #endif
  17579. - }
  17580. - #ifndef CONFIG_BATTERY_SAMSUNG
  17581. - power_supply_set_present(chip->usb_psy, chip->usb_present);
  17582. - schedule_work(&chip->batfet_lcl_work);
  17583. - #endif
  17584. - }
  17585. -
  17586. -}
  17587. -#endif
  17588. -
  17589. #define USB_VALID_DEBOUNCE_TIME_MASK 0x3
  17590. #define USB_DEB_BYPASS 0x0
  17591. #define USB_DEB_5MS 0x1
  17592. @@ -1866,18 +1669,15 @@ qpnp_chg_coarse_det_usb_irq_handler(int irq, void *_chip)
  17593. status to unknown */
  17594. pr_debug("usb coarse det clear, set usb health to unknown\n");
  17595. chip->usbin_health = USBIN_UNKNOW;
  17596. - #ifndef CONFIG_BATTERY_SAMSUNG
  17597. power_supply_set_health_state(chip->usb_psy,
  17598. POWER_SUPPLY_HEALTH_UNKNOWN);
  17599. power_supply_changed(chip->usb_psy);
  17600. - #endif
  17601. }
  17602.  
  17603. }
  17604. return IRQ_HANDLED;
  17605. }
  17606.  
  17607. -#ifndef CONFIG_BATTERY_SAMSUNG
  17608. #define BATFET_LPM_MASK 0xC0
  17609. #define BATFET_LPM 0x40
  17610. #define BATFET_NO_LPM 0x00
  17611. @@ -1902,42 +1702,11 @@ qpnp_chg_regulator_batfet_set(struct qpnp_chg_chip *chip, bool enable)
  17612.  
  17613. return rc;
  17614. }
  17615. -#endif
  17616.  
  17617. #define USB_WALL_THRESHOLD_MA 500
  17618. #define ENUM_T_STOP_BIT BIT(0)
  17619. #define USB_5V_UV 5000000
  17620. #define USB_9V_UV 9000000
  17621. -
  17622. -#ifdef CONFIG_BATTERY_SAMSUNG
  17623. -#define USBIN_VALID_WORK_MS 500
  17624. -
  17625. -static irqreturn_t
  17626. -qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
  17627. -{
  17628. - struct qpnp_chg_chip *chip = _chip;
  17629. - int usb_present, host_mode;
  17630. -
  17631. - usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
  17632. - host_mode = qpnp_chg_is_otg_en_set(chip);
  17633. -#ifdef CONFIG_BATTERY_SAMSUNG
  17634. - pr_err("usbin-valid triggered: %d->%d host_mode: %d\n",
  17635. - chip->usb_present, usb_present, host_mode);
  17636. -#else
  17637. - pr_debug("usbin-valid triggered: %d host_mode: %d\n",
  17638. - usb_present, host_mode);
  17639. -#endif
  17640. -
  17641. - /* In host mode notifications come from USB supply */
  17642. - if (host_mode)
  17643. - return IRQ_HANDLED;
  17644. -
  17645. - schedule_delayed_work(&chip->usbin_valid_work,
  17646. - msecs_to_jiffies(USBIN_VALID_WORK_MS));
  17647. -
  17648. - return IRQ_HANDLED;
  17649. -}
  17650. -#else
  17651. static irqreturn_t
  17652. qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
  17653. {
  17654. @@ -1947,13 +1716,8 @@ qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
  17655.  
  17656. usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
  17657. host_mode = qpnp_chg_is_otg_en_set(chip);
  17658. - #ifdef CONFIG_BATTERY_SAMSUNG
  17659. - pr_err("usbin-valid triggered: %d->%d host_mode: %d\n",
  17660. - chip->usb_present, usb_present, host_mode);
  17661. - #else
  17662. pr_debug("usbin-valid triggered: %d host_mode: %d\n",
  17663. usb_present, host_mode);
  17664. - #endif
  17665.  
  17666. /* In host mode notifications come from USB supply */
  17667. if (host_mode)
  17668. @@ -1977,19 +1741,14 @@ qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
  17669. chip->usbin_health = usbin_health;
  17670. psy_health_sts =
  17671. POWER_SUPPLY_HEALTH_OVERVOLTAGE;
  17672. - #ifndef CONFIG_BATTERY_SAMSUNG
  17673. power_supply_set_health_state(
  17674. chip->usb_psy,
  17675. psy_health_sts);
  17676. power_supply_changed(chip->usb_psy);
  17677. - #endif
  17678. }
  17679. }
  17680. - if (!qpnp_chg_is_dc_chg_plugged_in(chip)) {
  17681. - chip->delta_vddmax_mv = 0;
  17682. - qpnp_chg_set_appropriate_vddmax(chip);
  17683. + if (!qpnp_chg_is_dc_chg_plugged_in(chip))
  17684. chip->chg_done = false;
  17685. - }
  17686.  
  17687. if (!qpnp_is_dc_higher_prio(chip))
  17688. qpnp_chg_idcmax_set(chip, chip->maxinput_dc_ma);
  17689. @@ -2013,36 +1772,25 @@ qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
  17690. chip->usbin_health = usbin_health;
  17691. psy_health_sts =
  17692. POWER_SUPPLY_HEALTH_GOOD;
  17693. - #ifndef CONFIG_BATTERY_SAMSUNG
  17694. power_supply_set_health_state(
  17695. chip->usb_psy,
  17696. psy_health_sts);
  17697. power_supply_changed(chip->usb_psy);
  17698. - #endif
  17699. }
  17700. }
  17701.  
  17702. - if (!qpnp_chg_is_dc_chg_plugged_in(chip)) {
  17703. - chip->delta_vddmax_mv = 0;
  17704. - qpnp_chg_set_appropriate_vddmax(chip);
  17705. - }
  17706. - #ifndef CONFIG_BATTERY_SAMSUNG
  17707. schedule_delayed_work(&chip->eoc_work,
  17708. msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
  17709. schedule_work(&chip->soc_check_work);
  17710. - #endif
  17711. }
  17712. - #ifndef CONFIG_BATTERY_SAMSUNG
  17713. +
  17714. power_supply_set_present(chip->usb_psy, chip->usb_present);
  17715. schedule_work(&chip->batfet_lcl_work);
  17716. - #endif
  17717. }
  17718.  
  17719. return IRQ_HANDLED;
  17720. }
  17721. -#endif
  17722.  
  17723. -#ifndef CONFIG_BATTERY_SAMSUNG
  17724. #define BUCK_VIN_LOOP_CMP_OVRD_MASK 0x30
  17725. static int
  17726. qpnp_chg_bypass_vchg_loop_debouncer(struct qpnp_chg_chip *chip, bool bypass)
  17727. @@ -2083,7 +1831,6 @@ qpnp_chg_vchg_loop_debouncer_setting_get(struct qpnp_chg_chip *chip)
  17728.  
  17729. return value & BUCK_VIN_LOOP_CMP_OVRD_MASK;
  17730. }
  17731. -#endif
  17732.  
  17733. #define TEST_EN_SMBC_LOOP 0xE5
  17734. #define IBAT_REGULATION_DISABLE BIT(2)
  17735. @@ -2117,10 +1864,8 @@ qpnp_chg_bat_if_batt_temp_irq_handler(int irq, void *_chip)
  17736. }
  17737. }
  17738.  
  17739. - #ifndef CONFIG_BATTERY_SAMSUNG
  17740. pr_debug("psy changed batt_psy\n");
  17741. power_supply_changed(&chip->batt_psy);
  17742. - #endif
  17743. return IRQ_HANDLED;
  17744. }
  17745.  
  17746. @@ -2177,12 +1922,11 @@ qpnp_chg_bat_if_batt_pres_irq_handler(int irq, void *_chip)
  17747. qpnp_chg_charge_en(chip, 0);
  17748. }
  17749. chip->batt_present = batt_present;
  17750. - #ifndef CONFIG_BATTERY_SAMSUNG
  17751. pr_debug("psy changed batt_psy\n");
  17752. power_supply_changed(&chip->batt_psy);
  17753. pr_debug("psy changed usb_psy\n");
  17754. power_supply_changed(chip->usb_psy);
  17755. - #endif
  17756. +
  17757. if ((chip->cool_bat_decidegc || chip->warm_bat_decidegc)
  17758. && batt_present) {
  17759. pr_debug("enabling vadc notifications\n");
  17760. @@ -2192,13 +1936,6 @@ qpnp_chg_bat_if_batt_pres_irq_handler(int irq, void *_chip)
  17761. schedule_work(&chip->adc_disable_work);
  17762. pr_debug("disabling vadc notifications\n");
  17763. }
  17764. - #ifdef CONFIG_BATTERY_SAMSUNG
  17765. - {
  17766. - union power_supply_propval val;
  17767. - psy_do_property("battery", set,
  17768. - POWER_SUPPLY_PROP_PRESENT, val);
  17769. - }
  17770. - #endif
  17771. }
  17772.  
  17773. return IRQ_HANDLED;
  17774. @@ -2219,19 +1956,11 @@ qpnp_chg_dc_dcin_valid_irq_handler(int irq, void *_chip)
  17775. qpnp_chg_force_run_on_batt(chip, !dc_present ? 1 : 0);
  17776. if (!dc_present && (!qpnp_chg_is_usb_chg_plugged_in(chip) ||
  17777. qpnp_chg_is_otg_en_set(chip))) {
  17778. - chip->delta_vddmax_mv = 0;
  17779. - qpnp_chg_set_appropriate_vddmax(chip);
  17780. chip->chg_done = false;
  17781. } else {
  17782. - if (!qpnp_chg_is_usb_chg_plugged_in(chip)) {
  17783. - chip->delta_vddmax_mv = 0;
  17784. - qpnp_chg_set_appropriate_vddmax(chip);
  17785. - }
  17786. - #ifndef CONFIG_BATTERY_SAMSUNG
  17787. schedule_delayed_work(&chip->eoc_work,
  17788. msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
  17789. schedule_work(&chip->soc_check_work);
  17790. - #endif
  17791. }
  17792.  
  17793. if (qpnp_is_dc_higher_prio(chip)) {
  17794. @@ -2250,13 +1979,11 @@ qpnp_chg_dc_dcin_valid_irq_handler(int irq, void *_chip)
  17795. }
  17796. }
  17797.  
  17798. - #ifndef CONFIG_BATTERY_SAMSUNG
  17799. pr_debug("psy changed dc_psy\n");
  17800. power_supply_changed(&chip->dc_psy);
  17801. pr_debug("psy changed batt_psy\n");
  17802. power_supply_changed(&chip->batt_psy);
  17803. schedule_work(&chip->batfet_lcl_work);
  17804. - #endif
  17805. }
  17806.  
  17807. return IRQ_HANDLED;
  17808. @@ -2278,14 +2005,12 @@ qpnp_chg_chgr_chg_failed_irq_handler(int irq, void *_chip)
  17809. if (rc)
  17810. pr_err("Failed to write chg_fail clear bit!\n");
  17811.  
  17812. - #ifndef CONFIG_BATTERY_SAMSUNG
  17813. if (chip->bat_if_base) {
  17814. pr_debug("psy changed batt_psy\n");
  17815. power_supply_changed(&chip->batt_psy);
  17816. }
  17817. pr_debug("psy changed usb_psy\n");
  17818. power_supply_changed(chip->usb_psy);
  17819. - #endif
  17820. if (chip->dc_chgpth_base) {
  17821. pr_debug("psy changed dc_psy\n");
  17822. power_supply_changed(&chip->dc_psy);
  17823. @@ -2301,12 +2026,11 @@ qpnp_chg_chgr_chg_trklchg_irq_handler(int irq, void *_chip)
  17824. pr_debug("TRKL IRQ triggered\n");
  17825.  
  17826. chip->chg_done = false;
  17827. - #ifndef CONFIG_BATTERY_SAMSUNG
  17828. if (chip->bat_if_base) {
  17829. pr_debug("psy changed batt_psy\n");
  17830. power_supply_changed(&chip->batt_psy);
  17831. }
  17832. - #endif
  17833. +
  17834. return IRQ_HANDLED;
  17835. }
  17836.  
  17837. @@ -2345,6 +2069,7 @@ bypass_vbatdet_comp(struct qpnp_chg_chip *chip, bool bypass)
  17838. pr_err("Failed to bypass vbatdet comp rc = %d\n", rc);
  17839. return rc;
  17840. }
  17841. +
  17842. return rc;
  17843. }
  17844.  
  17845. @@ -2360,13 +2085,10 @@ qpnp_chg_chgr_chg_fastchg_irq_handler(int irq, void *_chip)
  17846.  
  17847. if (chip->fastchg_on ^ fastchg_on) {
  17848. chip->fastchg_on = fastchg_on;
  17849. -
  17850. - #ifndef CONFIG_BATTERY_SAMSUNG
  17851. if (chip->bat_if_base) {
  17852. pr_debug("psy changed batt_psy\n");
  17853. power_supply_changed(&chip->batt_psy);
  17854. }
  17855. - #endif
  17856.  
  17857. pr_debug("psy changed usb_psy\n");
  17858. power_supply_changed(chip->usb_psy);
  17859. @@ -2389,14 +2111,11 @@ qpnp_chg_chgr_chg_fastchg_irq_handler(int irq, void *_chip)
  17860. qpnp_chg_set_appropriate_vbatdet(chip);
  17861. }
  17862.  
  17863. - #ifndef CONFIG_BATTERY_SAMSUNG
  17864. if (!chip->charging_disabled) {
  17865. schedule_delayed_work(&chip->eoc_work,
  17866. msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
  17867. pm_stay_awake(chip->dev);
  17868. }
  17869. - #endif
  17870. -
  17871. if (chip->parallel_ovp_mode)
  17872. switch_parallel_ovp_mode(chip, 1);
  17873.  
  17874. @@ -2414,9 +2133,7 @@ qpnp_chg_chgr_chg_fastchg_irq_handler(int irq, void *_chip)
  17875. }
  17876. }
  17877.  
  17878. - #ifndef CONFIG_BATTERY_SAMSUNG
  17879. qpnp_chg_enable_irq(&chip->chg_vbatdet_lo);
  17880. - #endif
  17881.  
  17882. return IRQ_HANDLED;
  17883. }
  17884. @@ -2435,7 +2152,6 @@ qpnp_dc_property_is_writeable(struct power_supply *psy,
  17885. return 0;
  17886. }
  17887.  
  17888. -#ifndef CONFIG_BATTERY_SAMSUNG
  17889. static int
  17890. qpnp_batt_property_is_writeable(struct power_supply *psy,
  17891. enum power_supply_property psp)
  17892. @@ -2481,7 +2197,6 @@ qpnp_chg_buck_control(struct qpnp_chg_chip *chip, int enable)
  17893.  
  17894. return rc;
  17895. }
  17896. -#endif
  17897.  
  17898. static int
  17899. switch_usb_to_charge_mode(struct qpnp_chg_chip *chip)
  17900. @@ -2597,7 +2312,6 @@ static enum power_supply_property pm_power_props_mains[] = {
  17901. POWER_SUPPLY_PROP_CURRENT_MAX,
  17902. };
  17903.  
  17904. -#ifndef CONFIG_BATTERY_SAMSUNG
  17905. static enum power_supply_property msm_batt_power_props[] = {
  17906. POWER_SUPPLY_PROP_CHARGING_ENABLED,
  17907. POWER_SUPPLY_PROP_STATUS,
  17908. @@ -2626,17 +2340,14 @@ static enum power_supply_property msm_batt_power_props[] = {
  17909. POWER_SUPPLY_PROP_CYCLE_COUNT,
  17910. POWER_SUPPLY_PROP_VOLTAGE_OCV,
  17911. };
  17912. -#endif
  17913.  
  17914. static char *pm_power_supplied_to[] = {
  17915. "battery",
  17916. };
  17917.  
  17918. -#ifndef CONFIG_BATTERY_SAMSUNG
  17919. static char *pm_batt_supplied_to[] = {
  17920. "bms",
  17921. };
  17922. -#endif
  17923.  
  17924. static int charger_monitor;
  17925. module_param(charger_monitor, int, 0644);
  17926. @@ -2687,7 +2398,6 @@ qpnp_aicl_check_work(struct work_struct *work)
  17927. ret.intval / 1000);
  17928. qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
  17929. }
  17930. - pr_err("charger_monitor is absent\n");
  17931. } else {
  17932. pr_debug("charger_monitor is present\n");
  17933. }
  17934. @@ -2847,7 +2557,6 @@ get_prop_current_now(struct qpnp_chg_chip *chip)
  17935. return 0;
  17936. }
  17937.  
  17938. -#ifndef CONFIG_BATTERY_SAMSUNG
  17939. static int
  17940. get_prop_full_design(struct qpnp_chg_chip *chip)
  17941. {
  17942. @@ -2879,9 +2588,7 @@ get_prop_charge_full(struct qpnp_chg_chip *chip)
  17943.  
  17944. return 0;
  17945. }
  17946. -#endif
  17947.  
  17948. -#ifndef CONFIG_BATTERY_SAMSUNG
  17949. static int
  17950. get_prop_capacity(struct qpnp_chg_chip *chip)
  17951. {
  17952. @@ -2934,7 +2641,6 @@ get_prop_capacity(struct qpnp_chg_chip *chip)
  17953. * from shutting down unnecessarily */
  17954. return DEFAULT_CAPACITY;
  17955. }
  17956. -#endif
  17957.  
  17958. #define DEFAULT_TEMP 250
  17959. #define MAX_TOLERABLE_BATT_TEMP_DDC 680
  17960. @@ -2958,7 +2664,6 @@ get_prop_batt_temp(struct qpnp_chg_chip *chip)
  17961. return (int)results.physical;
  17962. }
  17963.  
  17964. -#ifndef CONFIG_BATTERY_SAMSUNG
  17965. static int get_prop_cycle_count(struct qpnp_chg_chip *chip)
  17966. {
  17967. union power_supply_propval ret = {0,};
  17968. @@ -2968,7 +2673,6 @@ static int get_prop_cycle_count(struct qpnp_chg_chip *chip)
  17969. POWER_SUPPLY_PROP_CYCLE_COUNT, &ret);
  17970. return ret.intval;
  17971. }
  17972. -#endif
  17973.  
  17974. static int get_prop_vchg_loop(struct qpnp_chg_chip *chip)
  17975. {
  17976. @@ -2992,7 +2696,6 @@ static int get_prop_online(struct qpnp_chg_chip *chip)
  17977. return qpnp_chg_is_batfet_closed(chip);
  17978. }
  17979.  
  17980. -#ifndef CONFIG_BATTERY_SAMSUNG
  17981. static void
  17982. qpnp_batt_external_power_changed(struct power_supply *psy)
  17983. {
  17984. @@ -3039,7 +2742,8 @@ qpnp_batt_external_power_changed(struct power_supply *psy)
  17985. OVP_USB_WALL_TRSH_MA);
  17986. } else if (unlikely(
  17987. ext_ovp_isns_present)) {
  17988. - qpnp_chg_iusb_trim_set(chip, 0);
  17989. + qpnp_chg_iusb_trim_set(chip,
  17990. + chip->usb_trim_default);
  17991. qpnp_chg_iusbmax_set(chip,
  17992. IOVP_USB_WALL_TRSH_MA);
  17993. } else {
  17994. @@ -3162,7 +2866,6 @@ qpnp_batt_power_get_property(struct power_supply *psy,
  17995.  
  17996. return 0;
  17997. }
  17998. -#endif
  17999.  
  18000. #define BTC_CONFIG_ENABLED BIT(7)
  18001. #define BTC_COLD BIT(1)
  18002. @@ -3229,14 +2932,8 @@ qpnp_chg_ibatterm_set(struct qpnp_chg_chip *chip, int term_current)
  18003.  
  18004. if (term_current < QPNP_CHG_ITERM_MIN_MA
  18005. || term_current > QPNP_CHG_ITERM_MAX_MA) {
  18006. - #ifdef CONFIG_BATTERY_SAMSUNG
  18007. - pr_err("bad mA=%d asked to set, so changed to %dmA\n",
  18008. - term_current, QPNP_CHG_ITERM_MIN_MA);
  18009. - term_current = QPNP_CHG_ITERM_MIN_MA;
  18010. - #else
  18011. pr_err("bad mA=%d asked to set\n", term_current);
  18012. return -EINVAL;
  18013. - #endif
  18014. }
  18015.  
  18016. temp = (term_current - QPNP_CHG_ITERM_MIN_MA)
  18017. @@ -3259,9 +2956,6 @@ qpnp_chg_ibatmax_set(struct qpnp_chg_chip *chip, int chg_current)
  18018. return -EINVAL;
  18019. }
  18020. temp = chg_current / QPNP_CHG_I_STEP_MA;
  18021. - #ifdef CONFIG_BATTERY_SAMSUNG
  18022. - pr_info("current=%d setting 0x%x\n", chg_current, temp);
  18023. - #endif
  18024. return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_IBAT_MAX,
  18025. QPNP_CHG_I_MASK, temp, 1);
  18026. }
  18027. @@ -3395,6 +3089,17 @@ qpnp_chg_trim_ibat(struct qpnp_chg_chip *chip, u8 ibat_trim)
  18028. IBAT_TRIM_HIGH_LIM))
  18029. return;
  18030. }
  18031. +
  18032. + if (chip->type == SMBBP) {
  18033. + rc = qpnp_chg_masked_write(chip,
  18034. + chip->buck_base + SEC_ACCESS,
  18035. + 0xFF, 0xA5, 1);
  18036. + if (rc) {
  18037. + pr_err("failed to write SEC_ACCESS: %d\n", rc);
  18038. + return;
  18039. + }
  18040. + }
  18041. +
  18042. ibat_trim |= IBAT_TRIM_GOOD_BIT;
  18043. rc = qpnp_chg_write(chip, &ibat_trim,
  18044. chip->buck_base + BUCK_CTRL_TRIM3, 1);
  18045. @@ -3430,7 +3135,7 @@ qpnp_chg_input_current_settled(struct qpnp_chg_chip *chip)
  18046. if (!chip->ibat_calibration_enabled)
  18047. return 0;
  18048.  
  18049. - if (chip->type != SMBB)
  18050. + if (chip->type != SMBB && chip->type != SMBBP)
  18051. return 0;
  18052.  
  18053. rc = qpnp_chg_read(chip, &reg,
  18054. @@ -3450,6 +3155,17 @@ qpnp_chg_input_current_settled(struct qpnp_chg_chip *chip)
  18055. pr_debug("Improper ibat_trim value=%x setting to value=%x\n",
  18056. ibat_trim, IBAT_TRIM_MEAN);
  18057. ibat_trim = IBAT_TRIM_MEAN;
  18058. +
  18059. + if (chip->type == SMBBP) {
  18060. + rc = qpnp_chg_masked_write(chip,
  18061. + chip->buck_base + SEC_ACCESS,
  18062. + 0xFF, 0xA5, 1);
  18063. + if (rc) {
  18064. + pr_err("failed to write SEC_ACCESS: %d\n", rc);
  18065. + return rc;
  18066. + }
  18067. + }
  18068. +
  18069. rc = qpnp_chg_masked_write(chip,
  18070. chip->buck_base + BUCK_CTRL_TRIM3,
  18071. IBAT_TRIM_OFFSET_MASK, ibat_trim, 1);
  18072. @@ -3513,6 +3229,7 @@ qpnp_chg_input_current_settled(struct qpnp_chg_chip *chip)
  18073. return rc;
  18074. }
  18075.  
  18076. +
  18077. #define BOOST_MIN_UV 4200000
  18078. #define BOOST_MAX_UV 5500000
  18079. #define BOOST_STEP_UV 50000
  18080. @@ -3555,7 +3272,6 @@ qpnp_boost_vget_uv(struct qpnp_chg_chip *chip)
  18081. return BOOST_MIN_UV + ((boost_reg - BOOST_MIN) * BOOST_STEP_UV);
  18082. }
  18083.  
  18084. -#ifndef CONFIG_BATTERY_SAMSUNG
  18085. static void
  18086. qpnp_batt_system_temp_level_set(struct qpnp_chg_chip *chip, int lvl_sel)
  18087. {
  18088. @@ -3572,7 +3288,6 @@ qpnp_batt_system_temp_level_set(struct qpnp_chg_chip *chip, int lvl_sel)
  18089. pr_err("Unsupported level selected %d\n", lvl_sel);
  18090. }
  18091. }
  18092. -#endif
  18093.  
  18094. /*
  18095. * Increase the SMBB/SMBBP charger overtemp threshold to 150C while firing
  18096. @@ -3714,13 +3429,12 @@ qpnp_chg_regulator_boost_enable(struct regulator_dev *rdev)
  18097. /*
  18098. * update battery status when charger is connected and state is full
  18099. */
  18100. - #ifndef CONFIG_BATTERY_SAMSUNG
  18101. if (usb_present && (chip->chg_done
  18102. || (get_batt_capacity(chip) == 100)
  18103. || (get_prop_batt_status(chip) ==
  18104. POWER_SUPPLY_STATUS_FULL)))
  18105. power_supply_changed(&chip->batt_psy);
  18106. - #endif
  18107. +
  18108. return rc;
  18109. }
  18110.  
  18111. @@ -3823,12 +3537,9 @@ qpnp_chg_regulator_boost_disable(struct regulator_dev *rdev)
  18112. chip->chg_done = false;
  18113. chip->resuming_charging = true;
  18114. qpnp_chg_set_appropriate_vbatdet(chip);
  18115. - }
  18116. - #ifndef CONFIG_BATTERY_SAMSUNG
  18117. - else if (chip->chg_done) {
  18118. + } else if (chip->chg_done) {
  18119. power_supply_changed(&chip->batt_psy);
  18120. }
  18121. - #endif
  18122. }
  18123.  
  18124. if (ext_ovp_isns_present && chip->ext_ovp_ic_gpio_enabled) {
  18125. @@ -3914,8 +3625,6 @@ static struct regulator_ops qpnp_chg_boost_reg_ops = {
  18126. .list_voltage = qpnp_chg_regulator_boost_list_voltage,
  18127. };
  18128.  
  18129. -#define VBATDET_MAX_ERR_MV 50
  18130. -#ifndef CONFIG_BATTERY_SAMSUNG
  18131. static int
  18132. qpnp_chg_bat_if_batfet_reg_enabled(struct qpnp_chg_chip *chip)
  18133. {
  18134. @@ -3998,6 +3707,32 @@ static struct regulator_ops qpnp_chg_batfet_vreg_ops = {
  18135. .is_enabled = qpnp_chg_regulator_batfet_is_enabled,
  18136. };
  18137.  
  18138. +#define MIN_DELTA_MV_TO_INCREASE_VDD_MAX 8
  18139. +#define MAX_DELTA_VDD_MAX_MV 80
  18140. +#define VDD_MAX_CENTER_OFFSET 4
  18141. +static void
  18142. +qpnp_chg_adjust_vddmax(struct qpnp_chg_chip *chip, int vbat_mv)
  18143. +{
  18144. + int delta_mv, closest_delta_mv, sign;
  18145. +
  18146. + delta_mv = chip->max_voltage_mv - VDD_MAX_CENTER_OFFSET - vbat_mv;
  18147. + if (delta_mv > 0 && delta_mv < MIN_DELTA_MV_TO_INCREASE_VDD_MAX) {
  18148. + pr_debug("vbat is not low enough to increase vdd\n");
  18149. + return;
  18150. + }
  18151. +
  18152. + sign = delta_mv > 0 ? 1 : -1;
  18153. + closest_delta_mv = ((delta_mv + sign * QPNP_CHG_BUCK_TRIM1_STEP / 2)
  18154. + / QPNP_CHG_BUCK_TRIM1_STEP) * QPNP_CHG_BUCK_TRIM1_STEP;
  18155. + pr_debug("max_voltage = %d, vbat_mv = %d, delta_mv = %d, closest = %d\n",
  18156. + chip->max_voltage_mv, vbat_mv,
  18157. + delta_mv, closest_delta_mv);
  18158. + chip->delta_vddmax_mv = clamp(chip->delta_vddmax_mv + closest_delta_mv,
  18159. + -MAX_DELTA_VDD_MAX_MV, MAX_DELTA_VDD_MAX_MV);
  18160. + pr_debug("using delta_vddmax_mv = %d\n", chip->delta_vddmax_mv);
  18161. + qpnp_chg_set_appropriate_vddmax(chip);
  18162. +}
  18163. +
  18164. #define CONSECUTIVE_COUNT 3
  18165. #define VBATDET_MAX_ERR_MV 50
  18166. static void
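
The qpnp_chg_adjust_vddmax() routine re-added in the hunk above rounds the measured shortfall between the target and the actual battery voltage to the nearest buck trim step and folds it into a clamped running correction. As a worked example, assuming QPNP_CHG_BUCK_TRIM1_STEP is 10 mV (the value used elsewhere in this driver): with max_voltage_mv = 4350 and vbat_mv = 4325, delta_mv = 4350 - 4 - 4325 = 21, which clears MIN_DELTA_MV_TO_INCREASE_VDD_MAX (8), rounds to closest_delta_mv = 20, and raises delta_vddmax_mv by 20 mV subject to the +/-80 mV clamp before qpnp_chg_set_appropriate_vddmax() reprograms the charger.
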
  18167. @@ -4094,8 +3829,6 @@ qpnp_eoc_work(struct work_struct *work)
  18168. ? "cool" : "warm",
  18169. qpnp_chg_vddmax_get(chip));
  18170. }
  18171. - chip->delta_vddmax_mv = 0;
  18172. - qpnp_chg_set_appropriate_vddmax(chip);
  18173. qpnp_chg_charge_en(chip, 0);
  18174. /* sleep for a second before enabling */
  18175. msleep(2000);
  18176. @@ -4125,7 +3858,6 @@ stop_eoc:
  18177. count = 0;
  18178. pm_relax(chip->dev);
  18179. }
  18180. -#endif
  18181.  
  18182. static void
  18183. qpnp_chg_insertion_ocv_work(struct work_struct *work)
  18184. @@ -4149,13 +3881,10 @@ qpnp_chg_insertion_ocv_work(struct work_struct *work)
  18185. pr_debug("batfet sts = %02x, charge_en = %02x ocv = %d\n",
  18186. bat_if_sts, charge_en, chip->insertion_ocv_uv);
  18187. qpnp_chg_charge_en(chip, !chip->charging_disabled);
  18188. - #ifndef CONFIG_BATTERY_SAMSUNG
  18189. pr_debug("psy changed batt_psy\n");
  18190. power_supply_changed(&chip->batt_psy);
  18191. - #endif
  18192. }
  18193.  
  18194. -#ifndef CONFIG_BATTERY_SAMSUNG
  18195. static void
  18196. qpnp_chg_soc_check_work(struct work_struct *work)
  18197. {
  18198. @@ -4164,7 +3893,6 @@ qpnp_chg_soc_check_work(struct work_struct *work)
  18199.  
  18200. get_prop_capacity(chip);
  18201. }
  18202. -#endif
  18203.  
  18204. #define HYSTERISIS_DECIDEGC 20
  18205. static void
  18206. @@ -4266,7 +3994,6 @@ qpnp_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
  18207. pr_err("request ADC error\n");
  18208. }
  18209.  
  18210. -#ifndef CONFIG_BATTERY_SAMSUNG
  18211. #define MIN_COOL_TEMP -300
  18212. #define MAX_WARM_TEMP 1000
  18213.  
  18214. @@ -4328,7 +4055,6 @@ mutex_unlock:
  18215. mutex_unlock(&chip->jeita_configure_lock);
  18216. return rc;
  18217. }
  18218. -#endif
  18219.  
  18220. #define POWER_STAGE_REDUCE_CHECK_PERIOD_SECONDS 20
  18221. #define POWER_STAGE_REDUCE_MAX_VBAT_UV 3900000
  18222. @@ -4505,7 +4231,6 @@ qpnp_chg_reduce_power_stage(struct qpnp_chg_chip *chip)
  18223. }
  18224. }
  18225.  
  18226. -#ifndef CONFIG_BATTERY_SAMSUNG
  18227. static void
  18228. qpnp_chg_batfet_lcl_work(struct work_struct *work)
  18229. {
  18230. @@ -4524,7 +4249,6 @@ qpnp_chg_batfet_lcl_work(struct work_struct *work)
  18231. }
  18232. mutex_unlock(&chip->batfet_vreg_lock);
  18233. }
  18234. -#endif
  18235.  
  18236. static void
  18237. qpnp_chg_reduce_power_stage_work(struct work_struct *work)
  18238. @@ -4575,7 +4299,6 @@ qpnp_dc_power_set_property(struct power_supply *psy,
  18239. return rc;
  18240. }
  18241.  
  18242. -#ifndef CONFIG_BATTERY_SAMSUNG
  18243. static int
  18244. qpnp_batt_power_set_property(struct power_supply *psy,
  18245. enum power_supply_property psp,
  18246. @@ -4642,7 +4365,6 @@ qpnp_batt_power_set_property(struct power_supply *psy,
  18247. power_supply_changed(&chip->batt_psy);
  18248. return rc;
  18249. }
  18250. -#endif
  18251.  
  18252. static int
  18253. qpnp_chg_setup_flags(struct qpnp_chg_chip *chip)
  18254. @@ -4679,25 +4401,6 @@ qpnp_chg_setup_flags(struct qpnp_chg_chip *chip)
  18255. return 0;
  18256. }
  18257.  
  18258. -static void
  18259. -sec_qpnp_chg_check_vddmax(struct qpnp_chg_chip *chip)
  18260. -{
  18261. - int rc;
  18262. - u8 buck_sts = 0;
  18263. - unsigned int batt_voltage;
  18264. -
  18265. - pr_info("%s \n",__func__);
  18266. - batt_voltage = get_prop_battery_voltage_now(chip) / 1000;
  18267. - rc = qpnp_chg_read(chip, &buck_sts, INT_RT_STS(chip->buck_base), 1);
  18268. - if (!rc) {
  18269. - if (buck_sts & VDD_LOOP_IRQ) {
  18270. - qpnp_chg_adjust_vddmax(chip, batt_voltage);
  18271. - }
  18272. - } else {
  18273. - pr_err("failed to read buck rc=%d\n", rc);
  18274. - }
  18275. -}
  18276. -
  18277. static int
  18278. qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
  18279. {
  18280. @@ -4753,14 +4456,13 @@ qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
  18281. return rc;
  18282. }
  18283.  
  18284. - #ifndef CONFIG_BATTERY_SAMSUNG
  18285. chip->chg_vbatdet_lo.irq = spmi_get_irq_byname(spmi,
  18286. spmi_resource, "vbat-det-lo");
  18287. if (chip->chg_vbatdet_lo.irq < 0) {
  18288. pr_err("Unable to get fast-chg-on irq\n");
  18289. return rc;
  18290. }
  18291. - #endif
  18292. +
  18293. rc |= devm_request_irq(chip->dev, chip->chg_failed.irq,
  18294. qpnp_chg_chgr_chg_failed_irq_handler,
  18295. IRQF_TRIGGER_RISING, "chg-failed", chip);
  18296. @@ -4791,7 +4493,6 @@ qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
  18297. return rc;
  18298. }
  18299.  
  18300. - #ifndef CONFIG_BATTERY_SAMSUNG
  18301. rc |= devm_request_irq(chip->dev,
  18302. chip->chg_vbatdet_lo.irq,
  18303. qpnp_chg_vbatdet_lo_irq_handler,
  18304. @@ -4802,15 +4503,13 @@ qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
  18305. chip->chg_vbatdet_lo.irq, rc);
  18306. return rc;
  18307. }
  18308. - #endif
  18309. +
  18310. qpnp_chg_irq_wake_enable(&chip->chg_trklchg);
  18311. qpnp_chg_irq_wake_enable(&chip->chg_failed);
  18312. - #ifndef CONFIG_BATTERY_SAMSUNG
  18313. - qpnp_chg_disable_irq(&chip->chg_vbatdet_lo);
  18314. qpnp_chg_irq_wake_enable(&chip->chg_vbatdet_lo);
  18315. - #endif
  18316. -
  18317. + qpnp_chg_disable_irq(&chip->chg_vbatdet_lo);
  18318. break;
  18319. +
  18320. case SMBB_BAT_IF_SUBTYPE:
  18321. case SMBBP_BAT_IF_SUBTYPE:
  18322. case SMBCL_BAT_IF_SUBTYPE:
  18323. @@ -4963,7 +4662,6 @@ qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
  18324. return rc;
  18325. }
  18326.  
  18327. -#ifndef CONFIG_BATTERY_SAMSUNG
  18328. static int
  18329. qpnp_chg_load_battery_data(struct qpnp_chg_chip *chip)
  18330. {
  18331. @@ -5003,7 +4701,6 @@ qpnp_chg_load_battery_data(struct qpnp_chg_chip *chip)
  18332.  
  18333. return 0;
  18334. }
  18335. -#endif
  18336.  
  18337. #define WDOG_EN_BIT BIT(7)
  18338. static int
  18339. @@ -5159,21 +4856,10 @@ qpnp_chg_hwinit(struct qpnp_chg_chip *chip, u8 subtype,
  18340. reg = BAT_THM_EN;
  18341. break;
  18342. case BPD_TYPE_BAT_ID:
  18343. -#if defined(CONFIG_USB_SWITCH_RT8973)
  18344. - if (rt_check_jig_state() || rt_uart_connecting)
  18345. - reg = !(BAT_ID_EN);
  18346. - else
  18347. -#endif
  18348. - reg = BAT_ID_EN;
  18349. + reg = BAT_ID_EN;
  18350. break;
  18351. case BPD_TYPE_BAT_THM_BAT_ID:
  18352. -#if defined(CONFIG_USB_SWITCH_RT8973)
  18353. - if (rt_check_jig_state() || rt_uart_connecting)
  18354. - reg = !(BAT_ID_EN);
  18355. - else
  18356. -#endif
  18357. - reg = BAT_THM_EN | BAT_ID_EN;
  18358. -
  18359. + reg = BAT_THM_EN | BAT_ID_EN;
  18360. break;
  18361. default:
  18362. reg = BAT_THM_EN;
  18363. @@ -5198,7 +4884,6 @@ qpnp_chg_hwinit(struct qpnp_chg_chip *chip, u8 subtype,
  18364. return rc;
  18365. }
  18366.  
  18367. - #ifndef CONFIG_BATTERY_SAMSUNG
  18368. init_data = of_get_regulator_init_data(chip->dev,
  18369. spmi_resource->of_node);
  18370.  
  18371. @@ -5224,7 +4909,6 @@ qpnp_chg_hwinit(struct qpnp_chg_chip *chip, u8 subtype,
  18372. return rc;
  18373. }
  18374. }
  18375. - #endif
  18376. break;
  18377. case SMBB_USB_CHGPTH_SUBTYPE:
  18378. case SMBBP_USB_CHGPTH_SUBTYPE:
  18379. @@ -5533,338 +5217,6 @@ qpnp_charger_read_dt_props(struct qpnp_chg_chip *chip)
  18380. return rc;
  18381. }
  18382.  
  18383. -#ifdef CONFIG_BATTERY_SAMSUNG
  18384. -#define CHG_ON 1
  18385. -#define CHG_OFF 0
  18386. -#define INPUT_ON 0
  18387. -#define INPUT_OFF 1
  18388. -static void
  18389. -sec_qpnp_chg_control(struct qpnp_chg_chip *chip,
  18390. - int chg_en, int input_en)
  18391. -{
  18392. - pr_info("chg_en : %d, input_en : %d\n", chg_en, input_en);
  18393. - qpnp_chg_usb_suspend_enable(chip, input_en);
  18394. - qpnp_chg_charge_en(chip, chg_en);
  18395. - qpnp_chg_force_run_on_batt(chip, input_en);
  18396. -}
  18397. -
  18398. -static enum power_supply_property sec_qpnp_chg_props[] = {
  18399. - POWER_SUPPLY_PROP_STATUS,
  18400. - POWER_SUPPLY_PROP_CHARGE_TYPE,
  18401. - POWER_SUPPLY_PROP_HEALTH,
  18402. - POWER_SUPPLY_PROP_ONLINE,
  18403. - POWER_SUPPLY_PROP_CURRENT_MAX,
  18404. - POWER_SUPPLY_PROP_CURRENT_AVG,
  18405. - POWER_SUPPLY_PROP_CURRENT_NOW,
  18406. - POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
  18407. - POWER_SUPPLY_PROP_BATFET,
  18408. - POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
  18409. - POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
  18410. - POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
  18411. - POWER_SUPPLY_PROP_VOLTAGE_MIN,
  18412. - POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
  18413. -};
  18414. -
  18415. -static int
  18416. -sec_qpnp_chg_property_is_writeable(struct power_supply *psy,
  18417. - enum power_supply_property psp)
  18418. -{
  18419. - switch (psp) {
  18420. - case POWER_SUPPLY_PROP_CHARGING_ENABLED:
  18421. - case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
  18422. - case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
  18423. - case POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM:
  18424. - case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
  18425. - case POWER_SUPPLY_PROP_VOLTAGE_MIN:
  18426. - case POWER_SUPPLY_PROP_COOL_TEMP:
  18427. - case POWER_SUPPLY_PROP_WARM_TEMP:
  18428. - case POWER_SUPPLY_PROP_CAPACITY:
  18429. - return 1;
  18430. - default:
  18431. - break;
  18432. - }
  18433. -
  18434. - return 0;
  18435. -}
  18436. -
  18437. -static int
  18438. -sec_qpnp_chg_get_property(struct power_supply *psy,
  18439. - enum power_supply_property psp,
  18440. - union power_supply_propval *val)
  18441. -{
  18442. - struct sec_charger_info *charger =
  18443. - container_of(psy, struct sec_charger_info, psy_chg);
  18444. - struct qpnp_chg_chip *chip = charger->chip;
  18445. - int ret;
  18446. -
  18447. - switch (psp) {
  18448. - case POWER_SUPPLY_PROP_ONLINE:
  18449. - val->intval = charger->cable_type;
  18450. - break;
  18451. - case POWER_SUPPLY_PROP_STATUS:
  18452. - val->intval = get_prop_batt_status(chip);
  18453. - break;
  18454. - case POWER_SUPPLY_PROP_HEALTH:
  18455. - ret = qpnp_chg_check_usbin_health(chip);
  18456. - if (ret == USBIN_OVP)
  18457. - val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
  18458. - else if (ret == USBIN_UNKNOW && wait_muic_event)
  18459. - val->intval = POWER_SUPPLY_HEALTH_UNDERVOLTAGE;
  18460. - else
  18461. - val->intval = POWER_SUPPLY_HEALTH_GOOD;
  18462. - break;
  18463. - case POWER_SUPPLY_PROP_CURRENT_MAX:
  18464. - sec_qpnp_chg_check_vddmax(chip);
  18465. - val->intval = charger->charging_current_max;
  18466. - break;
  18467. - case POWER_SUPPLY_PROP_CURRENT_AVG:
  18468. - val->intval = charger->charging_current;
  18469. - break;
  18470. - case POWER_SUPPLY_PROP_CURRENT_NOW:
  18471. - val->intval = get_prop_current_now(chip);
  18472. - break;
  18473. - case POWER_SUPPLY_PROP_CHARGE_TYPE:
  18474. - val->intval = get_prop_charge_type(chip);
  18475. - break;
  18476. - case POWER_SUPPLY_PROP_PRESENT:
  18477. - val->intval = qpnp_chg_is_batt_present(chip);
  18478. - break;
  18479. - case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
  18480. - break;
  18481. - case POWER_SUPPLY_PROP_BATFET:
  18482. - val->intval = get_prop_online(chip);
  18483. - break;
  18484. - case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
  18485. - val->intval = get_prop_vchg_loop(chip);
  18486. - break;
  18487. - case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
  18488. - val->intval = qpnp_chg_usb_iusbmax_get(chip) * 1000;
  18489. - break;
  18490. - case POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM:
  18491. - val->intval = qpnp_chg_iusb_trim_get(chip);
  18492. - break;
  18493. - case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
  18494. - val->intval = chip->aicl_settled;
  18495. - break;
  18496. - case POWER_SUPPLY_PROP_VOLTAGE_MIN:
  18497. - val->intval = qpnp_chg_vinmin_get(chip) * 1000;
  18498. - break;
  18499. - default:
  18500. - return -EINVAL;
  18501. - }
  18502. -
  18503. - return 0;
  18504. -}
  18505. -
  18506. -static int
  18507. -sec_qpnp_chg_set_property(struct power_supply *psy,
  18508. - enum power_supply_property psp,
  18509. - const union power_supply_propval *val)
  18510. -{
  18511. - struct sec_charger_info *charger =
  18512. - container_of(psy, struct sec_charger_info, psy_chg);
  18513. - struct qpnp_chg_chip *chip = charger->chip;
  18514. - union power_supply_propval value;
  18515. - int set_charging_current, set_charging_current_max;
  18516. -
  18517. - switch (psp) {
  18518. - case POWER_SUPPLY_PROP_STATUS:
  18519. - charger->status = val->intval;
  18520. - break;
  18521. - /* val->intval : type */
  18522. - case POWER_SUPPLY_PROP_ONLINE:
  18523. - charger->cable_type = val->intval;
  18524. - psy_do_property("battery", get,
  18525. - POWER_SUPPLY_PROP_HEALTH, value);
  18526. -
  18527. - if (val->intval == POWER_SUPPLY_TYPE_BATTERY || \
  18528. - val->intval == POWER_SUPPLY_TYPE_OTG) {
  18529. - charger->is_charging = false;
  18530. - set_charging_current = 0;
  18531. - set_charging_current_max =
  18532. - charger->pdata->charging_current[
  18533. - POWER_SUPPLY_TYPE_USB].input_current_limit;
  18534. - if (value.intval == POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) {
  18535. - sec_qpnp_chg_control(chip, CHG_OFF, INPUT_OFF);
  18536. - } else {
  18537. - sec_qpnp_chg_control(chip, CHG_OFF, INPUT_ON);
  18538. - }
  18539. - } else {
  18540. - charger->is_charging = true;
  18541. - charger->charging_current_max =
  18542. - charger->pdata->charging_current
  18543. - [charger->cable_type].input_current_limit;
  18544. - charger->charging_current =
  18545. - charger->pdata->charging_current
  18546. - [charger->cable_type].fast_charging_current;
  18547. - set_charging_current_max =
  18548. - charger->charging_current_max;
  18549. - set_charging_current =
  18550. - charger->charging_current * charger->siop_level / 100;
  18551. -
  18552. - if ((charger->status == POWER_SUPPLY_STATUS_CHARGING) ||
  18553. - (charger->status == POWER_SUPPLY_STATUS_DISCHARGING) ||
  18554. - (value.intval == POWER_SUPPLY_HEALTH_UNSPEC_FAILURE)) {
  18555. -
  18556. - if (value.intval == POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) {
  18557. - sec_qpnp_chg_control(chip, CHG_OFF, INPUT_OFF);
  18558. - } else {
  18559. - sec_qpnp_chg_control(chip, CHG_ON, INPUT_ON);
  18560. -
  18561. - /* set USB_WALL_THRESHOLD_MA for working charger_monitor */
  18562. - if (charger_monitor || !chip->charger_monitor_checked)
  18563. - qpnp_chg_iusbmax_set(chip, USB_WALL_THRESHOLD_MA);
  18564. - else
  18565. - qpnp_chg_iusbmax_set(chip, charger->charging_current_max);
  18566. - psy_do_property("ac", get, POWER_SUPPLY_PROP_ONLINE, value);
  18567. - if (value.intval)
  18568. - qpnp_chg_iusb_trim_set(chip, 48);
  18569. - else
  18570. - qpnp_chg_iusb_trim_set(chip, 40);
  18571. -
  18572. - if (charger->siop_level == 100)
  18573. - qpnp_chg_ibatmax_set(chip, 2000);
  18574. - else
  18575. - qpnp_chg_ibatmax_set(chip, set_charging_current);
  18576. - }
  18577. - } else {
  18578. - sec_qpnp_chg_control(chip, CHG_ON, INPUT_ON);
  18579. - }
  18580. - }
  18581. - break;
  18582. - /* val->intval : input charging current */
  18583. - case POWER_SUPPLY_PROP_CURRENT_MAX:
  18584. - break;
  18585. - /* val->intval : charging current */
  18586. - case POWER_SUPPLY_PROP_CURRENT_AVG:
  18587. - break;
  18588. - case POWER_SUPPLY_PROP_CURRENT_NOW:
  18589. - break;
  18590. - case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
  18591. - charger->siop_level = val->intval;
  18592. - if (charger->is_charging) {
  18593. - /* decrease the charging current according to siop level */
  18594. - if (charger->siop_level == 100)
  18595. - qpnp_chg_ibatmax_set(chip, 2000);
  18596. - else {
  18597. - int current_now =
  18598. - charger->charging_current * charger->siop_level / 100;
  18599. - qpnp_chg_ibatmax_set(chip, current_now);
  18600. - }
  18601. - }
  18602. - break;
  18603. - case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
  18604. - if (qpnp_chg_is_usb_chg_plugged_in(chip))
  18605. - qpnp_chg_iusbmax_set(chip, val->intval / 1000);
  18606. - break;
  18607. - case POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM:
  18608. - qpnp_chg_iusb_trim_set(chip, val->intval);
  18609. - break;
  18610. - case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
  18611. - qpnp_chg_input_current_settled(chip);
  18612. - break;
  18613. - case POWER_SUPPLY_PROP_VOLTAGE_MIN:
  18614. - qpnp_chg_vinmin_set(chip, val->intval / 1000);
  18615. - break;
  18616. - default:
  18617. - return -EINVAL;
  18618. - }
  18619. -
  18620. - pr_debug("psy changed psy_chg\n");
  18621. - power_supply_changed(&charger->psy_chg);
  18622. - return 0;
  18623. -}
  18624. -
  18625. -
  18626. -static int sec_qpnp_charger_read_u32_index_dt(const struct device_node *np,
  18627. - const char *propname,
  18628. - u32 index, u32 *out_value)
  18629. -{
  18630. - struct property *prop = of_find_property(np, propname, NULL);
  18631. - u32 len = (index + 1) * sizeof(*out_value);
  18632. -
  18633. - if (!prop)
  18634. - return (-EINVAL);
  18635. - if (!prop->value)
  18636. - return (-ENODATA);
  18637. - if (len > prop->length)
  18638. - return (-EOVERFLOW);
  18639. -
  18640. - *out_value = be32_to_cpup(((__be32 *)prop->value) + index);
  18641. -
  18642. - return 0;
  18643. -}
  18644. -
  18645. -static int sec_qpnp_charger_parse_dt(struct sec_charger_info *charger)
  18646. -{
  18647. - struct device_node *np = of_find_node_by_name(NULL, "charger");
  18648. - sec_battery_platform_data_t *pdata = charger->pdata;
  18649. - int ret = 0;
  18650. - int i, len;
  18651. - const u32 *p;
  18652. -
  18653. - if (np == NULL) {
  18654. - pr_err("%s np NULL\n", __func__);
  18655. - return -1;
  18656. - } else {
  18657. - ret = of_property_read_u32(np, "battery,ovp_uvlo_check_type",
  18658. - &pdata->ovp_uvlo_check_type);
  18659. - if (ret < 0)
  18660. - pr_err("%s: ovp_uvlo_check_type read failed (%d)\n", __func__, ret);
  18661. -
  18662. - ret = of_property_read_u32(np, "battery,full_check_type",
  18663. - &pdata->full_check_type);
  18664. - if (ret < 0)
  18665. - pr_err("%s: full_check_type read failed (%d)\n", __func__, ret);
  18666. -
  18667. - p = of_get_property(np, "battery,input_current_limit", &len);
  18668. - len = len / sizeof(u32);
  18669. - pdata->charging_current = kzalloc(sizeof(sec_charging_current_t) * len,
  18670. - GFP_KERNEL);
  18671. -
  18672. - for(i = 0; i < len; i++) {
  18673. - ret = sec_qpnp_charger_read_u32_index_dt(np,
  18674. - "battery,input_current_limit", i,
  18675. - &pdata->charging_current[i].input_current_limit);
  18676. - ret = sec_qpnp_charger_read_u32_index_dt(np,
  18677. - "battery,fast_charging_current", i,
  18678. - &pdata->charging_current[i].fast_charging_current);
  18679. - ret = sec_qpnp_charger_read_u32_index_dt(np,
  18680. - "battery,full_check_current_1st", i,
  18681. - &pdata->charging_current[i].full_check_current_1st);
  18682. - ret = sec_qpnp_charger_read_u32_index_dt(np,
  18683. - "battery,full_check_current_2nd", i,
  18684. - &pdata->charging_current[i].full_check_current_2nd);
  18685. - }
  18686. - }
  18687. - return ret;
  18688. -}
  18689. -
  18690. -static void sec_qpnp_cable_initial_check(struct sec_charger_info *charger)
  18691. -{
  18692. - union power_supply_propval val_cable, val_status;
  18693. -
  18694. - psy_do_property("battery", get,
  18695. - POWER_SUPPLY_PROP_ONLINE, val_cable);
  18696. -
  18697. - if ((POWER_SUPPLY_TYPE_BATTERY != val_cable.intval)
  18698. - && (charger->cable_type != val_cable.intval)) {
  18699. - psy_do_property("battery", get,
  18700. - POWER_SUPPLY_PROP_STATUS, val_status);
  18701. -
  18702. - pr_info("battert_staus(%d), battery_cable_type(%d), charger_cable_type(%d)\n",
  18703. - val_status.intval, val_cable.intval, charger->cable_type);
  18704. -
  18705. - psy_do_property("qpnp-chg", set,
  18706. - POWER_SUPPLY_PROP_STATUS, val_status);
  18707. - psy_do_property("qpnp-chg", set,
  18708. - POWER_SUPPLY_PROP_ONLINE, val_cable);
  18709. - psy_do_property("bms", set,
  18710. - POWER_SUPPLY_PROP_ONLINE, val_cable);
  18711. - }
  18712. -}
  18713. -#endif
  18714. -
  18715. static int __devinit
  18716. qpnp_charger_probe(struct spmi_device *spmi)
  18717. {
  18718. @@ -5873,10 +5225,6 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18719. struct resource *resource;
  18720. struct spmi_resource *spmi_resource;
  18721. int rc = 0;
  18722. - #ifdef CONFIG_BATTERY_SAMSUNG
  18723. - struct sec_charger_info *charger;
  18724. - u8 val_bat_reg = 0;
  18725. - #endif
  18726.  
  18727. chip = devm_kzalloc(&spmi->dev,
  18728. sizeof(struct qpnp_chg_chip), GFP_KERNEL);
  18729. @@ -5890,23 +5238,6 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18730. chip->dev = &(spmi->dev);
  18731. chip->spmi = spmi;
  18732.  
  18733. - #ifdef CONFIG_BATTERY_SAMSUNG
  18734. - charger = kzalloc(sizeof(*charger), GFP_KERNEL);
  18735. - if (!charger)
  18736. - return -ENOMEM;
  18737. -
  18738. - charger->chip = chip;
  18739. - if (chip->spmi->dev.of_node) {
  18740. - void * pdata = kzalloc(sizeof(sec_battery_platform_data_t), GFP_KERNEL);
  18741. - if (!pdata)
  18742. - goto err_free1;
  18743. - charger->pdata = pdata;
  18744. - if (sec_qpnp_charger_parse_dt(charger))
  18745. - pr_err("%s : Failed to get charger dt\n", __func__);
  18746. - } else
  18747. - charger->pdata = chip->spmi->dev.platform_data;
  18748. - #endif
  18749. -
  18750. chip->usb_psy = power_supply_get_by_name("usb");
  18751. if (!chip->usb_psy) {
  18752. pr_err("usb supply not found deferring probe\n");
  18753. @@ -5923,10 +5254,8 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18754. mutex_init(&chip->batfet_vreg_lock);
  18755. INIT_WORK(&chip->ocp_clear_work,
  18756. qpnp_chg_ocp_clear_work);
  18757. - #ifndef CONFIG_BATTERY_SAMSUNG
  18758. INIT_WORK(&chip->batfet_lcl_work,
  18759. qpnp_chg_batfet_lcl_work);
  18760. - #endif
  18761. INIT_WORK(&chip->insertion_ocv_work,
  18762. qpnp_chg_insertion_ocv_work);
  18763.  
  18764. @@ -5976,7 +5305,8 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18765. goto fail_chg_enable;
  18766. }
  18767.  
  18768. - if (subtype == SMBB_BAT_IF_SUBTYPE) {
  18769. + if (subtype == SMBB_BAT_IF_SUBTYPE ||
  18770. + subtype == SMBBP_BAT_IF_SUBTYPE) {
  18771. chip->iadc_dev = qpnp_get_iadc(chip->dev,
  18772. "chg");
  18773. if (IS_ERR(chip->iadc_dev)) {
  18774. @@ -5986,11 +5316,10 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18775. goto fail_chg_enable;
  18776. }
  18777. }
  18778. - #ifndef CONFIG_BATTERY_SAMSUNG
  18779. +
  18780. rc = qpnp_chg_load_battery_data(chip);
  18781. if (rc)
  18782. goto fail_chg_enable;
  18783. - #endif
  18784. }
  18785. }
  18786.  
  18787. @@ -6124,21 +5453,9 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18788. dev_set_drvdata(&spmi->dev, chip);
  18789. device_init_wakeup(&spmi->dev, 1);
  18790.  
  18791. - #ifdef CONFIG_BATTERY_SAMSUNG
  18792. - val_bat_reg = 0xA5;
  18793. - qpnp_chg_write(chip, &val_bat_reg, 0x12D0, 1);
  18794. - val_bat_reg = 0x28;
  18795. - qpnp_chg_write(chip, &val_bat_reg, 0x12E5, 1);
  18796. -
  18797. - /* force set BATFET_NO_LPM */
  18798. - val_bat_reg = 0x00;
  18799. - qpnp_chg_write(chip, &val_bat_reg, 0x1293, 1);
  18800. - #endif
  18801. -
  18802. chip->insertion_ocv_uv = -EINVAL;
  18803. chip->batt_present = qpnp_chg_is_batt_present(chip);
  18804. if (chip->bat_if_base) {
  18805. - #ifndef CONFIG_BATTERY_SAMSUNG
  18806. chip->batt_psy.name = "battery";
  18807. chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
  18808. chip->batt_psy.properties = msm_batt_power_props;
  18809. @@ -6159,25 +5476,17 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18810. pr_err("batt failed to register rc = %d\n", rc);
  18811. goto fail_chg_enable;
  18812. }
  18813. - #endif
  18814. INIT_WORK(&chip->adc_measure_work,
  18815. qpnp_bat_if_adc_measure_work);
  18816. INIT_WORK(&chip->adc_disable_work,
  18817. qpnp_bat_if_adc_disable_work);
  18818. }
  18819.  
  18820. - #ifndef CONFIG_BATTERY_SAMSUNG
  18821. INIT_DELAYED_WORK(&chip->eoc_work, qpnp_eoc_work);
  18822. - #endif
  18823. INIT_DELAYED_WORK(&chip->arb_stop_work, qpnp_arb_stop_work);
  18824. - #ifdef CONFIG_BATTERY_SAMSUNG
  18825. - INIT_DELAYED_WORK(&chip->usbin_valid_work, sec_qpnp_usbin_valid_work);
  18826. - #endif
  18827. INIT_DELAYED_WORK(&chip->usbin_health_check,
  18828. qpnp_usbin_health_check_work);
  18829. - #ifndef CONFIG_BATTERY_SAMSUNG
  18830. INIT_WORK(&chip->soc_check_work, qpnp_chg_soc_check_work);
  18831. - #endif
  18832. INIT_DELAYED_WORK(&chip->aicl_check_work, qpnp_aicl_check_work);
  18833.  
  18834. if (chip->dc_chgpth_base) {
  18835. @@ -6199,24 +5508,6 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18836. }
  18837. }
  18838.  
  18839. - #ifdef CONFIG_BATTERY_SAMSUNG
  18840. - charger->siop_level = 100;
  18841. - charger->psy_chg.name = "qpnp-chg";
  18842. - charger->psy_chg.type = POWER_SUPPLY_TYPE_UNKNOWN;
  18843. - charger->psy_chg.get_property = sec_qpnp_chg_get_property;
  18844. - charger->psy_chg.set_property = sec_qpnp_chg_set_property;
  18845. - charger->psy_chg.properties = sec_qpnp_chg_props;
  18846. - charger->psy_chg.num_properties = ARRAY_SIZE(sec_qpnp_chg_props);
  18847. - charger->psy_chg.property_is_writeable =
  18848. - sec_qpnp_chg_property_is_writeable;
  18849. -
  18850. - rc = power_supply_register(chip->dev, &charger->psy_chg);
  18851. - if (rc < 0) {
  18852. - pr_err("power_supply_register qpnp-chg failed rc=%d\n", rc);
  18853. - goto err_free;
  18854. - }
  18855. - #endif
  18856. -
  18857. /* Turn on appropriate workaround flags */
  18858. rc = qpnp_chg_setup_flags(chip);
  18859. if (rc < 0) {
  18860. @@ -6270,11 +5561,6 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18861. goto unregister_dc_psy;
  18862. }
  18863. }
  18864. - #ifdef CONFIG_BATTERY_SAMSUNG
  18865. - /* if sec_battery probed before qpnp-charger,
  18866. - charger driver cannot recognize cable type */
  18867. - sec_qpnp_cable_initial_check(charger);
  18868. - #endif
  18869.  
  18870. rc = qpnp_chg_request_irqs(chip);
  18871. if (rc) {
  18872. @@ -6285,17 +5571,13 @@ qpnp_charger_probe(struct spmi_device *spmi)
  18873. qpnp_chg_usb_chg_gone_irq_handler(chip->chg_gone.irq, chip);
  18874. qpnp_chg_usb_usbin_valid_irq_handler(chip->usbin_valid.irq, chip);
  18875. qpnp_chg_dc_dcin_valid_irq_handler(chip->dcin_valid.irq, chip);
  18876. - #ifndef CONFIG_BATTERY_SAMSUNG
  18877. power_supply_set_present(chip->usb_psy,
  18878. qpnp_chg_is_usb_chg_plugged_in(chip));
  18879. - #endif
  18880.  
  18881. /* Set USB psy online to avoid userspace from shutting down if battery
  18882. * capacity is at zero and no chargers online. */
  18883. - #ifndef CONFIG_BATTERY_SAMSUNG
  18884. if (qpnp_chg_is_usb_chg_plugged_in(chip))
  18885. power_supply_set_online(chip->usb_psy, 1);
  18886. - #endif
  18887.  
  18888. schedule_delayed_work(&chip->aicl_check_work,
  18889. msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
  18890. @@ -6312,16 +5594,8 @@ unregister_dc_psy:
  18891. if (chip->dc_chgpth_base)
  18892. power_supply_unregister(&chip->dc_psy);
  18893. unregister_batt:
  18894. -#ifndef CONFIG_BATTERY_SAMSUNG
  18895. if (chip->bat_if_base)
  18896. power_supply_unregister(&chip->batt_psy);
  18897. -#endif
  18898. -#ifdef CONFIG_BATTERY_SAMSUNG
  18899. -err_free:
  18900. - kfree(charger->pdata);
  18901. -err_free1:
  18902. - kfree(charger);
  18903. -#endif
  18904. fail_chg_enable:
  18905. regulator_unregister(chip->otg_vreg.rdev);
  18906. regulator_unregister(chip->boost_vreg.rdev);
  18907. @@ -6340,20 +5614,14 @@ qpnp_charger_remove(struct spmi_device *spmi)
  18908.  
  18909. cancel_delayed_work_sync(&chip->aicl_check_work);
  18910. power_supply_unregister(&chip->dc_psy);
  18911. - #ifndef CONFIG_BATTERY_SAMSUNG
  18912. cancel_work_sync(&chip->soc_check_work);
  18913. - #endif
  18914. cancel_delayed_work_sync(&chip->usbin_health_check);
  18915. cancel_delayed_work_sync(&chip->arb_stop_work);
  18916. - #ifndef CONFIG_BATTERY_SAMSUNG
  18917. cancel_delayed_work_sync(&chip->eoc_work);
  18918. - #endif
  18919. cancel_work_sync(&chip->adc_disable_work);
  18920. cancel_work_sync(&chip->adc_measure_work);
  18921. - #ifndef CONFIG_BATTERY_SAMSUNG
  18922. power_supply_unregister(&chip->batt_psy);
  18923. cancel_work_sync(&chip->batfet_lcl_work);
  18924. - #endif
  18925. cancel_work_sync(&chip->insertion_ocv_work);
  18926. cancel_work_sync(&chip->reduce_power_stage_work);
  18927. alarm_cancel(&chip->reduce_power_stage_alarm);
  18928. @@ -6369,9 +5637,6 @@ qpnp_charger_remove(struct spmi_device *spmi)
  18929.  
  18930. static int qpnp_chg_resume(struct device *dev)
  18931. {
  18932. -#ifdef CONFIG_BATTERY_SAMSUNG
  18933. - return 0;
  18934. -#else
  18935. struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
  18936. int rc = 0;
  18937.  
  18938. @@ -6385,14 +5650,10 @@ static int qpnp_chg_resume(struct device *dev)
  18939. }
  18940.  
  18941. return rc;
  18942. -#endif
  18943. }
  18944.  
  18945. static int qpnp_chg_suspend(struct device *dev)
  18946. {
  18947. -#ifdef CONFIG_BATTERY_SAMSUNG
  18948. - return 0;
  18949. -#else
  18950. struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
  18951. int rc = 0;
  18952.  
  18953. @@ -6406,7 +5667,6 @@ static int qpnp_chg_suspend(struct device *dev)
  18954. }
  18955.  
  18956. return rc;
  18957. -#endif
  18958. }
  18959.  
  18960. static const struct dev_pm_ops qpnp_chg_pm_ops = {
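
Several of the qpnp-charger hunks above extend the IBAT trim calibration to SMBBP parts, where BUCK_CTRL_TRIM3 is a secured register: each write must be immediately preceded by writing the 0xA5 key to the peripheral's SEC_ACCESS offset, and the patch gates that unlock on chip->type == SMBBP while leaving the SMBB path untouched. A minimal sketch of the unlock-then-write sequence, built from the qpnp_chg_masked_write() helper and the offsets visible in the diff (the wrapper name below is illustrative, not something the patch adds):

static int qpnp_chg_sec_trim_write(struct qpnp_chg_chip *chip, u8 trim)
{
	int rc;

	/* Secured peripherals ignore the trim write unless the 0xA5 key
	 * was written to SEC_ACCESS immediately beforehand. */
	rc = qpnp_chg_masked_write(chip, chip->buck_base + SEC_ACCESS,
			0xFF, 0xA5, 1);
	if (rc) {
		pr_err("failed to write SEC_ACCESS: %d\n", rc);
		return rc;
	}

	/* The key arms a single secured access, so issue the trim write
	 * right away, exactly as the hunks above do inline. */
	return qpnp_chg_masked_write(chip, chip->buck_base + BUCK_CTRL_TRIM3,
			IBAT_TRIM_OFFSET_MASK, trim, 1);
}
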
  18961. diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
  18962. index c662a2b..924a028 100644
  18963. --- a/drivers/slimbus/slim-msm-ctrl.c
  18964. +++ b/drivers/slimbus/slim-msm-ctrl.c
  18965. @@ -1,4 +1,4 @@
  18966. -/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  18967. +/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  18968. *
  18969. * This program is free software; you can redistribute it and/or modify
  18970. * it under the terms of the GNU General Public License version 2 and
  18971. @@ -207,8 +207,7 @@ static irqreturn_t msm_slim_interrupt(int irq, void *d)
  18972. * signalling completion/exiting ISR
  18973. */
  18974. mb();
  18975. - if (dev->wr_comp)
  18976. - complete(dev->wr_comp);
  18977. + msm_slim_manage_tx_msgq(dev, false, NULL);
  18978. }
  18979. if (stat & MGR_INT_RX_MSG_RCVD) {
  18980. u32 rx_buf[10];
  18981. @@ -372,8 +371,7 @@ static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  18982. }
  18983. }
  18984. txn->rl--;
  18985. - pbuf = msm_get_msg_buf(dev, txn->rl);
  18986. - dev->wr_comp = NULL;
  18987. + pbuf = msm_get_msg_buf(dev, txn->rl, &done);
  18988. dev->err = 0;
  18989.  
  18990. if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
  18991. @@ -438,11 +436,8 @@ static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  18992. if (txn->mt == SLIM_MSG_MT_CORE &&
  18993. mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
  18994. dev->reconf_busy = true;
  18995. - dev->wr_comp = &done;
  18996. msm_send_msg_buf(dev, pbuf, txn->rl, MGR_TX_MSG);
  18997. timeout = wait_for_completion_timeout(&done, HZ);
  18998. - if (!timeout)
  18999. - dev->wr_comp = NULL;
  19000. if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
  19001. if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
  19002. SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
  19003. @@ -505,7 +500,9 @@ static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
  19004. retry_laddr:
  19005. init_completion(&done);
  19006. mutex_lock(&dev->tx_lock);
  19007. - buf = msm_get_msg_buf(dev, 9);
  19008. + buf = msm_get_msg_buf(dev, 9, &done);
  19009. + if (buf == NULL)
  19010. + return -ENOMEM;
  19011. buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
  19012. SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
  19013. SLIM_MSG_DEST_LOGICALADDR,
  19014. @@ -513,7 +510,6 @@ retry_laddr:
  19015. buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
  19016. buf[2] = laddr;
  19017.  
  19018. - dev->wr_comp = &done;
  19019. ret = msm_send_msg_buf(dev, buf, 9, MGR_TX_MSG);
  19020. timeout = wait_for_completion_timeout(&done, HZ);
  19021. if (!timeout)
  19022. @@ -521,7 +517,6 @@ retry_laddr:
  19023. if (dev->err) {
  19024. ret = dev->err;
  19025. dev->err = 0;
  19026. - dev->wr_comp = NULL;
  19027. }
  19028. mutex_unlock(&dev->tx_lock);
  19029. if (ret) {
  19030. @@ -1183,6 +1178,10 @@ static int __devinit msm_slim_probe(struct platform_device *pdev)
  19031. ret = -ENOMEM;
  19032. goto err_get_res_failed;
  19033. }
  19034. + dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
  19035. + GFP_KERNEL);
  19036. + if (!dev->wr_comp)
  19037. + return -ENOMEM;
  19038. dev->dev = &pdev->dev;
  19039. platform_set_drvdata(pdev, dev);
  19040. slim_set_ctrldata(&dev->ctrl, dev);
  19041. @@ -1271,7 +1270,8 @@ static int __devinit msm_slim_probe(struct platform_device *pdev)
  19042. dev->ctrl.dev.parent = &pdev->dev;
  19043. dev->ctrl.dev.of_node = pdev->dev.of_node;
  19044.  
  19045. - ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH,
  19046. + ret = request_threaded_irq(dev->irq, NULL, msm_slim_interrupt,
  19047. + IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  19048. "msm_slim_irq", dev);
  19049. if (ret) {
  19050. dev_err(&pdev->dev, "request IRQ failed\n");
  19051. @@ -1400,6 +1400,7 @@ err_of_init_failed:
  19052. err_ioremap_bam_failed:
  19053. iounmap(dev->base);
  19054. err_ioremap_failed:
  19055. + kfree(dev->wr_comp);
  19056. kfree(dev);
  19057. err_get_res_failed:
  19058. release_mem_region(bam_mem->start, resource_size(bam_mem));
  19059. @@ -1437,6 +1438,7 @@ static int __devexit msm_slim_remove(struct platform_device *pdev)
  19060. kthread_stop(dev->rx_msgq_thread);
  19061. iounmap(dev->bam.base);
  19062. iounmap(dev->base);
  19063. + kfree(dev->wr_comp);
  19064. kfree(dev);
  19065. bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  19066. "slimbus_bam_physical");
  19067. diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
  19068. index 1bfeca8..d31ae1b 100644
  19069. --- a/drivers/slimbus/slim-msm-ngd.c
  19070. +++ b/drivers/slimbus/slim-msm-ngd.c
  19071. @@ -117,15 +117,13 @@ static irqreturn_t ngd_slim_interrupt(int irq, void *d)
  19072. dev->err);
  19073. /* Guarantee that error interrupts are cleared */
  19074. mb();
  19075. - if (dev->wr_comp)
  19076. - complete(dev->wr_comp);
  19077. + msm_slim_manage_tx_msgq(dev, false, NULL);
  19078.  
  19079. } else if (stat & NGD_INT_TX_MSG_SENT) {
  19080. writel_relaxed(NGD_INT_TX_MSG_SENT, ngd + NGD_INT_CLR);
  19081. /* Make sure interrupt is cleared */
  19082. mb();
  19083. - if (dev->wr_comp)
  19084. - complete(dev->wr_comp);
  19085. + msm_slim_manage_tx_msgq(dev, false, NULL);
  19086. }
  19087. if (stat & NGD_INT_RX_MSG_RCVD) {
  19088. u32 rx_buf[10];
  19089. @@ -181,27 +179,51 @@ static int ngd_qmi_available(struct notifier_block *n, unsigned long code,
  19090. case QMI_SERVER_ARRIVE:
  19091. schedule_work(&qmi->ssr_up);
  19092. break;
  19093. - case QMI_SERVER_EXIT:
  19094. - dev->state = MSM_CTRL_DOWN;
  19095. + default:
  19096. + break;
  19097. + }
  19098. + return 0;
  19099. +}
  19100. +
  19101. +static int dsp_ssr_notify_cb(struct notifier_block *n, unsigned long code,
  19102. + void *_cmd)
  19103. +{
  19104. + struct msm_slim_ss *dsp = container_of(n, struct msm_slim_ss, nb);
  19105. + struct msm_slim_ctrl *dev = container_of(dsp, struct msm_slim_ctrl,
  19106. + dsp);
  19107. +
  19108. + switch (code) {
  19109. + case SUBSYS_BEFORE_SHUTDOWN:
  19110. + SLIM_INFO(dev, "SLIM DSP SSR notify cb:%lu\n", code);
  19111. + /* wait for current transaction */
  19112. + mutex_lock(&dev->tx_lock);
  19113. /* make sure autosuspend is not called until ADSP comes up*/
  19114. pm_runtime_get_noresume(dev->dev);
  19115. + dev->state = MSM_CTRL_DOWN;
  19116. /* Reset ctrl_up completion */
  19117. init_completion(&dev->ctrl_up);
  19118. - schedule_work(&qmi->ssr_down);
  19119. + /* disconnect BAM pipes */
  19120. + if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
  19121. + dev->use_rx_msgqs = MSM_MSGQ_DOWN;
  19122. + if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
  19123. + dev->use_tx_msgqs = MSM_MSGQ_DOWN;
  19124. + msm_slim_sps_exit(dev, false);
  19125. + schedule_work(&dev->qmi.ssr_down);
  19126. + mutex_unlock(&dev->tx_lock);
  19127. break;
  19128. default:
  19129. break;
  19130. }
  19131. - return 0;
  19132. + return NOTIFY_DONE;
  19133. }
  19134.  
  19135. static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
  19136. void *_cmd)
  19137. {
  19138. void __iomem *ngd;
  19139. - struct msm_slim_mdm *mdm = container_of(n, struct msm_slim_mdm, nb);
  19140. - struct msm_slim_ctrl *dev = container_of(mdm, struct msm_slim_ctrl,
  19141. - mdm);
  19142. + struct msm_slim_ss *ext_mdm = container_of(n, struct msm_slim_ss, nb);
  19143. + struct msm_slim_ctrl *dev = container_of(ext_mdm, struct msm_slim_ctrl,
  19144. + ext_mdm);
  19145. struct slim_controller *ctrl = &dev->ctrl;
  19146. u32 laddr;
  19147. struct slim_device *sbdev;
  19148. @@ -216,11 +238,11 @@ static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
  19149. * handover later
  19150. */
  19151. msm_slim_qmi_check_framer_request(dev);
  19152. - dev->mdm.state = MSM_CTRL_DOWN;
  19153. + dev->ext_mdm.state = MSM_CTRL_DOWN;
  19154. msm_slim_put_ctrl(dev);
  19155. break;
  19156. case SUBSYS_AFTER_POWERUP:
  19157. - if (dev->mdm.state != MSM_CTRL_DOWN)
  19158. + if (dev->ext_mdm.state != MSM_CTRL_DOWN)
  19159. return NOTIFY_DONE;
  19160. SLIM_INFO(dev,
  19161. "SLIM %lu external_modem SSR notify cb\n", code);
  19162. @@ -231,19 +253,21 @@ static int mdm_ssr_notify_cb(struct notifier_block *n, unsigned long code,
  19163. ngd = dev->base + NGD_BASE(dev->ctrl.nr, dev->ver);
  19164. laddr = readl_relaxed(ngd + NGD_STATUS);
  19165. if (!(laddr & NGD_LADDR)) {
  19166. + mutex_lock(&dev->tx_lock);
  19167. /* runtime-pm state should be consistent with HW */
  19168. pm_runtime_disable(dev->dev);
  19169. pm_runtime_set_suspended(dev->dev);
  19170. dev->state = MSM_CTRL_DOWN;
  19171. + mutex_unlock(&dev->tx_lock);
  19172. SLIM_INFO(dev,
  19173. "SLIM MDM SSR (active framer on MDM) dev-down\n");
  19174. list_for_each_entry(sbdev, &ctrl->devs, dev_list)
  19175. slim_report_absent(sbdev);
  19176. - ngd_slim_power_up(dev, true);
  19177. + ngd_slim_runtime_resume(dev->dev);
  19178. pm_runtime_set_active(dev->dev);
  19179. pm_runtime_enable(dev->dev);
  19180. }
  19181. - dev->mdm.state = MSM_CTRL_AWAKE;
  19182. + dev->ext_mdm.state = MSM_CTRL_AWAKE;
  19183. msm_slim_put_ctrl(dev);
  19184. break;
  19185. default:
  19186. @@ -302,12 +326,24 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  19187. u16 txn_mc = txn->mc;
  19188. u8 wbuf[SLIM_MSGQ_BUF_LEN];
  19189. bool report_sat = false;
  19190. + bool sync_wr = true;
  19191. +
  19192. + if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
  19193. + return -EPROTONOSUPPORT;
  19194. +
  19195. + if (txn->mt == SLIM_MSG_MT_CORE &&
  19196. + (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
  19197. + txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
  19198. + return 0;
  19199.  
  19200. if (txn->mc == SLIM_USR_MC_REPORT_SATELLITE &&
  19201. txn->mt == SLIM_MSG_MT_SRC_REFERRED_USER)
  19202. report_sat = true;
  19203. - if (!pm_runtime_enabled(dev->dev) && dev->state == MSM_CTRL_ASLEEP &&
  19204. - report_sat == false) {
  19205. + else
  19206. + mutex_lock(&dev->tx_lock);
  19207. +
  19208. + if (!report_sat && !pm_runtime_enabled(dev->dev) &&
  19209. + dev->state == MSM_CTRL_ASLEEP) {
  19210. /*
  19211. * Counter-part of system-suspend when runtime-pm is not enabled
  19212. * This way, resume can be left empty and device will be put in
  19213. @@ -315,22 +351,24 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  19214. * If the state was DOWN, SSR UP notification will take
  19215. * care of putting the device in active state.
  19216. */
  19217. - ngd_slim_runtime_resume(dev->dev);
  19218. - }
  19219. -
  19220. - else if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
  19221. - return -EPROTONOSUPPORT;
  19222. + mutex_unlock(&dev->tx_lock);
  19223. + ret = ngd_slim_runtime_resume(dev->dev);
  19224.  
  19225. - if (txn->mt == SLIM_MSG_MT_CORE &&
  19226. - (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
  19227. - txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW)) {
  19228. - return 0;
  19229. + if (ret) {
  19230. + SLIM_ERR(dev, "slim resume failed ret:%d, state:%d",
  19231. + ret, dev->state);
  19232. + return -EREMOTEIO;
  19233. + }
  19234. + mutex_lock(&dev->tx_lock);
  19235. }
  19236. +
  19237. /* If txn is tried when controller is down, wait for ADSP to boot */
  19238. if (!report_sat) {
  19239. +
  19240. if (dev->state == MSM_CTRL_DOWN) {
  19241. u8 mc = (u8)txn->mc;
  19242. int timeout;
  19243. + mutex_unlock(&dev->tx_lock);
  19244. SLIM_INFO(dev, "ADSP slimbus not up yet\n");
  19245. /*
  19246. * Messages related to data channel management can't
  19247. @@ -370,33 +408,31 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  19248. return -EREMOTEIO;
  19249. timeout = wait_for_completion_timeout(&dev->ctrl_up,
  19250. HZ);
  19251. - if (!timeout && dev->state == MSM_CTRL_DOWN)
  19252. + if (!timeout)
  19253. return -ETIMEDOUT;
  19254. + mutex_lock(&dev->tx_lock);
  19255. }
  19256. +
  19257. + mutex_unlock(&dev->tx_lock);
  19258. ret = msm_slim_get_ctrl(dev);
  19259. + mutex_lock(&dev->tx_lock);
  19260. /*
  19261. * Runtime-pm's callbacks are not called until runtime-pm's
  19262. * error status is cleared
  19263. * Setting runtime status to suspended clears the error
  19264. * It also makes HW status cosistent with what SW has it here
  19265. */
  19266. - if (ret == -ENETRESET && dev->state == MSM_CTRL_DOWN) {
  19267. + if ((pm_runtime_enabled(dev->dev) && ret < 0) ||
  19268. + dev->state == MSM_CTRL_DOWN) {
  19269. + SLIM_ERR(dev, "slim ctrl vote failed ret:%d, state:%d",
  19270. + ret, dev->state);
  19271. pm_runtime_set_suspended(dev->dev);
  19272. + mutex_unlock(&dev->tx_lock);
  19273. msm_slim_put_ctrl(dev);
  19274. return -EREMOTEIO;
  19275. - } else if (ret >= 0) {
  19276. - dev->state = MSM_CTRL_AWAKE;
  19277. }
  19278. }
  19279. - mutex_lock(&dev->tx_lock);
  19280.  
  19281. - if (report_sat == false && dev->state != MSM_CTRL_AWAKE) {
  19282. - SLIM_ERR(dev, "controller not ready\n");
  19283. - mutex_unlock(&dev->tx_lock);
  19284. - pm_runtime_set_suspended(dev->dev);
  19285. - msm_slim_put_ctrl(dev);
  19286. - return -EREMOTEIO;
  19287. - }
  19288. if (txn->mt == SLIM_MSG_MT_CORE &&
  19289. (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
  19290. txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
  19291. @@ -454,7 +490,25 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  19292. txn->rl = txn->len + 4;
  19293. }
  19294. txn->rl--;
  19295. - pbuf = msm_get_msg_buf(dev, txn->rl);
  19296. +
  19297. + if (txn->mt == SLIM_MSG_MT_CORE && txn->comp &&
  19298. + dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
  19299. + (txn_mc != SLIM_MSG_MC_REQUEST_INFORMATION &&
  19300. + txn_mc != SLIM_MSG_MC_REQUEST_VALUE &&
  19301. + txn_mc != SLIM_MSG_MC_REQUEST_CHANGE_VALUE &&
  19302. + txn_mc != SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION)) {
  19303. + sync_wr = false;
  19304. + pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
  19305. + } else if (txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER &&
  19306. + dev->use_tx_msgqs == MSM_MSGQ_ENABLED &&
  19307. + txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
  19308. + txn->comp) {
  19309. + sync_wr = false;
  19310. + pbuf = msm_get_msg_buf(dev, txn->rl, txn->comp);
  19311. + } else {
  19312. + pbuf = msm_get_msg_buf(dev, txn->rl, &tx_sent);
  19313. + }
  19314. +
  19315. if (!pbuf) {
  19316. SLIM_ERR(dev, "Message buffer unavailable\n");
  19317. ret = -ENOMEM;
  19318. @@ -525,10 +579,9 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  19319. */
  19320. txn_mc = txn->mc;
  19321. txn_mt = txn->mt;
  19322. - dev->wr_comp = &tx_sent;
  19323. ret = msm_send_msg_buf(dev, pbuf, txn->rl,
  19324. NGD_BASE(dev->ctrl.nr, dev->ver) + NGD_TX_MSG);
  19325. - if (!ret) {
  19326. + if (!ret && sync_wr) {
  19327. int timeout = wait_for_completion_timeout(&tx_sent, HZ);
  19328. if (!timeout) {
  19329. ret = -ETIMEDOUT;
  19330. @@ -537,14 +590,15 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  19331. * transactions don't timeout due to unavailable
  19332. * descriptors
  19333. */
  19334. - msm_slim_disconnect_endp(dev, &dev->tx_msgq,
  19335. - &dev->use_tx_msgqs);
  19336. - msm_slim_connect_endp(dev, &dev->tx_msgq, NULL);
  19337. + if (dev->state != MSM_CTRL_DOWN) {
  19338. + msm_slim_disconnect_endp(dev, &dev->tx_msgq,
  19339. + &dev->use_tx_msgqs);
  19340. + msm_slim_connect_endp(dev, &dev->tx_msgq, NULL);
  19341. + }
  19342. } else {
  19343. ret = dev->err;
  19344. }
  19345. }
  19346. - dev->wr_comp = NULL;
  19347. if (ret) {
  19348. u32 conf, stat, rx_msgq, int_stat, int_en, int_clr;
  19349. void __iomem *ngd = dev->base + NGD_BASE(dev->ctrl.nr,
  19350. @@ -585,9 +639,10 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
  19351. return ret ? ret : dev->err;
  19352. }
  19353. ngd_xfer_err:
  19354. - mutex_unlock(&dev->tx_lock);
  19355. - if (!report_sat)
  19356. + if (!report_sat) {
  19357. + mutex_unlock(&dev->tx_lock);
  19358. msm_slim_put_ctrl(dev);
  19359. + }
  19360. return ret ? ret : dev->err;
  19361. }
  19362.  
  19363. @@ -896,7 +951,6 @@ capability_retry:
  19364. enum msm_ctrl_state prev_state = dev->state;
  19365. SLIM_INFO(dev,
  19366. "SLIM SAT: capability exchange successful\n");
  19367. - dev->state = MSM_CTRL_AWAKE;
  19368. if (prev_state >= MSM_CTRL_ASLEEP)
  19369. complete(&dev->reconf);
  19370. else
  19371. @@ -980,8 +1034,10 @@ static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
  19372. if (!mdm_restart && cur_state == MSM_CTRL_DOWN) {
  19373. int timeout = wait_for_completion_timeout(&dev->qmi.qmi_comp,
  19374. HZ);
  19375. - if (!timeout)
  19376. + if (!timeout) {
  19377. SLIM_ERR(dev, "slimbus QMI init timed out\n");
  19378. + return -EREMOTEIO;
  19379. + }
  19380. }
  19381.  
  19382. /* No need to vote if contorller is not in low power mode */
  19383. @@ -1068,11 +1124,11 @@ static int ngd_slim_power_up(struct msm_slim_ctrl *dev, bool mdm_restart)
  19384. SLIM_ERR(dev, "Failed to receive master capability\n");
  19385. return -ETIMEDOUT;
  19386. }
  19387. - if (cur_state == MSM_CTRL_DOWN) {
  19388. - complete(&dev->ctrl_up);
  19389. - /* Resetting the log level */
  19390. - SLIM_RST_LOGLVL(dev);
  19391. - }
  19392. + /* mutliple transactions waiting on slimbus to power up? */
  19393. + if (cur_state == MSM_CTRL_DOWN)
  19394. + complete_all(&dev->ctrl_up);
  19395. + /* Resetting the log level */
  19396. + SLIM_RST_LOGLVL(dev);
  19397. return 0;
  19398. }
  19399.  
  19400. @@ -1238,12 +1294,6 @@ static void ngd_adsp_down(struct work_struct *work)
  19401. struct slim_device *sbdev;
  19402.  
  19403. ngd_slim_enable(dev, false);
  19404. - /* disconnect BAM pipes */
  19405. - if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
  19406. - dev->use_rx_msgqs = MSM_MSGQ_DOWN;
  19407. - if (dev->use_tx_msgqs == MSM_MSGQ_ENABLED)
  19408. - dev->use_tx_msgqs = MSM_MSGQ_DOWN;
  19409. - msm_slim_sps_exit(dev, false);
  19410. /* device up should be called again after SSR */
  19411. list_for_each_entry(sbdev, &ctrl->devs, dev_list)
  19412. slim_report_absent(sbdev);
  19413. @@ -1322,6 +1372,10 @@ static int __devinit ngd_slim_probe(struct platform_device *pdev)
  19414. dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
  19415. return PTR_ERR(dev);
  19416. }
  19417. + dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
  19418. + GFP_KERNEL);
  19419. + if (!dev->wr_comp)
  19420. + return -ENOMEM;
  19421. dev->dev = &pdev->dev;
  19422. platform_set_drvdata(pdev, dev);
  19423. slim_set_ctrldata(&dev->ctrl, dev);
  19424. @@ -1405,6 +1459,7 @@ static int __devinit ngd_slim_probe(struct platform_device *pdev)
  19425. init_completion(&dev->reconf);
  19426. init_completion(&dev->ctrl_up);
  19427. mutex_init(&dev->tx_lock);
  19428. + mutex_init(&dev->tx_buf_lock);
  19429. spin_lock_init(&dev->rx_lock);
  19430. dev->ee = 1;
  19431. dev->irq = irq->start;
  19432. @@ -1432,8 +1487,9 @@ static int __devinit ngd_slim_probe(struct platform_device *pdev)
  19433. dev->ctrl.dev.of_node = pdev->dev.of_node;
  19434. dev->state = MSM_CTRL_DOWN;
  19435.  
  19436. - ret = request_irq(dev->irq, ngd_slim_interrupt,
  19437. - IRQF_TRIGGER_HIGH, "ngd_slim_irq", dev);
  19438. + ret = request_threaded_irq(dev->irq, NULL,
  19439. + ngd_slim_interrupt,
  19440. + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "ngd_slim_irq", dev);
  19441.  
  19442. if (ret) {
  19443. dev_err(&pdev->dev, "request IRQ failed\n");
  19444. @@ -1447,14 +1503,21 @@ static int __devinit ngd_slim_probe(struct platform_device *pdev)
  19445. pm_runtime_set_suspended(dev->dev);
  19446. pm_runtime_enable(dev->dev);
  19447.  
  19448. + dev->dsp.nb.notifier_call = dsp_ssr_notify_cb;
  19449. + dev->dsp.ssr = subsys_notif_register_notifier("adsp",
  19450. + &dev->dsp.nb);
  19451. + if (IS_ERR_OR_NULL(dev->dsp.ssr))
  19452. + dev_err(dev->dev,
  19453. + "subsys_notif_register_notifier failed %p",
  19454. + dev->dsp.ssr);
  19455. if (slim_mdm) {
  19456. - dev->mdm.nb.notifier_call = mdm_ssr_notify_cb;
  19457. - dev->mdm.ssr = subsys_notif_register_notifier(ext_modem_id,
  19458. - &dev->mdm.nb);
  19459. - if (IS_ERR_OR_NULL(dev->mdm.ssr))
  19460. + dev->ext_mdm.nb.notifier_call = mdm_ssr_notify_cb;
  19461. + dev->ext_mdm.ssr = subsys_notif_register_notifier(ext_modem_id,
  19462. + &dev->ext_mdm.nb);
  19463. + if (IS_ERR_OR_NULL(dev->ext_mdm.ssr))
  19464. dev_err(dev->dev,
  19465. "subsys_notif_register_notifier failed %p",
  19466. - dev->mdm.ssr);
  19467. + dev->ext_mdm.ssr);
  19468. }
  19469.  
  19470. INIT_WORK(&dev->qmi.ssr_down, ngd_adsp_down);
  19471. @@ -1496,6 +1559,7 @@ err_ioremap_failed:
  19472. if (dev->sysfs_created)
  19473. sysfs_remove_file(&dev->dev->kobj,
  19474. &dev_attr_debug_mask.attr);
  19475. + kfree(dev->wr_comp);
  19476. kfree(dev);
  19477. return ret;
  19478. }
  19479. @@ -1511,13 +1575,18 @@ static int __devexit ngd_slim_remove(struct platform_device *pdev)
  19480. SLIMBUS_QMI_SVC_V1,
  19481. SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
  19482. pm_runtime_disable(&pdev->dev);
  19483. - if (!IS_ERR_OR_NULL(dev->mdm.ssr))
  19484. - subsys_notif_unregister_notifier(dev->mdm.ssr, &dev->mdm.nb);
  19485. + if (!IS_ERR_OR_NULL(dev->dsp.ssr))
  19486. + subsys_notif_unregister_notifier(dev->dsp.ssr,
  19487. + &dev->dsp.nb);
  19488. + if (!IS_ERR_OR_NULL(dev->ext_mdm.ssr))
  19489. + subsys_notif_unregister_notifier(dev->ext_mdm.ssr,
  19490. + &dev->ext_mdm.nb);
  19491. free_irq(dev->irq, dev);
  19492. slim_del_controller(&dev->ctrl);
  19493. kthread_stop(dev->rx_msgq_thread);
  19494. iounmap(dev->bam.base);
  19495. iounmap(dev->base);
  19496. + kfree(dev->wr_comp);
  19497. kfree(dev);
  19498. return 0;
  19499. }
  19500. @@ -1543,8 +1612,10 @@ static int ngd_slim_runtime_idle(struct device *device)
  19501. {
  19502. struct platform_device *pdev = to_platform_device(device);
  19503. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  19504. + mutex_lock(&dev->tx_lock);
  19505. if (dev->state == MSM_CTRL_AWAKE)
  19506. dev->state = MSM_CTRL_IDLE;
  19507. + mutex_unlock(&dev->tx_lock);
  19508. dev_dbg(device, "pm_runtime: idle...\n");
  19509. pm_request_autosuspend(device);
  19510. return -EAGAIN;
  19511. @@ -1562,6 +1633,7 @@ static int ngd_slim_runtime_resume(struct device *device)
  19512. struct platform_device *pdev = to_platform_device(device);
  19513. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  19514. int ret = 0;
  19515. + mutex_lock(&dev->tx_lock);
  19516. if (dev->state >= MSM_CTRL_ASLEEP)
  19517. ret = ngd_slim_power_up(dev, false);
  19518. if (ret) {
  19519. @@ -1573,6 +1645,7 @@ static int ngd_slim_runtime_resume(struct device *device)
  19520. } else {
  19521. dev->state = MSM_CTRL_AWAKE;
  19522. }
  19523. + mutex_unlock(&dev->tx_lock);
  19524. SLIM_INFO(dev, "Slim runtime resume: ret %d\n", ret);
  19525. return ret;
  19526. }
  19527. @@ -1583,6 +1656,7 @@ static int ngd_slim_runtime_suspend(struct device *device)
  19528. struct platform_device *pdev = to_platform_device(device);
  19529. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  19530. int ret = 0;
  19531. + mutex_lock(&dev->tx_lock);
  19532. ret = ngd_slim_power_down(dev);
  19533. if (ret) {
  19534. if (ret != -EBUSY)
  19535. @@ -1591,6 +1665,7 @@ static int ngd_slim_runtime_suspend(struct device *device)
  19536. } else {
  19537. dev->state = MSM_CTRL_ASLEEP;
  19538. }
  19539. + mutex_unlock(&dev->tx_lock);
  19540. SLIM_INFO(dev, "Slim runtime suspend: ret %d\n", ret);
  19541. return ret;
  19542. }
  19543. diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
  19544. index f509f67..ec82375 100644
  19545. --- a/drivers/slimbus/slim-msm.c
  19546. +++ b/drivers/slimbus/slim-msm.c
  19547. @@ -11,6 +11,7 @@
  19548. */
  19549. #include <linux/pm_runtime.h>
  19550. #include <linux/dma-mapping.h>
  19551. +#include <linux/delay.h>
  19552. #include <linux/slimbus/slimbus.h>
  19553. #include <mach/sps.h>
  19554. #include "slim-msm.h"
  19555. @@ -398,9 +399,9 @@ static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
  19556. struct msm_slim_endp *endpoint = &dev->tx_msgq;
  19557. struct sps_mem_buffer *mem = &endpoint->buf;
  19558. struct sps_pipe *pipe = endpoint->sps;
  19559. - int ix = (buf - (u8 *)mem->base) / SLIM_MSGQ_BUF_LEN;
  19560. + int ix = (buf - (u8 *)mem->base);
  19561.  
  19562. - phys_addr_t phys_addr = mem->phys_base + (SLIM_MSGQ_BUF_LEN * ix);
  19563. + phys_addr_t phys_addr = mem->phys_base + ix;
  19564.  
  19565. for (ret = 0; ret < ((len + 3) >> 2); ret++)
  19566. pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);
  19567. @@ -413,29 +414,110 @@ static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
  19568. return ret;
  19569. }
  19570.  
  19571. -static u32 *msm_slim_tx_msgq_return(struct msm_slim_ctrl *dev)
  19572. +void msm_slim_tx_msg_return(struct msm_slim_ctrl *dev)
  19573. {
  19574. struct msm_slim_endp *endpoint = &dev->tx_msgq;
  19575. struct sps_mem_buffer *mem = &endpoint->buf;
  19576. struct sps_pipe *pipe = endpoint->sps;
  19577. struct sps_iovec iovec;
  19578. - int ret;
  19579. -
  19580. - /* first transaction after establishing connection */
  19581. - if (dev->tx_idx == -1) {
  19582. - dev->tx_idx = 0;
  19583. - return mem->base;
  19584. + int idx, ret = 0;
  19585. + if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
  19586. + /* use 1 buffer, non-blocking writes are not possible */
  19587. + if (dev->wr_comp[0]) {
  19588. + struct completion *comp = dev->wr_comp[0];
  19589. + dev->wr_comp[0] = NULL;
  19590. + complete(comp);
  19591. + }
  19592. + return;
  19593. }
  19594. - ret = sps_get_iovec(pipe, &iovec);
  19595. - if (ret || iovec.addr == 0) {
  19596. - dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
  19597. + while (!ret) {
  19598. + ret = sps_get_iovec(pipe, &iovec);
  19599. + if (ret || iovec.addr == 0) {
  19600. + if (ret)
  19601. + pr_err("SLIM TX get IOVEC failed:%d", ret);
  19602. + return;
  19603. + }
  19604. + idx = (int) ((iovec.addr - mem->phys_base) / SLIM_MSGQ_BUF_LEN);
  19605. + if (idx < MSM_TX_BUFS && dev->wr_comp[idx]) {
  19606. + struct completion *comp = dev->wr_comp[idx];
  19607. + dev->wr_comp[idx] = NULL;
  19608. + complete(comp);
  19609. + }
  19610. + /* reclaim all packets that were delivered out of order */
  19611. + if (idx != dev->tx_head)
  19612. + pr_err("SLIM OUT OF ORDER TX:idx:%d, head:%d", idx,
  19613. + dev->tx_head);
  19614. + while (idx == dev->tx_head) {
  19615. + dev->tx_head = (dev->tx_head + 1) % MSM_TX_BUFS;
  19616. + idx++;
  19617. + if (dev->tx_head == dev->tx_tail ||
  19618. + dev->wr_comp[idx] != NULL)
  19619. + break;
  19620. + }
  19621. + }
  19622. +}
  19623. +
  19624. +static u32 *msm_slim_modify_tx_buf(struct msm_slim_ctrl *dev,
  19625. + struct completion *comp)
  19626. +{
  19627. + struct msm_slim_endp *endpoint = &dev->tx_msgq;
  19628. + struct sps_mem_buffer *mem = &endpoint->buf;
  19629. + u32 *retbuf = NULL;
  19630. + if ((dev->tx_tail + 1) % MSM_TX_BUFS == dev->tx_head)
  19631. + return NULL;
  19632. +
  19633. + retbuf = (u32 *)((u8 *)mem->base +
  19634. + (dev->tx_tail * SLIM_MSGQ_BUF_LEN));
  19635. + dev->wr_comp[dev->tx_tail] = comp;
  19636. + dev->tx_tail = (dev->tx_tail + 1) % MSM_TX_BUFS;
  19637. + return retbuf;
  19638. +}
  19639. +u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
  19640. + struct completion *comp)
  19641. +{
  19642. + int ret = 0;
  19643. + int retries = 0;
  19644. + u32 *retbuf = NULL;
  19645. +
  19646. + mutex_lock(&dev->tx_buf_lock);
  19647. + if (!getbuf) {
  19648. + msm_slim_tx_msg_return(dev);
  19649. + mutex_unlock(&dev->tx_buf_lock);
  19650. return NULL;
  19651. }
  19652.  
  19653. - /* Calculate buffer index */
  19654. - dev->tx_idx = ((int)(iovec.addr - mem->phys_base)) / SLIM_MSGQ_BUF_LEN;
  19655. + retbuf = msm_slim_modify_tx_buf(dev, comp);
  19656. + if (retbuf) {
  19657. + mutex_unlock(&dev->tx_buf_lock);
  19658. + return retbuf;
  19659. + }
  19660.  
  19661. - return (u32 *)((u8 *)mem->base + (dev->tx_idx * SLIM_MSGQ_BUF_LEN));
  19662. + do {
  19663. + msm_slim_tx_msg_return(dev);
  19664. + retbuf = msm_slim_modify_tx_buf(dev, comp);
  19665. + if (!retbuf)
  19666. + ret = -EAGAIN;
  19667. + else {
  19668. + if (retries > 0)
  19669. + SLIM_INFO(dev, "SLIM TX retrieved:%d retries",
  19670. + retries);
  19671. + mutex_unlock(&dev->tx_buf_lock);
  19672. + return retbuf;
  19673. + }
  19674. +
  19675. + /*
  19676. + * superframe size will vary based on clock gear
  19677. + * 1 superframe will consume at least 1 message
  19678. + * if HW is in good condition. With MX_RETRIES,
  19679. + * make sure we wait for a [3, 10] superframes
  19680. + * before deciding HW couldn't process descriptors
  19681. + */
  19682. + usleep_range(100, 250);
  19683. + retries++;
  19684. + } while (ret && (retries < INIT_MX_RETRIES));
  19685. +
  19686. + mutex_unlock(&dev->tx_buf_lock);
  19687. + return NULL;
  19688. }
  19689.  
  19690. int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
  19691. @@ -453,16 +535,19 @@ int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
  19692. return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
  19693. }
  19694.  
  19695. -u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len)
  19696. +u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
  19697. + struct completion *comp)
  19698. {
  19699. /*
  19700. * Currently we block a transaction until the current one completes.
  19701. * In case we need multiple transactions, use message Q
  19702. */
  19703. - if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED)
  19704. + if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
  19705. + dev->wr_comp[0] = comp;
  19706. return dev->tx_buf;
  19707. + }
  19708.  
  19709. - return msm_slim_tx_msgq_return(dev);
  19710. + return msm_slim_manage_tx_msgq(dev, true, comp);
  19711. }
  19712.  
  19713. static void
  19714. @@ -612,7 +697,8 @@ int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
  19715. }
  19716. dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
  19717. } else {
  19718. - dev->tx_idx = -1;
  19719. + dev->tx_tail = 0;
  19720. + dev->tx_head = 0;
  19721. dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
  19722. }
  19723.  
  19724. @@ -719,16 +805,18 @@ static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
  19725. config->options = SPS_O_ERROR | SPS_O_NO_Q |
  19726. SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
  19727.  
  19728. + /* Desc and TX buf are circular queues */
  19729. /* Allocate memory for the FIFO descriptors */
  19730. ret = msm_slim_sps_mem_alloc(dev, descr,
  19731. - MSM_TX_BUFS * sizeof(struct sps_iovec));
  19732. + (MSM_TX_BUFS + 1) * sizeof(struct sps_iovec));
  19733. if (ret) {
  19734. dev_err(dev->dev, "unable to allocate SPS descriptors\n");
  19735. goto alloc_descr_failed;
  19736. }
  19737.  
  19738. - /* Allocate memory for the message buffer(s), N descrs, 40-byte mesg */
  19739. - ret = msm_slim_sps_mem_alloc(dev, mem, MSM_TX_BUFS * SLIM_MSGQ_BUF_LEN);
  19740. + /* Allocate TX buffer from which descriptors are created */
  19741. + ret = msm_slim_sps_mem_alloc(dev, mem, ((MSM_TX_BUFS + 1) *
  19742. + SLIM_MSGQ_BUF_LEN));
  19743. if (ret) {
  19744. dev_err(dev->dev, "dma_alloc_coherent failed\n");
  19745. goto alloc_buffer_failed;
  19746. @@ -871,10 +959,16 @@ static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
  19747.  
  19748. void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
  19749. {
  19750. + int i;
  19751. +
  19752. if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
  19753. msm_slim_remove_ep(dev, &dev->rx_msgq, &dev->use_rx_msgqs);
  19754. if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
  19755. msm_slim_remove_ep(dev, &dev->tx_msgq, &dev->use_tx_msgqs);
  19756. + for (i = dev->port_b; i < MSM_SLIM_NPORTS; i++) {
  19757. + if (dev->pipes[i - dev->port_b].connected)
  19758. + msm_slim_disconn_pipe_port(dev, i - dev->port_b);
  19759. + }
  19760. if (dereg) {
  19761. int i;
  19762. for (i = dev->port_b; i < MSM_SLIM_NPORTS; i++) {
  19763. diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
  19764. index 9673208..99297a9 100644
  19765. --- a/drivers/slimbus/slim-msm.h
  19766. +++ b/drivers/slimbus/slim-msm.h
  19767. @@ -22,7 +22,7 @@
  19768. /* Per spec.max 40 bytes per received message */
  19769. #define SLIM_MSGQ_BUF_LEN 40
  19770.  
  19771. -#define MSM_TX_BUFS 2
  19772. +#define MSM_TX_BUFS 32
  19773.  
  19774. #define SLIM_USR_MC_GENERIC_ACK 0x25
  19775. #define SLIM_USR_MC_MASTER_CAPABILITY 0x0
  19776. @@ -214,7 +214,7 @@ struct msm_slim_qmi {
  19777. struct work_struct ssr_up;
  19778. };
  19779.  
  19780. -struct msm_slim_mdm {
  19781. +struct msm_slim_ss {
  19782. struct notifier_block nb;
  19783. void *ssr;
  19784. enum msm_ctrl_state state;
  19785. @@ -236,14 +236,15 @@ struct msm_slim_ctrl {
  19786. u8 msg_cnt;
  19787. u32 tx_buf[10];
  19788. u8 rx_msgs[MSM_CONCUR_MSG][SLIM_MSGQ_BUF_LEN];
  19789. - int tx_idx;
  19790. + int tx_tail;
  19791. + int tx_head;
  19792. spinlock_t rx_lock;
  19793. int head;
  19794. int tail;
  19795. int irq;
  19796. int err;
  19797. int ee;
  19798. - struct completion *wr_comp;
  19799. + struct completion **wr_comp;
  19800. struct msm_slim_sat *satd[MSM_MAX_NSATS];
  19801. struct msm_slim_endp pipes[7];
  19802. struct msm_slim_sps_bam bam;
  19803. @@ -254,6 +255,7 @@ struct msm_slim_ctrl {
  19804. struct clk *rclk;
  19805. struct clk *hclk;
  19806. struct mutex tx_lock;
  19807. + struct mutex tx_buf_lock;
  19808. u8 pgdla;
  19809. enum msm_slim_msgq use_rx_msgqs;
  19810. enum msm_slim_msgq use_tx_msgqs;
  19811. @@ -267,7 +269,8 @@ struct msm_slim_ctrl {
  19812. u32 ver;
  19813. struct msm_slim_qmi qmi;
  19814. struct msm_slim_pdata pdata;
  19815. - struct msm_slim_mdm mdm;
  19816. + struct msm_slim_ss ext_mdm;
  19817. + struct msm_slim_ss dsp;
  19818. int default_ipc_log_mask;
  19819. int ipc_log_mask;
  19820. bool sysfs_created;
  19821. @@ -372,7 +375,10 @@ enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
  19822. int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
  19823. u32 len, struct completion *comp);
  19824. int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg);
  19825. -u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len);
  19826. +u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
  19827. + struct completion *comp);
  19828. +u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
  19829. + struct completion *comp);
  19830. int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset);
  19831. int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
  19832. u32 pipe_reg, bool remote);
  19833. diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
  19834. index 02f4b53..d5d49c5 100644
  19835. --- a/drivers/slimbus/slimbus.c
  19836. +++ b/drivers/slimbus/slimbus.c
  19837. @@ -1102,7 +1102,7 @@ int slim_xfer_msg(struct slim_controller *ctrl, struct slim_device *sbdev,
  19838. } else
  19839. ret = slim_processtxn(ctrl, SLIM_MSG_DEST_LOGICALADDR, mc, ec,
  19840. SLIM_MSG_MT_CORE, rbuf, wbuf, len, mlen,
  19841. - NULL, sbdev->laddr, NULL);
  19842. + msg->comp, sbdev->laddr, NULL);
  19843. xfer_err:
  19844. return ret;
  19845. }
  19846. diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
  19847. index aa403a3..126b8b3 100644
  19848. --- a/drivers/spi/spi_qsd.c
  19849. +++ b/drivers/spi/spi_qsd.c
  19850. @@ -2115,10 +2115,8 @@ static void msm_spi_process_message(struct msm_spi *dd)
  19851. dd->num_xfrs_grped = 1;
  19852. msm_spi_process_transfer(dd);
  19853. }
  19854. -
  19855. if (dd->qup_ver)
  19856. write_force_cs(dd, 0);
  19857. -
  19858. return;
  19859.  
  19860. error:
  19861. @@ -2890,17 +2888,6 @@ struct msm_spi_platform_data * __init msm_spi_dt_to_pdata(
  19862. }
  19863. }
  19864.  
  19865. -#ifdef ENABLE_SENSORS_FPRINT_SECURE
  19866. - /* Even if you set the bam setting, */
  19867. - /* you can't access bam when you use tzspi */
  19868. - if ((dd->cs_gpios[0].gpio_num) == FP_SPI_CS) {
  19869. - pdata->use_bam = false;
  19870. - pr_info("%s: disable bam for BLSP5 tzspi\n", __func__);
  19871. - }
  19872. -#endif
  19873. - dev_warn(&pdev->dev,
  19874. - "%s pdata->use_bam: %d", __func__, pdata->use_bam);
  19875. -
  19876. if (pdata->use_bam) {
  19877. if (!pdata->bam_consumer_pipe_index) {
  19878. dev_warn(&pdev->dev,
  19879. @@ -2962,85 +2949,6 @@ static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
  19880. return 0;
  19881. }
  19882.  
  19883. -#ifdef ENABLE_SENSORS_FPRINT_SECURE
  19884. -int fp_spi_clock_set_rate(struct spi_device *spidev)
  19885. -{
  19886. - struct msm_spi *dd;
  19887. -
  19888. - if (!spidev) {
  19889. - pr_err("%s: spidev pointer is NULL\n", __func__);
  19890. - return -EFAULT;
  19891. - }
  19892. -
  19893. - dd = spi_master_get_devdata(spidev->master);
  19894. - if (!dd) {
  19895. - pr_err("%s: spi master pointer is NULL\n", __func__);
  19896. - return -EFAULT;
  19897. - }
  19898. -
  19899. - msm_spi_clock_set(dd, spidev->max_speed_hz);
  19900. -
  19901. - pr_info("%s sucess\n", __func__);
  19902. - return 0;
  19903. -}
  19904. -EXPORT_SYMBOL_GPL(fp_spi_clock_set_rate);
  19905. -
  19906. -int fp_spi_clock_enable(struct spi_device *spidev)
  19907. -{
  19908. - struct msm_spi *dd;
  19909. - int rc;
  19910. -
  19911. - if (!spidev) {
  19912. - pr_err("%s: spidev pointer is NULL\n", __func__);
  19913. - return -EFAULT;
  19914. - }
  19915. -
  19916. - dd = spi_master_get_devdata(spidev->master);
  19917. - if (!dd) {
  19918. - pr_err("%s: spi master pointer is NULL\n", __func__);
  19919. - return -EFAULT;
  19920. - }
  19921. -
  19922. - rc = clk_prepare_enable(dd->clk);
  19923. - if (rc) {
  19924. - pr_err("%s: unable to enable core_clk\n", __func__);
  19925. - return rc;
  19926. - }
  19927. -
  19928. - rc = clk_prepare_enable(dd->pclk);
  19929. - if (rc) {
  19930. - pr_err("%s: unable to enable iface_clk\n", __func__);
  19931. - return rc;
  19932. - }
  19933. - pr_info("%s sucess\n", __func__);
  19934. - return 0;
  19935. -}
  19936. -EXPORT_SYMBOL_GPL(fp_spi_clock_enable);
  19937. -
  19938. -int fp_spi_clock_disable(struct spi_device *spidev)
  19939. -{
  19940. - struct msm_spi *dd;
  19941. -
  19942. - if (!spidev) {
  19943. - pr_err("%s: spidev pointer is NULL\n", __func__);
  19944. - return -EFAULT;
  19945. - }
  19946. -
  19947. - dd = spi_master_get_devdata(spidev->master);
  19948. - if (!dd) {
  19949. - pr_err("%s: spi master pointer is NULL\n", __func__);
  19950. - return -EFAULT;
  19951. - }
  19952. -
  19953. - clk_disable_unprepare(dd->clk);
  19954. - clk_disable_unprepare(dd->pclk);
  19955. -
  19956. - pr_info("%s sucess\n", __func__);
  19957. - return 0;
  19958. -}
  19959. -EXPORT_SYMBOL_GPL(fp_spi_clock_disable);
  19960. -#endif
  19961. -
  19962. static int __init msm_spi_probe(struct platform_device *pdev)
  19963. {
  19964. struct spi_master *master;
  19965. diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
  19966. index 92b971d..7a000a6 100644
  19967. --- a/drivers/thermal/msm_thermal.c
  19968. +++ b/drivers/thermal/msm_thermal.c
  19969. @@ -1586,7 +1586,6 @@ static __ref int do_freq_mitigation(void *data)
  19970. ;
  19971. INIT_COMPLETION(freq_mitigation_complete);
  19972.  
  19973. - get_online_cpus();
  19974. for_each_possible_cpu(cpu) {
  19975. max_freq_req = (cpus[cpu].max_freq) ?
  19976. msm_thermal_info.freq_limit :
  19977. @@ -1614,7 +1613,6 @@ reset_threshold:
  19978. cpus[cpu].freq_thresh_clear = false;
  19979. }
  19980. }
  19981. - put_online_cpus();
  19982. }
  19983. return ret;
  19984. }
  19985. diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
  19986. index 3ba54e7..3b4221c 100644
  19987. --- a/drivers/usb/dwc3/gadget.c
  19988. +++ b/drivers/usb/dwc3/gadget.c
  19989. @@ -2708,13 +2708,13 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
  19990. {
  19991. pr_info("usb:: %s\n", __func__);
  19992.  
  19993. - /*
  19994. - * TODO take core out of low power mode when that's
  19995. - * implemented.
  19996. - */
  19997. + /* Only perform resume from L2 or Early suspend states */
  19998. + if (dwc->link_state == DWC3_LINK_STATE_U3) {
  19999. + dbg_event(0xFF, "WAKEUP", 0);
  20000. + dwc->gadget_driver->resume(&dwc->gadget);
  20001. + }
  20002.  
  20003. - dbg_event(0xFF, "WAKEUP", 0);
  20004. - dwc->gadget_driver->resume(&dwc->gadget);
  20005. + dwc->link_state = DWC3_LINK_STATE_U0;
  20006. }
  20007.  
  20008. static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
  20009. diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
  20010. index 0e3cef2..859f188 100644
  20011. --- a/drivers/usb/otg/msm_otg.c
  20012. +++ b/drivers/usb/otg/msm_otg.c
  20013. @@ -1,4 +1,4 @@
  20014. -/* Copyright (c) 2009-2014, Linux Foundation. All rights reserved.
  20015. +/* Copyright (c) 2009-2015, Linux Foundation. All rights reserved.
  20016. *
  20017. * This program is free software; you can redistribute it and/or modify
  20018. * it under the terms of the GNU General Public License version 2 and
  20019. @@ -1081,7 +1081,8 @@ static int msm_otg_suspend(struct msm_otg *motg)
  20020. phy_ctrl_val |= PHY_OTGSESSVLDHV_INTEN;
  20021. }
  20022. if (host_bus_suspend)
  20023. - phy_ctrl_val |= PHY_CLAMP_DPDMSE_EN;
  20024. + phy_ctrl_val |= (PHY_CLAMP_DPDMSE_EN |PHY_DMSE_INTEN |
  20025. + PHY_DPSE_INTEN);
  20026.  
  20027. if (!(motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
  20028. writel_relaxed(phy_ctrl_val & ~PHY_RETEN, USB_PHY_CTRL);
  20029. @@ -1239,7 +1240,8 @@ static int msm_otg_resume(struct msm_otg *motg)
  20030. /* Disable PHY HV interrupts */
  20031. phy_ctrl_val &=
  20032. ~(PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
  20033. - phy_ctrl_val &= ~(PHY_CLAMP_DPDMSE_EN);
  20034. + phy_ctrl_val &= ~(PHY_CLAMP_DPDMSE_EN | PHY_DMSE_INTEN |
  20035. + PHY_DPSE_INTEN);
  20036. writel_relaxed(phy_ctrl_val, USB_PHY_CTRL);
  20037. motg->lpm_flags &= ~PHY_RETENTIONED;
  20038. }
  20039. diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
  20040. index da26a48..0219ce2 100644
  20041. --- a/drivers/video/msm/mdss/dsi_host_v2.c
  20042. +++ b/drivers/video/msm/mdss/dsi_host_v2.c
  20043. @@ -682,14 +682,25 @@ int msm_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
  20044. return rc;
  20045. }
  20046.  
  20047. +/* MIPI_DSI_MRPS, Maximum Return Packet Size */
  20048. +static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
  20049. +
  20050. +static struct dsi_cmd_desc pkt_size_cmd = {
  20051. + {DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(max_pktsize)},
  20052. + max_pktsize,
  20053. +};
  20054. +
  20055. int msm_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
  20056. struct dsi_buf *rp, int rlen)
  20057. {
  20058. - u32 *lp, data;
  20059. - int i, off, cnt;
  20060. + u32 *lp, data, *temp;
  20061. + int i, j = 0, off, cnt;
  20062. unsigned char *ctrl_base = dsi_host_private->dsi_base;
  20063. + char reg[16];
  20064. + int repeated_bytes = 0;
  20065.  
  20066. lp = (u32 *)rp->data;
  20067. + temp = (u32 *)reg;
  20068. cnt = rlen;
  20069. cnt += 3;
  20070. cnt >>= 2;
  20071. @@ -697,16 +708,52 @@ int msm_dsi_cmd_dma_rx(struct mdss_dsi_ctrl_pdata *ctrl,
  20072. if (cnt > 4)
  20073. cnt = 4; /* 4 x 32 bits registers only */
  20074.  
  20075. + if (rlen == 4)
  20076. + rp->read_cnt = 4;
  20077. + else
  20078. + rp->read_cnt = (max_pktsize[0] + 6);
  20079. +
  20080. + if (rp->read_cnt > 16) {
  20081. + int bytes_shifted, data_lost = 0, rem_header_bytes = 0;
  20082. + /* Any data more than 16 bytes will be shifted out */
  20083. + bytes_shifted = rp->read_cnt - rlen;
  20084. + if (bytes_shifted >= 4)
  20085. + data_lost = bytes_shifted - 4; /* remove dcs header */
  20086. + else
  20087. + rem_header_bytes = 4 - bytes_shifted; /* rem header */
  20088. + /*
  20089. + * (rp->len - 4) -> current rx buffer data length.
  20090. + * If data_lost > 0, then ((rp->len - 4) - data_lost) will be
  20091. + * the number of repeating bytes.
  20092. + * If data_lost == 0, then ((rp->len - 4) + rem_header_bytes)
  20093. + * will be the number of bytes repeating in between rx buffer
  20094. + * and the current RDBK_DATA registers. We need to skip the
  20095. + * repeating bytes.
  20096. + */
  20097. + repeated_bytes = (rp->len - 4) - data_lost + rem_header_bytes;
  20098. + }
  20099. +
  20100. off = DSI_RDBK_DATA0;
  20101. off += ((cnt - 1) * 4);
  20102.  
  20103. for (i = 0; i < cnt; i++) {
  20104. data = (u32)MIPI_INP(ctrl_base + off);
  20105. - *lp++ = ntohl(data); /* to network byte order */
  20106. + /* to network byte order */
  20107. + if (!repeated_bytes)
  20108. + *lp++ = ntohl(data);
  20109. + else
  20110. + *temp++ = ntohl(data);
  20111. pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
  20112. __func__, data, ntohl(data));
  20113. off -= 4;
  20114. - rp->len += sizeof(*lp);
  20115. + if (rlen == 4)
  20116. + rp->len += sizeof(*lp);
  20117. + }
  20118. +
  20119. + /* Skip duplicates and append other data to the rx buffer */
  20120. + if (repeated_bytes) {
  20121. + for (i = repeated_bytes; i < 16; i++)
  20122. + rp->data[j++] = reg[i];
  20123. }
  20124.  
  20125. return rlen;
  20126. @@ -798,14 +845,6 @@ static int msm_dsi_parse_rx_response(struct dsi_buf *rp)
  20127. return rc;
  20128. }
  20129.  
  20130. -/* MIPI_DSI_MRPS, Maximum Return Packet Size */
  20131. -static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
  20132. -
  20133. -static struct dsi_cmd_desc pkt_size_cmd = {
  20134. - {DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(max_pktsize)},
  20135. - max_pktsize,
  20136. -};
  20137. -
  20138. static int msm_dsi_set_max_packet_size(struct mdss_dsi_ctrl_pdata *ctrl,
  20139. int size)
  20140. {
  20141. @@ -843,10 +882,23 @@ static int msm_dsi_cmds_rx_1(struct mdss_dsi_ctrl_pdata *ctrl,
  20142. {
  20143. int rc;
  20144. struct dsi_buf *tp, *rp;
  20145. + int rx_byte = 0;
  20146. +
  20147. + if (rlen <= 2)
  20148. + rx_byte = 4;
  20149. + else
  20150. + rx_byte = DSI_MAX_BYTES_TO_READ;
  20151.  
  20152. tp = &ctrl->tx_buf;
  20153. rp = &ctrl->rx_buf;
  20154. mdss_dsi_buf_init(rp);
  20155. + rc = msm_dsi_set_max_packet_size(ctrl, rlen);
  20156. + if (rc) {
  20157. + pr_err("%s: dsi_set_max_pkt failed\n", __func__);
  20158. + rc = -EINVAL;
  20159. + goto dsi_cmds_rx_1_error;
  20160. + }
  20161. +
  20162. mdss_dsi_buf_init(tp);
  20163.  
  20164. rc = mdss_dsi_cmd_dma_add(tp, cmds);
  20165. @@ -869,10 +921,12 @@ static int msm_dsi_cmds_rx_1(struct mdss_dsi_ctrl_pdata *ctrl,
  20166. }
  20167.  
  20168. if (rlen <= DSI_SHORT_PKT_DATA_SIZE) {
  20169. - msm_dsi_cmd_dma_rx(ctrl, rp, rlen);
  20170. + msm_dsi_cmd_dma_rx(ctrl, rp, rx_byte);
  20171. } else {
  20172. - msm_dsi_cmd_dma_rx(ctrl, rp, rlen + DSI_HOST_HDR_SIZE);
  20173. - rp->len = rlen + DSI_HOST_HDR_SIZE;
  20174. + msm_dsi_cmd_dma_rx(ctrl, rp, rx_byte);
  20175. + rp->len = rx_byte - 2; /*2 bytes for CRC*/
  20176. + rp->len = rp->len - (DSI_MAX_PKT_SIZE - rlen);
  20177. + rp->data = rp->start + (16 - (rlen + 2 + DSI_HOST_HDR_SIZE));
  20178. }
  20179. rc = msm_dsi_parse_rx_response(rp);
  20180.  
  20181. @@ -889,16 +943,15 @@ static int msm_dsi_cmds_rx_2(struct mdss_dsi_ctrl_pdata *ctrl,
  20182. {
  20183. int rc;
  20184. struct dsi_buf *tp, *rp;
  20185. - int pkt_size, data_bytes, total;
  20186. + int pkt_size, data_bytes, dlen, end = 0, diff;
  20187.  
  20188. tp = &ctrl->tx_buf;
  20189. rp = &ctrl->rx_buf;
  20190. mdss_dsi_buf_init(rp);
  20191. pkt_size = DSI_MAX_PKT_SIZE;
  20192. data_bytes = MDSS_DSI_LEN;
  20193. - total = 0;
  20194.  
  20195. - while (true) {
  20196. + while (!end) {
  20197. rc = msm_dsi_set_max_packet_size(ctrl, pkt_size);
  20198. if (rc)
  20199. break;
  20200. @@ -909,7 +962,7 @@ static int msm_dsi_cmds_rx_2(struct mdss_dsi_ctrl_pdata *ctrl,
  20201. pr_err("%s: dsi_cmd_dma_add failed\n", __func__);
  20202. rc = -EINVAL;
  20203. break;
  20204. - }
  20205. + }
  20206. rc = msm_dsi_wait4video_eng_busy(ctrl);
  20207. if (rc) {
  20208. pr_err("%s: wait4video_eng failed\n", __func__);
  20209. @@ -923,19 +976,32 @@ static int msm_dsi_cmds_rx_2(struct mdss_dsi_ctrl_pdata *ctrl,
  20210. }
  20211.  
  20212. msm_dsi_cmd_dma_rx(ctrl, rp, DSI_MAX_BYTES_TO_READ);
  20213. -
  20214. - rp->data += DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE;
  20215. - total += data_bytes;
  20216. - if (total >= rlen)
  20217. - break;
  20218. -
  20219. - data_bytes = DSI_MAX_BYTES_TO_READ - DSI_HOST_HDR_SIZE;
  20220. - pkt_size += data_bytes;
  20221. + if (rlen <= data_bytes) {
  20222. + diff = data_bytes - rlen;
  20223. + end = 1;
  20224. + } else {
  20225. + diff = 0;
  20226. + rlen -= data_bytes;
  20227. + }
  20228. + dlen = DSI_MAX_BYTES_TO_READ - 2;
  20229. + dlen -= diff;
  20230. + rp->data += dlen;
  20231. + rp->len += dlen;
  20232. +
  20233. + if (!end) {
  20234. + data_bytes = 14;
  20235. + if (rlen < data_bytes)
  20236. + pkt_size += rlen;
  20237. + else
  20238. + pkt_size += data_bytes;
  20239. + }
  20240. + pr_debug("%s: rp data=%x len=%d dlen=%d diff=%d\n",
  20241. + __func__, (int) (unsigned long) rp->data,
  20242. + rp->len, dlen, diff);
  20243. }
  20244.  
  20245. if (!rc) {
  20246. rp->data = rp->start;
  20247. - rp->len = rlen + DSI_HOST_HDR_SIZE;
  20248. rc = msm_dsi_parse_rx_response(rp);
  20249. }
  20250.  
  20251. diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
  20252. index 28e3344..879fea8 100644
  20253. --- a/drivers/video/msm/mdss/mdp3_ctrl.c
  20254. +++ b/drivers/video/msm/mdss/mdp3_ctrl.c
  20255. @@ -1615,8 +1615,9 @@ static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
  20256. lut_config.lut_sel = mdp3_session->lut_sel;
  20257. lut_config.lut_position = 0;
  20258. lut_config.lut_dirty = true;
  20259. - lut.color0_lut = r;
  20260. - lut.color1_lut = g;
  20261. + /* In HW the order is color0 = g, color1 = r and color2 = b*/
  20262. + lut.color0_lut = g;
  20263. + lut.color1_lut = r;
  20264. lut.color2_lut = b;
  20265.  
  20266. mutex_lock(&mdp3_session->lock);
  20267. diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
  20268. index 2dd66f8..7afbf46 100644
  20269. --- a/drivers/video/msm/mdss/mdp3_dma.c
  20270. +++ b/drivers/video/msm/mdss/mdp3_dma.c
  20271. @@ -16,6 +16,7 @@
  20272. #include "mdp3.h"
  20273. #include "mdp3_dma.h"
  20274. #include "mdp3_hwio.h"
  20275. +#include "mdss_debug.h"
  20276.  
  20277. #define DMA_STOP_POLL_SLEEP_US 1000
  20278. #define DMA_STOP_POLL_TIMEOUT_US 200000
  20279. @@ -610,17 +611,20 @@ static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf,
  20280. int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
  20281. int rc = 0;
  20282.  
  20283. + ATRACE_BEGIN(__func__);
  20284. pr_debug("mdp3_dmap_update\n");
  20285.  
  20286. if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
  20287. cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
  20288. if (intf->active) {
  20289. + ATRACE_BEGIN("mdp3_wait_for_dma_comp");
  20290. rc = wait_for_completion_timeout(&dma->dma_comp,
  20291. KOFF_TIMEOUT);
  20292. if (rc <= 0) {
  20293. WARN(1, "cmd kickoff timed out (%d)\n", rc);
  20294. rc = -1;
  20295. }
  20296. + ATRACE_END("mdp3_wait_for_dma_comp");
  20297. }
  20298. }
  20299. if (dma->update_src_cfg) {
  20300. @@ -652,12 +656,15 @@ static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf,
  20301. mdp3_dma_callback_enable(dma, cb_type);
  20302. pr_debug("mdp3_dmap_update wait for vsync_comp in\n");
  20303. if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
  20304. + ATRACE_BEGIN("mdp3_wait_for_vsync_comp");
  20305. rc = wait_for_completion_timeout(&dma->vsync_comp,
  20306. KOFF_TIMEOUT);
  20307. if (rc <= 0)
  20308. rc = -1;
  20309. + ATRACE_END("mdp3_wait_for_vsync_comp");
  20310. }
  20311. pr_debug("mdp3_dmap_update wait for vsync_comp out\n");
  20312. + ATRACE_END(__func__);
  20313. return rc;
  20314. }
  20315.  
  20316. @@ -763,7 +770,7 @@ static int mdp3_dmap_histo_get(struct mdp3_dma *dma)
  20317. MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);
  20318.  
  20319. spin_lock_irqsave(&dma->histo_lock, flag);
  20320. - init_completion(&dma->histo_comp);
  20321. + INIT_COMPLETION(dma->histo_comp);
  20322. MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
  20323. wmb();
  20324. dma->histo_state = MDP3_DMA_HISTO_STATE_START;
  20325. @@ -781,7 +788,7 @@ static int mdp3_dmap_histo_start(struct mdp3_dma *dma)
  20326.  
  20327. spin_lock_irqsave(&dma->histo_lock, flag);
  20328.  
  20329. - init_completion(&dma->histo_comp);
  20330. + INIT_COMPLETION(dma->histo_comp);
  20331. MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
  20332. wmb();
  20333. dma->histo_state = MDP3_DMA_HISTO_STATE_START;
  20334. @@ -800,7 +807,7 @@ static int mdp3_dmap_histo_reset(struct mdp3_dma *dma)
  20335.  
  20336. spin_lock_irqsave(&dma->histo_lock, flag);
  20337.  
  20338. - init_completion(&dma->histo_comp);
  20339. + INIT_COMPLETION(dma->histo_comp);
  20340.  
  20341. mdp3_dma_clk_auto_gating(dma, 0);
  20342.  
  20343. diff --git a/drivers/video/msm/mdss/mdp3_ppp.c b/drivers/video/msm/mdss/mdp3_ppp.c
  20344. index 0cb3c08..f21f3ef 100644
  20345. --- a/drivers/video/msm/mdss/mdp3_ppp.c
  20346. +++ b/drivers/video/msm/mdss/mdp3_ppp.c
  20347. @@ -29,6 +29,7 @@
  20348. #include "mdp3_ppp.h"
  20349. #include "mdp3_hwio.h"
  20350. #include "mdp3.h"
  20351. +#include "mdss_debug.h"
  20352.  
  20353. #define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
  20354. #define MDP_RELEASE_BW_TIMEOUT 50
  20355. @@ -332,7 +333,9 @@ void mdp3_ppp_kickoff(void)
  20356. init_completion(&ppp_stat->ppp_comp);
  20357. mdp3_irq_enable(MDP3_PPP_DONE);
  20358. ppp_enable();
  20359. + ATRACE_BEGIN("mdp3_wait_for_ppp_comp");
  20360. mdp3_ppp_pipe_wait();
  20361. + ATRACE_END("mdp3_wait_for_ppp_comp");
  20362. mdp3_irq_disable(MDP3_PPP_DONE);
  20363. }
  20364.  
  20365. @@ -893,6 +896,7 @@ int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd,
  20366. void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
  20367. {
  20368. int i, ret = 0;
  20369. + ATRACE_BEGIN(__func__);
  20370. /* buf sync */
  20371. for (i = 0; i < req->acq_fen_cnt; i++) {
  20372. ret = sync_fence_wait(req->acq_fen[i],
  20373. @@ -904,7 +908,7 @@ void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
  20374. }
  20375. sync_fence_put(req->acq_fen[i]);
  20376. }
  20377. -
  20378. + ATRACE_END(__func__);
  20379. if (ret < 0) {
  20380. while (i < req->acq_fen_cnt) {
  20381. sync_fence_put(req->acq_fen[i]);
  20382. @@ -1062,6 +1066,7 @@ static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
  20383. }
  20384. while (req) {
  20385. mdp3_ppp_wait_for_fence(req);
  20386. + ATRACE_BEGIN("mdp3_ppp_start");
  20387. for (i = 0; i < req->count; i++) {
  20388. if (!(req->req_list[i].flags & MDP_NO_BLIT)) {
  20389. /* Do the actual blit. */
  20390. @@ -1077,6 +1082,7 @@ static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
  20391. MDP3_CLIENT_PPP);
  20392. }
  20393. }
  20394. + ATRACE_END("mdp3_ppp_start");
  20395. /* Signal to release fence */
  20396. mutex_lock(&ppp_stat->req_mutex);
  20397. mdp3_ppp_signal_timeline(req);
  20398. diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
  20399. index ba8cabd..53ae680 100644
  20400. --- a/drivers/video/msm/mdss/mdss_dsi.h
  20401. +++ b/drivers/video/msm/mdss/mdss_dsi.h
  20402. @@ -48,8 +48,6 @@
  20403. #define MIPI_DSI_PANEL_720P_PT 8
  20404. #define DSI_PANEL_MAX 8
  20405.  
  20406. -//#define DSI_CLK_DEBUG
  20407. -
  20408. enum { /* mipi dsi panel */
  20409. DSI_VIDEO_MODE,
  20410. DSI_CMD_MODE,
  20411. @@ -84,7 +82,6 @@ enum dsi_panel_bl_ctrl {
  20412. BL_PWM,
  20413. BL_WLED,
  20414. BL_DCS_CMD,
  20415. - BL_GPIO_SWING,
  20416. UNKNOWN_CTRL,
  20417. };
  20418.  
  20419. @@ -160,7 +157,6 @@ enum dsi_lane_map_type {
  20420. #define DSI_CMD_TERM BIT(0)
  20421.  
  20422. extern struct device dsi_dev;
  20423. -extern int mdss_dsi_clk_on;
  20424. extern u32 dsi_irq;
  20425. extern struct mdss_dsi_ctrl_pdata *ctrl_list[];
  20426.  
  20427. @@ -205,20 +201,7 @@ struct dsi_clk_desc {
  20428. u32 pre_div_func;
  20429. };
  20430.  
  20431. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  20432. -#define DEBUG_LDI_STATUS
  20433. -#define DYNAMIC_FPS_USE_TE_CTRL
  20434. -extern int dynamic_fps_use_te_ctrl;
  20435. -#endif
  20436.  
  20437. -struct dsi_cmd {
  20438. - struct dsi_cmd_desc *cmd_desc;
  20439. - char *read_size;
  20440. - char *read_startoffset;
  20441. - int num_of_cmds;
  20442. - char *cmds_buff;
  20443. - int cmds_len;
  20444. -};
  20445. struct dsi_panel_cmds {
  20446. char *buf;
  20447. int blen;
  20448. @@ -227,8 +210,6 @@ struct dsi_panel_cmds {
  20449. int link_state;
  20450. };
  20451.  
  20452. -#define CMD_REQ_SINGLE_TX 0x0010
  20453. -
  20454. struct dsi_kickoff_action {
  20455. struct list_head act_entry;
  20456. void (*action) (void *);
  20457. @@ -239,7 +220,6 @@ struct dsi_drv_cm_data {
  20458. struct regulator *vdd_vreg;
  20459. struct regulator *vdd_io_vreg;
  20460. struct regulator *vdda_vreg;
  20461. - struct regulator *iovdd_vreg;
  20462. int broadcast_enable;
  20463. };
  20464.  
  20465. @@ -266,20 +246,10 @@ struct mdss_dsi_ctrl_pdata {
  20466. int ndx; /* panel_num */
  20467. int (*on) (struct mdss_panel_data *pdata);
  20468. int (*off) (struct mdss_panel_data *pdata);
  20469. -#if defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL)
  20470. - int (*mtp) (struct mdss_panel_data *pdata);
  20471. -#endif
  20472. int (*partial_update_fnc) (struct mdss_panel_data *pdata);
  20473. int (*check_status) (struct mdss_dsi_ctrl_pdata *pdata);
  20474. int (*cmdlist_commit)(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
  20475. void (*switch_mode) (struct mdss_panel_data *pdata, int mode);
  20476. - int (*registered) (struct mdss_panel_data *pdata);
  20477. - int (*dimming_init) (struct mdss_panel_data *pdata);
  20478. - int (*event_handler) (int e);
  20479. - int (*panel_blank) (struct mdss_panel_data *pdata, int blank);
  20480. - void (*panel_reset) (struct mdss_panel_data *pdata, int enable);
  20481. - int (*panel_extra_power) (struct mdss_panel_data *pdata, int enable);
  20482. - void (*bl_fnc) (struct mdss_panel_data *pdata, u32 level);
  20483. struct mdss_panel_data panel_data;
  20484. unsigned char *ctrl_base;
  20485. struct dss_io_data ctrl_io;
  20486. @@ -289,8 +259,6 @@ struct mdss_dsi_ctrl_pdata {
  20487. u32 bus_clk_cnt;
  20488. u32 link_clk_cnt;
  20489. u32 flags;
  20490. - u32 clk_cnt;
  20491. - u32 clk_cnt_by_dsi1;
  20492. struct clk *mdp_core_clk;
  20493. struct clk *ahb_clk;
  20494. struct clk *axi_clk;
  20495. @@ -301,39 +269,11 @@ struct mdss_dsi_ctrl_pdata {
  20496. u8 ctrl_state;
  20497. int panel_mode;
  20498. int irq_cnt;
  20499. - int mdss_dsi_clk_on;
  20500. int rst_gpio;
  20501. int disp_en_gpio;
  20502. - int disp_en_gpio2;
  20503. -#if defined(CONFIG_FB_MSM_MDSS_HX8394C_TFT_VIDEO_720P_PANEL)
  20504. - int disp_en_vsp_gpio;
  20505. - int disp_en_vsn_gpio;
  20506. -#endif
  20507. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQXGA_S6TNMR7_PT_PANEL)
  20508. - int tcon_ready_gpio;
  20509. -#endif
  20510. -#if defined(CONFIG_FB_MSM_MDSS_MAGNA_OCTA_VIDEO_720P_PANEL)
  20511. - int lcd_crack_det;
  20512. - int expander_enble_gpio;
  20513. -#endif
  20514. -#if defined(CONFIG_FB_MSM_MDSS_SHARP_HD_PANEL)
  20515. - int disp_en_gpio_p;
  20516. - int disp_en_gpio_n;
  20517. -#endif
  20518. -#if defined(CONFIG_FB_MSM_MIPI_MAGNA_OCTA_VIDEO_WXGA_PT_DUAL_PANEL)
  20519. - int lcd_crack_det_gpio;
  20520. - int lcd_esd_det_gpio;
  20521. - int lcd_sel_gpio;
  20522. - struct regulator *lcd_3p0_vreg;
  20523. - struct regulator *lcd_1p8_vreg;
  20524. -#endif
  20525. - int bl_on_gpio;
  20526. int disp_te_gpio;
  20527. int mode_gpio;
  20528. - int rst_gpio_requested;
  20529. - int disp_en_gpio_requested;
  20530. int disp_te_gpio_requested;
  20531. - int mode_gpio_requested;
  20532. int bklt_ctrl; /* backlight ctrl */
  20533. int pwm_period;
  20534. int pwm_pmic_gpio;
  20535. @@ -341,9 +281,7 @@ struct mdss_dsi_ctrl_pdata {
  20536. int bklt_max;
  20537. int new_fps;
  20538. int pwm_enabled;
  20539. -#if defined(CONFIG_CABC_TUNING_HX8394C)
  20540. - int current_cabc_duty;
  20541. -#endif
  20542. + bool dmap_iommu_map;
  20543. struct pwm_device *pwm_bl;
  20544. struct dsi_drv_cm_data shared_pdata;
  20545. u32 pclk_rate;
  20546. @@ -357,20 +295,6 @@ struct mdss_dsi_ctrl_pdata {
  20547. struct dsi_panel_cmds off_cmds;
  20548. struct dsi_panel_cmds status_cmds;
  20549. u32 status_value;
  20550. - struct dsi_panel_cmds ce_on_cmds;
  20551. - struct dsi_panel_cmds ce_off_cmds;
  20552. - struct dsi_panel_cmds cabc_on_cmds;
  20553. - struct dsi_panel_cmds cabc_off_cmds;
  20554. -#if defined(CONFIG_CABC_TUNING_HX8394C)
  20555. - struct dsi_panel_cmds cabc_duty_72;
  20556. - struct dsi_panel_cmds cabc_duty_74;
  20557. - struct dsi_panel_cmds cabc_duty_78;
  20558. - struct dsi_panel_cmds cabc_duty_82;
  20559. -#endif
  20560. - struct dsi_panel_cmds cabc_tune_cmds;
  20561. -#if defined(CONFIG_FB_MSM_MDSS_CPT_QHD_PANEL)
  20562. - struct dsi_panel_cmds disp_on_cmd;
  20563. -#endif
  20564.  
  20565. struct dsi_panel_cmds video2cmd;
  20566. struct dsi_panel_cmds cmd2video;
  20567. @@ -385,8 +309,6 @@ struct mdss_dsi_ctrl_pdata {
  20568. int mdp_busy;
  20569. struct mutex mutex;
  20570. struct mutex cmd_mutex;
  20571. - struct mutex dfps_mutex;
  20572. - int mdp_tg_on;
  20573.  
  20574. bool ulps;
  20575.  
  20576. @@ -394,10 +316,6 @@ struct mdss_dsi_ctrl_pdata {
  20577. struct dsi_buf rx_buf;
  20578. struct dsi_buf status_buf;
  20579. int status_mode;
  20580. - int dsi_err_cnt;
  20581. -#if defined(CONFIG_FB_MSM_MDSS_TC_DSI2LVDS_WXGA_PANEL)
  20582. - struct regulator *iovdd_vreg;
  20583. -#endif
  20584. };
  20585.  
  20586. struct dsi_status_data {
  20587. @@ -406,31 +324,6 @@ struct dsi_status_data {
  20588. struct msm_fb_data_type *mfd;
  20589. };
  20590.  
  20591. -#if defined(CONFIG_FB_MSM_MDSS_MDP3)
  20592. -enum {
  20593. - MIPI_RESUME_STATE,
  20594. - MIPI_SUSPEND_STATE,
  20595. -};
  20596. -
  20597. -struct mdss_dsi_driver_data {
  20598. - struct msm_fb_data_type *mfd;
  20599. - struct mdss_panel_data *pdata;
  20600. - struct mdss_dsi_ctrl_pdata *ctrl_pdata;
  20601. - struct mutex lock;
  20602. -#if defined(CONFIG_LCD_CLASS_DEVICE)
  20603. - const char *panel_name;
  20604. -#endif
  20605. -#if defined(CONFIG_GET_LCD_ATTACHED)
  20606. - unsigned int manufacture_id;
  20607. - int lcd_attached;
  20608. -#endif
  20609. -};
  20610. -#if defined(CONFIG_MDNIE_LITE_TUNING)
  20611. -void mdss_dsi_cmds_send(struct mdss_dsi_ctrl_pdata *ctrl, struct dsi_cmd_desc *cmds, int cnt);
  20612. -#endif
  20613. -#endif /* CONFIG_FB_MSM_MDSS_MDP3 */
  20614. -
  20615. -extern unsigned int gv_manufacture_id;
  20616. int dsi_panel_device_register(struct device_node *pan_node,
  20617. struct mdss_dsi_ctrl_pdata *ctrl_pdata);
  20618.  
  20619. @@ -452,7 +345,6 @@ int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
  20620. u8 clk_type, int enable);
  20621. void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
  20622. int enable);
  20623. -void mdss_dsi_clk_ctrl_mdp(int ndx, int enable);
  20624. void mdss_dsi_controller_cfg(int enable,
  20625. struct mdss_panel_data *pdata);
  20626. void mdss_dsi_sw_reset(struct mdss_panel_data *pdata);
  20627. @@ -468,11 +360,7 @@ int mdss_dsi_clk_init(struct platform_device *pdev,
  20628. void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
  20629. int mdss_dsi_enable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
  20630. void mdss_dsi_disable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
  20631. -#if defined(CONFIG_FB_MSM_MDSS_MDP3)
  20632. int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
  20633. -#else
  20634. -void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
  20635. -#endif
  20636. void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl);
  20637. void mdss_dsi_phy_init(struct mdss_panel_data *pdata);
  20638. void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base);
  20639. @@ -480,9 +368,6 @@ void mdss_dsi_cmd_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl);
  20640. void mdss_dsi_video_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl);
  20641. void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl);
  20642.  
  20643. -int mdss_dsi_cmds_single_tx(struct mdss_dsi_ctrl_pdata *ctrl,
  20644. - struct dsi_cmd_desc *cmds, int cnt);
  20645. -
  20646. void mdss_dsi_ctrl_init(struct mdss_dsi_ctrl_pdata *ctrl);
  20647. void mdss_dsi_cmd_mdp_busy(struct mdss_dsi_ctrl_pdata *ctrl);
  20648. void mdss_dsi_wait4video_done(struct mdss_dsi_ctrl_pdata *ctrl);
  20649. @@ -543,11 +428,4 @@ static inline struct mdss_dsi_ctrl_pdata *mdss_dsi_get_ctrl_by_index(int ndx)
  20650.  
  20651. return ctrl_list[ndx];
  20652. }
  20653. -void mdss_dsi_mdp_busy_wait(int panel_ndx);
  20654. -void mdss_dsi_dump_power_clk(struct mdss_panel_data *pdata, int flag);
  20655. -
  20656. -/*for mondrian*/
  20657. -void pwm_backlight_enable(void);
  20658. -void pwm_backlight_disable(void);
  20659. -
  20660. #endif /* MDSS_DSI_H */
  20661. diff --git a/drivers/video/msm/mdss/mdss_dsi_cmd.c b/drivers/video/msm/mdss/mdss_dsi_cmd.c
  20662. index e055414..4084627 100644
  20663. --- a/drivers/video/msm/mdss/mdss_dsi_cmd.c
  20664. +++ b/drivers/video/msm/mdss/mdss_dsi_cmd.c
  20665. @@ -1,4 +1,4 @@
  20666. -/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  20667. +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  20668. *
  20669. * This program is free software; you can redistribute it and/or modify
  20670. * it under the terms of the GNU General Public License version 2 and
  20671. @@ -66,6 +66,7 @@ char *mdss_dsi_buf_init(struct dsi_buf *dp)
  20672. off = 8 - off;
  20673. dp->data += off;
  20674. dp->len = 0;
  20675. + dp->read_cnt = 0;
  20676. return dp->data;
  20677. }
  20678.  
  20679. @@ -121,6 +122,7 @@ int mdss_dsi_buf_alloc(struct dsi_buf *dp, int size)
  20680.  
  20681. dp->data = dp->start;
  20682. dp->len = 0;
  20683. + dp->read_cnt = 0;
  20684. return size;
  20685. #endif
  20686. }
  20687. @@ -610,6 +612,7 @@ int mdss_dsi_short_read1_resp(struct dsi_buf *rp)
  20688. /* strip out dcs type */
  20689. rp->data++;
  20690. rp->len = 1;
  20691. + rp->read_cnt -= 3;
  20692. return rp->len;
  20693. }
  20694.  
  20695. @@ -621,6 +624,7 @@ int mdss_dsi_short_read2_resp(struct dsi_buf *rp)
  20696. /* strip out dcs type */
  20697. rp->data++;
  20698. rp->len = 2;
  20699. + rp->read_cnt -= 2;
  20700. return rp->len;
  20701. }
  20702.  
  20703. @@ -629,6 +633,7 @@ int mdss_dsi_long_read_resp(struct dsi_buf *rp)
  20704. /* strip out dcs header */
  20705. rp->data += 4;
  20706. rp->len -= 4;
  20707. + rp->read_cnt -= 6;
  20708. return rp->len;
  20709. }
  20710.  
  20711. diff --git a/drivers/video/msm/mdss/mdss_dsi_cmd.h b/drivers/video/msm/mdss/mdss_dsi_cmd.h
  20712. index f806e78..7ad2d71 100644
  20713. --- a/drivers/video/msm/mdss/mdss_dsi_cmd.h
  20714. +++ b/drivers/video/msm/mdss/mdss_dsi_cmd.h
  20715. @@ -1,4 +1,4 @@
  20716. -/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  20717. +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  20718. *
  20719. * This program is free software; you can redistribute it and/or modify
  20720. * it under the terms of the GNU General Public License version 2 and
  20721. @@ -30,7 +30,7 @@ struct mdss_dsi_ctrl_pdata;
  20722.  
  20723. #define MDSS_DSI_MRPS 0x04 /* Maximum Return Packet Size */
  20724.  
  20725. -#define MDSS_DSI_LEN 8 /* 4 x 4 - 6 - 2, bytes dcs header+crc-align */
  20726. +#define MDSS_DSI_LEN 10 /* 4 x 4 - 4 - 2, bytes dcs header+crc-align */
  20727.  
  20728. struct dsi_buf {
  20729. u32 *hdr; /* dsi host header */
  20730. @@ -40,6 +40,7 @@ struct dsi_buf {
  20731. char *data; /* buffer */
  20732. int len; /* data length */
  20733. dma_addr_t dmap; /* mapped dma addr */
  20734. + int read_cnt;
  20735. };
  20736.  
  20737. /* dcs read/write */
  20738. @@ -99,6 +100,7 @@ struct dsi_cmd_desc {
  20739. #define CMD_CLK_CTRL 0x0004
  20740. #define CMD_REQ_NO_MAX_PKT_SIZE 0x0008
  20741. #define CMD_REQ_LP_MODE 0x0010
  20742. +#define CMD_REQ_HS_MODE 0x0020
  20743.  
  20744. struct dcs_cmd_req {
  20745. struct dsi_cmd_desc *cmds;
  20746. diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
  20747. index 9f8d388..5119f54 100644
  20748. --- a/drivers/video/msm/mdss/mdss_dsi_host.c
  20749. +++ b/drivers/video/msm/mdss/mdss_dsi_host.c
  20750. @@ -1345,6 +1345,7 @@ static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
  20751. pr_err("unable to map dma memory to iommu(%d)\n", ret);
  20752. return -ENOMEM;
  20753. }
  20754. + ctrl->dmap_iommu_map = true;
  20755. } else {
  20756. addr = tp->dmap;
  20757. }
  20758. @@ -1392,9 +1393,11 @@ static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
  20759. } else
  20760. ret = tp->len;
  20761.  
  20762. - if (is_mdss_iommu_attached())
  20763. + if (ctrl->dmap_iommu_map) {
  20764. msm_iommu_unmap_contig_buffer(addr,
  20765. mdss_get_iommu_domain(domain), 0, size);
  20766. + ctrl->dmap_iommu_map = false;
  20767. + }
  20768.  
  20769. return ret;
  20770. }
  20771. @@ -1677,6 +1680,10 @@ int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
  20772. mutex_unlock(&ctrl->cmd_mutex);
  20773. return rc;
  20774. }
  20775. +
  20776. + if (req->flags & CMD_REQ_HS_MODE)
  20777. + mdss_dsi_set_tx_power_mode(0, &ctrl->panel_data);
  20778. +
  20779. if (req->flags & CMD_REQ_RX)
  20780. ret = mdss_dsi_cmdlist_rx(ctrl, req);
  20781. #if !defined(CONFIG_MACH_S3VE3G_EUR)
  20782. @@ -1685,6 +1692,10 @@ int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
  20783. #endif
  20784. else
  20785. ret = mdss_dsi_cmdlist_tx(ctrl, req);
  20786. +
  20787. + if (req->flags & CMD_REQ_HS_MODE)
  20788. + mdss_dsi_set_tx_power_mode(1, &ctrl->panel_data);
  20789. +
  20790. mdss_iommu_ctrl(0);
  20791. mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
  20792. mdss_bus_scale_set_quota(MDSS_HW_DSI0, 0, 0);
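
The two hunks above (mdss_dsi_cmd.h and mdss_dsi_host.c) introduce CMD_REQ_HS_MODE: when a queued command request carries this flag, mdss_dsi_cmdlist_commit() drives the lanes into high-speed mode for the transfer (mdss_dsi_set_tx_power_mode(0)) and restores low-power mode afterwards. Below is a minimal sketch of how a panel driver could request this; the dcs_cmd_req layout, CMD_REQ_COMMIT and mdss_dsi_cmdlist_put() are assumed from this driver family and are not part of the hunks shown.

/* Hedged sketch only: send one DCS command in HS mode via the new flag.
 * Assumes the mdss_dsi.h / mdss_dsi_cmd.h declarations used elsewhere in
 * this tree; adjust names to the actual sources before use. */
static char dcs_display_on[] = {0x29};   /* DCS set_display_on */

static struct dsi_cmd_desc display_on_cmd = {
	{DTYPE_DCS_WRITE, 1, 0, 0, 20, sizeof(dcs_display_on)},
	dcs_display_on
};

static void example_send_hs_cmd(struct mdss_dsi_ctrl_pdata *ctrl)
{
	struct dcs_cmd_req cmdreq;

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = &display_on_cmd;
	cmdreq.cmds_cnt = 1;
	/* CMD_REQ_HS_MODE makes mdss_dsi_cmdlist_commit() switch to HS
	 * before the transfer and back to LP once it completes. */
	cmdreq.flags = CMD_REQ_COMMIT | CMD_REQ_HS_MODE;
	cmdreq.rlen = 0;
	cmdreq.cb = NULL;

	mdss_dsi_cmdlist_put(ctrl, &cmdreq);
}
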
  20793. diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
  20794. index 3b36353..db66761 100644
  20795. --- a/drivers/video/msm/mdss/mdss_fb.c
  20796. +++ b/drivers/video/msm/mdss/mdss_fb.c
  20797. @@ -2,7 +2,7 @@
  20798. * Core MDSS framebuffer driver.
  20799. *
  20800. * Copyright (C) 2007 Google Incorporated
  20801. - * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  20802. + * Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
  20803. *
  20804. * This software is licensed under the terms of the GNU General Public
  20805. * License version 2, as published by the Free Software Foundation, and
  20806. @@ -54,8 +54,6 @@
  20807.  
  20808. #include "mdss_fb.h"
  20809. #include "mdss_mdp_splash_logo.h"
  20810. -#include "mdss_debug.h"
  20811. -#include "mdss_mdp_trace.h"
  20812.  
  20813. #ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
  20814. #define MDSS_FB_NUM 3
  20815. @@ -74,17 +72,6 @@ static u32 mdss_fb_pseudo_palette[16] = {
  20816. 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
  20817. };
  20818.  
  20819. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  20820. -#if defined(CONFIG_SEC_KS01_PROJECT) || defined(CONFIG_SEC_ATLANTIC_PROJECT)
  20821. -u8 prev_csc_update = 1;
  20822. -#endif
  20823. -u8 csc_update = 1;
  20824. -#endif
  20825. -
  20826. -#if (defined(CONFIG_MACH_S3VE3G_EUR) || defined(CONFIG_MACH_VICTOR3GDSDTV_LTN)) && defined(CONFIG_ESD_ERR_FG_RECOVERY)
  20827. -struct mutex esd_lock;
  20828. -#endif
  20829. -
  20830. static struct msm_mdp_interface *mdp_instance;
  20831.  
  20832. static int mdss_fb_register(struct msm_fb_data_type *mfd);
  20833. @@ -114,12 +101,6 @@ static int mdss_fb_pan_idle(struct msm_fb_data_type *mfd);
  20834. static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
  20835. int event, void *arg);
  20836. static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd);
  20837. -
  20838. -#if defined (CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WQXGA_PT_PANEL)|| \
  20839. - defined (CONFIG_FB_MSM8x26_MDSS_CHECK_LCD_CONNECTION)
  20840. -int get_lcd_attached(void);
  20841. -#endif
  20842. -
  20843. void mdss_fb_no_update_notify_timer_cb(unsigned long data)
  20844. {
  20845. struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
  20846. @@ -249,63 +230,6 @@ static struct led_classdev backlight_led = {
  20847. .brightness_set = mdss_fb_set_bl_brightness,
  20848. };
  20849.  
  20850. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  20851. -static ssize_t csc_read_cfg(struct device *dev,
  20852. - struct device_attribute *attr, char *buf)
  20853. -{
  20854. - ssize_t ret = 0;
  20855. -
  20856. - ret = snprintf(buf, PAGE_SIZE, "%d\n", csc_update);
  20857. - return ret;
  20858. -}
  20859. -
  20860. -static ssize_t csc_write_cfg(struct device *dev,
  20861. - struct device_attribute *attr, const char *buf, size_t count)
  20862. -{
  20863. - ssize_t ret = strnlen(buf, PAGE_SIZE);
  20864. - int err;
  20865. - int mode;
  20866. -
  20867. - err = kstrtoint(buf, 0, &mode);
  20868. - if (err)
  20869. - return ret;
  20870. -
  20871. - csc_update = !!(u8)mode;
  20872. -
  20873. - pr_info("%s: csc ctrl set to %d \n", __func__, mode);
  20874. -
  20875. - return ret;
  20876. -}
  20877. -
  20878. -static DEVICE_ATTR(csc_cfg, S_IRUGO | S_IWUSR, csc_read_cfg, csc_write_cfg);
  20879. -
  20880. -static struct attribute *csc_fs_attrs[] = {
  20881. - &dev_attr_csc_cfg.attr,
  20882. - NULL,
  20883. -};
  20884. -
  20885. -static struct attribute_group csc_fs_attr_group = {
  20886. - .attrs = csc_fs_attrs,
  20887. -};
  20888. -
  20889. -int mdp4_reg_csc_fs(struct msm_fb_data_type *mfd)
  20890. -{
  20891. - int ret = 0;
  20892. - struct device *dev = mfd->fbi->dev;
  20893. -
  20894. - ret = sysfs_create_group(&dev->kobj,
  20895. - &csc_fs_attr_group);
  20896. - if (ret) {
  20897. - pr_err("%s: sysfs group creation failed, ret=%d\n",
  20898. - __func__, ret);
  20899. - return ret;
  20900. - }
  20901. -
  20902. - kobject_uevent(&dev->kobj, KOBJ_ADD);
  20903. - pr_info("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
  20904. - return ret;
  20905. -}
  20906. -#endif
  20907. static ssize_t mdss_fb_get_type(struct device *dev,
  20908. struct device_attribute *attr, char *buf)
  20909. {
  20910. @@ -350,22 +274,15 @@ static void mdss_fb_parse_dt(struct msm_fb_data_type *mfd)
  20911. {
  20912. u32 data[2] = {0};
  20913. u32 panel_xres;
  20914. - int coeff = 1;
  20915. struct platform_device *pdev = mfd->pdev;
  20916.  
  20917. - if (of_property_read_u32_array(pdev->dev.of_node, "qcom,mdss-fb-split",
  20918. - data, 2))
  20919. - return;
  20920. -#if defined(CONFIG_FB_MSM_EDP_SAMSUNG)
  20921. - coeff = 1;
  20922. -#else
  20923. - coeff = 2;
  20924. -#endif
  20925. + of_property_read_u32_array(pdev->dev.of_node,
  20926. + "qcom,mdss-fb-split", data, 2);
  20927.  
  20928. panel_xres = mfd->panel_info->xres;
  20929. if (data[0] && data[1]) {
  20930. if (mfd->split_display)
  20931. - panel_xres *= coeff;
  20932. + panel_xres *= 2;
  20933.  
  20934. if (panel_xres == data[0] + data[1]) {
  20935. mfd->split_fb_left = data[0];
  20936. @@ -473,8 +390,8 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
  20937. ret = scnprintf(buf, PAGE_SIZE,
  20938. "pu_en=%d\nxstart=%d\nwalign=%d\nystart=%d\nhalign=%d\n"
  20939. "min_w=%d\nmin_h=%d",
  20940. - pinfo->partial_update_enabled, pinfo->xstart_pix_align,
  20941. - pinfo->width_pix_align, pinfo->ystart_pix_align,
  20942. + pinfo->partial_update_enabled, pinfo->xstart_pix_align,
  20943. + pinfo->width_pix_align, pinfo->ystart_pix_align,
  20944. pinfo->height_pix_align, pinfo->min_width,
  20945. pinfo->min_height);
  20946.  
  20947. @@ -650,11 +567,8 @@ static int mdss_fb_probe(struct platform_device *pdev)
  20948. mfd->bl_level = 0;
  20949. mfd->bl_level_prev_scaled = 0;
  20950. mfd->bl_scale = 1024;
  20951. -#if defined(CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL)
  20952. - mfd->bl_min_lvl = 20;
  20953. -#else
  20954. - mfd->bl_min_lvl = 0;
  20955. -#endif
  20956. + mfd->bl_min_lvl = 30;
  20957. + mfd->ad_bl_level = 0;
  20958. mfd->fb_imgType = MDP_RGBA_8888;
  20959.  
  20960. mfd->pdev = pdev;
  20961. @@ -664,11 +578,7 @@ static int mdss_fb_probe(struct platform_device *pdev)
  20962. INIT_LIST_HEAD(&mfd->proc_list);
  20963.  
  20964. mutex_init(&mfd->bl_lock);
  20965. -#if (defined(CONFIG_MACH_S3VE3G_EUR) || defined(CONFIG_MACH_VICTOR3GDSDTV_LTN)) && defined(CONFIG_ESD_ERR_FG_RECOVERY)
  20966. - mutex_init(&esd_lock);
  20967. -#endif
  20968. - mutex_init(&mfd->power_state);
  20969. - mutex_init(&mfd->ctx_lock);
  20970. +
  20971. fbi_list[fbi_list_index++] = fbi;
  20972.  
  20973. platform_set_drvdata(pdev, mfd);
  20974. @@ -703,9 +613,6 @@ static int mdss_fb_probe(struct platform_device *pdev)
  20975.  
  20976. mdss_fb_create_sysfs(mfd);
  20977. mdss_fb_send_panel_event(mfd, MDSS_EVENT_FB_REGISTERED, fbi);
  20978. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  20979. - mdp4_reg_csc_fs(mfd);
  20980. -#endif
  20981.  
  20982. mfd->mdp_sync_pt_data.fence_name = "mdp-fence";
  20983. if (mfd->mdp_sync_pt_data.timeline == NULL) {
  20984. @@ -906,10 +813,7 @@ static int mdss_fb_pm_suspend(struct device *dev)
  20985. return -ENODEV;
  20986.  
  20987. dev_dbg(dev, "display pm suspend\n");
  20988. - if(mfd->panel_info->type == DTV_PANEL) {
  20989. - dev_dbg(dev, "Ignore Suspend\n");
  20990. - return 0;
  20991. - }
  20992. +
  20993. return mdss_fb_suspend_sub(mfd);
  20994. }
  20995.  
  20996. @@ -920,10 +824,7 @@ static int mdss_fb_pm_resume(struct device *dev)
  20997. return -ENODEV;
  20998.  
  20999. dev_dbg(dev, "display pm resume\n");
  21000. - if(mfd->panel_info->type == DTV_PANEL) {
  21001. - dev_dbg(dev, "Ignore Resume\n");
  21002. - return 0;
  21003. - }
  21004. +
  21005. return mdss_fb_resume_sub(mfd);
  21006. }
  21007. #endif
  21008. @@ -986,31 +887,26 @@ static void mdss_fb_scale_bl(struct msm_fb_data_type *mfd, u32 *bl_lvl)
  21009. void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl)
  21010. {
  21011. struct mdss_panel_data *pdata;
  21012. - int (*update_ad_input)(struct msm_fb_data_type *mfd);
  21013. u32 temp = bkl_lvl;
  21014. - int ret = -EINVAL;
  21015. - bool is_bl_changed = (bkl_lvl != mfd->bl_level);
  21016. + bool bl_notify_needed = false;
  21017.  
  21018. - if (((!mfd->panel_power_on && mfd->dcm_state != DCM_ENTER)
  21019. - || !mfd->bl_updated) && !IS_CALIB_MODE_BL(mfd)) {
  21020. + if ((((!mfd->panel_power_on && mfd->dcm_state != DCM_ENTER)
  21021. + || !mfd->bl_updated) && !IS_CALIB_MODE_BL(mfd)) ||
  21022. + mfd->panel_info->cont_splash_enabled) {
  21023. mfd->unset_bl_level = bkl_lvl;
  21024. - pr_info("[BL1] bkl_lvl (%d), bl_updated(%d), power(%d)\n",
  21025. - bkl_lvl, mfd->bl_updated, mfd->panel_power_on);
  21026. return;
  21027. } else {
  21028. mfd->unset_bl_level = 0;
  21029. - pr_info("[BL2] bkl_lvl (%d), bl_updated(%d)\n",
  21030. - bkl_lvl, mfd->bl_updated);
  21031. }
  21032.  
  21033. pdata = dev_get_platdata(&mfd->pdev->dev);
  21034.  
  21035. if ((pdata) && (pdata->set_backlight)) {
  21036. - if (mfd->mdp.ad_attenuate_bl) {
  21037. - ret = (*mfd->mdp.ad_attenuate_bl)(bkl_lvl, &temp, mfd);
  21038. - if (ret)
  21039. - pr_err("Failed to attenuate BL\n");
  21040. - }
  21041. + if (mfd->mdp.ad_calc_bl)
  21042. + (*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
  21043. + &bl_notify_needed);
  21044. + if (bl_notify_needed)
  21045. + mdss_fb_bl_update_notify(mfd);
  21046.  
  21047. mfd->bl_level_prev_scaled = mfd->bl_level_scaled;
  21048. if (!IS_CALIB_MODE_BL(mfd))
  21049. @@ -1025,51 +921,33 @@ void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl)
  21050. */
  21051. if (mfd->bl_level_scaled == temp) {
  21052. mfd->bl_level = bkl_lvl;
  21053. - return;
  21054. - }
  21055. - if(mfd->panel_power_on == true)
  21056. - pdata->set_backlight(pdata, temp);
  21057. - mfd->bl_level = bkl_lvl;
  21058. - mfd->bl_level_scaled = temp;
  21059. -
  21060. - if (mfd->mdp.update_ad_input && is_bl_changed) {
  21061. - update_ad_input = mfd->mdp.update_ad_input;
  21062. - mutex_unlock(&mfd->bl_lock);
  21063. - /* Will trigger ad_setup which will grab bl_lock */
  21064. - update_ad_input(mfd);
  21065. - mutex_lock(&mfd->bl_lock);
  21066. + } else {
  21067. + pr_debug("backlight sent to panel :%d\n", temp);
  21068. + pdata->set_backlight(pdata, temp);
  21069. + mfd->bl_level = bkl_lvl;
  21070. + mfd->bl_level_scaled = temp;
  21071. }
  21072. - mdss_fb_bl_update_notify(mfd);
  21073. }
  21074. }
  21075.  
  21076. -static int fist_commit_flag = 1;
  21077. -
  21078. void mdss_fb_update_backlight(struct msm_fb_data_type *mfd)
  21079. {
  21080. struct mdss_panel_data *pdata;
  21081. - int ret = 0;
  21082. u32 temp;
  21083. + bool bl_notify = false;
  21084.  
  21085. mutex_lock(&mfd->bl_lock);
  21086. if (mfd->unset_bl_level && !mfd->bl_updated) {
  21087. pdata = dev_get_platdata(&mfd->pdev->dev);
  21088. if ((pdata) && (pdata->set_backlight)) {
  21089. -#if defined(CONFIG_MACH_KANAS3G_CTC)
  21090. - pr_info("[TSP]extend 200ms delay from LCD backlight\n");
  21091. - msleep(100);
  21092. -#endif
  21093. mfd->bl_level = mfd->unset_bl_level;
  21094. temp = mfd->bl_level;
  21095. - if (mfd->mdp.ad_attenuate_bl) {
  21096. - ret = (*mfd->mdp.ad_attenuate_bl)(temp,
  21097. - &temp, mfd);
  21098. - if (ret)
  21099. - pr_err("Failed to attenuate BL\n");
  21100. - }
  21101. - pr_info("mfd->bl_level (%d), bl_updated (%d)\n",
  21102. - mfd->bl_level, mfd->bl_updated);
  21103. - pdata->set_backlight(pdata, mfd->bl_level);
  21104. + if (mfd->mdp.ad_calc_bl)
  21105. + (*mfd->mdp.ad_calc_bl)(mfd, temp, &temp,
  21106. + &bl_notify);
  21107. + if (bl_notify)
  21108. + mdss_fb_bl_update_notify(mfd);
  21109. + pdata->set_backlight(pdata, temp);
  21110. mfd->bl_level_scaled = mfd->unset_bl_level;
  21111. mfd->bl_updated = 1;
  21112. }
  21113. @@ -1083,23 +961,15 @@ static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
  21114. struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
  21115. int ret = 0;
  21116.  
  21117. - pr_info("FB_NUM:%d, MDSS_FB_%s ++ \n", mfd->panel_info->fb_num,
  21118. - blank_mode? "BLANK": "UNBLANK");
  21119. -
  21120. if (!op_enable)
  21121. return -EPERM;
  21122.  
  21123. if (mfd->dcm_state == DCM_ENTER)
  21124. return -EPERM;
  21125.  
  21126. - mfd->blank_mode = blank_mode;
  21127. -
  21128. switch (blank_mode) {
  21129. case FB_BLANK_UNBLANK:
  21130. if (!mfd->panel_power_on && mfd->mdp.on_fnc) {
  21131. -#if defined(CONFIG_CLK_TUNING)
  21132. - load_clk_tuning_file();
  21133. -#endif
  21134. ret = mfd->mdp.on_fnc(mfd);
  21135. if (ret == 0) {
  21136. mfd->panel_power_on = true;
  21137. @@ -1119,7 +989,7 @@ static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
  21138. mutex_lock(&mfd->bl_lock);
  21139. if (!mfd->bl_updated) {
  21140. mfd->bl_updated = 1;
  21141. - mdss_fb_set_backlight(mfd, mfd->unset_bl_level);
  21142. + mdss_fb_set_backlight(mfd, mfd->bl_level_prev_scaled);
  21143. }
  21144. mutex_unlock(&mfd->bl_lock);
  21145. break;
  21146. @@ -1156,17 +1026,12 @@ static int mdss_fb_blank_sub(int blank_mode, struct fb_info *info,
  21147. mdss_fb_release_fences(mfd);
  21148. mfd->op_enable = true;
  21149. complete(&mfd->power_off_comp);
  21150. -
  21151. - fist_commit_flag = 1;
  21152. }
  21153. break;
  21154. }
  21155. /* Notify listeners */
  21156. sysfs_notify(&mfd->fbi->dev->kobj, NULL, "show_blank_event");
  21157.  
  21158. - pr_info("FB_NUM:%d, MDSS_FB_%s -- \n", mfd->panel_info->fb_num,
  21159. - blank_mode ? "BLANK": "UNBLANK");
  21160. -
  21161. return ret;
  21162. }
  21163.  
  21164. @@ -1174,57 +1039,13 @@ static int mdss_fb_blank(int blank_mode, struct fb_info *info)
  21165. {
  21166. struct mdss_panel_data *pdata;
  21167. struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
  21168. -#if (defined(CONFIG_MACH_S3VE3G_EUR) || defined(CONFIG_MACH_VICTOR3GDSDTV_LTN)) && defined(CONFIG_ESD_ERR_FG_RECOVERY)
  21169. - static int nblank_mode = FB_BLANK_UNBLANK;
  21170. - static int final_state = -1;
  21171. - int ret;
  21172. -
  21173. - mutex_lock(&esd_lock);
  21174. -
  21175. - printk("%s : nblank_mode[%d], blank_mode[%d], final_state[%d], esd_active[%d]\n", __func__, nblank_mode, blank_mode, final_state, info->esd_active);
  21176. - if(info->esd_active) {
  21177. - if(nblank_mode == FB_BLANK_POWERDOWN) {
  21178. -// if(final_state == FB_BLANK_UNBLANK)
  21179. -// goto NEXT_STEP1;
  21180. -// final_state = blank_mode;
  21181. - if(blank_mode == FB_BLANK_UNBLANK)
  21182. - final_state = -1;
  21183. - mutex_unlock(&esd_lock);
  21184. - return 0;
  21185. - } else if(nblank_mode == FB_BLANK_UNBLANK) {
  21186. - if(final_state == FB_BLANK_POWERDOWN && blank_mode == FB_BLANK_POWERDOWN) {
  21187. - nblank_mode = blank_mode;
  21188. - mutex_unlock(&esd_lock);
  21189. - return 0;
  21190. - }
  21191.  
  21192. - if(blank_mode == FB_BLANK_UNBLANK)
  21193. - final_state = -1;
  21194. - else if(blank_mode == FB_BLANK_POWERDOWN)
  21195. - final_state = blank_mode;
  21196. - goto NEXT_STEP2;
  21197. - }
  21198. - }
  21199. -
  21200. -//NEXT_STEP1:
  21201. -
  21202. - if(blank_mode == FB_BLANK_UNBLANK || blank_mode == FB_BLANK_POWERDOWN) {
  21203. - nblank_mode = blank_mode;
  21204. -// final_state = -1;
  21205. - }
  21206. -
  21207. -NEXT_STEP2:
  21208. -#endif
  21209. mdss_fb_pan_idle(mfd);
  21210. if (mfd->op_enable == 0) {
  21211. if (blank_mode == FB_BLANK_UNBLANK)
  21212. mfd->suspend.panel_power_on = true;
  21213. else
  21214. mfd->suspend.panel_power_on = false;
  21215. -
  21216. -#if (defined(CONFIG_MACH_S3VE3G_EUR) || defined(CONFIG_MACH_VICTOR3GDSDTV_LTN)) && defined(CONFIG_ESD_ERR_FG_RECOVERY)
  21217. - mutex_unlock(&esd_lock);
  21218. -#endif
  21219. return 0;
  21220. }
  21221. pr_debug("mode: %d\n", blank_mode);
  21222. @@ -1239,34 +1060,7 @@ NEXT_STEP2:
  21223. pdata->panel_info.is_lpm_mode = false;
  21224. }
  21225.  
  21226. -#if (defined(CONFIG_MACH_S3VE3G_EUR) || defined(CONFIG_MACH_VICTOR3GDSDTV_LTN)) && defined(CONFIG_ESD_ERR_FG_RECOVERY)
  21227. - ret = mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
  21228. - mutex_unlock(&esd_lock);
  21229. - return ret;
  21230. -#else
  21231. return mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
  21232. -#endif
  21233. -}
  21234. -
  21235. -/* Set VM page protection */
  21236. -static inline void __mdss_fb_set_page_protection(struct vm_area_struct *vma,
  21237. - struct msm_fb_data_type *mfd)
  21238. -{
  21239. - if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
  21240. - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  21241. - else if (mfd->mdp_fb_page_protection ==
  21242. - MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE)
  21243. - vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
  21244. - else if (mfd->mdp_fb_page_protection ==
  21245. - MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE)
  21246. - vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
  21247. - else if (mfd->mdp_fb_page_protection ==
  21248. - MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE)
  21249. - vma->vm_page_prot = pgprot_writebackwacache(vma->vm_page_prot);
  21250. - else
  21251. - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  21252. -
  21253. -
  21254. }
  21255.  
  21256. static inline int mdss_fb_create_ion_client(struct msm_fb_data_type *mfd)
  21257. @@ -1451,7 +1245,10 @@ static int mdss_fb_fbmem_ion_mmap(struct fb_info *info,
  21258. }
  21259. len = min(len, remainder);
  21260.  
  21261. - __mdss_fb_set_page_protection(vma, mfd);
  21262. + if (mfd->mdp_fb_page_protection ==
  21263. + MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
  21264. + vma->vm_page_prot =
  21265. + pgprot_writecombine(vma->vm_page_prot);
  21266.  
  21267. pr_debug("vma=%p, addr=%x len=%ld",
  21268. vma, (unsigned int)addr, len);
  21269. @@ -1495,27 +1292,17 @@ static int mdss_fb_physical_mmap(struct fb_info *info,
  21270. u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
  21271. unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
  21272. struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
  21273. - int ret = 0;
  21274. -
  21275. +
  21276. if (!start) {
  21277. pr_warn("No framebuffer memory is allocated\n");
  21278. return -ENOMEM;
  21279. }
  21280.  
  21281. - if ((vma->vm_end <= vma->vm_start) || (off >= len) ||
  21282. - ((vma->vm_end - vma->vm_start) > (len - off)))
  21283. - return -EINVAL;
  21284. - ret = mdss_fb_pan_idle(mfd);
  21285. - if (ret) {
  21286. - pr_err("Shutdown pending. Aborting operation\n");
  21287. - return ret;
  21288. - }
  21289. -
  21290. /* Set VM flags. */
  21291. start &= PAGE_MASK;
  21292. if ((vma->vm_end <= vma->vm_start) ||
  21293. - (off >= len) ||
  21294. - ((vma->vm_end - vma->vm_start) > (len - off)))
  21295. + (off >= len) ||
  21296. + ((vma->vm_end - vma->vm_start) > (len - off)))
  21297. return -EINVAL;
  21298. off += start;
  21299. if (off < start)
  21300. @@ -1524,10 +1311,13 @@ static int mdss_fb_physical_mmap(struct fb_info *info,
  21301. /* This is an IO map - tell maydump to skip this VMA */
  21302. vma->vm_flags |= VM_IO;
  21303.  
  21304. + if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
  21305. + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  21306. +
  21307. /* Remap the frame buffer I/O range */
  21308. if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
  21309. - vma->vm_end - vma->vm_start,
  21310. - vma->vm_page_prot))
  21311. + vma->vm_end - vma->vm_start,
  21312. + vma->vm_page_prot))
  21313. return -EAGAIN;
  21314.  
  21315. return 0;
  21316. @@ -1571,29 +1361,32 @@ static int mdss_fb_alloc_fbmem_iommu(struct msm_fb_data_type *mfd, int dom)
  21317. size_t size = 0;
  21318. struct platform_device *pdev = mfd->pdev;
  21319. int rc = 0;
  21320. - //struct device_node *fbmem_pnode = NULL;
  21321. + struct device_node *fbmem_pnode = NULL;
  21322.  
  21323. if (!pdev || !pdev->dev.of_node) {
  21324. pr_err("Invalid device node\n");
  21325. return -ENODEV;
  21326. }
  21327.  
  21328. -
  21329. - of_property_read_u32(pdev->dev.of_node,
  21330. - "qcom,memory-reservation-size", &size);
  21331. -
  21332. - pr_info("boot_mode_lpm = %d, boot_mode_recovery = %d\n",
  21333. - boot_mode_lpm, boot_mode_recovery);
  21334. -
  21335. - /* Incase of Normal Booting, Do not reserve FB memory */
  21336. - if ((!boot_mode_lpm) && (!boot_mode_recovery)){
  21337. - /* Normal Booting */
  21338. + fbmem_pnode = of_parse_phandle(pdev->dev.of_node,
  21339. + "linux,contiguous-region", 0);
  21340. + if (!fbmem_pnode) {
  21341. + pr_debug("fbmem is not reserved for %s\n", pdev->name);
  21342. mfd->fbi->screen_base = NULL;
  21343. mfd->fbi->fix.smem_start = 0;
  21344. return 0;
  21345. } else {
  21346. - of_property_read_u32(pdev->dev.of_node,
  21347. - "qcom,memory-alt-reservation-size", &size);
  21348. + const u32 *addr;
  21349. + u64 len;
  21350. +
  21351. + addr = of_get_address(fbmem_pnode, 0, &len, NULL);
  21352. + if (!addr) {
  21353. + pr_err("fbmem size is not specified\n");
  21354. + of_node_put(fbmem_pnode);
  21355. + return -EINVAL;
  21356. + }
  21357. + size = (size_t)len;
  21358. + of_node_put(fbmem_pnode);
  21359. }
  21360.  
  21361. pr_debug("%s frame buffer reserve_size=0x%zx\n", __func__, size);
  21362. @@ -1793,12 +1586,8 @@ static int mdss_fb_register(struct msm_fb_data_type *mfd)
  21363. var->yres = panel_info->yres;
  21364. if (panel_info->physical_width)
  21365. var->width = panel_info->physical_width;
  21366. - else if(panel_info->width)
  21367. - var->width = panel_info->width;
  21368. if (panel_info->physical_height)
  21369. var->height = panel_info->physical_height;
  21370. - else if (panel_info->height)
  21371. - var->height = panel_info->height;
  21372. var->xres_virtual = var->xres;
  21373. var->yres_virtual = panel_info->yres * mfd->fb_page;
  21374. var->bits_per_pixel = bpp * 8; /* FrameBuffer color depth */
  21375. @@ -2100,13 +1889,7 @@ void mdss_fb_wait_for_fence(struct msm_sync_pt_data *sync_pt_data)
  21376. int i, ret = 0;
  21377.  
  21378. pr_debug("%s: wait for fences\n", sync_pt_data->fence_name);
  21379. -#if defined (CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WQXGA_PT_PANEL) || \
  21380. - defined (CONFIG_FB_MSM8x26_MDSS_CHECK_LCD_CONNECTION)
  21381. - if (get_lcd_attached() == 0) {
  21382. - pr_debug("%s : lcd is not attached..\n",__func__);
  21383. - return;
  21384. - }
  21385. -#endif
  21386. +
  21387. mutex_lock(&sync_pt_data->sync_mutex);
  21388. /*
  21389. * Assuming that acq_fen_cnt is sanitized in bufsync ioctl
  21390. @@ -2415,13 +2198,9 @@ static int __mdss_fb_perform_commit(struct msm_fb_data_type *mfd)
  21391. sync_pt_data->flushed = false;
  21392.  
  21393. if (fb_backup->disp_commit.flags & MDP_DISPLAY_COMMIT_OVERLAY) {
  21394. - if (mfd->mdp.kickoff_fnc) {
  21395. + if (mfd->mdp.kickoff_fnc)
  21396. ret = mfd->mdp.kickoff_fnc(mfd,
  21397. &fb_backup->disp_commit);
  21398. -
  21399. - if (fist_commit_flag)
  21400. - pr_info("kickoff done!\n");
  21401. - }
  21402. else
  21403. pr_warn("no kickoff function setup for fb%d\n",
  21404. mfd->index);
  21405. @@ -2436,14 +2215,10 @@ static int __mdss_fb_perform_commit(struct msm_fb_data_type *mfd)
  21406. }
  21407. if (!ret)
  21408. mdss_fb_update_backlight(mfd);
  21409. - else
  21410. - pr_err("skip mdss_fb_update_backlight..\n");
  21411.  
  21412. if (IS_ERR_VALUE(ret) || !sync_pt_data->flushed)
  21413. mdss_fb_signal_timeline(sync_pt_data);
  21414.  
  21415. - fist_commit_flag = 0;
  21416. -
  21417. return ret;
  21418. }
  21419.  
  21420. @@ -2460,7 +2235,6 @@ static int __mdss_fb_display_thread(void *data)
  21421. mfd->index);
  21422.  
  21423. while (1) {
  21424. - ATRACE_BEGIN(__func__);
  21425. wait_event(mfd->commit_wait_q,
  21426. (atomic_read(&mfd->commits_pending) ||
  21427. kthread_should_stop()));
  21428. @@ -2471,7 +2245,6 @@ static int __mdss_fb_display_thread(void *data)
  21429. ret = __mdss_fb_perform_commit(mfd);
  21430. atomic_dec(&mfd->commits_pending);
  21431. wake_up_all(&mfd->idle_wait_q);
  21432. - ATRACE_END(__func__);
  21433. }
  21434.  
  21435. atomic_set(&mfd->commits_pending, 0);
  21436. @@ -2797,14 +2570,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
  21437. int retire_fen_fd;
  21438. int val;
  21439.  
  21440. -#if defined (CONFIG_FB_MSM_MIPI_SAMSUNG_TFT_VIDEO_WQXGA_PT_PANEL)|| \
  21441. - defined (CONFIG_FB_MSM8x26_MDSS_CHECK_LCD_CONNECTION)
  21442. - if (get_lcd_attached() == 0) {
  21443. - pr_debug("%s : lcd is not attached..\n",__func__);
  21444. - return 0;
  21445. - }
  21446. -#endif
  21447. -
  21448. if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
  21449. (sync_pt_data->timeline == NULL))
  21450. return -EINVAL;
  21451. @@ -2958,7 +2723,8 @@ static int __ioctl_wait_idle(struct msm_fb_data_type *mfd, u32 cmd)
  21452. (cmd != MSMFB_OVERLAY_VSYNC_CTRL) &&
  21453. (cmd != MSMFB_ASYNC_BLIT) &&
  21454. (cmd != MSMFB_BLIT) &&
  21455. - (cmd != MSMFB_NOTIFY_UPDATE)) {
  21456. + (cmd != MSMFB_NOTIFY_UPDATE) &&
  21457. + (cmd != MSMFB_OVERLAY_PREPARE)) {
  21458. ret = mdss_fb_pan_idle(mfd);
  21459. }
  21460.  
  21461. @@ -2988,7 +2754,6 @@ static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
  21462. if (mfd->shutdown_pending)
  21463. return -EPERM;
  21464.  
  21465. - ATRACE_BEGIN(__func__);
  21466. atomic_inc(&mfd->ioctl_ref_cnt);
  21467.  
  21468. mdss_fb_power_setting_idle(mfd);
  21469. @@ -3041,9 +2806,7 @@ static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
  21470. break;
  21471.  
  21472. case MSMFB_DISPLAY_COMMIT:
  21473. - ATRACE_BEGIN("MSMFB_DISPLAY_COMMIT");
  21474. ret = mdss_fb_display_commit(info, argp);
  21475. - ATRACE_END("MSMFB_DISPLAY_COMMIT");
  21476. break;
  21477.  
  21478. case MSMFB_LPM_ENABLE:
  21479. @@ -3069,7 +2832,6 @@ exit:
  21480. if (!atomic_dec_return(&mfd->ioctl_ref_cnt))
  21481. wake_up_all(&mfd->ioctl_q);
  21482.  
  21483. - ATRACE_END(__func__);
  21484. return ret;
  21485. }
  21486.  
  21487. @@ -3163,30 +2925,6 @@ mdss_notfound:
  21488. }
  21489. EXPORT_SYMBOL(mdss_register_panel);
  21490.  
  21491. -int mdss_panel_force_update(struct mdss_panel_data *pdata)
  21492. -{
  21493. - struct msm_fb_data_type *mfd = NULL;
  21494. - int i;
  21495. -
  21496. - if (!pdata)
  21497. - return -ENODEV;
  21498. -
  21499. - for (i = 0; i < fbi_list_index; i++) {
  21500. - mfd = fbi_list[i]->par;
  21501. -
  21502. - if (mfd->panel_info == &pdata->panel_info)
  21503. - break;
  21504. - }
  21505. -
  21506. - if (i == fbi_list_index || !mfd)
  21507. - return -ENOENT;
  21508. -
  21509. - mdss_fb_pan_display_ex(mfd->fbi, &mfd->msm_fb_backup.disp_commit);
  21510. -
  21511. - return 0;
  21512. -}
  21513. -EXPORT_SYMBOL(mdss_panel_force_update);
  21514. -
  21515. int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp)
  21516. {
  21517. if (mdp_instance) {
  21518. @@ -3199,12 +2937,6 @@ int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp)
  21519. }
  21520. EXPORT_SYMBOL(mdss_fb_register_mdp_instance);
  21521.  
  21522. -int mdss_fb_get_first_cmt_flag(void)
  21523. -{
  21524. - return fist_commit_flag;
  21525. -}
  21526. -EXPORT_SYMBOL(mdss_fb_get_first_cmt_flag);
  21527. -
  21528. int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num)
  21529. {
  21530. struct fb_info *info;
  21531. @@ -3257,9 +2989,6 @@ int mdss_fb_suspres_panel(struct device *dev, void *data)
  21532. if (!mfd)
  21533. return 0;
  21534.  
  21535. - if (mfd->index == 1)
  21536. - return 0;
  21537. -
  21538. event = *((bool *) data) ? MDSS_EVENT_RESUME : MDSS_EVENT_SUSPEND;
  21539.  
  21540. rc = mdss_fb_send_panel_event(mfd, event, NULL);
  21541. diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
  21542. index 760738a..a9def29 100644
  21543. --- a/drivers/video/msm/mdss/mdss_fb.h
  21544. +++ b/drivers/video/msm/mdss/mdss_fb.h
  21545. @@ -28,19 +28,8 @@
  21546. #define MSM_FB_MAX_DEV_LIST 32
  21547.  
  21548. #define MSM_FB_ENABLE_DBGFS
  21549. -/*
  21550. - * This temporary work around of fence time-out modification is being added to handle
  21551. - * screen being locked up/blank after resuming - being discussed in SR# 01515705.
  21552. - * needs to be rolled back once a solution is found to address the issue at hand
  21553. - */
  21554. -#if defined(CONFIG_FB_MSM_MDSS_TC_DSI2LVDS_WXGA_PANEL) || defined(CONFIG_FB_MSM_MDSS_SDC_WXGA_PANEL)\
  21555. - || defined(CONFIG_FB_MSM_MDSS_CPT_QHD_PANEL) || defined(CONFIG_FB_MSM_MDSS_MAGNA_OCTA_VIDEO_720P_PANEL)
  21556. -#define WAIT_FENCE_FIRST_TIMEOUT (0.5 * MSEC_PER_SEC)
  21557. -#define WAIT_FENCE_FINAL_TIMEOUT (1 * MSEC_PER_SEC)
  21558. -#else
  21559. #define WAIT_FENCE_FIRST_TIMEOUT (3 * MSEC_PER_SEC)
  21560. #define WAIT_FENCE_FINAL_TIMEOUT (10 * MSEC_PER_SEC)
  21561. -#endif
  21562. /* Display op timeout should be greater than total timeout */
  21563. #define WAIT_DISP_OP_TIMEOUT ((WAIT_FENCE_FIRST_TIMEOUT + \
  21564. WAIT_FENCE_FINAL_TIMEOUT) * MDP_MAX_FENCE_FD)
  21565. @@ -53,6 +42,9 @@
  21566. #define MIN(x, y) (((x) < (y)) ? (x) : (y))
  21567. #endif
  21568.  
  21569. +#define MDP_PP_AD_BL_LINEAR 0x0
  21570. +#define MDP_PP_AD_BL_LINEAR_INV 0x1
  21571. +
  21572. /**
  21573. * enum mdp_notify_event - Different frame events to indicate frame update state
  21574. *
  21575. @@ -133,9 +125,8 @@ struct msm_mdp_interface {
  21576. int (*lut_update)(struct msm_fb_data_type *mfd, struct fb_cmap *cmap);
  21577. int (*do_histogram)(struct msm_fb_data_type *mfd,
  21578. struct mdp_histogram *hist);
  21579. - int (*update_ad_input)(struct msm_fb_data_type *mfd);
  21580. - int (*ad_attenuate_bl)(u32 bl, u32 *bl_out,
  21581. - struct msm_fb_data_type *mfd);
  21582. + int (*ad_calc_bl)(struct msm_fb_data_type *mfd, int bl_in,
  21583. + int *bl_out, bool *bl_out_notify);
  21584. int (*panel_register_done)(struct mdss_panel_data *pdata);
  21585. u32 (*fb_stride)(u32 fb_index, u32 xres, int bpp);
  21586. int (*splash_init_fnc)(struct msm_fb_data_type *mfd);
  21587. @@ -186,7 +177,6 @@ struct msm_fb_data_type {
  21588. int panel_reconfig;
  21589.  
  21590. u32 dst_format;
  21591. - int resume_state;
  21592. int panel_power_on;
  21593. struct disp_info_type_suspend suspend;
  21594.  
  21595. @@ -199,8 +189,8 @@ struct msm_fb_data_type {
  21596. int ext_ad_ctrl;
  21597. u32 ext_bl_ctrl;
  21598. u32 calib_mode;
  21599. + u32 ad_bl_level;
  21600. u32 bl_level;
  21601. - u32 bl_previous;
  21602. u32 bl_scale;
  21603. u32 bl_min_lvl;
  21604. u32 unset_bl_level;
  21605. @@ -208,8 +198,6 @@ struct msm_fb_data_type {
  21606. u32 bl_level_scaled;
  21607. u32 bl_level_prev_scaled;
  21608. struct mutex bl_lock;
  21609. - struct mutex power_state;
  21610. - struct mutex ctx_lock;
  21611.  
  21612. struct platform_device *pdev;
  21613.  
  21614. @@ -223,17 +211,6 @@ struct msm_fb_data_type {
  21615.  
  21616. struct msm_sync_pt_data mdp_sync_pt_data;
  21617.  
  21618. - u32 acq_fen_cnt;
  21619. - struct sync_fence *acq_fen[MDP_MAX_FENCE_FD];
  21620. - int cur_rel_fen_fd;
  21621. - struct sync_pt *cur_rel_sync_pt;
  21622. - struct sync_fence *cur_rel_fence;
  21623. - struct sync_fence *last_rel_fence;
  21624. - struct sw_sync_timeline *timeline;
  21625. - int timeline_value;
  21626. - u32 last_acq_fen_cnt;
  21627. - struct sync_fence *last_acq_fen[MDP_MAX_FENCE_FD];
  21628. - struct mutex sync_mutex;
  21629. /* for non-blocking */
  21630. struct task_struct *disp_thread;
  21631. atomic_t commits_pending;
  21632. @@ -257,8 +234,6 @@ struct msm_fb_data_type {
  21633. u32 wait_for_kickoff;
  21634. struct ion_client *fb_ion_client;
  21635. struct ion_handle *fb_ion_handle;
  21636. -
  21637. - int blank_mode;
  21638. };
  21639.  
  21640. static inline void mdss_fb_update_notify_update(struct msm_fb_data_type *mfd)
  21641. @@ -279,49 +254,8 @@ static inline void mdss_fb_update_notify_update(struct msm_fb_data_type *mfd)
  21642. mutex_unlock(&mfd->no_update.lock);
  21643. }
  21644. }
  21645. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  21646. -#if defined(CONFIG_SEC_KS01_PROJECT)|| defined(CONFIG_SEC_ATLANTIC_PROJECT)
  21647. -extern u8 prev_csc_update;
  21648. -#endif
  21649. -extern u8 csc_update;
  21650. -#if !defined(CONFIG_SEC_KS01_PROJECT) && !defined(CONFIG_SEC_ATLANTIC_PROJECT)
  21651. -extern u8 pre_csc_update;
  21652. -#endif
  21653. -#endif
  21654.  
  21655. -#if defined (CONFIG_FB_MSM_MDSS_DBG_SEQ_TICK)
  21656. -
  21657. -enum{
  21658. - COMMIT,
  21659. - KICKOFF,
  21660. - PP_DONE
  21661. -};
  21662. -
  21663. -struct mdss_tick_debug {
  21664. - u64 commit[10];
  21665. - u64 kickoff[10];
  21666. - u64 pingpong_done[10];
  21667. - u8 commit_cnt;
  21668. - u8 kickoff_cnt;
  21669. - u8 pingpong_done_cnt;
  21670. -};
  21671. -void mdss_dbg_tick_save(int op_name);
  21672. -
  21673. -#endif
  21674. -
  21675. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  21676. -enum TE_SETTING {
  21677. - TE_SET_INIT = -1,
  21678. - TE_SET_READY,
  21679. - TE_SET_START,
  21680. - TE_SET_DONE,
  21681. - TE_SET_FAIL,
  21682. -};
  21683. -#endif
  21684. -
  21685. -extern int boot_mode_lpm, boot_mode_recovery;
  21686. int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num);
  21687. -int mdss_fb_get_first_cmt_flag(void);
  21688. void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl);
  21689. void mdss_fb_update_backlight(struct msm_fb_data_type *mfd);
  21690. void mdss_fb_wait_for_fence(struct msm_sync_pt_data *sync_pt_data);
  21691. @@ -329,9 +263,6 @@ void mdss_fb_signal_timeline(struct msm_sync_pt_data *sync_pt_data);
  21692. struct sync_fence *mdss_fb_sync_get_fence(struct sw_sync_timeline *timeline,
  21693. const char *fence_name, int val);
  21694. int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp);
  21695. -#if defined(CONFIG_MDNIE_TFT_MSM8X26) || defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL) || defined(CONFIG_MDNIE_VIDEO_ENHANCED)
  21696. -void mdss_negative_color(int is_negative_on);
  21697. -#endif
  21698. int mdss_fb_dcm(struct msm_fb_data_type *mfd, int req_state);
  21699. int mdss_fb_suspres_panel(struct device *dev, void *data);
  21700. #endif /* MDSS_FB_H */
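
The framebuffer hunks above replace the ad_attenuate_bl/update_ad_input pair with a single ad_calc_bl hook on msm_mdp_interface: it receives the requested level, returns the (possibly attenuated) level, and reports whether a backlight-change notification should be raised, which mdss_fb_set_backlight()/mdss_fb_update_backlight() then forward via mdss_fb_bl_update_notify(). A minimal sketch of an implementation the MDP layer could register is below; the clamp policy is a placeholder, not the real assertive-display math (which is not shown in this diff).

/* Hedged sketch of an ad_calc_bl implementation matching the new
 * msm_mdp_interface signature; the actual attenuation lives in the
 * MDP post-processing code and will differ from this placeholder. */
static int example_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in,
			      int *bl_out, bool *bl_out_notify)
{
	int out = bl_in;

	/* Placeholder policy: never drive the panel below bl_min_lvl
	 * (set to 30 in the probe hunk above) while it is lit. */
	if (out && out < (int)mfd->bl_min_lvl)
		out = mfd->bl_min_lvl;

	*bl_out = out;
	/* Raise the bl_update notification only when the level changed. */
	*bl_out_notify = (out != bl_in);

	return 0;
}

/* Registered once by the MDP layer, e.g.:
 *	mdp_interface.ad_calc_bl = example_ad_calc_bl;
 */
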
  21701. diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
  21702. index 69e0198..cfd37a0 100644
  21703. --- a/drivers/video/msm/mdss/mdss_mdp.h
  21704. +++ b/drivers/video/msm/mdss/mdss_mdp.h
  21705. @@ -333,7 +333,6 @@ struct mdss_ad_info {
  21706. u32 last_bl;
  21707. u32 bl_data;
  21708. u32 calc_itr;
  21709. - uint32_t bl_bright_shift;
  21710. uint32_t bl_lin[AD_BL_LIN_LEN];
  21711. uint32_t bl_lin_inv[AD_BL_LIN_LEN];
  21712. uint32_t bl_att_lut[AD_BL_ATT_LUT_LEN];
  21713. @@ -633,6 +632,7 @@ int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
  21714.  
  21715. int mdss_mdp_pp_init(struct device *dev);
  21716. void mdss_mdp_pp_term(struct device *dev);
  21717. +int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd);
  21718.  
  21719. int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 mixer_num);
  21720.  
  21721. diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
  21722. index d7394b6..b0b5a61 100644
  21723. --- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
  21724. +++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
  21725. @@ -37,11 +37,9 @@ static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
  21726. static inline u64 apply_fudge_factor(u64 val,
  21727. struct mdss_fudge_factor *factor)
  21728. {
  21729. - return fudge_factor(val, factor->numer, factor->denom);
  21730. + return fudge_factor(val, factor->numer, factor->denom);
  21731. }
  21732. -#ifdef CONFIG_VIDEO_MHL_V2
  21733. -extern int hdmi_hpd_status(void);
  21734. -#endif
  21735. +
  21736. static DEFINE_MUTEX(mdss_mdp_ctl_lock);
  21737.  
  21738. static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);
  21739. @@ -735,7 +733,8 @@ static void mdss_mdp_perf_calc_ctl(struct mdss_mdp_ctl *ctl,
  21740. left_plist, (left_plist ? MDSS_MDP_MAX_STAGE : 0),
  21741. right_plist, (right_plist ? MDSS_MDP_MAX_STAGE : 0));
  21742.  
  21743. - if (ctl->is_video_mode || mdss_mdp_video_mode_intf_connected(ctl)) {
  21744. + if (ctl->is_video_mode || ((ctl->intf_type != MDSS_MDP_NO_INTF) &&
  21745. + mdss_mdp_video_mode_intf_connected(ctl))) {
  21746. perf->bw_ctl =
  21747. max(apply_fudge_factor(perf->bw_overlap,
  21748. &mdss_res->ib_factor_overlap),
  21749. @@ -830,17 +829,6 @@ u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl)
  21750. unsigned long flags;
  21751. u32 transaction_status;
  21752.  
  21753. - if (!ctl)
  21754. - return PERF_STATUS_BUSY;
  21755. -
  21756. - /*
  21757. - * If Rotator mode and bandwidth has been released; return STATUS_DONE
  21758. - * so the bandwidth is re-calculated.
  21759. - */
  21760. - if (ctl->mixer_left && ctl->mixer_left->rotator_mode &&
  21761. - !ctl->perf_release_ctl_bw)
  21762. - return PERF_STATUS_DONE;
  21763. -
  21764. /*
  21765. * If Video Mode or not valid data to determine the status, return busy
  21766. * status, so the bandwidth cannot be freed by the caller
  21767. @@ -877,8 +865,8 @@ static inline void mdss_mdp_ctl_perf_update_bus(struct mdss_mdp_ctl *ctl)
  21768. ctl->cur_perf.bw_ctl);
  21769. }
  21770. }
  21771. - bus_ib_quota = max(bw_sum_of_intfs, mdata->perf_tune.min_bus_vote);
  21772. - bus_ab_quota = apply_fudge_factor(bus_ib_quota,
  21773. + bus_ib_quota = bw_sum_of_intfs;
  21774. + bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs,
  21775. &mdss_res->ab_factor);
  21776. trace_mdp_perf_update_bus(bus_ab_quota, bus_ib_quota);
  21777. ATRACE_INT("bus_quota", bus_ib_quota);
  21778. @@ -923,7 +911,7 @@ void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl)
  21779. pr_debug("transaction_status=0x%x\n", transaction_status);
  21780.  
  21781. /*Release the bandwidth only if there are no transactions pending*/
  21782. - if (!transaction_status && mdata->enable_bw_release) {
  21783. + if (!transaction_status) {
  21784. trace_mdp_cmd_release_bw(ctl->num);
  21785. ctl->cur_perf.bw_ctl = 0;
  21786. ctl->new_perf.bw_ctl = 0;
  21787. @@ -957,16 +945,6 @@ static int mdss_mdp_select_clk_lvl(struct mdss_mdp_ctl *ctl,
  21788. return clk_rate;
  21789. }
  21790.  
  21791. -static void mdss_mdp_perf_release_ctl_bw(struct mdss_mdp_ctl *ctl,
  21792. - struct mdss_mdp_perf_params *perf)
  21793. -{
  21794. - /* Set to zero controller bandwidth. */
  21795. - memset(perf, 0, sizeof(*perf));
  21796. - ctl->perf_release_ctl_bw = false;
  21797. -}
  21798. -
  21799. -#define ADDING_BW_ROTATE_MODE 130
  21800. -#define ADDING_BW_LANDSCAPE_MODE 107
  21801. static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
  21802. int params_changed)
  21803. {
  21804. @@ -991,10 +969,7 @@ static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
  21805. is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
  21806.  
  21807. if (ctl->power_on) {
  21808. - if (ctl->perf_release_ctl_bw &&
  21809. - mdata->enable_rotator_bw_release)
  21810. - mdss_mdp_perf_release_ctl_bw(ctl, new);
  21811. - else if (is_bw_released || params_changed)
  21812. + if (is_bw_released || params_changed)
  21813. mdss_mdp_perf_calc_ctl(ctl, new);
  21814. /*
  21815. * if params have just changed delay the update until
  21816. @@ -1309,31 +1284,6 @@ int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
  21817. }
  21818. }
  21819.  
  21820. -#if defined(CONFIG_FB_MSM_EDP_SAMSUNG)
  21821. -int mdss_mdp_scan_pipes(void)
  21822. -{
  21823. - unsigned long off;
  21824. - u32 size;
  21825. - int i, pnum = 0;
  21826. -
  21827. - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  21828. - for (i = 0; i < 6; i++) {
  21829. - off = MDSS_MDP_REG_SSPP_OFFSET(i) + MDSS_MDP_REG_SSPP_SRC_SIZE;
  21830. -
  21831. - size = MDSS_MDP_REG_READ(off);
  21832. -
  21833. - pr_debug("%s: i=%d: addr=%x hw=%x\n",
  21834. - __func__, i, (int)off, (int)size);
  21835. - if (size)
  21836. - pnum++;
  21837. -
  21838. - }
  21839. - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  21840. -
  21841. - return pnum;
  21842. -}
  21843. -#endif
  21844. -
  21845. static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
  21846. struct mdss_mdp_ctl *split_ctl)
  21847. {
  21848. @@ -1546,7 +1496,6 @@ struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
  21849. ctl->mfd = mfd;
  21850. ctl->panel_data = pdata;
  21851. ctl->is_video_mode = false;
  21852. - ctl->perf_release_ctl_bw = false;
  21853.  
  21854. switch (pdata->panel_info.type) {
  21855. case EDP_PANEL:
  21856. @@ -1581,16 +1530,9 @@ struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
  21857. ctl->intf_type = MDSS_INTF_HDMI;
  21858. ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
  21859. ctl->start_fnc = mdss_mdp_video_start;
  21860. -#ifndef CONFIG_VIDEO_MHL_V2
  21861. -/*
  21862. -* mdss_mdp_limited_lut_igc_config() is for make limited range
  21863. -* but we use limited range in MHL driver side
  21864. -* so comment that function
  21865. -*/
  21866. ret = mdss_mdp_limited_lut_igc_config(ctl);
  21867. if (ret)
  21868. pr_err("Unable to config IGC LUT data");
  21869. -#endif
  21870. break;
  21871. case WRITEBACK_PANEL:
  21872. ctl->intf_num = MDSS_MDP_NO_INTF;
  21873. @@ -2515,11 +2457,6 @@ int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
  21874. u32 time_of_line, time_to_vsync;
  21875. ktime_t current_time = ktime_get();
  21876.  
  21877. - if (!ctl) {
  21878. - pr_err("%s : invalid ctl\n", __func__);
  21879. - return -ENODEV;
  21880. - }
  21881. -
  21882. if (!ctl->read_line_cnt_fnc)
  21883. return -ENOSYS;
  21884.  
  21885. @@ -2629,10 +2566,6 @@ int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl)
  21886. return ret;
  21887. }
  21888.  
  21889. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  21890. -struct mdss_mdp_ctl *commit_ctl;
  21891. -#endif
  21892. -
  21893. int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg)
  21894. {
  21895. struct mdss_mdp_ctl *sctl = NULL;
  21896. @@ -2645,11 +2578,6 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg)
  21897. return -ENODEV;
  21898. }
  21899.  
  21900. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  21901. - commit_ctl = ctl;
  21902. -#endif
  21903. -
  21904. - ATRACE_BEGIN(__func__);
  21905. mutex_lock(&ctl->lock);
  21906. pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
  21907.  
  21908. @@ -2746,7 +2674,6 @@ done:
  21909.  
  21910. mutex_unlock(&ctl->lock);
  21911.  
  21912. - ATRACE_END(__func__);
  21913. return ret;
  21914. }
  21915.  
  21916. @@ -2838,15 +2765,6 @@ static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer)
  21917. }
  21918. }
  21919.  
  21920. -u32 mdss_mdp_get_mixercfg(struct mdss_mdp_mixer *mixer)
  21921. -{
  21922. - if (!mixer && !mixer->ctl)
  21923. - return 0;
  21924. -
  21925. - return mdss_mdp_ctl_read(mixer->ctl,
  21926. - __mdss_mdp_ctl_get_mixer_off(mixer));
  21927. -}
  21928. -
  21929. static int __mdss_mdp_mixer_handoff_helper(struct mdss_mdp_mixer *mixer,
  21930. struct mdss_mdp_pipe *pipe)
  21931. {
  21932. @@ -2924,18 +2842,3 @@ static void mdss_mdp_xlog_mixer_reg(struct mdss_mdp_ctl *ctl)
  21933. data[MDSS_MDP_INTF_LAYERMIXER2],
  21934. data[MDSS_MDP_INTF_LAYERMIXER3], off);
  21935. }
  21936. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  21937. -void mdss_mdp_mixer_read(void)
  21938. -{
  21939. - int i, off;
  21940. - u32 data[4];
  21941. -
  21942. - for (i=0; i < 4; i++) {
  21943. - off = MDSS_MDP_REG_CTL_LAYER(i);
  21944. - data[i] = mdss_mdp_ctl_read(commit_ctl, off);
  21945. - }
  21946. - xlog(__func__, data[0], data[1], data[2], data[3], off, 0);
  21947. -
  21948. -}
  21949. -#endif
  21950. -
  21951. diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
  21952. old mode 100644
  21953. new mode 100755
  21954. index ccdb603..70e2972
  21955. --- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
  21956. +++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
  21957. @@ -1,4 +1,4 @@
  21958. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  21959. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  21960. *
  21961. * This program is free software; you can redistribute it and/or modify
  21962. * it under the terms of the GNU General Public License version 2 and
  21963. @@ -12,16 +12,13 @@
  21964. */
  21965.  
  21966. #include <linux/kernel.h>
  21967. -#include <linux/bootmem.h>
  21968. -#include <linux/memblock.h>
  21969.  
  21970. #include "mdss_mdp.h"
  21971. #include "mdss_panel.h"
  21972. #include "mdss_debug.h"
  21973. -#include "mdss_fb.h"
  21974. #include "mdss_mdp_trace.h"
  21975.  
  21976. -#define VSYNC_EXPIRE_TICK 6
  21977. +#define VSYNC_EXPIRE_TICK 4
  21978.  
  21979. #define MAX_SESSIONS 2
  21980.  
  21981. @@ -31,34 +28,20 @@
  21982. #define STOP_TIMEOUT(hz) msecs_to_jiffies((1000 / hz) * (VSYNC_EXPIRE_TICK + 2))
  21983. #define ULPS_ENTER_TIME msecs_to_jiffies(100)
  21984.  
  21985. -/*
  21986. - * STOP_TIMEOUT need to wait for cmd stop depends on fps
  21987. - * if the command panel support 60fps the timeout value
  21988. - * generated using 16ms(1frame). If that support 15fps the timeout value
  21989. - * generated by 40ms(1frame)
  21990. - */
  21991. -#define STOP_TIMEOUT_FOR_ALPM msecs_to_jiffies(40 * (VSYNC_EXPIRE_TICK + 2))
  21992. -
  21993. struct mdss_mdp_cmd_ctx {
  21994. struct mdss_mdp_ctl *ctl;
  21995. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  21996. - u32 panel_ndx;
  21997. -#endif
  21998. u32 pp_num;
  21999. u8 ref_cnt;
  22000. - struct completion pp_comp;
  22001. struct completion stop_comp;
  22002. + wait_queue_head_t pp_waitq;
  22003. struct list_head vsync_handlers;
  22004. int panel_on;
  22005. - int koff_cnt;
  22006. + atomic_t koff_cnt;
  22007. int clk_enabled;
  22008. int vsync_enabled;
  22009. int rdptr_enabled;
  22010. struct mutex clk_mtx;
  22011. spinlock_t clk_lock;
  22012. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  22013. - spinlock_t te_lock;
  22014. -#endif
  22015. struct work_struct clk_work;
  22016. struct delayed_work ulps_work;
  22017. struct work_struct pp_done_work;
  22018. @@ -72,11 +55,13 @@ struct mdss_mdp_cmd_ctx {
  22019. u32 vclk_line; /* vsync clock per line */
  22020. struct mdss_panel_recovery recovery;
  22021. bool ulps;
  22022. + struct mdss_mdp_cmd_ctx *sync_ctx; /* for partial update */
  22023. + u32 pp_timeout_report_cnt;
  22024. };
  22025.  
  22026. struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
  22027. -extern char board_rev;
  22028. -int get_lcd_attached(void);
  22029. +
  22030. +static int mdss_mdp_cmd_do_notifier(struct mdss_mdp_cmd_ctx *ctx);
  22031.  
  22032. static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
  22033. {
  22034. @@ -164,10 +149,10 @@ static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_ctl *ctl,
  22035.  
  22036. cfg |= vclks_line;
  22037.  
  22038. - pr_info("%s: te->tear_check_en = %d, res=%d vclks=%x height=%d init=%d rd=%d start=%d ",
  22039. - __func__, te->tear_check_en, pinfo->yres, vclks_line, te->sync_cfg_height,
  22040. + pr_debug("%s: yres=%d vclks=%x height=%d init=%d rd=%d start=%d ",
  22041. + __func__, pinfo->yres, vclks_line, te->sync_cfg_height,
  22042. te->vsync_init_val, te->rd_ptr_irq, te->start_pos);
  22043. - pr_info("thrd_start =%d thrd_cont=%d\n",
  22044. + pr_debug("thrd_start =%d thrd_cont=%d\n",
  22045. te->sync_threshold_start, te->sync_threshold_continue);
  22046.  
  22047. mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
  22048. @@ -210,14 +195,9 @@ static inline void mdss_mdp_cmd_clk_on(struct mdss_mdp_cmd_ctx *ctx)
  22049. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  22050. int rc;
  22051.  
  22052. - if (!ctx->panel_on) {
  22053. - pr_info("%s: Ignore clock on because the unblank does not finished\n", __func__);
  22054. + if (!ctx->panel_on)
  22055. return;
  22056. - }
  22057.  
  22058. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22059. - xlog(__func__, ctx->panel_ndx, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
  22060. -#endif
  22061. mutex_lock(&ctx->clk_mtx);
  22062. MDSS_XLOG(ctx->pp_num, ctx->koff_cnt, ctx->clk_enabled,
  22063. ctx->rdptr_enabled);
  22064. @@ -261,9 +241,6 @@ static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx)
  22065. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  22066. int set_clk_off = 0;
  22067.  
  22068. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22069. - xlog(__func__,ctx->panel_ndx, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
  22070. -#endif
  22071. mutex_lock(&ctx->clk_mtx);
  22072. MDSS_XLOG(ctx->pp_num, ctx->koff_cnt, ctx->clk_enabled,
  22073. ctx->rdptr_enabled);
  22074. @@ -272,7 +249,7 @@ static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx)
  22075. set_clk_off = 1;
  22076. spin_unlock_irqrestore(&ctx->clk_lock, flags);
  22077.  
  22078. - if ((ctx->clk_enabled && set_clk_off) || (get_lcd_attached() == 0)) {
  22079. + if (ctx->clk_enabled && set_clk_off) {
  22080. ctx->clk_enabled = 0;
  22081. mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
  22082. mdss_mdp_ctl_intf_event
  22083. @@ -285,21 +262,6 @@ static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx)
  22084. }
  22085. mutex_unlock(&ctx->clk_mtx);
  22086. }
  22087. -#if defined(DYNAMIC_FPS_USE_TE_CTRL)
  22088. -int dynamic_fps_use_te_ctrl_value;
  22089. -#endif
  22090. -#if defined(CONFIG_LCD_HMT)
  22091. -int skip_te_enable = 0;
  22092. -static unsigned int skip_te = 0;
  22093. -#endif
  22094. -
  22095. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  22096. -int te;
  22097. -int te_cnt;
  22098. -int te_set_done;
  22099. -struct completion te_check_comp;
  22100. -int get_lcd_ldi_info(void);
  22101. -#endif
  22102.  
  22103. static void mdss_mdp_cmd_readptr_done(void *arg)
  22104. {
  22105. @@ -308,99 +270,15 @@ static void mdss_mdp_cmd_readptr_done(void *arg)
  22106. struct mdss_mdp_vsync_handler *tmp;
  22107. ktime_t vsync_time;
  22108.  
  22109. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  22110. - static ktime_t vsync_time1;
  22111. - static ktime_t vsync_time2;
  22112. - static int i = 0;
  22113. - static int time1 = 0, time2 = 0;
  22114. -#endif
  22115. - static long long vsync[2];
  22116. - long long duration = 16000;
  22117. - static int index;
  22118. - static int add_value = 1;
  22119. -// pr_err("mdss_mdp_cmd_readptr_done\n");
  22120. -#if defined(DYNAMIC_FPS_USE_TE_CTRL)
  22121. - if(dynamic_fps_use_te_ctrl)
  22122. - {
  22123. - if(dynamic_fps_use_te_ctrl_value)
  22124. - {
  22125. - dynamic_fps_use_te_ctrl_value = 0;
  22126. - return;
  22127. - }
  22128. - dynamic_fps_use_te_ctrl_value = 1;
  22129. - }
  22130. -#endif
  22131. -
  22132. if (!ctx) {
  22133. pr_err("invalid ctx\n");
  22134. return;
  22135. }
  22136.  
  22137. -#if defined(CONFIG_LCD_HMT)
  22138. - if (skip_te_enable) {
  22139. - if (skip_te) {
  22140. - pr_debug("%s : Skip TE Signal \n",__func__);
  22141. - skip_te = 0;
  22142. - return;
  22143. - }
  22144. - skip_te = 1;
  22145. - }
  22146. -#endif
  22147. -
  22148. - ATRACE_BEGIN(__func__);
  22149. vsync_time = ktime_get();
  22150. - vsync[index] = ktime_to_us(vsync_time);
  22151. -
  22152. - index += add_value;
  22153. - add_value *= -1;
  22154. -
  22155. - if (vsync[0] && vsync[1])
  22156. - duration = vsync[index + add_value] - vsync[index];
  22157. ctl->vsync_cnt++;
  22158. - MDSS_XLOG(0xFFFF, ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22159. - ctx->rdptr_enabled, duration);
  22160. -
  22161. - if (duration <= 8000 || duration >= 22000)
  22162. - pr_err("[DEBUG]%s:time : %lld, duration : %lld\n",
  22163. - __func__, vsync[index + add_value], duration);
  22164. -
  22165. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  22166. - if (get_lcd_ldi_info()) {
  22167. - if (te_set_done == TE_SET_START) {
  22168. -
  22169. - pr_debug("%s : TE_SET_START...",__func__);
  22170. -
  22171. - if (i % 2 == 0) {
  22172. - vsync_time1 = ktime_get();
  22173. - time1 = (int)ktime_to_us(vsync_time1);
  22174. - te = time1 && time2 ? time1 - time2 : 0;
  22175. - pr_debug("[%s] : ktime = %d\n",__func__, te);
  22176. - } else {
  22177. - vsync_time2 = ktime_get();
  22178. - time2 = (int)ktime_to_us(vsync_time2);
  22179. - te = time1 && time2 ? time2 - time1 : 0;
  22180. - pr_debug("[%s] : ktime = %d\n",__func__, te);
  22181. - }
  22182. - i++;
  22183. -
  22184. - pr_debug("[%s] TE = %d\n",__func__, te);
  22185. -
  22186. - spin_lock(&ctx->te_lock);
  22187. - te_cnt++;
  22188. - if (te_cnt >= 2) { // check TE using only two signal..
  22189. - pr_debug(">>>> te_check_comp COMPLETE (%d) <<<< \n", te_cnt);
  22190. - complete(&te_check_comp);
  22191. - }
  22192. - spin_unlock(&ctx->te_lock);
  22193. - } else {
  22194. - pr_debug("%s : not TE_SET_START...",__func__);
  22195. - }
  22196. - }
  22197. -#endif
  22198. -
  22199. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22200. - xlog(__func__,ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0x88888);
  22201. -#endif
  22202. + MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22203. + ctx->rdptr_enabled);
  22204.  
  22205. spin_lock(&ctx->clk_lock);
  22206. list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
  22207. @@ -411,17 +289,9 @@ static void mdss_mdp_cmd_readptr_done(void *arg)
  22208. if (!ctx->vsync_enabled) {
  22209. if (ctx->rdptr_enabled)
  22210. ctx->rdptr_enabled--;
  22211. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  22212. - if (get_lcd_ldi_info())
  22213. - if (!(te_set_done == TE_SET_DONE || te_set_done == TE_SET_FAIL))
  22214. - {
  22215. - pr_info("now restoring TE/ rdptr_enabled++\n");
  22216. - if (ctx->rdptr_enabled == 0)
  22217. - ctx->rdptr_enabled++;
  22218. - }
  22219. -#endif
  22220. +
  22221. /* keep clk on during kickoff */
  22222. - if (ctx->rdptr_enabled == 0 && ctx->koff_cnt)
  22223. + if (ctx->rdptr_enabled == 0 && atomic_read(&ctx->koff_cnt))
  22224. ctx->rdptr_enabled++;
  22225. }
  22226.  
  22227. @@ -430,12 +300,8 @@ static void mdss_mdp_cmd_readptr_done(void *arg)
  22228. (MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
  22229. complete(&ctx->stop_comp);
  22230. schedule_work(&ctx->clk_work);
  22231. - index = 0;
  22232. - add_value = 1;
  22233. - vsync[0] = vsync[1] = 0;
  22234. }
  22235.  
  22236. - ATRACE_END(__func__);
  22237. spin_unlock(&ctx->clk_lock);
  22238. }
  22239.  
  22240. @@ -452,42 +318,16 @@ static void mdss_mdp_cmd_underflow_recovery(void *data)
  22241. if (!ctx->ctl)
  22242. return;
  22243. spin_lock_irqsave(&ctx->clk_lock, flags);
  22244. - if (ctx->koff_cnt) {
  22245. + if (atomic_read(&ctx->koff_cnt)) {
  22246. mdss_mdp_ctl_reset(ctx->ctl);
  22247. pr_debug("%s: intf_num=%d\n", __func__,
  22248. ctx->ctl->intf_num);
  22249. - ctx->koff_cnt--;
  22250. + atomic_dec(&ctx->koff_cnt);
  22251. mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_PING_PONG_COMP,
  22252. ctx->pp_num);
  22253. - complete_all(&ctx->pp_comp);
  22254. }
  22255. spin_unlock_irqrestore(&ctx->clk_lock, flags);
  22256. }
  22257. -#if 0
  22258. -static void mdss_mdp_cmd_pingpong_recovery(struct mdss_mdp_cmd_ctx *ctx)
  22259. -{
  22260. - unsigned long flags;
  22261. -
  22262. - if (!ctx) {
  22263. - pr_err("%s: invalid ctx\n", __func__);
  22264. - return;
  22265. - }
  22266. -
  22267. - if (!ctx->ctl)
  22268. - return;
  22269. - spin_lock_irqsave(&ctx->clk_lock, flags);
  22270. - if (ctx->koff_cnt) {
  22271. - mdss_mdp_ctl_reset(ctx->ctl);
  22272. - pr_debug("%s: intf_num=%d\n", __func__,
  22273. - ctx->ctl->intf_num);
  22274. - ctx->koff_cnt--;
  22275. - mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_PING_PONG_COMP,
  22276. - ctx->pp_num);
  22277. - complete_all(&ctx->pp_comp);
  22278. - }
  22279. - spin_unlock_irqrestore(&ctx->clk_lock, flags);
  22280. -}
  22281. -#endif
  22282.  
  22283. static void mdss_mdp_cmd_pingpong_done(void *arg)
  22284. {
  22285. @@ -496,9 +336,6 @@ static void mdss_mdp_cmd_pingpong_done(void *arg)
  22286. struct mdss_mdp_vsync_handler *tmp;
  22287. ktime_t vsync_time;
  22288.  
  22289. -#if defined (CONFIG_FB_MSM_MDSS_DBG_SEQ_TICK)
  22290. - mdss_dbg_tick_save(PP_DONE);
  22291. -#endif
  22292. if (!ctx) {
  22293. pr_err("%s: invalid ctx\n", __func__);
  22294. return;
  22295. @@ -514,28 +351,27 @@ static void mdss_mdp_cmd_pingpong_done(void *arg)
  22296. }
  22297. mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num);
  22298.  
  22299. - complete_all(&ctx->pp_comp);
  22300. MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22301. ctx->rdptr_enabled);
  22302. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22303. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, ctl->roi_bkup.w, ctl->roi_bkup.h);
  22304. -#endif
  22305. -
  22306. - if (ctx->koff_cnt) {
  22307. - atomic_inc(&ctx->pp_done_cnt);
  22308. - schedule_work(&ctx->pp_done_work);
  22309. - ctx->koff_cnt--;
  22310. - if (ctx->koff_cnt) {
  22311. +
  22312. + if (atomic_add_unless(&ctx->koff_cnt, -1, 0)) {
  22313. + if (atomic_read(&ctx->koff_cnt))
  22314. pr_err("%s: too many kickoffs=%d!\n", __func__,
  22315. - ctx->koff_cnt);
  22316. - ctx->koff_cnt = 0;
  22317. + atomic_read(&ctx->koff_cnt));
  22318. + if (mdss_mdp_cmd_do_notifier(ctx)) {
  22319. + atomic_inc(&ctx->pp_done_cnt);
  22320. + schedule_work(&ctx->pp_done_work);
  22321. }
  22322. - } else
  22323. + wake_up_all(&ctx->pp_waitq);
  22324. + } else {
  22325. pr_err("%s: should not have pingpong interrupt!\n", __func__);
  22326. + }
  22327.  
  22328. - trace_mdp_cmd_pingpong_done(ctl, ctx->pp_num, ctx->koff_cnt);
  22329. + trace_mdp_cmd_pingpong_done(ctl, ctx->pp_num,
  22330. + atomic_read(&ctx->koff_cnt));
  22331. pr_debug("%s: ctl_num=%d intf_num=%d ctx=%d kcnt=%d\n", __func__,
  22332. - ctl->num, ctl->intf_num, ctx->pp_num, ctx->koff_cnt);
  22333. + ctl->num, ctl->intf_num, ctx->pp_num,
  22334. + atomic_read(&ctx->koff_cnt));
  22335.  
  22336. spin_unlock(&ctx->clk_lock);
  22337. }
  22338. @@ -549,9 +385,7 @@ static void pingpong_done_work(struct work_struct *work)
  22339. while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
  22340. mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
  22341.  
  22342. -#if !defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FHD_FA2_PT_PANEL)
  22343. mdss_mdp_ctl_perf_release_bw(ctx->ctl);
  22344. -#endif
  22345. }
  22346. }
  22347.  
  22348. @@ -604,9 +438,6 @@ static int mdss_mdp_cmd_add_vsync_handler(struct mdss_mdp_ctl *ctl,
  22349. pr_err("%s: invalid ctx\n", __func__);
  22350. return -ENODEV;
  22351. }
  22352. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22353. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
  22354. -#endif
  22355.  
  22356. MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22357. ctx->rdptr_enabled);
  22358. @@ -642,9 +473,6 @@ static int mdss_mdp_cmd_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
  22359.  
  22360. MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22361. ctx->rdptr_enabled, 0x88888);
  22362. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22363. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0x88888);
  22364. -#endif
  22365.  
  22366. spin_lock_irqsave(&ctx->clk_lock, flags);
  22367. if (handle->enabled) {
  22368. @@ -670,34 +498,10 @@ int mdss_mdp_cmd_reconfigure_splash_done(struct mdss_mdp_ctl *ctl, bool handoff)
  22369. pdata = ctl->panel_data;
  22370.  
  22371. pdata->panel_info.cont_splash_enabled = 0;
  22372. -#if !defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQXGA_S6TNMR7_PT_PANEL)
  22373. +
  22374. mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)0);
  22375. -#endif
  22376. - return ret;
  22377. -}
  22378.  
  22379. -void mdp5_dump_regs(void)
  22380. -{
  22381. - int i, z, start, len;
  22382. - int offsets[] = {0x0};
  22383. - int length[] = {19776};
  22384. -
  22385. - printk("%s: =============MDSS Reg DUMP==============\n", __func__);
  22386. - for (i = 0; i < sizeof(offsets) / sizeof(int); i++) {
  22387. - start = offsets[i];
  22388. - len = length[i];
  22389. - printk("-------- Address %05x: -------\n", start);
  22390. - for (z = 0; z < len; z++) {
  22391. - if ((z & 3) == 0)
  22392. - printk("%05x:", start + (z * 4));
  22393. - printk(" %08x", MDSS_MDP_REG_READ(start + (z * 4)));
  22394. - if ((z & 3) == 3)
  22395. - printk("\n");
  22396. - }
  22397. - if ((z & 3) != 0)
  22398. - printk("\n");
  22399. - }
  22400. - printk("%s: ============= END ==============\n", __func__);
  22401. + return ret;
  22402. }
  22403.  
  22404. static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
  22405. @@ -705,9 +509,7 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
  22406. struct mdss_mdp_cmd_ctx *ctx;
  22407. struct mdss_panel_data *pdata;
  22408. unsigned long flags;
  22409. - int need_wait = 0;
  22410. int rc = 0;
  22411. - static int recovery_cnt;
  22412.  
  22413. ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
  22414. if (!ctx) {
  22415. @@ -717,14 +519,6 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
  22416.  
  22417. pdata = ctl->panel_data;
  22418.  
  22419. - spin_lock_irqsave(&ctx->clk_lock, flags);
  22420. - if (ctx->koff_cnt > 0)
  22421. - need_wait = 1;
  22422. - spin_unlock_irqrestore(&ctx->clk_lock, flags);
  22423. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22424. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, ctl->roi_bkup.w, ctl->roi_bkup.h);
  22425. -#endif
  22426. -
  22427. ctl->roi_bkup.w = ctl->width;
  22428. ctl->roi_bkup.h = ctl->height;
  22429.  
  22430. @@ -732,50 +526,95 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
  22431. ctx->rdptr_enabled, ctl->roi_bkup.w,
  22432. ctl->roi_bkup.h);
  22433.  
  22434. - pr_debug("%s: need_wait=%d intf_num=%d ctx=%p\n",
  22435. - __func__, need_wait, ctl->intf_num, ctx);
  22436. -
  22437. - if (need_wait) {
  22438. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQXGA_S6E3HA1_PT_PANEL)
  22439. - if (!board_rev)
  22440. - rc = wait_for_completion_timeout(
  22441. - &ctx->pp_comp, msecs_to_jiffies(20));
  22442. - else
  22443. -#endif
  22444. - rc = wait_for_completion_timeout(
  22445. - &ctx->pp_comp, msecs_to_jiffies(1000));
  22446. - trace_mdp_cmd_wait_pingpong(ctl->num, ctx->koff_cnt);
  22447. -
  22448. - if (rc <= 0) {
  22449. - WARN(1, "cmd kickoff timed out (rc = %d, recovery_cnt = %d) ctl=%d\n",
  22450. - rc, ++recovery_cnt, ctl->num);
  22451. + pr_debug("%s: intf_num=%d ctx=%p koff_cnt=%d\n", __func__,
  22452. + ctl->intf_num, ctx, atomic_read(&ctx->koff_cnt));
  22453. +
  22454. + rc = wait_event_timeout(ctx->pp_waitq,
  22455. + atomic_read(&ctx->koff_cnt) == 0,
  22456. + KOFF_TIMEOUT);
  22457. +
  22458. + if (rc <= 0) {
  22459. + u32 status, mask;
  22460. +
  22461. + mask = BIT(MDSS_MDP_IRQ_PING_PONG_COMP + ctx->pp_num);
  22462. + status = mask & readl_relaxed(ctl->mdata->mdp_base +
  22463. + MDSS_MDP_REG_INTR_STATUS);
  22464. + if (status) {
  22465. + WARN(1, "pp done but irq not triggered\n");
  22466. + mdss_mdp_irq_clear(ctl->mdata,
  22467. + MDSS_MDP_IRQ_PING_PONG_COMP,
  22468. + ctx->pp_num);
  22469. + local_irq_save(flags);
  22470. + mdss_mdp_cmd_pingpong_done(ctl);
  22471. + local_irq_restore(flags);
  22472. + rc = 1;
  22473. + }
  22474. +
  22475. + rc = atomic_read(&ctx->koff_cnt) == 0;
  22476. + }
  22477. +
  22478. + if (rc <= 0) {
  22479. + if (!ctx->pp_timeout_report_cnt) {
  22480. + WARN(1, "cmd kickoff timed out (%d) ctl=%d\n",
  22481. + rc, ctl->num);
  22482. mdss_dsi_debug_check_te(pdata);
  22483. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22484. - dumpreg();
  22485. - mdp5_dump_regs();
  22486. - mdss_mdp_debug_bus();
  22487. - xlog_dump();
  22488. -#if 0
  22489. - mdss_mdp_cmd_pingpong_recovery(ctx);
  22490. -#else
  22491. - panic("Pingpong Timeout");
  22492. -#endif
  22493. -#endif
  22494. - rc = -EPERM;
  22495. - mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
  22496. - } else {
  22497. - rc = 0;
  22498. + MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0", "dsi1",
  22499. + "edp", "hdmi", "panic");
  22500. }
  22501. + ctx->pp_timeout_report_cnt++;
  22502. + rc = -EPERM;
  22503. + mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_TIMEOUT);
  22504. + atomic_add_unless(&ctx->koff_cnt, -1, 0);
  22505. + } else {
  22506. + rc = 0;
  22507. + ctx->pp_timeout_report_cnt = 0;
  22508. }
  22509. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22510. - xlog(__func__,ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, rc);
  22511. -#endif
  22512.  
  22513. - MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22514. - ctx->rdptr_enabled, rc);
  22515. + /* signal any pending ping pong done events */
  22516. + while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
  22517. + mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
  22518. +
  22519. + MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->clk_enabled,
  22520. + ctx->rdptr_enabled, rc);
  22521. +
  22522. return rc;
  22523. }
  22524.  
  22525. +static int mdss_mdp_cmd_do_notifier(struct mdss_mdp_cmd_ctx *ctx)
  22526. +{
  22527. + struct mdss_mdp_cmd_ctx *sctx;
  22528. + sctx = ctx->sync_ctx;
  22529. +
  22530. + if (!sctx || atomic_read(&sctx->koff_cnt) == 0)
  22531. + return 1;
  22532. +
  22533. + return 0;
  22534. +}
  22535. +
  22536. +static void mdss_mdp_cmd_set_sync_ctx(
  22537. + struct mdss_mdp_ctl *ctl, struct mdss_mdp_ctl *sctl)
  22538. +{
  22539. + struct mdss_mdp_cmd_ctx *ctx, *sctx;
  22540. +
  22541. + ctx = (struct mdss_mdp_cmd_ctx *)ctl->priv_data;
  22542. + if (!sctl) {
  22543. + ctx->sync_ctx = NULL;
  22544. + return;
  22545. + }
  22546. +
  22547. + sctx = (struct mdss_mdp_cmd_ctx *)sctl->priv_data;
  22548. +
  22549. + if (!sctl->roi.w && !sctl->roi.h) {
  22550. + /* left only */
  22551. + ctx->sync_ctx = NULL;
  22552. + sctx->sync_ctx = NULL;
  22553. + } else {
  22554. + /* left + right */
  22555. + ctx->sync_ctx = sctx;
  22556. + sctx->sync_ctx = ctx;
  22557. + }
  22558. +}
  22559. +
  22560. static int mdss_mdp_cmd_set_partial_roi(struct mdss_mdp_ctl *ctl)
  22561. {
  22562. int rc = 0;
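
The hunk above replaces the pp_comp completion with a wait queue driven by an atomic kickoff counter: kickoff increments koff_cnt, the ping-pong done interrupt decrements it and wakes the queue, and wait4pingpong sleeps until the counter drains or KOFF_TIMEOUT expires (then re-checks the interrupt status register for a completion whose IRQ never fired). A minimal sketch of that pattern, with hypothetical names and none of the driver's IRQ or xlog bookkeeping:

    /* Illustrative sketch only -- hypothetical names, not the driver's API. */
    #include <linux/atomic.h>
    #include <linux/wait.h>
    #include <linux/jiffies.h>

    struct kickoff_waiter {
    	atomic_t koff_cnt;		/* frames still in flight */
    	wait_queue_head_t pp_waitq;	/* woken from the done interrupt */
    };

    static void kickoff_waiter_init(struct kickoff_waiter *w)
    {
    	atomic_set(&w->koff_cnt, 0);
    	init_waitqueue_head(&w->pp_waitq);
    }

    static void kickoff_submit(struct kickoff_waiter *w)
    {
    	atomic_inc(&w->koff_cnt);	/* one more kickoff outstanding */
    }

    /* called from the (hypothetical) ping-pong done interrupt handler */
    static void kickoff_done(struct kickoff_waiter *w)
    {
    	if (atomic_add_unless(&w->koff_cnt, -1, 0))
    		wake_up_all(&w->pp_waitq);
    }

    /* returns 0 on timeout with kickoffs still pending, >0 otherwise */
    static long kickoff_wait(struct kickoff_waiter *w, unsigned long timeout)
    {
    	return wait_event_timeout(w->pp_waitq,
    				  atomic_read(&w->koff_cnt) == 0, timeout);
    }
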
  22563. @@ -794,26 +633,18 @@ static int mdss_mdp_cmd_set_partial_roi(struct mdss_mdp_ctl *ctl)
  22564.  
  22565. int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
  22566. {
  22567. - struct mdss_mdp_cmd_ctx *ctx;
  22568. - unsigned long flags;
  22569. + struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
  22570. int rc;
  22571.  
  22572. - ATRACE_BEGIN(__func__);
  22573. ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
  22574. if (!ctx) {
  22575. pr_err("invalid ctx\n");
  22576. return -ENODEV;
  22577. }
  22578.  
  22579. - if (get_lcd_attached() == 0) {
  22580. - pr_err("%s : lcd is not attached..\n",__func__);
  22581. - return -ENODEV;
  22582. - }
  22583. mdss_mdp_ctl_perf_set_transaction_status(ctl,
  22584. PERF_HW_MDP_STATE, PERF_STATUS_BUSY);
  22585.  
  22586. - pr_debug("%s:+\n", __func__);
  22587. -
  22588. if (ctx->panel_on == 0) {
  22589. rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
  22590. WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
  22591. @@ -830,14 +661,12 @@ int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
  22592.  
  22593. MDSS_XLOG(ctl->num, ctl->roi.x, ctl->roi.y, ctl->roi.w,
  22594. ctl->roi.h);
  22595. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22596. - xlog(__func__, ctl->num, ctl->roi.x, ctl->roi.y, ctl->roi.w, ctl->roi.h, 0x1234);
  22597. -#endif
  22598.  
  22599. - spin_lock_irqsave(&ctx->clk_lock, flags);
  22600. - ctx->koff_cnt++;
  22601. - spin_unlock_irqrestore(&ctx->clk_lock, flags);
  22602. - trace_mdp_cmd_kickoff(ctl->num, ctx->koff_cnt);
  22603. + atomic_inc(&ctx->koff_cnt);
  22604. + if (sctx)
  22605. + atomic_inc(&sctx->koff_cnt);
  22606. +
  22607. + trace_mdp_cmd_kickoff(ctl->num, atomic_read(&ctx->koff_cnt));
  22608.  
  22609. mdss_mdp_cmd_clk_on(ctx);
  22610.  
  22611. @@ -847,25 +676,16 @@ int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
  22612. * tx dcs command if had any
  22613. */
  22614. mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_CMDLIST_KOFF, NULL);
  22615. - INIT_COMPLETION(ctx->pp_comp);
  22616. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22617. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
  22618. -#endif
  22619. +
  22620. + mdss_mdp_cmd_set_sync_ctx(ctl, NULL);
  22621. +
  22622. mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num);
  22623. mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
  22624. mdss_mdp_ctl_perf_set_transaction_status(ctl,
  22625. - PERF_SW_COMMIT_STATE, PERF_STATUS_DONE);
  22626. + PERF_SW_COMMIT_STATE, PERF_STATUS_DONE);
  22627. mb();
  22628. MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22629. ctx->rdptr_enabled);
  22630. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22631. - {
  22632. - void mdss_mdp_mixer_read(void);
  22633. - mdss_mdp_mixer_read();
  22634. - }
  22635. -#endif
  22636. - ATRACE_END(__func__);
  22637. - pr_debug("%s : -- \n", __func__);
  22638.  
  22639. return 0;
  22640. }
  22641. @@ -873,21 +693,13 @@ int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
  22642. int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
  22643. {
  22644. struct mdss_mdp_cmd_ctx *ctx;
  22645. - struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
  22646. + struct mdss_panel_info *pinfo;
  22647. unsigned long flags;
  22648. struct mdss_mdp_vsync_handler *tmp, *handle;
  22649. int need_wait = 0;
  22650. int ret = 0;
  22651. - u8 timeout_status = 0;
  22652. int hz;
  22653.  
  22654. - pr_debug("%s:+\n", __func__);
  22655. -
  22656. - if (get_lcd_attached() == 0) {
  22657. - pr_err("%s : lcd is not attached..\n",__func__);
  22658. - return 0;
  22659. - }
  22660. -
  22661. ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
  22662. if (!ctx) {
  22663. pr_err("invalid ctx\n");
  22664. @@ -899,9 +711,6 @@ int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
  22665. MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22666. ctx->rdptr_enabled, XLOG_FUNC_ENTRY);
  22667.  
  22668. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22669. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0x11111);
  22670. -#endif
  22671. spin_lock_irqsave(&ctx->clk_lock, flags);
  22672. if (ctx->rdptr_enabled) {
  22673. INIT_COMPLETION(ctx->stop_comp);
  22674. @@ -911,43 +720,22 @@ int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
  22675.  
  22676. hz = mdss_panel_get_framerate(&ctl->panel_data->panel_info);
  22677.  
  22678. - if (need_wait) {
  22679. - if (pinfo->alpm_event && pinfo->alpm_event(CHECK_CURRENT_STATUS))
  22680. - timeout_status = wait_for_completion_timeout(&ctx->stop_comp,\
  22681. - STOP_TIMEOUT_FOR_ALPM);
  22682. - else
  22683. - timeout_status = wait_for_completion_timeout(&ctx->stop_comp,\
  22684. - STOP_TIMEOUT(hz)); /*msecs_to_jiffies(1000));*/ //STOP_TIMEOUT(16 * 4 frames) -> 1000
  22685. - if (timeout_status <= 0) {
  22686. + if (need_wait)
  22687. + if (wait_for_completion_timeout(&ctx->stop_comp,
  22688. + STOP_TIMEOUT(hz))
  22689. + <= 0) {
  22690. WARN(1, "stop cmd time out\n");
  22691. +
  22692. if (IS_ERR_OR_NULL(ctl->panel_data)) {
  22693. pr_err("no panel data\n");
  22694. } else {
  22695. pinfo = &ctl->panel_data->panel_info;
  22696. -
  22697. -#if defined(CONFIG_MACH_KLTE_CUDUOS) || defined(CONFIG_MACH_H3G_CHN_OPEN) || defined(CONFIG_MACH_H3G_CHN_CMCC) || defined(CONFIG_MACH_HLTE_CHN_CMCC) || defined(CONFIG_MACH_HLTE_CHN_TDOPEN)
  22698. mdss_mdp_irq_disable
  22699. (MDSS_MDP_IRQ_PING_PONG_RD_PTR,
  22700. ctx->pp_num);
  22701. ctx->rdptr_enabled = 0;
  22702. -#else
  22703. - if (pinfo->panel_dead) {
  22704. - mdss_mdp_irq_disable
  22705. - (MDSS_MDP_IRQ_PING_PONG_RD_PTR,
  22706. - ctx->pp_num);
  22707. - ctx->rdptr_enabled = 0;
  22708. - }
  22709. -#endif
  22710. }
  22711. }
  22712. - }
  22713. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQXGA_S6E3HA1_PT_PANEL)
  22714. - if (!board_rev) {
  22715. - mdss_mdp_irq_disable(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
  22716. - if (ctx->rdptr_enabled)
  22717. - ctx->rdptr_enabled = 0;
  22718. - }
  22719. -#endif
  22720.  
  22721. if (cancel_work_sync(&ctx->clk_work))
  22722. pr_debug("no pending clk work\n");
  22723. @@ -973,13 +761,11 @@ int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
  22724. memset(ctx, 0, sizeof(*ctx));
  22725. ctl->priv_data = NULL;
  22726.  
  22727. - if (ctl->num == 0) {
  22728. - ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
  22729. - WARN(ret, "intf %d unblank error (%d)\n", ctl->intf_num, ret);
  22730. + ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
  22731. + WARN(ret, "intf %d unblank error (%d)\n", ctl->intf_num, ret);
  22732.  
  22733. - ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL);
  22734. - WARN(ret, "intf %d unblank error (%d)\n", ctl->intf_num, ret);
  22735. - }
  22736. + ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL);
  22737. + WARN(ret, "intf %d unblank error (%d)\n", ctl->intf_num, ret);
  22738.  
  22739. ctl->stop_fnc = NULL;
  22740. ctl->display_fnc = NULL;
  22741. @@ -989,9 +775,6 @@ int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
  22742.  
  22743. MDSS_XLOG(ctl->num, ctx->koff_cnt, ctx->clk_enabled,
  22744. ctx->rdptr_enabled, XLOG_FUNC_EXIT);
  22745. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22746. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0x222222);
  22747. -#endif
  22748. pr_debug("%s:-\n", __func__);
  22749.  
  22750. return 0;
  22751. @@ -1030,16 +813,11 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
  22752. }
  22753.  
  22754. ctx->ctl = ctl;
  22755. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22756. - ctx->panel_ndx = ctl->panel_ndx;
  22757. -#endif
  22758. ctx->pp_num = mixer->num;
  22759. - init_completion(&ctx->pp_comp);
  22760. + ctx->pp_timeout_report_cnt = 0;
  22761. + init_waitqueue_head(&ctx->pp_waitq);
  22762. init_completion(&ctx->stop_comp);
  22763. spin_lock_init(&ctx->clk_lock);
  22764. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  22765. - spin_lock_init(&ctx->te_lock);
  22766. -#endif
  22767. mutex_init(&ctx->clk_mtx);
  22768. INIT_WORK(&ctx->clk_work, clk_ctrl_work);
  22769. INIT_DELAYED_WORK(&ctx->ulps_work, __mdss_mdp_cmd_ulps_work);
  22770. @@ -1058,9 +836,6 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
  22771. mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num,
  22772. mdss_mdp_cmd_readptr_done, ctl);
  22773.  
  22774. -#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  22775. - xlog(__func__, ctl->num, ctx->koff_cnt, ctx->clk_enabled, ctx->rdptr_enabled, 0, 0);
  22776. -#endif
  22777. mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
  22778. mdss_mdp_cmd_pingpong_done, ctl);
  22779.  
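Taken as a whole, this file's changes also add the sync_ctx field so the left and right command contexts of a split panel are paired, and a frame-done notification is raised only once both halves have drained their kickoffs (mdss_mdp_cmd_do_notifier). Reduced to a hypothetical helper, assuming simplified types:

    /* Illustrative sketch -- hypothetical types, not the driver's structures. */
    #include <linux/atomic.h>

    struct half_ctx {
    	atomic_t koff_cnt;		/* kickoffs in flight on this half */
    	struct half_ctx *sync_ctx;	/* partner half, NULL when unsynced */
    };

    /* pair both halves for a left+right update, or unpair for left-only */
    static void pair_halves(struct half_ctx *left, struct half_ctx *right)
    {
    	left->sync_ctx = right;
    	if (right)
    		right->sync_ctx = left;
    }

    /* frame-done may be reported once the partner (if any) has also drained */
    static int frame_done_ready(struct half_ctx *ctx)
    {
    	return !ctx->sync_ctx || atomic_read(&ctx->sync_ctx->koff_cnt) == 0;
    }
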
  22780. diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
  22781. index b0c609b..5b99105 100644
  22782. --- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
  22783. +++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
  22784. @@ -1,4 +1,4 @@
  22785. -/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  22786. +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  22787. *
  22788. * This program is free software; you can redistribute it and/or modify
  22789. * it under the terms of the GNU General Public License version 2 and
  22790. @@ -46,25 +46,6 @@
  22791. #define PP_CLK_CFG_OFF 0
  22792. #define PP_CLK_CFG_ON 1
  22793.  
  22794. -#ifdef __MDSS_DEBUG__
  22795. -
  22796. -#define ID_PRINTK(mdss_id, fmt, args...) if(mfd->index == mdss_id) printk(fmt, ##args);
  22797. -#else
  22798. -
  22799. -#define ID_PRINTK(mdss_id, fmt, args...)
  22800. -#endif
  22801. -
  22802. -enum mdss_id_state {
  22803. - ID_LCD = 0,
  22804. - ID_HDMI = 1
  22805. -};
  22806. -int get_lcd_attached(void);
  22807. -
  22808. -
  22809. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  22810. -u8 pre_csc_update = 0xFF;
  22811. -#endif
  22812. -
  22813. #define MEM_PROTECT_SD_CTRL 0xF
  22814.  
  22815. #define OVERLAY_MAX 10
  22816. @@ -73,23 +54,12 @@ struct sd_ctrl_req {
  22817. unsigned int enable;
  22818. } __attribute__ ((__packed__));
  22819.  
  22820. -#if defined (CONFIG_FB_MSM_MDSS_DBG_SEQ_TICK)
  22821. -static struct mdss_tick_debug mdss_dbg_tick;
  22822. -#endif
  22823. -struct list_head *pipes_used_dbg;
  22824. -
  22825. -DEFINE_MUTEX(free_list_purge_mutex);
  22826. -
  22827. static atomic_t ov_active_panels = ATOMIC_INIT(0);
  22828. static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
  22829. static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
  22830. static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
  22831. static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
  22832. static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
  22833. -#if defined(CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL)
  22834. -extern int err_fg_working;
  22835. -extern int lcd_connected_status;
  22836. -#endif
  22837.  
  22838. static inline u32 left_lm_w_from_mfd(struct msm_fb_data_type *mfd)
  22839. {
  22840. @@ -409,8 +379,8 @@ static int __mdss_mdp_validate_pxl_extn(struct mdss_mdp_pipe *pipe)
  22841. ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
  22842. (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1))) {
  22843. src_w >>= 1;
  22844. - }
  22845. -
  22846. + }
  22847. +
  22848. if (plane == 1 && !pipe->vert_deci &&
  22849. ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
  22850. (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)))
  22851. @@ -433,7 +403,8 @@ static int __mdss_mdp_validate_pxl_extn(struct mdss_mdp_pipe *pipe)
  22852. vert_req_pixels = pipe->scale.num_ext_pxls_top[plane] +
  22853. pipe->scale.num_ext_pxls_btm[plane];
  22854.  
  22855. - vert_fetch_pixels = (pipe->scale.top_ftch[plane] >> pipe->vert_deci) +
  22856. + vert_fetch_pixels =
  22857. + (pipe->scale.top_ftch[plane] >> pipe->vert_deci) +
  22858. pipe->scale.top_rpt[plane] +
  22859. (pipe->scale.btm_ftch[plane] >> pipe->vert_deci)+
  22860. pipe->scale.btm_rpt[plane];
  22861. @@ -542,16 +513,9 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
  22862.  
  22863. if (req->flags & MDP_ROT_90) {
  22864. pr_err("unsupported inline rotation\n");
  22865. - return -ENOTSUPP;
  22866. + return -EOPNOTSUPP;
  22867. }
  22868.  
  22869. -#if defined(CONFIG_MDSS_UD_FLIP)
  22870. - if (req->flags & MDP_FLIP_UD)
  22871. - req->flags &= ~MDP_FLIP_UD;
  22872. - else
  22873. - req->flags |= MDP_FLIP_UD;
  22874. -#endif
  22875. -
  22876. if ((req->dst_rect.w > MAX_DST_W) || (req->dst_rect.h > MAX_DST_H)) {
  22877. pr_err("exceeded max mixer supported resolution %dx%d\n",
  22878. req->dst_rect.w, req->dst_rect.h);
  22879. @@ -632,7 +596,7 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
  22880.  
  22881. if (pipe == NULL) {
  22882. pr_err("error allocating pipe\n");
  22883. - return -ENOMEM;
  22884. + return -ENODEV;
  22885. }
  22886.  
  22887. ret = mdss_mdp_pipe_map(pipe);
  22888. @@ -739,7 +703,6 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
  22889. pipe->pp_cfg.igc_cfg.c0_c1_data,
  22890. sizeof(uint32_t) * len);
  22891. if (ret) {
  22892. - pr_err("pp_cfg1 get from user was NULL \n");
  22893. ret = -ENOMEM;
  22894. goto exit_fail;
  22895. }
  22896. @@ -747,7 +710,6 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
  22897. pipe->pp_cfg.igc_cfg.c2_data,
  22898. sizeof(uint32_t) * len);
  22899. if (ret) {
  22900. - pr_err("pp_cfg2 get from user was NULL \n");
  22901. ret = -ENOMEM;
  22902. goto exit_fail;
  22903. }
  22904. @@ -775,7 +737,6 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
  22905. pipe->pp_cfg.hist_lut_cfg.data,
  22906. sizeof(uint32_t) * len);
  22907. if (ret) {
  22908. - pr_err("lut get from user was NULL \n");
  22909. ret = -ENOMEM;
  22910. goto exit_fail;
  22911. }
  22912. @@ -867,14 +828,6 @@ static int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
  22913. }
  22914.  
  22915. if (req->flags & MDSS_MDP_ROT_ONLY) {
  22916. -#if defined(CONFIG_MDSS_UD_FLIP)
  22917. - if (req->flags & MDP_BWC_EN) {
  22918. - if (req->flags & MDP_FLIP_LR)
  22919. - req->flags &= ~MDP_FLIP_LR;
  22920. - else
  22921. - req->flags |= MDP_FLIP_LR;
  22922. - }
  22923. -#endif
  22924. ret = mdss_mdp_rotator_setup(mfd, req);
  22925. } else if (req->src.format == MDP_RGB_BORDERFILL) {
  22926. req->id = BORDERFILL_NDX;
  22927. @@ -900,7 +853,7 @@ int mdss_mdp_overlay_get_buf(struct msm_fb_data_type *mfd,
  22928. int num_planes,
  22929. u32 flags)
  22930. {
  22931. - int i, rc = 0;
  22932. + int i, rc;
  22933.  
  22934. if ((num_planes <= 0) || (num_planes > MAX_PLANES))
  22935. return -EINVAL;
  22936. @@ -960,14 +913,10 @@ static void __mdss_mdp_overlay_free_list_purge(struct msm_fb_data_type *mfd)
  22937. struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
  22938. int i;
  22939.  
  22940. - mutex_lock(&free_list_purge_mutex);
  22941. -
  22942. pr_debug("purging fb%d free list\n", mfd->index);
  22943. for (i = 0; i < mdp5_data->free_list_size; i++)
  22944. mdss_mdp_overlay_free_buf(&mdp5_data->free_list[i]);
  22945. mdp5_data->free_list_size = 0;
  22946. -
  22947. - mutex_unlock(&free_list_purge_mutex);
  22948. }
  22949.  
  22950. /**
  22951. @@ -992,24 +941,18 @@ static void __mdss_mdp_overlay_free_list_add(struct msm_fb_data_type *mfd,
  22952. memset(buf, 0, sizeof(*buf));
  22953. }
  22954.  
  22955. -/**
  22956. - * mdss_mdp_overlay_cleanup() - handles cleanup after frame commit
  22957. - * @mfd: Msm frame buffer data structure for the associated fb
  22958. - * @destroy_pipes: list of pipes that should be destroyed as part of cleanup
  22959. - *
  22960. - * Goes through destroy_pipes list and ensures they are ready to be destroyed
  22961. - * and cleaned up. Also cleanup of any pipe buffers after flip.
  22962. - */
  22963. -static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd,
  22964. - struct list_head *destroy_pipes)
  22965. +static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd)
  22966. {
  22967. struct mdss_mdp_pipe *pipe, *tmp;
  22968. struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
  22969. struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
  22970. bool recovery_mode = false;
  22971. + LIST_HEAD(destroy_pipes);
  22972.  
  22973. mutex_lock(&mdp5_data->list_lock);
  22974. - list_for_each_entry(pipe, destroy_pipes, list) {
  22975. + list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
  22976. + list_move(&pipe->list, &destroy_pipes);
  22977. +
  22978. /* make sure pipe fetch has been halted before freeing buffer */
  22979. if (mdss_mdp_pipe_fetch_halt(pipe)) {
  22980. /*
  22981. @@ -1044,7 +987,7 @@ static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd,
  22982. }
  22983. }
  22984.  
  22985. - list_for_each_entry_safe(pipe, tmp, destroy_pipes, list) {
  22986. + list_for_each_entry_safe(pipe, tmp, &destroy_pipes, list) {
  22987. /*
  22988. * in case of secure UI, the buffer needs to be released as
  22989. * soon as session is closed.
  22990. @@ -1054,7 +997,6 @@ static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd,
  22991. else
  22992. __mdss_mdp_overlay_free_list_add(mfd, &pipe->front_buf);
  22993. mdss_mdp_overlay_free_buf(&pipe->back_buf);
  22994. - list_del_init(&pipe->list);
  22995. mdss_mdp_pipe_destroy(pipe);
  22996. }
  22997. mutex_unlock(&mdp5_data->list_lock);
  22998. @@ -1112,11 +1054,6 @@ int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
  22999. struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
  23000. struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
  23001.  
  23002. - if (!ctl) {
  23003. - pr_err("%s unable to access ctrl\n", __func__);
  23004. - return -ENODEV;
  23005. - }
  23006. -
  23007. if (ctl->power_on) {
  23008. if (mdp5_data->mdata->ulps) {
  23009. rc = mdss_mdp_footswitch_ctrl_ulps(1, &mfd->pdev->dev);
  23010. @@ -1298,29 +1235,15 @@ static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd)
  23011. mdss_mdp_display_wait4comp(ctl);
  23012. }
  23013.  
  23014. -#if defined(CONFIG_MDNIE_LITE_TUNING)
  23015. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FULL_HD_PT_PANEL)
  23016. -static bool mdss_first_init = true;
  23017. -#endif
  23018. -#endif
  23019. -
  23020. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  23021. -int get_lcd_ldi_info(void);
  23022. -#endif
  23023. -
  23024. int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
  23025. struct mdp_display_commit *data)
  23026. {
  23027. struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
  23028. - struct mdss_mdp_pipe *pipe, *tmp;
  23029. + struct mdss_mdp_pipe *pipe;
  23030. struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
  23031. int ret = 0;
  23032. int sd_in_pipe = 0;
  23033. bool need_cleanup = false;
  23034. - LIST_HEAD(destroy_pipes);
  23035. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  23036. - int te_ret = 0;
  23037. -#endif
  23038.  
  23039. ATRACE_BEGIN(__func__);
  23040. if (ctl->shared_lock) {
  23041. @@ -1330,17 +1253,6 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
  23042. }
  23043.  
  23044. mutex_lock(&mdp5_data->ov_lock);
  23045. - if (mfd->panel_info->type == DTV_PANEL) {
  23046. - ret = mdss_mdp_overlay_start(mfd);
  23047. - if (ret) {
  23048. - pr_err("unable to start overlay %d (%d)\n",
  23049. - mfd->index, ret);
  23050. - mutex_unlock(&mdp5_data->ov_lock);
  23051. - if (ctl->shared_lock)
  23052. - mutex_unlock(ctl->shared_lock);
  23053. - return ret;
  23054. - }
  23055. - }
  23056. mutex_lock(&mdp5_data->list_lock);
  23057.  
  23058. /*
  23059. @@ -1374,10 +1286,9 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
  23060. * Setup pipe in solid fill before unstaging,
  23061. * to ensure no fetches are happening after dettach or reattach.
  23062. */
  23063. - list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
  23064. + list_for_each_entry(pipe, &mdp5_data->pipes_cleanup, list) {
  23065. mdss_mdp_pipe_queue_data(pipe, NULL);
  23066. mdss_mdp_mixer_pipe_unstage(pipe);
  23067. - list_move(&pipe->list, &destroy_pipes);
  23068. need_cleanup = true;
  23069. }
  23070.  
  23071. @@ -1391,7 +1302,9 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
  23072. ret = mdss_mdp_wb_kickoff(mfd);
  23073. ATRACE_END("wb_kickoff");
  23074. } else {
  23075. + ATRACE_BEGIN("display_commit");
  23076. ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL);
  23077. + ATRACE_END("display_commit");
  23078. }
  23079.  
  23080. if (!need_cleanup) {
  23081. @@ -1419,40 +1332,9 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
  23082. }
  23083.  
  23084. mdss_fb_update_notify_update(mfd);
  23085. -#if defined(CONFIG_MDNIE_LITE_TUNING)
  23086. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FULL_HD_PT_PANEL) \
  23087. - || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_YOUM_CMD_FULL_HD_PT_PANEL)
  23088. - if(mdss_first_init)
  23089. - {
  23090. - mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_MDNIE_DEFAULT_UPDATE, NULL);
  23091. - mdss_first_init = false;
  23092. - }
  23093. -#endif
  23094. -#endif
  23095. -
  23096. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)
  23097. - if (get_lcd_ldi_info()) {
  23098. - te_ret = mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_TE_UPDATE, NULL);
  23099. - if (te_ret < 0) {
  23100. - mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_TE_RESTORE, NULL);
  23101. - }
  23102. - }
  23103. -#endif
  23104. -
  23105. -#if defined(CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_FULL_HD_PT_PANEL) || defined (CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL)\
  23106. - || defined(CONFIG_FB_MSM_MIPI_SAMSUNG_YOUM_CMD_FULL_HD_PT_PANEL) || defined(CONFIG_FB_MSM_MIPI_JDI_TFT_VIDEO_FULL_HD_PT_PANEL)\
  23107. - || defined (CONFIG_FB_MSM_MIPI_MAGNA_OCTA_CMD_HD_PT_PANEL) \
  23108. - || defined (CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQXGA_S6TNMR7_PT_PANEL) \
  23109. - || defined (CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQXGA_S6E3HA1_PT_PANEL)
  23110. - mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_FRAME_UPDATE, NULL);
  23111. -#endif
  23112. -#if defined(CONFIG_FB_MSM_MDSS_SDC_WXGA_PANEL) && !defined(CONFIG_MACH_DEGASLTE_SPR)
  23113. - mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_BACKLIGHT_LATE_ON, NULL);
  23114. -#endif
  23115. -
  23116. commit_fail:
  23117. ATRACE_BEGIN("overlay_cleanup");
  23118. - mdss_mdp_overlay_cleanup(mfd, &destroy_pipes);
  23119. + mdss_mdp_overlay_cleanup(mfd);
  23120. ATRACE_END("overlay_cleanup");
  23121. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  23122. mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
  23123. @@ -1537,21 +1419,10 @@ static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
  23124. goto done;
  23125. }
  23126.  
  23127. -#if defined(CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL)
  23128. - if(lcd_connected_status == 1){
  23129. - if (!mfd->panel_power_on && !err_fg_working) {
  23130. - ret = -EPERM;
  23131. - goto done;
  23132. - }
  23133. - }
  23134. -
  23135. -#else
  23136. if (!mfd->panel_power_on) {
  23137. ret = -EPERM;
  23138. goto done;
  23139. }
  23140. -#endif
  23141. -
  23142.  
  23143. pr_debug("unset ndx=%x\n", ndx);
  23144.  
  23145. @@ -1686,8 +1557,6 @@ static void mdss_mdp_overlay_force_cleanup(struct msm_fb_data_type *mfd)
  23146. {
  23147. struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
  23148. struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
  23149. - struct mdss_mdp_pipe *pipe, *tmp;
  23150. - LIST_HEAD(destroy_pipes);
  23151. int ret;
  23152.  
  23153. pr_debug("forcing cleanup to unset dma pipes on fb%d\n", mfd->index);
  23154. @@ -1702,12 +1571,7 @@ static void mdss_mdp_overlay_force_cleanup(struct msm_fb_data_type *mfd)
  23155. mdss_mdp_display_wait4comp(ctl);
  23156. }
  23157.  
  23158. - mutex_lock(&mdp5_data->list_lock);
  23159. - list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
  23160. - list_move(&pipe->list, &destroy_pipes);
  23161. - }
  23162. - mutex_unlock(&mdp5_data->list_lock);
  23163. - mdss_mdp_overlay_cleanup(mfd, &destroy_pipes);
  23164. + mdss_mdp_overlay_cleanup(mfd);
  23165. }
  23166.  
  23167. static void mdss_mdp_overlay_force_dma_cleanup(struct mdss_data_type *mdata)
  23168. @@ -1891,6 +1755,7 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
  23169.  
  23170. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  23171.  
  23172. +
  23173. bpp = fbi->var.bits_per_pixel / 8;
  23174. offset = fbi->var.xoffset * bpp +
  23175. fbi->var.yoffset * fbi->fix.line_length;
  23176. @@ -2027,22 +1892,6 @@ static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
  23177. }
  23178.  
  23179. pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);
  23180. -#if defined(CONFIG_SEC_KS01_PROJECT) ||defined(CONFIG_SEC_ATLANTIC_PROJECT)
  23181. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  23182. - if (csc_update != prev_csc_update) {
  23183. - struct mdss_mdp_pipe *pipe, *next;
  23184. -
  23185. - list_for_each_entry_safe(pipe, next, &mdp5_data->pipes_used,
  23186. - list) {
  23187. - if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
  23188. - mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
  23189. - MDSS_MDP_CSC_YUV2RGB);
  23190. - }
  23191. - }
  23192. - prev_csc_update = csc_update;
  23193. - }
  23194. -#endif
  23195. -#endif
  23196.  
  23197. mdp5_data->vsync_time = t;
  23198. sysfs_notify_dirent(mdp5_data->vsync_event_sd);
  23199. @@ -2837,21 +2686,16 @@ static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
  23200.  
  23201. switch (cmd) {
  23202. case MSMFB_MDP_PP:
  23203. - ID_PRINTK(ID_HDMI, "%s() MSMFB_MDP_PP\n", __func__);
  23204. ret = mdss_mdp_pp_ioctl(mfd, argp);
  23205. break;
  23206.  
  23207. case MSMFB_HISTOGRAM_START:
  23208. - ID_PRINTK(ID_HDMI, "%s() MSMFB_HISTOGRAM_START\n", __func__);
  23209. case MSMFB_HISTOGRAM_STOP:
  23210. - ID_PRINTK(ID_HDMI, "%s() MSMFB_HISTOGRAM_STOP\n", __func__);
  23211. case MSMFB_HISTOGRAM:
  23212. - ID_PRINTK(ID_HDMI, "%s() MSMFB_HISTOGRAM\n", __func__);
  23213. ret = mdss_mdp_histo_ioctl(mfd, cmd, argp);
  23214. break;
  23215.  
  23216. case MSMFB_OVERLAY_GET:
  23217. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_GET\n", __func__);
  23218. req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
  23219. if (!req)
  23220. return -ENOMEM;
  23221. @@ -2868,13 +2712,9 @@ static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
  23222. break;
  23223.  
  23224. case MSMFB_OVERLAY_SET:
  23225. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_SET\n", __func__);
  23226. req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
  23227. if (!req)
  23228. - {
  23229. - pr_err("MSMFB_OVERLAY_SET kmalloc result was NULL \n");
  23230. return -ENOMEM;
  23231. - }
  23232. ret = copy_from_user(req, argp, sizeof(*req));
  23233. if (!ret) {
  23234. ret = mdss_mdp_overlay_set(mfd, req);
  23235. @@ -2888,13 +2728,11 @@ static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
  23236.  
  23237.  
  23238. case MSMFB_OVERLAY_UNSET:
  23239. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_UNSET\n", __func__);
  23240. if (!IS_ERR_VALUE(copy_from_user(&val, argp, sizeof(val))))
  23241. ret = mdss_mdp_overlay_unset(mfd, val);
  23242. break;
  23243.  
  23244. case MSMFB_OVERLAY_PLAY_ENABLE:
  23245. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_PLAY_ENABLE\n", __func__);
  23246. if (!copy_from_user(&val, argp, sizeof(val))) {
  23247. mdp5_data->overlay_play_enable = val;
  23248. ret = 0;
  23249. @@ -2905,7 +2743,6 @@ static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
  23250. break;
  23251.  
  23252. case MSMFB_OVERLAY_PLAY:
  23253. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_PLAY\n", __func__);
  23254. if (mdp5_data->overlay_play_enable) {
  23255. struct msmfb_overlay_data data;
  23256.  
  23257. @@ -2921,7 +2758,6 @@ static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
  23258. break;
  23259.  
  23260. case MSMFB_OVERLAY_PLAY_WAIT:
  23261. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_PLAY_WAIT\n", __func__);
  23262. if (mdp5_data->overlay_play_enable) {
  23263. struct msmfb_overlay_data data;
  23264.  
  23265. @@ -2937,32 +2773,25 @@ static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
  23266. break;
  23267.  
  23268. case MSMFB_VSYNC_CTRL:
  23269. - ID_PRINTK(ID_HDMI, "%s() MSMFB_VSYNC_CTRL\n", __func__);
  23270. case MSMFB_OVERLAY_VSYNC_CTRL:
  23271. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_VSYNC_CTRL\n", __func__);
  23272. if (!copy_from_user(&val, argp, sizeof(val))) {
  23273. - mutex_lock(&mfd->ctx_lock);
  23274. ret = mdss_mdp_overlay_vsync_ctrl(mfd, val);
  23275. - mutex_unlock(&mfd->ctx_lock);
  23276. } else {
  23277. pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed (%d)\n", ret);
  23278. ret = -EFAULT;
  23279. }
  23280. break;
  23281. case MSMFB_OVERLAY_COMMIT:
  23282. - ID_PRINTK(ID_HDMI, "%s() MSMFB_OVERLAY_COMMIT\n", __func__);
  23283. mdss_fb_wait_for_fence(&(mfd->mdp_sync_pt_data));
  23284. ret = mfd->mdp.kickoff_fnc(mfd, NULL);
  23285. break;
  23286. case MSMFB_METADATA_SET:
  23287. - ID_PRINTK(ID_HDMI, "%s() MSMFB_METADATA_SET\n", __func__);
  23288. ret = copy_from_user(&metadata, argp, sizeof(metadata));
  23289. if (ret)
  23290. return ret;
  23291. ret = mdss_fb_set_metadata(mfd, &metadata);
  23292. break;
  23293. case MSMFB_METADATA_GET:
  23294. - ID_PRINTK(ID_HDMI, "%s() MSMFB_METADATA_GET\n", __func__);
  23295. ret = copy_from_user(&metadata, argp, sizeof(metadata));
  23296. if (ret)
  23297. return ret;
  23298. @@ -2974,7 +2803,6 @@ static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
  23299. ret = __handle_ioctl_overlay_prepare(mfd, argp);
  23300. break;
  23301. default:
  23302. - ID_PRINTK(ID_HDMI, "%s() default\n", __func__);
  23303. if (mfd->panel.type == WRITEBACK_PANEL)
  23304. ret = mdss_mdp_wb_ioctl_handler(mfd, cmd, argp);
  23305. break;
  23306. @@ -3050,7 +2878,6 @@ static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
  23307. int rc;
  23308. struct mdss_overlay_private *mdp5_data;
  23309. struct mdss_mdp_ctl *ctl = NULL;
  23310. - struct mdss_panel_info *pinfo = mfd->panel_info;
  23311.  
  23312. if (!mfd)
  23313. return -ENODEV;
  23314. @@ -3058,7 +2885,6 @@ static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
  23315. if (mfd->key != MFD_KEY)
  23316. return -EINVAL;
  23317.  
  23318. - pr_info("%s: ++ \n",__func__);
  23319. mdp5_data = mfd_to_mdp5_data(mfd);
  23320. if (!mdp5_data)
  23321. return -EINVAL;
  23322. @@ -3071,8 +2897,7 @@ static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
  23323. }
  23324.  
  23325. if (!mfd->panel_info->cont_splash_enabled &&
  23326. - (mfd->panel_info->type != DTV_PANEL) &&
  23327. - !(pinfo->alpm_event && pinfo->alpm_event(CHECK_PREVIOUS_STATUS))) {
  23328. + (mfd->panel_info->type != DTV_PANEL)) {
  23329. rc = mdss_mdp_overlay_start(mfd);
  23330. if (!IS_ERR_VALUE(rc) &&
  23331. (mfd->panel_info->type != WRITEBACK_PANEL)) {
  23332. @@ -3089,9 +2914,6 @@ static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
  23333. pr_err("Failed to turn on fb%d\n", mfd->index);
  23334. mdss_mdp_overlay_off(mfd);
  23335. }
  23336. -
  23337. - pr_info("%s: -- \n",__func__);
  23338. -
  23339. return rc;
  23340. }
  23341.  
  23342. @@ -3101,7 +2923,6 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
  23343. struct mdss_overlay_private *mdp5_data;
  23344. struct mdss_mdp_mixer *mixer;
  23345. int need_cleanup;
  23346. - struct mdss_panel_info *pinfo;
  23347.  
  23348. if (!mfd)
  23349. return -ENODEV;
  23350. @@ -3109,8 +2930,6 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
  23351. if (mfd->key != MFD_KEY)
  23352. return -EINVAL;
  23353.  
  23354. - pinfo = mfd->panel_info;
  23355. -
  23356. mdp5_data = mfd_to_mdp5_data(mfd);
  23357.  
  23358. if (!mdp5_data || !mdp5_data->ctl) {
  23359. @@ -3136,13 +2955,8 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
  23360. mutex_unlock(&mdp5_data->list_lock);
  23361.  
  23362. if (need_cleanup) {
  23363. - if (pinfo->alpm_event && pinfo->alpm_event(CHECK_CURRENT_STATUS)) {
  23364. - pr_debug("[ALPM_DEBUG] %s, Skip cleanup pipes on fb%d\n",\
  23365. - __func__, mfd->index);
  23366. - } else {
  23367. - pr_debug("cleaning up pipes on fb%d\n", mfd->index);
  23368. - mdss_mdp_overlay_kickoff(mfd, NULL);
  23369. - }
  23370. + pr_debug("cleaning up pipes on fb%d\n", mfd->index);
  23371. + mdss_mdp_overlay_kickoff(mfd, NULL);
  23372. }
  23373.  
  23374. /*
  23375. @@ -3184,15 +2998,9 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
  23376.  
  23377. int mdss_panel_register_done(struct mdss_panel_data *pdata)
  23378. {
  23379. - static int first_register=true;
  23380. - /*
  23381. - * Clocks are already on if continuous splash is enabled,
  23382. - * increasing ref_cnt to help balance clocks once done.
  23383. - */
  23384. - if (pdata->panel_info.cont_splash_enabled && first_register) {
  23385. + if (pdata->panel_info.cont_splash_enabled)
  23386. mdss_mdp_footswitch_ctrl_splash(1);
  23387. - first_register=false;
  23388. - }
  23389. +
  23390. return 0;
  23391. }
  23392.  
  23393. @@ -3273,11 +3081,6 @@ static int mdss_mdp_overlay_handoff(struct msm_fb_data_type *mfd)
  23394. mdp5_data->ctl = ctl;
  23395. }
  23396.  
  23397. - if (IS_ERR_OR_NULL(ctl)) {
  23398. - rc = PTR_ERR(ctl);
  23399. - goto error;
  23400. - }
  23401. -
  23402. /*
  23403. * vsync interrupt needs on during continuous splash, this is
  23404. * to initialize necessary ctl members here.
  23405. @@ -3568,6 +3371,8 @@ int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
  23406. }
  23407. }
  23408.  
  23409. + if (mdss_mdp_pp_overlay_init(mfd))
  23410. + pr_warn("Failed to initialize pp overlay data.\n");
  23411. return rc;
  23412. init_fail:
  23413. kfree(mdp5_data);
  23414. @@ -3589,60 +3394,3 @@ static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
  23415.  
  23416. return rc;
  23417. }
  23418. -
  23419. -#if defined (CONFIG_FB_MSM_MDSS_DBG_SEQ_TICK)
  23420. -void mdss_dbg_tick_save(int op_name)
  23421. -{
  23422. - ktime_t tick;
  23423. - tick = ktime_get();
  23424. -
  23425. - switch(op_name)
  23426. - {
  23427. - case COMMIT :
  23428. - mdss_dbg_tick.commit[mdss_dbg_tick.commit_cnt] = ktime_to_ns(tick);
  23429. - mdss_dbg_tick.commit_cnt++;
  23430. - if(mdss_dbg_tick.commit_cnt > 9)
  23431. - mdss_dbg_tick.commit_cnt = 0;
  23432. - break;
  23433. - case KICKOFF :
  23434. - mdss_dbg_tick.kickoff[mdss_dbg_tick.kickoff_cnt] = ktime_to_ns(tick);
  23435. - mdss_dbg_tick.kickoff_cnt++;
  23436. - if(mdss_dbg_tick.kickoff_cnt > 9)
  23437. - mdss_dbg_tick.kickoff_cnt = 0;
  23438. - break;
  23439. - case PP_DONE :
  23440. - mdss_dbg_tick.pingpong_done[mdss_dbg_tick.pingpong_done_cnt] = ktime_to_ns(tick);
  23441. - mdss_dbg_tick.pingpong_done_cnt++;
  23442. - if(mdss_dbg_tick.pingpong_done_cnt > 9)
  23443. - mdss_dbg_tick.pingpong_done_cnt = 0;
  23444. - break;
  23445. - }
  23446. -}
  23447. -
  23448. -#endif
  23449. -/*
  23450. - * [srcx,srcy,srcw,srch]->[dstx,dsty,dstw,dsth][flags]|src_format|bpp|pipe_ndx|
  23451. - * mdp_clk = %ld, bus_ab = %llu, bus_ib = %llu
  23452. - */
  23453. -void mdss_mdp_underrun_dump_info(struct msm_fb_data_type *mfd)
  23454. -{
  23455. - struct mdss_mdp_pipe *pipe;
  23456. - struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
  23457. -
  23458. - pr_info(" ============ dump_start ===========\n");
  23459. -
  23460. - list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
  23461. - if (pipe)
  23462. - pr_info(" [%4d, %4d, %4d, %4d] -> [%4d, %4d, %4d, %4d]"
  23463. - "|flags = %8d|src_format = %2d|bpp = %2d|ndx = %3d|\n",
  23464. - pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
  23465. - pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h,
  23466. - pipe->flags, pipe->src_fmt->format, pipe->src_fmt->bpp,
  23467. - pipe->ndx);
  23468. - pr_info("pipe addr : %p\n", pipe);
  23469. - }
  23470. -
  23471. - mdss_mdp_underrun_clk_info();
  23472. - pr_info(" ============ dump_end =========== \n");
  23473. -}
  23474. -
  23475. diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
  23476. index 239c9d4..134a3d9 100644
  23477. --- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
  23478. +++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
  23479. @@ -76,7 +76,6 @@ static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
  23480. if (i != 0 && n != i && !force_alloc) {
  23481. pr_debug("Can't change mmb config, num_blks: %d alloc: %d\n",
  23482. n, i);
  23483. - pr_debug("Can't change mmb configuration in set call\n");
  23484. return 0;
  23485. }
  23486.  
  23487. @@ -341,11 +340,11 @@ int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe)
  23488. }
  23489.  
  23490. if (reserved < num_blks) {
  23491. - pr_err("insufficient MMB blocks\n");
  23492. + pr_debug("insufficient MMB blocks\n");
  23493. for (; i >= 0; i--)
  23494. mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved,
  23495. false);
  23496. - rc = -ENOMEM;
  23497. + rc = -ENOBUFS;
  23498. }
  23499. mutex_unlock(&mdss_mdp_smp_lock);
  23500.  
  23501. @@ -697,42 +696,6 @@ static void mdss_mdp_pipe_free(struct kref *kref)
  23502. pipe->mfd = NULL;
  23503. memset(&pipe->scale, 0, sizeof(struct mdp_scale_data));
  23504. }
  23505. -static bool mdss_mdp_check_pipe_in_use(struct mdss_mdp_pipe *pipe)
  23506. -{
  23507. - int i;
  23508. - u32 mixercfg, stage_off_mask = BIT(0) | BIT(1) | BIT(2);
  23509. - bool in_use = false;
  23510. - struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  23511. - struct mdss_mdp_ctl *ctl;
  23512. - struct mdss_mdp_mixer *mixer;
  23513. -
  23514. - if (pipe->num == MDSS_MDP_SSPP_VIG3 ||
  23515. - pipe->num == MDSS_MDP_SSPP_RGB3)
  23516. - stage_off_mask = stage_off_mask << ((3 * pipe->num) + 2);
  23517. - else
  23518. - stage_off_mask = stage_off_mask << (3 * pipe->num);
  23519. -
  23520. - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  23521. - for (i = 0; i < mdata->nctl; i++) {
  23522. - ctl = mdata->ctl_off + i;
  23523. - if (!ctl || !ctl->ref_cnt)
  23524. - continue;
  23525. -
  23526. - mixer = ctl->mixer_left;
  23527. - if (mixer && mixer->rotator_mode)
  23528. - continue;
  23529. -
  23530. - mixercfg = mdss_mdp_get_mixercfg(mixer);
  23531. - if ((mixercfg & stage_off_mask) && ctl->play_cnt) {
  23532. - pr_err("BUG. pipe%d is active. mcfg:0x%x mask:0x%x\n",
  23533. - pipe->num, mixercfg, stage_off_mask);
  23534. - BUG();
  23535. - }
  23536. - }
  23537. - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  23538. -
  23539. - return in_use;
  23540. -}
  23541.  
  23542. static int mdss_mdp_is_pipe_idle(struct mdss_mdp_pipe *pipe,
  23543. bool ignore_force_on)
  23544. @@ -795,15 +758,13 @@ exit:
  23545. */
  23546. int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe)
  23547. {
  23548. - bool is_idle, in_use = false;
  23549. + bool is_idle;
  23550. int rc = 0;
  23551. u32 reg_val, idle_mask, status;
  23552. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  23553.  
  23554. is_idle = mdss_mdp_is_pipe_idle(pipe, true);
  23555. - if (!is_idle)
  23556. - in_use = mdss_mdp_check_pipe_in_use(pipe);
  23557. - if (!is_idle && !in_use) {
  23558. + if (!is_idle) {
  23559. pr_err("%pS: pipe%d is not idle. xin_id=%d\n",
  23560. __builtin_return_address(0), pipe->num, pipe->xin_id);
  23561.  
  23562. @@ -915,6 +876,8 @@ error:
  23563. return rc;
  23564. }
  23565.  
  23566. +
  23567. +
  23568. static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe,
  23569. struct mdss_mdp_data *data)
  23570. {
  23571. @@ -979,11 +942,7 @@ static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe,
  23572. src_size = (src.h << 16) | src.w;
  23573. src_xy = (src.y << 16) | src.x;
  23574. dst_size = (dst.h << 16) | dst.w;
  23575. -#if defined(CONFIG_MDSS_UD_FLIP)
  23576. - dst_xy = (((pipe->mixer->height - pipe->dst.y - pipe->dst.h) << 16) | pipe->dst.x);
  23577. -#else
  23578. dst_xy = (dst.y << 16) | dst.x;
  23579. -#endif
  23580.  
  23581. ystride0 = (pipe->src_planes.ystride[0]) |
  23582. (pipe->src_planes.ystride[1] << 16);
  23583. @@ -1301,27 +1260,31 @@ static inline void __mdss_mdp_pipe_program_pixel_extn_helper(
  23584. */
  23585. if (plane == 1)
  23586. src_h >>= pipe->chroma_sample_v;
  23587. +
  23588. lr_pe = ((pipe->scale.right_ftch[plane] & mask) << 24)|
  23589. ((pipe->scale.right_rpt[plane] & mask) << 16)|
  23590. ((pipe->scale.left_ftch[plane] & mask) << 8)|
  23591. (pipe->scale.left_rpt[plane] & mask);
  23592. - tb_pe = ((pipe->scale.btm_ftch[plane] & mask) << 24)|
  23593. +
  23594. + tb_pe = ((pipe->scale.btm_ftch[plane] & mask) << 24)|
  23595. ((pipe->scale.btm_rpt[plane] & mask) << 16)|
  23596. ((pipe->scale.top_ftch[plane] & mask) << 8)|
  23597. (pipe->scale.top_rpt[plane] & mask);
  23598. +
  23599. writel_relaxed(lr_pe, pipe->base +
  23600. MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_LR + off);
  23601. writel_relaxed(tb_pe, pipe->base +
  23602. MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_TB + off);
  23603. +
  23604. mask = 0xFFFF;
  23605. tot_req_pixels = (((src_h + pipe->scale.num_ext_pxls_top[plane] +
  23606. pipe->scale.num_ext_pxls_btm[plane]) & mask) << 16) |
  23607. ((pipe->scale.roi_w[plane] +
  23608. pipe->scale.num_ext_pxls_left[plane] +
  23609. pipe->scale.num_ext_pxls_right[plane]) & mask);
  23610. - writel_relaxed(tot_req_pixels, pipe->base +
  23611. + writel_relaxed(tot_req_pixels, pipe->base +
  23612. MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_REQ_PIXELS + off);
  23613. -
  23614. +
  23615. pr_debug("pipe num=%d, plane=%d, LR PE=0x%x, TB PE=0x%x, req_pixels=0x0%x\n",
  23616. pipe->num, plane, lr_pe, tb_pe, tot_req_pixels);
  23617. }
  23618. diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
  23619. index ea133ee..a9e32d7 100644
  23620. --- a/drivers/video/msm/mdss/mdss_mdp_pp.c
  23621. +++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
  23622. @@ -21,20 +21,6 @@
  23623. #include <linux/delay.h>
  23624. #include <mach/msm_bus.h>
  23625. #include <mach/msm_bus_board.h>
  23626. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  23627. -struct mdp_csc_cfg mdp_csc_convert_wideband = {
  23628. - 0,
  23629. - {
  23630. - 0x0200, 0x0000, 0x02CD,
  23631. - 0x0200, 0xFF4F, 0xFE91,
  23632. - 0x0200, 0x038B, 0x0000,
  23633. - },
  23634. - { 0x0, 0xFF80, 0xFF80,},
  23635. - { 0x0, 0x0, 0x0,},
  23636. - { 0x0, 0xFF, 0x0, 0xFF, 0x0, 0xFF,},
  23637. - { 0x0, 0xFF, 0x0, 0xFF, 0x0, 0xFF,},
  23638. -};
  23639. -#endif
  23640.  
  23641. struct mdp_csc_cfg mdp_csc_convert[MDSS_MDP_MAX_CSC] = {
  23642. [MDSS_MDP_CSC_RGB2RGB] = {
  23643. @@ -87,29 +73,23 @@ struct mdp_csc_cfg mdp_csc_convert[MDSS_MDP_MAX_CSC] = {
  23644. },
  23645. };
  23646.  
  23647. -#if defined(CONFIG_MDNIE_TFT_MSM8X26) || defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL) || defined(CONFIG_MDNIE_VIDEO_ENHANCED)
  23648. -struct mdp_pcc_cfg_data pcc_reverse = {
  23649. - .block = MDP_LOGICAL_BLOCK_DISP_0,
  23650. - .ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_ENABLE,
  23651. - .r = { 0x00007ff8, 0xffff8000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  23652. - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  23653. - .g = { 0x00007ff8, 0x00000000, 0xffff8000, 0x00000000, 0x00000000, 0x00000000,
  23654. - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  23655. - .b = { 0x00007ff8, 0x00000000, 0x00000000, 0xffff8000, 0x00000000, 0x00000000,
  23656. - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  23657. -};
  23658. -
  23659. -struct mdp_pcc_cfg_data pcc_normal = {
  23660. - .block = MDP_LOGICAL_BLOCK_DISP_0,
  23661. - .ops = MDP_PP_OPS_WRITE | MDP_PP_OPS_DISABLE,
  23662. - .r = { 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  23663. - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  23664. - .g = { 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000,
  23665. - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  23666. - .b = { 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000,
  23667. - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
  23668. +/*
23669. + * To program a linear LUT we need to set the slope to 1/16 to enable
23670. + * conversion from 12 bit to 8 bit. Also, in cases where post-blend values
23671. + * might cross 255, we need to cap them at 255. The offset of the final
23672. + * segment is programmed in such a case and we set the value to 32640, which
23673. + * is 255 in U8.7.
  23674. + */
  23675. +static struct mdp_ar_gc_lut_data lin_gc_data[GC_LUT_SEGMENTS] = {
  23676. + { 0, 256, 0}, {4095, 0, 0},
  23677. + {4095, 0, 0}, {4095, 0, 0},
  23678. + {4095, 0, 0}, {4095, 0, 0},
  23679. + {4095, 0, 0}, {4095, 0, 0},
  23680. + {4095, 0, 0}, {4095, 0, 0},
  23681. + {4095, 0, 0}, {4095, 0, 0},
  23682. + {4095, 0, 0}, {4095, 0, 0},
  23683. + {4095, 0, 0}, {4095, 0, 32640}
  23684. };
  23685. -#endif
  23686.  
  23687. #define CSC_MV_OFF 0x0
  23688. #define CSC_BV_OFF 0x2C
  23689. @@ -117,6 +97,8 @@ struct mdp_pcc_cfg_data pcc_normal = {
  23690. #define CSC_POST_OFF 0xC
  23691.  
  23692. #define MDSS_BLOCK_DISP_NUM (MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
  23693. +#define MDSS_MAX_MIXER_DISP_NUM (MDSS_BLOCK_DISP_NUM + \
  23694. + MDSS_MDP_WB_MAX_LAYERMIXER)
  23695.  
  23696. #define HIST_WAIT_TIMEOUT(frame) ((75 * HZ * (frame)) / 1000)
  23697. #define HIST_KICKOFF_WAIT_FRACTION 4
  23698. @@ -325,15 +307,15 @@ static struct msm_bus_scale_pdata mdp_pp_bus_scale_table = {
  23699.  
  23700. struct mdss_pp_res_type {
  23701. /* logical info */
  23702. - u32 pp_disp_flags[MDSS_BLOCK_DISP_NUM];
  23703. + u32 pp_disp_flags[MDSS_MAX_MIXER_DISP_NUM];
  23704. u32 igc_lut_c0c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
  23705. u32 igc_lut_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
  23706. struct mdp_ar_gc_lut_data
  23707. - gc_lut_r[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
  23708. + gc_lut_r[MDSS_MAX_MIXER_DISP_NUM][GC_LUT_SEGMENTS];
  23709. struct mdp_ar_gc_lut_data
  23710. - gc_lut_g[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
  23711. + gc_lut_g[MDSS_MAX_MIXER_DISP_NUM][GC_LUT_SEGMENTS];
  23712. struct mdp_ar_gc_lut_data
  23713. - gc_lut_b[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
  23714. + gc_lut_b[MDSS_MAX_MIXER_DISP_NUM][GC_LUT_SEGMENTS];
  23715. u32 enhist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];
  23716. struct mdp_pa_cfg pa_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23717. struct mdp_pa_v2_data pa_v2_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23718. @@ -341,14 +323,14 @@ struct mdss_pp_res_type {
  23719. u32 six_zone_lut_curve_p1[MDSS_BLOCK_DISP_NUM][MDP_SIX_ZONE_LUT_SIZE];
  23720. struct mdp_pcc_cfg_data pcc_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23721. struct mdp_igc_lut_data igc_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23722. - struct mdp_pgc_lut_data argc_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23723. + struct mdp_pgc_lut_data argc_disp_cfg[MDSS_MAX_MIXER_DISP_NUM];
  23724. struct mdp_pgc_lut_data pgc_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23725. struct mdp_hist_lut_data enhist_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23726. struct mdp_dither_cfg_data dither_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23727. struct mdp_gamut_cfg_data gamut_disp_cfg[MDSS_BLOCK_DISP_NUM];
  23728. uint16_t gamut_tbl[MDSS_BLOCK_DISP_NUM][GAMUT_TOTAL_TABLE_SIZE];
  23729. u32 hist_data[MDSS_BLOCK_DISP_NUM][HIST_V_SIZE];
  23730. - struct pp_sts_type pp_disp_sts[MDSS_BLOCK_DISP_NUM];
  23731. + struct pp_sts_type pp_disp_sts[MDSS_MAX_MIXER_DISP_NUM];
  23732. /* physical info */
  23733. struct pp_hist_col_info dspp_hist[MDSS_MDP_MAX_DSPP];
  23734. };
  23735. @@ -420,10 +402,11 @@ static int pp_read_pa_v2_regs(char __iomem *addr,
  23736. u32 disp_num);
  23737. static void pp_read_pa_mem_col_regs(char __iomem *addr,
  23738. struct mdp_pa_mem_col_cfg *mem_col_cfg);
  23739. +static struct msm_fb_data_type *mdss_get_mfd_from_index(int index);
  23740. static int mdss_ad_init_checks(struct msm_fb_data_type *mfd);
  23741. static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
  23742. struct mdss_ad_info **ad);
  23743. -static int pp_update_ad_input(struct msm_fb_data_type *mfd);
  23744. +static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd);
  23745. static void pp_ad_vsync_handler(struct mdss_mdp_ctl *ctl, ktime_t t);
  23746. static void pp_ad_cfg_write(struct mdss_mdp_ad *ad_hw,
  23747. struct mdss_ad_info *ad);
  23748. @@ -437,8 +420,11 @@ static void pp_ad_bypass_config(struct mdss_ad_info *ad,
  23749. struct mdss_mdp_ctl *ctl, u32 num, u32 *opmode);
  23750. static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd);
  23751. static void pp_ad_cfg_lut(char __iomem *addr, u32 *data);
  23752. -static int pp_ad_attenuate_bl(u32 bl, u32 *bl_out,
  23753. - struct msm_fb_data_type *mfd);
  23754. +static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out);
  23755. +static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
  23756. + int inv);
  23757. +static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
  23758. + bool *bl_out_notify);
  23759. static int pp_num_to_side(struct mdss_mdp_ctl *ctl, u32 num);
  23760. static inline bool pp_sts_is_enabled(u32 sts, int side);
  23761. static inline void pp_sts_set_split_bits(u32 *sts, u32 bits);
  23762. @@ -549,20 +535,7 @@ int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type)
  23763. pr_debug("csc type=%d blk=%d idx=%d tbl=%d\n", csc_type,
  23764. block, blk_idx, tbl_idx);
  23765.  
  23766. -#ifdef CONFIG_FB_MSM_CAMERA_CSC
  23767. - if (csc_type == MDSS_MDP_CSC_YUV2RGB && !csc_update)
  23768. - {
  23769. - data = &mdp_csc_convert_wideband;
  23770. - pr_debug("will do mdp_csc_convert_wideband\n");
  23771. - }
  23772. - else
  23773. - {
  23774. - data = &mdp_csc_convert[csc_type];
  23775. - pr_debug("will do mdp_csc_convert(narrow band)\n");
  23776. - }
  23777. -#else
  23778. data = &mdp_csc_convert[csc_type];
  23779. -#endif
  23780. return mdss_mdp_csc_setup_data(block, blk_idx, tbl_idx, data);
  23781. }
  23782.  
  23783. @@ -1069,11 +1042,11 @@ static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe)
  23784. (chroma_sample == MDSS_MDP_CHROMA_H1V2)))
  23785. chroma_shift_y = 1; /* 2x upsample chroma */
  23786.  
  23787. - if (src_h <= pipe->dst.h) {
  23788. + if (src_h <= pipe->dst.h)
  23789. scale_config |= /* G/Y, A */
  23790. (filter_mode << 10) |
  23791. (MDSS_MDP_SCALE_FILTER_BIL << 18);
  23792. - } else
  23793. + else
  23794. scale_config |= /* G/Y, A */
  23795. (MDSS_MDP_SCALE_FILTER_PCMN << 10) |
  23796. (MDSS_MDP_SCALE_FILTER_PCMN << 18);
  23797. @@ -1350,44 +1323,69 @@ int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
  23798. static int pp_mixer_setup(u32 disp_num,
  23799. struct mdss_mdp_mixer *mixer)
  23800. {
  23801. - u32 flags, dspp_num, opmode = 0;
  23802. + u32 flags, mixer_num, opmode = 0, lm_bitmask = 0;
  23803. struct mdp_pgc_lut_data *pgc_config;
  23804. struct pp_sts_type *pp_sts;
  23805. struct mdss_mdp_ctl *ctl;
  23806. char __iomem *addr;
  23807. + struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  23808.  
  23809. - if (!mixer || !mixer->ctl)
  23810. + if (!mixer || !mixer->ctl || !mdata)
  23811. return -EINVAL;
  23812. - dspp_num = mixer->num;
  23813. +
  23814. + mixer_num = mixer->num;
  23815. ctl = mixer->ctl;
  23816. + lm_bitmask = (BIT(6) << mixer_num);
  23817.  
  23818. - /* no corresponding dspp */
  23819. - if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
  23820. - (dspp_num >= MDSS_MDP_MAX_DSPP))
  23821. + /* Assign appropriate flags after mixer index validation */
  23822. + if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
  23823. + if (mixer_num >= mdata->nmixers_intf) {
  23824. + pr_err("bad intf mixer index = %d total = %d\n",
  23825. + mixer_num, mdata->nmixers_intf);
  23826. + return 0;
  23827. + }
  23828. + if (mixer_num == MDSS_MDP_DSPP3)
  23829. + lm_bitmask = BIT(20);
  23830. + } else if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
  23831. + if (mixer_num >= mdata->nmixers_wb +
  23832. + mdata->nmixers_intf) {
  23833. + pr_err("bad wb mixer index = %d total = %d\n",
  23834. + mixer_num,
  23835. + mdata->nmixers_intf + mdata->nmixers_wb);
  23836. + return 0;
  23837. + }
  23838. + } else {
  23839. return 0;
  23840. - if (disp_num < MDSS_BLOCK_DISP_NUM)
  23841. - flags = mdss_pp_res->pp_disp_flags[disp_num];
  23842. - else
  23843. - flags = 0;
  23844. + }
  23845.  
  23846. + flags = mdss_pp_res->pp_disp_flags[disp_num];
  23847. pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
  23848. /* GC_LUT is in layer mixer */
  23849. if (flags & PP_FLAGS_DIRTY_ARGC) {
  23850. pgc_config = &mdss_pp_res->argc_disp_cfg[disp_num];
  23851. - if (pgc_config->flags & MDP_PP_OPS_WRITE) {
  23852. - addr = mixer->base +
  23853. - MDSS_MDP_REG_LM_GC_LUT_BASE;
  23854. + addr = mixer->base + MDSS_MDP_REG_LM_GC_LUT_BASE;
  23855. + /*
  23856. + * ARGC will always be enabled. When user setting is
  23857. + * disabled we program the linear ARGC data to enable
  23858. + * rounding in HW.
  23859. + */
  23860. + pp_sts->argc_sts |= PP_STS_ENABLE;
  23861. + if (pgc_config->flags & MDP_PP_OPS_WRITE)
  23862. + pp_update_argc_lut(addr, pgc_config);
  23863. + if (pgc_config->flags & MDP_PP_OPS_DISABLE) {
  23864. + pgc_config->r_data = &lin_gc_data[0];
  23865. + pgc_config->g_data = &lin_gc_data[0];
  23866. + pgc_config->b_data = &lin_gc_data[0];
  23867. + pgc_config->num_r_stages = GC_LUT_SEGMENTS;
  23868. + pgc_config->num_g_stages = GC_LUT_SEGMENTS;
  23869. + pgc_config->num_b_stages = GC_LUT_SEGMENTS;
  23870. pp_update_argc_lut(addr, pgc_config);
  23871. }
  23872. - if (pgc_config->flags & MDP_PP_OPS_DISABLE)
  23873. - pp_sts->argc_sts &= ~PP_STS_ENABLE;
  23874. - else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
  23875. - pp_sts->argc_sts |= PP_STS_ENABLE;
  23876. - ctl->flush_bits |= BIT(6) << dspp_num; /* LAYER_MIXER */
  23877. + ctl->flush_bits |= lm_bitmask;
  23878. }
  23879. +
  23880. /* update LM opmode if LM needs flush */
  23881. - if ((pp_sts->argc_sts & PP_STS_ENABLE) &&
  23882. - (ctl->flush_bits & (BIT(6) << dspp_num))) {
  23883. + if (flags & PP_FLAGS_DIRTY_ARGC) {
  23884. addr = mixer->base + MDSS_MDP_REG_LM_OP_MODE;
  23885. opmode = readl_relaxed(addr);
  23886. opmode |= (1 << 0); /* GC_LUT_EN */
  23887. @@ -1624,23 +1622,23 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
  23888.  
  23889. pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
  23890.  
  23891. - if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
  23892. - pp_pa_v2_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
  23893. - &mdss_pp_res->pa_v2_disp_cfg[disp_num],
  23894. - PP_DSPP);
  23895. - } else
  23896. - pp_pa_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
  23897. - &mdss_pp_res->pa_disp_cfg[disp_num]);
  23898. -
  23899. - pp_pcc_config(flags, base + MDSS_MDP_REG_DSPP_PCC_BASE, pp_sts,
  23900. - &mdss_pp_res->pcc_disp_cfg[disp_num]);
  23901. + if (disp_num < MDSS_BLOCK_DISP_NUM) {
  23902. + if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
  23903. + pp_pa_v2_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
  23904. + &mdss_pp_res->pa_v2_disp_cfg[disp_num],
  23905. + PP_DSPP);
  23906. + } else
  23907. + pp_pa_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
  23908. + &mdss_pp_res->pa_disp_cfg[disp_num]);
  23909.  
  23910. - pp_igc_config(flags, mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE,
  23911. + pp_pcc_config(flags, base + MDSS_MDP_REG_DSPP_PCC_BASE, pp_sts,
  23912. + &mdss_pp_res->pcc_disp_cfg[disp_num]);
  23913. + pp_igc_config(flags, mdata->mdp_base + MDSS_MDP_REG_IGC_DSPP_BASE,
  23914. pp_sts, &mdss_pp_res->igc_disp_cfg[disp_num],
  23915. dspp_num);
  23916. -
  23917. - pp_enhist_config(flags, base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
  23918. - pp_sts, &mdss_pp_res->enhist_disp_cfg[disp_num]);
  23919. + pp_enhist_config(flags, base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
  23920. + pp_sts, &mdss_pp_res->enhist_disp_cfg[disp_num]);
  23921. + }
  23922.  
  23923. if (pp_sts->enhist_sts & PP_STS_ENABLE &&
  23924. !(pp_sts->pa_sts & PP_STS_ENABLE)) {
  23925. @@ -1651,26 +1649,29 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
  23926. writel_relaxed(0, addr + 8);
  23927. writel_relaxed(0, addr + 12);
  23928. }
  23929. - if (flags & PP_FLAGS_DIRTY_DITHER) {
  23930. - addr = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
  23931. - pp_dither_config(addr, pp_sts,
  23932. - &mdss_pp_res->dither_disp_cfg[disp_num]);
  23933. - }
  23934. - if (flags & PP_FLAGS_DIRTY_GAMUT)
  23935. - pp_gamut_config(&mdss_pp_res->gamut_disp_cfg[disp_num], base,
  23936. - pp_sts);
  23937.  
  23938. - if (flags & PP_FLAGS_DIRTY_PGC) {
  23939. - pgc_config = &mdss_pp_res->pgc_disp_cfg[disp_num];
  23940. - if (pgc_config->flags & MDP_PP_OPS_WRITE) {
  23941. - addr = base + MDSS_MDP_REG_DSPP_GC_BASE;
  23942. - pp_update_argc_lut(addr, pgc_config);
  23943. + if (disp_num < MDSS_BLOCK_DISP_NUM) {
  23944. + if (flags & PP_FLAGS_DIRTY_DITHER) {
  23945. + addr = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
  23946. + pp_dither_config(addr, pp_sts,
  23947. + &mdss_pp_res->dither_disp_cfg[disp_num]);
  23948. + }
  23949. + if (flags & PP_FLAGS_DIRTY_GAMUT)
  23950. + pp_gamut_config(&mdss_pp_res->gamut_disp_cfg[disp_num], base,
  23951. + pp_sts);
  23952. +
  23953. + if (flags & PP_FLAGS_DIRTY_PGC) {
  23954. + pgc_config = &mdss_pp_res->pgc_disp_cfg[disp_num];
  23955. + if (pgc_config->flags & MDP_PP_OPS_WRITE) {
  23956. + addr = base + MDSS_MDP_REG_DSPP_GC_BASE;
  23957. + pp_update_argc_lut(addr, pgc_config);
  23958. + }
  23959. + if (pgc_config->flags & MDP_PP_OPS_DISABLE)
  23960. + pp_sts->pgc_sts &= ~PP_STS_ENABLE;
  23961. + else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
  23962. + pp_sts->pgc_sts |= PP_STS_ENABLE;
  23963. + pp_sts_set_split_bits(&pp_sts->pgc_sts, pgc_config->flags);
  23964. }
  23965. - if (pgc_config->flags & MDP_PP_OPS_DISABLE)
  23966. - pp_sts->pgc_sts &= ~PP_STS_ENABLE;
  23967. - else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
  23968. - pp_sts->pgc_sts |= PP_STS_ENABLE;
  23969. - pp_sts_set_split_bits(&pp_sts->pgc_sts, pgc_config->flags);
  23970. }
  23971.  
  23972. pp_dspp_opmode_config(ctl, dspp_num, pp_sts, mdata->mdp_rev, &opmode);
  23973. @@ -1734,14 +1735,22 @@ int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
  23974.  
  23975. /* treat fb_num the same as block logical id*/
  23976. disp_num = ctl->mfd->index;
  23977. + if (disp_num >= MDSS_MAX_MIXER_DISP_NUM) {
  23978. + pr_warn("Invalid display number found, %u", disp_num);
  23979. + return -EINVAL;
  23980. + }
  23981.  
  23982. mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
  23983. if (!mixer_cnt) {
  23984. valid_mixers = false;
  23985. - ret = -EINVAL;
  23986. - pr_warn("Configuring post processing without mixers, err = %d",
  23987. - ret);
  23988. - goto exit;
  23989. + /* exit if mixer is not writeback */
  23990. + if (!ctl->mixer_left ||
  23991. + (ctl->mixer_left->type == MDSS_MDP_MIXER_TYPE_INTF)) {
  23992. + ret = -EINVAL;
  23993. + pr_warn("No mixers for post processing err = %d\n",
  23994. + ret);
  23995. + goto exit;
  23996. + }
  23997. }
  23998. if (mdata->nad_cfgs == 0)
  23999. valid_mixers = false;
  24000. @@ -1771,7 +1780,7 @@ int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
  24001. pp_dspp_setup(disp_num, ctl->mixer_right);
  24002. }
  24003. /* clear dirty flag */
  24004. - if (disp_num < MDSS_BLOCK_DISP_NUM) {
  24005. + if (disp_num < MDSS_MAX_MIXER_DISP_NUM) {
  24006. mdss_pp_res->pp_disp_flags[disp_num] = 0;
  24007. if (disp_num < mdata->nad_cfgs)
  24008. mdata->ad_cfgs[disp_num].reg_sts = 0;
  24009. @@ -1791,41 +1800,12 @@ int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 dspp_num)
  24010. struct pp_sts_type pp_sts;
  24011. struct mdss_ad_info *ad;
  24012. struct mdss_data_type *mdata = ctl->mdata;
  24013. + struct msm_fb_data_type *bl_mfd;
  24014. if (dspp_num >= MDSS_MDP_MAX_DSPP) {
  24015. pr_warn("invalid dspp_num");
  24016. return -EINVAL;
  24017. }
  24018. disp_num = ctl->mfd->index;
  24019. -
  24020. - if (dspp_num < mdata->nad_cfgs) {
  24021. - ret = mdss_mdp_get_ad(ctl->mfd, &ad);
  24022. - if (ret)
  24023. - return ret;
  24024. -
  24025. - if (PP_AD_STATE_CFG & ad->state)
  24026. - pp_ad_cfg_write(&mdata->ad_off[dspp_num], ad);
  24027. - if (PP_AD_STATE_INIT & ad->state)
  24028. - pp_ad_init_write(&mdata->ad_off[dspp_num], ad, ctl);
  24029. - if ((PP_AD_STATE_DATA & ad->state) &&
  24030. - (ad->sts & PP_STS_ENABLE)) {
  24031. - bl = ad->bl_mfd->bl_level;
  24032. - ad->last_bl = bl;
  24033. - if (ad->state & PP_AD_STATE_BL_LIN) {
  24034. - bl = ad->bl_lin[bl >> ad->bl_bright_shift];
  24035. - bl = bl << ad->bl_bright_shift;
  24036. - ret = pp_ad_attenuate_bl(bl, &bl, ad->mfd);
  24037. - if (ret)
  24038. - pr_err("Failed to attenuate BL\n");
  24039. - }
  24040. - linear_map(bl, &ad->bl_data,
  24041. - ad->bl_mfd->panel_info->bl_max,
  24042. - MDSS_MDP_AD_BL_SCALE);
  24043. - pp_ad_input_write(&mdata->ad_off[dspp_num], ad);
  24044. - }
  24045. - if ((PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr)
  24046. - ctl->add_vsync_handler(ctl, &ad->handle);
  24047. - }
  24048. -
  24049. pp_sts = mdss_pp_res->pp_disp_sts[disp_num];
  24050.  
  24051. if (pp_sts.pa_sts & PP_STS_ENABLE) {
  24052. @@ -1893,6 +1873,44 @@ int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 dspp_num)
  24053. }
  24054.  
  24055. mdss_pp_res->pp_disp_flags[disp_num] |= flags;
  24056. +
  24057. + if (dspp_num < mdata->nad_cfgs) {
  24058. + ret = mdss_mdp_get_ad(ctl->mfd, &ad);
  24059. + if (ret) {
  24060. + pr_warn("Failed to get AD info, err = %d\n", ret);
  24061. + return ret;
  24062. + }
  24063. + if (ctl->mfd->panel_info->type == WRITEBACK_PANEL) {
  24064. + bl_mfd = mdss_get_mfd_from_index(0);
  24065. + if (!bl_mfd) {
  24066. + ret = -EINVAL;
  24067. + pr_warn("Failed to get primary FB bl handle, err = %d\n",
  24068. + ret);
  24069. + return ret;
  24070. + }
  24071. + } else {
  24072. + bl_mfd = ctl->mfd;
  24073. + }
  24074. +
  24075. + mutex_lock(&ad->lock);
  24076. + bl = bl_mfd->ad_bl_level;
  24077. + if (PP_AD_STATE_CFG & ad->state)
  24078. + pp_ad_cfg_write(&mdata->ad_off[dspp_num], ad);
  24079. + if (PP_AD_STATE_INIT & ad->state)
  24080. + pp_ad_init_write(&mdata->ad_off[dspp_num], ad, ctl);
  24081. + if ((PP_AD_STATE_DATA & ad->state) &&
  24082. + (ad->sts & PP_STS_ENABLE)) {
  24083. + ad->last_bl = bl;
  24084. + linear_map(bl, &ad->bl_data,
  24085. + ad->bl_mfd->panel_info->bl_max,
  24086. + MDSS_MDP_AD_BL_SCALE);
  24087. + pp_ad_input_write(&mdata->ad_off[dspp_num], ad);
  24088. + }
  24089. + if ((PP_AD_STATE_VSYNC & ad->state) && ad->calc_itr)
  24090. + ctl->add_vsync_handler(ctl, &ad->handle);
  24091. + mutex_unlock(&ad->lock);
  24092. + }
  24093. +
  24094. return 0;
  24095. }
  24096.  
  24097. @@ -1902,6 +1920,10 @@ int mdss_mdp_pp_init(struct device *dev)
  24098. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  24099. struct mdss_mdp_pipe *vig;
  24100. struct msm_bus_scale_pdata *pp_bus_pdata;
  24101. + struct mdp_pgc_lut_data *gc_cfg;
  24102. +
  24103. + if (!mdata)
  24104. + return -EPERM;
  24105.  
  24106. mutex_lock(&mdss_pp_mutex);
  24107. if (!mdss_pp_res) {
  24108. @@ -1921,6 +1943,17 @@ int mdss_mdp_pp_init(struct device *dev)
  24109. init_completion(
  24110. &mdss_pp_res->dspp_hist[i].first_kick);
  24111. }
  24112. +
  24113. + /*
24114. + * Set LM ARGC flags to disable. This programs the
24115. + * default GC, which allows for rounding in HW.
  24116. + */
  24117. + for (i = 0; i < MDSS_MAX_MIXER_DISP_NUM; i++) {
  24118. + gc_cfg = &mdss_pp_res->argc_disp_cfg[i];
  24119. + gc_cfg->flags = MDP_PP_OPS_DISABLE;
  24120. + mdss_pp_res->pp_disp_flags[i] |=
  24121. + PP_FLAGS_DIRTY_ARGC;
  24122. + }
  24123. }
  24124. }
  24125. if (mdata && mdata->vig_pipes) {
  24126. @@ -1967,6 +2000,89 @@ void mdss_mdp_pp_term(struct device *dev)
  24127. mutex_unlock(&mdss_pp_mutex);
  24128. }
  24129. }
  24130. +int mdss_mdp_pp_overlay_init(struct msm_fb_data_type *mfd)
  24131. +{
  24132. + if (!mfd) {
  24133. + pr_err("Invalid mfd.\n");
  24134. + return -EPERM;
  24135. + }
  24136. +
  24137. + mfd->mdp.ad_calc_bl = pp_ad_calc_bl;
  24138. + return 0;
  24139. +}
  24140. +
  24141. +static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out,
  24142. + bool *bl_out_notify)
  24143. +{
  24144. + int ret = -1;
  24145. + int temp = bl_in;
  24146. + u32 ad_bl_out = 0;
  24147. + struct mdss_ad_info *ad;
  24148. +
  24149. + ret = mdss_mdp_get_ad(mfd, &ad);
  24150. + if (ret == -ENODEV) {
  24151. + pr_debug("AD not supported on device.\n");
  24152. + return ret;
  24153. + } else if (ret || !ad) {
  24154. + pr_err("Failed to get ad info: ret = %d, ad = 0x%p.\n",
  24155. + ret, ad);
  24156. + return ret;
  24157. + }
  24158. +
  24159. + mutex_lock(&ad->lock);
  24160. + if (!(ad->state & PP_AD_STATE_RUN)) {
  24161. + pr_debug("AD is not running.\n");
  24162. + mutex_unlock(&ad->lock);
  24163. + return -EPERM;
  24164. + }
  24165. +
  24166. + if (!ad->bl_mfd || !ad->bl_mfd->panel_info ||
  24167. + !ad->bl_att_lut) {
  24168. + pr_err("Invalid ad info: bl_mfd = 0x%p, ad->bl_mfd->panel_info = 0x%p, bl_att_lut = 0x%p\n",
  24169. + ad->bl_mfd,
  24170. + (!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info,
  24171. + ad->bl_att_lut);
  24172. + mutex_unlock(&ad->lock);
  24173. + return -EINVAL;
  24174. + }
  24175. +
  24176. + ret = pp_ad_linearize_bl(ad, bl_in, &temp,
  24177. + MDP_PP_AD_BL_LINEAR);
  24178. + if (ret) {
  24179. + pr_err("Failed to linearize BL: %d\n", ret);
  24180. + mutex_unlock(&ad->lock);
  24181. + return ret;
  24182. + }
  24183. +
  24184. + ret = pp_ad_attenuate_bl(ad, temp, &temp);
  24185. + if (ret) {
  24186. + pr_err("Failed to attenuate BL: %d\n", ret);
  24187. + mutex_unlock(&ad->lock);
  24188. + return ret;
  24189. + }
  24190. + ad_bl_out = temp;
  24191. +
  24192. + ret = pp_ad_linearize_bl(ad, temp, &temp, MDP_PP_AD_BL_LINEAR_INV);
  24193. + if (ret) {
  24194. + pr_err("Failed to inverse linearize BL: %d\n", ret);
  24195. + mutex_unlock(&ad->lock);
  24196. + return ret;
  24197. + }
  24198. + *bl_out = temp;
  24199. +
  24200. + if(!mfd->ad_bl_level)
  24201. + mfd->ad_bl_level = bl_in;
  24202. +
  24203. + if (ad_bl_out != mfd->ad_bl_level) {
  24204. + mfd->ad_bl_level = ad_bl_out;
  24205. + *bl_out_notify = true;
  24206. + }
  24207. +
  24208. + pp_ad_invalidate_input(mfd);
  24209. + mutex_unlock(&ad->lock);
  24210. + return 0;
  24211. +}
  24212. +
  24213. static int pp_get_dspp_num(u32 disp_num, u32 *dspp_num)
  24214. {
  24215. int i;
  24216. @@ -3435,7 +3551,6 @@ int mdss_mdp_hist_intr_setup(struct mdss_intr *intr, int type)
  24217. return -EINVAL;
  24218. }
  24219.  
  24220. - return ret; // not used.
  24221. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  24222. spin_lock_irqsave(&intr->lock, flag);
  24223.  
  24224. @@ -4018,34 +4133,42 @@ static int mdss_mdp_get_ad(struct msm_fb_data_type *mfd,
  24225. return ret;
  24226. }
  24227.  
  24228. -static int pp_update_ad_input(struct msm_fb_data_type *mfd)
  24229. +/* must call this function from within ad->lock */
  24230. +static int pp_ad_invalidate_input(struct msm_fb_data_type *mfd)
  24231. {
  24232. int ret;
  24233. struct mdss_ad_info *ad;
  24234. - struct mdss_ad_input input;
  24235. struct mdss_mdp_ctl *ctl;
  24236.  
  24237. - if (!mfd)
  24238. + if (!mfd) {
  24239. + pr_err("Invalid mfd\n");
  24240. return -EINVAL;
  24241. + }
  24242. ctl = mfd_to_ctl(mfd);
  24243. - if (!ctl)
  24244. + if (!ctl) {
  24245. + pr_err("Invalid ctl\n");
  24246. return -EINVAL;
  24247. + }
  24248.  
  24249. ret = mdss_mdp_get_ad(mfd, &ad);
  24250. - if (ret)
  24251. - return ret;
  24252. - if (!ad || ad->cfg.mode == MDSS_AD_MODE_AUTO_BL)
  24253. + if (ret || !ad) {
  24254. + pr_err("Fail to get ad: ret = %d, ad = 0x%p\n", ret, ad);
  24255. return -EINVAL;
  24256. + }
  24257. + pr_debug("AD backlight level changed (%d), trigger update to AD\n",
  24258. + mfd->ad_bl_level);
  24259. + if (ad->cfg.mode == MDSS_AD_MODE_AUTO_BL) {
  24260. + pr_err("AD auto backlight no longer supported.\n");
  24261. + return -EINVAL;
  24262. + }
  24263.  
  24264. - pr_debug("backlight level changed (%d), trigger update to AD",
  24265. - mfd->bl_level);
  24266. - input.mode = ad->cfg.mode;
  24267. - if (MDSS_AD_MODE_DATA_MATCH(ad->cfg.mode, MDSS_AD_INPUT_AMBIENT))
  24268. - input.in.amb_light = ad->ad_data;
  24269. - else
  24270. - input.in.strength = ad->ad_data;
  24271. - /* call to ad_input will trigger backlight read */
  24272. - return mdss_mdp_ad_input(mfd, &input, 0);
  24273. + if (ad->state & PP_AD_STATE_RUN) {
  24274. + ad->calc_itr = ad->cfg.stab_itr;
  24275. + ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  24276. + ad->sts |= PP_AD_STS_DIRTY_DATA;
  24277. + }
  24278. +
  24279. + return 0;
  24280. }
  24281.  
  24282. int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
  24283. @@ -4054,7 +4177,7 @@ int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
  24284. struct mdss_ad_info *ad;
  24285. struct msm_fb_data_type *bl_mfd;
  24286. int lin_ret = -1, inv_ret = -1, att_ret = -1, ret = 0;
  24287. - u32 ratio_temp, shift = 0, last_ops;
  24288. + u32 last_ops;
  24289.  
  24290. ret = mdss_mdp_get_ad(mfd, &ad);
  24291. if (ret)
  24292. @@ -4087,12 +4210,6 @@ int mdss_mdp_ad_config(struct msm_fb_data_type *mfd,
  24293. sizeof(uint32_t));
  24294. if (lin_ret || inv_ret)
  24295. ret = -ENOMEM;
  24296. - ratio_temp = mfd->panel_info->bl_max / AD_BL_LIN_LEN;
  24297. - while (ratio_temp > 0) {
  24298. - ratio_temp = ratio_temp >> 1;
  24299. - shift++;
  24300. - }
  24301. - ad->bl_bright_shift = shift;
  24302. } else {
  24303. ret = -EINVAL;
  24304. }
  24305. @@ -4192,7 +4309,7 @@ int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
  24306. goto error;
  24307. }
  24308. ad->ad_data_mode = MDSS_AD_INPUT_AMBIENT;
  24309. - pr_debug("ambient = %d", input->in.amb_light);
  24310. + pr_debug("ambient = %d\n", input->in.amb_light);
  24311. ad->ad_data = input->in.amb_light;
  24312. ad->calc_itr = ad->cfg.stab_itr;
  24313. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  24314. @@ -4211,7 +4328,7 @@ int mdss_mdp_ad_input(struct msm_fb_data_type *mfd,
  24315. goto error;
  24316. }
  24317. ad->ad_data_mode = MDSS_AD_INPUT_STRENGTH;
  24318. - pr_debug("strength = %d", input->in.strength);
  24319. + pr_debug("strength = %d\n", input->in.strength);
  24320. ad->ad_data = input->in.strength;
  24321. ad->calc_itr = ad->cfg.stab_itr;
  24322. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  24323. @@ -4527,26 +4644,18 @@ static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
  24324. */
  24325. ad->sts &= ~PP_AD_STS_DIRTY_DATA;
  24326. ad->state |= PP_AD_STATE_DATA;
  24327. - mutex_lock(&bl_mfd->bl_lock);
  24328. - bl = bl_mfd->bl_level;
  24329. pr_debug("dirty data, last_bl = %d ", ad->last_bl);
  24330. + bl = bl_mfd->ad_bl_level;
  24331. +
  24332. if ((ad->cfg.mode == MDSS_AD_MODE_AUTO_STR) &&
  24333. (ad->last_bl != bl)) {
  24334. ad->last_bl = bl;
  24335. ad->calc_itr = ad->cfg.stab_itr;
  24336. ad->sts |= PP_AD_STS_DIRTY_VSYNC;
  24337. - if (ad->state & PP_AD_STATE_BL_LIN) {
  24338. - bl = ad->bl_lin[bl >> ad->bl_bright_shift];
  24339. - bl = bl << ad->bl_bright_shift;
  24340. - ret = pp_ad_attenuate_bl(bl, &bl, ad->mfd);
  24341. - if (ret)
  24342. - pr_err("Failed to attenuate BL\n");
  24343. - }
  24344. linear_map(bl, &ad->bl_data,
  24345. ad->bl_mfd->panel_info->bl_max,
  24346. MDSS_MDP_AD_BL_SCALE);
  24347. }
  24348. - mutex_unlock(&bl_mfd->bl_lock);
  24349. ad->reg_sts |= PP_AD_STS_DIRTY_DATA;
  24350. }
  24351.  
  24352. @@ -4590,14 +4699,9 @@ static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
  24353. bypass = 0;
  24354. ad->reg_sts |= PP_AD_STS_DIRTY_ENABLE;
  24355. ad->state |= PP_AD_STATE_RUN;
  24356. - mutex_lock(&bl_mfd->bl_lock);
  24357. if (bl_mfd != mfd)
  24358. bl_mfd->ext_ad_ctrl = mfd->index;
  24359. - bl_mfd->mdp.update_ad_input = pp_update_ad_input;
  24360. - bl_mfd->mdp.ad_attenuate_bl = pp_ad_attenuate_bl;
  24361. bl_mfd->ext_bl_ctrl = ad->cfg.bl_ctrl_mode;
  24362. - mutex_unlock(&bl_mfd->bl_lock);
  24363. -
  24364. } else {
  24365. if (ad->state & PP_AD_STATE_RUN) {
  24366. ad->reg_sts = PP_AD_STS_DIRTY_ENABLE;
  24367. @@ -4608,7 +4712,6 @@ static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
  24368. ad->state &= !PP_AD_STATE_CFG;
  24369. ad->state &= !PP_AD_STATE_DATA;
  24370. ad->state &= !PP_AD_STATE_BL_LIN;
  24371. - ad->bl_bright_shift = 0;
  24372. ad->ad_data = 0;
  24373. ad->ad_data_mode = 0;
  24374. ad->last_bl = 0;
  24375. @@ -4622,12 +4725,8 @@ static int mdss_mdp_ad_setup(struct msm_fb_data_type *mfd)
  24376. AD_BL_ATT_LUT_LEN);
  24377. memset(&ad->init, 0, sizeof(struct mdss_ad_init));
  24378. memset(&ad->cfg, 0, sizeof(struct mdss_ad_cfg));
  24379. - mutex_lock(&bl_mfd->bl_lock);
  24380. - bl_mfd->mdp.update_ad_input = NULL;
  24381. - bl_mfd->mdp.ad_attenuate_bl = NULL;
  24382. bl_mfd->ext_bl_ctrl = 0;
  24383. bl_mfd->ext_ad_ctrl = -1;
  24384. - mutex_unlock(&bl_mfd->bl_lock);
  24385. }
  24386. ad->state &= ~PP_AD_STATE_RUN;
  24387. }
  24388. @@ -4669,7 +4768,7 @@ static void pp_ad_calc_worker(struct work_struct *work)
  24389. struct msm_fb_data_type *mfd, *bl_mfd;
  24390. struct mdss_data_type *mdata;
  24391. char __iomem *base;
  24392. - u32 bl, calc_done = 0;
  24393. + u32 calc_done = 0;
  24394. ad = container_of(work, struct mdss_ad_info, calc_work);
  24395.  
  24396. mutex_lock(&ad->lock);
  24397. @@ -4711,22 +4810,8 @@ static void pp_ad_calc_worker(struct work_struct *work)
  24398. if (calc_done) {
  24399. ad->last_str = 0xFF & readl_relaxed(base +
  24400. MDSS_MDP_REG_AD_STR_OUT);
  24401. - if (MDSS_AD_RUNNING_AUTO_BL(ad)) {
  24402. - bl = 0xFFFF & readl_relaxed(base +
  24403. - MDSS_MDP_REG_AD_BL_OUT);
  24404. - if (ad->state & PP_AD_STATE_BL_LIN) {
  24405. - bl = bl >> ad->bl_bright_shift;
  24406. - bl = min_t(u32, bl, (AD_BL_LIN_LEN-1));
  24407. - bl = ad->bl_lin_inv[bl];
  24408. - bl = bl << ad->bl_bright_shift;
  24409. - }
  24410. - pr_debug("calc bl = %d", bl);
  24411. - ad->last_str |= bl << 16;
  24412. - mutex_lock(&ad->bl_mfd->bl_lock);
  24413. - if (ad->bl_mfd->bl_level)
  24414. - mdss_fb_set_backlight(ad->bl_mfd, bl);
  24415. - mutex_unlock(&ad->bl_mfd->bl_lock);
  24416. - }
  24417. + if (MDSS_AD_RUNNING_AUTO_BL(ad))
  24418. + pr_err("AD auto backlight no longer supported.\n");
  24419. pr_debug("calc_str = %d, calc_itr %d",
  24420. ad->last_str & 0xFF,
  24421. ad->calc_itr);
  24422. @@ -4765,25 +4850,17 @@ static void pp_ad_cfg_lut(char __iomem *addr, u32 *data)
  24423. addr + ((PP_AD_LUT_LEN - 1) * 2));
  24424. }
  24425.  
  24426. -static int pp_ad_attenuate_bl(u32 bl, u32 *bl_out,
  24427. - struct msm_fb_data_type *mfd)
  24428. +/* must call this function from within ad->lock */
  24429. +static int pp_ad_attenuate_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out)
  24430. {
  24431. u32 shift = 0, ratio_temp = 0;
  24432. u32 n, lut_interval, bl_att;
  24433. - int ret = -1;
  24434. - struct mdss_ad_info *ad;
  24435.  
  24436. if (bl < 0) {
  24437. pr_err("Invalid backlight input\n");
  24438. - return ret;
  24439. + return -EINVAL;
  24440. }
  24441.  
  24442. - ret = mdss_mdp_get_ad(mfd, &ad);
  24443. - if (ret || !ad || !ad->bl_mfd || !ad->bl_mfd->panel_info ||
  24444. - !ad->bl_mfd->panel_info->bl_max || !ad->bl_att_lut) {
  24445. - pr_err("Failed to get the ad.\n");
  24446. - return ret;
  24447. - }
  24448. pr_debug("bl_in = %d\n", bl);
  24449. /* map panel backlight range to AD backlight range */
  24450. linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
  24451. @@ -4798,7 +4875,7 @@ static int pp_ad_attenuate_bl(u32 bl, u32 *bl_out,
  24452. n = bl >> shift;
  24453. if (n >= (AD_BL_ATT_LUT_LEN - 1)) {
  24454. pr_err("Invalid index for BL attenuation: %d.\n", n);
  24455. - return ret;
  24456. + return -EINVAL;
  24457. }
  24458. lut_interval = (MDSS_MDP_AD_BL_SCALE + 1) / (AD_BL_ATT_LUT_LEN - 1);
  24459. bl_att = ad->bl_att_lut[n] + (bl - lut_interval * n) *
  24460. @@ -4821,6 +4898,63 @@ static int pp_ad_attenuate_bl(u32 bl, u32 *bl_out,
  24461. return 0;
  24462. }
  24463.  
  24464. +/* must call this function from within ad->lock */
  24465. +static int pp_ad_linearize_bl(struct mdss_ad_info *ad, u32 bl, u32 *bl_out,
  24466. + int inv)
  24467. +{
  24468. +
  24469. + u32 n;
  24470. + int ret = -EINVAL;
  24471. +
  24472. + if (bl < 0 || bl > ad->bl_mfd->panel_info->bl_max) {
  24473. + pr_err("Invalid backlight input: bl = %d, bl_max = %d\n", bl,
  24474. + ad->bl_mfd->panel_info->bl_max);
  24475. + return -EINVAL;
  24476. + }
  24477. +
  24478. + pr_debug("bl_in = %d, inv = %d\n", bl, inv);
  24479. +
  24480. + /* map panel backlight range to AD backlight range */
  24481. + linear_map(bl, &bl, ad->bl_mfd->panel_info->bl_max,
  24482. + MDSS_MDP_AD_BL_SCALE);
  24483. +
  24484. + pr_debug("Before linearization = %d\n", bl);
  24485. + n = bl * (AD_BL_LIN_LEN - 1) / MDSS_MDP_AD_BL_SCALE;
  24486. + pr_debug("n = %d\n", n);
  24487. + if (n > (AD_BL_LIN_LEN - 1)) {
  24488. + pr_err("Invalid index for BL linearization: %d.\n", n);
  24489. + return ret;
  24490. + } else if (n == (AD_BL_LIN_LEN - 1)) {
  24491. + if (inv == MDP_PP_AD_BL_LINEAR_INV)
  24492. + *bl_out = ad->bl_lin_inv[n];
  24493. + else if (inv == MDP_PP_AD_BL_LINEAR)
  24494. + *bl_out = ad->bl_lin[n];
  24495. + } else {
  24496. + /* linear piece-wise interpolation */
  24497. + if (inv == MDP_PP_AD_BL_LINEAR_INV) {
  24498. + *bl_out = bl * (AD_BL_LIN_LEN - 1) *
  24499. + (ad->bl_lin_inv[n + 1] - ad->bl_lin_inv[n]) /
  24500. + MDSS_MDP_AD_BL_SCALE - n *
  24501. + (ad->bl_lin_inv[n + 1] - ad->bl_lin_inv[n]) +
  24502. + ad->bl_lin_inv[n];
  24503. + } else if (inv == MDP_PP_AD_BL_LINEAR) {
  24504. + *bl_out = bl * (AD_BL_LIN_LEN - 1) *
  24505. + (ad->bl_lin[n + 1] - ad->bl_lin[n]) /
  24506. + MDSS_MDP_AD_BL_SCALE -
  24507. + n * (ad->bl_lin[n + 1] - ad->bl_lin[n]) +
  24508. + ad->bl_lin[n];
  24509. + }
  24510. + }
  24511. + pr_debug("After linearization = %d\n", *bl_out);
  24512. +
  24513. + /* map AD backlight range back to panel backlight range */
  24514. + linear_map(*bl_out, bl_out, MDSS_MDP_AD_BL_SCALE,
  24515. + ad->bl_mfd->panel_info->bl_max);
  24516. +
  24517. + pr_debug("bl_out = %d\n", *bl_out);
  24518. + return 0;
  24519. +}
  24520. +
  24521. int mdss_mdp_ad_addr_setup(struct mdss_data_type *mdata, u32 *ad_offsets)
  24522. {
  24523. u32 i;
  24524. @@ -5188,34 +5322,6 @@ int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
  24525. return 0;
  24526. }
  24527.  
  24528. -#if defined(CONFIG_MDNIE_TFT_MSM8X26) || defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL) || defined(CONFIG_MDNIE_VIDEO_ENHANCED)
  24529. -void mdss_negative_color(int is_negative_on)
  24530. -{
  24531. - u32 copyback;
  24532. - int i;
  24533. - struct mdss_mdp_ctl *ctl;
  24534. - struct mdss_mdp_ctl *ctl_d = NULL;
  24535. - struct mdss_data_type *mdata;
  24536. -
  24537. - mdata = mdss_mdp_get_mdata();
  24538. - for (i = 0; i < mdata->nctl; i++) {
  24539. - ctl = mdata->ctl_off + i;
  24540. - if ((ctl->power_on) && (ctl->mfd) && (ctl->mfd->index == 0)) {
  24541. - ctl_d = ctl;
  24542. - break;
  24543. - }
  24544. - }
  24545. - if (ctl_d) {
  24546. - if(is_negative_on)
  24547. - mdss_mdp_pcc_config(&pcc_reverse, &copyback);
  24548. - else
  24549. - mdss_mdp_pcc_config(&pcc_normal, &copyback);
  24550. - } else {
  24551. - pr_info("%s:ctl_d is NULL ", __func__);
  24552. - }
  24553. -}
  24554. -#endif
  24555. -
  24556. int mdss_mdp_calib_config_buffer(struct mdp_calib_config_buffer *cfg,
  24557. u32 *copyback)
  24558. {
  24559. diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
  24560. index b0b726f..41f7f94 100755
  24561. --- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
  24562. +++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
  24563. @@ -1,4 +1,4 @@
  24564. -/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  24565. +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  24566. *
  24567. * This program is free software; you can redistribute it and/or modify
  24568. * it under the terms of the GNU General Public License version 2 and
  24569. @@ -99,14 +99,14 @@ static struct mdss_mdp_pipe *mdss_mdp_rotator_pipe_alloc(void)
  24570.  
  24571. mixer = mdss_mdp_wb_mixer_alloc(1);
  24572. if (!mixer) {
  24573. - pr_err("wb mixer alloc failed\n");
  24574. + pr_debug("wb mixer alloc failed\n");
  24575. return NULL;
  24576. }
  24577.  
  24578. pipe = mdss_mdp_pipe_alloc_dma(mixer);
  24579. if (!pipe) {
  24580. mdss_mdp_wb_mixer_destroy(mixer);
  24581. - pr_err("dma pipe allocation failed\n");
  24582. + pr_debug("dma pipe allocation failed\n");
  24583. return NULL;
  24584. }
  24585.  
  24586. @@ -469,13 +469,14 @@ int mdss_mdp_rotator_setup(struct msm_fb_data_type *mfd,
  24587. list_add(&rot->list, &mdp5_data->rot_proc_list);
  24588. } else if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
  24589. rot = mdss_mdp_rotator_session_get(req->id);
  24590. +
  24591. if (!rot) {
  24592. pr_err("rotator session=%x not found\n", req->id);
  24593. ret = -ENODEV;
  24594. goto rot_err;
  24595. }
  24596.  
  24597. - if (work_busy(&rot->commit_work)) {
  24598. + if (work_pending(&rot->commit_work)) {
  24599. mutex_unlock(&rotator_lock);
  24600. flush_work(&rot->commit_work);
  24601. mutex_lock(&rotator_lock);
  24602. @@ -646,11 +647,12 @@ static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
  24603.  
  24604. rot_pipe = rot->pipe;
  24605. if (rot_pipe) {
  24606. - if (work_busy(&rot->commit_work)) {
  24607. + if (work_pending(&rot->commit_work)) {
  24608. mutex_unlock(&rotator_lock);
  24609. - flush_work(&rot->commit_work);
  24610. + cancel_work_sync(&rot->commit_work);
  24611. mutex_lock(&rotator_lock);
  24612. }
  24613. +
  24614. mdss_mdp_rotator_busy_wait(rot);
  24615. list_del(&rot->head);
  24616. }
  24617. @@ -754,17 +756,6 @@ int mdss_mdp_rotator_play(struct msm_fb_data_type *mfd,
  24618. pr_err("rotator queue error session id=%x\n", req->id);
  24619.  
  24620. dst_buf_fail:
  24621. - if(ret){
  24622. - if (rot && rot->use_sync_pt){
  24623. - if (rot->rot_sync_pt_data) {
  24624. - atomic_inc(&rot->rot_sync_pt_data->commit_cnt);
  24625. - mdss_fb_signal_timeline(rot->rot_sync_pt_data);
  24626. - pr_err("release fence as this commit is failed.\n");
  24627. - } else {
  24628. - pr_err("rot_sync_pt_data is NULL\n");
  24629. - }
  24630. - }
  24631. - }
  24632. mutex_unlock(&rotator_lock);
  24633. return ret;
  24634. }
  24635. diff --git a/include/linux/avtimer_kernel.h b/include/linux/avtimer_kernel.h
  24636. new file mode 100644
  24637. index 0000000..5eff8cc
  24638. --- /dev/null
  24639. +++ b/include/linux/avtimer_kernel.h
  24640. @@ -0,0 +1,24 @@
  24641. +/*
  24642. + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  24643. + *
  24644. + * This program is free software; you can redistribute it and/or modify
  24645. + * it under the terms of the GNU General Public License version 2 and
  24646. + * only version 2 as published by the Free Software Foundation.
  24647. + *
  24648. + * This program is distributed in the hope that it will be useful,
  24649. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24650. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24651. + * GNU General Public License for more details.
  24652. + *
  24653. + */
  24654. +
  24655. +#ifndef _AVTIMER_H
  24656. +#define _AVTIMER_H
  24657. +
  24658. +#include <uapi/linux/avtimer.h>
  24659. +
  24660. +int avcs_core_open(void);
23661. +int avcs_core_disable_power_collapse(int disable); /* true or false */
  24662. +int avcs_core_query_timer(uint64_t *avtimer_tick);
  24663. +
  24664. +#endif
  24665. diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
  24666. index f8b849d..6a5f4c6 100644
  24667. --- a/include/linux/mmc/card.h
  24668. +++ b/include/linux/mmc/card.h
  24669. @@ -326,6 +326,10 @@ struct mmc_bkops_info {
  24670. #define BKOPS_SIZE_PERCENTAGE_TO_QUEUE_DELAYED_WORK 1 /* 1% */
  24671. };
  24672.  
  24673. +enum mmc_pon_type {
  24674. + MMC_LONG_PON = 1,
  24675. + MMC_SHRT_PON,
  24676. +};
  24677. /*
  24678. * MMC device
  24679. */
  24680. @@ -416,7 +420,7 @@ struct mmc_card {
  24681. struct device_attribute rpm_attrib;
  24682. unsigned int idle_timeout;
  24683. struct notifier_block reboot_notify;
  24684. - bool issue_long_pon;
  24685. + enum mmc_pon_type pon_type;
  24686. u8 *cached_ext_csd;
  24687. };
  24688.  
  24689. @@ -674,5 +678,5 @@ extern struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(
  24690. struct mmc_card *card);
  24691. extern void mmc_blk_init_packed_statistics(struct mmc_card *card);
  24692. extern void mmc_blk_disable_wr_packing(struct mmc_queue *mq);
  24693. -extern int mmc_send_long_pon(struct mmc_card *card);
  24694. +extern int mmc_send_pon(struct mmc_card *card);
  24695. #endif /* LINUX_MMC_CARD_H */
  24696. diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
  24697. index 5fb6ffd..1045f14 100644
  24698. --- a/include/linux/msm_mdp.h
  24699. +++ b/include/linux/msm_mdp.h
  24700. @@ -528,6 +528,7 @@ enum mdss_mdp_blend_op {
  24701. BLEND_OP_COVERAGE,
  24702. BLEND_OP_MAX,
  24703. };
  24704. +
  24705. #define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
  24706. #define MAX_PLANES 4
  24707. struct mdp_scale_data {
  24708. diff --git a/include/linux/prctl.h b/include/linux/prctl.h
  24709. index 8879b34..23728f0 100644
  24710. --- a/include/linux/prctl.h
  24711. +++ b/include/linux/prctl.h
  24712. @@ -128,9 +128,7 @@
  24713. * arg2 slack value, 0 means "use default"
  24714. * arg3 pid of the thread whose timer slack needs to be set
  24715. */
  24716. -#ifndef CONFIG_SEC_H_PROJECT
  24717. - #define PR_SET_TIMERSLACK_PID 41
  24718. -#endif
  24719. +#define PR_SET_TIMERSLACK_PID 41
  24720.  
  24721. #define PR_SET_VMA 0x53564d41
  24722. # define PR_SET_VMA_ANON_NAME 0
  24723. diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
  24724. index 2a5ebd4..64cd4b4 100644
  24725. --- a/include/linux/usb/msm_hsusb_hw.h
  24726. +++ b/include/linux/usb/msm_hsusb_hw.h
  24727. @@ -87,6 +87,8 @@
  24728. #define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
  24729. #define PHY_IDHV_INTEN (1 << 8) /* PHY ID HV interrupt */
  24730. #define PHY_OTGSESSVLDHV_INTEN (1 << 9) /* PHY Session Valid HV int. */
  24731. +#define PHY_DPSE_INTEN (1 << 14) /* PHY DPSE HV interrupt*/
  24732. +#define PHY_DMSE_INTEN (1 << 20) /* PHY DMSE HV interrupt*/
  24733. #define PHY_CLAMP_DPDMSE_EN (1 << 21) /* PHY mpm DP DM clamp enable */
  24734. #define PHY_POR_BIT_MASK BIT(0)
  24735. #define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */
  24736. diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
  24737. index c64c59e..55d6b1e 100644
  24738. --- a/include/linux/wcnss_wlan.h
  24739. +++ b/include/linux/wcnss_wlan.h
  24740. @@ -1,4 +1,4 @@
  24741. -/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  24742. +/* Copyright (c) 2011-2013,2015 The Linux Foundation. All rights reserved.
  24743. *
  24744. * This program is free software; you can redistribute it and/or modify
  24745. * it under the terms of the GNU General Public License version 2 and
  24746. @@ -29,7 +29,6 @@ enum wcnss_hw_type {
  24747. struct wcnss_wlan_config {
  24748. int use_48mhz_xo;
  24749. int is_pronto_v3;
  24750. - void __iomem *msm_wcnss_base;
  24751. };
  24752.  
  24753. enum {
  24754. @@ -44,7 +43,8 @@ enum {
  24755. #define HAVE_WCNSS_CAL_DOWNLOAD 1
  24756. #define HAVE_WCNSS_RX_BUFF_COUNT 1
  24757. #define WLAN_MAC_ADDR_SIZE (6)
  24758. -#define CONFIG_WCNSS_REGISTER_DUMP_ON_BITE 1
  24759. +#define PRONTO_PMU_OFFSET 0x1004
  24760. +#define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP BIT(5)
  24761.  
  24762. void wcnss_get_monotonic_boottime(struct timespec *ts);
  24763. struct device *wcnss_wlan_get_device(void);
  24764. diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
  24765. index eb02dcc..7c6d5be 100644
  24766. --- a/include/media/msm_cam_sensor.h
  24767. +++ b/include/media/msm_cam_sensor.h
  24768. @@ -49,6 +49,7 @@
  24769. #define MAX_EEPROM_NAME 32
  24770.  
  24771. #define MAX_NUMBER_OF_STEPS 47
  24772. +#define MAX_POWER_CONFIG 12
  24773.  
  24774. //************************************* Native functionalities for YUV sensor added by jai.prakash
  24775. #define EXT_CAM_EV 1
  24776. diff --git a/include/sound/Kbuild b/include/sound/Kbuild
  24777. index 62d56c6..170ef9e 100644
  24778. --- a/include/sound/Kbuild
  24779. +++ b/include/sound/Kbuild
  24780. @@ -15,3 +15,4 @@ header-y += compress_offload.h
  24781. header-y += lsm_params.h
  24782. header-y += voice_params.h
  24783. header-y += voice_svc.h
  24784. +header-y += msmcal-hwdep.h
  24785. diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
  24786. index 184cff3..028f3d7 100644
  24787. --- a/include/sound/apr_audio-v2.h
  24788. +++ b/include/sound/apr_audio-v2.h
  24789. @@ -7382,7 +7382,7 @@ struct afe_svc_cmd_set_clip_bank_selection {
  24790. /* Ultrasound supported formats */
  24791. #define US_POINT_EPOS_FORMAT_V2 0x0001272D
  24792. #define US_RAW_FORMAT_V2 0x0001272C
  24793. -#define US_PROX_FORMAT_V2 0x0001272E
  24794. +#define US_PROX_FORMAT_V4 0x0001273B
  24795. #define US_RAW_SYNC_FORMAT 0x0001272F
  24796. #define US_GES_SYNC_FORMAT 0x00012730
  24797. #endif /*_APR_AUDIO_V2_H_ */
  24798. diff --git a/include/sound/asound.h b/include/sound/asound.h
  24799. index 7bf01b6..244bb30 100644
  24800. --- a/include/sound/asound.h
  24801. +++ b/include/sound/asound.h
  24802. @@ -95,9 +95,10 @@ enum {
  24803. SNDRV_HWDEP_IFACE_SB_RC, /* SB Extigy/Audigy2NX remote control */
  24804. SNDRV_HWDEP_IFACE_HDA, /* HD-audio */
  24805. SNDRV_HWDEP_IFACE_USB_STREAM, /* direct access to usb stream */
  24806. + SNDRV_HWDEP_IFACE_AUDIO_CODEC, /* codec Audio Control */
  24807.  
  24808. /* Don't forget to change the following: */
  24809. - SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_USB_STREAM
  24810. + SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_AUDIO_CODEC
  24811. };
  24812.  
  24813. struct snd_hwdep_info {
  24814. diff --git a/include/sound/compress_offload.h b/include/sound/compress_offload.h
  24815. index 5a5599a..4292802 100644
  24816. --- a/include/sound/compress_offload.h
  24817. +++ b/include/sound/compress_offload.h
  24818. @@ -131,6 +131,8 @@ struct snd_compr_codec_caps {
  24819. enum {
  24820. SNDRV_COMPRESS_ENCODER_PADDING = 1,
  24821. SNDRV_COMPRESS_ENCODER_DELAY = 2,
  24822. + SNDRV_COMPRESS_MIN_BLK_SIZE = 3,
  24823. + SNDRV_COMPRESS_MAX_BLK_SIZE = 4,
  24824. };
  24825.  
  24826. /**
  24827. diff --git a/include/sound/msmcal-hwdep.h b/include/sound/msmcal-hwdep.h
  24828. new file mode 100644
  24829. index 0000000..324b497
  24830. --- /dev/null
  24831. +++ b/include/sound/msmcal-hwdep.h
  24832. @@ -0,0 +1,34 @@
  24833. +/*
  24834. + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  24835. + *
  24836. + * This program is free software; you can redistribute it and/or modify
  24837. + * it under the terms of the GNU General Public License version 2 and
  24838. + * only version 2 as published by the Free Software Foundation.
  24839. + *
  24840. + * This program is distributed in the hope that it will be useful,
  24841. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24842. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24843. + * GNU General Public License for more details.
  24844. + */
  24845. +#ifndef _CALIB_HWDEP_H
  24846. +#define _CALIB_HWDEP_H
  24847. +
  24848. +#define WCD9XXX_CODEC_HWDEP_NODE 1000
  24849. +enum wcd_cal_type {
  24850. + WCD9XXX_MIN_CAL,
  24851. + WCD9XXX_ANC_CAL = WCD9XXX_MIN_CAL,
  24852. + WCD9XXX_MAD_CAL,
  24853. + WCD9XXX_MBHC_CAL,
  24854. + WCD9XXX_MAX_CAL,
  24855. +};
  24856. +
  24857. +struct wcdcal_ioctl_buffer {
  24858. + __u32 size;
  24859. + __u8 __user *buffer;
  24860. + enum wcd_cal_type cal_type;
  24861. +};
  24862. +
  24863. +#define SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE \
  24864. + _IOW('U', 0x1, struct wcdcal_ioctl_buffer)
  24865. +
  24866. +#endif /*_CALIB_HWDEP_H*/
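For reference, a minimal userspace sketch of the calibration-download ioctl defined in the new msmcal-hwdep.h header above. The /dev/snd/hwC0D1000 node path and the anc_cal.bin blob name are assumptions for illustration only; they are not mandated by the patch.

/* Sketch: push an ANC calibration blob via SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE.
 * The device node and blob file name below are assumed, not part of the patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/msmcal-hwdep.h>

int main(void)
{
	struct wcdcal_ioctl_buffer cal = {0};
	FILE *fp = fopen("anc_cal.bin", "rb");           /* assumed blob path */
	int fd, ret = 1;

	if (!fp)
		return 1;
	fseek(fp, 0, SEEK_END);
	cal.size = ftell(fp);
	rewind(fp);
	cal.buffer = malloc(cal.size);
	if (cal.buffer && fread(cal.buffer, 1, cal.size, fp) == cal.size) {
		cal.cal_type = WCD9XXX_ANC_CAL;
		fd = open("/dev/snd/hwC0D1000", O_RDWR); /* assumed hwdep node */
		if (fd >= 0) {
			ret = ioctl(fd, SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE, &cal) ? 1 : 0;
			close(fd);
		}
	}
	fclose(fp);
	free(cal.buffer);
	return ret;
}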
  24867. diff --git a/include/sound/q6core.h b/include/sound/q6core.h
  24868. old mode 100755
  24869. new mode 100644
  24870. diff --git a/include/uapi/linux/avtimer.h b/include/uapi/linux/avtimer.h
  24871. new file mode 100644
  24872. index 0000000..f688b38
  24873. --- /dev/null
  24874. +++ b/include/uapi/linux/avtimer.h
  24875. @@ -0,0 +1,19 @@
  24876. +#ifndef _UAPI_AVTIMER_H
  24877. +#define _UAPI_AVTIMER_H
  24878. +
  24879. +#include <linux/ioctl.h>
  24880. +
  24881. +#define MAJOR_NUM 100
  24882. +
  24883. +#define IOCTL_GET_AVTIMER_TICK _IOR(MAJOR_NUM, 0, char *)
  24884. +/*
  24885. + * This IOCTL is used read the avtimer tick value.
  24886. + * Avtimer is a 64 bit timer tick, hence the expected
  24887. + * argument is of type uint64_t
  24888. + */
  24889. +struct dev_avtimer_data {
  24890. + uint32_t avtimer_msw_phy_addr;
  24891. + uint32_t avtimer_lsw_phy_addr;
  24892. +};
  24893. +
  24894. +#endif
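For reference, a minimal userspace sketch of reading the 64-bit tick through IOCTL_GET_AVTIMER_TICK from the new avtimer.h header above. The /dev/avtimer device node name is an assumption for illustration only.

/* Sketch: query the avtimer tick. The device node name below is assumed. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/avtimer.h>

int main(void)
{
	uint64_t tick = 0;
	int fd = open("/dev/avtimer", O_RDONLY);  /* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_GET_AVTIMER_TICK, &tick) == 0)
		printf("avtimer tick: %llu\n", (unsigned long long)tick);
	close(fd);
	return 0;
}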
  24895. diff --git a/kernel/sched/core.c b/kernel/sched/core.c
  24896. index b873f1f..d847509 100644
  24897. --- a/kernel/sched/core.c
  24898. +++ b/kernel/sched/core.c
  24899. @@ -1607,6 +1607,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  24900. {
  24901. unsigned long flags;
  24902. int cpu, src_cpu, success = 0;
  24903. + int notify = 0;
  24904.  
  24905. smp_wmb();
  24906. raw_spin_lock_irqsave(&p->pi_lock, flags);
  24907. @@ -1664,10 +1665,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  24908. ttwu_queue(p, cpu);
  24909. stat:
  24910. ttwu_stat(p, cpu, wake_flags);
  24911. +
  24912. + if (src_cpu != cpu && task_notify_on_migrate(p))
  24913. + notify = 1;
  24914. out:
  24915. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  24916.  
  24917. - if (src_cpu != cpu && task_notify_on_migrate(p))
  24918. + if (notify)
  24919. atomic_notifier_call_chain(&migration_notifier_head,
  24920. cpu, (void *)src_cpu);
  24921. return success;
  24922. diff --git a/kernel/sys.c b/kernel/sys.c
  24923. index fc5ec84..7d01c44 100644
  24924. --- a/kernel/sys.c
  24925. +++ b/kernel/sys.c
  24926. @@ -50,9 +50,6 @@
  24927. #include <linux/user_namespace.h>
  24928.  
  24929. #include <linux/kmsg_dump.h>
  24930. -#ifdef CONFIG_SEC_DEBUG
  24931. -#include <mach/sec_debug.h>
  24932. -#endif
  24933. /* Move somewhere else to avoid recompiling? */
  24934. #include <generated/utsrelease.h>
  24935.  
  24936. @@ -129,54 +126,6 @@ EXPORT_SYMBOL(cad_pid);
  24937.  
  24938. void (*pm_power_off_prepare)(void);
  24939.  
  24940. -#if defined CONFIG_SEC_RESTRICT_SETUID
  24941. -int sec_check_execpath(struct mm_struct *mm, char *denypath);
  24942. -#if defined CONFIG_SEC_RESTRICT_ROOTING_LOG
  24943. -#define PRINT_LOG(...) printk(KERN_ERR __VA_ARGS__)
  24944. -#else
  24945. -#define PRINT_LOG(...)
  24946. -#endif // End of CONFIG_SEC_RESTRICT_ROOTING_LOG
  24947. -
  24948. -static int sec_restrict_uid(void)
  24949. -{
  24950. - int ret = 0;
  24951. - struct task_struct *parent_tsk;
  24952. - const struct cred *parent_cred;
  24953. -
  24954. - read_lock(&tasklist_lock);
  24955. - parent_tsk = current->parent;
  24956. - if (!parent_tsk) {
  24957. - read_unlock(&tasklist_lock);
  24958. - return 0;
  24959. - }
  24960. -
  24961. - get_task_struct(parent_tsk);
  24962. - /* holding on to the task struct is enough so just release
  24963. - * the tasklist lock here */
  24964. - read_unlock(&tasklist_lock);
  24965. -
  24966. - parent_cred = get_task_cred(parent_tsk);
  24967. - if (!parent_cred)
  24968. - goto out;
  24969. - if (parent_cred->euid == 0 || parent_tsk->pid == 1) {
  24970. - ret = 0;
  24971. - } else if (sec_check_execpath(current->mm, "/system/bin/pppd")) {
  24972. - PRINT_LOG("VPN allowed to use root permission");
  24973. - ret = 0;
  24974. - } else {
  24975. - PRINT_LOG("Restricted changing UID. PID = %d(%s) PPID = %d(%s)\n",
  24976. - current->pid, current->comm,
  24977. - parent_tsk->pid, parent_tsk->comm);
  24978. - ret = 1;
  24979. - }
  24980. - put_cred(parent_cred);
  24981. -out:
  24982. - put_task_struct(parent_tsk);
  24983. -
  24984. - return ret;
  24985. -}
  24986. -#endif // End of CONFIG_SEC_RESTRICT_SETUID
  24987. -
  24988. /*
  24989. * Returns true if current's euid is same as p's uid or euid,
  24990. * or has CAP_SYS_NICE to p's user_ns.
  24991. @@ -418,9 +367,6 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
  24992. */
  24993. void kernel_restart(char *cmd)
  24994. {
  24995. -#ifdef CONFIG_SEC_MONITOR_BATTERY_REMOVAL
  24996. - kernel_sec_set_normal_pwroff(1);
  24997. -#endif
  24998. kernel_restart_prepare(cmd);
  24999. if (!cmd)
  25000. printk(KERN_EMERG "Restarting system.\n");
  25001. @@ -462,9 +408,6 @@ EXPORT_SYMBOL_GPL(kernel_halt);
  25002. */
  25003. void kernel_power_off(void)
  25004. {
  25005. -#ifdef CONFIG_SEC_MONITOR_BATTERY_REMOVAL
  25006. - kernel_sec_set_normal_pwroff(1);
  25007. -#endif
  25008. kernel_shutdown_prepare(SYSTEM_POWER_OFF);
  25009. if (pm_power_off_prepare)
  25010. pm_power_off_prepare();
  25011. @@ -573,11 +516,8 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
  25012. return ret;
  25013. }
  25014.  
  25015. -extern void do_emergency_remount(struct work_struct *work);
  25016. -
  25017. static void deferred_cad(struct work_struct *dummy)
  25018. {
  25019. - do_emergency_remount(NULL);
  25020. kernel_restart(NULL);
  25021. }
  25022.  
  25023. @@ -620,14 +560,6 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
  25024. struct cred *new;
  25025. int retval;
  25026.  
  25027. -#if defined CONFIG_SEC_RESTRICT_SETUID
  25028. - if(rgid == 0 || egid == 0)
  25029. - {
  25030. - if(sec_restrict_uid())
  25031. - return -EACCES;
  25032. - }
  25033. -#endif // End of CONFIG_SEC_RESTRICT_SETUID
  25034. -
  25035. new = prepare_creds();
  25036. if (!new)
  25037. return -ENOMEM;
  25038. @@ -675,14 +607,6 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
  25039. struct cred *new;
  25040. int retval;
  25041.  
  25042. -#if defined CONFIG_SEC_RESTRICT_SETUID
  25043. - if(gid == 0)
  25044. - {
  25045. - if(sec_restrict_uid())
  25046. - return -EACCES;
  25047. - }
  25048. -#endif // End of CONFIG_SEC_RESTRICT_SETUID
  25049. -
  25050. new = prepare_creds();
  25051. if (!new)
  25052. return -ENOMEM;
  25053. @@ -753,14 +677,6 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
  25054. struct cred *new;
  25055. int retval;
  25056.  
  25057. -#if defined CONFIG_SEC_RESTRICT_SETUID
  25058. - if(ruid == 0 || euid == 0)
  25059. - {
  25060. - if(sec_restrict_uid())
  25061. - return -EACCES;
  25062. - }
  25063. -#endif // End of CONFIG_SEC_RESTRICT_SETUID
  25064. -
  25065. new = prepare_creds();
  25066. if (!new)
  25067. return -ENOMEM;
  25068. @@ -822,14 +738,6 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
  25069. struct cred *new;
  25070. int retval;
  25071.  
  25072. -#if defined CONFIG_SEC_RESTRICT_SETUID
  25073. - if(uid == 0)
  25074. - {
  25075. - if(sec_restrict_uid())
  25076. - return -EACCES;
  25077. - }
  25078. -#endif // End of CONFIG_SEC_RESTRICT_SETUID
  25079. -
  25080. new = prepare_creds();
  25081. if (!new)
  25082. return -ENOMEM;
  25083. @@ -871,14 +779,6 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
  25084. struct cred *new;
  25085. int retval;
  25086.  
  25087. -#if defined CONFIG_SEC_RESTRICT_SETUID
  25088. - if(ruid == 0 || euid == 0 || suid == 0)
  25089. - {
  25090. - if(sec_restrict_uid())
  25091. - return -EACCES;
  25092. - }
  25093. -#endif // End of CONFIG_SEC_RESTRICT_SETUID
  25094. -
  25095. new = prepare_creds();
  25096. if (!new)
  25097. return -ENOMEM;
  25098. @@ -944,14 +844,6 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
  25099. struct cred *new;
  25100. int retval;
  25101.  
  25102. -#if defined CONFIG_SEC_RESTRICT_SETUID
  25103. - if(rgid == 0 || egid == 0 || sgid == 0)
  25104. - {
  25105. - if(sec_restrict_uid())
  25106. - return -EACCES;
  25107. - }
  25108. -#endif // End of CONFIG_SEC_RESTRICT_SETUID
  25109. -
  25110. new = prepare_creds();
  25111. if (!new)
  25112. return -ENOMEM;
  25113. @@ -1309,7 +1201,7 @@ static int override_release(char __user *release, size_t len)
  25114. rest++;
  25115. }
  25116. v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
  25117. - copy = clamp_t(size_t, len, 1, sizeof(buf));
  25118. + copy = min(sizeof(buf), max_t(size_t, 1, len));
  25119. copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
  25120. ret = copy_to_user(release, buf, copy + 1);
  25121. }
  25122. @@ -2076,9 +1968,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
  25123. unsigned long, arg4, unsigned long, arg5)
  25124. {
  25125. struct task_struct *me = current;
  25126. -#ifndef CONFIG_SEC_H_PROJECT
  25127. struct task_struct *tsk;
  25128. -#endif
  25129. unsigned char comm[sizeof(me->comm)];
  25130. long error;
  25131.  
  25132. @@ -2238,15 +2128,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
  25133. case PR_SET_VMA:
  25134. error = prctl_set_vma(arg2, arg3, arg4, arg5);
  25135. break;
  25136. - /* remove this case because of sidesync call mute for H-projects */
  25137. -
  25138. -#ifndef CONFIG_SEC_H_PROJECT
  25139. case PR_SET_TIMERSLACK_PID:
  25140. - if (current->pid != (pid_t)arg3 &&
  25141. + if (task_pid_vnr(current) != (pid_t)arg3 &&
  25142. !capable(CAP_SYS_NICE))
  25143. return -EPERM;
  25144. rcu_read_lock();
  25145. - tsk = find_task_by_pid_ns((pid_t)arg3, &init_pid_ns);
  25146. + tsk = find_task_by_vpid((pid_t)arg3);
  25147. if (tsk == NULL) {
  25148. rcu_read_unlock();
  25149. return -EINVAL;
  25150. @@ -2261,7 +2148,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
  25151. put_task_struct(tsk);
  25152. error = 0;
  25153. break;
  25154. -#endif
  25155. default:
  25156. error = -EINVAL;
  25157. break;
  25158. diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
  25159. index ed0c66d..3cd6b12 100644
  25160. --- a/kernel/time/timekeeping.c
  25161. +++ b/kernel/time/timekeeping.c
  25162. @@ -296,7 +296,7 @@ void ktime_get_ts(struct timespec *ts)
  25163. } while (read_seqretry(&timekeeper.lock, seq));
  25164.  
  25165. set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
  25166. - (s64)ts->tv_nsec + tomono.tv_nsec + nsecs);
  25167. + ts->tv_nsec + tomono.tv_nsec + nsecs);
  25168. }
  25169. EXPORT_SYMBOL_GPL(ktime_get_ts);
  25170.  
  25171. @@ -1147,12 +1147,14 @@ out:
  25172. */
  25173. void getboottime(struct timespec *ts)
  25174. {
  25175. - time_t tv_sec = timekeeper.wall_to_monotonic.tv_sec +
  25176. - timekeeper.total_sleep_time.tv_sec;
  25177. - s64 tv_nsec = (s64)timekeeper.wall_to_monotonic.tv_nsec +
  25178. - timekeeper.total_sleep_time.tv_nsec;
  25179. -
  25180. - set_normalized_timespec(ts, -tv_sec, -tv_nsec);
  25181. + struct timespec boottime = {
  25182. + .tv_sec = timekeeper.wall_to_monotonic.tv_sec +
  25183. + timekeeper.total_sleep_time.tv_sec,
  25184. + .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
  25185. + timekeeper.total_sleep_time.tv_nsec
  25186. + };
  25187. +
  25188. + set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
  25189. }
  25190. EXPORT_SYMBOL_GPL(getboottime);
  25191.  
  25192. @@ -1184,7 +1186,7 @@ void get_monotonic_boottime(struct timespec *ts)
  25193. } while (read_seqretry(&timekeeper.lock, seq));
  25194.  
  25195. set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
  25196. - (s64)ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
  25197. + (s64)ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
  25198. }
  25199. EXPORT_SYMBOL_GPL(get_monotonic_boottime);
  25200.  
  25201. @@ -1254,7 +1256,7 @@ struct timespec get_monotonic_coarse(void)
  25202. } while (read_seqretry(&timekeeper.lock, seq));
  25203.  
  25204. set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
  25205. - (s64)now.tv_nsec + mono.tv_nsec);
  25206. + now.tv_nsec + mono.tv_nsec);
  25207. return now;
  25208. }
  25209.  
  25210. diff --git a/mm/vmscan.c b/mm/vmscan.c
  25211. index dbbc164..e2e43aa 100644
  25212. --- a/mm/vmscan.c
  25213. +++ b/mm/vmscan.c
  25214. @@ -19,7 +19,6 @@
  25215. #include <linux/pagemap.h>
  25216. #include <linux/init.h>
  25217. #include <linux/highmem.h>
  25218. -#include <linux/vmpressure.h>
  25219. #include <linux/vmstat.h>
  25220. #include <linux/file.h>
  25221. #include <linux/writeback.h>
  25222. @@ -54,19 +53,6 @@
  25223. #define CREATE_TRACE_POINTS
  25224. #include <trace/events/vmscan.h>
  25225.  
  25226. -#ifdef CONFIG_INCREASE_MAXIMUM_SWAPPINESS
  25227. -int max_swappiness = 200;
  25228. -#endif
  25229. -
  25230. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25231. -struct rtcc_control {
  25232. - int nr_anon;
  25233. - int nr_file;
  25234. - int swappiness;
  25235. - int nr_swapped;
  25236. -};
  25237. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25238. -
  25239. struct scan_control {
  25240. /* Incremented by the number of inactive pages that were scanned */
  25241. unsigned long nr_scanned;
  25242. @@ -92,8 +78,6 @@ struct scan_control {
  25243.  
  25244. int order;
  25245.  
  25246. - int swappiness;
  25247. -
  25248. /* Scan (total_size >> priority) pages at once */
  25249. int priority;
  25250.  
  25251. @@ -108,10 +92,6 @@ struct scan_control {
  25252. * are scanned.
  25253. */
  25254. nodemask_t *nodemask;
  25255. -
  25256. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25257. - struct rtcc_control *rc;
  25258. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25259. };
  25260.  
  25261. struct mem_cgroup_zone {
  25262. @@ -155,17 +135,6 @@ struct mem_cgroup_zone {
  25263. int vm_swappiness = 60;
  25264. long vm_total_pages; /* The total number of pages which the VM controls */
  25265.  
  25266. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25267. -extern int get_rtcc_status(void);
  25268. -atomic_t kswapd_running = ATOMIC_INIT(1);
  25269. -long nr_kswapd_swapped = 0;
  25270. -
  25271. -static bool rtcc_reclaim(struct scan_control *sc)
  25272. -{
  25273. - return (sc->rc != NULL);
  25274. -}
  25275. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25276. -
  25277. static LIST_HEAD(shrinker_list);
  25278. static DECLARE_RWSEM(shrinker_rwsem);
  25279.  
  25280. @@ -196,11 +165,9 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
  25281. nr = zone_page_state(zone, NR_ACTIVE_FILE) +
  25282. zone_page_state(zone, NR_INACTIVE_FILE);
  25283.  
  25284. -#ifndef CONFIG_RUNTIME_COMPCACHE
  25285. if (get_nr_swap_pages() > 0)
  25286. nr += zone_page_state(zone, NR_ACTIVE_ANON) +
  25287. zone_page_state(zone, NR_INACTIVE_ANON);
  25288. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25289.  
  25290. return nr;
  25291. }
  25292. @@ -293,7 +260,7 @@ unsigned long shrink_slab(struct shrink_control *shrink,
  25293.  
  25294. list_for_each_entry(shrinker, &shrinker_list, list) {
  25295. unsigned long long delta;
  25296. - long total_scan, pages_got;
  25297. + long total_scan;
  25298. long max_pass;
  25299. int shrink_ret = 0;
  25300. long nr;
  25301. @@ -359,14 +326,10 @@ unsigned long shrink_slab(struct shrink_control *shrink,
  25302. batch_size);
  25303. if (shrink_ret == -1)
  25304. break;
  25305. - if (shrink_ret < nr_before) {
  25306. - pages_got = nr_before - shrink_ret;
  25307. - ret += pages_got;
  25308. - total_scan -= pages_got > batch_size ? pages_got : batch_size;
  25309. - } else {
  25310. - total_scan -= batch_size;
  25311. - }
  25312. + if (shrink_ret < nr_before)
  25313. + ret += nr_before - shrink_ret;
  25314. count_vm_events(SLABS_SCANNED, batch_size);
  25315. + total_scan -= batch_size;
  25316.  
  25317. cond_resched();
  25318. }
  25319. @@ -511,6 +474,18 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
  25320. if (!PageWriteback(page)) {
  25321. /* synchronous write or broken a_ops? */
  25322. ClearPageReclaim(page);
  25323. + if (PageError(page) && PageSwapCache(page)) {
  25324. + ClearPageError(page);
  25325. + /*
  25326. + * We lock the page here because it is required
  25327. + * to free the swp space later in
  25328. + * shrink_page_list. But the page may be
  25329. + * unlocked by functions like
  25330. + * handle_write_error.
  25331. + */
  25332. + __set_page_locked(page);
  25333. + return PAGE_ACTIVATE;
  25334. + }
  25335. }
  25336. trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
  25337. inc_zone_page_state(page, NR_VMSCAN_WRITE);
  25338. @@ -1151,11 +1126,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  25339. mem_cgroup_lru_del_list(page, lru);
  25340. list_move(&page->lru, dst);
  25341. nr_taken += hpage_nr_pages(page);
  25342. -#if defined(CONFIG_CMA_PAGE_COUNTING)
  25343. - if (PageCMA(page))
  25344. - __mod_zone_page_state(page_zone(page),
  25345. - NR_FREE_CMA_PAGES + 1 + lru, -1);
  25346. -#endif
  25347. break;
  25348.  
  25349. case -EBUSY:
  25350. @@ -1233,11 +1203,6 @@ static int too_many_isolated(struct zone *zone, int file,
  25351. {
  25352. unsigned long inactive, isolated;
  25353.  
  25354. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25355. - if (get_rtcc_status() == 1)
  25356. - return 0;
  25357. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25358. -
  25359. if (current_is_kswapd())
  25360. return 0;
  25361.  
  25362. @@ -1414,15 +1379,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
  25363. (nr_taken >> (DEF_PRIORITY - sc->priority)))
  25364. wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
  25365.  
  25366. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25367. - if (!file) {
  25368. - if (rtcc_reclaim(sc))
  25369. - sc->rc->nr_swapped += nr_reclaimed;
  25370. - else
  25371. - nr_kswapd_swapped += nr_reclaimed;
  25372. - }
  25373. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25374. -
  25375. trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
  25376. zone_idx(zone),
  25377. nr_scanned, nr_reclaimed,
  25378. @@ -1456,9 +1412,6 @@ static void move_active_pages_to_lru(struct zone *zone,
  25379. {
  25380. unsigned long pgmoved = 0;
  25381. struct page *page;
  25382. -#if defined(CONFIG_CMA_PAGE_COUNTING)
  25383. - unsigned long nr_cma = 0;
  25384. -#endif
  25385.  
  25386. while (!list_empty(list)) {
  25387. struct lruvec *lruvec;
  25388. @@ -1471,10 +1424,6 @@ static void move_active_pages_to_lru(struct zone *zone,
  25389. lruvec = mem_cgroup_lru_add_list(zone, page, lru);
  25390. list_move(&page->lru, &lruvec->lists[lru]);
  25391. pgmoved += hpage_nr_pages(page);
  25392. -#if defined(CONFIG_CMA_PAGE_COUNTING)
  25393. - if (PageCMA(page))
  25394. - nr_cma++;
  25395. -#endif
  25396.  
  25397. if (put_page_testzero(page)) {
  25398. __ClearPageLRU(page);
  25399. @@ -1490,10 +1439,6 @@ static void move_active_pages_to_lru(struct zone *zone,
  25400. }
  25401. }
  25402. __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
  25403. -#if defined(CONFIG_CMA_PAGE_COUNTING)
  25404. - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES + 1 + lru, nr_cma);
  25405. -#endif
  25406. -
  25407. if (!is_active_lru(lru))
  25408. __count_vm_events(PGDEACTIVATE, pgmoved);
  25409. }
  25410. @@ -1700,12 +1645,8 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
  25411.  
  25412. static int vmscan_swappiness(struct scan_control *sc)
  25413. {
  25414. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25415. - if (rtcc_reclaim(sc))
  25416. - return sc->rc->swappiness;
  25417. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25418. if (global_reclaim(sc))
  25419. - return sc->swappiness;
  25420. + return vm_swappiness;
  25421. return mem_cgroup_swappiness(sc->target_mem_cgroup);
  25422. }
  25423.  
  25424. @@ -1775,11 +1716,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
  25425. * This scanning priority is essentially the inverse of IO cost.
  25426. */
  25427. anon_prio = vmscan_swappiness(sc);
  25428. -#ifdef CONFIG_INCREASE_MAXIMUM_SWAPPINESS
  25429. - file_prio = max_swappiness - vmscan_swappiness(sc);
  25430. -#else
  25431. file_prio = 200 - vmscan_swappiness(sc);
  25432. -#endif
  25433.  
  25434. /*
  25435. * OK, so we have swap space and a fair amount of page cache
  25436. @@ -1921,30 +1858,15 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
  25437. unsigned long nr_reclaimed, nr_scanned;
  25438. unsigned long nr_to_reclaim = sc->nr_to_reclaim;
  25439. struct blk_plug plug;
  25440. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25441. - struct rtcc_control *rc = sc->rc;
  25442. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25443.  
  25444. restart:
  25445. nr_reclaimed = 0;
  25446. nr_scanned = sc->nr_scanned;
  25447. get_scan_count(mz, sc, nr);
  25448.  
  25449. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25450. - if (rtcc_reclaim(sc))
  25451. - nr[LRU_INACTIVE_FILE] = nr[LRU_ACTIVE_FILE] = 0;
  25452. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25453. -
  25454. blk_start_plug(&plug);
  25455. while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
  25456. nr[LRU_INACTIVE_FILE]) {
  25457. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25458. - if (rtcc_reclaim(sc)) {
  25459. - if (rc->nr_swapped >= rc->nr_anon)
  25460. - nr[LRU_INACTIVE_ANON] = nr[LRU_ACTIVE_ANON] = 0;
  25461. - }
  25462. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25463. -
  25464. for_each_evictable_lru(lru) {
  25465. if (nr[lru]) {
  25466. nr_to_scan = min_t(unsigned long,
  25467. @@ -1988,7 +1910,6 @@ restart:
  25468.  
  25469. static void shrink_zone(struct zone *zone, struct scan_control *sc)
  25470. {
  25471. - unsigned long nr_reclaimed, nr_scanned;
  25472. struct mem_cgroup *root = sc->target_mem_cgroup;
  25473. struct mem_cgroup_reclaim_cookie reclaim = {
  25474. .zone = zone,
  25475. @@ -1996,9 +1917,6 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
  25476. };
  25477. struct mem_cgroup *memcg;
  25478.  
  25479. - nr_reclaimed = sc->nr_reclaimed;
  25480. - nr_scanned = sc->nr_scanned;
  25481. -
  25482. memcg = mem_cgroup_iter(root, NULL, &reclaim);
  25483. do {
  25484. struct mem_cgroup_zone mz = {
  25485. @@ -2023,10 +1941,6 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
  25486. }
  25487. memcg = mem_cgroup_iter(root, memcg, &reclaim);
  25488. } while (memcg);
  25489. -
  25490. - vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
  25491. - sc->nr_scanned - nr_scanned,
  25492. - sc->nr_reclaimed - nr_reclaimed);
  25493. }
  25494.  
  25495. /* Returns true if compaction should go ahead for a high-order request */
  25496. @@ -2172,140 +2086,6 @@ static bool all_unreclaimable(struct zonelist *zonelist,
  25497. return true;
  25498. }
  25499.  
  25500. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25501. -/*
  25502. - * This is the main entry point to direct page reclaim for RTCC.
  25503. - *
  25504. - * If a full scan of the inactive list fails to free enough memory then we
  25505. - * are "out of memory" and something needs to be killed.
  25506. - *
  25507. - * If the caller is !__GFP_FS then the probability of a failure is reasonably
  25508. - * high - the zone may be full of dirty or under-writeback pages, which this
  25509. - * caller can't do much about. We kick the writeback threads and take explicit
  25510. - * naps in the hope that some of these pages can be written. But if the
  25511. - * allocating task holds filesystem locks which prevent writeout this might not
  25512. - * work, and the allocation attempt will fail.
  25513. - *
  25514. - * returns: 0, if no pages reclaimed
  25515. - * else, the number of pages reclaimed
  25516. - */
  25517. -static unsigned long rtcc_do_try_to_free_pages(struct zonelist *zonelist, struct scan_control *sc, struct shrink_control *shrink)
  25518. -{
  25519. - unsigned long total_scanned = 0;
  25520. - unsigned long writeback_threshold;
  25521. - bool aborted_reclaim;
  25522. -
  25523. - delayacct_freepages_start();
  25524. -
  25525. - if (global_reclaim(sc))
  25526. - count_vm_event(ALLOCSTALL);
  25527. -
  25528. - do {
  25529. - sc->nr_scanned = 0;
  25530. - aborted_reclaim = shrink_zones(zonelist, sc);
  25531. -
  25532. - total_scanned += sc->nr_scanned;
  25533. - if (sc->nr_reclaimed >= sc->nr_to_reclaim)
  25534. - goto out;
  25535. -
  25536. - /*
  25537. - * Try to write back as many pages as we just scanned. This
  25538. - * tends to cause slow streaming writers to write data to the
  25539. - * disk smoothly, at the dirtying rate, which is nice. But
  25540. - * that's undesirable in laptop mode, where we *want* lumpy
  25541. - * writeout. So in laptop mode, write out the whole world.
  25542. - */
  25543. - writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
  25544. - if (total_scanned > writeback_threshold) {
  25545. - wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
  25546. - WB_REASON_TRY_TO_FREE_PAGES);
  25547. - sc->may_writepage = 1;
  25548. - }
  25549. -
  25550. - /* Take a nap, wait for some writeback to complete */
  25551. - if (!sc->hibernation_mode && sc->nr_scanned &&
  25552. - sc->priority < DEF_PRIORITY - 2) {
  25553. - struct zone *preferred_zone;
  25554. -
  25555. - first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
  25556. - &cpuset_current_mems_allowed,
  25557. - &preferred_zone);
  25558. - wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
  25559. - }
  25560. - } while (--sc->priority >= 0);
  25561. -
  25562. -out:
  25563. - delayacct_freepages_end();
  25564. -
  25565. - if (sc->nr_reclaimed)
  25566. - return sc->nr_reclaimed;
  25567. -
  25568. - /*
  25569. - * As hibernation is going on, kswapd is freezed so that it can't mark
  25570. - * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
  25571. - * check.
  25572. - */
  25573. - if (oom_killer_disabled)
  25574. - return 0;
  25575. -
  25576. - /* Aborted reclaim to try compaction? don't OOM, then */
  25577. - if (aborted_reclaim)
  25578. - return 1;
  25579. -
  25580. - /* top priority shrink_zones still had more to do? don't OOM, then */
  25581. - if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
  25582. - return 1;
  25583. -
  25584. - return 0;
  25585. -}
  25586. -
  25587. -unsigned long rtcc_reclaim_pages(unsigned long nr_to_reclaim, int swappiness, unsigned long *nr_swapped)
  25588. -{
  25589. - struct reclaim_state reclaim_state;
  25590. -
  25591. - struct scan_control sc = {
  25592. - .gfp_mask = GFP_HIGHUSER_MOVABLE,
  25593. - .may_swap = 1,
  25594. - .may_unmap = 1,
  25595. - .may_writepage = 1,
  25596. - .nr_to_reclaim = nr_to_reclaim,
  25597. - .target_mem_cgroup = NULL,
  25598. - .order = 0,
  25599. - .priority = DEF_PRIORITY/2,
  25600. - };
  25601. - struct shrink_control shrink = {
  25602. - .gfp_mask = sc.gfp_mask,
  25603. - };
  25604. - struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
  25605. - struct task_struct *p = current;
  25606. - unsigned long nr_reclaimed;
  25607. - struct rtcc_control rc;
  25608. -
  25609. - rc.swappiness = swappiness;
  25610. - rc.nr_anon = nr_to_reclaim * swappiness / 200;
  25611. - rc.nr_file = nr_to_reclaim - rc.nr_anon;
  25612. - rc.nr_swapped = 0;
  25613. - sc.rc = &rc;
  25614. -
  25615. - if (swappiness <= 1)
  25616. - sc.may_swap = 0;
  25617. -
  25618. - p->flags |= PF_MEMALLOC;
  25619. - lockdep_set_current_reclaim_state(sc.gfp_mask);
  25620. - reclaim_state.reclaimed_slab = 0;
  25621. - p->reclaim_state = &reclaim_state;
  25622. -
  25623. - nr_reclaimed = rtcc_do_try_to_free_pages(zonelist, &sc, &shrink);
  25624. - *nr_swapped = rc.nr_swapped;
  25625. -
  25626. - p->reclaim_state = NULL;
  25627. - lockdep_clear_current_reclaim_state();
  25628. - p->flags &= ~PF_MEMALLOC;
  25629. -
  25630. - return nr_reclaimed;
  25631. -}
  25632. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25633. -
  25634. /*
  25635. * This is the main entry point to direct page reclaim.
  25636. *
  25637. @@ -2339,7 +2119,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  25638. count_vm_event(ALLOCSTALL);
  25639.  
  25640. do {
  25641. - vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority);
  25642. sc->nr_scanned = 0;
  25643. aborted_reclaim = shrink_zones(zonelist, sc);
  25644.  
  25645. @@ -2357,7 +2136,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  25646. lru_pages += zone_reclaimable_pages(zone);
  25647. }
  25648.  
  25649. - shrink->priority = sc->priority;
  25650. shrink_slab(shrink, sc->nr_scanned, lru_pages);
  25651. if (reclaim_state) {
  25652. sc->nr_reclaimed += reclaim_state->reclaimed_slab;
  25653. @@ -2428,16 +2206,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
  25654. .may_writepage = !laptop_mode,
  25655. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  25656. .may_unmap = 1,
  25657. -#if defined(CONFIG_DIRECT_RECLAIM_FILE_PAGES_ONLY) || defined(CONFIG_RUNTIME_COMPCACHE)
  25658. - .may_swap = 0,
  25659. -#else
  25660. .may_swap = 1,
  25661. -#endif
  25662. -#ifdef CONFIG_ZSWAP
  25663. - .swappiness = vm_swappiness / 2,
  25664. -#else
  25665. - .swappiness = vm_swappiness,
  25666. -#endif
  25667. .order = order,
  25668. .priority = DEF_PRIORITY,
  25669. .target_mem_cgroup = NULL,
  25670. @@ -2472,7 +2241,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
  25671. .may_unmap = 1,
  25672. .may_swap = !noswap,
  25673. .order = 0,
  25674. - .swappiness = vm_swappiness,
  25675. .priority = 0,
  25676. .target_mem_cgroup = memcg,
  25677. };
  25678. @@ -2516,7 +2284,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
  25679. .may_swap = !noswap,
  25680. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  25681. .order = 0,
  25682. - .swappiness = vm_swappiness,
  25683. .priority = DEF_PRIORITY,
  25684. .target_mem_cgroup = memcg,
  25685. .nodemask = NULL, /* we don't care the placement */
  25686. @@ -2608,12 +2375,8 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
  25687. for (i = 0; i <= classzone_idx; i++)
  25688. present_pages += pgdat->node_zones[i].present_pages;
  25689.  
  25690. -#ifdef CONFIG_TIGHT_PGDAT_BALANCE
  25691. - return balanced_pages >= (present_pages >> 1);
  25692. -#else
  25693. /* A special case here: if zone has no page, we think it's balanced */
  25694. return balanced_pages >= (present_pages >> 2);
  25695. -#endif
  25696. }
  25697.  
  25698. /* is kswapd sleeping prematurely? */
  25699. @@ -2687,7 +2450,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
  25700. static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
  25701. int *classzone_idx)
  25702. {
  25703. - struct zone *unbalanced_zone;
  25704. + int all_zones_ok;
  25705. unsigned long balanced;
  25706. int i;
  25707. int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
  25708. @@ -2698,18 +2461,13 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
  25709. struct scan_control sc = {
  25710. .gfp_mask = GFP_KERNEL,
  25711. .may_unmap = 1,
  25712. -#ifndef CONFIG_KSWAPD_NOSWAP
  25713. .may_swap = 1,
  25714. -#else
  25715. - .may_swap = 0,
  25716. -#endif /* CONFIG_KSWAPD_NOSWAP */
  25717. /*
  25718. * kswapd doesn't want to be bailed out while reclaim. because
  25719. * we want to put equal scanning pressure on each zone.
  25720. */
  25721. .nr_to_reclaim = ULONG_MAX,
  25722. .order = order,
  25723. - .swappiness = vm_swappiness,
  25724. .target_mem_cgroup = NULL,
  25725. };
  25726. struct shrink_control shrink = {
  25727. @@ -2726,7 +2484,7 @@ loop_again:
  25728. unsigned long lru_pages = 0;
  25729. int has_under_min_watermark_zone = 0;
  25730.  
  25731. - unbalanced_zone = NULL;
  25732. + all_zones_ok = 1;
  25733. balanced = 0;
  25734.  
  25735. /*
  25736. @@ -2841,7 +2599,6 @@ loop_again:
  25737. shrink_zone(zone, &sc);
  25738.  
  25739. reclaim_state->reclaimed_slab = 0;
  25740. - shrink.priority = sc.priority;
  25741. nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
  25742. sc.nr_reclaimed += reclaim_state->reclaimed_slab;
  25743. total_scanned += sc.nr_scanned;
  25744. @@ -2864,7 +2621,7 @@ loop_again:
  25745. }
  25746.  
  25747. if (!zone_balanced(zone, testorder, 0, end_zone)) {
  25748. - unbalanced_zone = zone;
  25749. + all_zones_ok = 0;
  25750. /*
  25751. * We are still under min water mark. This
  25752. * means that we have a GFP_ATOMIC allocation
  25753. @@ -2887,7 +2644,7 @@ loop_again:
  25754. }
  25755.  
  25756. }
  25757. - if (!unbalanced_zone || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
  25758. + if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
  25759. break; /* kswapd: all done */
  25760. /*
  25761. * OK, kswapd is getting into trouble. Take a nap, then take
  25762. @@ -2897,7 +2654,7 @@ loop_again:
  25763. if (has_under_min_watermark_zone)
  25764. count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
  25765. else
  25766. - wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
  25767. + congestion_wait(BLK_RW_ASYNC, HZ/10);
  25768. }
  25769.  
  25770. /*
  25771. @@ -2916,7 +2673,7 @@ out:
  25772. * high-order: Balanced zones must make up at least 25% of the node
  25773. * for the node to be balanced
  25774. */
  25775. - if (unbalanced_zone && (!order || !pgdat_balanced(pgdat, balanced, *classzone_idx))) {
  25776. + if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
  25777. cond_resched();
  25778.  
  25779. try_to_freeze();
  25780. @@ -3005,10 +2762,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  25781. if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
  25782. trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
  25783.  
  25784. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25785. - atomic_set(&kswapd_running, 0);
  25786. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25787. -
  25788. /*
  25789. * vmstat counters are not perfectly accurate and the estimated
  25790. * value for counters such as NR_FREE_PAGES can deviate from the
  25791. @@ -3130,10 +2883,6 @@ static int kswapd(void *p)
  25792. if (kthread_should_stop())
  25793. break;
  25794.  
  25795. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25796. - atomic_set(&kswapd_running, 1);
  25797. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25798. -
  25799. /*
  25800. * We can speed up thawing tasks if we don't call balance_pgdat
  25801. * after returning from the refrigerator
  25802. @@ -3174,6 +2923,27 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
  25803. wake_up_interruptible(&pgdat->kswapd_wait);
  25804. }
  25805.  
  25806. +/*
  25807. + * The reclaimable count would be mostly accurate.
  25808. + * The less reclaimable pages may be
  25809. + * - mlocked pages, which will be moved to unevictable list when encountered
  25810. + * - mapped pages, which may require several travels to be reclaimed
  25811. + * - dirty pages, which is not "instantly" reclaimable
  25812. + */
  25813. +unsigned long global_reclaimable_pages(void)
  25814. +{
  25815. + int nr;
  25816. +
  25817. + nr = global_page_state(NR_ACTIVE_FILE) +
  25818. + global_page_state(NR_INACTIVE_FILE);
  25819. +
  25820. + if (get_nr_swap_pages() > 0)
  25821. + nr += global_page_state(NR_ACTIVE_ANON) +
  25822. + global_page_state(NR_INACTIVE_ANON);
  25823. +
  25824. + return nr;
  25825. +}
  25826. +
  25827. #ifdef CONFIG_HIBERNATION
  25828. /*
  25829. * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
  25830. @@ -3194,7 +2964,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
  25831. .nr_to_reclaim = nr_to_reclaim,
  25832. .hibernation_mode = 1,
  25833. .order = 0,
  25834. - .swappiness = vm_swappiness,
  25835. .priority = DEF_PRIORITY,
  25836. };
  25837. struct shrink_control shrink = {
  25838. @@ -3376,16 +3145,11 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
  25839. struct scan_control sc = {
  25840. .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
  25841. .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
  25842. -#ifdef CONFIG_RUNTIME_COMPCACHE
  25843. - .may_swap = 0,
  25844. -#else
  25845. .may_swap = 1,
  25846. -#endif /* CONFIG_RUNTIME_COMPCACHE */
  25847. .nr_to_reclaim = max_t(unsigned long, nr_pages,
  25848. SWAP_CLUSTER_MAX),
  25849. .gfp_mask = gfp_mask,
  25850. .order = order,
  25851. - .swappiness = vm_swappiness,
  25852. .priority = ZONE_RECLAIM_PRIORITY,
  25853. };
  25854. struct shrink_control shrink = {
  25855. @@ -3430,7 +3194,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
  25856. unsigned long lru_pages = zone_reclaimable_pages(zone);
  25857.  
  25858. /* No reclaimable slab or very low memory pressure */
  25859. - shrink.priority = sc.priority;
  25860. if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
  25861. break;
  25862.  
  25863. @@ -3577,11 +3340,6 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
  25864. LRU_UNEVICTABLE, lru);
  25865. list_move(&page->lru, &lruvec->lists[lru]);
  25866. __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
  25867. -#if defined(CONFIG_CMA_PAGE_COUNTING)
  25868. - if (PageCMA(page))
  25869. - __inc_zone_state(zone,
  25870. - NR_FREE_CMA_PAGES + 1 + lru);
  25871. -#endif
  25872. pgrescued++;
  25873. }
  25874. }
  25875. diff --git a/net/core/dev.c b/net/core/dev.c
  25876. index cd46460..542d014 100644
  25877. --- a/net/core/dev.c
  25878. +++ b/net/core/dev.c
  25879. @@ -6274,10 +6274,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  25880. oldsd->output_queue = NULL;
  25881. oldsd->output_queue_tailp = &oldsd->output_queue;
  25882. }
  25883. - /* Append NAPI poll list from offline CPU. */
  25884. - if (!list_empty(&oldsd->poll_list)) {
  25885. - list_splice_init(&oldsd->poll_list, &sd->poll_list);
  25886. - raise_softirq_irqoff(NET_RX_SOFTIRQ);
  25887. + /* Append NAPI poll list from offline CPU, with one exception :
  25888. + * process_backlog() must be called by cpu owning percpu backlog.
  25889. + * We properly handle process_queue & input_pkt_queue later.
  25890. + */
  25891. + while (!list_empty(&oldsd->poll_list)) {
  25892. + struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
  25893. + struct napi_struct,
  25894. + poll_list);
  25895. +
  25896. + list_del_init(&napi->poll_list);
  25897. + if (napi->poll == process_backlog)
  25898. + napi->state = 0;
  25899. + else
  25900. + ____napi_schedule(sd, napi);
  25901. }
  25902.  
  25903. raise_softirq_irqoff(NET_TX_SOFTIRQ);
  25904. @@ -6288,7 +6298,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  25905. netif_rx(skb);
  25906. input_queue_head_incr(oldsd);
  25907. }
  25908. - while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
  25909. + while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
  25910. netif_rx(skb);
  25911. input_queue_head_incr(oldsd);
  25912. }
  25913. diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
  25914. index 1f78f21..a5f6665 100644
  25915. --- a/net/netfilter/xt_qtaguid.c
  25916. +++ b/net/netfilter/xt_qtaguid.c
  25917. @@ -1298,6 +1298,38 @@ static void iface_stat_update(struct net_device *net_dev, bool stash_only)
  25918. spin_unlock_bh(&iface_stat_list_lock);
  25919. }
  25920.  
  25921. +/* Guaranteed to return a net_device that has a name */
  25922. +static void get_dev_and_dir(const struct sk_buff *skb,
  25923. + struct xt_action_param *par,
  25924. + enum ifs_tx_rx *direction,
  25925. + const struct net_device **el_dev)
  25926. +{
  25927. + BUG_ON(!direction || !el_dev);
  25928. +
  25929. + if (par->in) {
  25930. + *el_dev = par->in;
  25931. + *direction = IFS_RX;
  25932. + } else if (par->out) {
  25933. + *el_dev = par->out;
  25934. + *direction = IFS_TX;
  25935. + } else {
  25936. + pr_err("qtaguid[%d]: %s(): no par->in/out?!!\n",
  25937. + par->hooknum, __func__);
  25938. + BUG();
  25939. + }
  25940. + if (unlikely(!(*el_dev)->name)) {
  25941. + pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
  25942. + par->hooknum, __func__);
  25943. + BUG();
  25944. + }
  25945. + if (skb->dev && *el_dev != skb->dev) {
  25946. + MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs par->%s=%p %s\n",
  25947. + par->hooknum, skb->dev, skb->dev->name,
  25948. + *direction == IFS_RX ? "in" : "out", *el_dev,
  25949. + (*el_dev)->name);
  25950. + }
  25951. +}
  25952. +
  25953. /*
  25954. * Update stats for the specified interface from the skb.
  25955. * Do nothing if the entry
  25956. @@ -1309,50 +1341,27 @@ static void iface_stat_update_from_skb(const struct sk_buff *skb,
  25957. {
  25958. struct iface_stat *entry;
  25959. const struct net_device *el_dev;
  25960. - enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
  25961. + enum ifs_tx_rx direction;
  25962. int bytes = skb->len;
  25963. int proto;
  25964.  
  25965. - if (!skb->dev) {
  25966. - MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
  25967. - el_dev = par->in ? : par->out;
  25968. - } else {
  25969. - const struct net_device *other_dev;
  25970. - el_dev = skb->dev;
  25971. - other_dev = par->in ? : par->out;
  25972. - if (el_dev != other_dev) {
  25973. - MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
  25974. - "par->(in/out)=%p %s\n",
  25975. - par->hooknum, el_dev, el_dev->name, other_dev,
  25976. - other_dev->name);
  25977. - }
  25978. - }
  25979. -
  25980. - if (unlikely(!el_dev)) {
  25981. - pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
  25982. - par->hooknum, __func__);
  25983. - BUG();
  25984. - } else if (unlikely(!el_dev->name)) {
  25985. - pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
  25986. - par->hooknum, __func__);
  25987. - BUG();
  25988. - } else {
  25989. - proto = ipx_proto(skb, par);
  25990. - MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
  25991. - par->hooknum, el_dev->name, el_dev->type,
  25992. - par->family, proto);
  25993. - }
  25994. + get_dev_and_dir(skb, par, &direction, &el_dev);
  25995. + proto = ipx_proto(skb, par);
  25996. + MT_DEBUG("qtaguid[%d]: iface_stat: %s(%s): "
  25997. + "type=%d fam=%d proto=%d dir=%d\n",
  25998. + par->hooknum, __func__, el_dev->name, el_dev->type,
  25999. + par->family, proto, direction);
  26000.  
  26001. spin_lock_bh(&iface_stat_list_lock);
  26002. entry = get_iface_entry(el_dev->name);
  26003. if (entry == NULL) {
  26004. - IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
  26005. - __func__, el_dev->name);
  26006. + IF_DEBUG("qtaguid[%d]: iface_stat: %s(%s): not tracked\n",
  26007. + par->hooknum, __func__, el_dev->name);
  26008. spin_unlock_bh(&iface_stat_list_lock);
  26009. return;
  26010. }
  26011.  
  26012. - IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
  26013. + IF_DEBUG("qtaguid[%d]: %s(%s): entry=%p\n", par->hooknum, __func__,
  26014. el_dev->name, entry);
  26015.  
  26016. data_counters_update(&entry->totals_via_skb, 0, direction, proto,
  26017. @@ -1415,15 +1424,18 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
  26018. ifname, uid, sk, direction, proto, bytes);
  26019.  
  26020.  
  26021. + spin_lock_bh(&iface_stat_list_lock);
  26022. iface_entry = get_iface_entry(ifname);
  26023. if (!iface_entry) {
  26024. - pr_err_ratelimited("qtaguid: iface_stat: stat_update() "
  26025. + spin_unlock_bh(&iface_stat_list_lock);
  26026. + pr_err_ratelimited("qtaguid: tag_stat: stat_update() "
  26027. "%s not found\n", ifname);
  26028. return;
  26029. }
  26030. + spin_unlock_bh(&iface_stat_list_lock);
  26031. /* It is ok to process data when an iface_entry is inactive */
  26032.  
  26033. - MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
  26034. + MT_DEBUG("qtaguid: tag_stat: stat_update() dev=%s entry=%p\n",
  26035. ifname, iface_entry);
  26036.  
  26037. /*
  26038. @@ -1440,7 +1452,7 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
  26039. tag = combine_atag_with_uid(acct_tag, uid);
  26040. uid_tag = make_tag_from_uid(uid);
  26041. }
  26042. - MT_DEBUG("qtaguid: iface_stat: stat_update(): "
  26043. + MT_DEBUG("qtaguid: tag_stat: stat_update(): "
  26044. " looking for tag=0x%llx (uid=%u) in ife=%p\n",
  26045. tag, get_uid_from_tag(tag), iface_entry);
  26046. /* Loop over tag list under this interface for {acct_tag,uid_tag} */
  26047. @@ -1673,8 +1685,8 @@ static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
  26048. struct sock *sk;
  26049. unsigned int hook_mask = (1 << par->hooknum);
  26050.  
  26051. - MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
  26052. - par->hooknum, par->family);
  26053. + MT_DEBUG("qtaguid[%d]: find_sk(skb=%p) family=%d\n",
  26054. + par->hooknum, skb, par->family);
  26055.  
  26056. /*
  26057. * Let's not abuse the the xt_socket_get*_sk(), or else it will
  26058. @@ -1700,8 +1712,12 @@ static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
  26059. * Not fixed in 3.0-r3 :(
  26060. */
  26061. if (sk) {
  26062. - MT_DEBUG("qtaguid: %p->sk_proto=%u "
  26063. - "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
  26064. + MT_DEBUG("qtaguid[%d]: %p->sk_proto=%u->sk_state=%d\n",
  26065. + par->hooknum, sk, sk->sk_protocol, sk->sk_state);
  26066. + /*
  26067. + * When in TCP_TIME_WAIT the sk is not a "struct sock" but
  26068. + * "struct inet_timewait_sock" which is missing fields.
  26069. + */
  26070. if (sk->sk_state == TCP_TIME_WAIT) {
  26071. xt_socket_put_sk(sk);
  26072. sk = NULL;
  26073. @@ -1715,37 +1731,19 @@ static void account_for_uid(const struct sk_buff *skb,
  26074. struct xt_action_param *par)
  26075. {
  26076. const struct net_device *el_dev;
  26077. + enum ifs_tx_rx direction;
  26078. + int proto;
  26079.  
  26080. - if (!skb->dev) {
  26081. - MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
  26082. - el_dev = par->in ? : par->out;
  26083. - } else {
  26084. - const struct net_device *other_dev;
  26085. - el_dev = skb->dev;
  26086. - other_dev = par->in ? : par->out;
  26087. - if (el_dev != other_dev) {
  26088. - MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
  26089. - "par->(in/out)=%p %s\n",
  26090. - par->hooknum, el_dev, el_dev->name, other_dev,
  26091. - other_dev->name);
  26092. - }
  26093. - }
  26094. -
  26095. - if (unlikely(!el_dev)) {
  26096. - pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
  26097. - } else if (unlikely(!el_dev->name)) {
  26098. - pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
  26099. - } else {
  26100. - int proto = ipx_proto(skb, par);
  26101. - MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
  26102. - par->hooknum, el_dev->name, el_dev->type,
  26103. - par->family, proto);
  26104. + get_dev_and_dir(skb, par, &direction, &el_dev);
  26105. + proto = ipx_proto(skb, par);
  26106. + MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d dir=%d\n",
  26107. + par->hooknum, el_dev->name, el_dev->type,
  26108. + par->family, proto, direction);
  26109.  
  26110. - if_tag_stat_update(el_dev->name, uid,
  26111. - skb->sk ? skb->sk : alternate_sk,
  26112. - par->in ? IFS_RX : IFS_TX,
  26113. - proto, skb->len);
  26114. - }
  26115. + if_tag_stat_update(el_dev->name, uid,
  26116. + skb->sk ? skb->sk : alternate_sk,
  26117. + direction,
  26118. + proto, skb->len);
  26119. }
  26120.  
  26121. static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
  26122. @@ -1756,6 +1754,11 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
  26123. struct sock *sk;
  26124. uid_t sock_uid;
  26125. bool res;
  26126. + /*
  26127. + * TODO: unhack how to force just accounting.
  26128. + * For now we only do tag stats when the uid-owner is not requested
  26129. + */
  26130. + bool do_tag_stat = !(info->match & XT_QTAGUID_UID);
  26131.  
  26132. if (unlikely(module_passive))
  26133. return (info->match ^ info->invert) == 0;
  26134. @@ -1822,12 +1825,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
  26135. * couldn't find the owner, so for now we just count them
  26136. * against the system.
  26137. */
  26138. - /*
  26139. - * TODO: unhack how to force just accounting.
  26140. - * For now we only do iface stats when the uid-owner is not
  26141. - * requested.
  26142. - */
  26143. - if (!(info->match & XT_QTAGUID_UID))
  26144. + if (do_tag_stat)
  26145. account_for_uid(skb, sk, 0, par);
  26146. MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
  26147. par->hooknum,
  26148. @@ -1842,18 +1840,15 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
  26149. filp = sk->sk_socket->file;
  26150. if (filp == NULL) {
  26151. MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
  26152. - account_for_uid(skb, sk, 0, par);
  26153. + if (do_tag_stat)
  26154. + account_for_uid(skb, sk, 0, par);
  26155. res = ((info->match ^ info->invert) &
  26156. (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
  26157. atomic64_inc(&qtu_events.match_no_sk_file);
  26158. goto put_sock_ret_res;
  26159. }
  26160. sock_uid = filp->f_cred->fsuid;
  26161. - /*
  26162. - * TODO: unhack how to force just accounting.
  26163. - * For now we only do iface stats when the uid-owner is not requested
  26164. - */
  26165. - if (!(info->match & XT_QTAGUID_UID))
  26166. + if (do_tag_stat)
  26167. account_for_uid(skb, sk, sock_uid, par);
  26168.  
  26169. /*
  26170. diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
  26171. index e56833a..e591c54 100644
  26172. --- a/scripts/dtc/libfdt/fdt.c
  26173. +++ b/scripts/dtc/libfdt/fdt.c
  26174. @@ -71,6 +71,20 @@ int fdt_check_header(const void *fdt)
  26175. return -FDT_ERR_BADMAGIC;
  26176. }
  26177.  
  26178. + if (fdt_off_dt_struct(fdt) > (UINT_MAX - fdt_size_dt_struct(fdt)))
  26179. + return FDT_ERR_BADOFFSET;
  26180. +
  26181. + if (fdt_off_dt_strings(fdt) > (UINT_MAX - fdt_size_dt_strings(fdt)))
  26182. + return FDT_ERR_BADOFFSET;
  26183. +
  26184. + if ((fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt))
  26185. + > fdt_totalsize(fdt))
  26186. + return FDT_ERR_BADOFFSET;
  26187. +
  26188. + if ((fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))
  26189. + > fdt_totalsize(fdt))
  26190. + return FDT_ERR_BADOFFSET;
  26191. +
  26192. return 0;
  26193. }
  26194.  
  26195. diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
  26196. index 24437df..d7d09fe 100644
  26197. --- a/scripts/dtc/libfdt/fdt_rw.c
  26198. +++ b/scripts/dtc/libfdt/fdt_rw.c
  26199. @@ -394,7 +394,7 @@ int fdt_del_node(void *fdt, int nodeoffset)
  26200. static void _fdt_packblocks(const char *old, char *new,
  26201. int mem_rsv_size, int struct_size)
  26202. {
  26203. - int mem_rsv_off, struct_off, strings_off;
  26204. + uint32_t mem_rsv_off, struct_off, strings_off;
  26205.  
  26206. mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
  26207. struct_off = mem_rsv_off + mem_rsv_size;
  26208. diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
  26209. index 32e7d35..185cbf8 100644
  26210. --- a/sound/soc/codecs/Makefile
  26211. +++ b/sound/soc/codecs/Makefile
  26212. @@ -55,8 +55,8 @@ snd-soc-uda1380-objs := uda1380.o
  26213. snd-soc-wcd9304-objs := wcd9304.o wcd9304-tables.o
  26214. snd-soc-wcd9310-objs := wcd9310.o wcd9310-tables.o
  26215. snd-soc-cs8427-objs := cs8427.o
  26216. -snd-soc-wcd9320-objs := wcd9xxx-resmgr.o wcd9320.o wcd9320-tables.o wcd9xxx-mbhc.o wcd9xxx-common.o
  26217. -snd-soc-wcd9306-objs := wcd9306.o wcd9306-tables.o wcd9xxx-common.o
  26218. +snd-soc-wcd9320-objs := wcd9xxx-resmgr.o wcd9320.o wcd9320-tables.o wcd9xxx-mbhc.o wcd9xxx-common.o wcdcal-hwdep.o
  26219. +snd-soc-wcd9306-objs := wcd9306.o wcd9306-tables.o wcd9xxx-common.o wcdcal-hwdep.o
  26220. snd-soc-msm8x10-wcd-objs := msm8x10-wcd.o msm8x10-wcd-tables.o wcd9xxx-common.o
  26221. snd-soc-es325-objs := es325.o
  26222. snd-soc-es325_atlantic-objs := es325_atlantic.o
  26223. diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
  26224. index 42c1f3b..801286a 100644
  26225. --- a/sound/soc/codecs/msm8x10-wcd.c
  26226. +++ b/sound/soc/codecs/msm8x10-wcd.c
  26227. @@ -1691,20 +1691,22 @@ static int msm8x10_wcd_codec_enable_micbias(struct snd_soc_dapm_widget *w,
  26228. char *internal1_text = "Internal1";
  26229. char *internal2_text = "Internal2";
  26230. char *internal3_text = "Internal3";
  26231. + char *external_text = "External";
  26232. enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on;
  26233.  
  26234. dev_dbg(codec->dev, "%s %d\n", __func__, event);
  26235. - switch (w->reg) {
  26236. - case MSM8X10_WCD_A_MICB_1_CTL:
  26237. +
  26238. + if ((strnstr(w->name, internal1_text, 30)) ||
  26239. + (strnstr(w->name, internal2_text, 30)) ||
  26240. + (strnstr(w->name, internal3_text, 30)) ||
  26241. + (strnstr(w->name, external_text, 30))) {
  26242. micb_int_reg = MSM8X10_WCD_A_MICB_1_INT_RBIAS;
  26243. e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_1_ON;
  26244. e_post_on = WCD9XXX_EVENT_POST_MICBIAS_1_ON;
  26245. e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF;
  26246. - break;
  26247. - default:
  26248. + } else {
  26249. dev_err(codec->dev,
  26250. - "%s: Error, invalid micbias register 0x%x\n",
  26251. - __func__, w->reg);
  26252. + "%s: Error, invalid micbias %s\n", __func__, w->name);
  26253. return -EINVAL;
  26254. }
  26255.  
  26256. @@ -1722,9 +1724,9 @@ static int msm8x10_wcd_codec_enable_micbias(struct snd_soc_dapm_widget *w,
  26257.  
  26258. /* Always pull up TxFe for TX2 to Micbias */
  26259. snd_soc_update_bits(codec, micb_int_reg, 0x04, 0x04);
  26260. - snd_soc_update_bits(codec, MSM8X10_WCD_A_MICB_1_CTL,
  26261. + if (++msm8x10_wcd->micb_en_count == 1)
  26262. + snd_soc_update_bits(codec, MSM8X10_WCD_A_MICB_1_CTL,
  26263. 0x80, 0x80);
  26264. - msm8x10_wcd->micb_en_count++;
  26265. pr_debug("%s micb_en_count : %d", __func__,
  26266. msm8x10_wcd->micb_en_count);
  26267. break;
  26268. @@ -1734,12 +1736,11 @@ static int msm8x10_wcd_codec_enable_micbias(struct snd_soc_dapm_widget *w,
  26269. wcd9xxx_resmgr_notifier_call(&msm8x10_wcd->resmgr, e_post_on);
  26270. break;
  26271. case SND_SOC_DAPM_POST_PMD:
  26272. - if (msm8x10_wcd->micb_en_count > 0)
  26273. - msm8x10_wcd->micb_en_count--;
  26274. + if (--msm8x10_wcd->micb_en_count == 0)
  26275. + snd_soc_update_bits(codec, MSM8X10_WCD_A_MICB_1_CTL,
  26276. + 0x80, 0x00);
  26277. pr_debug("%s micb_en_count : %d", __func__,
  26278. msm8x10_wcd->micb_en_count);
  26279. - snd_soc_update_bits(codec, MSM8X10_WCD_A_MICB_1_CTL,
  26280. - 0x80, 0x00);
  26281. /* Let MBHC module know so micbias switch to be off */
  26282. wcd9xxx_resmgr_notifier_call(&msm8x10_wcd->resmgr, e_post_off);
  26283.  
  26284. @@ -2553,7 +2554,7 @@ static const struct snd_soc_dapm_widget msm8x10_wcd_dapm_widgets[] = {
  26285.  
  26286. SND_SOC_DAPM_INPUT("AMIC1"),
  26287. SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal1",
  26288. - MSM8X10_WCD_A_MICB_1_CTL, 7, 0,
  26289. + SND_SOC_NOPM, 7, 0,
  26290. msm8x10_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  26291. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  26292. #if defined(CONFIG_SEC_HEAT_PROJECT) /*Remove Intenal mic bias2*/
  26293. @@ -2563,20 +2564,20 @@ static const struct snd_soc_dapm_widget msm8x10_wcd_dapm_widgets[] = {
  26294. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  26295. #else
  26296. SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal2",
  26297. - MSM8X10_WCD_A_MICB_1_CTL, 7, 0,
  26298. + SND_SOC_NOPM, 7, 0,
  26299. msm8x10_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  26300. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  26301. #endif
  26302. SND_SOC_DAPM_MICBIAS_E("MIC BIAS Internal3",
  26303. - MSM8X10_WCD_A_MICB_1_CTL, 7, 0,
  26304. + SND_SOC_NOPM, 7, 0,
  26305. msm8x10_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  26306. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  26307. SND_SOC_DAPM_MICBIAS_E("MIC BIAS External",
  26308. - MSM8X10_WCD_A_MICB_1_CTL, 7, 0,
  26309. + SND_SOC_NOPM, 7, 0,
  26310. msm8x10_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  26311. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  26312. SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS_EXTERNAL_STANDALONE,
  26313. - MSM8X10_WCD_A_MICB_1_CTL,
  26314. + SND_SOC_NOPM,
  26315. 7, 0, msm8x10_wcd_codec_enable_micbias,
  26316. SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
  26317. SND_SOC_DAPM_POST_PMD),
  26318. @@ -2864,7 +2865,6 @@ static int msm8x10_wcd_enable_mbhc_micbias(struct snd_soc_codec *codec,
  26319. enum wcd9xxx_micbias_num micb_num)
  26320. {
  26321. int rc;
  26322. - struct msm8x10_wcd_priv *msm8x10_wcd = snd_soc_codec_get_drvdata(codec);
  26323.  
  26324. if (micb_num != MBHC_MICBIAS1) {
  26325. rc = -EINVAL;
  26326. @@ -2875,12 +2875,6 @@ static int msm8x10_wcd_enable_mbhc_micbias(struct snd_soc_codec *codec,
  26327. rc = snd_soc_dapm_force_enable_pin(&codec->dapm,
  26328. DAPM_MICBIAS_EXTERNAL_STANDALONE);
  26329. else {
  26330. - if (msm8x10_wcd->micb_en_count > 1) {
  26331. - msm8x10_wcd->micb_en_count--;
  26332. - pr_debug("%s micb_en_count : %d", __func__,
  26333. - msm8x10_wcd->micb_en_count);
  26334. - return 0;
  26335. - }
  26336. rc = snd_soc_dapm_disable_pin(&codec->dapm,
  26337. DAPM_MICBIAS_EXTERNAL_STANDALONE);
  26338. }
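
Illustrative only: the msm8x10-wcd.c hunks above stop writing MSM8X10_WCD_A_MICB_1_CTL directly from every micbias widget and instead gate the register write behind a reference count, so the shared supply is enabled on the first user and disabled on the last. Below is a minimal, standalone C sketch of that enable/disable refcount pattern; reg_update_bits(), MICB_1_CTL and MICB_EN_MASK are hypothetical stand-ins for the driver's codec I/O helpers and register map, not the real ones.

#include <stdio.h>

#define MICB_1_CTL   0x90   /* hypothetical shared micbias control register */
#define MICB_EN_MASK 0x80   /* hypothetical enable bit */

static int micb_en_count;   /* number of widgets that currently need micbias */

static void reg_update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
    /* stand-in for the codec register write */
    printf("reg 0x%02x: mask 0x%02x -> 0x%02x\n", reg, mask, val);
}

static void micbias_get(void)
{
    /* only the first user actually powers the supply */
    if (++micb_en_count == 1)
        reg_update_bits(MICB_1_CTL, MICB_EN_MASK, MICB_EN_MASK);
}

static void micbias_put(void)
{
    /* only the last user powers it back down */
    if (--micb_en_count == 0)
        reg_update_bits(MICB_1_CTL, MICB_EN_MASK, 0);
}

int main(void)
{
    micbias_get();   /* writes the enable bit */
    micbias_get();   /* second user: count only, no register write */
    micbias_put();   /* still one user left, no register write */
    micbias_put();   /* last user gone: clears the enable bit */
    return 0;
}
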
  26339. diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
  26340. index 23d9437..2ed6479 100644
  26341. --- a/sound/soc/codecs/wcd9306.c
  26342. +++ b/sound/soc/codecs/wcd9306.c
  26343. @@ -1,4 +1,4 @@
  26344. -/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  26345. +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  26346. *
  26347. * This program is free software; you can redistribute it and/or modify
  26348. * it under the terms of the GNU General Public License version 2 and
  26349. @@ -42,28 +42,7 @@
  26350. #define TAPAN_HPH_PA_SETTLE_COMP_ON 3000
  26351. #define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
  26352.  
  26353. -#if defined(CONFIG_SND_SOC_ES705)
  26354. -#include "audience/es705-export.h"
  26355. -#elif defined(CONFIG_SND_SOC_ES325_ATLANTIC)
  26356. -#include "es325-export.h"
  26357. -#endif
  26358. -
  26359. -#if defined(CONFIG_SND_SOC_ES705)
  26360. -#define REMOTE_ROUTE_ENABLE_CB es705_remote_route_enable
  26361. -#define SLIM_GET_CHANNEL_MAP_CB es705_slim_get_channel_map
  26362. -#define SLIM_SET_CHANNEL_MAP_CB es705_slim_set_channel_map
  26363. -#define SLIM_HW_PARAMS_CB es705_slim_hw_params
  26364. -#define REMOTE_CFG_SLIM_RX_CB es705_remote_cfg_slim_rx
  26365. -#define REMOTE_CLOSE_SLIM_RX_CB es705_remote_close_slim_rx
  26366. -#define REMOTE_CFG_SLIM_TX_CB es705_remote_cfg_slim_tx
  26367. -#define REMOTE_CLOSE_SLIM_TX_CB es705_remote_close_slim_tx
  26368. -#define REMOTE_ADD_CODEC_CONTROLS_CB es705_remote_add_codec_controls
  26369. -#endif
  26370. -
  26371. -#ifndef CONFIG_ARCH_MSM8226
  26372. #define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone"
  26373. -#endif
  26374. -
  26375. #define TAPAN_VALIDATE_RX_SBPORT_RANGE(port) ((port >= 16) && (port <= 20))
  26376. #define TAPAN_CONVERT_RX_SBPORT_ID(port) (port - 16) /* RX1 port ID = 0 */
  26377.  
  26378. @@ -312,6 +291,9 @@ struct tapan_priv {
  26379. u32 anc_slot;
  26380. bool anc_func;
  26381.  
  26382. + /*track adie loopback mode*/
  26383. + bool lb_mode;
  26384. +
  26385. /*track tapan interface type*/
  26386. u8 intf_type;
  26387.  
  26388. @@ -327,6 +309,8 @@ struct tapan_priv {
  26389. u8 aux_l_gain;
  26390. u8 aux_r_gain;
  26391.  
  26392. + bool dec_active[NUM_DECIMATORS];
  26393. +
  26394. bool spkr_pa_widget_on;
  26395.  
  26396. struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
  26397. @@ -339,10 +323,6 @@ struct tapan_priv {
  26398. /* class h specific data */
  26399. struct wcd9xxx_clsh_cdc_data clsh_d;
  26400.  
  26401. - int ldo_h_count;
  26402. - int (*mclk_cb_fn) (struct snd_soc_codec*, int, bool);
  26403. - int micb_2_ref_cnt;
  26404. -
  26405. /* pointers to regulators required for chargepump */
  26406. struct regulator *cp_regulators[CP_REG_MAX];
  26407.  
  26408. @@ -537,6 +517,43 @@ static int tapan_put_anc_func(struct snd_kcontrol *kcontrol,
  26409. return 0;
  26410. }
  26411.  
  26412. +static int tapan_loopback_mode_get(struct snd_kcontrol *kcontrol,
  26413. + struct snd_ctl_elem_value *ucontrol)
  26414. +{
  26415. + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
  26416. + struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
  26417. +
  26418. + ucontrol->value.integer.value[0] = tapan->lb_mode;
  26419. + dev_dbg(codec->dev, "%s: lb_mode = %d\n",
  26420. + __func__, tapan->lb_mode);
  26421. +
  26422. + return 0;
  26423. +}
  26424. +
  26425. +static int tapan_loopback_mode_put(struct snd_kcontrol *kcontrol,
  26426. + struct snd_ctl_elem_value *ucontrol)
  26427. +{
  26428. + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
  26429. + struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
  26430. +
  26431. + dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
  26432. + __func__, ucontrol->value.integer.value[0]);
  26433. +
  26434. + switch (ucontrol->value.integer.value[0]) {
  26435. + case 0:
  26436. + tapan->lb_mode = false;
  26437. + break;
  26438. + case 1:
  26439. + tapan->lb_mode = true;
  26440. + break;
  26441. + default:
  26442. + return -EINVAL;
  26443. + }
  26444. +
  26445. + return 0;
  26446. +}
  26447. +
  26448. +
  26449. static int tapan_pa_gain_get(struct snd_kcontrol *kcontrol,
  26450. struct snd_ctl_elem_value *ucontrol)
  26451. {
  26452. @@ -1061,6 +1078,13 @@ static int tapan_config_compander(struct snd_soc_dapm_widget *w,
  26453. return 0;
  26454. }
  26455.  
  26456. +static const char * const tapan_loopback_mode_ctrl_text[] = {
  26457. + "DISABLE", "ENABLE"};
  26458. +static const struct soc_enum tapan_loopback_mode_ctl_enum[] = {
  26459. + SOC_ENUM_SINGLE_EXT(2, tapan_loopback_mode_ctrl_text),
  26460. +};
  26461. +
  26462. +
  26463. static const char * const tapan_ear_pa_gain_text[] = {"POS_6_DB", "POS_4P5_DB",
  26464. "POS_3_DB", "POS_1P5_DB",
  26465. "POS_0_DB", "NEG_2P5_DB",
  26466. @@ -1113,11 +1137,33 @@ static const struct soc_enum class_h_dsm_enum =
  26467. static const struct snd_kcontrol_new class_h_dsm_mux =
  26468. SOC_DAPM_ENUM("CLASS_H_DSM MUX Mux", class_h_dsm_enum);
  26469.  
  26470. +static int tapan_hph_impedance_get(struct snd_kcontrol *kcontrol,
  26471. + struct snd_ctl_elem_value *ucontrol)
  26472. +{
  26473. + uint32_t zl, zr;
  26474. + bool hphr;
  26475. + struct soc_multi_mixer_control *mc;
  26476. + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
  26477. + struct tapan_priv *priv = snd_soc_codec_get_drvdata(codec);
  26478. +
  26479. + mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
  26480. +
  26481. + hphr = mc->shift;
  26482. + wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
  26483. + pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr);
  26484. + ucontrol->value.integer.value[0] = hphr ? zr : zl;
  26485. +
  26486. + return 0;
  26487. +}
  26488. +
  26489. static const struct snd_kcontrol_new tapan_common_snd_controls[] = {
  26490.  
  26491. SOC_ENUM_EXT("EAR PA Gain", tapan_ear_pa_gain_enum[0],
  26492. tapan_pa_gain_get, tapan_pa_gain_put),
  26493.  
  26494. + SOC_ENUM_EXT("LOOPBACK Mode", tapan_loopback_mode_ctl_enum[0],
  26495. + tapan_loopback_mode_get, tapan_loopback_mode_put),
  26496. +
  26497. SOC_SINGLE_TLV("HPHL Volume", TAPAN_A_RX_HPH_L_GAIN, 0, 20, 1,
  26498. line_gain),
  26499. SOC_SINGLE_TLV("HPHR Volume", TAPAN_A_RX_HPH_R_GAIN, 0, 20, 1,
  26500. @@ -1223,6 +1269,11 @@ static const struct snd_kcontrol_new tapan_common_snd_controls[] = {
  26501. tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
  26502. SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
  26503. tapan_get_iir_band_audio_mixer, tapan_put_iir_band_audio_mixer),
  26504. +
  26505. + SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
  26506. + tapan_hph_impedance_get, NULL),
  26507. + SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
  26508. + tapan_hph_impedance_get, NULL),
  26509. };
  26510.  
  26511. static const struct snd_kcontrol_new tapan_9306_snd_controls[] = {
  26512. @@ -2035,15 +2086,20 @@ static int tapan_codec_enable_lineout(struct snd_soc_dapm_widget *w,
  26513. WCD9XXX_CLSH_STATE_LO,
  26514. WCD9XXX_CLSH_REQ_ENABLE,
  26515. WCD9XXX_CLSH_EVENT_POST_PA);
  26516. - dev_dbg(codec->dev, "%s: sleeping 3 ms after %s PA turn on\n",
  26517. + dev_dbg(codec->dev, "%s: sleeping 5 ms after %s PA turn on\n",
  26518. __func__, w->name);
  26519. - usleep_range(3000, 3010);
  26520. + /* Wait for CnP time after PA enable */
  26521. + usleep_range(5000, 5100);
  26522. break;
  26523. case SND_SOC_DAPM_POST_PMD:
  26524. wcd9xxx_clsh_fsm(codec, &tapan->clsh_d,
  26525. WCD9XXX_CLSH_STATE_LO,
  26526. WCD9XXX_CLSH_REQ_DISABLE,
  26527. WCD9XXX_CLSH_EVENT_POST_PA);
  26528. + dev_dbg(codec->dev, "%s: sleeping 5 ms after %s PA turn off\n",
  26529. + __func__, w->name);
  26530. + /* Wait for CnP time after PA disable */
  26531. + usleep_range(5000, 5100);
  26532. break;
  26533. }
  26534. return 0;
  26535. @@ -2250,40 +2306,36 @@ static int tapan_codec_enable_micbias(struct snd_soc_dapm_widget *w,
  26536. {
  26537. struct snd_soc_codec *codec = w->codec;
  26538. struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
  26539. - u16 micb_int_reg, micb_ctl_reg;
  26540. + u16 micb_int_reg = 0, micb_ctl_reg = 0;
  26541. u8 cfilt_sel_val = 0;
  26542. char *internal1_text = "Internal1";
  26543. char *internal2_text = "Internal2";
  26544. char *internal3_text = "Internal3";
  26545. enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on;
  26546.  
  26547. - dev_dbg(codec->dev, "%s %d, shift = %d\n", __func__, event, w->shift);
  26548. - switch (w->shift) {
  26549. - case 1:
  26550. + pr_debug("%s: w->name %s event %d\n", __func__, w->name, event);
  26551. + if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1"))) {
  26552. micb_ctl_reg = TAPAN_A_MICB_1_CTL;
  26553. micb_int_reg = TAPAN_A_MICB_1_INT_RBIAS;
  26554. cfilt_sel_val = tapan->resmgr.pdata->micbias.bias1_cfilt_sel;
  26555. e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_1_ON;
  26556. e_post_on = WCD9XXX_EVENT_POST_MICBIAS_1_ON;
  26557. e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF;
  26558. - break;
  26559. - case 2:
  26560. + } else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2"))) {
  26561. micb_ctl_reg = TAPAN_A_MICB_2_CTL;
  26562. micb_int_reg = TAPAN_A_MICB_2_INT_RBIAS;
  26563. cfilt_sel_val = tapan->resmgr.pdata->micbias.bias2_cfilt_sel;
  26564. e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_2_ON;
  26565. e_post_on = WCD9XXX_EVENT_POST_MICBIAS_2_ON;
  26566. e_post_off = WCD9XXX_EVENT_POST_MICBIAS_2_OFF;
  26567. - break;
  26568. - case 3:
  26569. + } else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3"))) {
  26570. micb_ctl_reg = TAPAN_A_MICB_3_CTL;
  26571. micb_int_reg = TAPAN_A_MICB_3_INT_RBIAS;
  26572. cfilt_sel_val = tapan->resmgr.pdata->micbias.bias3_cfilt_sel;
  26573. e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_3_ON;
  26574. e_post_on = WCD9XXX_EVENT_POST_MICBIAS_3_ON;
  26575. e_post_off = WCD9XXX_EVENT_POST_MICBIAS_3_OFF;
  26576. - break;
  26577. - default:
  26578. + } else {
  26579. pr_err("%s: Error, invalid micbias %s\n", __func__, w->name);
  26580. return -EINVAL;
  26581. }
  26582. @@ -2319,33 +2371,27 @@ static int tapan_codec_enable_micbias(struct snd_soc_dapm_widget *w,
  26583.  
  26584. break;
  26585. case SND_SOC_DAPM_POST_PMU:
  26586. - if (w->shift ==2) {
  26587. - if (++tapan->micb_2_ref_cnt == 1) {
  26588. - snd_soc_update_bits(codec, micb_ctl_reg, 0x80, 0x80);
  26589. - usleep_range(20000, 20000);
  26590. - } else {
  26591. - dev_dbg(codec->dev, "MIC BIAS2 already enabled, ref_count = %d",
  26592. - tapan->micb_2_ref_cnt);
  26593. - }
  26594. - } else {
  26595. - snd_soc_update_bits(codec, micb_ctl_reg, 0x80, 0x80);
  26596. - usleep_range(20000, 20000);
  26597. - }
  26598. + usleep_range(20000, 20000);
  26599. /* Let MBHC module know so micbias is on */
  26600. wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_on);
  26601. break;
  26602. case SND_SOC_DAPM_POST_PMD:
  26603. - if (w->shift ==2) {
  26604. - if (--tapan->micb_2_ref_cnt <= 0) {
  26605. - snd_soc_update_bits(codec, micb_ctl_reg, 0x80, 0x00);
  26606. - tapan->micb_2_ref_cnt = 0;
  26607. - dev_dbg(codec->dev, "MIC BIAS2 disabled\n");
  26608. - } else {
  26609. - dev_dbg(codec->dev, "micbias2 is still needed, do not turn off\n");
  26610. - }
  26611. - } else {
  26612. - snd_soc_update_bits(codec, micb_ctl_reg, 0x80, 0x00);
  26613. - }
  26614. + if (micb_ctl_reg == TAPAN_A_MICB_2_CTL) {
  26615. + if (--tapan->micb_2_users == 0)
  26616. + wcd9xxx_resmgr_rm_cond_update_bits(
  26617. + &tapan->resmgr,
  26618. + WCD9XXX_COND_HPH_MIC,
  26619. + micb_ctl_reg, 7,
  26620. + false);
  26621. + pr_debug("%s: micb_2_users %d\n", __func__,
  26622. + tapan->micb_2_users);
  26623. + WARN(tapan->micb_2_users < 0,
  26624. + "Unexpected micbias users %d\n",
  26625. + tapan->micb_2_users);
  26626. + } else
  26627. + snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift,
  26628. + 0);
  26629. +
  26630. /* Let MBHC module know so micbias switch to be off */
  26631. wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_post_off);
  26632.  
  26633. @@ -2364,7 +2410,6 @@ static int tapan_codec_enable_micbias(struct snd_soc_dapm_widget *w,
  26634. return 0;
  26635. }
  26636.  
  26637. -#ifndef CONFIG_ARCH_MSM8226
  26638. /* called under codec_resource_lock acquisition */
  26639. static int tapan_enable_mbhc_micbias(struct snd_soc_codec *codec, bool enable,
  26640. enum wcd9xxx_micbias_num micb_num)
  26641. @@ -2388,7 +2433,6 @@ static int tapan_enable_mbhc_micbias(struct snd_soc_codec *codec, bool enable,
  26642. pr_debug("%s: leave ret %d\n", __func__, rc);
  26643. return rc;
  26644. }
  26645. -#endif
  26646.  
  26647. static void tx_hpf_corner_freq_callback(struct work_struct *work)
  26648. {
  26649. @@ -2425,10 +2469,11 @@ static int tapan_codec_enable_dec(struct snd_soc_dapm_widget *w,
  26650. {
  26651. struct snd_soc_codec *codec = w->codec;
  26652. unsigned int decimator;
  26653. + struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
  26654. char *dec_name = NULL;
  26655. char *widget_name = NULL;
  26656. char *temp;
  26657. - int ret = 0;
  26658. + int ret = 0, i;
  26659. u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
  26660. u8 dec_hpf_cut_of_freq;
  26661. int offset;
  26662. @@ -2480,6 +2525,10 @@ static int tapan_codec_enable_dec(struct snd_soc_dapm_widget *w,
  26663.  
  26664. switch (event) {
  26665. case SND_SOC_DAPM_PRE_PMU:
  26666. + for (i = 0; i < NUM_DECIMATORS; i++) {
  26667. + if (decimator == i + 1)
  26668. + tapan_p->dec_active[i] = true;
  26669. + }
  26670.  
  26671. /* Enableable TX digital mute */
  26672. snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
  26673. @@ -2510,8 +2559,11 @@ static int tapan_codec_enable_dec(struct snd_soc_dapm_widget *w,
  26674.  
  26675. case SND_SOC_DAPM_POST_PMU:
  26676.  
  26677. - /* Disable TX digital mute */
  26678. - snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
  26679. + if (tapan_p->lb_mode) {
  26680. + pr_debug("%s: loopback mode unmute the DEC\n",
  26681. + __func__);
  26682. + snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
  26683. + }
  26684.  
  26685. if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
  26686. CF_MIN_3DB_150HZ) {
  26687. @@ -2540,7 +2592,10 @@ static int tapan_codec_enable_dec(struct snd_soc_dapm_widget *w,
  26688. snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
  26689. snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
  26690. (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
  26691. -
  26692. + for (i = 0; i < NUM_DECIMATORS; i++) {
  26693. + if (decimator == i + 1)
  26694. + tapan_p->dec_active[i] = false;
  26695. + }
  26696. break;
  26697. }
  26698. out:
  26699. @@ -2634,7 +2689,6 @@ static int tapan_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
  26700. return 0;
  26701. }
  26702.  
  26703. -#ifndef CONFIG_ARCH_MSM8226
  26704. /* called under codec_resource_lock acquisition */
  26705. static int __tapan_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
  26706. struct snd_kcontrol *kcontrol, int event)
  26707. @@ -2688,97 +2742,13 @@ static int __tapan_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
  26708. pr_debug("%s: leave\n", __func__);
  26709. return 0;
  26710. }
  26711. -#endif
  26712. -void tapan_register_mclk_cb(struct snd_soc_codec *codec,
  26713. - int (*mclk_cb_fn) (struct snd_soc_codec*, int, bool))
  26714. -{
  26715. - struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
  26716. - tapan->mclk_cb_fn = mclk_cb_fn;
  26717. -}
  26718. -
  26719. -static void tapan_enable_ldo_h(struct snd_soc_codec *codec, u32 enable)
  26720. -{
  26721. - struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
  26722. -
  26723. - if (enable) {
  26724. - if (++tapan->ldo_h_count == 1)
  26725. - snd_soc_update_bits(codec, TAPAN_A_LDO_H_MODE_1,
  26726. - 0x80, 0x80);
  26727. - } else {
  26728. - if (--tapan->ldo_h_count <= 0) {
  26729. - snd_soc_update_bits(codec, TAPAN_A_LDO_H_MODE_1,
  26730. - 0x80, 0x00);
  26731. - tapan->ldo_h_count = 0;
  26732. - }
  26733. - }
  26734. -}
  26735. -
  26736. -static int tapan_codec_enable_micbias_power(struct snd_soc_dapm_widget *w,
  26737. - struct snd_kcontrol *kcontrol,
  26738. - int event)
  26739. -{
  26740. - struct snd_soc_codec *codec = w->codec;
  26741. - struct tapan_priv *tapan = snd_soc_codec_get_drvdata(codec);
  26742. -
  26743. - pr_debug("%s %d\n", __func__, event);
  26744. -
  26745. - if (!tapan->mclk_cb_fn) {
  26746. - pr_err("%s: Callback to enable mclk is not registered\n",
  26747. - __func__);
  26748. - return -EINVAL;
  26749. - }
  26750. -
  26751. - switch (event) {
  26752. - case SND_SOC_DAPM_PRE_PMU:
  26753. - tapan->mclk_cb_fn(codec, 1, true);
  26754. - WCD9XXX_BCL_LOCK(&tapan->resmgr);
  26755. - WCD9XXX_BG_CLK_LOCK(&tapan->resmgr);
  26756. - wcd9xxx_resmgr_get_bandgap(&tapan->resmgr,
  26757. - WCD9XXX_BANDGAP_AUDIO_MODE);
  26758. - WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);
  26759. - WCD9XXX_BCL_UNLOCK(&tapan->resmgr);
  26760. - tapan_enable_ldo_h(codec, 1);
  26761. - tapan_codec_enable_micbias(w, kcontrol, event);
  26762. - break;
  26763. - case SND_SOC_DAPM_POST_PMU:
  26764. - tapan_codec_enable_micbias(w, kcontrol, event);
  26765. - tapan->mclk_cb_fn(codec, 0, true);
  26766. - break;
  26767. - case SND_SOC_DAPM_POST_PMD:
  26768. - tapan_codec_enable_micbias(w, kcontrol, event);
  26769. - tapan_enable_ldo_h(codec, 0);
  26770. - WCD9XXX_BCL_LOCK(&tapan->resmgr);
  26771. - WCD9XXX_BG_CLK_LOCK(&tapan->resmgr);
  26772. - wcd9xxx_resmgr_put_bandgap(&tapan->resmgr,
  26773. - WCD9XXX_BANDGAP_AUDIO_MODE);
  26774. - WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);
  26775. - WCD9XXX_BCL_UNLOCK(&tapan->resmgr);
  26776. - break;
  26777. - }
  26778. - return 0;
  26779. -}
  26780.  
  26781. static int tapan_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
  26782. struct snd_kcontrol *kcontrol, int event)
  26783. {
  26784. -#ifndef CONFIG_ARCH_MSM8226
  26785. int rc;
  26786. rc = __tapan_codec_enable_ldo_h(w, kcontrol, event);
  26787. return rc;
  26788. -#else
  26789. - struct snd_soc_codec *codec = w->codec;
  26790. - switch (event) {
  26791. - case SND_SOC_DAPM_POST_PMU:
  26792. - tapan_enable_ldo_h(codec, 1);
  26793. - usleep_range(1000, 1000);
  26794. - break;
  26795. - case SND_SOC_DAPM_POST_PMD:
  26796. - tapan_enable_ldo_h(codec, 0);
  26797. - usleep_range(1000, 1000);
  26798. - break;
  26799. - }
  26800. - return 0;
  26801. -#endif
  26802. }
  26803.  
  26804. static int tapan_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
  26805. @@ -2882,10 +2852,6 @@ static int tapan_hph_pa_event(struct snd_soc_dapm_widget *w,
  26806. case SND_SOC_DAPM_PRE_PMU:
  26807. /* Let MBHC module know PA is turning on */
  26808. wcd9xxx_resmgr_notifier_call(&tapan->resmgr, e_pre_on);
  26809. -#if defined(CONFIG_MACH_ATLANTICLTE_ATT) || defined(CONFIG_MACH_ATLANTICLTE_USC)
  26810. - snd_soc_write(codec,TAPAN_A_CDC_CLSH_V_PA_HD_HPH,0x19);
  26811. - snd_soc_write(codec,TAPAN_A_CDC_CLSH_V_PA_MIN_HPH, 0x26);
  26812. -#endif
  26813. break;
  26814. case SND_SOC_DAPM_POST_PMU:
  26815. dev_dbg(codec->dev, "%s: sleep %d ms after %s PA enable.\n",
  26816. @@ -2912,10 +2878,6 @@ static int tapan_hph_pa_event(struct snd_soc_dapm_widget *w,
  26817. req_clsh_state,
  26818. WCD9XXX_CLSH_REQ_DISABLE,
  26819. WCD9XXX_CLSH_EVENT_POST_PA);
  26820. -#if defined(CONFIG_MACH_ATLANTICLTE_ATT) || defined(CONFIG_MACH_ATLANTICLTE_USC)
  26821. - snd_soc_write(codec,TAPAN_A_CDC_CLSH_V_PA_HD_HPH,0x00);
  26822. - snd_soc_write(codec,TAPAN_A_CDC_CLSH_V_PA_MIN_HPH, 0x00);
  26823. -#endif
  26824. break;
  26825. }
  26826. return 0;
  26827. @@ -3366,9 +3328,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
  26828. {"MIC BIAS2 Internal2", NULL, "LDO_H"},
  26829. {"MIC BIAS2 Internal3", NULL, "LDO_H"},
  26830. {"MIC BIAS2 External", NULL, "LDO_H"},
  26831. -#ifndef CONFIG_ARCH_MSM8226
  26832. {DAPM_MICBIAS2_EXTERNAL_STANDALONE, NULL, "LDO_H Standalone"},
  26833. -#endif
  26834.  
  26835. /*sidetone path enable*/
  26836. {"IIR1", NULL, "IIR1 INP1 MUX"},
  26837. @@ -3588,8 +3548,6 @@ static unsigned int tapan_read(struct snd_soc_codec *codec,
  26838. return val;
  26839. }
  26840.  
  26841. -#ifdef CONFIG_SND_SOC_ES325_ATLANTIC
  26842. -
  26843. static int tapan_startup(struct snd_pcm_substream *substream,
  26844. struct snd_soc_dai *dai)
  26845. {
  26846. @@ -3600,25 +3558,10 @@ static int tapan_startup(struct snd_pcm_substream *substream,
  26847. (tapan_core->dev != NULL) &&
  26848. (tapan_core->dev->parent != NULL))
  26849. pm_runtime_get_sync(tapan_core->dev->parent);
  26850. - es325_wrapper_wakeup(dai);
  26851.  
  26852. return 0;
  26853. }
  26854. -#else
  26855. -static int tapan_startup(struct snd_pcm_substream *substream,
  26856. - struct snd_soc_dai *dai)
  26857. -{
  26858. - struct wcd9xxx *tapan_core = dev_get_drvdata(dai->codec->dev->parent);
  26859. - dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
  26860. - __func__, substream->name, substream->stream);
  26861. - if ((tapan_core != NULL) &&
  26862. - (tapan_core->dev != NULL) &&
  26863. - (tapan_core->dev->parent != NULL))
  26864. - pm_runtime_get_sync(tapan_core->dev->parent);
  26865.  
  26866. - return 0;
  26867. -}
  26868. -#endif
  26869. static void tapan_shutdown(struct snd_pcm_substream *substream,
  26870. struct snd_soc_dai *dai)
  26871. {
  26872. @@ -3637,13 +3580,6 @@ static void tapan_shutdown(struct snd_pcm_substream *substream,
  26873. tapan->dai[dai->id].ch_mask);
  26874. }
  26875. }
  26876. -#ifdef CONFIG_SND_SOC_ES325_ATLANTIC
  26877. - if ((tapan_core != NULL) &&
  26878. - (tapan_core->dev != NULL) &&
  26879. - (tapan_core->dev->parent != NULL)) {
  26880. - es325_wrapper_sleep(dai->id);
  26881. - }
  26882. -#endif
  26883. if ((tapan_core != NULL) &&
  26884. (tapan_core->dev != NULL) &&
  26885. (tapan_core->dev->parent != NULL) &&
  26886. @@ -4160,239 +4096,63 @@ static int tapan_hw_params(struct snd_pcm_substream *substream,
  26887. return 0;
  26888. }
  26889.  
  26890. -#if defined(CONFIG_SND_SOC_ES705)
  26891. -int (*remote_route_enable)(struct snd_soc_dai *dai) = REMOTE_ROUTE_ENABLE_CB;
  26892. -int (*slim_get_channel_map)(struct snd_soc_dai *dai,
  26893. - unsigned int *tx_num, unsigned int *tx_slot,
  26894. - unsigned int *rx_num, unsigned int *rx_slot)
  26895. - = SLIM_GET_CHANNEL_MAP_CB;
  26896. -int (*slim_set_channel_map)(struct snd_soc_dai *dai,
  26897. - unsigned int tx_num, unsigned int *tx_slot,
  26898. - unsigned int rx_num, unsigned int *rx_slot)
  26899. - = SLIM_SET_CHANNEL_MAP_CB;
  26900. -int (*slim_hw_params)(struct snd_pcm_substream *substream,
  26901. - struct snd_pcm_hw_params *params,
  26902. - struct snd_soc_dai *dai)
  26903. - = SLIM_HW_PARAMS_CB;
  26904. -int (*remote_cfg_slim_rx)(int dai_id) = REMOTE_CFG_SLIM_RX_CB;
  26905. -int (*remote_close_slim_rx)(int dai_id) = REMOTE_CLOSE_SLIM_RX_CB;
  26906. -int (*remote_cfg_slim_tx)(int dai_id) = REMOTE_CFG_SLIM_TX_CB;
  26907. -int (*remote_close_slim_tx)(int dai_id) = REMOTE_CLOSE_SLIM_TX_CB;
  26908. -int (*remote_add_codec_controls)(struct snd_soc_codec *codec)
  26909. - = REMOTE_ADD_CODEC_CONTROLS_CB;
  26910. -
  26911. -static int tapan_esxxx_startup(struct snd_pcm_substream *substream,
  26912. - struct snd_soc_dai *dai)
  26913. -{
  26914. - tapan_startup(substream, dai);
  26915. -/*
  26916. - if (es705_remote_route_enable(dai))
  26917. - es705_slim_startup(substream, dai);
  26918. -*/
  26919. -
  26920. - return 0;
  26921. -}
  26922. -
  26923. -static void tapan_esxxx_shutdown(struct snd_pcm_substream *substream,
  26924. - struct snd_soc_dai *dai)
  26925. -{
  26926. - tapan_shutdown(substream, dai);
  26927. -
  26928. -/*
  26929. - if (es705_remote_route_enable(dai))
  26930. - es705_slim_shutdown(substream, dai);
  26931. -*/
  26932. -}
  26933. -
  26934. -static int tapan_esxxx_hw_params(struct snd_pcm_substream *substream,
  26935. - struct snd_pcm_hw_params *params,
  26936. - struct snd_soc_dai *dai)
  26937. +int tapan_digital_mute(struct snd_soc_dai *dai, int mute)
  26938. {
  26939. - int rc = 0;
  26940. - pr_info("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
  26941. - dai->name, dai->id, params_rate(params),
  26942. - params_channels(params));
  26943. -
  26944. - rc = tapan_hw_params(substream, params, dai);
  26945. + struct snd_soc_codec *codec = NULL;
  26946. + u16 tx_vol_ctl_reg = 0;
  26947. + u8 decimator = 0, i;
  26948. + struct tapan_priv *tapan_p;
  26949.  
  26950. - if (remote_route_enable(dai))
  26951. - rc = slim_hw_params(substream, params, dai);
  26952. + pr_debug("%s: Digital Mute val = %d\n", __func__, mute);
  26953.  
  26954. - return rc;
  26955. -}
  26956. -static int tapan_esxxx_set_channel_map(struct snd_soc_dai *dai,
  26957. - unsigned int tx_num, unsigned int *tx_slot,
  26958. - unsigned int rx_num, unsigned int *rx_slot)
  26959. -
  26960. -{
  26961. - unsigned int tapan_tx_num = 0;
  26962. - unsigned int tapan_tx_slot[6];
  26963. - unsigned int tapan_rx_num = 0;
  26964. - unsigned int tapan_rx_slot[6];
  26965. - int rc = 0;
  26966. - pr_info("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n",
  26967. - __func__, dai->name, dai->id, tx_num, rx_num);
  26968. -
  26969. - if (remote_route_enable(dai)) {
  26970. - rc = tapan_get_channel_map(dai, &tapan_tx_num, tapan_tx_slot,
  26971. - &tapan_rx_num, tapan_rx_slot);
  26972. -
  26973. - rc = tapan_set_channel_map(dai, tx_num, tapan_tx_slot, rx_num, tapan_rx_slot);
  26974. -
  26975. - rc = slim_set_channel_map(dai, tx_num, tx_slot, rx_num,
  26976. - rx_slot);
  26977. - } else
  26978. - rc = tapan_set_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  26979. -
  26980. - return rc;
  26981. -}
  26982. -
  26983. -static int tapan_esxxx_get_channel_map(struct snd_soc_dai *dai,
  26984. - unsigned int *tx_num, unsigned int *tx_slot,
  26985. - unsigned int *rx_num, unsigned int *rx_slot)
  26986. -
  26987. -{
  26988. - int rc = 0;
  26989. -
  26990. - pr_info("%s(): dai_name = %s DAI-ID %d tx_ch %d rx_ch %d\n",
  26991. - __func__, dai->name, dai->id, *tx_num, *rx_num);
  26992. -
  26993. - if (remote_route_enable(dai))
  26994. - rc = slim_get_channel_map(dai, tx_num, tx_slot, rx_num,
  26995. - rx_slot);
  26996. - else
  26997. - rc = tapan_get_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  26998. -
  26999. - return rc;
  27000. -}
  27001. -static struct snd_soc_dai_ops tapan_dai_ops = {
  27002. - .startup = tapan_esxxx_startup, /* tapan_startup, */
  27003. - .shutdown = tapan_esxxx_shutdown, /* tapan_shutdown, */
  27004. - .hw_params = tapan_esxxx_hw_params, /* tapan_hw_params, */
  27005. - .set_sysclk = tapan_set_dai_sysclk,
  27006. - .set_fmt = tapan_set_dai_fmt,
  27007. - .set_channel_map = tapan_esxxx_set_channel_map,
  27008. - /* tapan_set_channel_map, */
  27009. - .get_channel_map = tapan_esxxx_get_channel_map,
  27010. - /* tapan_get_channel_map, */
  27011. -};
  27012. -#elif defined(CONFIG_SND_SOC_ES325_ATLANTIC)
  27013. -static int tapan_es325_hw_params(struct snd_pcm_substream *substream,
  27014. - struct snd_pcm_hw_params *params,
  27015. - struct snd_soc_dai *dai)
  27016. -{
  27017. - int rc = 0;
  27018. - dev_err(dai->dev,"%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
  27019. - dai->name, dai->id, params_rate(params),
  27020. - params_channels(params));
  27021. -
  27022. - rc = tapan_hw_params(substream, params, dai);
  27023. -
  27024. - if (es325_remote_route_enable(dai))
  27025. - rc = es325_slim_hw_params(substream, params, dai);
  27026. -
  27027. - return rc;
  27028. -}
  27029. -
  27030. -#define SLIM_BUGFIX
  27031. -static int tapan_es325_set_channel_map(struct snd_soc_dai *dai,
  27032. - unsigned int tx_num, unsigned int *tx_slot,
  27033. - unsigned int rx_num, unsigned int *rx_slot)
  27034. -
  27035. -{
  27036. -#if !defined(SLIM_BUGFIX)
  27037. - unsigned int tapan_tx_num = 0;
  27038. -#endif
  27039. -#if !defined(SLIM_BUGFIX)
  27040. - unsigned int tapan_rx_num = 0;
  27041. -#endif
  27042. -
  27043. -#if defined(SLIM_BUGFIX)
  27044. - unsigned int temp_tx_num = 0;
  27045. - unsigned int temp_rx_num = 0;
  27046. -#endif
  27047. - int rc = 0;
  27048. -
  27049. - if (es325_remote_route_enable(dai)) {
  27050. - unsigned int tapan_tx_slot[6];
  27051. - unsigned int tapan_rx_slot[6];
  27052. -#if defined(SLIM_BUGFIX)
  27053. - rc = tapan_get_channel_map(dai, &temp_tx_num, tapan_tx_slot,
  27054. - &temp_rx_num, tapan_rx_slot);
  27055. - if (rc < 0 )
  27056. - pr_err ("error statement");
  27057. - goto out;
  27058. -
  27059. -#else
  27060. - rc = tapan_get_channel_map(dai, &tapan_tx_num, tapan_tx_slot,
  27061. - &tapan_rx_num, tapan_rx_slot);
  27062. - if (rc < 0 )
  27063. - pr_err ("error statement");
  27064. - goto out;
  27065. -
  27066. -#endif
  27067. -
  27068. - rc = tapan_set_channel_map(dai, tx_num, tapan_tx_slot, rx_num, tapan_rx_slot);
  27069. - if (rc < 0 )
  27070. - pr_err ("error statement");
  27071. - goto out;
  27072. -
  27073. - rc = es325_slim_set_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  27074. - if (rc < 0 )
  27075. - pr_err ("error statement");
  27076. - goto out;
  27077. -
  27078. -
  27079. - } else
  27080. - rc = tapan_set_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  27081. - if (rc < 0 )
  27082. - pr_err ("error statement");
  27083. - goto out;
  27084. -
  27085. -out:
  27086. - return rc;
  27087. -}
  27088. -static int tapan_es325_get_channel_map(struct snd_soc_dai *dai,
  27089. - unsigned int *tx_num, unsigned int *tx_slot,
  27090. - unsigned int *rx_num, unsigned int *rx_slot)
  27091. + if (!dai || !dai->codec) {
  27092. + pr_err("%s: Invalid params\n", __func__);
  27093. + return -EINVAL;
  27094. + }
  27095. + codec = dai->codec;
  27096. + tapan_p = snd_soc_codec_get_drvdata(codec);
  27097.  
  27098. -{
  27099. - int rc = 0;
  27100. + if (dai->id != AIF1_CAP) {
  27101. + dev_dbg(codec->dev, "%s: Not capture use case skip\n",
  27102. + __func__);
  27103. + return 0;
  27104. + }
  27105.  
  27106. - if (es325_remote_route_enable(dai))
  27107. - rc = es325_slim_get_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  27108. - else
  27109. - rc = tapan_get_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  27110. + mute = (mute) ? 1 : 0;
  27111. + if (!mute) {
  27112. + /*
  27113. + * 5 ms is an emperical value for the mute time
  27114. + * that was arrived by checking the pop level
  27115. + * to be inaudible
  27116. + */
  27117. + usleep_range(5000, 5010);
  27118. + }
  27119.  
  27120. - return rc;
  27121. + for (i = 0; i < NUM_DECIMATORS; i++) {
  27122. + if (tapan_p->dec_active[i])
  27123. + decimator = i + 1;
  27124. + if (decimator && decimator <= NUM_DECIMATORS) {
  27125. + pr_debug("%s: Mute = %d Decimator = %d", __func__,
  27126. + mute, decimator);
  27127. + tx_vol_ctl_reg = TAPAN_A_CDC_TX1_VOL_CTL_CFG +
  27128. + 8 * (decimator - 1);
  27129. + snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, mute);
  27130. + }
  27131. + decimator = 0;
  27132. + }
  27133. + return 0;
  27134. }
  27135.  
  27136. static struct snd_soc_dai_ops tapan_dai_ops = {
  27137. .startup = tapan_startup,
  27138. .shutdown = tapan_shutdown,
  27139. - .hw_params = tapan_es325_hw_params, /* tabla_hw_params, */
  27140. - .set_sysclk = tapan_set_dai_sysclk,
  27141. - .set_fmt = tapan_set_dai_fmt,
  27142. - .set_channel_map = tapan_set_channel_map, /* tabla_set_channel_map, */
  27143. - .get_channel_map = tapan_es325_get_channel_map, /* tabla_get_channel_map, */
  27144. -};
  27145. -static struct snd_soc_dai_ops tapan_es325_dai_ops = {
  27146. - .startup = tapan_startup,
  27147. - .hw_params = tapan_es325_hw_params,
  27148. - .set_channel_map = tapan_es325_set_channel_map,
  27149. - .get_channel_map = tapan_es325_get_channel_map,
  27150. -};
  27151. -#else
  27152. -static struct snd_soc_dai_ops tapan_dai_ops = {
  27153. - .startup = tapan_startup,
  27154. - .shutdown = tapan_shutdown,
  27155. .hw_params = tapan_hw_params,
  27156. .set_sysclk = tapan_set_dai_sysclk,
  27157. .set_fmt = tapan_set_dai_fmt,
  27158. .set_channel_map = tapan_set_channel_map,
  27159. .get_channel_map = tapan_get_channel_map,
  27160. + .digital_mute = tapan_digital_mute,
  27161. };
  27162. -#endif
  27163.  
  27164. static struct snd_soc_dai_driver tapan9302_dai[] = {
  27165. {
  27166. @@ -4479,50 +4239,6 @@ static struct snd_soc_dai_driver tapan9302_dai[] = {
  27167. },
  27168. .ops = &tapan_dai_ops,
  27169. },
  27170. -#ifdef CONFIG_SND_SOC_ES325_ATLANTIC
  27171. - {
  27172. - .name = "tapan_es325_rx1",
  27173. - .id = AIF1_PB + ES325_DAI_ID_OFFSET,
  27174. - .playback = {
  27175. - .stream_name = "AIF1 Playback",
  27176. - .rates = WCD9306_RATES,
  27177. - .formats = TAPAN_FORMATS,
  27178. - .rate_max = 192000,
  27179. - .rate_min = 8000,
  27180. - .channels_min = 1,
  27181. - .channels_max = 2,
  27182. - },
  27183. - .ops = &tapan_es325_dai_ops,
  27184. - },
  27185. - {
  27186. - .name = "tapan_es325_tx1",
  27187. - .id = AIF1_CAP + ES325_DAI_ID_OFFSET,
  27188. - .capture = {
  27189. - .stream_name = "AIF1 Capture",
  27190. - .rates = WCD9306_RATES,
  27191. - .formats = TAPAN_FORMATS,
  27192. - .rate_max = 192000,
  27193. - .rate_min = 8000,
  27194. - .channels_min = 1,
  27195. - .channels_max = 2,
  27196. - },
  27197. - .ops = &tapan_es325_dai_ops,
  27198. - },
  27199. - {
  27200. - .name = "tapan_es325_rx2",
  27201. - .id = AIF2_PB + ES325_DAI_ID_OFFSET,
  27202. - .playback = {
  27203. - .stream_name = "AIF2 Playback",
  27204. - .rates = WCD9306_RATES,
  27205. - .formats = TAPAN_FORMATS,
  27206. - .rate_max = 192000,
  27207. - .rate_min = 8000,
  27208. - .channels_min = 1,
  27209. - .channels_max = 2,
  27210. - },
  27211. - .ops = &tapan_es325_dai_ops,
  27212. - },
  27213. -#endif
  27214. };
  27215.  
  27216. static struct snd_soc_dai_driver tapan_dai[] = {
  27217. @@ -4710,21 +4426,11 @@ static int tapan_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
  27218. case SND_SOC_DAPM_POST_PMU:
  27219. dai->bus_down_in_recovery = false;
  27220. (void) tapan_codec_enable_slim_chmask(dai, true);
  27221. -#if defined(CONFIG_SND_SOC_ES705)
  27222. - ret = remote_cfg_slim_rx(w->shift);
  27223. -#elif defined(CONFIG_SND_SOC_ES325_ATLANTIC)
  27224. - ret = es325_remote_cfg_slim_rx(w->shift);
  27225. -#endif
  27226. ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
  27227. dai->rate, dai->bit_width,
  27228. &dai->grph);
  27229. break;
  27230. case SND_SOC_DAPM_POST_PMD:
  27231. -#if defined(CONFIG_SND_SOC_ES705)
  27232. - ret = remote_close_slim_rx(w->shift);
  27233. -#elif defined(CONFIG_SND_SOC_ES325_ATLANTIC)
  27234. - ret = es325_remote_close_slim_rx(w->shift);
  27235. -#endif
  27236. ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
  27237. dai->grph);
  27238. if (!dai->bus_down_in_recovery)
  27239. @@ -4756,14 +4462,11 @@ static int tapan_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
  27240. struct wcd9xxx *core;
  27241. struct snd_soc_codec *codec = w->codec;
  27242. struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
  27243. - int ret = 0;
  27244. + u32 ret = 0;
  27245. struct wcd9xxx_codec_dai_data *dai;
  27246.  
  27247. core = dev_get_drvdata(codec->dev->parent);
  27248. - if (!core) {
  27249. - dev_err(codec->dev,"core is NULL\n");
  27250. - return -ENOMEM;
  27251. - }
  27252. +
  27253. dev_dbg(codec->dev, "%s: event called! codec name %s\n",
  27254. __func__, w->codec->name);
  27255. dev_dbg(codec->dev, "%s: num_dai %d stream name %s\n",
  27256. @@ -4783,18 +4486,8 @@ static int tapan_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
  27257. ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
  27258. dai->rate, dai->bit_width,
  27259. &dai->grph);
  27260. -#if defined(CONFIG_SND_SOC_ES705)
  27261. - ret = remote_cfg_slim_tx(w->shift);
  27262. -#elif defined(CONFIG_SND_SOC_ES325_ATLANTIC)
  27263. - ret = es325_remote_cfg_slim_tx(w->shift);
  27264. -#endif
  27265. break;
  27266. case SND_SOC_DAPM_POST_PMD:
  27267. -#if defined(CONFIG_SND_SOC_ES705)
  27268. - ret = remote_close_slim_tx(w->shift);
  27269. -#elif defined(CONFIG_SND_SOC_ES325_ATLANTIC)
  27270. - ret = es325_remote_close_slim_tx(w->shift);
  27271. -#endif
  27272. ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
  27273. dai->grph);
  27274. if (!dai->bus_down_in_recovery)
  27275. @@ -5013,7 +4706,6 @@ static int tapan_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
  27276. return 0;
  27277. }
  27278.  
  27279. -
  27280. static const struct snd_soc_dapm_widget tapan_9306_dapm_widgets[] = {
  27281. /* RX4 MIX1 mux inputs */
  27282. SND_SOC_DAPM_MUX("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
  27283. @@ -5078,13 +4770,13 @@ static const struct snd_soc_dapm_widget tapan_9306_dapm_widgets[] = {
  27284. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27285. SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
  27286.  
  27287. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 External", SND_SOC_NOPM, 3, 0,
  27288. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 External", SND_SOC_NOPM, 7, 0,
  27289. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27290. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27291. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal1", SND_SOC_NOPM, 3, 0,
  27292. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal1", SND_SOC_NOPM, 7, 0,
  27293. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27294. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27295. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal2", SND_SOC_NOPM, 3, 0,
  27296. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal2", SND_SOC_NOPM, 7, 0,
  27297. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27298. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27299.  
  27300. @@ -5309,11 +5001,10 @@ static const struct snd_soc_dapm_widget tapan_common_dapm_widgets[] = {
  27301. SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
  27302. SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
  27303.  
  27304. - SND_SOC_DAPM_SUPPLY("LDO_H", SND_SOC_NOPM, 0, 0,
  27305. - tapan_codec_enable_ldo_h, SND_SOC_DAPM_POST_PMU |
  27306. - SND_SOC_DAPM_POST_PMD),
  27307. + SND_SOC_DAPM_SUPPLY("LDO_H", SND_SOC_NOPM, 7, 0,
  27308. + tapan_codec_enable_ldo_h,
  27309. + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
  27310.  
  27311. -#ifndef CONFIG_ARCH_MSM8226
  27312. /*
  27313. * DAPM 'LDO_H Standalone' is to be powered by mbhc driver after
  27314. * acquring codec_resource lock.
  27315. @@ -5322,24 +5013,18 @@ static const struct snd_soc_dapm_widget tapan_common_dapm_widgets[] = {
  27316. SND_SOC_DAPM_SUPPLY("LDO_H Standalone", SND_SOC_NOPM, 7, 0,
  27317. __tapan_codec_enable_ldo_h,
  27318. SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
  27319. -#endif
  27320. +
  27321. SND_SOC_DAPM_INPUT("AMIC1"),
  27322. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", SND_SOC_NOPM, 1, 0,
  27323. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", SND_SOC_NOPM, 7, 0,
  27324. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27325. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27326. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal1", SND_SOC_NOPM, 1, 0,
  27327. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal1", SND_SOC_NOPM, 7, 0,
  27328. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27329. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27330. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal2", SND_SOC_NOPM, 1, 0,
  27331. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal2", SND_SOC_NOPM, 7, 0,
  27332. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27333. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27334.  
  27335. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Power External",
  27336. - SND_SOC_NOPM, 2, 0,
  27337. - tapan_codec_enable_micbias_power,
  27338. - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
  27339. - SND_SOC_DAPM_POST_PMD),
  27340. -
  27341. SND_SOC_DAPM_ADC_E("ADC1", NULL, TAPAN_A_TX_1_EN, 7, 0,
  27342. tapan_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
  27343. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27344. @@ -5358,24 +5043,24 @@ static const struct snd_soc_dapm_widget tapan_common_dapm_widgets[] = {
  27345. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27346.  
  27347. SND_SOC_DAPM_INPUT("AMIC2"),
  27348. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", SND_SOC_NOPM, 2, 0,
  27349. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", SND_SOC_NOPM, 7, 0,
  27350. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27351. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27352. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal1", SND_SOC_NOPM, 2, 0,
  27353. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal1", SND_SOC_NOPM, 7, 0,
  27354. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27355. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27356. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal2", SND_SOC_NOPM, 2, 0,
  27357. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal2", SND_SOC_NOPM, 7, 0,
  27358. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27359. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27360. - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal3", SND_SOC_NOPM, 2, 0,
  27361. + SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal3", SND_SOC_NOPM, 7, 0,
  27362. tapan_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
  27363. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27364. -#ifndef CONFIG_ARCH_MSM8226
  27365. +
  27366. SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS2_EXTERNAL_STANDALONE, SND_SOC_NOPM,
  27367. 7, 0, tapan_codec_enable_micbias,
  27368. SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
  27369. SND_SOC_DAPM_POST_PMD),
  27370. -#endif
  27371. +
  27372. SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
  27373. AIF1_CAP, 0, tapan_codec_enable_slimtx,
  27374. SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27375. @@ -5398,10 +5083,12 @@ static const struct snd_soc_dapm_widget tapan_common_dapm_widgets[] = {
  27376. SND_SOC_DAPM_POST_PMD),
  27377.  
  27378. /* Sidetone */
  27379. - //SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
  27380. - SND_SOC_DAPM_MUX_E("IIR1 INP1 MUX", TAPAN_A_CDC_IIR1_GAIN_B1_CTL, 0, 0,&iir1_inp1_mux, tapan_codec_iir_mux_event,SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27381. - //SND_SOC_DAPM_PGA("IIR1", TAPAN_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
  27382. - SND_SOC_DAPM_PGA_E("IIR1", TAPAN_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0,tapan_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
  27383. + SND_SOC_DAPM_MUX_E("IIR1 INP1 MUX", TAPAN_A_CDC_IIR1_GAIN_B1_CTL, 0, 0,
  27384. + &iir1_inp1_mux, tapan_codec_iir_mux_event,
  27385. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  27386. +
  27387. + SND_SOC_DAPM_PGA_E("IIR1", TAPAN_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0,
  27388. + tapan_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
  27389.  
  27390. SND_SOC_DAPM_MUX_E("IIR1 INP2 MUX", TAPAN_A_CDC_IIR1_GAIN_B2_CTL, 0, 0,
  27391. &iir1_inp2_mux, tapan_codec_iir_mux_event,
  27392. @@ -5545,10 +5232,10 @@ static int tapan_handle_pdata(struct tapan_priv *tapan)
  27393. rc = -ENODEV;
  27394. goto done;
  27395. }
  27396. -
  27397. txfe_bypass = pdata->amic_settings.txfe_enable;
  27398. txfe_buff = pdata->amic_settings.txfe_buff;
  27399. flag = pdata->amic_settings.use_pdata;
  27400. +
  27401. /* Make sure settings are correct */
  27402. if ((pdata->micbias.ldoh_v > WCD9XXX_LDOH_3P0_V) ||
  27403. (pdata->micbias.bias1_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
  27404. @@ -5765,13 +5452,7 @@ static const struct tapan_reg_mask_val tapan_reg_defaults[] = {
  27405. /*Reduce EAR DAC bias to 70% */
  27406. TAPAN_REG_VAL(TAPAN_A_RX_EAR_BIAS_PA, 0x76),
  27407. /* Reduce LINE DAC bias to 70% */
  27408. -#if defined(CONFIG_SEC_MATISSE_PROJECT) || defined(CONFIG_SEC_T10_PROJECT)
  27409. TAPAN_REG_VAL(TAPAN_A_RX_LINE_BIAS_PA, 0x78),
  27410. -#elif defined(CONFIG_MACH_MS01_EUR_3G)
  27411. - TAPAN_REG_VAL(TAPAN_A_RX_LINE_BIAS_PA, 0x7A),
  27412. -#else
  27413. - TAPAN_REG_VAL(TAPAN_A_RX_LINE_BIAS_PA, 0x7B),
  27414. -#endif
  27415.  
  27416. /*
  27417. * There is a diode to pull down the micbias while doing
  27418. @@ -5805,8 +5486,8 @@ static const struct tapan_reg_mask_val tapan_2_x_reg_reset_values[] = {
  27419. TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_B3_CTL, 0x00),
  27420. TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_BUCK_NCP_VARS, 0x00),
  27421. TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_HD_EAR, 0x00),
  27422. - TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_MIN_EAR, 0x00),
  27423. TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_HD_HPH, 0x00),
  27424. + TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_MIN_EAR, 0x00),
  27425. TAPAN_REG_VAL(TAPAN_A_CDC_CLSH_V_PA_MIN_HPH, 0x00),
  27426. };
  27427.  
  27428. @@ -5865,11 +5546,7 @@ static const struct tapan_reg_mask_val tapan_codec_reg_init_val[] = {
  27429. /* Initialize current threshold to 365MA
  27430. * number of wait and run cycles to 4096
  27431. */
  27432. -#if defined (CONFIG_MACH_MILLETLTE_KOR)
  27433. - {TAPAN_A_RX_HPH_OCP_CTL, 0xEB, 0x6B},
  27434. -#else
  27435. {TAPAN_A_RX_HPH_OCP_CTL, 0xE9, 0x69},
  27436. -#endif
  27437. {TAPAN_A_RX_COM_OCP_COUNT, 0xFF, 0xFF},
  27438. {TAPAN_A_RX_HPH_L_TEST, 0x01, 0x01},
  27439. {TAPAN_A_RX_HPH_R_TEST, 0x01, 0x01},
  27440. @@ -5928,24 +5605,6 @@ static const struct tapan_reg_mask_val tapan_codec_reg_init_val[] = {
  27441. {TAPAN_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
  27442. };
  27443.  
  27444. -#if defined (CONFIG_MACH_RUBENSLTE_OPEN)
  27445. -static const struct tapan_reg_mask_val tapan_codec_reg_mib3_int_rbias_init_val[] = {
  27446. - {TAPAN_A_MICB_3_INT_RBIAS, 0x20, 0x00},
  27447. -};
  27448. -#endif
  27449. -
  27450. -#if defined(CONFIG_MACH_MILLETLTE_VZW)
  27451. -static const struct tapan_reg_mask_val tapan_codec_reg_mib2_ctl_init_val[] = {
  27452. - {TAPAN_A_MICB_2_CTL, 0x01, 0x01},
  27453. -};
  27454. -#endif
  27455. -
  27456. -#if defined(CONFIG_MACH_ATLANTICLTE_ATT) || defined(CONFIG_SEC_ATLANTIC3G_COMMON) || defined(CONFIG_MACH_ATLANTICLTE_USC) || defined(CONFIG_SEC_RUBENS_PROJECT)
  27457. -static const struct tapan_reg_mask_val tapan_codec_reg_hph_ocp_ctl_init_val[] = {
  27458. - {TAPAN_A_RX_HPH_OCP_CTL, 0xEB, 0x6B},
  27459. -};
  27460. -#endif
  27461. -
  27462. void *tapan_get_afe_config(struct snd_soc_codec *codec,
  27463. enum afe_config_type config_type)
  27464. {
  27465. @@ -5986,7 +5645,6 @@ static void tapan_init_slim_slave_cfg(struct snd_soc_codec *codec)
  27466. pr_debug("%s: slimbus logical address 0x%llx\n", __func__, eaddr);
  27467. }
  27468.  
  27469. -extern int system_rev;
  27470. static void tapan_codec_init_reg(struct snd_soc_codec *codec)
  27471. {
  27472. u32 i;
  27473. @@ -5995,27 +5653,6 @@ static void tapan_codec_init_reg(struct snd_soc_codec *codec)
  27474. snd_soc_update_bits(codec, tapan_codec_reg_init_val[i].reg,
  27475. tapan_codec_reg_init_val[i].mask,
  27476. tapan_codec_reg_init_val[i].val);
  27477. -#if defined (CONFIG_MACH_RUBENSLTE_OPEN)
  27478. - snd_soc_update_bits(codec,tapan_codec_reg_mib3_int_rbias_init_val[0].reg,
  27479. - tapan_codec_reg_mib3_int_rbias_init_val[0].mask,
  27480. - tapan_codec_reg_mib3_int_rbias_init_val[0].val);
  27481. -#endif
  27482. -
  27483. -#if defined(CONFIG_MACH_MILLETLTE_VZW)
  27484. - if (system_rev > 1)
  27485. - {
  27486. - snd_soc_update_bits(codec,tapan_codec_reg_mib2_ctl_init_val[0].reg,
  27487. - tapan_codec_reg_mib2_ctl_init_val[0].mask,
  27488. - tapan_codec_reg_mib2_ctl_init_val[0].val);
  27489. - }
  27490. -#endif
  27491. -
  27492. -#if defined(CONFIG_MACH_ATLANTICLTE_ATT) || defined(CONFIG_SEC_ATLANTIC3G_COMMON) || defined(CONFIG_MACH_ATLANTICLTE_USC) || defined(CONFIG_SEC_RUBENS_PROJECT)
  27493. - snd_soc_update_bits(codec,tapan_codec_reg_hph_ocp_ctl_init_val[0].reg,
  27494. - tapan_codec_reg_hph_ocp_ctl_init_val[0].mask,
  27495. - tapan_codec_reg_hph_ocp_ctl_init_val[0].val);
  27496. -#endif
  27497. -
  27498. }
  27499. static void tapan_slim_interface_init_reg(struct snd_soc_codec *codec)
  27500. {
  27501. @@ -6111,7 +5748,6 @@ static void wcd9xxx_prepare_hph_pa(struct wcd9xxx_mbhc *mbhc,
  27502. int i;
  27503. struct snd_soc_codec *codec = mbhc->codec;
  27504. u32 delay;
  27505. - int ret = 0;
  27506.  
  27507. const struct wcd9xxx_reg_mask_val reg_set_paon[] = {
  27508. {WCD9XXX_A_CDC_CLSH_B1_CTL, 0x0F, 0x00},
  27509. @@ -6177,14 +5813,10 @@ static void wcd9xxx_prepare_hph_pa(struct wcd9xxx_mbhc *mbhc,
  27510. delay = 1000;
  27511. else
  27512. delay = 0;
  27513. - ret = wcd9xxx_soc_update_bits_push(codec, lh,
  27514. + wcd9xxx_soc_update_bits_push(codec, lh,
  27515. reg_set_paon[i].reg,
  27516. reg_set_paon[i].mask,
  27517. reg_set_paon[i].val, delay);
  27518. - if (ret < 0) {
  27519. - pr_debug("%s: wcd9xxx_soc_update_bits_push failed\n", __func__);
  27520. - return;
  27521. - }
  27522. }
  27523. pr_debug("%s: PAs are prepared\n", __func__);
  27524. return;
  27525. @@ -6466,16 +6098,11 @@ static int tapan_post_reset_cb(struct wcd9xxx *wcd9xxx)
  27526. rco_clk_rate = TAPAN_MCLK_CLK_12P288MHZ;
  27527. else
  27528. rco_clk_rate = TAPAN_MCLK_CLK_9P6MHZ;
  27529. -#ifndef CONFIG_ARCH_MSM8226
  27530. +
  27531. ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec,
  27532. tapan_enable_mbhc_micbias,
  27533. &mbhc_cb, &cdc_intr_ids, rco_clk_rate,
  27534. TAPAN_CDC_ZDET_SUPPORTED);
  27535. -#else
  27536. - ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec,NULL,
  27537. - &mbhc_cb, &cdc_intr_ids, rco_clk_rate,
  27538. - TAPAN_CDC_ZDET_SUPPORTED);
  27539. -#endif
  27540. if (ret)
  27541. pr_err("%s: mbhc init failed %d\n", __func__, ret);
  27542. else
  27543. @@ -6588,11 +6215,11 @@ static void tapan_enable_config_rco(struct wcd9xxx *core, bool enable)
  27544.  
  27545. }
  27546.  
  27547. -static int tapan_check_wcd9306(struct device *cdc_dev, bool sensed)
  27548. +static bool tapan_check_wcd9306(struct device *cdc_dev, bool sensed)
  27549. {
  27550. struct wcd9xxx *core = dev_get_drvdata(cdc_dev->parent);
  27551. u8 reg_val;
  27552. - int ret = 1;
  27553. + bool ret = true;
  27554. unsigned long timeout;
  27555. bool timedout;
  27556. struct wcd9xxx_core_resource *core_res = &core->core_res;
  27557. @@ -6619,7 +6246,7 @@ static int tapan_check_wcd9306(struct device *cdc_dev, bool sensed)
  27558. if (wcd9xxx_reg_read(core_res, TAPAN_A_QFUSE_DATA_OUT1) ||
  27559. wcd9xxx_reg_read(core_res, TAPAN_A_QFUSE_DATA_OUT2)) {
  27560. dev_info(cdc_dev, "%s: wcd9302 detected\n", __func__);
  27561. - ret = 0;
  27562. + ret = false;
  27563. } else
  27564. dev_info(cdc_dev, "%s: wcd9306 detected\n", __func__);
  27565.  
  27566. @@ -6627,16 +6254,6 @@ static int tapan_check_wcd9306(struct device *cdc_dev, bool sensed)
  27567. return ret;
  27568. };
  27569.  
  27570. -#ifdef CONFIG_ARCH_MSM8226
  27571. -bool codec_probe_done = false;
  27572. -
  27573. -bool is_codec_probe_done(void)
  27574. -{
  27575. - return codec_probe_done;
  27576. -}
  27577. -EXPORT_SYMBOL(is_codec_probe_done);
  27578. -#endif
  27579. -
  27580. static int tapan_codec_probe(struct snd_soc_codec *codec)
  27581. {
  27582. struct wcd9xxx *control;
  27583. @@ -6671,7 +6288,6 @@ static int tapan_codec_probe(struct snd_soc_codec *codec)
  27584.  
  27585. snd_soc_codec_set_drvdata(codec, tapan);
  27586.  
  27587. - tapan->ldo_h_count = 0;
  27588. /* codec resmgr module init */
  27589. wcd9xxx = codec->control_data;
  27590. core_res = &wcd9xxx->core_res;
  27591. @@ -6681,7 +6297,6 @@ static int tapan_codec_probe(struct snd_soc_codec *codec)
  27592. WCD9XXX_CDC_TYPE_TAPAN);
  27593. if (ret) {
  27594. pr_err("%s: wcd9xxx init failed %d\n", __func__, ret);
  27595. - kfree(tapan);
  27596. return ret;
  27597. }
  27598.  
  27599. @@ -6705,10 +6320,8 @@ static int tapan_codec_probe(struct snd_soc_codec *codec)
  27600. else
  27601. rco_clk_rate = TAPAN_MCLK_CLK_9P6MHZ;
  27602.  
  27603. - tapan->micb_2_ref_cnt = 0;
  27604. -
  27605. -#ifndef CONFIG_SAMSUNG_JACK //Comment Disable MBHC
  27606. - ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec, NULL,
  27607. + ret = wcd9xxx_mbhc_init(&tapan->mbhc, &tapan->resmgr, codec,
  27608. + tapan_enable_mbhc_micbias,
  27609. &mbhc_cb, &cdc_intr_ids, rco_clk_rate,
  27610. TAPAN_CDC_ZDET_SUPPORTED);
  27611.  
  27612. @@ -6716,7 +6329,6 @@ static int tapan_codec_probe(struct snd_soc_codec *codec)
  27613. pr_err("%s: mbhc init failed %d\n", __func__, ret);
  27614. return ret;
  27615. }
  27616. -#endif
  27617.  
  27618. tapan->codec = codec;
  27619. for (i = 0; i < COMPANDER_MAX; i++) {
  27620. @@ -6729,6 +6341,7 @@ static int tapan_codec_probe(struct snd_soc_codec *codec)
  27621. tapan->aux_r_gain = 0x1F;
  27622. tapan->ldo_h_users = 0;
  27623. tapan->micb_2_users = 0;
  27624. + tapan->lb_mode = false;
  27625. tapan_update_reg_defaults(codec);
  27626. tapan_update_reg_mclk_rate(wcd9xxx);
  27627. tapan_codec_init_reg(codec);
  27628. @@ -6745,11 +6358,6 @@ static int tapan_codec_probe(struct snd_soc_codec *codec)
  27629. WCD9XXX_BG_CLK_UNLOCK(&tapan->resmgr);
  27630. }
  27631.  
  27632. -#if defined(CONFIG_SND_SOC_ES705)
  27633. - remote_add_codec_controls(codec);
  27634. -#elif defined(CONFIG_SND_SOC_ES325_ATLANTIC)
  27635. - es325_remote_add_codec_controls(codec);
  27636. -#endif
  27637. ptr = kmalloc((sizeof(tapan_rx_chs) +
  27638. sizeof(tapan_tx_chs)), GFP_KERNEL);
  27639. if (!ptr) {
  27640. @@ -6813,10 +6421,6 @@ static int tapan_codec_probe(struct snd_soc_codec *codec)
  27641. if (ret)
  27642. tapan_cleanup_irqs(tapan);
  27643.  
  27644. -#ifdef CONFIG_ARCH_MSM8226
  27645. - codec_probe_done = true;
  27646. -#endif
  27647. -
  27648. return ret;
  27649.  
  27650. err_pdata:
  27651. @@ -6841,10 +6445,8 @@ static int tapan_codec_remove(struct snd_soc_codec *codec)
  27652.  
  27653. tapan_cleanup_irqs(tapan);
  27654.  
  27655. -#ifndef CONFIG_SAMSUNG_JACK //Comment Disable MBHC
  27656. /* cleanup MBHC */
  27657. wcd9xxx_mbhc_deinit(&tapan->mbhc);
  27658. -#endif
  27659. /* cleanup resmgr */
  27660. wcd9xxx_resmgr_deinit(&tapan->resmgr);
  27661.  
  27662. @@ -6903,13 +6505,13 @@ static const struct dev_pm_ops tapan_pm_ops = {
  27663. static int __devinit tapan_probe(struct platform_device *pdev)
  27664. {
  27665. int ret = 0;
  27666. - int is_wcd9306;
  27667. + bool is_wcd9306;
  27668.  
  27669. is_wcd9306 = tapan_check_wcd9306(&pdev->dev, false);
  27670. if (is_wcd9306 < 0) {
  27671. dev_info(&pdev->dev, "%s: cannot find codec type, default to 9306\n",
  27672. __func__);
  27673. - is_wcd9306 = 1;
  27674. + is_wcd9306 = true;
  27675. }
  27676. codec_ver = is_wcd9306 ? WCD9306 : WCD9302;
  27677.  
  27678. diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
  27679. index 69844c0..4474ee0 100644
  27680. --- a/sound/soc/codecs/wcd9320.c
  27681. +++ b/sound/soc/codecs/wcd9320.c
  27682. @@ -19,7 +19,6 @@
  27683. #include <linux/ratelimit.h>
  27684. #include <linux/debugfs.h>
  27685. #include <linux/wait.h>
  27686. -#include <linux/bitops.h>
  27687. #include <linux/mfd/wcd9xxx/core.h>
  27688. #include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
  27689. #include <linux/mfd/wcd9xxx/wcd9320_registers.h>
  27690. @@ -41,26 +40,8 @@
  27691. #include "wcd9320.h"
  27692. #include "wcd9xxx-resmgr.h"
  27693. #include "wcd9xxx-common.h"
  27694. +#include "wcdcal-hwdep.h"
  27695.  
  27696. -#if defined(CONFIG_SND_SOC_ES705)
  27697. -#include "audience/es705-export.h"
  27698. -#elif defined(CONFIG_SND_SOC_ES325)
  27699. -#include "es325-export.h"
  27700. -#endif
  27701. -
  27702. -#if defined(CONFIG_SND_SOC_ES705)
  27703. -
  27704. -#define CONFIG_SND_SOC_ESXXX
  27705. -#define REMOTE_ROUTE_ENABLE_CB es705_remote_route_enable
  27706. -#define SLIM_GET_CHANNEL_MAP_CB es705_slim_get_channel_map
  27707. -#define SLIM_SET_CHANNEL_MAP_CB es705_slim_set_channel_map
  27708. -#define SLIM_HW_PARAMS_CB es705_slim_hw_params
  27709. -#define REMOTE_CFG_SLIM_RX_CB es705_remote_cfg_slim_rx
  27710. -#define REMOTE_CLOSE_SLIM_RX_CB es705_remote_close_slim_rx
  27711. -#define REMOTE_CFG_SLIM_TX_CB es705_remote_cfg_slim_tx
  27712. -#define REMOTE_CLOSE_SLIM_TX_CB es705_remote_close_slim_tx
  27713. -#define REMOTE_ADD_CODEC_CONTROLS_CB es705_remote_add_codec_controls
  27714. -#endif
  27715.  
  27716. #define TAIKO_MAD_SLIMBUS_TX_PORT 12
  27717. #define TAIKO_MAD_AUDIO_FIRMWARE_PATH "wcd9320/wcd9320_mad_audio.bin"
  27718. @@ -81,10 +62,6 @@ static int spkr_drv_wrnd_param_set(const char *val,
  27719. const struct kernel_param *kp);
  27720. static int spkr_drv_wrnd = 1;
  27721.  
  27722. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  27723. -static int sub_mic_rec_delay = 0;
  27724. -#endif
  27725. -
  27726. static struct kernel_param_ops spkr_drv_wrnd_param_ops = {
  27727. .set = spkr_drv_wrnd_param_set,
  27728. .get = param_get_int,
  27729. @@ -477,6 +454,8 @@ struct taiko_priv {
  27730. */
  27731. struct list_head reg_save_restore;
  27732. struct pm_qos_request pm_qos_req;
  27733. + /* cal info for codec */
  27734. + struct fw_info *fw_data;
  27735. };
  27736.  
  27737. static const u32 comp_shift[] = {
  27738. @@ -690,23 +669,6 @@ static int taiko_get_iir_enable_audio_mixer(
  27739. return 0;
  27740. }
  27741.  
  27742. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  27743. -static int taiko_get_sub_mic_delay_set(struct snd_kcontrol *kcontrol,
  27744. - struct snd_ctl_elem_value *ucontrol)
  27745. -{
  27746. - return 0;
  27747. -}
  27748. -
  27749. -static int taiko_put_sub_mic_delay_set(struct snd_kcontrol *kcontrol,
  27750. - struct snd_ctl_elem_value *ucontrol)
  27751. -{
  27752. - sub_mic_rec_delay = ucontrol->value.integer.value[0];
  27753. - pr_info("%s : sub_mic_rec_delay : %d\n", __func__, sub_mic_rec_delay);
  27754. -
  27755. - return 0;
  27756. -}
  27757. -#endif
  27758. -
  27759. static int taiko_put_iir_enable_audio_mixer(
  27760. struct snd_kcontrol *kcontrol,
  27761. struct snd_ctl_elem_value *ucontrol)
  27762. @@ -1104,12 +1066,6 @@ static const char *const taiko_anc_func_text[] = {"OFF", "ON"};
  27763. static const struct soc_enum taiko_anc_func_enum =
  27764. SOC_ENUM_SINGLE_EXT(2, taiko_anc_func_text);
  27765.  
  27766. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  27767. -static const char *const taiko_sub_mic_delay_text[] = {"OFF", "ON"};
  27768. -static const struct soc_enum taiko_sub_mic_delay_enum =
  27769. - SOC_ENUM_SINGLE_EXT(2, taiko_sub_mic_delay_text);
  27770. -#endif
  27771. -
  27772. static const char *const tabla_ear_pa_gain_text[] = {"POS_6_DB", "POS_2_DB"};
  27773. static const struct soc_enum tabla_ear_pa_gain_enum[] = {
  27774. SOC_ENUM_SINGLE_EXT(2, tabla_ear_pa_gain_text),
  27775. @@ -1181,6 +1137,24 @@ static const struct soc_enum class_h_dsm_enum =
  27776. static const struct snd_kcontrol_new class_h_dsm_mux =
  27777. SOC_DAPM_ENUM("CLASS_H_DSM MUX Mux", class_h_dsm_enum);
  27778.  
  27779. +static const char * const rx1_interpolator_text[] = {
  27780. + "ZERO", "RX1 MIX2"
  27781. +};
  27782. +static const struct soc_enum rx1_interpolator_enum =
  27783. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CLK_RX_B1_CTL, 0, 2, rx1_interpolator_text);
  27784. +
  27785. +static const struct snd_kcontrol_new rx1_interpolator =
  27786. + SOC_DAPM_ENUM("RX1 INTERP Mux", rx1_interpolator_enum);
  27787. +
  27788. +static const char * const rx2_interpolator_text[] = {
  27789. + "ZERO", "RX2 MIX2"
  27790. +};
  27791. +static const struct soc_enum rx2_interpolator_enum =
  27792. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CLK_RX_B1_CTL, 1, 2, rx2_interpolator_text);
  27793. +
  27794. +static const struct snd_kcontrol_new rx2_interpolator =
  27795. + SOC_DAPM_ENUM("RX2 INTERP Mux", rx2_interpolator_enum);
  27796. +
  27797. static const char *const taiko_conn_mad_text[] = {
  27798. "ADC_MB", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "NOTUSED1",
  27799. "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6", "NOTUSED2",
  27800. @@ -1360,10 +1334,6 @@ static const struct snd_kcontrol_new taiko_snd_controls[] = {
  27801. taiko_put_anc_slot),
  27802. SOC_ENUM_EXT("ANC Function", taiko_anc_func_enum, taiko_get_anc_func,
  27803. taiko_put_anc_func),
  27804. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  27805. - SOC_ENUM_EXT("SUB_MIC_REC_DELAY", taiko_sub_mic_delay_enum, taiko_get_sub_mic_delay_set,
  27806. - taiko_put_sub_mic_delay_set),
  27807. -#endif
  27808.  
  27809. SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
  27810. SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
  27811. @@ -1573,45 +1543,9 @@ static const struct snd_kcontrol_new taiko_2_x_analog_gain_controls[] = {
  27812. analog_gain),
  27813. };
  27814.  
  27815. -#if defined(CONFIG_MACH_KLTE_JPN) || defined(CONFIG_MACH_KLTE_KOR)
  27816. -extern unsigned int system_rev;
  27817. -#endif
  27818. -
  27819. static int taiko_hph_impedance_get(struct snd_kcontrol *kcontrol,
  27820. struct snd_ctl_elem_value *ucontrol)
  27821. {
  27822. -#if defined(CONFIG_MACH_KLTE_KOR)
  27823. - if (system_rev >= 13) {
  27824. - uint32_t zl, zr;
  27825. - bool hphr;
  27826. - struct soc_multi_mixer_control *mc;
  27827. - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
  27828. - struct taiko_priv *priv = snd_soc_codec_get_drvdata(codec);
  27829. -
  27830. - mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
  27831. -
  27832. - hphr = mc->shift;
  27833. - wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
  27834. - pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr);
  27835. - ucontrol->value.integer.value[0] = hphr ? zr : zl;
  27836. - }
  27837. -#elif defined(CONFIG_MACH_KLTE_JPN)
  27838. - if (system_rev >= 11) {
  27839. - uint32_t zl, zr;
  27840. - bool hphr;
  27841. - struct soc_multi_mixer_control *mc;
  27842. - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
  27843. - struct taiko_priv *priv = snd_soc_codec_get_drvdata(codec);
  27844. -
  27845. - mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
  27846. -
  27847. - hphr = mc->shift;
  27848. - wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
  27849. - pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr);
  27850. - ucontrol->value.integer.value[0] = hphr ? zr : zl;
  27851. - }
  27852. -#else
  27853. -#if !defined(CONFIG_SAMSUNG_JACK) && !defined(CONFIG_MUIC_DET_JACK)
  27854. uint32_t zl, zr;
  27855. bool hphr;
  27856. struct soc_multi_mixer_control *mc;
  27857. @@ -1624,9 +1558,7 @@ static int taiko_hph_impedance_get(struct snd_kcontrol *kcontrol,
  27858. wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
  27859. pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr);
  27860. ucontrol->value.integer.value[0] = hphr ? zr : zl;
  27861. -#endif
  27862. -#endif
  27863. - ucontrol->value.integer.value[0] = 0;
  27864. +
  27865. return 0;
  27866. }
  27867.  
  27868. @@ -1745,6 +1677,21 @@ static const char * const iir_inp1_text[] = {
  27869. "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
  27870. };
  27871.  
  27872. +static const char * const iir_inp2_text[] = {
  27873. + "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
  27874. + "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
  27875. +};
  27876. +
  27877. +static const char * const iir_inp3_text[] = {
  27878. + "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
  27879. + "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
  27880. +};
  27881. +
  27882. +static const char * const iir_inp4_text[] = {
  27883. + "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
  27884. + "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
  27885. +};
  27886. +
  27887. static const struct soc_enum rx_mix1_inp1_chain_enum =
  27888. SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_mix1_text);
  27889.  
  27890. @@ -1893,6 +1840,24 @@ static const struct soc_enum iir1_inp1_mux_enum =
  27891. static const struct soc_enum iir2_inp1_mux_enum =
  27892. SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text);
  27893.  
  27894. +static const struct soc_enum iir1_inp2_mux_enum =
  27895. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B2_CTL, 0, 18, iir_inp2_text);
  27896. +
  27897. +static const struct soc_enum iir2_inp2_mux_enum =
  27898. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B2_CTL, 0, 18, iir_inp2_text);
  27899. +
  27900. +static const struct soc_enum iir1_inp3_mux_enum =
  27901. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B3_CTL, 0, 18, iir_inp3_text);
  27902. +
  27903. +static const struct soc_enum iir2_inp3_mux_enum =
  27904. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B3_CTL, 0, 18, iir_inp3_text);
  27905. +
  27906. +static const struct soc_enum iir1_inp4_mux_enum =
  27907. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B4_CTL, 0, 18, iir_inp4_text);
  27908. +
  27909. +static const struct soc_enum iir2_inp4_mux_enum =
  27910. + SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B4_CTL, 0, 18, iir_inp4_text);
  27911. +
  27912. static const struct snd_kcontrol_new rx_mix1_inp1_mux =
  27913. SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
  27914.  
  27915. @@ -2118,6 +2083,24 @@ static const struct snd_kcontrol_new iir1_inp1_mux =
  27916. static const struct snd_kcontrol_new iir2_inp1_mux =
  27917. SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
  27918.  
  27919. +static const struct snd_kcontrol_new iir1_inp2_mux =
  27920. + SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
  27921. +
  27922. +static const struct snd_kcontrol_new iir2_inp2_mux =
  27923. + SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum);
  27924. +
  27925. +static const struct snd_kcontrol_new iir1_inp3_mux =
  27926. + SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
  27927. +
  27928. +static const struct snd_kcontrol_new iir2_inp3_mux =
  27929. + SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum);
  27930. +
  27931. +static const struct snd_kcontrol_new iir1_inp4_mux =
  27932. + SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum);
  27933. +
  27934. +static const struct snd_kcontrol_new iir2_inp4_mux =
  27935. + SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum);
  27936. +
  27937. static const struct snd_kcontrol_new anc1_mux =
  27938. SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
  27939.  
  27940. @@ -2499,10 +2482,6 @@ static int taiko_codec_enable_adc(struct snd_soc_dapm_widget *w,
  27941.  
  27942. switch (event) {
  27943. case SND_SOC_DAPM_PRE_PMU:
  27944. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  27945. - if ((sub_mic_rec_delay == 1) && ((w->reg) == TAIKO_A_CDC_TX_3_GAIN))
  27946. - usleep_range(400000, 400000);
  27947. -#endif
  27948. taiko_codec_enable_adc_block(codec, 1);
  27949. snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
  27950. 1 << init_bit_shift);
  27951. @@ -2704,8 +2683,11 @@ static int taiko_codec_config_mad(struct snd_soc_codec *codec)
  27952. int ret;
  27953. const struct firmware *fw;
  27954. struct mad_audio_cal *mad_cal;
  27955. + struct firmware_cal *hwdep_cal = NULL;
  27956. + const void *data;
  27957. const char *filename = TAIKO_MAD_AUDIO_FIRMWARE_PATH;
  27958. struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
  27959. + size_t cal_size;
  27960.  
  27961. pr_debug("%s: enter\n", __func__);
  27962. /* wakeup for codec calibration access */
  27963. @@ -2714,24 +2696,46 @@ static int taiko_codec_config_mad(struct snd_soc_codec *codec)
  27964. PM_QOS_DEFAULT_VALUE);
  27965. pm_qos_update_request(&taiko->pm_qos_req,
  27966. msm_cpuidle_get_deep_idle_latency());
  27967. - ret = request_firmware(&fw, filename, codec->dev);
  27968. - if (ret != 0) {
  27969. - pr_err("Failed to acquire MAD firwmare data %s: %d\n", filename,
  27970. - ret);
  27971. + if (!taiko->fw_data) {
  27972. + dev_err(codec->dev, "%s: invalid cal data\n",
  27973. + __func__);
  27974. return -ENODEV;
  27975. }
  27976. -
  27977. - if (fw->size < sizeof(struct mad_audio_cal)) {
  27978. - pr_err("%s: incorrect firmware size %u\n", __func__, fw->size);
  27979. - release_firmware(fw);
  27980. - return -ENOMEM;
  27981. + hwdep_cal = wcdcal_get_fw_cal(taiko->fw_data, WCD9XXX_MAD_CAL);
  27982. + if (hwdep_cal) {
  27983. + data = hwdep_cal->data;
  27984. + cal_size = hwdep_cal->size;
  27985. + dev_dbg(codec->dev, "%s: using hwdep calibration\n",
  27986. + __func__);
  27987. + } else {
  27988. + ret = request_firmware(&fw, filename, codec->dev);
  27989. + if (ret != 0) {
  27990. + pr_err("Failed to acquire MAD firwmare data %s: %d\n",
  27991. + filename, ret);
  27992. + return -ENODEV;
  27993. + }
  27994. + if (!fw) {
  27995. + dev_err(codec->dev, "failed to get mad fw");
  27996. + return -ENODEV;
  27997. + }
  27998. + data = fw->data;
  27999. + cal_size = fw->size;
  28000. + dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
  28001. + __func__);
  28002. + }
  28003. + if (cal_size < sizeof(struct mad_audio_cal)) {
  28004. + pr_err("%s: incorrect hwdep cal size %zu\n",
  28005. + __func__, cal_size);
  28006. + ret = -ENOMEM;
  28007. + goto err;
  28008. }
  28009.  
  28010. - mad_cal = (struct mad_audio_cal *)(fw->data);
  28011. + mad_cal = (struct mad_audio_cal *)(data);
  28012. if (!mad_cal) {
  28013. - pr_err("%s: Invalid calibration data\n", __func__);
  28014. - release_firmware(fw);
  28015. - return -EINVAL;
  28016. + dev_err(codec->dev, "%s: Invalid calibration data\n",
  28017. + __func__);
  28018. + ret = -EINVAL;
  28019. + goto err;
  28020. }
  28021.  
  28022. snd_soc_write(codec, TAIKO_A_CDC_MAD_MAIN_CTL_2,
  28023. @@ -2781,11 +2785,13 @@ static int taiko_codec_config_mad(struct snd_soc_codec *codec)
  28024. snd_soc_write(codec, TAIKO_A_CDC_MAD_ULTR_CTL_6,
  28025. mad_cal->ultrasound_info.rms_threshold_msb);
  28026.  
  28027. - release_firmware(fw);
  28028. pr_debug("%s: leave ret %d\n", __func__, ret);
  28029. pm_qos_update_request(&taiko->pm_qos_req,
  28030. PM_QOS_DEFAULT_VALUE);
  28031. pm_qos_remove_request(&taiko->pm_qos_req);
  28032. +err:
  28033. + if (!hwdep_cal)
  28034. + release_firmware(fw);
  28035. return ret;
  28036. }
  28037.  
  28038. @@ -3279,8 +3285,8 @@ static int taiko_hphl_dac_event(struct snd_soc_dapm_widget *w,
  28039. {
  28040. struct snd_soc_codec *codec = w->codec;
  28041. struct taiko_priv *taiko_p = snd_soc_codec_get_drvdata(codec);
  28042. - /* uint32_t impedl, impedr; */
  28043. - /* int ret = 0; */
  28044. + uint32_t impedl, impedr;
  28045. + int ret = 0;
  28046.  
  28047. pr_debug("%s %s %d\n", __func__, w->name, event);
  28048.  
  28049. @@ -3292,18 +3298,18 @@ static int taiko_hphl_dac_event(struct snd_soc_dapm_widget *w,
  28050. WCD9XXX_CLSH_STATE_HPHL,
  28051. WCD9XXX_CLSH_REQ_ENABLE,
  28052. WCD9XXX_CLSH_EVENT_PRE_DAC);
  28053. -
  28054. - /*ret = wcd9xxx_mbhc_get_impedance(&taiko_p->mbhc,
  28055. + ret = wcd9xxx_mbhc_get_impedance(&taiko_p->mbhc,
  28056. &impedl, &impedr);
  28057. - if (!ret) */
  28058. - wcd9xxx_clsh_imped_config(codec, 0);
  28059. - /* else
  28060. + if (!ret)
  28061. + wcd9xxx_clsh_imped_config(codec, impedl);
  28062. + else
  28063. dev_err(codec->dev, "Failed to get mbhc impedance %d\n",
  28064. - ret); */
  28065. + ret);
  28066. break;
  28067. case SND_SOC_DAPM_POST_PMD:
  28068. snd_soc_update_bits(codec, TAIKO_A_CDC_CLK_RDAC_CLK_EN_CTL,
  28069. 0x02, 0x00);
  28070. + break;
  28071. }
  28072. return 0;
  28073. }
  28074. @@ -3342,15 +3348,18 @@ static int taiko_codec_enable_anc(struct snd_soc_dapm_widget *w,
  28075. const char *filename;
  28076. const struct firmware *fw;
  28077. int i;
  28078. - int ret;
  28079. + int ret = 0;
  28080. int num_anc_slots;
  28081. struct wcd9xxx_anc_header *anc_head;
  28082. struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
  28083. + struct firmware_cal *hwdep_cal = NULL;
  28084. u32 anc_writes_size = 0;
  28085. int anc_size_remaining;
  28086. u32 *anc_ptr;
  28087. u16 reg;
  28088. u8 mask, val, old_val;
  28089. + size_t cal_size;
  28090. + const void *data;
  28091.  
  28092.  
  28093. if (taiko->anc_func == 0)
  28094. @@ -3359,38 +3368,53 @@ static int taiko_codec_enable_anc(struct snd_soc_dapm_widget *w,
  28095. switch (event) {
  28096. case SND_SOC_DAPM_PRE_PMU:
  28097. filename = "wcd9320/wcd9320_anc.bin";
  28098. + hwdep_cal = wcdcal_get_fw_cal(taiko->fw_data, WCD9XXX_ANC_CAL);
  28099. + if (hwdep_cal) {
  28100. + data = hwdep_cal->data;
  28101. + cal_size = hwdep_cal->size;
  28102. + dev_dbg(codec->dev, "%s: using hwdep calibration\n",
  28103. + __func__);
  28104. + } else {
  28105. + ret = request_firmware(&fw, filename, codec->dev);
  28106. + if (ret != 0) {
  28107. + dev_err(codec->dev, "Failed to acquire ANC data: %d\n",
  28108. + ret);
  28109. + return -ENODEV;
  28110. + }
  28111. + if (!fw) {
  28112. + dev_err(codec->dev, "failed to get anc fw");
  28113. + return -ENODEV;
  28114. + }
  28115. + data = fw->data;
  28116. + cal_size = fw->size;
  28117. + dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
  28118. + __func__);
  28119.  
  28120. - ret = request_firmware(&fw, filename, codec->dev);
  28121. - if (ret != 0) {
  28122. - dev_err(codec->dev, "Failed to acquire ANC data: %d\n",
  28123. - ret);
  28124. - return -ENODEV;
  28125. }
  28126.  
  28127. - if (fw->size < sizeof(struct wcd9xxx_anc_header)) {
  28128. + if (cal_size < sizeof(struct wcd9xxx_anc_header)) {
  28129. dev_err(codec->dev, "Not enough data\n");
  28130. - release_firmware(fw);
  28131. - return -ENOMEM;
  28132. + goto err;
  28133. }
  28134.  
  28135. /* First number is the number of register writes */
  28136. - anc_head = (struct wcd9xxx_anc_header *)(fw->data);
  28137. - anc_ptr = (u32 *)((u32)fw->data +
  28138. + anc_head = (struct wcd9xxx_anc_header *)(data);
  28139. + anc_ptr = (u32 *)(data +
  28140. sizeof(struct wcd9xxx_anc_header));
  28141. - anc_size_remaining = fw->size -
  28142. + anc_size_remaining = cal_size -
  28143. sizeof(struct wcd9xxx_anc_header);
  28144. num_anc_slots = anc_head->num_anc_slots;
  28145.  
  28146. if (taiko->anc_slot >= num_anc_slots) {
  28147. dev_err(codec->dev, "Invalid ANC slot selected\n");
  28148. - release_firmware(fw);
  28149. - return -EINVAL;
  28150. + ret = -EINVAL;
  28151. + goto err;
  28152. }
  28153. for (i = 0; i < num_anc_slots; i++) {
  28154. if (anc_size_remaining < TAIKO_PACKED_REG_SIZE) {
  28155. dev_err(codec->dev, "Invalid register format\n");
  28156. - release_firmware(fw);
  28157. - return -EINVAL;
  28158. + ret = -EINVAL;
  28159. + goto err;
  28160. }
  28161. anc_writes_size = (u32)(*anc_ptr);
  28162. anc_size_remaining -= sizeof(u32);
  28163. @@ -3399,8 +3423,8 @@ static int taiko_codec_enable_anc(struct snd_soc_dapm_widget *w,
  28164. if (anc_writes_size * TAIKO_PACKED_REG_SIZE
  28165. > anc_size_remaining) {
  28166. dev_err(codec->dev, "Invalid register format\n");
  28167. - release_firmware(fw);
  28168. - return -ENOMEM;
  28169. + ret = -EINVAL;
  28170. + goto err;
  28171. }
  28172.  
  28173. if (taiko->anc_slot == i)
  28174. @@ -3412,8 +3436,8 @@ static int taiko_codec_enable_anc(struct snd_soc_dapm_widget *w,
  28175. }
  28176. if (i == num_anc_slots) {
  28177. dev_err(codec->dev, "Selected ANC slot not present\n");
  28178. - release_firmware(fw);
  28179. - return -ENOMEM;
  28180. + ret = -EINVAL;
  28181. + goto err;
  28182. }
  28183. for (i = 0; i < anc_writes_size; i++) {
  28184. TAIKO_CODEC_UNPACK_ENTRY(anc_ptr[i], reg,
  28185. @@ -3422,7 +3446,8 @@ static int taiko_codec_enable_anc(struct snd_soc_dapm_widget *w,
  28186. snd_soc_write(codec, reg, (old_val & ~mask) |
  28187. (val & mask));
  28188. }
  28189. - release_firmware(fw);
  28190. + if (!hwdep_cal)
  28191. + release_firmware(fw);
  28192. break;
  28193. case SND_SOC_DAPM_PRE_PMD:
  28194. msleep(40);
  28195. @@ -3435,6 +3460,11 @@ static int taiko_codec_enable_anc(struct snd_soc_dapm_widget *w,
  28196. break;
  28197. }
  28198. return 0;
  28199. +err:
  28200. + if (!hwdep_cal)
  28201. + release_firmware(fw);
  28202. + return ret;
  28203. +
  28204. }
  28205.  
  28206. static int taiko_hph_pa_event(struct snd_soc_dapm_widget *w,
  28207. @@ -3808,8 +3838,10 @@ static const struct snd_soc_dapm_route audio_map[] = {
  28208.  
  28209. {"CLASS_H_DSM MUX", "DSM_HPHL_RX1", "RX1 CHAIN"},
  28210.  
  28211. - {"RX1 CHAIN", NULL, "RX1 MIX2"},
  28212. - {"RX2 CHAIN", NULL, "RX2 MIX2"},
  28213. + {"RX1 INTERP", NULL, "RX1 MIX2"},
  28214. + {"RX1 CHAIN", NULL, "RX1 INTERP"},
  28215. + {"RX2 INTERP", NULL, "RX2 MIX2"},
  28216. + {"RX2 CHAIN", NULL, "RX2 INTERP"},
  28217.  
  28218. {"RX1 MIX2", NULL, "ANC1 MUX"},
  28219. {"RX2 MIX2", NULL, "ANC2 MUX"},
  28220. @@ -4129,6 +4161,120 @@ static const struct snd_soc_dapm_route audio_map[] = {
  28221. {"IIR2 INP1 MUX", "RX6", "SLIM RX6"},
  28222. {"IIR2 INP1 MUX", "RX7", "SLIM RX7"},
  28223.  
  28224. + {"IIR1", NULL, "IIR1 INP2 MUX"},
  28225. + {"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"},
  28226. + {"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"},
  28227. + {"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"},
  28228. + {"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"},
  28229. + {"IIR1 INP2 MUX", "DEC5", "DEC5 MUX"},
  28230. + {"IIR1 INP2 MUX", "DEC6", "DEC6 MUX"},
  28231. + {"IIR1 INP2 MUX", "DEC7", "DEC7 MUX"},
  28232. + {"IIR1 INP2 MUX", "DEC8", "DEC8 MUX"},
  28233. + {"IIR1 INP2 MUX", "DEC9", "DEC9 MUX"},
  28234. + {"IIR1 INP2 MUX", "DEC10", "DEC10 MUX"},
  28235. + {"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
  28236. + {"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
  28237. + {"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
  28238. + {"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
  28239. + {"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
  28240. + {"IIR1 INP2 MUX", "RX6", "SLIM RX6"},
  28241. + {"IIR1 INP2 MUX", "RX7", "SLIM RX7"},
  28242. +
  28243. + {"IIR2", NULL, "IIR2 INP2 MUX"},
  28244. + {"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"},
  28245. + {"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"},
  28246. + {"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"},
  28247. + {"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"},
  28248. + {"IIR2 INP2 MUX", "DEC5", "DEC5 MUX"},
  28249. + {"IIR2 INP2 MUX", "DEC6", "DEC6 MUX"},
  28250. + {"IIR2 INP2 MUX", "DEC7", "DEC7 MUX"},
  28251. + {"IIR2 INP2 MUX", "DEC8", "DEC8 MUX"},
  28252. + {"IIR2 INP2 MUX", "DEC9", "DEC9 MUX"},
  28253. + {"IIR2 INP2 MUX", "DEC10", "DEC10 MUX"},
  28254. + {"IIR2 INP2 MUX", "RX1", "SLIM RX1"},
  28255. + {"IIR2 INP2 MUX", "RX2", "SLIM RX2"},
  28256. + {"IIR2 INP2 MUX", "RX3", "SLIM RX3"},
  28257. + {"IIR2 INP2 MUX", "RX4", "SLIM RX4"},
  28258. + {"IIR2 INP2 MUX", "RX5", "SLIM RX5"},
  28259. + {"IIR2 INP2 MUX", "RX6", "SLIM RX6"},
  28260. + {"IIR2 INP2 MUX", "RX7", "SLIM RX7"},
  28261. +
  28262. + {"IIR1", NULL, "IIR1 INP3 MUX"},
  28263. + {"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"},
  28264. + {"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"},
  28265. + {"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"},
  28266. + {"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"},
  28267. + {"IIR1 INP3 MUX", "DEC5", "DEC5 MUX"},
  28268. + {"IIR1 INP3 MUX", "DEC6", "DEC6 MUX"},
  28269. + {"IIR1 INP3 MUX", "DEC7", "DEC7 MUX"},
  28270. + {"IIR1 INP3 MUX", "DEC8", "DEC8 MUX"},
  28271. + {"IIR1 INP3 MUX", "DEC9", "DEC9 MUX"},
  28272. + {"IIR1 INP3 MUX", "DEC10", "DEC10 MUX"},
  28273. + {"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
  28274. + {"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
  28275. + {"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
  28276. + {"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
  28277. + {"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
  28278. + {"IIR1 INP3 MUX", "RX6", "SLIM RX6"},
  28279. + {"IIR1 INP3 MUX", "RX7", "SLIM RX7"},
  28280. +
  28281. + {"IIR2", NULL, "IIR2 INP3 MUX"},
  28282. + {"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"},
  28283. + {"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"},
  28284. + {"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"},
  28285. + {"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"},
  28286. + {"IIR2 INP3 MUX", "DEC5", "DEC5 MUX"},
  28287. + {"IIR2 INP3 MUX", "DEC6", "DEC6 MUX"},
  28288. + {"IIR2 INP3 MUX", "DEC7", "DEC7 MUX"},
  28289. + {"IIR2 INP3 MUX", "DEC8", "DEC8 MUX"},
  28290. + {"IIR2 INP3 MUX", "DEC9", "DEC9 MUX"},
  28291. + {"IIR2 INP3 MUX", "DEC10", "DEC10 MUX"},
  28292. + {"IIR2 INP3 MUX", "RX1", "SLIM RX1"},
  28293. + {"IIR2 INP3 MUX", "RX2", "SLIM RX2"},
  28294. + {"IIR2 INP3 MUX", "RX3", "SLIM RX3"},
  28295. + {"IIR2 INP3 MUX", "RX4", "SLIM RX4"},
  28296. + {"IIR2 INP3 MUX", "RX5", "SLIM RX5"},
  28297. + {"IIR2 INP3 MUX", "RX6", "SLIM RX6"},
  28298. + {"IIR2 INP3 MUX", "RX7", "SLIM RX7"},
  28299. +
  28300. + {"IIR1", NULL, "IIR1 INP4 MUX"},
  28301. + {"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"},
  28302. + {"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"},
  28303. + {"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"},
  28304. + {"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"},
  28305. + {"IIR1 INP4 MUX", "DEC5", "DEC5 MUX"},
  28306. + {"IIR1 INP4 MUX", "DEC6", "DEC6 MUX"},
  28307. + {"IIR1 INP4 MUX", "DEC7", "DEC7 MUX"},
  28308. + {"IIR1 INP4 MUX", "DEC8", "DEC8 MUX"},
  28309. + {"IIR1 INP4 MUX", "DEC9", "DEC9 MUX"},
  28310. + {"IIR1 INP4 MUX", "DEC10", "DEC10 MUX"},
  28311. + {"IIR1 INP4 MUX", "RX1", "SLIM RX1"},
  28312. + {"IIR1 INP4 MUX", "RX2", "SLIM RX2"},
  28313. + {"IIR1 INP4 MUX", "RX3", "SLIM RX3"},
  28314. + {"IIR1 INP4 MUX", "RX4", "SLIM RX4"},
  28315. + {"IIR1 INP4 MUX", "RX5", "SLIM RX5"},
  28316. + {"IIR1 INP4 MUX", "RX6", "SLIM RX6"},
  28317. + {"IIR1 INP4 MUX", "RX7", "SLIM RX7"},
  28318. +
  28319. + {"IIR2", NULL, "IIR2 INP4 MUX"},
  28320. + {"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"},
  28321. + {"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"},
  28322. + {"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"},
  28323. + {"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"},
  28324. + {"IIR2 INP4 MUX", "DEC5", "DEC5 MUX"},
  28325. + {"IIR2 INP4 MUX", "DEC6", "DEC6 MUX"},
  28326. + {"IIR2 INP4 MUX", "DEC7", "DEC7 MUX"},
  28327. + {"IIR2 INP4 MUX", "DEC8", "DEC8 MUX"},
  28328. + {"IIR2 INP4 MUX", "DEC9", "DEC9 MUX"},
  28329. + {"IIR2 INP4 MUX", "DEC10", "DEC10 MUX"},
  28330. + {"IIR2 INP4 MUX", "RX1", "SLIM RX1"},
  28331. + {"IIR2 INP4 MUX", "RX2", "SLIM RX2"},
  28332. + {"IIR2 INP4 MUX", "RX3", "SLIM RX3"},
  28333. + {"IIR2 INP4 MUX", "RX4", "SLIM RX4"},
  28334. + {"IIR2 INP4 MUX", "RX5", "SLIM RX5"},
  28335. + {"IIR2 INP4 MUX", "RX6", "SLIM RX6"},
  28336. + {"IIR2 INP4 MUX", "RX7", "SLIM RX7"},
  28337. +
  28338. {"MIC BIAS1 Internal1", NULL, "LDO_H"},
  28339. {"MIC BIAS1 Internal2", NULL, "LDO_H"},
  28340. {"MIC BIAS1 External", NULL, "LDO_H"},
  28341. @@ -4140,10 +4286,8 @@ static const struct snd_soc_dapm_route audio_map[] = {
  28342. {"MIC BIAS3 Internal2", NULL, "LDO_H"},
  28343. {"MIC BIAS3 External", NULL, "LDO_H"},
  28344. {"MIC BIAS4 External", NULL, "LDO_H"},
  28345. - {"Main Mic Bias", NULL, "LDO_H"},
  28346. {DAPM_MICBIAS2_EXTERNAL_STANDALONE, NULL, "LDO_H Standalone"},
  28347. {DAPM_MICBIAS3_EXTERNAL_STANDALONE, NULL, "LDO_H Standalone"},
  28348. - {"Ear Mic Bias", NULL, "LDO_H"},
  28349. };
  28350.  
  28351. static int taiko_readable(struct snd_soc_codec *ssc, unsigned int reg)
  28352. @@ -4289,129 +4433,20 @@ static unsigned int taiko_read(struct snd_soc_codec *codec,
  28353. return val;
  28354. }
  28355.  
  28356. -#ifdef CONFIG_SND_SOC_ES325
  28357. static int taiko_startup(struct snd_pcm_substream *substream,
  28358. struct snd_soc_dai *dai)
  28359. {
  28360. - struct wcd9xxx *taiko_core = dev_get_drvdata(dai->codec->dev->parent);
  28361. pr_debug("%s(): substream = %s stream = %d\n" , __func__,
  28362. substream->name, substream->stream);
  28363. - if ((taiko_core != NULL) &&
  28364. - (taiko_core->dev != NULL) &&
  28365. - (taiko_core->dev->parent != NULL)) {
  28366. - es325_wrapper_wakeup(dai);
  28367. - }
  28368.  
  28369. return 0;
  28370. }
  28371. -#else
  28372. -static int taiko_startup(struct snd_pcm_substream *substream,
  28373. - struct snd_soc_dai *dai)
  28374. -{
  28375. -// struct wcd9xxx *taiko_core = dev_get_drvdata(dai->codec->dev->parent);
  28376. - pr_debug("%s(): substream = %s stream = %d\n" , __func__,
  28377. - substream->name, substream->stream);
  28378. - return 0;
  28379. -}
  28380. -#endif
  28381.  
  28382. static void taiko_shutdown(struct snd_pcm_substream *substream,
  28383. struct snd_soc_dai *dai)
  28384. {
  28385. - struct wcd9xxx *taiko_core = dev_get_drvdata(dai->codec->dev->parent);
  28386. pr_debug("%s(): substream = %s stream = %d\n" , __func__,
  28387. substream->name, substream->stream);
  28388. - if ((taiko_core != NULL) &&
  28389. - (taiko_core->dev != NULL) &&
  28390. - (taiko_core->dev->parent != NULL)) {
  28391. -#ifdef CONFIG_SND_SOC_ES325
  28392. - es325_wrapper_sleep(dai->id);
  28393. -#endif
  28394. - }
  28395. -}
  28396. -
  28397. -static int taiko_prepare(struct snd_pcm_substream *substream,
  28398. - struct snd_soc_dai *dai)
  28399. -{
  28400. - int paths, i;
  28401. - struct snd_soc_dapm_widget_list *wlist;
  28402. - struct snd_soc_codec *codec = dai->codec;
  28403. - struct taiko_priv *taiko_p = snd_soc_codec_get_drvdata(codec);
  28404. - int found_hs_pa = 0;
  28405. -
  28406. - if (substream->stream)
  28407. - return 0;
  28408. -
  28409. - pr_debug("%s(): substream = %s. stream = %d. dai->name = %s."
  28410. - " dai->driver->name = %s. dai stream_name = %s\n",
  28411. - __func__, substream->name, substream->stream,
  28412. - dai->name, dai->driver->name,
  28413. - substream->stream ? dai->driver->capture.stream_name :
  28414. - dai->driver->playback.stream_name);
  28415. -
  28416. - pr_debug("%s(): dai AIF widget = %s. dai playback stream_name = %s.\n"
  28417. - " rate = %u. bit_width = %u. hs compander_enabled = %u\n",
  28418. - __func__, dai->playback_aif ? dai->playback_aif->name : "NULL",
  28419. - dai->driver->playback.stream_name, taiko_p->dai[dai->id].rate,
  28420. - taiko_p->dai[dai->id].bit_width,
  28421. - taiko_p->comp_enabled[COMPANDER_1]);
  28422. -
  28423. - if ((!(taiko_p->dai[dai->id].rate == 192000 ||
  28424. - taiko_p->dai[dai->id].rate == 96000)) ||
  28425. - !(taiko_p->dai[dai->id].bit_width == 24) ||
  28426. - !(taiko_p->comp_enabled[COMPANDER_1])) {
  28427. -
  28428. - taiko_p->clsh_d.hs_perf_mode_enabled = false;
  28429. - snd_soc_update_bits(codec, TAIKO_A_RX_HPH_CHOP_CTL, 0x20, 0x20);
  28430. -
  28431. - dev_dbg(dai->dev ,"%s(): high performnce mode not needed\n",
  28432. - __func__);
  28433. - return 0;
  28434. - }
  28435. -
  28436. - paths = snd_soc_dapm_codec_dai_get_playback_connected_widgets(dai, &wlist);
  28437. -
  28438. - if (!paths) {
  28439. - dev_err(dai->dev, "%s(): found no audio playback paths\n",
  28440. - __func__);
  28441. - return 0;
  28442. - }
  28443. -
  28444. - for (i = 0; i < wlist->num_widgets; i++) {
  28445. - dev_dbg(dai->dev, " dai stream_name = %s, widget name = %s\n",
  28446. - dai->driver->playback.stream_name, wlist->widgets[i]->name);
  28447. -
  28448. - if (!strcmp(wlist->widgets[i]->name, "HPHL") ||
  28449. - !strcmp(wlist->widgets[i]->name, "HPHR")) {
  28450. - found_hs_pa = 1;
  28451. - break;
  28452. - }
  28453. - }
  28454. -
  28455. - kfree(wlist);
  28456. -
  28457. - if (!found_hs_pa)
  28458. - return 0;
  28459. -
  28460. - pr_debug("%s(): rate = %u. bit_width = %u. hs compander_enabled = %u",
  28461. - __func__, taiko_p->dai[dai->id].rate,
  28462. - taiko_p->dai[dai->id].bit_width,
  28463. - taiko_p->comp_enabled[COMPANDER_1]);
  28464. -
  28465. - if ((taiko_p->dai[dai->id].rate == 192000 ||
  28466. - taiko_p->dai[dai->id].rate == 96000) &&
  28467. - (taiko_p->dai[dai->id].bit_width == 24) &&
  28468. - (taiko_p->comp_enabled[COMPANDER_1])) {
  28469. -
  28470. - pr_debug("%s(): HS peformance mode enabled", __func__);
  28471. - taiko_p->clsh_d.hs_perf_mode_enabled = true;
  28472. - snd_soc_update_bits(codec, TAIKO_A_RX_HPH_CHOP_CTL, 0x20, 0x00);
  28473. - } else {
  28474. - taiko_p->clsh_d.hs_perf_mode_enabled = false;
  28475. - snd_soc_update_bits(codec, TAIKO_A_RX_HPH_CHOP_CTL, 0x20, 0x20);
  28476. - }
  28477. -
  28478. - return 0;
  28479. }
  28480.  
  28481. int taiko_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, bool dapm)
  28482. @@ -4787,7 +4822,7 @@ static int taiko_hw_params(struct snd_pcm_substream *substream,
  28483. u32 compander_fs;
  28484. int ret;
  28485.  
  28486. - pr_info("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
  28487. + pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
  28488. dai->name, dai->id, params_rate(params),
  28489. params_channels(params));
  28490.  
  28491. @@ -4904,223 +4939,15 @@ static int taiko_hw_params(struct snd_pcm_substream *substream,
  28492. return 0;
  28493. }
  28494.  
  28495. -#if defined(CONFIG_SND_SOC_ESXXX)
  28496. -int (*remote_route_enable)(struct snd_soc_dai *dai) = REMOTE_ROUTE_ENABLE_CB;
  28497. -int (*slim_get_channel_map)(struct snd_soc_dai *dai,
  28498. - unsigned int *tx_num, unsigned int *tx_slot,
  28499. - unsigned int *rx_num, unsigned int *rx_slot)
  28500. - = SLIM_GET_CHANNEL_MAP_CB;
  28501. -int (*slim_set_channel_map)(struct snd_soc_dai *dai,
  28502. - unsigned int tx_num, unsigned int *tx_slot,
  28503. - unsigned int rx_num, unsigned int *rx_slot)
  28504. - = SLIM_SET_CHANNEL_MAP_CB;
  28505. -int (*slim_hw_params)(struct snd_pcm_substream *substream,
  28506. - struct snd_pcm_hw_params *params,
  28507. - struct snd_soc_dai *dai)
  28508. - = SLIM_HW_PARAMS_CB;
  28509. -int (*remote_cfg_slim_rx)(int dai_id) = REMOTE_CFG_SLIM_RX_CB;
  28510. -int (*remote_close_slim_rx)(int dai_id) = REMOTE_CLOSE_SLIM_RX_CB;
  28511. -int (*remote_cfg_slim_tx)(int dai_id) = REMOTE_CFG_SLIM_TX_CB;
  28512. -int (*remote_close_slim_tx)(int dai_id) = REMOTE_CLOSE_SLIM_TX_CB;
  28513. -int (*remote_add_codec_controls)(struct snd_soc_codec *codec)
  28514. - = REMOTE_ADD_CODEC_CONTROLS_CB;
  28515. -
  28516. -static int taiko_esxxx_startup(struct snd_pcm_substream *substream,
  28517. - struct snd_soc_dai *dai)
  28518. -{
  28519. - taiko_startup(substream, dai);
  28520. -/*
  28521. - if (es705_remote_route_enable(dai))
  28522. - es705_slim_startup(substream, dai);
  28523. -*/
  28524. -
  28525. - return 0;
  28526. -}
  28527. -
  28528. -static void taiko_esxxx_shutdown(struct snd_pcm_substream *substream,
  28529. - struct snd_soc_dai *dai)
  28530. -{
  28531. - taiko_shutdown(substream, dai);
  28532. -
  28533. -/*
  28534. - if (es705_remote_route_enable(dai))
  28535. - es705_slim_shutdown(substream, dai);
  28536. -*/
  28537. -}
  28538. -
  28539. -static int taiko_esxxx_hw_params(struct snd_pcm_substream *substream,
  28540. - struct snd_pcm_hw_params *params,
  28541. - struct snd_soc_dai *dai)
  28542. -{
  28543. - int rc = 0;
  28544. - pr_info("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
  28545. - dai->name, dai->id, params_rate(params),
  28546. - params_channels(params));
  28547. -
  28548. - rc = taiko_hw_params(substream, params, dai);
  28549. -
  28550. - if (remote_route_enable(dai))
  28551. - rc = slim_hw_params(substream, params, dai);
  28552. -
  28553. - return rc;
  28554. -}
  28555. -static int taiko_esxxx_set_channel_map(struct snd_soc_dai *dai,
  28556. - unsigned int tx_num, unsigned int *tx_slot,
  28557. - unsigned int rx_num, unsigned int *rx_slot)
  28558. -
  28559. -{
  28560. - unsigned int taiko_tx_num = 0;
  28561. - unsigned int taiko_tx_slot[6];
  28562. - unsigned int taiko_rx_num = 0;
  28563. - unsigned int taiko_rx_slot[6];
  28564. - int rc = 0;
  28565. - pr_info("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n",
  28566. - __func__, dai->name, dai->id, tx_num, rx_num);
  28567. -
  28568. - if (remote_route_enable(dai)) {
  28569. - rc = taiko_get_channel_map(dai, &taiko_tx_num, taiko_tx_slot,
  28570. - &taiko_rx_num, taiko_rx_slot);
  28571. -
  28572. - rc = taiko_set_channel_map(dai, tx_num, taiko_tx_slot, rx_num, taiko_rx_slot);
  28573. -
  28574. - rc = slim_set_channel_map(dai, tx_num, tx_slot, rx_num,
  28575. - rx_slot);
  28576. - } else
  28577. - rc = taiko_set_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  28578. -
  28579. - return rc;
  28580. -}
  28581. -
  28582. -static int taiko_esxxx_get_channel_map(struct snd_soc_dai *dai,
  28583. - unsigned int *tx_num, unsigned int *tx_slot,
  28584. - unsigned int *rx_num, unsigned int *rx_slot)
  28585. -
  28586. -{
  28587. - int rc = 0;
  28588. -
  28589. - pr_info("%s(): dai_name = %s DAI-ID %d tx_ch %d rx_ch %d\n",
  28590. - __func__, dai->name, dai->id, *tx_num, *rx_num);
  28591. -
  28592. - if (remote_route_enable(dai))
  28593. - rc = slim_get_channel_map(dai, tx_num, tx_slot, rx_num,
  28594. - rx_slot);
  28595. - else
  28596. - rc = taiko_get_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  28597. -
  28598. - return rc;
  28599. -}
  28600. -static struct snd_soc_dai_ops taiko_dai_ops = {
  28601. - .startup = taiko_esxxx_startup, /* taiko_startup, */
  28602. - .shutdown = taiko_esxxx_shutdown, /* taiko_shutdown, */
  28603. - .prepare = taiko_prepare,
  28604. - .hw_params = taiko_esxxx_hw_params, /* taiko_hw_params, */
  28605. - .set_sysclk = taiko_set_dai_sysclk,
  28606. - .set_fmt = taiko_set_dai_fmt,
  28607. - .set_channel_map = taiko_esxxx_set_channel_map,
  28608. - /* taiko_set_channel_map, */
  28609. - .get_channel_map = taiko_esxxx_get_channel_map,
  28610. - /* taiko_get_channel_map, */
  28611. -};
  28612. -#elif defined(CONFIG_SND_SOC_ES325)
  28613. -static int taiko_es325_hw_params(struct snd_pcm_substream *substream,
  28614. - struct snd_pcm_hw_params *params,
  28615. - struct snd_soc_dai *dai)
  28616. -{
  28617. - int rc = 0;
  28618. - dev_info(dai->dev,"%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
  28619. - dai->name, dai->id, params_rate(params),
  28620. - params_channels(params));
  28621. -
  28622. - rc = taiko_hw_params(substream, params, dai);
  28623. -
  28624. - if (es325_remote_route_enable(dai))
  28625. - rc = es325_slim_hw_params(substream, params, dai);
  28626. -
  28627. - return rc;
  28628. -}
  28629. -
  28630. -#define SLIM_BUGFIX
  28631. -static int taiko_es325_set_channel_map(struct snd_soc_dai *dai,
  28632. - unsigned int tx_num, unsigned int *tx_slot,
  28633. - unsigned int rx_num, unsigned int *rx_slot)
  28634. -
  28635. -{
  28636. -#if !defined(SLIM_BUGFIX)
  28637. - unsigned int taiko_tx_num = 0;
  28638. -#endif
  28639. - unsigned int taiko_tx_slot[6];
  28640. -#if !defined(SLIM_BUGFIX)
  28641. - unsigned int taiko_rx_num = 0;
  28642. -#endif
  28643. - unsigned int taiko_rx_slot[6];
  28644. -#if defined(SLIM_BUGFIX)
  28645. - unsigned int temp_tx_num = 0;
  28646. - unsigned int temp_rx_num = 0;
  28647. -#endif
  28648. - int rc = 0;
  28649. -
  28650. - if (es325_remote_route_enable(dai)) {
  28651. -#if defined(SLIM_BUGFIX)
  28652. - rc = taiko_get_channel_map(dai, &temp_tx_num, taiko_tx_slot,
  28653. - &temp_rx_num, taiko_rx_slot);
  28654. -#else
  28655. - rc = taiko_get_channel_map(dai, &taiko_tx_num, taiko_tx_slot,
  28656. - &taiko_rx_num, taiko_rx_slot);
  28657. -#endif
  28658. -
  28659. - rc = taiko_set_channel_map(dai, tx_num, taiko_tx_slot, rx_num, taiko_rx_slot);
  28660. -
  28661. - rc = es325_slim_set_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  28662. - } else
  28663. - rc = taiko_set_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  28664. -
  28665. - return rc;
  28666. -}
  28667. -
  28668. -static int taiko_es325_get_channel_map(struct snd_soc_dai *dai,
  28669. - unsigned int *tx_num, unsigned int *tx_slot,
  28670. - unsigned int *rx_num, unsigned int *rx_slot)
  28671. -
  28672. -{
  28673. - int rc = 0;
  28674. -
  28675. - if (es325_remote_route_enable(dai))
  28676. - rc = es325_slim_get_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  28677. - else
  28678. - rc = taiko_get_channel_map(dai, tx_num, tx_slot, rx_num, rx_slot);
  28679. -
  28680. - return rc;
  28681. -}
  28682. -
  28683. -static struct snd_soc_dai_ops taiko_dai_ops = {
  28684. - .startup = taiko_startup,
  28685. - .shutdown = taiko_shutdown,
  28686. - .prepare = taiko_prepare,
  28687. - .hw_params = taiko_es325_hw_params, /* tabla_hw_params, */
  28688. - .set_sysclk = taiko_set_dai_sysclk,
  28689. - .set_fmt = taiko_set_dai_fmt,
  28690. - .set_channel_map = taiko_set_channel_map, /* tabla_set_channel_map, */
  28691. - .get_channel_map = taiko_es325_get_channel_map, /* tabla_get_channel_map, */
  28692. -};
  28693. -
  28694. -static struct snd_soc_dai_ops taiko_es325_dai_ops = {
  28695. - .startup = taiko_startup,
  28696. - .hw_params = taiko_es325_hw_params,
  28697. - .set_channel_map = taiko_es325_set_channel_map,
  28698. - .get_channel_map = taiko_es325_get_channel_map,
  28699. -};
  28700. -#else
  28701. static struct snd_soc_dai_ops taiko_dai_ops = {
  28702. .startup = taiko_startup,
  28703. .shutdown = taiko_shutdown,
  28704. - .prepare = taiko_prepare,
  28705. .hw_params = taiko_hw_params,
  28706. .set_sysclk = taiko_set_dai_sysclk,
  28707. .set_fmt = taiko_set_dai_fmt,
  28708. .set_channel_map = taiko_set_channel_map,
  28709. .get_channel_map = taiko_get_channel_map,
  28710. };
  28711. -#endif
  28712.  
  28713. static struct snd_soc_dai_driver taiko_dai[] = {
  28714. {
  28715. @@ -5235,50 +5062,6 @@ static struct snd_soc_dai_driver taiko_dai[] = {
  28716. },
  28717. .ops = &taiko_dai_ops,
  28718. },
  28719. -#ifdef CONFIG_SND_SOC_ES325
  28720. - {
  28721. - .name = "taiko_es325_rx1",
  28722. - .id = AIF1_PB + ES325_DAI_ID_OFFSET,
  28723. - .playback = {
  28724. - .stream_name = "AIF1 Playback",
  28725. - .rates = WCD9320_RATES,
  28726. - .formats = TAIKO_FORMATS,
  28727. - .rate_max = 192000,
  28728. - .rate_min = 8000,
  28729. - .channels_min = 1,
  28730. - .channels_max = 2,
  28731. - },
  28732. - .ops = &taiko_es325_dai_ops,
  28733. - },
  28734. - {
  28735. - .name = "taiko_es325_tx1",
  28736. - .id = AIF1_CAP + ES325_DAI_ID_OFFSET,
  28737. - .capture = {
  28738. - .stream_name = "AIF1 Capture",
  28739. - .rates = WCD9320_RATES,
  28740. - .formats = TAIKO_FORMATS,
  28741. - .rate_max = 192000,
  28742. - .rate_min = 8000,
  28743. - .channels_min = 1,
  28744. - .channels_max = 2,
  28745. - },
  28746. - .ops = &taiko_es325_dai_ops,
  28747. - },
  28748. - {
  28749. - .name = "taiko_es325_rx2",
  28750. - .id = AIF2_PB + ES325_DAI_ID_OFFSET,
  28751. - .playback = {
  28752. - .stream_name = "AIF2 Playback",
  28753. - .rates = WCD9320_RATES,
  28754. - .formats = TAIKO_FORMATS,
  28755. - .rate_max = 192000,
  28756. - .rate_min = 8000,
  28757. - .channels_min = 1,
  28758. - .channels_max = 2,
  28759. - },
  28760. - .ops = &taiko_es325_dai_ops,
  28761. - },
  28762. -#endif
  28763. };
  28764.  
  28765. static struct snd_soc_dai_driver taiko_i2s_dai[] = {
  28766. @@ -5440,21 +5223,11 @@ static int taiko_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
  28767. dai->bus_down_in_recovery = false;
  28768. taiko_codec_enable_int_port(dai, codec);
  28769. (void) taiko_codec_enable_slim_chmask(dai, true);
  28770. -#if defined(CONFIG_SND_SOC_ESXXX)
  28771. - ret = remote_cfg_slim_rx(w->shift);
  28772. -#elif defined(CONFIG_SND_SOC_ES325)
  28773. - ret = es325_remote_cfg_slim_rx(w->shift);
  28774. -#endif
  28775. ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
  28776. dai->rate, dai->bit_width,
  28777. &dai->grph);
  28778. break;
  28779. case SND_SOC_DAPM_POST_PMD:
  28780. -#if defined(CONFIG_SND_SOC_ESXXX)
  28781. - ret = remote_close_slim_rx(w->shift);
  28782. -#elif defined(CONFIG_SND_SOC_ES325)
  28783. - ret = es325_remote_close_slim_rx(w->shift);
  28784. -#endif
  28785. ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
  28786. dai->grph);
  28787. if (!dai->bus_down_in_recovery)
  28788. @@ -5583,18 +5356,8 @@ static int taiko_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
  28789. ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
  28790. dai->rate, dai->bit_width,
  28791. &dai->grph);
  28792. -#if defined(CONFIG_SND_SOC_ESXXX)
  28793. - ret = remote_cfg_slim_tx(w->shift);
  28794. -#elif defined(CONFIG_SND_SOC_ES325)
  28795. - ret = es325_remote_cfg_slim_tx(w->shift);
  28796. -#endif
  28797. break;
  28798. case SND_SOC_DAPM_POST_PMD:
  28799. -#if defined(CONFIG_SND_SOC_ESXXX)
  28800. - ret = remote_close_slim_tx(w->shift);
  28801. -#elif defined(CONFIG_SND_SOC_ES325)
  28802. - ret = es325_remote_close_slim_tx(w->shift);
  28803. -#endif
  28804. ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
  28805. dai->grph);
  28806. if (!dai->bus_down_in_recovery)
  28807. @@ -5661,6 +5424,24 @@ static int taiko_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
  28808. return 0;
  28809. }
  28810.  
  28811. +static int taiko_codec_iir_mux_event(struct snd_soc_dapm_widget *w,
  28812. + struct snd_kcontrol *kcontrol, int event)
  28813. +{
  28814. + struct snd_soc_codec *codec = w->codec;
  28815. +
  28816. + pr_debug("%s: event = %d\n", __func__, event);
  28817. +
  28818. + switch (event) {
  28819. + case SND_SOC_DAPM_POST_PMU:
  28820. + snd_soc_write(codec, w->reg, snd_soc_read(codec, w->reg));
  28821. + break;
  28822. + case SND_SOC_DAPM_POST_PMD:
  28823. + snd_soc_write(codec, w->reg, snd_soc_read(codec, w->reg));
  28824. + break;
  28825. + }
  28826. + return 0;
  28827. +}
  28828. +
  28829. static int taiko_codec_dsm_mux_event(struct snd_soc_dapm_widget *w,
  28830. struct snd_kcontrol *kcontrol, int event)
  28831. {
  28832. @@ -5719,6 +5500,24 @@ static int taiko_codec_enable_anc_ear(struct snd_soc_dapm_widget *w,
  28833. return ret;
  28834. }
  28835.  
  28836. +static int taiko_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
  28837. + struct snd_kcontrol *kcontrol, int event)
  28838. +{
  28839. + struct snd_soc_codec *codec = w->codec;
  28840. + int value = 0;
  28841. +
  28842. + switch (event) {
  28843. + case SND_SOC_DAPM_POST_PMU:
  28844. + value = snd_soc_read(codec, TAIKO_A_CDC_IIR1_GAIN_B1_CTL);
  28845. + snd_soc_write(codec, TAIKO_A_CDC_IIR1_GAIN_B1_CTL, value);
  28846. + break;
  28847. + default:
  28848. + pr_info("%s: event = %d not expected\n", __func__, event);
  28849. + break;
  28850. + }
  28851. + return 0;
  28852. +}
  28853. +
  28854. /* Todo: Have seperate dapm widgets for I2S and Slimbus.
  28855. * Might Need to have callbacks registered only for slimbus
  28856. */
  28857. @@ -5836,12 +5635,8 @@ static const struct snd_soc_dapm_widget taiko_dapm_widgets[] = {
  28858. SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
  28859. SND_SOC_DAPM_MIXER("RX7 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
  28860.  
  28861. - SND_SOC_DAPM_MIXER_E("RX1 MIX2", TAIKO_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
  28862. - 0, taiko_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
  28863. - SND_SOC_DAPM_POST_PMU),
  28864. - SND_SOC_DAPM_MIXER_E("RX2 MIX2", TAIKO_A_CDC_CLK_RX_B1_CTL, 1, 0, NULL,
  28865. - 0, taiko_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
  28866. - SND_SOC_DAPM_POST_PMU),
  28867. + SND_SOC_DAPM_MIXER("RX1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
  28868. + SND_SOC_DAPM_MIXER("RX2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
  28869.  
  28870. SND_SOC_DAPM_MIXER_E("RX3 MIX1", TAIKO_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
  28871. 0, taiko_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
  28872. @@ -5859,6 +5654,13 @@ static const struct snd_soc_dapm_widget taiko_dapm_widgets[] = {
  28873. 0, taiko_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
  28874. SND_SOC_DAPM_POST_PMU),
  28875.  
  28876. + SND_SOC_DAPM_MUX_E("RX1 INTERP", TAIKO_A_CDC_CLK_RX_B1_CTL, 0, 0,
  28877. + &rx1_interpolator, taiko_codec_enable_interpolator,
  28878. + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
  28879. + SND_SOC_DAPM_MUX_E("RX2 INTERP", TAIKO_A_CDC_CLK_RX_B1_CTL, 1, 0,
  28880. + &rx2_interpolator, taiko_codec_enable_interpolator,
  28881. + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
  28882. +
  28883. SND_SOC_DAPM_MIXER("RX1 CHAIN", TAIKO_A_CDC_RX1_B6_CTL, 5, 0, NULL, 0),
  28884. SND_SOC_DAPM_MIXER("RX2 CHAIN", TAIKO_A_CDC_RX2_B6_CTL, 5, 0, NULL, 0),
  28885.  
  28886. @@ -5962,18 +5764,10 @@ static const struct snd_soc_dapm_widget taiko_dapm_widgets[] = {
  28887. taiko_codec_enable_micbias,
  28888. SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
  28889. SND_SOC_DAPM_POST_PMD),
  28890. - SND_SOC_DAPM_MICBIAS_E("Main Mic Bias", 0, 0, 0,
  28891. - 0, SND_SOC_DAPM_PRE_PMU |SND_SOC_DAPM_POST_PMU |
  28892. - SND_SOC_DAPM_POST_PMD),
  28893.  
  28894. SND_SOC_DAPM_INPUT("AMIC3"),
  28895.  
  28896. SND_SOC_DAPM_INPUT("AMIC4"),
  28897. -#if defined(CONFIG_LDO_SUBMIC_BIAS)
  28898. - SND_SOC_DAPM_MICBIAS_E("Sub Mic Bias", 0, 0, 0,
  28899. - 0, SND_SOC_DAPM_PRE_PMU |SND_SOC_DAPM_POST_PMU |
  28900. - SND_SOC_DAPM_POST_PMD),
  28901. -#endif
  28902.  
  28903. SND_SOC_DAPM_INPUT("AMIC5"),
  28904.  
  28905. @@ -6089,9 +5883,6 @@ static const struct snd_soc_dapm_widget taiko_dapm_widgets[] = {
  28906. 0, taiko_codec_enable_micbias,
  28907. SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
  28908. SND_SOC_DAPM_POST_PMD),
  28909. - SND_SOC_DAPM_MICBIAS_E("Ear Mic Bias", 0, 0, 0,
  28910. - 0, SND_SOC_DAPM_PRE_PMU |SND_SOC_DAPM_POST_PMU |
  28911. - SND_SOC_DAPM_POST_PMD),
  28912.  
  28913. SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
  28914. AIF1_CAP, 0, taiko_codec_enable_slimtx,
  28915. @@ -6170,10 +5961,42 @@ static const struct snd_soc_dapm_widget taiko_dapm_widgets[] = {
  28916. SND_SOC_DAPM_POST_PMD),
  28917.  
  28918. /* Sidetone */
  28919. - SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
  28920. - SND_SOC_DAPM_MIXER("IIR1", TAIKO_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
  28921. + SND_SOC_DAPM_MUX_E("IIR1 INP1 MUX", TAIKO_A_CDC_IIR1_GAIN_B1_CTL, 0, 0,
  28922. + &iir1_inp1_mux, taiko_codec_iir_mux_event,
  28923. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28924. +
  28925. + SND_SOC_DAPM_PGA_E("IIR1", TAIKO_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0,
  28926. + taiko_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU),
  28927. +
  28928. + SND_SOC_DAPM_MUX_E("IIR1 INP2 MUX", TAIKO_A_CDC_IIR1_GAIN_B2_CTL, 0, 0,
  28929. + &iir1_inp2_mux, taiko_codec_iir_mux_event,
  28930. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28931. +
  28932. + SND_SOC_DAPM_MUX_E("IIR1 INP3 MUX", TAIKO_A_CDC_IIR1_GAIN_B3_CTL, 0, 0,
  28933. + &iir1_inp3_mux, taiko_codec_iir_mux_event,
  28934. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28935. +
  28936. + SND_SOC_DAPM_MUX_E("IIR1 INP4 MUX", TAIKO_A_CDC_IIR1_GAIN_B4_CTL, 0, 0,
  28937. + &iir1_inp4_mux, taiko_codec_iir_mux_event,
  28938. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28939. +
  28940. +
  28941. + SND_SOC_DAPM_MUX_E("IIR2 INP1 MUX", TAIKO_A_CDC_IIR2_GAIN_B1_CTL, 0, 0,
  28942. + &iir2_inp1_mux, taiko_codec_iir_mux_event,
  28943. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28944. +
  28945. + SND_SOC_DAPM_MUX_E("IIR2 INP2 MUX", TAIKO_A_CDC_IIR2_GAIN_B2_CTL, 0, 0,
  28946. + &iir2_inp2_mux, taiko_codec_iir_mux_event,
  28947. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28948. +
  28949. + SND_SOC_DAPM_MUX_E("IIR2 INP3 MUX", TAIKO_A_CDC_IIR2_GAIN_B3_CTL, 0, 0,
  28950. + &iir2_inp3_mux, taiko_codec_iir_mux_event,
  28951. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28952. +
  28953. + SND_SOC_DAPM_MUX_E("IIR2 INP4 MUX", TAIKO_A_CDC_IIR2_GAIN_B4_CTL, 0, 0,
  28954. + &iir2_inp4_mux, taiko_codec_iir_mux_event,
  28955. + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
  28956.  
  28957. - SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
  28958. SND_SOC_DAPM_MIXER("IIR2", TAIKO_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
  28959.  
  28960. /* AUX PGA */
  28961. @@ -6580,10 +6403,10 @@ static const struct wcd9xxx_reg_mask_val taiko_reg_defaults[] = {
  28962. TAIKO_REG_VAL(TAIKO_A_CDC_CLK_OTHR_RESET_B1_CTL, 0x00),
  28963. TAIKO_REG_VAL(TAIKO_A_CDC_CLK_OTHR_CTL, 0x00),
  28964. TAIKO_REG_VAL(TAIKO_A_CDC_CONN_MAD, 0x01),
  28965. -#if !defined(CONFIG_MACH_VIENNA_LTE) && !defined(CONFIG_MACH_LT03_LTE) && !defined(CONFIG_MACH_PICASSO_LTE) && !defined(CONFIG_SEC_H_PROJECT) && !defined(CONFIG_SEC_FRESCO_PROJECT) && !defined(CONFIG_MACH_KS01EUR)
  28966. +
  28967. /* Set HPH Path to low power mode */
  28968. TAIKO_REG_VAL(TAIKO_A_RX_HPH_BIAS_PA, 0x55),
  28969. -#endif
  28970. +
  28971. /* BUCK default */
  28972. TAIKO_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_4, 0x51),
  28973. TAIKO_REG_VAL(WCD9XXX_A_BUCK_CTRL_CCL_1, 0x5B),
  28974. @@ -6613,13 +6436,7 @@ static const struct wcd9xxx_reg_mask_val taiko_1_0_reg_defaults[] = {
  28975. /*Reduce EAR DAC bias to 70% */
  28976. TAIKO_REG_VAL(TAIKO_A_RX_EAR_BIAS_PA, 0x76),
  28977. /* Reduce LINE DAC bias to 70% */
  28978. -#if !defined(CONFIG_MACH_VIENNA_LTE) && !defined(CONFIG_MACH_LT03_LTE) && !defined(CONFIG_MACH_PICASSO_LTE) && !defined(CONFIG_SEC_H_PROJECT) && !defined(CONFIG_SEC_FRESCO_PROJECT) && !defined(CONFIG_MACH_KS01EUR)
  28979. TAIKO_REG_VAL(TAIKO_A_RX_LINE_BIAS_PA, 0x78),
  28980. -#else
  28981. - TAIKO_REG_VAL(TAIKO_A_RX_LINE_BIAS_PA, 0x7A),
  28982. - /* Reduce HPH DAC bias to 70% */
  28983. - TAIKO_REG_VAL(TAIKO_A_RX_HPH_BIAS_PA, 0x7A),
  28984. -#endif
  28985.  
  28986. /*
  28987. * There is a diode to pull down the micbias while doing
  28988. @@ -6657,21 +6474,12 @@ static const struct wcd9xxx_reg_mask_val taiko_2_0_reg_defaults[] = {
  28989. TAIKO_REG_VAL(TAIKO_A_BUCK_CTRL_CCL_4, 0x51),
  28990. TAIKO_REG_VAL(TAIKO_A_NCP_DTEST, 0x10),
  28991. TAIKO_REG_VAL(TAIKO_A_RX_HPH_CHOP_CTL, 0xA4),
  28992. -#if !defined(CONFIG_MACH_VIENNA_LTE) && !defined(CONFIG_MACH_LT03_LTE) && !defined(CONFIG_MACH_PICASSO_LTE) && !defined(CONFIG_SEC_H_PROJECT) && !defined(CONFIG_SEC_FRESCO_PROJECT) && !defined(CONFIG_MACH_KS01EUR)
  28993. - TAIKO_REG_VAL(TAIKO_A_RX_HPH_OCP_CTL, 0x6B),
  28994. -#else
  28995. - TAIKO_REG_VAL(TAIKO_A_RX_HPH_BIAS_PA, 0x7A),
  28996. - TAIKO_REG_VAL(TAIKO_A_RX_HPH_OCP_CTL, 0x6B),
  28997. -#endif
  28998. + TAIKO_REG_VAL(TAIKO_A_RX_HPH_OCP_CTL, 0x69),
  28999. TAIKO_REG_VAL(TAIKO_A_RX_HPH_CNP_WG_CTL, 0xDA),
  29000. TAIKO_REG_VAL(TAIKO_A_RX_HPH_CNP_WG_TIME, 0x15),
  29001. TAIKO_REG_VAL(TAIKO_A_RX_EAR_BIAS_PA, 0x76),
  29002. TAIKO_REG_VAL(TAIKO_A_RX_EAR_CNP, 0xC0),
  29003. -#if !defined(CONFIG_MACH_VIENNA_LTE) && !defined(CONFIG_MACH_LT03_LTE) && !defined(CONFIG_MACH_PICASSO_LTE) && !defined(CONFIG_SEC_H_PROJECT) && !defined(CONFIG_SEC_FRESCO_PROJECT) && !defined(CONFIG_MACH_KS01EUR)
  29004. TAIKO_REG_VAL(TAIKO_A_RX_LINE_BIAS_PA, 0x78),
  29005. -#else
  29006. - TAIKO_REG_VAL(TAIKO_A_RX_LINE_BIAS_PA, 0x7A),
  29007. -#endif
  29008. TAIKO_REG_VAL(TAIKO_A_RX_LINE_1_TEST, 0x2),
  29009. TAIKO_REG_VAL(TAIKO_A_RX_LINE_2_TEST, 0x2),
  29010. TAIKO_REG_VAL(TAIKO_A_RX_LINE_3_TEST, 0x2),
  29011. @@ -6800,6 +6608,8 @@ static const struct wcd9xxx_reg_mask_val taiko_codec_reg_init_val[] = {
  29012. /* set MAD input MIC to DMIC1 */
  29013. {TAIKO_A_CDC_CONN_MAD, 0x0F, 0x08},
  29014.  
  29015. + /* set DMIC CLK drive strength to 4mA */
  29016. + {TAIKO_A_HDRIVE_OVERRIDE, 0x07, 0x01},
  29017. };
  29018.  
  29019. static void taiko_codec_init_reg(struct snd_soc_codec *codec)
  29020. @@ -6850,6 +6660,27 @@ static void taiko_cleanup_irqs(struct taiko_priv *taiko)
  29021.  
  29022. wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, taiko);
  29023. }
  29024. +static
  29025. +struct firmware_cal *taiko_get_hwdep_fw_cal(struct snd_soc_codec *codec,
  29026. + enum wcd_cal_type type)
  29027. +{
  29028. + struct taiko_priv *taiko;
  29029. + struct firmware_cal *hwdep_cal;
  29030. +
  29031. + if (!codec) {
  29032. + pr_err("%s: NULL codec pointer\n", __func__);
  29033. + return NULL;
  29034. + }
  29035. + taiko = snd_soc_codec_get_drvdata(codec);
  29036. + hwdep_cal = wcdcal_get_fw_cal(taiko->fw_data, type);
  29037. + if (!hwdep_cal) {
  29038. + dev_err(codec->dev, "%s: cal not sent by %d\n",
  29039. + __func__, type);
  29040. + return NULL;
  29041. + }
  29042. +
  29043. + return hwdep_cal;
  29044. +}
  29045.  
  29046. int taiko_hs_detect(struct snd_soc_codec *codec,
  29047. struct wcd9xxx_mbhc_config *mbhc_cfg)
  29048. @@ -7104,6 +6935,7 @@ static const struct wcd9xxx_mbhc_cb mbhc_cb = {
  29049. .get_cdc_type = taiko_get_cdc_type,
  29050. .setup_zdet = taiko_setup_zdet,
  29051. .compute_impedance = taiko_compute_impedance,
  29052. + .get_hwdep_fw_cal = taiko_get_hwdep_fw_cal,
  29053. };
  29054.  
  29055. static const struct wcd9xxx_mbhc_intr cdc_intr_ids = {
  29056. @@ -7180,7 +7012,7 @@ static int taiko_post_reset_cb(struct wcd9xxx *wcd9xxx)
  29057. ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
  29058. taiko_enable_mbhc_micbias,
  29059. &mbhc_cb, &cdc_intr_ids,
  29060. - rco_clk_rate, false);
  29061. + rco_clk_rate, true);
  29062. if (ret)
  29063. pr_err("%s: mbhc init failed %d\n", __func__, ret);
  29064. else
  29065. @@ -7320,9 +7152,6 @@ static int taiko_codec_probe(struct snd_soc_codec *codec)
  29066. struct wcd9xxx_pdata *pdata;
  29067. struct wcd9xxx *wcd9xxx;
  29068. struct snd_soc_dapm_context *dapm = &codec->dapm;
  29069. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  29070. - extern unsigned int system_rev;
  29071. -#endif
  29072. int ret = 0;
  29073. int i, rco_clk_rate;
  29074. void *ptr = NULL;
  29075. @@ -7360,7 +7189,7 @@ static int taiko_codec_probe(struct snd_soc_codec *codec)
  29076. WCD9XXX_CDC_TYPE_TAIKO);
  29077. if (ret) {
  29078. pr_err("%s: wcd9xxx init failed %d\n", __func__, ret);
  29079. - goto err_init;
  29080. + goto err_nomem_slimch;
  29081. }
  29082.  
  29083. taiko->clsh_d.buck_mv = taiko_codec_get_buck_mv(codec);
  29084. @@ -7372,58 +7201,29 @@ static int taiko_codec_probe(struct snd_soc_codec *codec)
  29085. rco_clk_rate = TAIKO_MCLK_CLK_12P288MHZ;
  29086. else
  29087. rco_clk_rate = TAIKO_MCLK_CLK_9P6MHZ;
  29088. -
  29089. -#if defined(CONFIG_MACH_KLTE_KOR)
  29090. - if (system_rev >= 13) {
  29091. - /* init and start mbhc */
  29092. - ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
  29093. - taiko_enable_mbhc_micbias,
  29094. - &mbhc_cb, &cdc_intr_ids,
  29095. - rco_clk_rate, false);
  29096. - if (ret) {
  29097. - pr_err("%s: mbhc init failed %d\n", __func__, ret);
  29098. - goto err_init;
  29099. - }
  29100. + taiko->fw_data = kzalloc(sizeof(*(taiko->fw_data)), GFP_KERNEL);
  29101. + if (!taiko->fw_data) {
  29102. + dev_err(codec->dev, "Failed to allocate fw_data\n");
  29103. + goto err_nomem_slimch;
  29104. }
  29105. -#elif defined(CONFIG_MACH_KLTE_JPN)
  29106. - if (system_rev >= 11) {
  29107. - /* init and start mbhc */
  29108. - ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
  29109. - taiko_enable_mbhc_micbias,
  29110. - &mbhc_cb, &cdc_intr_ids,
  29111. - rco_clk_rate, false);
  29112. - if (ret) {
  29113. - pr_err("%s: mbhc init failed %d\n", __func__, ret);
  29114. - goto err_init;
  29115. - }
  29116. + set_bit(WCD9XXX_ANC_CAL, taiko->fw_data->cal_bit);
  29117. + set_bit(WCD9XXX_MAD_CAL, taiko->fw_data->cal_bit);
  29118. + set_bit(WCD9XXX_MBHC_CAL, taiko->fw_data->cal_bit);
  29119. + ret = wcd_cal_create_hwdep(taiko->fw_data,
  29120. + WCD9XXX_CODEC_HWDEP_NODE, codec);
  29121. + if (ret < 0) {
  29122. + dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
  29123. + goto err_hwdep;
  29124. }
  29125. -#else
  29126. -#if !defined(CONFIG_SAMSUNG_JACK) && !defined(CONFIG_MUIC_DET_JACK)
  29127. /* init and start mbhc */
  29128. ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
  29129. taiko_enable_mbhc_micbias,
  29130. &mbhc_cb, &cdc_intr_ids,
  29131. - rco_clk_rate, false);
  29132. + rco_clk_rate, true);
  29133. if (ret) {
  29134. pr_err("%s: mbhc init failed %d\n", __func__, ret);
  29135. - goto err_init;
  29136. - }
  29137. -#elif defined(CONFIG_SEC_JACTIVE_PROJECT)
  29138. -/* init and start mbhc */
  29139. - pr_info("taiko_codec_probe system_rev %d",system_rev);
  29140. - if(system_rev < 3)
  29141. - {
  29142. - ret = wcd9xxx_mbhc_init(&taiko->mbhc, &taiko->resmgr, codec,
  29143. - taiko_enable_mbhc_micbias,
  29144. - &mbhc_cb, &cdc_intr_ids,
  29145. - rco_clk_rate, false);
  29146. - if (ret) {
  29147. - pr_err("%s: mbhc init failed %d\n", __func__, ret);
  29148. - goto err_init;
  29149. - }
  29150. + goto err_hwdep;
  29151. }
  29152. -#endif
  29153. -#endif
  29154.  
  29155. taiko->codec = codec;
  29156. for (i = 0; i < COMPANDER_MAX; i++) {
  29157. @@ -7446,7 +7246,7 @@ static int taiko_codec_probe(struct snd_soc_codec *codec)
  29158. ret = taiko_handle_pdata(taiko);
  29159. if (IS_ERR_VALUE(ret)) {
  29160. pr_err("%s: bad pdata\n", __func__);
  29161. - goto err_pdata;
  29162. + goto err_hwdep;
  29163. }
  29164.  
  29165. taiko->spkdrv_reg = taiko_codec_find_regulator(codec,
  29166. @@ -7459,18 +7259,12 @@ static int taiko_codec_probe(struct snd_soc_codec *codec)
  29167. WCD9XXX_BG_CLK_UNLOCK(&taiko->resmgr);
  29168. }
  29169.  
  29170. -#if defined(CONFIG_SND_SOC_ESXXX)
  29171. - remote_add_codec_controls(codec);
  29172. -#elif defined(CONFIG_SND_SOC_ES325)
  29173. - es325_remote_add_codec_controls(codec);
  29174. -#endif
  29175. -
  29176. ptr = kmalloc((sizeof(taiko_rx_chs) +
  29177. sizeof(taiko_tx_chs)), GFP_KERNEL);
  29178. if (!ptr) {
  29179. pr_err("%s: no mem for slim chan ctl data\n", __func__);
  29180. ret = -ENOMEM;
  29181. - goto err_nomem_slimch;
  29182. + goto err_hwdep;
  29183. }
  29184.  
  29185. if (taiko->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
  29186. @@ -7548,19 +7342,17 @@ static int taiko_codec_probe(struct snd_soc_codec *codec)
  29187.  
  29188. err_irq:
  29189. taiko_cleanup_irqs(taiko);
  29190. -err_pdata:
  29191. - kfree(ptr);
  29192. + kfree(ptr);
  29193. +err_hwdep:
  29194. + kfree(taiko->fw_data);
  29195. err_nomem_slimch:
  29196. kfree(taiko);
  29197. -err_init:
  29198. return ret;
  29199. }
  29200. static int taiko_codec_remove(struct snd_soc_codec *codec)
  29201. {
  29202. struct taiko_priv *taiko = snd_soc_codec_get_drvdata(codec);
  29203. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  29204. - extern unsigned int system_rev;
  29205. -#endif
  29206. +
  29207. WCD9XXX_BG_CLK_LOCK(&taiko->resmgr);
  29208. atomic_set(&kp_taiko_priv, 0);
  29209.  
  29210. @@ -7571,33 +7363,14 @@ static int taiko_codec_remove(struct snd_soc_codec *codec)
  29211.  
  29212. taiko_cleanup_irqs(taiko);
  29213.  
  29214. -#if defined(CONFIG_MACH_KLTE_KOR)
  29215. - if (system_rev >= 13) {
  29216. - /* cleanup MBHC */
  29217. - wcd9xxx_mbhc_deinit(&taiko->mbhc);
  29218. - }
  29219. -#elif defined(CONFIG_MACH_KLTE_JPN)
  29220. - if (system_rev >= 11) {
  29221. - /* cleanup MBHC */
  29222. - wcd9xxx_mbhc_deinit(&taiko->mbhc);
  29223. - }
  29224. -#else
  29225. -#if !defined(CONFIG_SAMSUNG_JACK) && !defined(CONFIG_MUIC_DET_JACK)
  29226. /* cleanup MBHC */
  29227. wcd9xxx_mbhc_deinit(&taiko->mbhc);
  29228. -#elif defined(CONFIG_SEC_JACTIVE_PROJECT)
  29229. - pr_info("taiko_codec_remove system_rev %d",system_rev);
  29230. - if(system_rev < 3)
  29231. - {
  29232. - wcd9xxx_mbhc_deinit(&taiko->mbhc);
  29233. - }
  29234. -#endif
  29235. -#endif
  29236. /* cleanup resmgr */
  29237. wcd9xxx_resmgr_deinit(&taiko->resmgr);
  29238.  
  29239. taiko->spkdrv_reg = NULL;
  29240.  
  29241. + kfree(taiko->fw_data);
  29242. kfree(taiko);
  29243. return 0;
  29244. }
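
The probe and remove hunks above replace the per-board #ifdef maze with a single hwdep-based calibration path: the driver allocates a struct fw_info, marks which calibration types it accepts, and registers a hwdep node through wcd_cal_create_hwdep(). Below is a condensed sketch of just that wiring, assuming the wcdcal-hwdep API introduced later in this patch; the helper name wire_up_hwdep_cal and the omitted error unwinding are illustrative, not part of the driver.

#include <linux/slab.h>
#include <linux/bitops.h>
#include <sound/soc.h>
#include "wcdcal-hwdep.h"

static int wire_up_hwdep_cal(struct snd_soc_codec *codec, struct fw_info **out)
{
	struct fw_info *fw_data;
	int ret;

	fw_data = kzalloc(sizeof(*fw_data), GFP_KERNEL);
	if (!fw_data)
		return -ENOMEM;

	/* advertise which calibration blobs userspace may push */
	set_bit(WCD9XXX_ANC_CAL, fw_data->cal_bit);
	set_bit(WCD9XXX_MAD_CAL, fw_data->cal_bit);
	set_bit(WCD9XXX_MBHC_CAL, fw_data->cal_bit);

	/* registers the "Codec <name>" hwdep node and its backing buffers */
	ret = wcd_cal_create_hwdep(fw_data, WCD9XXX_CODEC_HWDEP_NODE, codec);
	if (ret < 0) {
		kfree(fw_data);
		return ret;
	}

	*out = fw_data;
	return 0;
}
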
  29245. diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
  29246. index c4ed685..ea0c58f 100644
  29247. --- a/sound/soc/codecs/wcd9xxx-mbhc.c
  29248. +++ b/sound/soc/codecs/wcd9xxx-mbhc.c
  29249. @@ -36,6 +36,7 @@
  29250. #include <linux/kernel.h>
  29251. #include <linux/gpio.h>
  29252. #include <linux/input.h>
  29253. +#include "wcdcal-hwdep.h"
  29254. #include "wcd9320.h"
  29255. #include "wcd9306.h"
  29256. #include "wcd9xxx-mbhc.h"
  29257. @@ -75,7 +76,7 @@
  29258. #define OCP_ATTEMPT 1
  29259.  
  29260. #define FW_READ_ATTEMPTS 15
  29261. -#define FW_READ_TIMEOUT 2000000
  29262. +#define FW_READ_TIMEOUT 4000000
  29263.  
  29264. #define BUTTON_POLLING_SUPPORTED true
  29265.  
  29266. @@ -189,6 +190,10 @@ static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z,
  29267.  
  29268. static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc);
  29269.  
  29270. +static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc,
  29271. + enum meas_type dce, s16 vin_mv,
  29272. + bool cs_enable);
  29273. +
  29274. static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc)
  29275. {
  29276. return (snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_EN_CTL) & 0x1);
  29277. @@ -1148,10 +1153,6 @@ static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc,
  29278. struct snd_soc_codec *codec = mbhc->codec;
  29279. short bias_value;
  29280. u8 cfilt_mode;
  29281. - s16 reg;
  29282. - int change;
  29283. - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
  29284. - s16 sta_z = 0, dce_z = 0;
  29285.  
  29286. WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
  29287.  
  29288. @@ -1161,7 +1162,6 @@ static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc,
  29289. return -ENODEV;
  29290. }
  29291.  
  29292. - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
  29293. /* Enable external voltage source to micbias if present */
  29294. if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
  29295. mbhc->mbhc_cb->enable_mb_source(codec, true, true);
  29296. @@ -1221,6 +1221,21 @@ static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc,
  29297. snd_soc_write(codec, mbhc_micb_regs->cfilt_ctl, cfilt_mode);
  29298. snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
  29299.  
  29300. + return bias_value;
  29301. +}
  29302. +
  29303. +static void wcd9xxx_recalibrate(struct wcd9xxx_mbhc *mbhc,
  29304. + struct mbhc_micbias_regs *mbhc_micb_regs,
  29305. + bool is_cs_enable)
  29306. +{
  29307. + struct snd_soc_codec *codec = mbhc->codec;
  29308. + s16 reg;
  29309. + int change;
  29310. + struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
  29311. + s16 sta_z = 0, dce_z = 0;
  29312. +
  29313. + btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
  29314. +
  29315. if (mbhc->mbhc_cfg->do_recalibration) {
  29316. /* recalibrate dce_z and sta_z */
  29317. reg = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
  29318. @@ -1255,17 +1270,24 @@ static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc,
  29319. snd_soc_write(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
  29320. reg);
  29321. if (dce_z) {
  29322. - pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x\n",
  29323. - __func__, mbhc->mbhc_data.dce_nsc_cs_z,
  29324. - dce_z & 0xffff);
  29325. mbhc->mbhc_data.dce_nsc_cs_z = dce_z;
  29326. + /* update v_cs_ins_h with new dce_nsc_cs_z */
  29327. + mbhc->mbhc_data.v_cs_ins_h =
  29328. + wcd9xxx_codec_v_sta_dce(
  29329. + mbhc, DCE,
  29330. + WCD9XXX_V_CS_HS_MAX,
  29331. + is_cs_enable);
  29332. + pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x, v_cs_ins_h 0x%x\n",
  29333. + __func__,
  29334. + mbhc->mbhc_data.dce_nsc_cs_z,
  29335. + dce_z & 0xffff,
  29336. + mbhc->mbhc_data.v_cs_ins_h);
  29337. } else {
  29338. pr_debug("%s: failed get new dce_nsc_cs_z\n",
  29339. __func__);
  29340. }
  29341. }
  29342. }
  29343. - return bias_value;
  29344. }
  29345.  
  29346. static void wcd9xxx_shutdown_hs_removal_detect(struct wcd9xxx_mbhc *mbhc)
  29347. @@ -1822,6 +1844,9 @@ wcd9xxx_codec_cs_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
  29348. wcd9xxx_codec_hphr_gnd_switch(codec, false);
  29349. }
  29350.  
  29351. + /* recalibrate DCE/STA GND voltages */
  29352. + wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, true);
  29353. +
  29354. type = wcd9xxx_cs_find_plug_type(mbhc, rt, ARRAY_SIZE(rt), highhph,
  29355. mbhc->event_state);
  29356.  
  29357. @@ -1902,6 +1927,8 @@ wcd9xxx_codec_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
  29358. if (rt[i].swap_gnd)
  29359. wcd9xxx_codec_hphr_gnd_switch(codec, false);
  29360. }
  29361. + /* recalibrate DCE/STA GND voltages */
  29362. + wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, false);
  29363.  
  29364. if (vddioon)
  29365. __wcd9xxx_switch_micbias(mbhc, 1, false, false);
  29366. @@ -2812,35 +2839,39 @@ static void wcd9xxx_mbhc_insert_work(struct work_struct *work)
  29367. wcd9xxx_unlock_sleep(core_res);
  29368. }
  29369.  
  29370. -static bool wcd9xxx_mbhc_fw_validate(const struct firmware *fw)
  29371. +static bool wcd9xxx_mbhc_fw_validate(const void *data, size_t size)
  29372. {
  29373. u32 cfg_offset;
  29374. struct wcd9xxx_mbhc_imped_detect_cfg *imped_cfg;
  29375. struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
  29376. + struct firmware_cal fw;
  29377. +
  29378. + fw.data = (void *)data;
  29379. + fw.size = size;
  29380.  
  29381. - if (fw->size < WCD9XXX_MBHC_CAL_MIN_SIZE)
  29382. + if (fw.size < WCD9XXX_MBHC_CAL_MIN_SIZE)
  29383. return false;
  29384.  
  29385. /*
  29386. * Previous check guarantees that there is enough fw data up
  29387. * to num_btn
  29388. */
  29389. - btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(fw->data);
  29390. - cfg_offset = (u32) ((void *) btn_cfg - (void *) fw->data);
  29391. - if (fw->size < (cfg_offset + WCD9XXX_MBHC_CAL_BTN_SZ(btn_cfg)))
  29392. + btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(fw.data);
  29393. + cfg_offset = (u32) ((void *) btn_cfg - (void *) fw.data);
  29394. + if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_BTN_SZ(btn_cfg)))
  29395. return false;
  29396.  
  29397. /*
  29398. * Previous check guarantees that there is enough fw data up
  29399. * to start of impedance detection configuration
  29400. */
  29401. - imped_cfg = WCD9XXX_MBHC_CAL_IMPED_DET_PTR(fw->data);
  29402. - cfg_offset = (u32) ((void *) imped_cfg - (void *) fw->data);
  29403. + imped_cfg = WCD9XXX_MBHC_CAL_IMPED_DET_PTR(fw.data);
  29404. + cfg_offset = (u32) ((void *) imped_cfg - (void *) fw.data);
  29405.  
  29406. - if (fw->size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_MIN_SZ))
  29407. + if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_MIN_SZ))
  29408. return false;
  29409.  
  29410. - if (fw->size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_SZ(imped_cfg)))
  29411. + if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_SZ(imped_cfg)))
  29412. return false;
  29413.  
  29414. return true;
  29415. @@ -4178,7 +4209,9 @@ static void wcd9xxx_mbhc_fw_read(struct work_struct *work)
  29416. struct wcd9xxx_mbhc *mbhc;
  29417. struct snd_soc_codec *codec;
  29418. const struct firmware *fw;
  29419. + struct firmware_cal *fw_data = NULL;
  29420. int ret = -1, retry = 0;
  29421. + bool use_default_cal = false;
  29422.  
  29423. dwork = to_delayed_work(work);
  29424. mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_firmware_dwork);
  29425. @@ -4186,29 +4219,62 @@ static void wcd9xxx_mbhc_fw_read(struct work_struct *work)
  29426.  
  29427. while (retry < FW_READ_ATTEMPTS) {
  29428. retry++;
  29429. - pr_info("%s:Attempt %d to request MBHC firmware\n",
  29430. - __func__, retry);
  29431. - ret = request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin",
  29432. - codec->dev);
  29433. -
  29434. - if (ret != 0) {
  29435. + pr_debug("%s:Attempt %d to request MBHC firmware\n",
  29436. + __func__, retry);
  29437. + if (mbhc->mbhc_cb->get_hwdep_fw_cal)
  29438. + fw_data = mbhc->mbhc_cb->get_hwdep_fw_cal(codec,
  29439. + WCD9XXX_MBHC_CAL);
  29440. + if (!fw_data)
  29441. + ret = request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin",
  29442. + codec->dev);
  29443. + /*
  29444. + * if request_firmware and hwdep cal both fail then
  29445. + * retry for few times before bailing out
  29446. + */
  29447. + if ((ret != 0) && !fw_data) {
  29448. usleep_range(FW_READ_TIMEOUT, FW_READ_TIMEOUT);
  29449. } else {
  29450. pr_info("%s: MBHC Firmware read succesful\n", __func__);
  29451. break;
  29452. }
  29453. }
  29454. -
  29455. - if (ret != 0) {
  29456. + if (!fw_data)
  29457. + pr_debug("%s: using request_firmware\n", __func__);
  29458. + else
  29459. + pr_debug("%s: using hwdep cal\n", __func__);
  29460. + if (ret != 0 && !fw_data) {
  29461. pr_err("%s: Cannot load MBHC firmware use default cal\n",
  29462. - __func__);
  29463. - } else if (wcd9xxx_mbhc_fw_validate(fw) == false) {
  29464. - pr_err("%s: Invalid MBHC cal data size use default cal\n",
  29465. - __func__);
  29466. - release_firmware(fw);
  29467. - } else {
  29468. - mbhc->mbhc_cfg->calibration = (void *)fw->data;
  29469. - mbhc->mbhc_fw = fw;
  29470. + __func__);
  29471. + use_default_cal = true;
  29472. + }
  29473. + if (!use_default_cal) {
  29474. + const void *data;
  29475. + size_t size;
  29476. +
  29477. + if (fw_data) {
  29478. + data = fw_data->data;
  29479. + size = fw_data->size;
  29480. + } else {
  29481. + data = fw->data;
  29482. + size = fw->size;
  29483. + }
  29484. + if (wcd9xxx_mbhc_fw_validate(data, size) == false) {
  29485. + pr_err("%s: Invalid MBHC cal data size use default cal\n",
  29486. + __func__);
  29487. + if (!fw_data)
  29488. + release_firmware(fw);
  29489. + } else {
  29490. + if (fw_data) {
  29491. + mbhc->mbhc_cfg->calibration =
  29492. + (void *)fw_data->data;
  29493. + mbhc->mbhc_cal = fw_data;
  29494. + } else {
  29495. + mbhc->mbhc_cfg->calibration =
  29496. + (void *)fw->data;
  29497. + mbhc->mbhc_fw = fw;
  29498. + }
  29499. + }
  29500. +
  29501. }
  29502.  
  29503. (void) wcd9xxx_init_and_calibrate(mbhc);
  29504. @@ -4398,15 +4464,16 @@ int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
  29505. mbhc->mbhc_cb->enable_clock_gate(mbhc->codec, true);
  29506.  
  29507. if (!mbhc->mbhc_cfg->read_fw_bin ||
  29508. - (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_fw)) {
  29509. + (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_fw) ||
  29510. + (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_cal)) {
  29511. rc = wcd9xxx_init_and_calibrate(mbhc);
  29512. } else {
  29513. - if (!mbhc->mbhc_fw)
  29514. + if (!mbhc->mbhc_fw || !mbhc->mbhc_cal)
  29515. schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
  29516. usecs_to_jiffies(FW_READ_TIMEOUT));
  29517. else
  29518. - pr_debug("%s: Skipping to read mbhc fw, 0x%p\n",
  29519. - __func__, mbhc->mbhc_fw);
  29520. + pr_debug("%s: Skipping to read mbhc fw, 0x%p 0x%p\n",
  29521. + __func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
  29522. }
  29523.  
  29524. pr_debug("%s: leave %d\n", __func__, rc);
  29525. @@ -4416,10 +4483,12 @@ EXPORT_SYMBOL(wcd9xxx_mbhc_start);
  29526.  
  29527. void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc)
  29528. {
  29529. - if (mbhc->mbhc_fw) {
  29530. + if (mbhc->mbhc_fw || mbhc->mbhc_cal) {
  29531. cancel_delayed_work_sync(&mbhc->mbhc_firmware_dwork);
  29532. - release_firmware(mbhc->mbhc_fw);
  29533. + if (!mbhc->mbhc_cal)
  29534. + release_firmware(mbhc->mbhc_fw);
  29535. mbhc->mbhc_fw = NULL;
  29536. + mbhc->mbhc_cal = NULL;
  29537. }
  29538. }
  29539. EXPORT_SYMBOL(wcd9xxx_mbhc_stop);
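
With the hunks above, wcd9xxx_mbhc_fw_read() tries the hwdep-provided calibration first and only falls back to request_firmware(); ownership differs between the two sources, which is why wcd9xxx_mbhc_stop() now releases the struct firmware only when mbhc_cal is not set. A stand-alone sketch of that decision, under the same field and callback names as the patch (the function name pick_mbhc_cal is illustrative):

#include <linux/firmware.h>
#include <sound/soc.h>
#include "wcd9xxx-mbhc.h"
#include "wcdcal-hwdep.h"

static int pick_mbhc_cal(struct wcd9xxx_mbhc *mbhc, struct snd_soc_codec *codec)
{
	struct firmware_cal *hwdep_cal = NULL;
	const struct firmware *fw = NULL;

	/* preferred path: blob already pushed through the hwdep node */
	if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_hwdep_fw_cal)
		hwdep_cal = mbhc->mbhc_cb->get_hwdep_fw_cal(codec,
							    WCD9XXX_MBHC_CAL);
	if (hwdep_cal) {
		mbhc->mbhc_cfg->calibration = (void *)hwdep_cal->data;
		mbhc->mbhc_cal = hwdep_cal;	/* nothing to release later */
		return 0;
	}

	/* fallback path: classic firmware loader */
	if (request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin", codec->dev))
		return -ENOENT;		/* caller falls back to default cal */

	mbhc->mbhc_cfg->calibration = (void *)fw->data;
	mbhc->mbhc_fw = fw;		/* released in wcd9xxx_mbhc_stop() */
	return 0;
}
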
  29540. diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
  29541. index 91edaca..7eba649 100644
  29542. --- a/sound/soc/codecs/wcd9xxx-mbhc.h
  29543. +++ b/sound/soc/codecs/wcd9xxx-mbhc.h
  29544. @@ -13,6 +13,7 @@
  29545. #define __WCD9XXX_MBHC_H__
  29546.  
  29547. #include "wcd9xxx-resmgr.h"
  29548. +#include "wcdcal-hwdep.h"
  29549.  
  29550. #define WCD9XXX_CFILT_FAST_MODE 0x00
  29551. #define WCD9XXX_CFILT_SLOW_MODE 0x40
  29552. @@ -285,6 +286,9 @@ struct wcd9xxx_mbhc_cb {
  29553. int (*enable_mb_source) (struct snd_soc_codec *, bool, bool);
  29554. void (*setup_int_rbias) (struct snd_soc_codec *, bool);
  29555. void (*pull_mb_to_vddio) (struct snd_soc_codec *, bool);
  29556. + struct firmware_cal * (*get_hwdep_fw_cal) (struct snd_soc_codec *,
  29557. + enum wcd_cal_type);
  29558. +
  29559. };
  29560.  
  29561. struct wcd9xxx_mbhc {
  29562. @@ -312,6 +316,7 @@ struct wcd9xxx_mbhc {
  29563. const struct firmware *mbhc_fw;
  29564.  
  29565. struct delayed_work mbhc_insert_dwork;
  29566. + struct firmware_cal *mbhc_cal;
  29567.  
  29568. u8 current_plug;
  29569. struct work_struct correct_plug_swch;
  29570. diff --git a/sound/soc/codecs/wcdcal-hwdep.c b/sound/soc/codecs/wcdcal-hwdep.c
  29571. new file mode 100644
  29572. index 0000000..1132a3c
  29573. --- /dev/null
  29574. +++ b/sound/soc/codecs/wcdcal-hwdep.c
  29575. @@ -0,0 +1,221 @@
  29576. +/*
  29577. + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  29578. + *
  29579. + * This program is free software; you can redistribute it and/or modify
  29580. + * it under the terms of the GNU General Public License version 2 and
  29581. + * only version 2 as published by the Free Software Foundation.
  29582. + *
  29583. + * This program is distributed in the hope that it will be useful,
  29584. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  29585. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  29586. + * GNU General Public License for more details.
  29587. + *
  29588. + */
  29589. +#include <linux/slab.h>
  29590. +#include <linux/module.h>
  29591. +#include <linux/ioctl.h>
  29592. +#include <linux/bitops.h>
  29593. +#include <sound/hwdep.h>
  29594. +#include <sound/msmcal-hwdep.h>
  29595. +#include <sound/soc.h>
  29596. +#include "wcdcal-hwdep.h"
  29597. +
  29598. +const int cal_size_info[WCD9XXX_MAX_CAL] = {
  29599. + [WCD9XXX_ANC_CAL] = 4096,
  29600. + [WCD9XXX_MBHC_CAL] = 4096,
  29601. + [WCD9XXX_MAD_CAL] = 4096,
  29602. +};
  29603. +
  29604. +const char *cal_name_info[WCD9XXX_MAX_CAL] = {
  29605. + [WCD9XXX_ANC_CAL] = "anc",
  29606. + [WCD9XXX_MBHC_CAL] = "mbhc",
  29607. + [WCD9XXX_MAD_CAL] = "mad",
  29608. +};
  29609. +
  29610. +struct firmware_cal *wcdcal_get_fw_cal(struct fw_info *fw_data,
  29611. + enum wcd_cal_type type)
  29612. +{
  29613. + if (!fw_data) {
  29614. + pr_err("%s: fw_data is NULL\n", __func__);
  29615. + return NULL;
  29616. + }
  29617. + if (type >= WCD9XXX_MAX_CAL ||
  29618. + type < WCD9XXX_MIN_CAL) {
  29619. + pr_err("%s: wrong cal type sent %d\n", __func__, type);
  29620. + return NULL;
  29621. + }
  29622. + mutex_lock(&fw_data->lock);
  29623. + if (!test_bit(WCDCAL_RECIEVED,
  29624. + &fw_data->wcdcal_state[type])) {
  29625. + pr_err("%s: cal not sent by userspace %d\n",
  29626. + __func__, type);
  29627. + mutex_unlock(&fw_data->lock);
  29628. + return NULL;
  29629. + }
  29630. + mutex_unlock(&fw_data->lock);
  29631. + return fw_data->fw[type];
  29632. +}
  29633. +EXPORT_SYMBOL(wcdcal_get_fw_cal);
  29634. +
  29635. +static int wcdcal_hwdep_ioctl_shared(struct snd_hwdep *hw,
  29636. + struct wcdcal_ioctl_buffer fw_user)
  29637. +{
  29638. + struct fw_info *fw_data = hw->private_data;
  29639. + struct firmware_cal **fw = fw_data->fw;
  29640. + void *data;
  29641. +
  29642. + if (!test_bit(fw_user.cal_type, fw_data->cal_bit)) {
  29643. + pr_err("%s: codec didn't set this %d!!\n",
  29644. + __func__, fw_user.cal_type);
  29645. + return -EFAULT;
  29646. + }
  29647. + if (fw_user.cal_type >= WCD9XXX_MAX_CAL ||
  29648. + fw_user.cal_type < WCD9XXX_MIN_CAL) {
  29649. + pr_err("%s: wrong cal type sent %d\n",
  29650. + __func__, fw_user.cal_type);
  29651. + return -EFAULT;
  29652. + }
  29653. + if (fw_user.size > cal_size_info[fw_user.cal_type] ||
  29654. + fw_user.size <= 0) {
  29655. + pr_err("%s: incorrect firmware size %d for %s\n",
  29656. + __func__, fw_user.size,
  29657. + cal_name_info[fw_user.cal_type]);
  29658. + return -EFAULT;
  29659. + }
  29660. + data = fw[fw_user.cal_type]->data;
  29661. + memcpy(data, fw_user.buffer, fw_user.size);
  29662. + fw[fw_user.cal_type]->size = fw_user.size;
  29663. + mutex_lock(&fw_data->lock);
  29664. + set_bit(WCDCAL_RECIEVED, &fw_data->wcdcal_state[fw_user.cal_type]);
  29665. + mutex_unlock(&fw_data->lock);
  29666. + return 0;
  29667. +}
  29668. +
  29669. +#ifdef CONFIG_COMPAT
  29670. +struct wcdcal_ioctl_buffer32 {
  29671. + u32 size;
  29672. + compat_uptr_t buffer;
  29673. + enum wcd_cal_type cal_type;
  29674. +};
  29675. +
  29676. +enum {
  29677. + SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE32 =
  29678. + _IOW('U', 0x1, struct wcdcal_ioctl_buffer32),
  29679. +};
  29680. +
  29681. +static int wcdcal_hwdep_ioctl_compat(struct snd_hwdep *hw, struct file *file,
  29682. + unsigned int cmd, unsigned long arg)
  29683. +{
  29684. + struct wcdcal_ioctl_buffer __user *argp = (void __user *)arg;
  29685. + struct wcdcal_ioctl_buffer32 fw_user32;
  29686. + struct wcdcal_ioctl_buffer fw_user_compat;
  29687. +
  29688. + if (cmd != SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE32) {
  29689. + pr_err("%s: wrong ioctl command sent %u!\n", __func__, cmd);
  29690. + return -ENOIOCTLCMD;
  29691. + }
  29692. + if (copy_from_user(&fw_user32, argp, sizeof(fw_user32))) {
  29693. + pr_err("%s: failed to copy\n", __func__);
  29694. + return -EFAULT;
  29695. + }
  29696. + fw_user_compat.size = fw_user32.size;
  29697. + fw_user_compat.buffer = compat_ptr(fw_user32.buffer);
  29698. + fw_user_compat.cal_type = fw_user32.cal_type;
  29699. + return wcdcal_hwdep_ioctl_shared(hw, fw_user_compat);
  29700. +}
  29701. +#else
  29702. +#define wcdcal_hwdep_ioctl_compat NULL
  29703. +#endif
  29704. +
  29705. +static int wcdcal_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
  29706. + unsigned int cmd, unsigned long arg)
  29707. +{
  29708. + struct wcdcal_ioctl_buffer __user *argp = (void __user *)arg;
  29709. + struct wcdcal_ioctl_buffer fw_user;
  29710. +
  29711. + if (cmd != SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE) {
  29712. + pr_err("%s: wrong ioctl command sent %d!\n", __func__, cmd);
  29713. + return -ENOIOCTLCMD;
  29714. + }
  29715. + if (copy_from_user(&fw_user, argp, sizeof(fw_user))) {
  29716. + pr_err("%s: failed to copy\n", __func__);
  29717. + return -EFAULT;
  29718. + }
  29719. + return wcdcal_hwdep_ioctl_shared(hw, fw_user);
  29720. +}
  29721. +
  29722. +static int wcdcal_hwdep_release(struct snd_hwdep *hw, struct file *file)
  29723. +{
  29724. + struct fw_info *fw_data = hw->private_data;
  29725. + mutex_lock(&fw_data->lock);
  29726. + /* clear all the calibrations */
  29727. + memset(fw_data->wcdcal_state, 0,
  29728. + sizeof(fw_data->wcdcal_state));
  29729. + mutex_unlock(&fw_data->lock);
  29730. + return 0;
  29731. +}
  29732. +
  29733. +int wcd_cal_create_hwdep(void *data, int node, struct snd_soc_codec *codec)
  29734. +{
  29735. + char hwname[40];
  29736. + struct snd_hwdep *hwdep;
  29737. + struct firmware_cal **fw;
  29738. + struct fw_info *fw_data = data;
  29739. + int err, cal_bit;
  29740. +
  29741. + if (!fw_data || !codec) {
  29742. + pr_err("%s: wrong arguments passed\n", __func__);
  29743. + return -EINVAL;
  29744. + }
  29745. +
  29746. + fw = fw_data->fw;
  29747. + snprintf(hwname, strlen("Codec %s"), "Codec %s", codec->name);
  29748. + err = snd_hwdep_new(codec->card->snd_card, hwname, node, &hwdep);
  29749. + if (err < 0) {
  29750. + dev_err(codec->dev, "%s: new hwdep failed %d\n",
  29751. + __func__, err);
  29752. + return err;
  29753. + }
  29754. + snprintf(hwdep->name, strlen("Codec %s"), "Codec %s", codec->name);
  29755. + hwdep->iface = SNDRV_HWDEP_IFACE_AUDIO_CODEC;
  29756. + hwdep->private_data = fw_data;
  29757. + hwdep->ops.ioctl_compat = wcdcal_hwdep_ioctl_compat;
  29758. + hwdep->ops.ioctl = wcdcal_hwdep_ioctl;
  29759. + hwdep->ops.release = wcdcal_hwdep_release;
  29760. + mutex_init(&fw_data->lock);
  29761. +
  29762. + for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
  29763. + set_bit(WCDCAL_UNINITIALISED,
  29764. + &fw_data->wcdcal_state[cal_bit]);
  29765. + fw[cal_bit] = kzalloc(sizeof *(fw[cal_bit]), GFP_KERNEL);
  29766. + if (!fw[cal_bit]) {
  29767. + dev_err(codec->dev, "%s: no memory for %s cal\n",
  29768. + __func__, cal_name_info[cal_bit]);
  29769. + goto end;
  29770. + }
  29771. + }
  29772. + for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
  29773. + fw[cal_bit]->data = kzalloc(cal_size_info[cal_bit],
  29774. + GFP_KERNEL);
  29775. + if (!fw[cal_bit]->data) {
  29776. + dev_err(codec->dev, "%s: no memory for %s cal data\n",
  29777. + __func__, cal_name_info[cal_bit]);
  29778. + goto exit;
  29779. + }
  29780. + set_bit(WCDCAL_INITIALISED,
  29781. + &fw_data->wcdcal_state[cal_bit]);
  29782. + }
  29783. + return 0;
  29784. +exit:
  29785. + for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
  29786. + kfree(fw[cal_bit]->data);
  29787. + fw[cal_bit]->data = NULL;
  29788. + }
  29789. +end:
  29790. + for_each_set_bit(cal_bit, fw_data->cal_bit, WCD9XXX_MAX_CAL) {
  29791. + kfree(fw[cal_bit]);
  29792. + fw[cal_bit] = NULL;
  29793. + }
  29794. + return -ENOMEM;
  29795. +}
  29796. +EXPORT_SYMBOL(wcd_cal_create_hwdep);
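
On the userspace side, a calibration blob reaches wcdcal_hwdep_ioctl() through the hwdep character device registered above. The sketch below shows that call flow only; the layout of struct wcdcal_ioctl_buffer and the SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE definition come from sound/msmcal-hwdep.h, which is not part of this excerpt, and the device path is an assumption.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/msmcal-hwdep.h>	/* wcdcal_ioctl_buffer, cal types, ioctl cmd */

static int push_mbhc_cal(const char *hwdep_dev, void *blob, unsigned int len)
{
	struct wcdcal_ioctl_buffer cal;
	int fd, ret;

	fd = open(hwdep_dev, O_RDWR);	/* e.g. the codec's /dev/snd/hwC0D* node */
	if (fd < 0)
		return -1;

	memset(&cal, 0, sizeof(cal));
	cal.buffer = blob;		/* copied into fw[cal_type]->data by the driver */
	cal.size = len;			/* must not exceed cal_size_info[cal_type] */
	cal.cal_type = WCD9XXX_MBHC_CAL;

	ret = ioctl(fd, SNDRV_CTL_IOCTL_HWDEP_CAL_TYPE, &cal);
	close(fd);
	return ret;
}
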
  29797. diff --git a/sound/soc/codecs/wcdcal-hwdep.h b/sound/soc/codecs/wcdcal-hwdep.h
  29798. new file mode 100644
  29799. index 0000000..632e2f1
  29800. --- /dev/null
  29801. +++ b/sound/soc/codecs/wcdcal-hwdep.h
  29802. @@ -0,0 +1,40 @@
  29803. +/*
  29804. + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  29805. + *
  29806. + * This program is free software; you can redistribute it and/or modify
  29807. + * it under the terms of the GNU General Public License version 2 and
  29808. + * only version 2 as published by the Free Software Foundation.
  29809. + *
  29810. + * This program is distributed in the hope that it will be useful,
  29811. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  29812. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  29813. + * GNU General Public License for more details.
  29814. + */
  29815. +#ifndef __WCD9XXX_HWDEP_H__
  29816. +#define __WCD9XXX_HWDEP_H__
  29817. +#include <sound/msmcal-hwdep.h>
  29818. +
  29819. +enum wcd_cal_states {
  29820. + WCDCAL_UNINITIALISED,
  29821. + WCDCAL_INITIALISED,
  29822. + WCDCAL_RECIEVED
  29823. +};
  29824. +
  29825. +struct fw_info {
  29826. + struct firmware_cal *fw[WCD9XXX_MAX_CAL];
  29827. + DECLARE_BITMAP(cal_bit, WCD9XXX_MAX_CAL);
  29828. + /* for calibration tracking */
  29829. + unsigned long wcdcal_state[WCD9XXX_MAX_CAL];
  29830. + struct mutex lock;
  29831. +};
  29832. +
  29833. +struct firmware_cal {
  29834. + u8 *data;
  29835. + size_t size;
  29836. +};
  29837. +
  29838. +struct snd_soc_codec;
  29839. +int wcd_cal_create_hwdep(void *fw, int node, struct snd_soc_codec *codec);
  29840. +struct firmware_cal *wcdcal_get_fw_cal(struct fw_info *fw_data,
  29841. + enum wcd_cal_type type);
  29842. +#endif /* __WCD9XXX_HWDEP_H__ */
  29843. diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
  29844. index 7bb86d9e..7fe63cb 100644
  29845. --- a/sound/soc/msm/Kconfig
  29846. +++ b/sound/soc/msm/Kconfig
  29847. @@ -183,6 +183,7 @@ config SND_SOC_MSM8974
  29848. select SND_DYNAMIC_MINORS
  29849. select AUDIO_OCMEM
  29850. select DOLBY_DAP
  29851. + select SND_HWDEP
  29852. help
  29853. To add support for SoC audio on MSM8974.
  29854. This will enable sound soc drivers which
  29855. diff --git a/sound/soc/msm/msm-pcm-loopback.c b/sound/soc/msm/msm-pcm-loopback.c
  29856. index ecf8394..f2024d8 100644
  29857. --- a/sound/soc/msm/msm-pcm-loopback.c
  29858. +++ b/sound/soc/msm/msm-pcm-loopback.c
  29859. @@ -1,4 +1,4 @@
  29860. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  29861. +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  29862.  
  29863. * This program is free software; you can redistribute it and/or modify
  29864. * it under the terms of the GNU General Public License version 2 and
  29865. @@ -200,19 +200,23 @@ static int msm_pcm_open(struct snd_pcm_substream *substream)
  29866.  
  29867. static void stop_pcm(struct msm_pcm_loopback *pcm)
  29868. {
  29869. - struct snd_soc_pcm_runtime *soc_pcm_rx =
  29870. - pcm->playback_substream->private_data;
  29871. - struct snd_soc_pcm_runtime *soc_pcm_tx =
  29872. - pcm->capture_substream->private_data;
  29873. + struct snd_soc_pcm_runtime *soc_pcm_rx;
  29874. + struct snd_soc_pcm_runtime *soc_pcm_tx;
  29875.  
  29876. if (pcm->audio_client == NULL)
  29877. return;
  29878. q6asm_cmd(pcm->audio_client, CMD_CLOSE);
  29879.  
  29880. - msm_pcm_routing_dereg_phy_stream(soc_pcm_rx->dai_link->be_id,
  29881. - SNDRV_PCM_STREAM_PLAYBACK);
  29882. - msm_pcm_routing_dereg_phy_stream(soc_pcm_tx->dai_link->be_id,
  29883. - SNDRV_PCM_STREAM_CAPTURE);
  29884. + if (pcm->playback_substream != NULL) {
  29885. + soc_pcm_rx = pcm->playback_substream->private_data;
  29886. + msm_pcm_routing_dereg_phy_stream(soc_pcm_rx->dai_link->be_id,
  29887. + SNDRV_PCM_STREAM_PLAYBACK);
  29888. + }
  29889. + if (pcm->capture_substream != NULL) {
  29890. + soc_pcm_tx = pcm->capture_substream->private_data;
  29891. + msm_pcm_routing_dereg_phy_stream(soc_pcm_tx->dai_link->be_id,
  29892. + SNDRV_PCM_STREAM_CAPTURE);
  29893. + }
  29894. q6asm_audio_client_free(pcm->audio_client);
  29895. pcm->audio_client = NULL;
  29896. }
  29897. diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
  29898. index 0009a49..40b4f0d 100644
  29899. --- a/sound/soc/msm/msm8226.c
  29900. +++ b/sound/soc/msm/msm8226.c
  29901. @@ -495,7 +495,7 @@ static const struct soc_enum msm_enum[] = {
  29902. SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
  29903. };
  29904.  
  29905. -static const char *const btsco_rate_text[] = {"8000", "16000"};
  29906. +static const char *const btsco_rate_text[] = {"BTSCO_RATE_8KHZ", "BTSCO_RATE_16KHZ"};
  29907. static const struct soc_enum msm_btsco_enum[] = {
  29908. SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
  29909. };
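
Renaming the BTSCO enum texts changes only what userspace sees for this SOC_ENUM_EXT control; the value written back is still the item index, which is why the msm8974.c put handler further down now switches on 0/1 rather than on the literal 8000/16000 rates. A sketch of the matching handler, assuming the machine driver's msm_btsco_rate and BTSCO_RATE_* definitions (whether msm8226.c carries the same handler change is not shown in this excerpt):

/* assumes the board file's msm_btsco_rate and BTSCO_RATE_* definitions */
static int btsco_rate_put_sketch(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	switch (ucontrol->value.integer.value[0]) {
	case 0:				/* "BTSCO_RATE_8KHZ" */
		msm_btsco_rate = BTSCO_RATE_8KHZ;
		break;
	case 1:				/* "BTSCO_RATE_16KHZ" */
		msm_btsco_rate = BTSCO_RATE_16KHZ;
		break;
	default:
		msm_btsco_rate = BTSCO_RATE_8KHZ;
		break;
	}
	return 0;
}
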
  29910. diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
  29911. index 05e0b0d..1a37e45 100644
  29912. --- a/sound/soc/msm/msm8974.c
  29913. +++ b/sound/soc/msm/msm8974.c
  29914. @@ -34,11 +34,6 @@
  29915. #include "../codecs/wcd9xxx-common.h"
  29916. #include "../codecs/wcd9320.h"
  29917.  
  29918. -#if defined(CONFIG_SND_SOC_ES705)
  29919. -#include "../codecs/audience/es705-export.h"
  29920. -#elif defined(CONFIG_SND_SOC_ES325)
  29921. -#include "../codecs/es325-export.h"
  29922. -#endif
  29923. #define DRV_NAME "msm8974-asoc-taiko"
  29924.  
  29925. #define MSM8974_SPK_ON 1
  29926. @@ -67,7 +62,7 @@ static int msm8974_auxpcm_rate = 8000;
  29927. #define I2S_PCM_SEL 1
  29928. #define I2S_PCM_SEL_OFFSET 1
  29929.  
  29930. -#define WCD9XXX_MBHC_DEF_BUTTONS 3
  29931. +#define WCD9XXX_MBHC_DEF_BUTTONS 8
  29932. #define WCD9XXX_MBHC_DEF_RLOADS 5
  29933. #define TAIKO_EXT_CLK_RATE 9600000
  29934.  
  29935. @@ -81,12 +76,6 @@ static int msm8974_auxpcm_rate = 8000;
  29936. #define EXT_CLASS_AB_DIS_DELAY 1000
  29937. #define EXT_CLASS_AB_DELAY_DELTA 1000
  29938.  
  29939. -#if defined (CONFIG_SND_SOC_MAX98504)
  29940. -#define GPIO_SECOND_MI2S_SCK 79
  29941. -#define GPIO_SECOND_MI2S_WS 80
  29942. -#define GPIO_SECOND_MI2S_DATA0 81
  29943. -#define GPIO_SECOND_MI2S_DATA1 82
  29944. -#endif
  29945. #define NUM_OF_AUXPCM_GPIOS 4
  29946.  
  29947. static void *adsp_state_notifier;
  29948. @@ -126,12 +115,7 @@ static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
  29949. bool dapm);
  29950.  
  29951. static struct wcd9xxx_mbhc_config mbhc_cfg = {
  29952. -#if defined(CONFIG_MACH_KS01EUR) || defined(CONFIG_MACH_KS01SKT) || \
  29953. -defined(CONFIG_MACH_KS01KTT) || defined(CONFIG_MACH_KS01LGT)
  29954. .read_fw_bin = false,
  29955. -#else
  29956. - .read_fw_bin = true,
  29957. -#endif
  29958. .calibration = NULL,
  29959. .micbias = MBHC_MICBIAS2,
  29960. .anc_micbias = MBHC_MICBIAS2,
  29961. @@ -139,23 +123,16 @@ defined(CONFIG_MACH_KS01KTT) || defined(CONFIG_MACH_KS01LGT)
  29962. .mclk_rate = TAIKO_EXT_CLK_RATE,
  29963. .gpio = 0,
  29964. .gpio_irq = 0,
  29965. - .gpio_level_insert = 0,
  29966. + .gpio_level_insert = 1,
  29967. .detect_extn_cable = true,
  29968. -#if defined(CONFIG_SEC_FACTORY)
  29969. - /* Micbias for MBHC is always on in factory test */
  29970. - .micbias_enable_flags = (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET |
  29971. - 1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET),
  29972. -#else
  29973. .micbias_enable_flags = 1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET,
  29974. -#endif
  29975. .insert_detect = true,
  29976. .swap_gnd_mic = NULL,
  29977. -#if (defined(CONFIG_MACH_KLTE_KOR) || defined(CONFIG_MACH_KLTE_JPN) || defined(CONFIG_MACH_KACTIVELTE_DCM) || defined(CONFIG_MACH_CHAGALL_KDI) || defined(CONFIG_MACH_KLIMT_LTE_DCM)) && !defined(CONFIG_SEC_FACTORY)
  29978. - .cs_enable_flags = (1 << MBHC_CS_ENABLE_POLLING),
  29979. -#else
  29980. - .cs_enable_flags = 0,
  29981. -#endif
  29982. - .do_recalibration = false,
  29983. + .cs_enable_flags = (1 << MBHC_CS_ENABLE_POLLING |
  29984. + 1 << MBHC_CS_ENABLE_INSERTION |
  29985. + 1 << MBHC_CS_ENABLE_REMOVAL |
  29986. + 1 << MBHC_CS_ENABLE_DET_ANC),
  29987. + .do_recalibration = true,
  29988. .use_vddio_meas = true,
  29989. .enable_anc_mic_detect = false,
  29990. .hw_jack_type = SIX_POLE_JACK,
  29991. @@ -239,61 +216,7 @@ static struct clk *codec_clk;
  29992. static int clk_users;
  29993. static atomic_t prim_auxpcm_rsc_ref;
  29994. static atomic_t sec_auxpcm_rsc_ref;
  29995. -static int mainmic_bias_gpio = 0;
  29996. -static int micbias_en_msm_gpio = 0;
  29997. -#if defined(CONFIG_LDO_SUBMIC_BIAS)
  29998. -static int submic_bias_gpio = 0;
  29999. -#endif
  30000. -#if defined(CONFIG_LDO_EARMIC_BIAS)
  30001. -static int earmic_bias_gpio = 0;
  30002. -#endif
  30003. -
  30004. -static int spkamp_en_gpio = 0;
  30005. -static int main_mic_delay = 0;
  30006. -#ifdef CONFIG_SEC_JACTIVE_PROJECT
  30007. -static int ear_jack_fsa8038_en = 0;
  30008. -#endif
  30009. -#if defined(CONFIG_SEC_H_PROJECT)
  30010. -int speaker_status = 0;
  30011. -EXPORT_SYMBOL(speaker_status);
  30012. -#endif
  30013. -#if defined(CONFIG_MACH_KLTE_KOR) || defined(CONFIG_MACH_KLTE_JPN) || defined(CONFIG_MACH_KACTIVELTE_DCM) || defined(CONFIG_MACH_CHAGALL_KDI) || defined(CONFIG_MACH_KLIMT_LTE_DCM)
  30014. -static int fsa_en_gpio;
  30015. -#endif
  30016. -
  30017. -#if defined (CONFIG_SND_SOC_MAX98504)
  30018. -struct request_gpio {
  30019. - unsigned gpio_no;
  30020. - char *gpio_name;
  30021. -};
  30022.  
  30023. -static struct request_gpio pri_mi2s_gpio[] = {
  30024. - {
  30025. - .gpio_no = GPIO_SECOND_MI2S_SCK,
  30026. - .gpio_name = "SECOND_MI2S_SCK",
  30027. - },
  30028. - {
  30029. - .gpio_no = GPIO_SECOND_MI2S_WS,
  30030. - .gpio_name = "SECOND_MI2S_WS",
  30031. - },
  30032. - {
  30033. - .gpio_no = GPIO_SECOND_MI2S_DATA0,
  30034. - .gpio_name = "SECOND_MI2S_DATA0",
  30035. - },
  30036. - {
  30037. - .gpio_no = GPIO_SECOND_MI2S_DATA1,
  30038. - .gpio_name = "SECOND_MI2S_DATA1",
  30039. - },
  30040. -};
  30041. -/* MI2S clock */
  30042. -struct mi2s_clk {
  30043. - struct clk *core_clk;
  30044. - struct clk *osr_clk;
  30045. - struct clk *bit_clk;
  30046. - atomic_t mi2s_rsc_ref;
  30047. -};
  30048. -static struct mi2s_clk pri_mi2s_clk;
  30049. -#endif
  30050.  
  30051. static int msm8974_liquid_ext_spk_power_amp_init(void)
  30052. {
  30053. @@ -467,22 +390,20 @@ static int msm8974_liquid_init_docking(struct snd_soc_dapm_context *dapm)
  30054.  
  30055. msm8974_liquid_dock_dev->dapm = dapm;
  30056.  
  30057. - INIT_WORK(
  30058. - &msm8974_liquid_dock_dev->irq_work,
  30059. - msm8974_liquid_docking_irq_work);
  30060. -
  30061. ret = request_irq(msm8974_liquid_dock_dev->dock_plug_irq,
  30062. msm8974_liquid_docking_irq_handler,
  30063. dock_plug_irq_flags,
  30064. "liquid_dock_plug_irq",
  30065. msm8974_liquid_dock_dev);
  30066.  
  30067. + INIT_WORK(
  30068. + &msm8974_liquid_dock_dev->irq_work,
  30069. + msm8974_liquid_docking_irq_work);
  30070. }
  30071.  
  30072. return 0;
  30073. }
  30074.  
  30075. -#if !defined(CONFIG_MACH_VIENNA_LTE) && !defined(CONFIG_MACH_V2_LTE) && !defined(CONFIG_MACH_LT03_LTE) && !defined(CONFIG_MACH_PICASSO_LTE) && !defined(CONFIG_MACH_MONDRIAN) && !defined(CONFIG_MACH_CHAGALL) && !defined(CONFIG_MACH_KLIMT)
  30076. static int msm8974_liquid_ext_spk_power_amp_on(u32 spk)
  30077. {
  30078. int rc;
  30079. @@ -589,7 +510,6 @@ static void msm8974_ext_spk_power_amp_off(u32 spk)
  30080. else if (gpio_is_valid(ext_ult_lo_amp_gpio))
  30081. msm8974_fluid_ext_us_amp_off(spk);
  30082. }
  30083. -#endif
  30084.  
  30085. static void msm8974_ext_control(struct snd_soc_codec *codec)
  30086. {
  30087. @@ -637,7 +557,6 @@ static int msm8974_set_spk(struct snd_kcontrol *kcontrol,
  30088. }
  30089.  
  30090.  
  30091. -#if !defined(CONFIG_MACH_VIENNA_LTE) && !defined(CONFIG_MACH_V2_LTE) && !defined(CONFIG_MACH_LT03_LTE) && !defined(CONFIG_MACH_PICASSO_LTE) && !defined(CONFIG_MACH_MONDRIAN) && !defined(CONFIG_MACH_CHAGALL) && !defined(CONFIG_MACH_KLIMT)
  30092. static int msm_ext_spkramp_event(struct snd_soc_dapm_widget *w,
  30093. struct snd_kcontrol *k, int event)
  30094. {
  30095. @@ -676,18 +595,6 @@ static int msm_ext_spkramp_event(struct snd_soc_dapm_widget *w,
  30096. return 0;
  30097.  
  30098. }
  30099. -#else
  30100. -static int msm_ext_spkramp_event(struct snd_soc_dapm_widget *w,
  30101. - struct snd_kcontrol *k, int event)
  30102. -{
  30103. - pr_info("%s() : control =%d\n", __func__, event);
  30104. -
  30105. - gpio_direction_output(spkamp_en_gpio,
  30106. - SND_SOC_DAPM_EVENT_ON(event));
  30107. -
  30108. - return 0;
  30109. -}
  30110. -#endif
  30111.  
  30112. static int msm_ext_spkramp_ultrasound_event(struct snd_soc_dapm_widget *w,
  30113. struct snd_kcontrol *k, int event)
  30114. @@ -777,63 +684,6 @@ static int msm8974_mclk_event(struct snd_soc_dapm_widget *w,
  30115. return 0;
  30116. }
  30117.  
  30118. -#if !defined(CONFIG_SEC_KS01_PROJECT)
  30119. -static int msm_mainmic_bias_event(struct snd_soc_dapm_widget *w,
  30120. - struct snd_kcontrol *k, int event)
  30121. -{
  30122. - pr_info("%s : Event %d, SND_SOC_DAPM:%d\n",
  30123. - __func__, (event), SND_SOC_DAPM_EVENT_ON(event));
  30124. -
  30125. - if (mainmic_bias_gpio < 0) {
  30126. - gpio_set_value_cansleep(micbias_en_msm_gpio,
  30127. - SND_SOC_DAPM_EVENT_ON(event));
  30128. - } else {
  30129. - gpio_direction_output(mainmic_bias_gpio,
  30130. - SND_SOC_DAPM_EVENT_ON(event));
  30131. - }
  30132. -
  30133. - if(main_mic_delay) {
  30134. - if(main_mic_delay != 100)
  30135. - main_mic_delay *= 50;
  30136. - msleep(main_mic_delay);
  30137. - pr_info("%s: main_mic_delay = %d\n", __func__, main_mic_delay);
  30138. - main_mic_delay = 0;
  30139. - }
  30140. -
  30141. - return 0;
  30142. -}
  30143. -#endif
  30144. -
  30145. -#if defined(CONFIG_LDO_SUBMIC_BIAS)
  30146. -static int msm_submic_bias_event(struct snd_soc_dapm_widget *w,
  30147. - struct snd_kcontrol *k, int event)
  30148. -{
  30149. - pr_info("%s : Event %d, SND_SOC_DAPM:%d\n",
  30150. - __func__, (event), SND_SOC_DAPM_EVENT_ON(event));
  30151. -
  30152. - gpio_direction_output(submic_bias_gpio,
  30153. - SND_SOC_DAPM_EVENT_ON(event));
  30154. -
  30155. - return 0;
  30156. -}
  30157. -#endif
  30158. -
  30159. -#if defined(CONFIG_LDO_EARMIC_BIAS)
  30160. -static int msm_earmic_bias_event(struct snd_soc_dapm_widget *w,
  30161. - struct snd_kcontrol *k, int event)
  30162. -{
  30163. - pr_info("%s : Event %d, SND_SOC_DAPM:%d\n",
  30164. - __func__, (event), SND_SOC_DAPM_EVENT_ON(event));
  30165. -
  30166. - gpio_direction_output(earmic_bias_gpio,
  30167. - SND_SOC_DAPM_EVENT_ON(event));
  30168. -
  30169. - return 0;
  30170. -}
  30171. -#endif
  30172. -
  30173. -
  30174. -#ifdef CONFIG_SEC_K_PROJECT
  30175. static const struct snd_soc_dapm_widget msm8974_dapm_widgets[] = {
  30176.  
  30177. SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
  30178. @@ -856,101 +706,12 @@ static const struct snd_soc_dapm_widget msm8974_dapm_widgets[] = {
  30179. SND_SOC_DAPM_MIC("Analog Mic7", NULL),
  30180.  
  30181. SND_SOC_DAPM_MIC("Digital Mic1", NULL),
  30182. - SND_SOC_DAPM_MIC("Main Mic", msm_mainmic_bias_event),
  30183. - SND_SOC_DAPM_MIC("Digital Mic3", NULL),
  30184. - SND_SOC_DAPM_MIC("Sub Mic", NULL),
  30185. - SND_SOC_DAPM_MIC("Digital Mic5", NULL),
  30186. - SND_SOC_DAPM_MIC("Third Mic", NULL),
  30187. -};
  30188. -#elif defined (CONFIG_SEC_JACTIVE_PROJECT)
  30189. -static const struct snd_soc_dapm_widget msm8974_dapm_widgets_01[] = {
  30190. -
  30191. - SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
  30192. - msm8974_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
  30193. -
  30194. - SND_SOC_DAPM_SPK("Lineout_1 amp", msm_ext_spkramp_event),
  30195. - SND_SOC_DAPM_SPK("Lineout_3 amp", msm_ext_spkramp_event),
  30196. -
  30197. - SND_SOC_DAPM_SPK("Lineout_2 amp", msm_ext_spkramp_event),
  30198. - SND_SOC_DAPM_SPK("Lineout_4 amp", msm_ext_spkramp_event),
  30199. - SND_SOC_DAPM_SPK("SPK_ultrasound amp",
  30200. - msm_ext_spkramp_ultrasound_event),
  30201. -
  30202. - SND_SOC_DAPM_MIC("Main Mic", NULL),
  30203. - SND_SOC_DAPM_MIC("Headset Mic", NULL),
  30204. - SND_SOC_DAPM_MIC("Sub Mic", NULL),
  30205. - SND_SOC_DAPM_MIC("Third Mic", NULL),
  30206. -
  30207. - SND_SOC_DAPM_MIC("Digital Mic1", NULL),
  30208. - SND_SOC_DAPM_MIC("Digital Mic2", NULL),
  30209. - SND_SOC_DAPM_MIC("Digital Mic3", NULL),
  30210. - SND_SOC_DAPM_MIC("Digital Mic4", NULL),
  30211. - SND_SOC_DAPM_MIC("Digital Mic5", NULL),
  30212. - SND_SOC_DAPM_MIC("Digital Mic6", NULL),
  30213. -};
  30214. -
  30215. -static const struct snd_soc_dapm_widget msm8974_dapm_widgets[] = {
  30216. -
  30217. - SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
  30218. - msm8974_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
  30219. -
  30220. - SND_SOC_DAPM_SPK("Lineout_1 amp", msm_ext_spkramp_event),
  30221. - SND_SOC_DAPM_SPK("Lineout_3 amp", msm_ext_spkramp_event),
  30222. -
  30223. - SND_SOC_DAPM_SPK("Lineout_2 amp", msm_ext_spkramp_event),
  30224. - SND_SOC_DAPM_SPK("Lineout_4 amp", msm_ext_spkramp_event),
  30225. - SND_SOC_DAPM_SPK("SPK_ultrasound amp",
  30226. - msm_ext_spkramp_ultrasound_event),
  30227. - SND_SOC_DAPM_MIC("Main Mic", msm_mainmic_bias_event),
  30228. - SND_SOC_DAPM_MIC("Headset Mic", NULL),
  30229. - SND_SOC_DAPM_MIC("Sub Mic", NULL),
  30230. - SND_SOC_DAPM_MIC("Third Mic", NULL),
  30231. -
  30232. - SND_SOC_DAPM_MIC("Digital Mic1", NULL),
  30233. - SND_SOC_DAPM_MIC("Digital Mic2", NULL),
  30234. - SND_SOC_DAPM_MIC("Digital Mic3", NULL),
  30235. - SND_SOC_DAPM_MIC("Digital Mic4", NULL),
  30236. - SND_SOC_DAPM_MIC("Digital Mic5", NULL),
  30237. - SND_SOC_DAPM_MIC("Digital Mic6", NULL),
  30238. -};
  30239. -#else
  30240. -static const struct snd_soc_dapm_widget msm8974_dapm_widgets[] = {
  30241. -
  30242. - SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
  30243. - msm8974_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
  30244. -
  30245. - SND_SOC_DAPM_SPK("Lineout_1 amp", msm_ext_spkramp_event),
  30246. - SND_SOC_DAPM_SPK("Lineout_3 amp", msm_ext_spkramp_event),
  30247. -
  30248. - SND_SOC_DAPM_SPK("Lineout_2 amp", msm_ext_spkramp_event),
  30249. - SND_SOC_DAPM_SPK("Lineout_4 amp", msm_ext_spkramp_event),
  30250. - SND_SOC_DAPM_SPK("SPK_ultrasound amp",
  30251. - msm_ext_spkramp_ultrasound_event),
  30252. -#if defined(CONFIG_SEC_KS01_PROJECT)
  30253. - SND_SOC_DAPM_MIC("Main Mic", NULL),
  30254. -#else
  30255. - SND_SOC_DAPM_MIC("Main Mic", msm_mainmic_bias_event),
  30256. -#endif
  30257. -#if defined(CONFIG_LDO_EARMIC_BIAS)
  30258. - SND_SOC_DAPM_MIC("Headset Mic", msm_earmic_bias_event),
  30259. -#else
  30260. - SND_SOC_DAPM_MIC("Headset Mic", NULL),
  30261. -#endif
  30262. -#if defined(CONFIG_LDO_SUBMIC_BIAS)
  30263. - SND_SOC_DAPM_MIC("Sub Mic", msm_submic_bias_event),
  30264. -#else
  30265. - SND_SOC_DAPM_MIC("Sub Mic", NULL),
  30266. -#endif
  30267. - SND_SOC_DAPM_MIC("Third Mic", NULL),
  30268. -
  30269. - SND_SOC_DAPM_MIC("Digital Mic1", NULL),
  30270. SND_SOC_DAPM_MIC("Digital Mic2", NULL),
  30271. SND_SOC_DAPM_MIC("Digital Mic3", NULL),
  30272. SND_SOC_DAPM_MIC("Digital Mic4", NULL),
  30273. SND_SOC_DAPM_MIC("Digital Mic5", NULL),
  30274. SND_SOC_DAPM_MIC("Digital Mic6", NULL),
  30275. };
  30276. -#endif
  30277.  
  30278. static const char *const spk_function[] = {"Off", "On"};
  30279. static const char *const slim0_rx_ch_text[] = {"One", "Two"};
  30280. @@ -967,8 +728,7 @@ static const char *const proxy_rx_ch_text[] = {"One", "Two", "Three", "Four",
  30281.  
  30282. static char const *hdmi_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
  30283. "KHZ_192"};
  30284. -
  30285. -static const char *const btsco_rate_text[] = {"8000", "16000"};
  30286. +static const char *const btsco_rate_text[] = {"BTSCO_RATE_8KHZ", "BTSCO_RATE_16KHZ"};
  30287. static const struct soc_enum msm_btsco_enum[] = {
  30288. SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
  30289. };
  30290. @@ -1018,7 +778,7 @@ static int slim0_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
  30291. slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;
  30292. }
  30293.  
  30294. - pr_info("%s: slim0_rx_sample_rate = %d\n", __func__,
  30295. + pr_debug("%s: slim0_rx_sample_rate = %d\n", __func__,
  30296. slim0_rx_sample_rate);
  30297.  
  30298. return 0;
  30299. @@ -1110,10 +870,10 @@ static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol,
  30300. struct snd_ctl_elem_value *ucontrol)
  30301. {
  30302. switch (ucontrol->value.integer.value[0]) {
  30303. - case 8000:
  30304. + case 0:
  30305. msm_btsco_rate = BTSCO_RATE_8KHZ;
  30306. break;
  30307. - case 16000:
  30308. + case 1:
  30309. msm_btsco_rate = BTSCO_RATE_16KHZ;
  30310. break;
  30311. default:
  30312. @@ -1239,11 +999,6 @@ static int hdmi_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
  30313. return 0;
  30314. }
  30315.  
  30316. -static const struct snd_kcontrol_new int_btsco_rate_mixer_controls[] = {
  30317. - SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
  30318. - msm_btsco_rate_get, msm_btsco_rate_put),
  30319. -};
  30320. -
  30321. static int msm_btsco_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30322. struct snd_pcm_hw_params *params)
  30323. {
  30324. @@ -1272,58 +1027,16 @@ static int msm8974_auxpcm_rate_put(struct snd_kcontrol *kcontrol,
  30325. switch (ucontrol->value.integer.value[0]) {
  30326. case 0:
  30327. msm8974_auxpcm_rate = 8000;
  30328. - msm_btsco_rate = BTSCO_RATE_8KHZ;
  30329. break;
  30330. case 1:
  30331. msm8974_auxpcm_rate = 16000;
  30332. - msm_btsco_rate = BTSCO_RATE_16KHZ;
  30333. break;
  30334. default:
  30335. msm8974_auxpcm_rate = 8000;
  30336. - msm_btsco_rate = BTSCO_RATE_8KHZ;
  30337. break;
  30338. }
  30339. - pr_info("%s: BT sample rate = %d ====\n",__func__, msm8974_auxpcm_rate);
  30340. return 0;
  30341. }
  30342. -
  30343. -static int main_mic_delay_get(struct snd_kcontrol *kcontrol,
  30344. - struct snd_ctl_elem_value *ucontrol)
  30345. -{
  30346. - pr_debug("%s: main_mic_delay = %d\n", __func__,
  30347. - main_mic_delay);
  30348. - ucontrol->value.integer.value[0] = main_mic_delay;
  30349. - return 0;
  30350. -}
  30351. -
  30352. -static int main_mic_delay_put(struct snd_kcontrol *kcontrol,
  30353. - struct snd_ctl_elem_value *ucontrol)
  30354. -{
  30355. - main_mic_delay = ucontrol->value.integer.value[0];
  30356. -
  30357. - pr_debug("%s: main_mic_delay = %d\n", __func__,
  30358. - main_mic_delay);
  30359. - return 1;
  30360. -}
  30361. -
  30362. -#if defined(CONFIG_SEC_H_PROJECT)
  30363. -static int speaker_status_get(struct snd_kcontrol *kcontrol,
  30364. - struct snd_ctl_elem_value *ucontrol)
  30365. -{
  30366. - pr_info("%s: speaker_status = %d\n", __func__, speaker_status);
  30367. - ucontrol->value.integer.value[0] = speaker_status;
  30368. - return 0;
  30369. -}
  30370. -
  30371. -static int speaker_status_put(struct snd_kcontrol *kcontrol,
  30372. - struct snd_ctl_elem_value *ucontrol)
  30373. -{
  30374. - speaker_status = ucontrol->value.integer.value[0];
  30375. - pr_info("%s: speaker_status = %d\n", __func__, speaker_status);
  30376. - return 1;
  30377. -}
  30378. -#endif
  30379. -
  30380. static int msm_proxy_rx_ch_get(struct snd_kcontrol *kcontrol,
  30381. struct snd_ctl_elem_value *ucontrol)
  30382. {
  30383. @@ -1357,22 +1070,6 @@ static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30384. return 0;
  30385. }
  30386.  
  30387. -#if defined( CONFIG_PCM_ROUTE_VOICE_STUB ) || defined(CONFIG_BT_CALL_FORWARDING)
  30388. -static int msm_auxpcm2_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30389. - struct snd_pcm_hw_params *params)
  30390. -{
  30391. - struct snd_interval *rate =
  30392. - hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
  30393. -
  30394. - struct snd_interval *channels =
  30395. - hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
  30396. -
  30397. - rate->min = rate->max = 8000;
  30398. - channels->min = channels->max = 1;
  30399. -
  30400. - return 0;
  30401. -}
  30402. -#endif
  30403. static int msm_proxy_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30404. struct snd_pcm_hw_params *params)
  30405. {
  30406. @@ -1410,7 +1107,7 @@ static int msm8974_hdmi_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30407. struct snd_interval *channels = hw_param_interval(params,
  30408. SNDRV_PCM_HW_PARAM_CHANNELS);
  30409.  
  30410. - pr_info("%s channels->min %u channels->max %u ()\n", __func__,
  30411. + pr_debug("%s channels->min %u channels->max %u ()\n", __func__,
  30412. channels->min, channels->max);
  30413.  
  30414. param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
  30415. @@ -1418,9 +1115,7 @@ static int msm8974_hdmi_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30416. if (channels->max < 2)
  30417. channels->min = channels->max = 2;
  30418. rate->min = rate->max = hdmi_rx_sample_rate;
  30419. -
  30420. - if (channels->min != channels->max)
  30421. - channels->min = channels->max;
  30422. + channels->min = channels->max = msm_hdmi_rx_ch;
  30423.  
  30424. return 0;
  30425. }
  30426. @@ -1606,7 +1301,7 @@ static int msm_slim_0_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30427. rate->min = rate->max = slim0_rx_sample_rate;
  30428. channels->min = channels->max = msm_slim_0_rx_ch;
  30429.  
  30430. - pr_info("%s: format = %d, rate = %d, channels = %d\n",
  30431. + pr_debug("%s: format = %d, rate = %d, channels = %d\n",
  30432. __func__, params_format(params), params_rate(params),
  30433. msm_slim_0_rx_ch);
  30434.  
  30435. @@ -1628,10 +1323,6 @@ static int msm_slim_0_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30436. rate->min = rate->max = 48000;
  30437. channels->min = channels->max = msm_slim_0_tx_ch;
  30438.  
  30439. - pr_info("%s: format = %d, rate = %d, channels = %d\n",
  30440. - __func__, params_format(params), params_rate(params),
  30441. - msm_slim_0_tx_ch);
  30442. -
  30443. return 0;
  30444. }
  30445.  
  30446. @@ -1690,29 +1381,6 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30447. return 0;
  30448. }
  30449.  
  30450. -#if defined(CONFIG_SND_SOC_MAX98504) || defined(CONFIG_SND_SOC_MAX98505)
  30451. -static int msm8974_mi2s_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30452. - struct snd_pcm_hw_params *params)
  30453. -{
  30454. - struct snd_interval *rate = hw_param_interval(params,
  30455. - SNDRV_PCM_HW_PARAM_RATE);
  30456. - struct snd_interval *channels = hw_param_interval(params,
  30457. - SNDRV_PCM_HW_PARAM_CHANNELS);
  30458. - pr_debug("%s: enter\n", __func__);
  30459. -
  30460. - rate->min = rate->max = 48000;
  30461. -
  30462. -
  30463. - param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
  30464. - SNDRV_PCM_FORMAT_S16_LE);
  30465. -
  30466. - channels->min = channels->max = 2;
  30467. -
  30468. - return 0;
  30469. -}
  30470. -#endif
  30471. -
  30472. -
  30473. static int msm_be_fm_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  30474. struct snd_pcm_hw_params *params)
  30475. {
  30476. @@ -1763,12 +1431,6 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
  30477. msm_btsco_rate_get, msm_btsco_rate_put),
  30478. SOC_ENUM_EXT("HDMI_RX SampleRate", msm_snd_enum[7],
  30479. hdmi_rx_sample_rate_get, hdmi_rx_sample_rate_put),
  30480. - SOC_SINGLE_EXT("Main Mic Delay",SND_SOC_NOPM, 0, 100, 0,
  30481. - main_mic_delay_get, main_mic_delay_put),
  30482. -#if defined(CONFIG_SEC_H_PROJECT)
  30483. - SOC_SINGLE_EXT("SPK Status",SND_SOC_NOPM, 0, 1, 0,
  30484. - speaker_status_get, speaker_status_put),
  30485. -#endif
  30486. };
  30487.  
  30488. static bool msm8974_swap_gnd_mic(struct snd_soc_codec *codec)
  30489. @@ -1876,10 +1538,6 @@ static int msm8974_taiko_event_cb(struct snd_soc_codec *codec,
  30490. }
  30491. }
  30492.  
  30493. -#if defined(CONFIG_MACH_KLTE_KOR) || defined(CONFIG_MACH_KLTE_JPN) || defined(CONFIG_MACH_KACTIVELTE_DCM) || defined(CONFIG_MACH_CHAGALL_KDI) || defined(CONFIG_MACH_KLIMT_LTE_DCM)
  30494. -extern unsigned int system_rev;
  30495. -#endif
  30496. -
  30497. static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
  30498. {
  30499. int err;
  30500. @@ -1888,9 +1546,6 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
  30501. struct snd_soc_dapm_context *dapm = &codec->dapm;
  30502. struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
  30503. struct snd_soc_dai *codec_dai = rtd->codec_dai;
  30504. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  30505. - extern unsigned int system_rev;
  30506. -#endif
  30507.  
  30508. /* Taiko SLIMBUS configuration
  30509. * RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8, RX9, RX10, RX11, RX12, RX13
  30510. @@ -1926,21 +1581,10 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
  30511. __func__, err);
  30512. return err;
  30513. }
  30514. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  30515. - pr_info("1. msm_audrx_init system_rev %d",system_rev);
  30516. - if(system_rev < 3) {
  30517. - snd_soc_dapm_new_controls(dapm, msm8974_dapm_widgets_01,
  30518. - ARRAY_SIZE(msm8974_dapm_widgets_01));
  30519. - }
  30520. - else
  30521. - {
  30522. - snd_soc_dapm_new_controls(dapm, msm8974_dapm_widgets,
  30523. - ARRAY_SIZE(msm8974_dapm_widgets));
  30524. - }
  30525. -#else
  30526. +
  30527. snd_soc_dapm_new_controls(dapm, msm8974_dapm_widgets,
  30528. ARRAY_SIZE(msm8974_dapm_widgets));
  30529. -#endif
  30530. +
  30531. snd_soc_dapm_enable_pin(dapm, "Lineout_1 amp");
  30532. snd_soc_dapm_enable_pin(dapm, "Lineout_3 amp");
  30533. snd_soc_dapm_enable_pin(dapm, "Lineout_2 amp");
  30534. @@ -1988,37 +1632,6 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
  30535. return err;
  30536. }
  30537. }
  30538. -
  30539. -#if defined(CONFIG_MACH_KLTE_KOR)
  30540. - if (system_rev >= 13) {
  30541. - pr_info("%s: USE MBHC revision %d\n", __func__, system_rev);
  30542. - /* start mbhc */
  30543. - mbhc_cfg.calibration = def_taiko_mbhc_cal();
  30544. - if (mbhc_cfg.calibration) {
  30545. - err = taiko_hs_detect(codec, &mbhc_cfg);
  30546. - if (err)
  30547. - goto out;
  30548. - } else {
  30549. - err = -ENOMEM;
  30550. - goto out;
  30551. - }
  30552. - }
  30553. -#elif defined(CONFIG_MACH_KLTE_JPN)
  30554. - if (system_rev >= 11) {
  30555. - pr_info("%s: USE MBHC revision %d\n", __func__, system_rev);
  30556. - /* start mbhc */
  30557. - mbhc_cfg.calibration = def_taiko_mbhc_cal();
  30558. - if (mbhc_cfg.calibration) {
  30559. - err = taiko_hs_detect(codec, &mbhc_cfg);
  30560. - if (err)
  30561. - goto out;
  30562. - } else {
  30563. - err = -ENOMEM;
  30564. - goto out;
  30565. - }
  30566. - }
  30567. -#else
  30568. -#if !defined(CONFIG_SAMSUNG_JACK) && !defined(CONFIG_MUIC_DET_JACK)
  30569. /* start mbhc */
  30570. mbhc_cfg.calibration = def_taiko_mbhc_cal();
  30571. if (mbhc_cfg.calibration) {
  30572. @@ -2029,23 +1642,6 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
  30573. err = -ENOMEM;
  30574. goto out;
  30575. }
  30576. -#elif defined(CONFIG_SEC_JACTIVE_PROJECT)
  30577. - pr_info("2. msm_audrx_init system_rev %d",system_rev);
  30578. - if(system_rev < 3) {
  30579. - mbhc_cfg.calibration = def_taiko_mbhc_cal();
  30580. - if (mbhc_cfg.calibration) {
  30581. - err = taiko_hs_detect(codec, &mbhc_cfg);
  30582. - if (err)
  30583. - goto out;
  30584. - else
  30585. - return err;
  30586. - } else {
  30587. - err = -ENOMEM;
  30588. - goto out;
  30589. - }
  30590. - }
  30591. -#endif
  30592. -#endif /* CONFIG_MACH_KLTE_KOR */
  30593. adsp_state_notifier =
  30594. subsys_notif_register_notifier("adsp",
  30595. &adsp_state_notifier_block);
  30596. @@ -2053,19 +1649,7 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
  30597. pr_err("%s: Failed to register adsp state notifier\n",
  30598. __func__);
  30599. err = -EFAULT;
  30600. -#if defined(CONFIG_MACH_KLTE_KOR)
  30601. - if (system_rev >= 13) {
  30602. - taiko_hs_detect_exit(codec);
  30603. - }
  30604. -#elif defined(CONFIG_MACH_KLTE_JPN)
  30605. - if (system_rev >= 11) {
  30606. - taiko_hs_detect_exit(codec);
  30607. - }
  30608. -#else
  30609. -#if !defined(CONFIG_SAMSUNG_JACK) && !defined(CONFIG_MUIC_DET_JACK)
  30610. taiko_hs_detect_exit(codec);
  30611. -#endif
  30612. -#endif /* CONFIG_MACH_KLTE_KOR */
  30613. goto out;
  30614. }
  30615.  
  30616. @@ -2113,8 +1697,8 @@ void *def_taiko_mbhc_cal(void)
  30617. S(t_ins_retry, 200);
  30618. #undef S
  30619. #define S(X, Y) ((WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(taiko_cal)->X) = (Y))
  30620. - S(v_no_mic, 950);
  30621. - S(v_hs_max, 3000);
  30622. + S(v_no_mic, 30);
  30623. + S(v_hs_max, 2400);
  30624. #undef S
  30625. #define S(X, Y) ((WCD9XXX_MBHC_CAL_BTN_DET_PTR(taiko_cal)->X) = (Y))
  30626. S(c[0], 62);
  30627. @@ -2133,11 +1717,21 @@ void *def_taiko_mbhc_cal(void)
  30628. btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg,
  30629. MBHC_BTN_DET_V_BTN_HIGH);
  30630. btn_low[0] = -50;
  30631. - btn_high[0] = 160;
  30632. - btn_low[1] = 161;
  30633. - btn_high[1] = 330;
  30634. - btn_low[2] = 331;
  30635. - btn_high[2] = 730;
  30636. + btn_high[0] = 20;
  30637. + btn_low[1] = 21;
  30638. + btn_high[1] = 61;
  30639. + btn_low[2] = 62;
  30640. + btn_high[2] = 104;
  30641. + btn_low[3] = 105;
  30642. + btn_high[3] = 148;
  30643. + btn_low[4] = 149;
  30644. + btn_high[4] = 189;
  30645. + btn_low[5] = 190;
  30646. + btn_high[5] = 228;
  30647. + btn_low[6] = 229;
  30648. + btn_high[6] = 269;
  30649. + btn_low[7] = 270;
  30650. + btn_high[7] = 500;
  30651. n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_READY);
  30652. n_ready[0] = 80;
  30653. n_ready[1] = 68;
  30654. @@ -2188,26 +1782,16 @@ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
  30655. pr_err("%s: failed to get codec chan map\n", __func__);
  30656. goto end;
  30657. }
  30658. -
  30659. /* For tabla_tx1 case */
  30660. -#if defined(CONFIG_SND_SOC_ES705)
  30661. if (codec_dai->id == 1)
  30662. user_set_tx_ch = msm_slim_0_tx_ch;
  30663. -#elif defined(CONFIG_SND_SOC_ES325)
  30664. - if ((codec_dai->id == 1) ||(codec_dai->id == 11)) {
  30665. - user_set_tx_ch = msm_slim_0_tx_ch;
  30666. - }
  30667. -#else
  30668. - if (codec_dai->id == 1)
  30669. - user_set_tx_ch = msm_slim_0_tx_ch;
  30670. -#endif
  30671. -
  30672. - /* For tabla_tx2 case */
  30673. + /* For tabla_tx2 case */
  30674. else if (codec_dai->id == 3)
  30675. user_set_tx_ch = params_channels(params);
  30676. else
  30677. user_set_tx_ch = tx_ch_cnt;
  30678. - pr_info("%s: msm_slim_0_tx_ch(%d)user_set_tx_ch(%d)tx_ch_cnt(%d)\n",
  30679. +
  30680. + pr_debug("%s: msm_slim_0_tx_ch(%d)user_set_tx_ch(%d)tx_ch_cnt(%d)\n",
  30681. __func__, msm_slim_0_tx_ch, user_set_tx_ch, tx_ch_cnt);
  30682.  
  30683. ret = snd_soc_dai_set_channel_map(cpu_dai,
  30684. @@ -2228,124 +1812,6 @@ static void msm8974_snd_shudown(struct snd_pcm_substream *substream)
  30685.  
  30686. }
  30687.  
  30688. -#if defined (CONFIG_SND_SOC_MAX98504)
  30689. -static int msm8974_pri_mi2s_free_gpios(void)
  30690. -{
  30691. - int i;
  30692. - for (i = 0; i < ARRAY_SIZE(pri_mi2s_gpio); i++)
  30693. - gpio_free(pri_mi2s_gpio[i].gpio_no);
  30694. - return 0;
  30695. -}
  30696. -
  30697. -static struct afe_clk_cfg lpass_mi2s_enable = {
  30698. - AFE_API_VERSION_I2S_CONFIG,
  30699. - Q6AFE_LPASS_IBIT_CLK_1_P536_MHZ,
  30700. - Q6AFE_LPASS_OSR_CLK_12_P288_MHZ,
  30701. - Q6AFE_LPASS_CLK_SRC_INTERNAL,
  30702. - Q6AFE_LPASS_CLK_ROOT_DEFAULT,
  30703. - Q6AFE_LPASS_MODE_BOTH_VALID,
  30704. - 0,
  30705. -};
  30706. -static struct afe_clk_cfg lpass_mi2s_disable = {
  30707. - AFE_API_VERSION_I2S_CONFIG,
  30708. - 0,
  30709. - 0,
  30710. - Q6AFE_LPASS_CLK_SRC_INTERNAL,
  30711. - Q6AFE_LPASS_CLK_ROOT_DEFAULT,
  30712. - Q6AFE_LPASS_MODE_BOTH_VALID,
  30713. - 0,
  30714. -};
  30715. -
  30716. -
  30717. -static void msm8974_mi2s_shutdown(struct snd_pcm_substream *substream)
  30718. -{
  30719. -
  30720. -
  30721. - if (atomic_dec_return(&pri_mi2s_clk.mi2s_rsc_ref) == 0) {
  30722. - int ret =0;
  30723. - pr_debug("[MAX98504_DEBUG] %s: free mi2s resources\n", __func__);
  30724. - if(substream->stream==0)
  30725. - ret = afe_set_lpass_clock(AFE_PORT_ID_SECONDARY_MI2S_RX, &lpass_mi2s_disable);
  30726. - else if(substream->stream==1)
  30727. - ret = afe_set_lpass_clock(AFE_PORT_ID_SECONDARY_MI2S_TX, &lpass_mi2s_disable);
  30728. -
  30729. - if (ret < 0) {
  30730. - pr_err("%s: afe_set_lpass_clock failed\n", __func__);
  30731. -
  30732. - }
  30733. - msm8974_pri_mi2s_free_gpios();
  30734. - }
  30735. -}
  30736. -
  30737. -static int msm8974_configure_pri_mi2s_gpio(void)
  30738. -{
  30739. - int rtn;
  30740. - int i;
  30741. - for (i = 0; i < ARRAY_SIZE(pri_mi2s_gpio); i++) {
  30742. -
  30743. - rtn = gpio_request(pri_mi2s_gpio[i].gpio_no,
  30744. - pri_mi2s_gpio[i].gpio_name);
  30745. -
  30746. - pr_debug("%s: gpio = %d, gpio name = %s, rtn = %d\n", __func__,
  30747. - pri_mi2s_gpio[i].gpio_no, pri_mi2s_gpio[i].gpio_name, rtn);
  30748. - if (rtn) {
  30749. - pr_err("%s: Failed to request gpio %d\n",
  30750. - __func__,
  30751. - pri_mi2s_gpio[i].gpio_no);
  30752. - while( i >= 0) {
  30753. - gpio_free(pri_mi2s_gpio[i].gpio_no);
  30754. - i--;
  30755. - }
  30756. - break;
  30757. - }
  30758. - }
  30759. -
  30760. - return rtn;
  30761. -}
  30762. -static int msm8974_mi2s_startup(struct snd_pcm_substream *substream)
  30763. -{
  30764. - int ret = 0;
  30765. - struct snd_soc_pcm_runtime *rtd = substream->private_data;
  30766. - struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
  30767. - struct snd_soc_dai *codec_dai = rtd->codec_dai;
  30768. -
  30769. - pr_debug("%s: dai name %s %p\n", __func__, cpu_dai->name, cpu_dai->dev);
  30770. -
  30771. - if (atomic_inc_return(&pri_mi2s_clk.mi2s_rsc_ref) == 1) {
  30772. - pr_info("%s: acquire mi2s resources\n", __func__);
  30773. - msm8974_configure_pri_mi2s_gpio();
  30774. - if(substream->stream==0)
  30775. - ret = afe_set_lpass_clock(AFE_PORT_ID_SECONDARY_MI2S_RX, &lpass_mi2s_enable);
  30776. - else if(substream->stream==1)
  30777. - ret = afe_set_lpass_clock(AFE_PORT_ID_SECONDARY_MI2S_TX, &lpass_mi2s_enable);
  30778. - if (ret < 0) {
  30779. - pr_err("%s: afe_set_lpass_clock failed\n", __func__);
  30780. - return ret;
  30781. - }
  30782. - ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
  30783. - if (ret < 0)
  30784. - dev_err(cpu_dai->dev, "set format for CPU dai"
  30785. - " failed\n");
  30786. -
  30787. - ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
  30788. - SND_SOC_DAIFMT_CBS_CFS);
  30789. - if (ret < 0)
  30790. - dev_err(codec_dai->dev, "set format for codec dai"
  30791. - " failed\n");
  30792. -
  30793. - ret = 0;
  30794. - }
  30795. - return ret;
  30796. -}
  30797. -
  30798. -
  30799. -
  30800. -static struct snd_soc_ops msm8974_mi2s_be_ops = {
  30801. - .startup = msm8974_mi2s_startup,
  30802. - .shutdown = msm8974_mi2s_shutdown
  30803. -};
  30804. -#endif
  30805. -
  30806. static struct snd_soc_ops msm8974_be_ops = {
  30807. .startup = msm8974_snd_startup,
  30808. .hw_params = msm_snd_hw_params,
  30809. @@ -3042,38 +2508,6 @@ static struct snd_soc_dai_link msm8974_common_dai_links[] = {
  30810. .be_hw_params_fixup = msm_proxy_tx_be_hw_params_fixup,
  30811. .ignore_suspend = 1,
  30812. },
  30813. -#if defined( CONFIG_PCM_ROUTE_VOICE_STUB ) || defined(CONFIG_BT_CALL_FORWARDING)
  30814. - {
  30815. - .name = "Voice Stub",
  30816. - .stream_name = "Voice Stub",
  30817. - .cpu_dai_name = "VOICE_STUB",
  30818. - .platform_name = "msm-pcm-hostless",
  30819. - .dynamic = 1,
  30820. - .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
  30821. - .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
  30822. - .ignore_suspend = 1,
  30823. - .ignore_pmdown_time = 1, /* this dainlink has playback support */
  30824. - .codec_dai_name = "snd-soc-dummy-dai",
  30825. - .codec_name = "snd-soc-dummy",
  30826. - },
  30827. -#endif /* CONFIG_PCM_ROUTE_VOICE_STUB || CONFIG_BT_CALL_FORWARDING */
  30828. -#ifdef CONFIG_JACK_AUDIO
  30829. - {
  30830. - .name = "MSM8974 JACK LowLatency",
  30831. - .stream_name = "MultiMedia10",
  30832. - .cpu_dai_name = "MultiMedia10",
  30833. - .platform_name = "msm-pcm-dsp.1",
  30834. - .dynamic = 1,
  30835. - .codec_dai_name = "snd-soc-dummy-dai",
  30836. - .codec_name = "snd-soc-dummy",
  30837. - .trigger = {SND_SOC_DPCM_TRIGGER_POST,
  30838. - SND_SOC_DPCM_TRIGGER_POST},
  30839. - .ignore_suspend = 1,
  30840. - /* this dainlink has playback support */
  30841. - .ignore_pmdown_time = 1,
  30842. - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA10,
  30843. - },
  30844. -#endif
  30845. /* Primary AUX PCM Backend DAI Links */
  30846. {
  30847. .name = LPASS_BE_AUXPCM_RX,
  30848. @@ -3103,37 +2537,6 @@ static struct snd_soc_dai_link msm8974_common_dai_links[] = {
  30849. .ops = &msm_pri_auxpcm_be_ops,
  30850. .ignore_suspend = 1,
  30851. },
  30852. -#if defined( CONFIG_PCM_ROUTE_VOICE_STUB ) || defined(CONFIG_BT_CALL_FORWARDING)
  30853. - /* Secondary AUX PCM Backend DAI Links */
  30854. - {
  30855. - .name = LPASS_BE_SEC_AUXPCM_RX,
  30856. - .stream_name = "Sec AUX PCM Playback",
  30857. - .cpu_dai_name = "msm-dai-q6-auxpcm.2",
  30858. - .platform_name = "msm-pcm-routing",
  30859. - .codec_name = "msm-stub-codec.1",
  30860. - .codec_dai_name = "msm-stub-rx",
  30861. - .no_pcm = 1,
  30862. - .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_RX,
  30863. - .be_hw_params_fixup = msm_auxpcm2_be_params_fixup,
  30864. - .ops = &msm_sec_auxpcm_be_ops,
  30865. - .ignore_pmdown_time = 1,
  30866. - .ignore_suspend = 1,
  30867. - /* this dainlink has playback support */
  30868. - },
  30869. - {
  30870. - .name = LPASS_BE_SEC_AUXPCM_TX,
  30871. - .stream_name = "Sec AUX PCM Capture",
  30872. - .cpu_dai_name = "msm-dai-q6-auxpcm.2",
  30873. - .platform_name = "msm-pcm-routing",
  30874. - .codec_name = "msm-stub-codec.1",
  30875. - .codec_dai_name = "msm-stub-tx",
  30876. - .no_pcm = 1,
  30877. - .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_TX,
  30878. - .be_hw_params_fixup = msm_auxpcm2_be_params_fixup,
  30879. - .ops = &msm_sec_auxpcm_be_ops,
  30880. - .ignore_suspend = 1,
  30881. - },
  30882. -#else
  30883. /* Secondary AUX PCM Backend DAI Links */
  30884. {
  30885. .name = LPASS_BE_SEC_AUXPCM_RX,
  30886. @@ -3163,7 +2566,7 @@ static struct snd_soc_dai_link msm8974_common_dai_links[] = {
  30887. .ops = &msm_sec_auxpcm_be_ops,
  30888. .ignore_suspend = 1,
  30889. },
  30890. -#endif
  30891. +
  30892. /* Backend DAI Links */
  30893. {
  30894. .name = LPASS_BE_SLIMBUS_0_RX,
  30895. @@ -3316,7 +2719,7 @@ static struct snd_soc_dai_link msm8974_common_dai_links[] = {
  30896. .be_hw_params_fixup = msm_be_hw_params_fixup,
  30897. .ignore_suspend = 1,
  30898. },
  30899. - /* Incall Music 2 BACK END DAI Link */
  30900. + /* Incall Music 2 BACK END DAI Link */
  30901. {
  30902. .name = LPASS_BE_VOICE2_PLAYBACK_TX,
  30903. .stream_name = "Voice2 Farend Playback",
  30904. @@ -3328,21 +2731,7 @@ static struct snd_soc_dai_link msm8974_common_dai_links[] = {
  30905. .be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
  30906. .be_hw_params_fixup = msm_be_hw_params_fixup,
  30907. .ignore_suspend = 1,
  30908. - },
  30909. -#ifdef CONFIG_SND_SOC_MAX98504
  30910. - {
  30911. - .name = LPASS_BE_SEC_MI2S_TX,
  30912. - .stream_name = "Secondary MI2S Capture",
  30913. - .cpu_dai_name = "msm-dai-q6-mi2s.1",
  30914. - .platform_name = "msm-pcm-routing",
  30915. - .codec_name = "max98504.18-0031",//"msm-stub-codec.1",
  30916. - .codec_dai_name = "max98504-aif1",//"msm-stub-tx",
  30917. - .no_pcm = 1,
  30918. - .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
  30919. - .be_hw_params_fixup = msm8974_mi2s_be_hw_params_fixup,
  30920. - .ops = &msm8974_mi2s_be_ops,
  30921. - },
  30922. -#endif
  30923. + }
  30924. };
  30925.  
  30926. static struct snd_soc_dai_link msm8974_hdmi_dai_link[] = {
  30927. @@ -3470,101 +2859,6 @@ static int msm8974_prepare_us_euro(struct snd_soc_card *card)
  30928. return 0;
  30929. }
  30930.  
  30931. -static int msm8974_prepare_mainmic(void)
  30932. -{
  30933. - int ret;
  30934. - if (mainmic_bias_gpio) {
  30935. - pr_debug("%s : mainmic bias gpio request %d", __func__,
  30936. - mainmic_bias_gpio);
  30937. - ret = gpio_request(mainmic_bias_gpio, "TAIKO_MAINMIC_BIAS");
  30938. - if (ret) {
  30939. - pr_debug("%s: Failed to request taiko mainmic bias gpio %d error %d\n",
  30940. - __func__, mainmic_bias_gpio, ret);
  30941. - return ret;
  30942. - }
  30943. - gpio_direction_output(mainmic_bias_gpio, 0);
  30944. - }
  30945. -
  30946. - return 0;
  30947. -}
  30948. -
  30949. -
  30950. -#if defined(CONFIG_LDO_SUBMIC_BIAS)
  30951. -static int msm8974_prepare_submic(void)
  30952. -{
  30953. - int ret;
  30954. - if (submic_bias_gpio) {
  30955. - pr_debug("%s : submic bias gpio request %d", __func__,
  30956. - submic_bias_gpio);
  30957. - ret = gpio_request(submic_bias_gpio, "TAIKO_SUBMIC_BIAS");
  30958. - if (ret) {
  30959. - pr_debug("%s: Failed to request taiko submic bias gpio %d error %d\n",
  30960. - __func__, submic_bias_gpio, ret);
  30961. - return ret;
  30962. - }
  30963. - gpio_direction_output(submic_bias_gpio, 0);
  30964. - }
  30965. -
  30966. - return 0;
  30967. -}
  30968. -#endif
  30969. -
  30970. -#if defined(CONFIG_LDO_EARMIC_BIAS)
  30971. -static int msm8974_prepare_earmic(void)
  30972. -{
  30973. - int ret;
  30974. - if (earmic_bias_gpio) {
  30975. - pr_debug("%s : earmic bias gpio request %d", __func__,
  30976. - earmic_bias_gpio);
  30977. - ret = gpio_request(earmic_bias_gpio, "TAIKO_EARMIC_BIAS");
  30978. - if (ret) {
  30979. - pr_debug("%s: Failed to request taiko earmic bias gpio %d error %d\n",
  30980. - __func__, earmic_bias_gpio, ret);
  30981. - return ret;
  30982. - }
  30983. - gpio_direction_output(earmic_bias_gpio, 0);
  30984. - }
  30985. -
  30986. - return 0;
  30987. -}
  30988. -#endif
  30989. -
  30990. -static int msm8974_prepare_spkamp(void)
  30991. -{
  30992. - int ret;
  30993. - if (spkamp_en_gpio) {
  30994. - pr_debug("%s : spkamp en gpio request %d", __func__,
  30995. - spkamp_en_gpio);
  30996. - ret = gpio_request(spkamp_en_gpio, "SPKAMP_EN");
  30997. - if (ret) {
  30998. - pr_debug("%s: Failed to request spkamp en gpio %d error %d\n",
  30999. - __func__, spkamp_en_gpio, ret);
  31000. - return ret;
  31001. - }
  31002. - gpio_direction_output(spkamp_en_gpio, 0);
  31003. - }
  31004. -
  31005. - return 0;
  31006. -}
  31007. -
  31008. -static int msm8974_prepare_micbias_to_codec(void)
  31009. -{
  31010. - int ret;
  31011. - if (micbias_en_msm_gpio) {
  31012. - pr_debug("%s : micbias en msm gpio request %d", __func__,
  31013. - micbias_en_msm_gpio);
  31014. - ret = gpio_request(micbias_en_msm_gpio, "MICBIAS_TO_CODEC");
  31015. - if (ret) {
  31016. - pr_debug("%s: Failed to request taiko micbias en msm gpio %d error %d\n",
  31017. - __func__, micbias_en_msm_gpio, ret);
  31018. - return ret;
  31019. - }
  31020. - gpio_direction_output(micbias_en_msm_gpio, 0);
  31021. - }
  31022. -
  31023. - return 0;
  31024. -}
  31025. -
  31026. static __devinit int msm8974_asoc_machine_probe(struct platform_device *pdev)
  31027. {
  31028. struct snd_soc_card *card = &snd_soc_card_msm8974;
  31029. @@ -3576,9 +2870,6 @@ static __devinit int msm8974_asoc_machine_probe(struct platform_device *pdev)
  31030. size_t n = strlen("4-pole-jack");
  31031. struct resource *pri_muxsel;
  31032. struct resource *sec_muxsel;
  31033. -#if defined(CONFIG_SEC_JACTIVE_PROJECT)
  31034. - extern unsigned int system_rev;
  31035. -#endif
  31036.  
  31037. if (!pdev->dev.of_node) {
  31038. dev_err(&pdev->dev, "No platform supplied from device tree\n");
  31039. @@ -3751,151 +3042,6 @@ static __devinit int msm8974_asoc_machine_probe(struct platform_device *pdev)
  31040. dev_err(&pdev->dev, "msm8974_prepare_us_euro failed (%d)\n",
  31041. ret);
  31042.  
  31043. -#ifdef CONFIG_SEC_JACTIVE_PROJECT
  31044. - if(system_rev < 3) {
  31045. - ear_jack_fsa8038_en = of_get_named_gpio(pdev->dev.of_node, "qcom,fsa8038_enable", 0);
  31046. -
  31047. - if (ear_jack_fsa8038_en < 0) {
  31048. - dev_err(&pdev->dev, "Looking up %s property in node %s failed",
  31049. - "qcom,fsa8038_enable",
  31050. - pdev->dev.of_node->full_name);
  31051. - } else {
  31052. - ret = gpio_request(ear_jack_fsa8038_en, "fsa8038 enable");
  31053. -
  31054. - if (ret) {
  31055. - dev_err(&pdev->dev, "Looking up %s property in node %s failed",
  31056. - "qcom,fsa8038_enable",
  31057. - pdev->dev.of_node->full_name);
  31058. - gpio_free(ear_jack_fsa8038_en);
  31059. - } else {
  31060. - gpio_direction_output(ear_jack_fsa8038_en, 1);
  31061. - }
  31062. - }
  31063. - }
  31064. -#endif
  31065. -
  31066. - /* the ldo of main mic bias */
  31067. - mainmic_bias_gpio = of_get_named_gpio(pdev->dev.of_node,
  31068. - "qcom,mainmic-bias-gpio", 0);
  31069. - if (mainmic_bias_gpio < 0) {
  31070. - dev_err(&pdev->dev, "Looking up %s property in node %s failed",
  31071. - "qcom,mainmic-bias-gpio",
  31072. - pdev->dev.of_node->full_name);
  31073. - } else {
  31074. - pr_info("%s : mic bias = %d\n", __func__, mainmic_bias_gpio);
  31075. -
  31076. - ret = msm8974_prepare_mainmic();
  31077. - if (ret) {
  31078. - dev_err(&pdev->dev, "msm8974_prepare_mainmic failed (%d)\n",
  31079. - ret);
  31080. - gpio_free(mainmic_bias_gpio);
  31081. - mainmic_bias_gpio = 0;
  31082. - }
  31083. - }
  31084. -
  31085. -#if defined(CONFIG_LDO_SUBMIC_BIAS)
  31086. - /* the ldo of sub mic bias */
  31087. - submic_bias_gpio = of_get_named_gpio(pdev->dev.of_node,
  31088. - "qcom,submic-bias-gpio", 0);
  31089. - pr_info("%s :sub mic bias = %d\n", __func__, submic_bias_gpio);
  31090. -
  31091. - ret = msm8974_prepare_submic();
  31092. - if (ret) {
  31093. - dev_err(&pdev->dev, "msm8974_prepare_submic failed (%d)\n",
  31094. - ret);
  31095. - gpio_free(submic_bias_gpio);
  31096. - submic_bias_gpio = 0;
  31097. - }
  31098. -#endif
  31099. -
  31100. -#if defined(CONFIG_LDO_EARMIC_BIAS)
  31101. - /* the ldo of ear mic bias */
  31102. - earmic_bias_gpio = of_get_named_gpio(pdev->dev.of_node,
  31103. - "qcom,earmic-bias-gpio", 0);
  31104. - pr_info("%s :ear mic bias = %d\n", __func__, earmic_bias_gpio);
  31105. -
  31106. - ret = msm8974_prepare_earmic();
  31107. - if (ret) {
  31108. - dev_err(&pdev->dev, "msm8974_prepare_earmic failed (%d)\n",
  31109. - ret);
  31110. - gpio_free(earmic_bias_gpio);
  31111. - earmic_bias_gpio = 0;
  31112. - }
  31113. -#endif
  31114. -
  31115. -
  31116. -#if defined(CONFIG_MACH_KLTE_KOR) || defined(CONFIG_MACH_KLTE_JPN) || defined(CONFIG_MACH_KACTIVELTE_DCM) || defined(CONFIG_MACH_CHAGALL_KDI) || defined(CONFIG_MACH_KLIMT_LTE_DCM)
  31117. - /* enable FSA8039 for jack detection */
  31118. - pr_info("%s: Check to enable FSA8039\n", __func__);
  31119. - fsa_en_gpio = of_get_named_gpio(pdev->dev.of_node,
  31120. - "qcom,earjack-fsa_en-gpio", 0);
  31121. - if (fsa_en_gpio < 0)
  31122. - of_property_read_u32(pdev->dev.of_node,
  31123. - "qcom,earjack-fsa_en-expander-gpio", &fsa_en_gpio);
  31124. - if (fsa_en_gpio < 0)
  31125. - pr_info("%s: No support FSA8039 chip\n", __func__);
  31126. - else
  31127. - pr_info("%s: earjack-fsa_en-gpio =%d\n",
  31128. - __func__, fsa_en_gpio);
  31129. -
  31130. - if (fsa_en_gpio > 0) {
  31131. - ret = gpio_request(fsa_en_gpio, "fsa_en");
  31132. - if (ret) {
  31133. - pr_err("%s : gpio_request failed for %d, ret %d\n",
  31134. - __func__, fsa_en_gpio, ret);
  31135. - goto err;
  31136. - }
  31137. - gpio_direction_output(fsa_en_gpio, 1);
  31138. - }
  31139. -#endif
  31140. - /* the switch to connect the main mic to the codec or es705 */
  31141. -#if defined(CONFIG_MACH_KLTE_JPN) || defined(CONFIG_MACH_KACTIVELTE_DCM) || defined(CONFIG_MACH_CHAGALL_KDI) || defined(CONFIG_MACH_KLIMT_LTE_DCM)
  31142. -#if defined(CONFIG_MACH_KLTE_MAX77828_JPN)
  31143. - micbias_en_msm_gpio = of_get_named_gpio(pdev->dev.of_node,
  31144. - "qcom,micbias-en-msm-gpio", 0);
  31145. -#else
  31146. - micbias_en_msm_gpio = of_get_named_gpio(pdev->dev.of_node,
  31147. - "qcom,micbias-en-msm-jpn-gpio", 0);
  31148. -#endif
  31149. -#else
  31150. - micbias_en_msm_gpio = of_get_named_gpio(pdev->dev.of_node,
  31151. - "qcom,micbias-en-msm-gpio", 0);
  31152. -#endif
  31153. - if (micbias_en_msm_gpio < 0) {
  31154. - dev_err(&pdev->dev, "Looking up %s property in node %s failed",
  31155. - "qcom,micbias-en-msm-gpio",
  31156. - pdev->dev.of_node->full_name);
  31157. - } else {
  31158. - pr_info("%s : micbias en msm = %d\n", __func__, micbias_en_msm_gpio);
  31159. -
  31160. - ret = msm8974_prepare_micbias_to_codec();
  31161. - if (ret) {
  31162. - dev_err(&pdev->dev, "msm8974_prepare_micbias_to_codec failed (%d)\n",
  31163. - ret);
  31164. - gpio_free(micbias_en_msm_gpio);
  31165. - micbias_en_msm_gpio = 0;
  31166. - }
  31167. - }
  31168. -
  31169. - spkamp_en_gpio = of_get_named_gpio(pdev->dev.of_node,
  31170. - "qcom,spkamp-en-gpio", 0);
  31171. - if (spkamp_en_gpio < 0) {
  31172. - dev_err(&pdev->dev, "Looking up %s property in node %s failed",
  31173. - "qcom,spkamp-en-gpio",
  31174. - pdev->dev.of_node->full_name);
  31175. - } else {
  31176. - pr_info("%s : spkamp_en_ gpio = %d\n", __func__, spkamp_en_gpio);
  31177. -
  31178. - ret = msm8974_prepare_spkamp();
  31179. - if (ret) {
  31180. - dev_err(&pdev->dev, "msm8974_prepare_spkamp_en_ gpio failed (%d)\n",
  31181. - ret);
  31182. - gpio_free(spkamp_en_gpio);
  31183. - spkamp_en_gpio = 0;
  31184. - }
  31185. - }
  31186. -
  31187. -
  31188. ret = of_property_read_string(pdev->dev.of_node,
  31189. "qcom,prim-auxpcm-gpio-set", &auxpcm_pri_gpio_set);
  31190. if (ret) {
  31191. @@ -3929,14 +3075,9 @@ static __devinit int msm8974_asoc_machine_probe(struct platform_device *pdev)
  31192. goto err1;
  31193. }
  31194. }
  31195. -#if defined( CONFIG_PCM_ROUTE_VOICE_STUB ) || defined(CONFIG_BT_CALL_FORWARDING)
  31196. - sec_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  31197. - "lpaif_quat_mode_muxsel");
  31198. -#else
  31199. +
  31200. sec_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  31201. "lpaif_sec_mode_muxsel");
  31202. -#endif /* CONFIG_PCM_ROUTE_VOICE_STUB || CONFIG_BT_CALL_FORWARDING*/
  31203. -
  31204. if (!sec_muxsel) {
  31205. dev_err(&pdev->dev, "MUX addr invalid for secondary AUXPCM\n");
  31206. ret = -ENODEV;
  31207. @@ -3949,10 +3090,6 @@ static __devinit int msm8974_asoc_machine_probe(struct platform_device *pdev)
  31208. ret = -EINVAL;
  31209. goto err2;
  31210. }
  31211. -#if defined (CONFIG_SND_SOC_MAX98504)
  31212. - atomic_set(&pri_mi2s_clk.mi2s_rsc_ref, 0);
  31213. -#endif
  31214. -
  31215. return 0;
  31216.  
  31217. err2:
  31218. diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
  31219. index ad9b9db..a74f69f 100644
  31220. --- a/sound/soc/msm/msm8x10.c
  31221. +++ b/sound/soc/msm/msm8x10.c
  31222. @@ -385,7 +385,7 @@ static int msm_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
  31223. }
  31224.  
  31225.  
  31226. -static const char *const btsco_rate_text[] = {"8000", "16000"};
  31227. +static const char *const btsco_rate_text[] = {"BTSCO_RATE_8KHZ", "BTSCO_RATE_16KHZ"};
  31228. static const struct soc_enum msm_btsco_enum[] = {
  31229. SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
  31230. };
  31231. @@ -405,10 +405,10 @@ static int msm_btsco_rate_put(struct snd_kcontrol *kcontrol,
  31232. struct snd_ctl_elem_value *ucontrol)
  31233. {
  31234. switch (ucontrol->value.integer.value[0]) {
  31235. - case 8000:
  31236. + case 0:
  31237. msm_btsco_rate = BTSCO_RATE_8KHZ;
  31238. break;
  31239. - case 16000:
  31240. + case 1:
  31241. msm_btsco_rate = BTSCO_RATE_16KHZ;
  31242. break;
  31243. default:
  31244. diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
  31245. index 659d5a2..8ecb147 100644
  31246. --- a/sound/soc/msm/qdsp6/q6asm.c
  31247. +++ b/sound/soc/msm/qdsp6/q6asm.c
  31248. @@ -57,6 +57,7 @@
  31249. #define OUT_BUFFER_SIZE 56
  31250. #define IN_BUFFER_SIZE 24
  31251. #endif
  31252. +#define FRAME_NUM (8)
  31253. static DEFINE_MUTEX(session_lock);
  31254.  
  31255. /* session id: 0 reserved */
  31256. @@ -509,6 +510,9 @@ int q6asm_audio_client_buf_alloc(unsigned int dir,
  31257. pr_debug("%s: buffer already allocated\n", __func__);
  31258. return 0;
  31259. }
  31260. +
  31261. + if (bufcnt != FRAME_NUM)
  31262. + goto fail;
  31263. mutex_lock(&ac->cmd_lock);
  31264. buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
  31265. GFP_KERNEL);
  31266. diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
  31267. index 5e4d9d3..2fc4949 100644
  31268. --- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
  31269. +++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
  31270. @@ -1,4 +1,4 @@
  31271. -/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  31272. +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
  31273. *
  31274. * This program is free software; you can redistribute it and/or modify
  31275. * it under the terms of the GNU General Public License version 2 and
  31276. @@ -621,6 +621,11 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
  31277. }
  31278. for (j = 0; j < eq->config.num_bands; j++) {
  31279. idx = *values++;
  31280. + if (idx >= MAX_EQ_BANDS) {
  31281. + pr_err("EQ_CONFIG:invalid band index\n");
  31282. + rc = -EINVAL;
  31283. + goto invalid_config;
  31284. + }
  31285. eq->per_band_cfg[idx].band_idx = idx;
  31286. eq->per_band_cfg[idx].filter_type = *values++;
  31287. eq->per_band_cfg[idx].freq_millihertz =
  31288. diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
  31289. index fadebb4..74346da 100644
  31290. --- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
  31291. +++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
  31292. @@ -1,4 +1,4 @@
  31293. -/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  31294. +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  31295. *
  31296. * This program is free software; you can redistribute it and/or modify
  31297. * it under the terms of the GNU General Public License version 2 and
  31298. @@ -304,6 +304,7 @@ static void compr_event_handler(uint32_t opcode,
  31299. uint32_t sample_rate = 0;
  31300. int bytes_available, stream_id;
  31301. uint32_t stream_index;
  31302. + unsigned long flags;
  31303.  
  31304. pr_debug("%s opcode =%08x\n", __func__, opcode);
  31305. switch (opcode) {
  31306. @@ -476,11 +477,17 @@ static void compr_event_handler(uint32_t opcode,
  31307. case RESET_EVENTS:
  31308. pr_err("%s: Received reset events CB, move to error state",
  31309. __func__);
  31310. - spin_lock(&prtd->lock);
  31311. + spin_lock_irqsave(&prtd->lock, flags);
  31312. snd_compr_fragment_elapsed(cstream);
  31313. prtd->copied_total = prtd->bytes_received;
  31314. atomic_set(&prtd->error, 1);
  31315. - spin_unlock(&prtd->lock);
  31316. + wake_up(&prtd->drain_wait);
  31317. + if (atomic_read(&prtd->eos)) {
  31318. + pr_debug("%s:unblock eos wait queues", __func__);
  31319. + wake_up(&prtd->eos_wait);
  31320. + atomic_set(&prtd->eos, 0);
  31321. + }
  31322. + spin_unlock_irqrestore(&prtd->lock, flags);
  31323. break;
  31324. default:
  31325. pr_debug("%s: Not Supported Event opcode[0x%x]\n",
  31326. @@ -624,6 +631,11 @@ static int msm_compr_configure_dsp(struct snd_compr_stream *cstream)
  31327. pr_err("%s: Send SoftVolume Param failed ret=%d\n",
  31328. __func__, ret);
  31329.  
  31330. + ret = q6asm_set_softpause(ac, &softpause);
  31331. + if (ret < 0)
  31332. + pr_err("%s: Send SoftPause Param failed ret=%d\n",
  31333. + __func__, ret);
  31334. +
  31335. ret = q6asm_set_io_mode(ac, (COMPRESSED_STREAM_IO | ASYNC_IO_MODE));
  31336. if (ret < 0) {
  31337. pr_err("%s: Set IO mode failed\n", __func__);
  31338. @@ -941,14 +953,19 @@ static int msm_compr_drain_buffer(struct msm_compr_audio *prtd,
  31339. rc = wait_event_interruptible(prtd->drain_wait,
  31340. prtd->drain_ready ||
  31341. prtd->cmd_interrupt ||
  31342. - atomic_read(&prtd->xrun));
  31343. - pr_debug("%s: out of buffer drain wait\n", __func__);
  31344. + atomic_read(&prtd->xrun) ||
  31345. + atomic_read(&prtd->error));
  31346. + pr_debug("%s: out of buffer drain wait with ret %d\n", __func__, rc);
  31347. spin_lock_irqsave(&prtd->lock, *flags);
  31348. if (prtd->cmd_interrupt) {
  31349. pr_debug("%s: buffer drain interrupted by flush)\n", __func__);
  31350. rc = -EINTR;
  31351. prtd->cmd_interrupt = 0;
  31352. }
  31353. + if (atomic_read(&prtd->error)) {
  31354. + pr_err("%s: Got RESET EVENTS notification, return\n", __func__);
  31355. + rc = -ENETRESET;
  31356. + }
  31357. return rc;
  31358. }
  31359.  
  31360. @@ -1253,7 +1270,9 @@ static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd)
  31361.  
  31362. /* Wait indefinitely for DRAIN. Flush can also signal this*/
  31363. rc = wait_event_interruptible(prtd->eos_wait,
  31364. - (prtd->cmd_ack || prtd->cmd_interrupt));
  31365. + (prtd->cmd_ack ||
  31366. + prtd->cmd_interrupt ||
  31367. + atomic_read(&prtd->error)));
  31368.  
  31369. if (rc < 0)
  31370. pr_err("%s: EOS wait failed\n", __func__);
  31371. @@ -1264,6 +1283,11 @@ static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd)
  31372. if (prtd->cmd_interrupt)
  31373. rc = -EINTR;
  31374.  
  31375. + if (atomic_read(&prtd->error)) {
  31376. + pr_err("%s: Got RESET EVENTS notification, return\n", __func__);
  31377. + rc = -ENETRESET;
  31378. + }
  31379. +
  31380. /*FIXME : what if a flush comes while PC is here */
  31381. if (rc == 0) {
  31382. /*
  31383. diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
  31384. index 30506ef..e7b908d 100644
  31385. --- a/sound/soc/msm/qdsp6v2/q6adm.c
  31386. +++ b/sound/soc/msm/qdsp6v2/q6adm.c
  31387. @@ -439,7 +439,7 @@ int adm_get_params(int port_id, uint32_t module_id, uint32_t param_id,
  31388. }
  31389. if ((params_data) && (ARRAY_SIZE(adm_get_parameters) >=
  31390. (1+adm_get_parameters[0])) &&
  31391. - (params_length/sizeof(int) >=
  31392. + (params_length/sizeof(uint32_t) >=
  31393. adm_get_parameters[0])) {
  31394. for (i = 0; i < adm_get_parameters[0]; i++)
  31395. params_data[i] = adm_get_parameters[1+i];
  31396. @@ -650,16 +650,23 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
  31397. /* is big enough and has a valid param size */
  31398. if ((payload[0] == 0) && (data->payload_size >
  31399. (4 * sizeof(*payload))) &&
  31400. - (data->payload_size/sizeof(*payload)-4 >=
  31401. + (data->payload_size - 4 >=
  31402. payload[3]) &&
  31403. (ARRAY_SIZE(adm_get_parameters)-1 >=
  31404. payload[3])) {
  31405. - adm_get_parameters[0] = payload[3];
  31406. + adm_get_parameters[0] = payload[3] /
  31407. + sizeof(uint32_t);
  31408. + /*
  31409. + * payload[3] is param_size which is
  31410. + * expressed in number of bytes
  31411. + */
  31412. pr_debug("%s: GET_PP PARAM:received parameter length: 0x%x\n",
  31413. __func__, adm_get_parameters[0]);
  31414. /* storing param size then params */
  31415. - for (i = 0; i < payload[3]; i++)
  31416. - adm_get_parameters[1+i] = payload[4+i];
  31417. + for (i = 0; i < payload[3] /
  31418. + sizeof(uint32_t); i++)
  31419. + adm_get_parameters[1+i] =
  31420. + payload[4+i];
  31421. } else {
  31422. adm_get_parameters[0] = -1;
  31423. pr_err("%s: GET_PP_PARAMS failed, setting size to %d\n",
  31424. diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
  31425. index 57aee34..bfe64db 100644
  31426. --- a/sound/soc/msm/qdsp6v2/q6afe.c
  31427. +++ b/sound/soc/msm/qdsp6v2/q6afe.c
  31428. @@ -64,6 +64,7 @@ struct afe_ctl {
  31429. struct afe_spkr_prot_calib_get_resp calib_data;
  31430. #endif
  31431. int vi_tx_port;
  31432. + int vi_rx_port;
  31433. uint32_t afe_sample_rates[AFE_MAX_PORTS];
  31434. struct aanc_data aanc_info;
  31435. };
  31436. @@ -513,7 +514,7 @@ int afe_unmap_cal_blocks(void)
  31437. return result;
  31438. }
  31439.  
  31440. -static int afe_spk_prot_prepare(int port, int param_id,
  31441. +static int afe_spk_prot_prepare(int src_port, int dst_port, int param_id,
  31442. union afe_spkr_prot_config *prot_config)
  31443. {
  31444. int ret = -EINVAL;
  31445. @@ -525,17 +526,28 @@ static int afe_spk_prot_prepare(int port, int param_id,
  31446. pr_err("%s Invalid params\n", __func__);
  31447. goto fail_cmd;
  31448. }
  31449. - if ((q6audio_validate_port(port) < 0)) {
  31450. - pr_err("%s invalid port %d", __func__, port);
  31451. + ret = q6audio_validate_port(src_port);
  31452. + if (ret < 0) {
  31453. + pr_err("%s: Invalid src port 0x%x ret %d",
  31454. + __func__, src_port, ret);
  31455. + ret = -EINVAL;
  31456. goto fail_cmd;
  31457. }
  31458. - index = q6audio_get_port_index(port);
  31459. + ret = q6audio_validate_port(dst_port);
  31460. + if (ret < 0) {
  31461. + pr_err("%s: Invalid dst port 0x%x ret %d", __func__,
  31462. + dst_port, ret);
  31463. + ret = -EINVAL;
  31464. + goto fail_cmd;
  31465. + }
  31466. + index = q6audio_get_port_index(src_port);
  31467. switch (param_id) {
  31468. case AFE_PARAM_ID_FBSP_MODE_RX_CFG:
  31469. config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_RX;
  31470. break;
  31471. case AFE_PARAM_ID_FEEDBACK_PATH_CFG:
  31472. - this_afe.vi_tx_port = port;
  31473. + this_afe.vi_tx_port = src_port;
  31474. + this_afe.vi_rx_port = dst_port;
  31475. case AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG:
  31476. case AFE_PARAM_ID_MODE_VI_PROC_CFG:
  31477. config.pdata.module_id = AFE_MODULE_FB_SPKR_PROT_VI_PROC;
  31478. @@ -553,7 +565,7 @@ static int afe_spk_prot_prepare(int port, int param_id,
  31479. config.hdr.token = index;
  31480.  
  31481. config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
  31482. - config.param.port_id = q6audio_get_port_id(port);
  31483. + config.param.port_id = q6audio_get_port_id(src_port);
  31484. config.param.payload_size = sizeof(config) - sizeof(config.hdr)
  31485. - sizeof(config.param);
  31486. config.pdata.param_id = param_id;
  31487. @@ -562,8 +574,8 @@ static int afe_spk_prot_prepare(int port, int param_id,
  31488. atomic_set(&this_afe.state, 1);
  31489. ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
  31490. if (ret < 0) {
  31491. - pr_err("%s: Setting param for port %d param[0x%x]failed\n",
  31492. - __func__, port, param_id);
  31493. + pr_err("%s: port = 0x%x param = 0x%x failed %d\n",
  31494. + __func__, src_port, param_id, ret);
  31495. goto fail_cmd;
  31496. }
  31497. ret = wait_event_timeout(this_afe.wait[index],
  31498. @@ -581,8 +593,8 @@ static int afe_spk_prot_prepare(int port, int param_id,
  31499. }
  31500. ret = 0;
  31501. fail_cmd:
  31502. - pr_debug("%s config.pdata.param_id %x status %d\n",
  31503. - __func__, config.pdata.param_id, ret);
  31504. + pr_debug("%s: config.pdata.param_id 0x%x status %d 0x%x\n",
  31505. + __func__, config.pdata.param_id, ret, src_port);
  31506. return ret;
  31507. }
  31508.  
  31509. @@ -683,7 +695,7 @@ static void afe_send_cal_spkr_prot_tx(int port_id)
  31510. else
  31511. afe_spk_config.mode_rx_cfg.mode =
  31512. Q6AFE_MSM_SPKR_PROCESSING;
  31513. - if (afe_spk_prot_prepare(port_id,
  31514. + if (afe_spk_prot_prepare(port_id, 0,
  31515. AFE_PARAM_ID_MODE_VI_PROC_CFG,
  31516. &afe_spk_config))
  31517. pr_err("%s TX VI_PROC_CFG failed\n", __func__);
  31518. @@ -693,7 +705,7 @@ static void afe_send_cal_spkr_prot_tx(int port_id)
  31519. (uint32_t) prot_cfg.r0;
  31520. afe_spk_config.vi_proc_cfg.t0_cali_q6 =
  31521. (uint32_t) prot_cfg.t0;
  31522. - if (afe_spk_prot_prepare(port_id,
  31523. + if (afe_spk_prot_prepare(port_id, 0,
  31524. AFE_PARAM_ID_SPKR_CALIB_VI_PROC_CFG,
  31525. &afe_spk_config))
  31526. pr_err("%s SPKR_CALIB_VI_PROC_CFG failed\n",
  31527. @@ -710,7 +722,8 @@ static void afe_send_cal_spkr_prot_rx(int port_id)
  31528. /*Get spkr protection cfg data*/
  31529. get_spk_protection_cfg(&prot_cfg);
  31530.  
  31531. - if (prot_cfg.mode != MSM_SPKR_PROT_DISABLED) {
  31532. + if ((prot_cfg.mode != MSM_SPKR_PROT_DISABLED) &&
  31533. + (this_afe.vi_rx_port == port_id)) {
  31534. if (prot_cfg.mode == MSM_SPKR_PROT_CALIBRATION_IN_PROGRESS)
  31535. afe_spk_config.mode_rx_cfg.mode =
  31536. Q6AFE_MSM_SPKR_CALIBRATION;
  31537. @@ -718,7 +731,7 @@ static void afe_send_cal_spkr_prot_rx(int port_id)
  31538. afe_spk_config.mode_rx_cfg.mode =
  31539. Q6AFE_MSM_SPKR_PROCESSING;
  31540. afe_spk_config.mode_rx_cfg.minor_version = 1;
  31541. - if (afe_spk_prot_prepare(port_id,
  31542. + if (afe_spk_prot_prepare(port_id, 0,
  31543. AFE_PARAM_ID_FBSP_MODE_RX_CFG,
  31544. &afe_spk_config))
  31545. pr_err("%s RX MODE_VI_PROC_CFG failed\n",
  31546. @@ -794,7 +807,7 @@ fail_cmd:
  31547.  
  31548. void afe_send_cal(u16 port_id)
  31549. {
  31550. - pr_debug("%s\n", __func__);
  31551. + pr_debug("%s: port_id=0x%x\n", __func__, port_id);
  31552.  
  31553. if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX) {
  31554. afe_send_cal_spkr_prot_tx(port_id);
  31555. @@ -3377,6 +3390,7 @@ int afe_spk_prot_feed_back_cfg(int src_port, int dst_port,
  31556. if (!enable) {
  31557. pr_debug("%s Disable Feedback tx path", __func__);
  31558. this_afe.vi_tx_port = -1;
  31559. + this_afe.vi_rx_port = -1;
  31560. return 0;
  31561. }
  31562.  
  31563. @@ -3405,7 +3419,7 @@ int afe_spk_prot_feed_back_cfg(int src_port, int dst_port,
  31564. }
  31565. prot_config.feedback_path_cfg.num_channels = index;
  31566. prot_config.feedback_path_cfg.minor_version = 1;
  31567. - ret = afe_spk_prot_prepare(src_port,
  31568. + ret = afe_spk_prot_prepare(src_port, dst_port,
  31569. AFE_PARAM_ID_FEEDBACK_PATH_CFG, &prot_config);
  31570. fail_cmd:
  31571. return ret;
  31572. @@ -3607,6 +3621,7 @@ static int __init afe_init(void)
  31573. this_afe.dtmf_gen_rx_portid = -1;
  31574. this_afe.mmap_handle = 0;
  31575. this_afe.vi_tx_port = -1;
  31576. + this_afe.vi_rx_port = -1;
  31577. for (i = 0; i < AFE_MAX_PORTS; i++)
  31578. init_waitqueue_head(&this_afe.wait[i]);
  31579.  
  31580. diff --git a/sound/soc/msm/qdsp6v2/q6core.h b/sound/soc/msm/qdsp6v2/q6core.h
  31581. deleted file mode 100644
  31582. index e5a59bc..0000000
  31583. --- a/sound/soc/msm/qdsp6v2/q6core.h
  31584. +++ /dev/null
  31585. @@ -1,106 +0,0 @@
  31586. -/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  31587. - *
  31588. - * This program is free software; you can redistribute it and/or modify
  31589. - * it under the terms of the GNU General Public License version 2 and
  31590. - * only version 2 as published by the Free Software Foundation.
  31591. - *
  31592. - * This program is distributed in the hope that it will be useful,
  31593. - * but WITHOUT ANY WARRANTY; without even the implied warranty of
  31594. - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  31595. - * GNU General Public License for more details.
  31596. - */
  31597. -
  31598. -#ifndef __Q6CORE_H__
  31599. -#define __Q6CORE_H__
  31600. -#include <mach/qdsp6v2/apr.h>
  31601. -#include <mach/ocmem.h>
  31602. -
  31603. -
  31604. -#define AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO 0x00012903
  31605. -
  31606. -struct avcs_cmd_get_low_power_segments_info {
  31607. - struct apr_hdr hdr;
  31608. -} __packed;
  31609. -
  31610. -
  31611. -#define AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO 0x00012904
  31612. -
  31613. -#define AVCS_CMD_ADSP_EVENT_GET_STATE 0x0001290C
  31614. -#define AVCS_CMDRSP_ADSP_EVENT_GET_STATE 0x0001290D
  31615. -
  31616. -/* @brief AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO payload
  31617. - * structure. Payload for this event comprises one instance of
  31618. - * avcs_cmd_rsp_get_low_power_segments_info_t, followed
  31619. - * immediately by num_segments number of instances of the
  31620. - * avcs_mem_segment_t structure.
  31621. - */
  31622. -
  31623. -/* Types of Low Power Memory Segments. */
  31624. -#define READ_ONLY_SEGMENT 1
  31625. -/*< Read Only memory segment. */
  31626. -#define READ_WRITE_SEGMENT 2
  31627. -/*< Read Write memory segment. */
  31628. -/*Category indicates whether audio/os/sensor segments. */
  31629. -#define AUDIO_SEGMENT 1
  31630. -/*< Audio memory segment. */
  31631. -#define OS_SEGMENT 2
  31632. -/*< QDSP6's OS memory segment. */
  31633. -
  31634. -/* @brief Payload structure for AVS low power memory segment
  31635. - * structure.
  31636. - */
  31637. -struct avcs_mem_segment_t {
  31638. - uint16_t type;
  31639. -/*< Indicates which type of memory this segment is.
  31640. - *Allowed values: READ_ONLY_SEGMENT or READ_WRITE_SEGMENT only.
  31641. - */
  31642. - uint16_t category;
  31643. -/*< Indicates whether audio or OS segments.
  31644. - *Allowed values: AUDIO_SEGMENT or OS_SEGMENT only.
  31645. - */
  31646. - uint32_t size;
  31647. -/*< Size (in bytes) of this segment.
  31648. - * Will be a non-zero value.
  31649. - */
  31650. - uint32_t start_address_lsw;
  31651. -/*< Lower 32 bits of the 64-bit physical start address
  31652. - * of this segment.
  31653. - */
  31654. - uint32_t start_address_msw;
  31655. -/*< Upper 32 bits of the 64-bit physical start address
  31656. - * of this segment.
  31657. - */
  31658. -};
  31659. -
  31660. -struct avcs_cmd_rsp_get_low_power_segments_info_t {
  31661. - uint32_t num_segments;
  31662. -/*< Number of segments in this response.
  31663. - * 0: there are no known sections that should be mapped
  31664. - * from DDR to OCMEM.
  31665. - * >0: the number of memory segments in the following list.
  31666. - */
  31667. -
  31668. - uint32_t bandwidth;
  31669. -/*< Required OCMEM read/write bandwidth (in bytes per second)
  31670. - * if OCMEM is granted.
  31671. - * 0 if num_segments = 0
  31672. - * >0 if num_segments > 0.
  31673. - */
  31674. - struct avcs_mem_segment_t mem_segment[OCMEM_MAX_CHUNKS];
  31675. -};
  31676. -
  31677. -
  31678. -int core_get_low_power_segments(
  31679. - struct avcs_cmd_rsp_get_low_power_segments_info_t **);
  31680. -bool q6core_is_adsp_ready(void);
  31681. -
  31682. -#define ADSP_CMD_SET_DOLBY_MANUFACTURER_ID 0x00012918
  31683. -
  31684. -struct adsp_dolby_manufacturer_id {
  31685. - struct apr_hdr hdr;
  31686. - int manufacturer_id;
  31687. -};
  31688. -
  31689. -uint32_t core_set_dolby_manufacturer_id(int manufacturer_id);
  31690. -
  31691. -#endif /* __Q6CORE_H__ */
  31692. diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
  31693. index 84d4c84..87c68e4 100644
  31694. --- a/sound/soc/msm/qdsp6v2/q6voice.c
  31695. +++ b/sound/soc/msm/qdsp6v2/q6voice.c
  31696. @@ -29,7 +29,7 @@
  31697. #include "q6voice.h"
  31698.  
  31699.  
  31700. -#define TIMEOUT_MS 300
  31701. +#define TIMEOUT_MS 500
  31702.  
  31703.  
  31704. #define CMD_STATUS_SUCCESS 0