Guest User

Untitled

a guest
Dec 12th, 2018
81
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 199.52 KB | None | 0 0
  1. /*
  2. * Copyright (C) 2007 Google, Inc.
  3. * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
  4. *
  5. * This software is licensed under the terms of the GNU General Public
  6. * License version 2, as published by the Free Software Foundation, and
  7. * may be copied, distributed, and modified under those terms.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. */
  15.  
  16. #include <linux/kernel.h>
  17. #include <linux/module.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/mtd/partitions.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/sched.h>
  22. #include <linux/dma-mapping.h>
  23. #include <linux/io.h>
  24. #include <linux/crc16.h>
  25. #include <linux/bitrev.h>
  26. #include <linux/slab.h>
  27.  
  28. #include <asm/dma.h>
  29. #include <asm/mach/flash.h>
  30.  
  31. #include <mach/dma.h>
  32.  
  33. #include "msm_nand.h"
  34.  
/* Physical base addresses of the NAND controller blocks, filled in at
 * platform probe time (primary controller plus the interleave pair). */
unsigned long msm_nand_phys;
unsigned long msm_nandc01_phys;
unsigned long msm_nandc10_phys;
unsigned long msm_nandc11_phys;
unsigned long ebi2_register_base;
/* Nonzero when the SoC has two NAND controllers / interleaved mode. */
uint32_t dual_nand_ctlr_present;
uint32_t interleave_enable;
/* CRCI flow-control mask handed to msm_dmov_exec_cmd() for every DMA. */
unsigned crci_mask;

/* One shared 8 KB DMA bounce buffer per chip, carved into slots that are
 * tracked by one bit each in chip->dma_buffer_busy (an atomic_t bitmap),
 * hence slot size = buffer size / number of bits in the counter. */
#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
#define MSM_NAND_DMA_BUFFER_SLOTS \
	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))

/* CFG0/CFG1 register values for raw (ECC-disabled) page access. */
#define MSM_NAND_CFG0_RAW 0xA80420C0
#define MSM_NAND_CFG1_RAW 0x5045D

/* CFG0/CFG1 values used only while reading the ONFI identifier and
 * parameter pages (non-standard codeword sizes). */
#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d

/* ONFI read sizes: 4-byte "ONFI" identifier, 512 bytes of parameter
 * data holding two redundant 256-byte parameter-page copies. */
#define ONFI_IDENTIFIER_LENGTH 0x0004
#define ONFI_PARAM_INFO_LENGTH 0x0200
#define ONFI_PARAM_PAGE_LENGTH 0x0100

/* Little-endian "ONFI" signature expected in the parameter page. */
#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F

/* Flash command opcodes/addresses for the two ONFI probe reads. */
#define FLASH_READ_ONFI_IDENTIFIER_COMMAND 0x90
#define FLASH_READ_ONFI_IDENTIFIER_ADDRESS 0x20
#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00

/* Set to 1 for extra per-operation pr_info logging. */
#define VERBOSE 0
  68.  
/*
 * struct msm_nand_chip - per-controller driver state.
 *
 * @wait_queue:      waiters blocked until a DMA-buffer slot frees up
 * @dma_buffer_busy: bitmap of in-use slots within @dma_buffer
 * @dma_channel:     data-mover (ADM) channel used for all transfers
 * @dma_buffer:      kernel VA of the shared coherent bounce buffer
 * @dma_addr:        bus address corresponding to @dma_buffer
 * @CFG0, @CFG1:     cached controller configuration register values
 * @ecc_buf_cfg:     value programmed into the ECC buffer config register
 *                   during page operations
 */
struct msm_nand_chip {
	struct device *dev;
	wait_queue_head_t wait_queue;
	atomic_t dma_buffer_busy;
	unsigned dma_channel;
	uint8_t *dma_buffer;
	dma_addr_t dma_addr;
	unsigned CFG0, CFG1;
	uint32_t ecc_buf_cfg;
};
  79.  
/* MTD device registered by this driver; used by the param-block helpers. */
struct mtd_info *current_mtd = NULL;
unsigned param_start_block;
unsigned param_end_block;

/* CFG1 bit 1: device has a 16-bit-wide bus. */
#define CFG1_WIDE_FLASH (1U << 1)

/* TODO: move datamover code out */

/* CRCI flow-control encodings for the NAND command and data channels. */
#define SRC_CRCI_NAND_CMD CMD_SRC_CRCI(DMOV_NAND_CRCI_CMD)
#define DST_CRCI_NAND_CMD CMD_DST_CRCI(DMOV_NAND_CRCI_CMD)
#define SRC_CRCI_NAND_DATA CMD_SRC_CRCI(DMOV_NAND_CRCI_DATA)
#define DST_CRCI_NAND_DATA CMD_DST_CRCI(DMOV_NAND_CRCI_DATA)

/* Convert a pointer into chip->dma_buffer to its bus address.  The
 * (void)(*(vaddr)) expression is a compile-time check that vaddr is a
 * dereferenceable pointer; it generates no code. */
#define msm_virt_to_dma(chip, vaddr) \
	((void)(*(vaddr)), (chip)->dma_addr + \
	 ((uint8_t *)(vaddr) - (chip)->dma_buffer))
  96.  
  97. /**
  98. * msm_nand_oob_64 - oob info for 2KB page
  99. */
  100. static struct nand_ecclayout msm_nand_oob_64 = {
  101. .eccbytes = 40,
  102. .eccpos = {
  103. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
  104. 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
  105. 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
  106. 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
  107. },
  108. .oobavail = 16,
  109. .oobfree = {
  110. {30, 16},
  111. }
  112. };
  113.  
  114. /**
  115. * msm_nand_oob_128 - oob info for 4KB page
  116. */
  117. static struct nand_ecclayout msm_nand_oob_128 = {
  118. .eccbytes = 80,
  119. .eccpos = {
  120. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
  121. 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
  122. 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
  123. 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
  124. 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
  125. 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
  126. 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
  127. 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
  128. },
  129. .oobavail = 32,
  130. .oobfree = {
  131. {70, 32},
  132. }
  133. };
  134.  
  135. /**
  136. * msm_nand_oob_256 - oob info for 8KB page
  137. */
  138. static struct nand_ecclayout msm_nand_oob_256 = {
  139. .eccbytes = 160,
  140. .eccpos = {
  141. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
  142. 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
  143. 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
  144. 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
  145. 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
  146. 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
  147. 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
  148. 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
  149. 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
  150. 90, 91, 92, 93, 94, 96, 97, 98 , 99, 100,
  151. 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
  152. 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
  153. 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
  154. 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
  155. 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
  156. 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
  157. },
  158. .oobavail = 64,
  159. .oobfree = {
  160. {151, 64},
  161. }
  162. };
  163.  
  164. /*
  165. * flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
  166. * For now, we expose only 64 out of 80 ecc bytes
  167. */
  168. static struct nand_ecclayout msm_flexonenand_oob_128 = {
  169. .eccbytes = 64,
  170. .eccpos = {
  171. 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  172. 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
  173. 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
  174. 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
  175. 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
  176. 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
  177. 102, 103, 104, 105
  178. },
  179. .oobavail = 32,
  180. .oobfree = {
  181. {2, 4}, {18, 4}, {34, 4}, {50, 4},
  182. {66, 4}, {82, 4}, {98, 4}, {114, 4}
  183. }
  184. };
  185.  
  186. /*
  187. * onenand_oob_128 - oob info for OneNAND with 4KB page
  188. *
  189. * Based on specification:
  190. * 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
  191. *
  192. * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
  193. *
  194. * oobfree uses the spare area fields marked as
  195. * "Managed by internal ECC logic for Logical Sector Number area"
  196. */
  197. static struct nand_ecclayout msm_onenand_oob_128 = {
  198. .eccbytes = 64,
  199. .eccpos = {
  200. 7, 8, 9, 10, 11, 12, 13, 14, 15,
  201. 23, 24, 25, 26, 27, 28, 29, 30, 31,
  202. 39, 40, 41, 42, 43, 44, 45, 46, 47,
  203. 55, 56, 57, 58, 59, 60, 61, 62, 63,
  204. 71, 72, 73, 74, 75, 76, 77, 78, 79,
  205. 87, 88, 89, 90, 91, 92, 93, 94, 95,
  206. 103, 104, 105, 106, 107, 108, 109, 110, 111,
  207. 119
  208. },
  209. .oobavail = 24,
  210. .oobfree = {
  211. {2, 3}, {18, 3}, {34, 3}, {50, 3},
  212. {66, 3}, {82, 3}, {98, 3}, {114, 3}
  213. }
  214. };
  215.  
  216. /**
  217. * msm_onenand_oob_64 - oob info for large (2KB) page
  218. */
  219. static struct nand_ecclayout msm_onenand_oob_64 = {
  220. .eccbytes = 20,
  221. .eccpos = {
  222. 8, 9, 10, 11, 12,
  223. 24, 25, 26, 27, 28,
  224. 40, 41, 42, 43, 44,
  225. 56, 57, 58, 59, 60,
  226. },
  227. .oobavail = 20,
  228. .oobfree = {
  229. {2, 3}, {14, 2}, {18, 3}, {30, 2},
  230. {34, 3}, {46, 2}, {50, 3}, {62, 2}
  231. }
  232. };
  233.  
/*
 * msm_nand_get_dma_buffer - lock-free allocator for the shared DMA buffer.
 * @chip: controller state owning the buffer
 * @size: bytes requested
 *
 * The 8 KB bounce buffer is divided into MSM_NAND_DMA_BUFFER_SLOTS-byte
 * slots, one bit each in chip->dma_buffer_busy.  This scans for a run of
 * free bits wide enough for @size and claims it with a single cmpxchg.
 *
 * Returns a pointer into chip->dma_buffer, or NULL if no suitable run is
 * currently free (callers typically retry via wait_event on
 * chip->wait_queue).  Lock-free: a lost cmpxchg race simply returns NULL.
 */
static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
{
	unsigned int bitmask, free_bitmask, old_bitmask;
	unsigned int need_mask, current_need_mask;
	int free_index;

	/* Contiguous run of 1-bits, one per slot needed for @size. */
	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
	bitmask = atomic_read(&chip->dma_buffer_busy);
	free_bitmask = ~bitmask;
	do {
		/* Lowest free slot; shift the needed run up to it. */
		free_index = __ffs(free_bitmask);
		current_need_mask = need_mask << free_index;

		/* Request would run off the end of the buffer. */
		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOTS >=
		    MSM_NAND_DMA_BUFFER_SIZE)
			return NULL;

		if ((bitmask & current_need_mask) == 0) {
			/* Run looks free: claim it atomically. */
			old_bitmask =
				atomic_cmpxchg(&chip->dma_buffer_busy,
					       bitmask,
					       bitmask | current_need_mask);
			if (old_bitmask == bitmask)
				return chip->dma_buffer +
					free_index * MSM_NAND_DMA_BUFFER_SLOTS;
			free_bitmask = 0; /* force return */
		}
		/* current free range was too small, clear all free bits */
		/* below the top busy bit within current_need_mask */
		free_bitmask &=
			~(~0U >> (32 - fls(bitmask & current_need_mask)));
	} while (free_bitmask);

	return NULL;
}
  269.  
  270. static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
  271. void *buffer, size_t size)
  272. {
  273. int index;
  274. unsigned int used_mask;
  275.  
  276. used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
  277. index = ((uint8_t *)buffer - chip->dma_buffer) /
  278. MSM_NAND_DMA_BUFFER_SLOTS;
  279. atomic_sub(used_mask << index, &chip->dma_buffer_busy);
  280.  
  281. wake_up(&chip->wait_queue);
  282. }
  283.  
  284.  
/*
 * flash_rd_reg - read one controller register through the data mover.
 * @chip: controller state
 * @addr: bus address of the register to read
 *
 * Builds a single one-word DMA move (register -> bounce buffer) and
 * executes it synchronously, then returns the fetched value.
 */
unsigned flash_rd_reg(struct msm_nand_chip *chip, unsigned addr)
{
	/* Descriptor, its list-pointer word, and the 32-bit landing area,
	 * all carved from the shared DMA bounce buffer. */
	struct {
		dmov_s cmd;
		unsigned cmdptr;
		unsigned data;
	} *dma_buffer;
	unsigned rv;

	/* Sleep until a large-enough slot of the bounce buffer is free. */
	wait_event(chip->wait_queue,
		   (dma_buffer = msm_nand_get_dma_buffer(
			   chip, sizeof(*dma_buffer))));

	/* Single 4-byte move, last command of the (one-entry) list. */
	dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
	dma_buffer->cmd.src = addr;
	dma_buffer->cmd.dst = msm_virt_to_dma(chip, &dma_buffer->data);
	dma_buffer->cmd.len = 4;

	dma_buffer->cmdptr =
		(msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
	dma_buffer->data = 0xeeeeeeee;	/* poison; overwritten by the DMA */

	dsb();	/* descriptor must be visible before the engine starts */
	msm_dmov_exec_cmd(
		chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
	dsb();	/* DMA-written result must be visible to the CPU */

	rv = dma_buffer->data;

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));

	return rv;
}
  319.  
  320. void flash_wr_reg(struct msm_nand_chip *chip, unsigned addr, unsigned val)
  321. {
  322. struct {
  323. dmov_s cmd;
  324. unsigned cmdptr;
  325. unsigned data;
  326. } *dma_buffer;
  327.  
  328. wait_event(chip->wait_queue,
  329. (dma_buffer = msm_nand_get_dma_buffer(
  330. chip, sizeof(*dma_buffer))));
  331.  
  332. dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
  333. dma_buffer->cmd.src = msm_virt_to_dma(chip, &dma_buffer->data);
  334. dma_buffer->cmd.dst = addr;
  335. dma_buffer->cmd.len = 4;
  336.  
  337. dma_buffer->cmdptr =
  338. (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
  339. dma_buffer->data = val;
  340.  
  341. dsb();
  342. msm_dmov_exec_cmd(
  343. chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
  344. DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  345. dsb();
  346.  
  347. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  348. }
  349.  
  350. static dma_addr_t
  351. msm_nand_dma_map(struct device *dev, void *addr, size_t size,
  352. enum dma_data_direction dir)
  353. {
  354. struct page *page;
  355. unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
  356. if (virt_addr_valid(addr))
  357. page = virt_to_page(addr);
  358. else {
  359. if (WARN_ON(size + offset > PAGE_SIZE))
  360. return ~0;
  361. page = vmalloc_to_page(addr);
  362. }
  363. return dma_map_page(dev, page, offset, size, dir);
  364. }
  365.  
/*
 * flash_read_id - issue FETCH_ID and return the raw flash ID word.
 * @chip: controller state
 *
 * Builds a 7-descriptor DMA chain that disables the SFlash burst
 * controller, programs the FETCH_ID command, waits for completion, reads
 * MSM_NAND_READ_ID, and restores the burst config.  The descriptor order
 * is the hardware command sequence — do not reorder.
 */
uint32_t flash_read_id(struct msm_nand_chip *chip)
{
	struct {
		dmov_s cmd[7];
		unsigned cmdptr;
		unsigned data[7];
	} *dma_buffer;
	uint32_t rv;

	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
		   (chip, sizeof(*dma_buffer))));

	/* Staging area: [0] chipsel, [1] command, [2] exec, [3]/[4] status
	 * and ID landing words (poisoned), [5] saved burst cfg, [6] zero. */
	dma_buffer->data[0] = 0 | 4;	/* flash0 + undocumented bit */
	dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
	dma_buffer->data[2] = 1;	/* EXEC "go" */
	dma_buffer->data[3] = 0xeeeeeeee;
	dma_buffer->data[4] = 0xeeeeeeee;
	dma_buffer->data[5] = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
	dma_buffer->data[6] = 0x00000000;
	BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->data) - 1);

	/* Put the SFlash controller out of burst mode (first in chain). */
	dma_buffer->cmd[0].cmd = 0 | CMD_OCB;
	dma_buffer->cmd[0].src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
	dma_buffer->cmd[0].dst = MSM_NAND_SFLASHC_BURST_CFG;
	dma_buffer->cmd[0].len = 4;

	/* Select flash device 0. */
	dma_buffer->cmd[1].cmd = 0;
	dma_buffer->cmd[1].src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
	dma_buffer->cmd[1].dst = MSM_NAND_FLASH_CHIP_SELECT;
	dma_buffer->cmd[1].len = 4;

	/* Write FETCH_ID, flow-controlled on the command CRCI. */
	dma_buffer->cmd[2].cmd = DST_CRCI_NAND_CMD;
	dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[1]);
	dma_buffer->cmd[2].dst = MSM_NAND_FLASH_CMD;
	dma_buffer->cmd[2].len = 4;

	/* Kick the command. */
	dma_buffer->cmd[3].cmd = 0;
	dma_buffer->cmd[3].src = msm_virt_to_dma(chip, &dma_buffer->data[2]);
	dma_buffer->cmd[3].dst = MSM_NAND_EXEC_CMD;
	dma_buffer->cmd[3].len = 4;

	/* Wait on the data CRCI, then capture FLASH_STATUS. */
	dma_buffer->cmd[4].cmd = SRC_CRCI_NAND_DATA;
	dma_buffer->cmd[4].src = MSM_NAND_FLASH_STATUS;
	dma_buffer->cmd[4].dst = msm_virt_to_dma(chip, &dma_buffer->data[3]);
	dma_buffer->cmd[4].len = 4;

	/* Capture the ID register. */
	dma_buffer->cmd[5].cmd = 0;
	dma_buffer->cmd[5].src = MSM_NAND_READ_ID;
	dma_buffer->cmd[5].dst = msm_virt_to_dma(chip, &dma_buffer->data[4]);
	dma_buffer->cmd[5].len = 4;

	/* Restore the saved burst config (last in chain). */
	dma_buffer->cmd[6].cmd = CMD_OCU | CMD_LC;
	dma_buffer->cmd[6].src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
	dma_buffer->cmd[6].dst = MSM_NAND_SFLASHC_BURST_CFG;
	dma_buffer->cmd[6].len = 4;

	BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->cmd) - 1);

	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3
			      ) | CMD_PTR_LP;

	dsb();
	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
			  DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
	dsb();

	pr_info("status: %x\n", dma_buffer->data[3]);
	pr_info("nandid: %x maker %02x device %02x\n",
		dma_buffer->data[4], dma_buffer->data[4] & 0xff,
		(dma_buffer->data[4] >> 8) & 0xff);
	rv = dma_buffer->data[4];
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return rv;
}
  440.  
/*
 * struct flash_identification - one entry of the supported-device table.
 *
 * @flash_id: raw ID word as returned by flash_read_id()
 * @mask:     bits of @flash_id that must match for this entry
 * @density:  total device size in bytes
 * @widebus:  0 for an 8-bit bus, 1 for a 16-bit bus
 * @pagesize: page size in bytes
 * @blksize:  erase-block size in bytes
 * @oobsize:  spare (OOB) bytes per page
 */
struct flash_identification {
	uint32_t flash_id;
	uint32_t mask;
	uint32_t density;
	uint32_t widebus;
	uint32_t pagesize;
	uint32_t blksize;
	uint32_t oobsize;
};
  450.  
/* Table of known devices, matched via (id & mask) == flash_id.
 * Entry 0 is a placeholder populated at runtime by flash_onfi_probe()
 * when an ONFI-compliant device is detected. */
static struct flash_identification supported_flash[] =
{
	/* Flash ID ID Mask Density(MB) Wid Pgsz Blksz oobsz Manuf */
	{0x00000000, 0xFFFFFFFF, 0, 0, 0, 0, 0, }, /*ONFI*/
	{0x5590bc2c, 0xFFFFFFFF, (512<<20), 1, 2048, (2048<<6), 64, }, /*Micr*/
	{0x1500aaec, 0xFF00FFFF, (256<<20), 0, 2048, (2048<<6), 64, }, /*Sams*/
	{0x5500baec, 0xFF00FFFF, (256<<20), 1, 2048, (2048<<6), 64, }, /*Sams*/
	{0x6600bcec, 0xFF00FFFF, (512<<20), 1, 4096, (4096<<6), 128,}, /*Sams*/
	{0x1500aa98, 0xFFFFFFFF, (256<<20), 0, 2048, (2048<<6), 64, }, /*Tosh*/
	{0x5500ba98, 0xFFFFFFFF, (256<<20), 1, 2048, (2048<<6), 64, }, /*Tosh*/
	{0xd580b12c, 0xFFFFFFFF, (128<<20), 1, 2048, (2048<<6), 64, }, /*Micr*/
	{0x5580baad, 0xFFFFFFFF, (256<<20), 1, 2048, (2048<<6), 64, }, /*Hynx*/
	{0x5510baad, 0xFFFFFFFF, (256<<20), 1, 2048, (2048<<6), 64, }, /*Hynx*/
	{0x1590ac2c, 0xFFFFFFFF, (512<<20), 1, 2048, (2048<<6), 64, }, /*Micr*/
	{0x6601b3ec, 0xFF00FFFF, (1024<<20), 1, 4096, (4096<<6), 128,}, /*Sams*/
	{0x55d1b32c, 0xFFFFFFFF, (1024<<20), 1, 2048, (2048<<6), 64, }, /*Micr*/
	/* Note: Width flag is 0 for 8 bit Flash and 1 for 16 bit flash */
	/* Note: The First row will be filled at runtime during ONFI probe */
};
  470.  
  471. uint16_t flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
  472. {
  473. int i;
  474. uint16_t result;
  475.  
  476. for (i = 0; i < count; i++)
  477. buffer[i] = bitrev8(buffer[i]);
  478.  
  479. result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
  480.  
  481. for (i = 0; i < count; i++)
  482. buffer[i] = bitrev8(buffer[i]);
  483.  
  484. return result;
  485. }
  486.  
  487.  
/*
 * flash_onfi_probe - detect an ONFI device and fill supported_flash[0].
 * @chip: controller state
 *
 * Runs two passes of the same 11-descriptor DMA chain: first reading the
 * 4-byte ONFI identifier (expects the "ONFI" signature), then the 512-byte
 * parameter info (two redundant 256-byte parameter pages, accepted if
 * either copy passes its CRC).  Each pass temporarily reprograms DEV_CMD1,
 * DEV_CMD_VLD and the SFlash burst config and restores them in-chain.
 *
 * Returns 0 on success, -EIO on any failure.
 * NOTE(review): the return type is uint32_t but -EIO is returned through a
 * signed local — callers must compare against (uint32_t)-EIO or the return
 * type should become int; confirm against callers before changing.
 */
uint32_t flash_onfi_probe(struct msm_nand_chip *chip)
{
	/* ONFI 1.0 parameter page layout, 256 bytes, packed. */
	struct onfi_param_page {
		uint32_t parameter_page_signature;
		uint16_t revision_number;
		uint16_t features_supported;
		uint16_t optional_commands_supported;
		uint8_t reserved0[22];
		uint8_t device_manufacturer[12];
		uint8_t device_model[20];
		uint8_t jedec_manufacturer_id;
		uint16_t date_code;
		uint8_t reserved1[13];
		uint32_t number_of_data_bytes_per_page;
		uint16_t number_of_spare_bytes_per_page;
		uint32_t number_of_data_bytes_per_partial_page;
		uint16_t number_of_spare_bytes_per_partial_page;
		uint32_t number_of_pages_per_block;
		uint32_t number_of_blocks_per_logical_unit;
		uint8_t number_of_logical_units;
		uint8_t number_of_address_cycles;
		uint8_t number_of_bits_per_cell;
		uint16_t maximum_bad_blocks_per_logical_unit;
		uint16_t block_endurance;
		uint8_t guaranteed_valid_begin_blocks;
		uint16_t guaranteed_valid_begin_blocks_endurance;
		uint8_t number_of_programs_per_page;
		uint8_t partial_program_attributes;
		uint8_t number_of_bits_ecc_correctaility;
		uint8_t number_of_interleaved_address_bits;
		uint8_t interleaved_operation_attributes;
		uint8_t reserved2[13];
		uint8_t io_pin_capacitance;
		uint16_t timing_mode_support;
		uint16_t program_cache_timing_mode_support;
		uint16_t maximum_page_programming_time;
		uint16_t maximum_block_erase_time;
		uint16_t maximum_page_read_time;
		uint16_t maximum_change_column_setup_time;
		uint8_t reserved3[23];
		uint16_t vendor_specific_revision_number;
		uint8_t vendor_specific[88];
		uint16_t integrity_crc;

	} __attribute__((__packed__));

	struct onfi_param_page *onfi_param_page_ptr;
	uint8_t *onfi_identifier_buf = NULL;
	uint8_t *onfi_param_info_buf = NULL;

	/* DMA chain plus all register staging values (originals saved so
	 * the chain can restore them after each pass). */
	struct {
		dmov_s cmd[11];
		unsigned cmdptr;
		struct {
			uint32_t cmd;
			uint32_t addr0;
			uint32_t addr1;
			uint32_t cfg0;
			uint32_t cfg1;
			uint32_t exec;
			uint32_t flash_status;
			uint32_t devcmd1_orig;
			uint32_t devcmdvld_orig;
			uint32_t devcmd1_mod;
			uint32_t devcmdvld_mod;
			uint32_t sflash_bcfg_orig;
			uint32_t sflash_bcfg_mod;
		} data;
	} *dma_buffer;
	dmov_s *cmd;

	unsigned page_address = 0;
	int err = 0;
	dma_addr_t dma_addr_param_info = 0;
	dma_addr_t dma_addr_identifier = 0;
	unsigned cmd_set_count = 2;	/* pass 2: identifier, pass 1: params */
	unsigned crc_chk_count = 0;

	if (msm_nand_data.nr_parts) {
		page_address = ((msm_nand_data.parts[0]).offset << 6);
	} else {
		pr_err("flash_onfi_probe: "
		       "No partition info available\n");
		err = -EIO;
		return err;
	}

	/* Three separate bounce-buffer slots: 4-byte identifier, 512-byte
	 * parameter info, and the descriptor chain itself. */
	wait_event(chip->wait_queue, (onfi_identifier_buf =
		   msm_nand_get_dma_buffer(chip, ONFI_IDENTIFIER_LENGTH)));
	dma_addr_identifier = msm_virt_to_dma(chip, onfi_identifier_buf);

	wait_event(chip->wait_queue, (onfi_param_info_buf =
		   msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);

	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
		   (chip, sizeof(*dma_buffer))));

	/* Save registers the chain will clobber, for in-chain restore. */
	dma_buffer->data.sflash_bcfg_orig = flash_rd_reg
		(chip, MSM_NAND_SFLASHC_BURST_CFG);
	dma_buffer->data.devcmd1_orig = flash_rd_reg(chip, MSM_NAND_DEV_CMD1);
	dma_buffer->data.devcmdvld_orig = flash_rd_reg(chip,
						       MSM_NAND_DEV_CMD_VLD);

	while (cmd_set_count-- > 0) {
		cmd = dma_buffer->cmd;

		/* Select per-pass command, address, and raw CFG values. */
		dma_buffer->data.devcmd1_mod = (dma_buffer->data.devcmd1_orig &
				0xFFFFFF00) | (cmd_set_count
				? FLASH_READ_ONFI_IDENTIFIER_COMMAND
				: FLASH_READ_ONFI_PARAMETERS_COMMAND);
		dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
		dma_buffer->data.addr0 = (page_address << 16) | (cmd_set_count
				? FLASH_READ_ONFI_IDENTIFIER_ADDRESS
				: FLASH_READ_ONFI_PARAMETERS_ADDRESS);
		dma_buffer->data.addr1 = (page_address >> 16) & 0xFF;
		dma_buffer->data.cfg0 = (cmd_set_count
				? MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER
				: MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO);
		dma_buffer->data.cfg1 = (cmd_set_count
				? MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER
				: MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO);
		dma_buffer->data.sflash_bcfg_mod = 0x00000000;
		dma_buffer->data.devcmdvld_mod = (dma_buffer->
				data.devcmdvld_orig & 0xFFFFFFFE);
		dma_buffer->data.exec = 1;
		dma_buffer->data.flash_status = 0xeeeeeeee; /* poison */

		/* Put the Nand ctlr in Async mode and disable SFlash ctlr */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.sflash_bcfg_mod);
		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
		cmd->len = 4;
		cmd++;

		/* Block on cmd ready, & write CMD,ADDR0,ADDR1,CHIPSEL regs */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
		cmd->dst = MSM_NAND_FLASH_CMD;
		cmd->len = 12;
		cmd++;

		/* Configure the CFG0 and CFG1 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.cfg0);
		cmd->dst = MSM_NAND_DEV0_CFG0;
		cmd->len = 8;
		cmd++;

		/* Configure the DEV_CMD_VLD register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.devcmdvld_mod);
		cmd->dst = MSM_NAND_DEV_CMD_VLD;
		cmd->len = 4;
		cmd++;

		/* Configure the DEV_CMD1 register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.devcmd1_mod);
		cmd->dst = MSM_NAND_DEV_CMD1;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.exec);
		cmd->dst = MSM_NAND_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the two status registers */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_FLASH_STATUS;
		cmd->dst = msm_virt_to_dma(chip,
					   &dma_buffer->data.flash_status);
		cmd->len = 4;
		cmd++;

		/* Read data block - valid only if status says success */
		cmd->cmd = 0;
		cmd->src = MSM_NAND_FLASH_BUFFER;
		cmd->dst = (cmd_set_count ? dma_addr_identifier :
			    dma_addr_param_info);
		cmd->len = (cmd_set_count ? ONFI_IDENTIFIER_LENGTH :
			    ONFI_PARAM_INFO_LENGTH);
		cmd++;

		/* Restore the DEV_CMD1 register */
		cmd->cmd = 0 ;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.devcmd1_orig);
		cmd->dst = MSM_NAND_DEV_CMD1;
		cmd->len = 4;
		cmd++;

		/* Restore the DEV_CMD_VLD register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.devcmdvld_orig);
		cmd->dst = MSM_NAND_DEV_CMD_VLD;
		cmd->len = 4;
		cmd++;

		/* Restore the SFLASH_BURST_CONFIG register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
					   &dma_buffer->data.sflash_bcfg_orig);
		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
		cmd->len = 4;
		cmd++;

		BUILD_BUG_ON(11 != ARRAY_SIZE(dma_buffer->cmd));
		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
		/* Mark chain boundaries: open block first, close+last last. */
		dma_buffer->cmd[0].cmd |= CMD_OCB;
		cmd[-1].cmd |= CMD_OCU | CMD_LC;

		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
				      >> 3) | CMD_PTR_LP;

		dsb();
		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
				  DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
				  &dma_buffer->cmdptr)));
		dsb();

		/* Check for errors, protection violations etc */
		if (dma_buffer->data.flash_status & 0x110) {
			pr_info("MPU/OP error (0x%x) during "
				"ONFI probe\n",
				dma_buffer->data.flash_status);
			err = -EIO;
			break;
		}

		if (cmd_set_count) {
			/* Pass 1 result: check the 4-byte "ONFI" signature. */
			onfi_param_page_ptr = (struct onfi_param_page *)
				(&(onfi_identifier_buf[0]));
			if (onfi_param_page_ptr->parameter_page_signature !=
			    ONFI_PARAMETER_PAGE_SIGNATURE) {
				pr_info("ONFI probe : Found a non"
					"ONFI Compliant device \n");
				err = -EIO;
				break;
			}
		} else {
			/* Pass 2 result: accept the first of the redundant
			 * parameter-page copies whose CRC verifies. */
			for (crc_chk_count = 0; crc_chk_count <
			     ONFI_PARAM_INFO_LENGTH
			     / ONFI_PARAM_PAGE_LENGTH;
			     crc_chk_count++) {
				onfi_param_page_ptr =
					(struct onfi_param_page *)
					(&(onfi_param_info_buf
					   [ONFI_PARAM_PAGE_LENGTH *
					    crc_chk_count]));
				if (flash_onfi_crc_check(
					    (uint8_t *)onfi_param_page_ptr,
					    ONFI_PARAM_PAGE_LENGTH - 2) ==
				    onfi_param_page_ptr->integrity_crc) {
					break;
				}
			}
			if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
			    / ONFI_PARAM_PAGE_LENGTH) {
				pr_info("ONFI probe : CRC Check "
					"failed on ONFI Parameter "
					"data \n");
				err = -EIO;
				break;
			} else {
				/* Populate the runtime ONFI slot of the
				 * supported-device table. */
				supported_flash[0].flash_id =
					flash_read_id(chip);
				supported_flash[0].widebus =
					onfi_param_page_ptr->
					features_supported & 0x01;
				supported_flash[0].pagesize =
					onfi_param_page_ptr->
					number_of_data_bytes_per_page;
				supported_flash[0].blksize =
					onfi_param_page_ptr->
					number_of_pages_per_block *
					supported_flash[0].pagesize;
				supported_flash[0].oobsize =
					onfi_param_page_ptr->
					number_of_spare_bytes_per_page;
				supported_flash[0].density =
					onfi_param_page_ptr->
					number_of_blocks_per_logical_unit
					* supported_flash[0].blksize;

				pr_info("ONFI probe : Found an ONFI "
					"compliant device %s\n",
					onfi_param_page_ptr->device_model);

				/* Temporary hack for MT29F4G08ABC device.
				 * Since the device is not properly adhering
				 * to ONFi specification it is reporting
				 * as 16 bit device though it is 8 bit device!!!
				 */
				if (!strcmp(onfi_param_page_ptr->device_model,
					    "MT29F4G08ABC"))
					supported_flash[0].widebus = 0;
			}
		}
	}

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
				    ONFI_PARAM_INFO_LENGTH);
	msm_nand_release_dma_buffer(chip, onfi_identifier_buf,
				    ONFI_IDENTIFIER_LENGTH);

	return err;
}
  806.  
  807. static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
  808. struct mtd_oob_ops *ops)
  809. {
  810. struct msm_nand_chip *chip = mtd->priv;
  811.  
  812. struct {
  813. dmov_s cmd[8 * 5 + 2];
  814. unsigned cmdptr;
  815. struct {
  816. uint32_t cmd;
  817. uint32_t addr0;
  818. uint32_t addr1;
  819. uint32_t chipsel;
  820. uint32_t cfg0;
  821. uint32_t cfg1;
  822. uint32_t exec;
  823. uint32_t ecccfg;
  824. struct {
  825. uint32_t flash_status;
  826. uint32_t buffer_status;
  827. } result[8];
  828. } data;
  829. } *dma_buffer;
  830. dmov_s *cmd;
  831. unsigned n;
  832. unsigned page = 0;
  833. uint32_t oob_len;
  834. uint32_t sectordatasize;
  835. uint32_t sectoroobsize;
  836. int err, pageerr, rawerr;
  837. dma_addr_t data_dma_addr = 0;
  838. dma_addr_t oob_dma_addr = 0;
  839. dma_addr_t data_dma_addr_curr = 0;
  840. dma_addr_t oob_dma_addr_curr = 0;
  841. uint32_t oob_col = 0;
  842. unsigned page_count;
  843. unsigned pages_read = 0;
  844. unsigned start_sector = 0;
  845. uint32_t ecc_errors;
  846. uint32_t total_ecc_errors = 0;
  847. unsigned cwperpage;
  848.  
  849. if (mtd->writesize == 2048)
  850. page = from >> 11;
  851.  
  852. if (mtd->writesize == 4096)
  853. page = from >> 12;
  854.  
  855. oob_len = ops->ooblen;
  856. cwperpage = (mtd->writesize >> 9);
  857.  
  858. if (from & (mtd->writesize - 1)) {
  859. pr_err("%s: unsupported from, 0x%llx\n",
  860. __func__, from);
  861. return -EINVAL;
  862. }
  863. if (ops->mode != MTD_OOB_RAW) {
  864. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  865. /* when ops->datbuf is NULL, ops->len can be ooblen */
  866. pr_err("%s: unsupported ops->len, %d\n",
  867. __func__, ops->len);
  868. return -EINVAL;
  869. }
  870. } else {
  871. if (ops->datbuf != NULL &&
  872. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  873. pr_err("%s: unsupported ops->len,"
  874. " %d for MTD_OOB_RAW\n", __func__, ops->len);
  875. return -EINVAL;
  876. }
  877. }
  878.  
  879. if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  880. pr_err("%s: unsupported ops->ooboffs, %d\n",
  881. __func__, ops->ooboffs);
  882. return -EINVAL;
  883. }
  884.  
  885. if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OOB_AUTO)
  886. start_sector = cwperpage - 1;
  887.  
  888. if (ops->oobbuf && !ops->datbuf) {
  889. page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
  890. mtd->oobavail : mtd->oobsize);
  891. if ((page_count == 0) && (ops->ooblen))
  892. page_count = 1;
  893. } else if (ops->mode != MTD_OOB_RAW)
  894. page_count = ops->len / mtd->writesize;
  895. else
  896. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  897.  
  898. #if 0 /* yaffs reads more oob data than it needs */
  899. if (ops->ooblen >= sectoroobsize * 4) {
  900. pr_err("%s: unsupported ops->ooblen, %d\n",
  901. __func__, ops->ooblen);
  902. return -EINVAL;
  903. }
  904. #endif
  905.  
  906. #if VERBOSE
  907. pr_info("msm_nand_read_oob %llx %p %x %p %x\n",
  908. from, ops->datbuf, ops->len, ops->oobbuf, ops->ooblen);
  909. #endif
  910. if (ops->datbuf) {
  911. /* memset(ops->datbuf, 0x55, ops->len); */
  912. data_dma_addr_curr = data_dma_addr =
  913. msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
  914. DMA_FROM_DEVICE);
  915. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  916. pr_err("msm_nand_read_oob: failed to get dma addr "
  917. "for %p\n", ops->datbuf);
  918. return -EIO;
  919. }
  920. }
  921. if (ops->oobbuf) {
  922. memset(ops->oobbuf, 0xff, ops->ooblen);
  923. oob_dma_addr_curr = oob_dma_addr =
  924. msm_nand_dma_map(chip->dev, ops->oobbuf,
  925. ops->ooblen, DMA_BIDIRECTIONAL);
  926. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  927. pr_err("msm_nand_read_oob: failed to get dma addr "
  928. "for %p\n", ops->oobbuf);
  929. err = -EIO;
  930. goto err_dma_map_oobbuf_failed;
  931. }
  932. }
  933.  
  934. wait_event(chip->wait_queue,
  935. (dma_buffer = msm_nand_get_dma_buffer(
  936. chip, sizeof(*dma_buffer))));
  937.  
  938. oob_col = start_sector * 0x210;
  939. if (chip->CFG1 & CFG1_WIDE_FLASH)
  940. oob_col >>= 1;
  941.  
  942. err = 0;
  943. while (page_count-- > 0) {
  944. cmd = dma_buffer->cmd;
  945.  
  946. /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
  947. if (ops->mode != MTD_OOB_RAW) {
  948. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
  949. dma_buffer->data.cfg0 =
  950. (chip->CFG0 & ~(7U << 6))
  951. | (((cwperpage-1) - start_sector) << 6);
  952. dma_buffer->data.cfg1 = chip->CFG1;
  953. } else {
  954. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
  955. dma_buffer->data.cfg0 = (MSM_NAND_CFG0_RAW
  956. & ~(7U << 6)) | ((cwperpage-1) << 6);
  957. dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW |
  958. (chip->CFG1 & CFG1_WIDE_FLASH);
  959. }
  960.  
  961. dma_buffer->data.addr0 = (page << 16) | oob_col;
  962. /* qc example is (page >> 16) && 0xff !? */
  963. dma_buffer->data.addr1 = (page >> 16) & 0xff;
  964. /* flash0 + undoc bit */
  965. dma_buffer->data.chipsel = 0 | 4;
  966.  
  967.  
  968. /* GO bit for the EXEC register */
  969. dma_buffer->data.exec = 1;
  970.  
  971.  
  972. BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.result));
  973.  
  974. for (n = start_sector; n < cwperpage; n++) {
  975. /* flash + buffer status return words */
  976. dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
  977. dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
  978.  
  979. /* block on cmd ready, then
  980. * write CMD / ADDR0 / ADDR1 / CHIPSEL
  981. * regs in a burst
  982. */
  983. cmd->cmd = DST_CRCI_NAND_CMD;
  984. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  985. cmd->dst = MSM_NAND_FLASH_CMD;
  986. if (n == start_sector)
  987. cmd->len = 16;
  988. else
  989. cmd->len = 4;
  990. cmd++;
  991.  
  992. if (n == start_sector) {
  993. cmd->cmd = 0;
  994. cmd->src = msm_virt_to_dma(chip,
  995. &dma_buffer->data.cfg0);
  996. cmd->dst = MSM_NAND_DEV0_CFG0;
  997. cmd->len = 8;
  998. cmd++;
  999.  
  1000. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  1001. cmd->cmd = 0;
  1002. cmd->src = msm_virt_to_dma(chip,
  1003. &dma_buffer->data.ecccfg);
  1004. cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
  1005. cmd->len = 4;
  1006. cmd++;
  1007. }
  1008.  
  1009. /* kick the execute register */
  1010. cmd->cmd = 0;
  1011. cmd->src =
  1012. msm_virt_to_dma(chip, &dma_buffer->data.exec);
  1013. cmd->dst = MSM_NAND_EXEC_CMD;
  1014. cmd->len = 4;
  1015. cmd++;
  1016.  
  1017. /* block on data ready, then
  1018. * read the status register
  1019. */
  1020. cmd->cmd = SRC_CRCI_NAND_DATA;
  1021. cmd->src = MSM_NAND_FLASH_STATUS;
  1022. cmd->dst = msm_virt_to_dma(chip,
  1023. &dma_buffer->data.result[n]);
  1024. /* MSM_NAND_FLASH_STATUS + MSM_NAND_BUFFER_STATUS */
  1025. cmd->len = 8;
  1026. cmd++;
  1027.  
  1028. /* read data block
  1029. * (only valid if status says success)
  1030. */
  1031. if (ops->datbuf) {
  1032. if (ops->mode != MTD_OOB_RAW)
  1033. sectordatasize = (n < (cwperpage - 1))
  1034. ? 516 : (512 - ((cwperpage - 1) << 2));
  1035. else
  1036. sectordatasize = 528;
  1037.  
  1038. cmd->cmd = 0;
  1039. cmd->src = MSM_NAND_FLASH_BUFFER;
  1040. cmd->dst = data_dma_addr_curr;
  1041. data_dma_addr_curr += sectordatasize;
  1042. cmd->len = sectordatasize;
  1043. cmd++;
  1044. }
  1045.  
  1046. if (ops->oobbuf && (n == (cwperpage - 1)
  1047. || ops->mode != MTD_OOB_AUTO)) {
  1048. cmd->cmd = 0;
  1049. if (n == (cwperpage - 1)) {
  1050. cmd->src = MSM_NAND_FLASH_BUFFER +
  1051. (512 - ((cwperpage - 1) << 2));
  1052. sectoroobsize = (cwperpage << 2);
  1053. if (ops->mode != MTD_OOB_AUTO)
  1054. sectoroobsize += 10;
  1055. } else {
  1056. cmd->src = MSM_NAND_FLASH_BUFFER + 516;
  1057. sectoroobsize = 10;
  1058. }
  1059.  
  1060. cmd->dst = oob_dma_addr_curr;
  1061. if (sectoroobsize < oob_len)
  1062. cmd->len = sectoroobsize;
  1063. else
  1064. cmd->len = oob_len;
  1065. oob_dma_addr_curr += cmd->len;
  1066. oob_len -= cmd->len;
  1067. if (cmd->len > 0)
  1068. cmd++;
  1069. }
  1070. }
  1071.  
  1072. BUILD_BUG_ON(8 * 5 + 2 != ARRAY_SIZE(dma_buffer->cmd));
  1073. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  1074. dma_buffer->cmd[0].cmd |= CMD_OCB;
  1075. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  1076.  
  1077. dma_buffer->cmdptr =
  1078. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
  1079. | CMD_PTR_LP;
  1080.  
  1081. dsb();
  1082. msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
  1083. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  1084. &dma_buffer->cmdptr)));
  1085. dsb();
  1086.  
  1087. /* if any of the writes failed (0x10), or there
  1088. * was a protection violation (0x100), we lose
  1089. */
  1090. pageerr = rawerr = 0;
  1091. for (n = start_sector; n < cwperpage; n++) {
  1092. if (dma_buffer->data.result[n].flash_status & 0x110) {
  1093. rawerr = -EIO;
  1094. break;
  1095. }
  1096. }
  1097. if (rawerr) {
  1098. if (ops->datbuf && ops->mode != MTD_OOB_RAW) {
  1099. uint8_t *datbuf = ops->datbuf +
  1100. pages_read * mtd->writesize;
  1101.  
  1102. dma_sync_single_for_cpu(chip->dev,
  1103. data_dma_addr_curr-mtd->writesize,
  1104. mtd->writesize, DMA_BIDIRECTIONAL);
  1105.  
  1106. for (n = 0; n < mtd->writesize; n++) {
  1107. /* empty blocks read 0x54 at
  1108. * these offsets
  1109. */
  1110. if (n % 516 == 3 && datbuf[n] == 0x54)
  1111. datbuf[n] = 0xff;
  1112. if (datbuf[n] != 0xff) {
  1113. pageerr = rawerr;
  1114. break;
  1115. }
  1116. }
  1117.  
  1118. dma_sync_single_for_device(chip->dev,
  1119. data_dma_addr_curr-mtd->writesize,
  1120. mtd->writesize, DMA_BIDIRECTIONAL);
  1121.  
  1122. }
  1123. if (ops->oobbuf) {
  1124. for (n = 0; n < ops->ooblen; n++) {
  1125. if (ops->oobbuf[n] != 0xff) {
  1126. pageerr = rawerr;
  1127. break;
  1128. }
  1129. }
  1130. }
  1131. }
  1132. if (pageerr) {
  1133. for (n = start_sector; n < cwperpage; n++) {
  1134. if (dma_buffer->data.result[n].buffer_status
  1135. & 0x8) {
  1136. /* not thread safe */
  1137. mtd->ecc_stats.failed++;
  1138. pageerr = -EBADMSG;
  1139. break;
  1140. }
  1141. }
  1142. }
  1143. if (!rawerr) { /* check for corretable errors */
  1144. for (n = start_sector; n < cwperpage; n++) {
  1145. ecc_errors = dma_buffer->data.
  1146. result[n].buffer_status & 0x7;
  1147. if (ecc_errors) {
  1148. total_ecc_errors += ecc_errors;
  1149. /* not thread safe */
  1150. mtd->ecc_stats.corrected += ecc_errors;
  1151. if (ecc_errors > 1)
  1152. pageerr = -EUCLEAN;
  1153. }
  1154. }
  1155. }
  1156. if (pageerr && (pageerr != -EUCLEAN || err == 0))
  1157. err = pageerr;
  1158.  
  1159. #if VERBOSE
  1160. if (rawerr && !pageerr) {
  1161. pr_err("msm_nand_read_oob %llx %x %x empty page\n",
  1162. (loff_t)page * mtd->writesize, ops->len,
  1163. ops->ooblen);
  1164. } else {
  1165. pr_info("status: %x %x %x %x %x %x %x %x %x \
  1166. %x %x %x %x %x %x %x \n",
  1167. dma_buffer->data.result[0].flash_status,
  1168. dma_buffer->data.result[0].buffer_status,
  1169. dma_buffer->data.result[1].flash_status,
  1170. dma_buffer->data.result[1].buffer_status,
  1171. dma_buffer->data.result[2].flash_status,
  1172. dma_buffer->data.result[2].buffer_status,
  1173. dma_buffer->data.result[3].flash_status,
  1174. dma_buffer->data.result[3].buffer_status,
  1175. dma_buffer->data.result[4].flash_status,
  1176. dma_buffer->data.result[4].buffer_status,
  1177. dma_buffer->data.result[5].flash_status,
  1178. dma_buffer->data.result[5].buffer_status,
  1179. dma_buffer->data.result[6].flash_status,
  1180. dma_buffer->data.result[6].buffer_status,
  1181. dma_buffer->data.result[7].flash_status,
  1182. dma_buffer->data.result[7].buffer_status);
  1183. }
  1184. #endif
  1185. if (err && err != -EUCLEAN && err != -EBADMSG)
  1186. break;
  1187. pages_read++;
  1188. page++;
  1189. }
  1190. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  1191.  
  1192. if (ops->oobbuf) {
  1193. dma_unmap_page(chip->dev, oob_dma_addr,
  1194. ops->ooblen, DMA_FROM_DEVICE);
  1195. }
  1196. err_dma_map_oobbuf_failed:
  1197. if (ops->datbuf) {
  1198. dma_unmap_page(chip->dev, data_dma_addr,
  1199. ops->len, DMA_BIDIRECTIONAL);
  1200. }
  1201.  
  1202. if (ops->mode != MTD_OOB_RAW)
  1203. ops->retlen = mtd->writesize * pages_read;
  1204. else
  1205. ops->retlen = (mtd->writesize + mtd->oobsize) *
  1206. pages_read;
  1207. ops->oobretlen = ops->ooblen - oob_len;
  1208. if (err)
  1209. pr_err("msm_nand_read_oob %llx %x %x failed %d, corrected %d\n",
  1210. from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
  1211. total_ecc_errors);
  1212. return err;
  1213. }
  1214.  
  1215. static int msm_nand_read_oob_dualnandc(struct mtd_info *mtd, loff_t from,
  1216. struct mtd_oob_ops *ops)
  1217. {
  1218. struct msm_nand_chip *chip = mtd->priv;
  1219.  
  1220. struct {
  1221. dmov_s cmd[16 * 6 + 20];
  1222. unsigned cmdptr;
  1223. struct {
  1224. uint32_t cmd;
  1225. uint32_t nandc01_addr0;
  1226. uint32_t nandc10_addr0;
  1227. uint32_t nandc11_addr1;
  1228. uint32_t chipsel_cs0;
  1229. uint32_t chipsel_cs1;
  1230. uint32_t cfg0;
  1231. uint32_t cfg1;
  1232. uint32_t exec;
  1233. uint32_t ecccfg;
  1234. uint32_t ebi2_cfg;
  1235. uint32_t ebi2_chip_select_cfg0;
  1236. uint32_t adm_mux_data_ack_req_nc01;
  1237. uint32_t adm_mux_cmd_ack_req_nc01;
  1238. uint32_t adm_mux_data_ack_req_nc10;
  1239. uint32_t adm_mux_cmd_ack_req_nc10;
  1240. uint32_t adm_default_mux;
  1241. uint32_t default_ebi2_chip_select_cfg0;
  1242. uint32_t nc10_flash_dev_cmd_vld;
  1243. uint32_t nc10_flash_dev_cmd1;
  1244. uint32_t nc10_flash_dev_cmd_vld_default;
  1245. uint32_t nc10_flash_dev_cmd1_default;
  1246. uint32_t ebi2_cfg_default;
  1247. struct {
  1248. uint32_t flash_status;
  1249. uint32_t buffer_status;
  1250. } result[16];
  1251. } data;
  1252. } *dma_buffer;
  1253. dmov_s *cmd;
  1254. unsigned n;
  1255. unsigned page = 0;
  1256. uint32_t oob_len;
  1257. uint32_t sectordatasize;
  1258. uint32_t sectoroobsize;
  1259. int err, pageerr, rawerr;
  1260. dma_addr_t data_dma_addr = 0;
  1261. dma_addr_t oob_dma_addr = 0;
  1262. dma_addr_t data_dma_addr_curr = 0;
  1263. dma_addr_t oob_dma_addr_curr = 0;
  1264. uint32_t oob_col = 0;
  1265. unsigned page_count;
  1266. unsigned pages_read = 0;
  1267. unsigned start_sector = 0;
  1268. uint32_t ecc_errors;
  1269. uint32_t total_ecc_errors = 0;
  1270. unsigned cwperpage;
  1271.  
  1272. if (mtd->writesize == 2048)
  1273. page = from >> 11;
  1274.  
  1275. if (mtd->writesize == 4096)
  1276. page = from >> 12;
  1277.  
  1278. if (interleave_enable)
  1279. page = (from >> 1) >> 12;
  1280.  
  1281. oob_len = ops->ooblen;
  1282. cwperpage = (mtd->writesize >> 9);
  1283.  
  1284. if (from & (mtd->writesize - 1)) {
  1285. pr_err("%s: unsupported from, 0x%llx\n",
  1286. __func__, from);
  1287. return -EINVAL;
  1288. }
  1289. if (ops->mode != MTD_OOB_RAW) {
  1290. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  1291. pr_err("%s: unsupported ops->len, %d\n",
  1292. __func__, ops->len);
  1293. return -EINVAL;
  1294. }
  1295. } else {
  1296. if (ops->datbuf != NULL &&
  1297. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  1298. pr_err("%s: unsupported ops->len,"
  1299. " %d for MTD_OOB_RAW\n", __func__, ops->len);
  1300. return -EINVAL;
  1301. }
  1302. }
  1303.  
  1304. if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  1305. pr_err("%s: unsupported ops->ooboffs, %d\n",
  1306. __func__, ops->ooboffs);
  1307. return -EINVAL;
  1308. }
  1309.  
  1310. if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OOB_AUTO)
  1311. start_sector = cwperpage - 1;
  1312.  
  1313. if (ops->oobbuf && !ops->datbuf) {
  1314. page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
  1315. mtd->oobavail : mtd->oobsize);
  1316. if ((page_count == 0) && (ops->ooblen))
  1317. page_count = 1;
  1318. } else if (ops->mode != MTD_OOB_RAW)
  1319. page_count = ops->len / mtd->writesize;
  1320. else
  1321. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  1322.  
  1323. #if VERBOSE
  1324. pr_info("msm_nand_read_oob_dualnandc %llx %p %x %p %x\n",
  1325. from, ops->datbuf, ops->len, ops->oobbuf, ops->ooblen);
  1326. #endif
  1327.  
  1328. if (ops->datbuf) {
  1329. data_dma_addr_curr = data_dma_addr =
  1330. msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
  1331. DMA_FROM_DEVICE);
  1332. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  1333. pr_err("msm_nand_read_oob_dualnandc: "
  1334. "failed to get dma addr for %p\n",
  1335. ops->datbuf);
  1336. return -EIO;
  1337. }
  1338. }
  1339. if (ops->oobbuf) {
  1340. memset(ops->oobbuf, 0xff, ops->ooblen);
  1341. oob_dma_addr_curr = oob_dma_addr =
  1342. msm_nand_dma_map(chip->dev, ops->oobbuf,
  1343. ops->ooblen, DMA_BIDIRECTIONAL);
  1344. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  1345. pr_err("msm_nand_read_oob_dualnandc: "
  1346. "failed to get dma addr for %p\n",
  1347. ops->oobbuf);
  1348. err = -EIO;
  1349. goto err_dma_map_oobbuf_failed;
  1350. }
  1351. }
  1352.  
  1353. wait_event(chip->wait_queue,
  1354. (dma_buffer = msm_nand_get_dma_buffer(
  1355. chip, sizeof(*dma_buffer))));
  1356.  
  1357. oob_col = start_sector * 0x210;
  1358. if (chip->CFG1 & CFG1_WIDE_FLASH)
  1359. oob_col >>= 1;
  1360.  
  1361. err = 0;
  1362. while (page_count-- > 0) {
  1363. cmd = dma_buffer->cmd;
  1364.  
  1365. if (ops->mode != MTD_OOB_RAW) {
  1366. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
  1367. if (start_sector == (cwperpage - 1)) {
  1368. dma_buffer->data.cfg0 = (chip->CFG0 &
  1369. ~(7U << 6));
  1370. } else {
  1371. dma_buffer->data.cfg0 = (chip->CFG0 &
  1372. ~(7U << 6))
  1373. | (((cwperpage >> 1)-1) << 6);
  1374. }
  1375. dma_buffer->data.cfg1 = chip->CFG1;
  1376. } else {
  1377. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
  1378. dma_buffer->data.cfg0 = ((MSM_NAND_CFG0_RAW &
  1379. ~(7U << 6)) | ((((cwperpage >> 1)-1) << 6)));
  1380. dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW |
  1381. (chip->CFG1 & CFG1_WIDE_FLASH);
  1382. }
  1383.  
  1384. if (!interleave_enable) {
  1385. if (start_sector == (cwperpage - 1)) {
  1386. dma_buffer->data.nandc10_addr0 =
  1387. (page << 16) | oob_col;
  1388. dma_buffer->data.nc10_flash_dev_cmd_vld = 0xD;
  1389. dma_buffer->data.nc10_flash_dev_cmd1 =
  1390. 0xF00F3000;
  1391. } else {
  1392. dma_buffer->data.nandc01_addr0 =
  1393. (page << 16) | oob_col;
  1394. dma_buffer->data.nandc10_addr0 = 0x108;
  1395. dma_buffer->data.nc10_flash_dev_cmd_vld = 0x1D;
  1396. dma_buffer->data.nc10_flash_dev_cmd1 =
  1397. 0xF00FE005;
  1398. }
  1399. } else {
  1400. dma_buffer->data.nandc01_addr0 =
  1401. dma_buffer->data.nandc10_addr0 =
  1402. (page << 16) | oob_col;
  1403. }
  1404. /* ADDR1 */
  1405. dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
  1406.  
  1407. dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
  1408. dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  1409. dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
  1410. dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  1411. dma_buffer->data.adm_default_mux = 0x00000FC0;
  1412. dma_buffer->data.nc10_flash_dev_cmd_vld_default = 0x1D;
  1413. dma_buffer->data.nc10_flash_dev_cmd1_default = 0xF00F3000;
  1414.  
  1415. /* config ebi2 cfg reg for pingpong ( 0xA000_0004 ) */
  1416. dma_buffer->data.ebi2_cfg = 0x4010080;
  1417. dma_buffer->data.ebi2_cfg_default = 0x4010000;
  1418. dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
  1419. dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
  1420.  
  1421. /* flash0 + undoc bit */
  1422. dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
  1423. dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
  1424.  
  1425. /* GO bit for the EXEC register */
  1426. dma_buffer->data.exec = 1;
  1427.  
  1428. BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.result));
  1429.  
  1430. for (n = start_sector; n < cwperpage; n++) {
  1431. /* flash + buffer status return words */
  1432. dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
  1433. dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
  1434.  
  1435. if (n == start_sector) {
  1436. if (!interleave_enable) {
  1437. /* config ebi2 cfg reg */
  1438. cmd->cmd = 0;
  1439. cmd->src = msm_virt_to_dma(chip,
  1440. &dma_buffer->data.ebi2_cfg);
  1441. cmd->dst = EBI2_CFG_REG;
  1442. cmd->len = 4;
  1443. cmd++;
  1444.  
  1445. cmd->cmd = 0;
  1446. cmd->src = msm_virt_to_dma(chip,
  1447. &dma_buffer->
  1448. data.nc10_flash_dev_cmd_vld);
  1449. cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
  1450. cmd->len = 4;
  1451. cmd++;
  1452.  
  1453. cmd->cmd = 0;
  1454. cmd->src = msm_virt_to_dma(chip,
  1455. &dma_buffer->data.nc10_flash_dev_cmd1);
  1456. cmd->dst = NC10(MSM_NAND_DEV_CMD1);
  1457. cmd->len = 4;
  1458. cmd++;
  1459.  
  1460. /* NC01, NC10 --> ADDR1 */
  1461. cmd->cmd = 0;
  1462. cmd->src = msm_virt_to_dma(chip,
  1463. &dma_buffer->data.nandc11_addr1);
  1464. cmd->dst = NC11(MSM_NAND_ADDR1);
  1465. cmd->len = 8;
  1466. cmd++;
  1467.  
  1468. cmd->cmd = 0;
  1469. cmd->src = msm_virt_to_dma(chip,
  1470. &dma_buffer->data.cfg0);
  1471. cmd->dst = NC11(MSM_NAND_DEV0_CFG0);
  1472. cmd->len = 8;
  1473. cmd++;
  1474. } else {
  1475. /* enable CS0 & CS1 */
  1476. cmd->cmd = 0;
  1477. cmd->src = msm_virt_to_dma(chip,
  1478. &dma_buffer->
  1479. data.ebi2_chip_select_cfg0);
  1480. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  1481. cmd->len = 4;
  1482. cmd++;
  1483.  
  1484. /* NC01, NC10 --> ADDR1 */
  1485. cmd->cmd = 0;
  1486. cmd->src = msm_virt_to_dma(chip,
  1487. &dma_buffer->data.nandc11_addr1);
  1488. cmd->dst = NC11(MSM_NAND_ADDR1);
  1489. cmd->len = 4;
  1490. cmd++;
  1491.  
  1492. /* Enable CS0 for NC01 */
  1493. cmd->cmd = 0;
  1494. cmd->src = msm_virt_to_dma(chip,
  1495. &dma_buffer->data.chipsel_cs0);
  1496. cmd->dst =
  1497. NC01(MSM_NAND_FLASH_CHIP_SELECT);
  1498. cmd->len = 4;
  1499. cmd++;
  1500.  
  1501. /* Enable CS1 for NC10 */
  1502. cmd->cmd = 0;
  1503. cmd->src = msm_virt_to_dma(chip,
  1504. &dma_buffer->data.chipsel_cs1);
  1505. cmd->dst =
  1506. NC10(MSM_NAND_FLASH_CHIP_SELECT);
  1507. cmd->len = 4;
  1508. cmd++;
  1509.  
  1510. /* config DEV0_CFG0 & CFG1 for CS0 */
  1511. cmd->cmd = 0;
  1512. cmd->src = msm_virt_to_dma(chip,
  1513. &dma_buffer->data.cfg0);
  1514. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  1515. cmd->len = 8;
  1516. cmd++;
  1517.  
  1518. /* config DEV1_CFG0 & CFG1 for CS1 */
  1519. cmd->cmd = 0;
  1520. cmd->src = msm_virt_to_dma(chip,
  1521. &dma_buffer->data.cfg0);
  1522. cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
  1523. cmd->len = 8;
  1524. cmd++;
  1525. }
  1526.  
  1527. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  1528. cmd->cmd = 0;
  1529. cmd->src = msm_virt_to_dma(chip,
  1530. &dma_buffer->data.ecccfg);
  1531. cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
  1532. cmd->len = 4;
  1533. cmd++;
  1534.  
  1535. /* if 'only' the last code word */
  1536. if (n == cwperpage - 1) {
  1537. /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
  1538. cmd->cmd = 0;
  1539. cmd->src = msm_virt_to_dma(chip,
  1540. &dma_buffer->
  1541. data.adm_mux_cmd_ack_req_nc01);
  1542. cmd->dst = EBI2_NAND_ADM_MUX;
  1543. cmd->len = 4;
  1544. cmd++;
  1545.  
  1546. /* CMD */
  1547. cmd->cmd = DST_CRCI_NAND_CMD;
  1548. cmd->src = msm_virt_to_dma(chip,
  1549. &dma_buffer->data.cmd);
  1550. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  1551. cmd->len = 4;
  1552. cmd++;
  1553.  
  1554. /* NC10 --> ADDR0 ( 0x0 ) */
  1555. cmd->cmd = 0;
  1556. cmd->src = msm_virt_to_dma(chip,
  1557. &dma_buffer->data.nandc10_addr0);
  1558. cmd->dst = NC10(MSM_NAND_ADDR0);
  1559. cmd->len = 4;
  1560. cmd++;
  1561.  
  1562. /* kick the execute reg for NC10 */
  1563. cmd->cmd = 0;
  1564. cmd->src = msm_virt_to_dma(chip,
  1565. &dma_buffer->data.exec);
  1566. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  1567. cmd->len = 4;
  1568. cmd++;
  1569.  
  1570. /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
  1571. cmd->cmd = 0;
  1572. cmd->src = msm_virt_to_dma(chip,
  1573. &dma_buffer->
  1574. data.adm_mux_data_ack_req_nc01);
  1575. cmd->dst = EBI2_NAND_ADM_MUX;
  1576. cmd->len = 4;
  1577. cmd++;
  1578.  
  1579. /* block on data ready from NC10, then
  1580. * read the status register
  1581. */
  1582. cmd->cmd = SRC_CRCI_NAND_DATA;
  1583. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  1584. cmd->dst = msm_virt_to_dma(chip,
  1585. &dma_buffer->data.result[n]);
  1586. /* MSM_NAND_FLASH_STATUS +
  1587. * MSM_NAND_BUFFER_STATUS
  1588. */
  1589. cmd->len = 8;
  1590. cmd++;
  1591. } else {
  1592. if (!interleave_enable) {
  1593. cmd->cmd = 0;
  1594. cmd->src =
  1595. msm_virt_to_dma(chip,
  1596. &dma_buffer->
  1597. data.nc10_flash_dev_cmd1);
  1598. cmd->dst =
  1599. NC10(MSM_NAND_DEV_CMD1);
  1600. cmd->len = 4;
  1601. cmd++;
  1602. }
  1603. /* NC01 --> ADDR0 */
  1604. cmd->cmd = 0;
  1605. cmd->src = msm_virt_to_dma(chip,
  1606. &dma_buffer->data.nandc01_addr0);
  1607. cmd->dst = NC01(MSM_NAND_ADDR0);
  1608. cmd->len = 4;
  1609. cmd++;
  1610.  
  1611. /* NC10 --> ADDR1 */
  1612. cmd->cmd = 0;
  1613. cmd->src = msm_virt_to_dma(chip,
  1614. &dma_buffer->data.nandc10_addr0);
  1615. cmd->dst = NC10(MSM_NAND_ADDR0);
  1616. cmd->len = 4;
  1617. cmd++;
  1618.  
  1619. /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
  1620. cmd->cmd = 0;
  1621. cmd->src = msm_virt_to_dma(chip,
  1622. &dma_buffer->
  1623. data.adm_mux_cmd_ack_req_nc10);
  1624. cmd->dst = EBI2_NAND_ADM_MUX;
  1625. cmd->len = 4;
  1626. cmd++;
  1627.  
  1628. /* CMD */
  1629. cmd->cmd = DST_CRCI_NAND_CMD;
  1630. cmd->src = msm_virt_to_dma(chip,
  1631. &dma_buffer->data.cmd);
  1632. cmd->dst = NC01(MSM_NAND_FLASH_CMD);
  1633. cmd->len = 4;
  1634. cmd++;
  1635.  
  1636. /* kick the execute register for NC01*/
  1637. cmd->cmd = 0;
  1638. cmd->src = msm_virt_to_dma(chip,
  1639. &dma_buffer->data.exec);
  1640. cmd->dst = NC01(MSM_NAND_EXEC_CMD);
  1641. cmd->len = 4;
  1642. cmd++;
  1643. }
  1644. }
  1645.  
  1646. /* read data block
  1647. * (only valid if status says success)
  1648. */
  1649. if (ops->datbuf) {
  1650. if (ops->mode != MTD_OOB_RAW)
  1651. sectordatasize = (n < (cwperpage - 1))
  1652. ? 516 : (512 - ((cwperpage - 1) << 2));
  1653. else
  1654. sectordatasize = 528;
  1655.  
  1656. if (n % 2 == 0) {
  1657. /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
  1658. cmd->cmd = 0;
  1659. cmd->src = msm_virt_to_dma(chip,
  1660. &dma_buffer->
  1661. data.adm_mux_cmd_ack_req_nc01);
  1662. cmd->dst = EBI2_NAND_ADM_MUX;
  1663. cmd->len = 4;
  1664. cmd++;
  1665.  
  1666. /* CMD */
  1667. cmd->cmd = DST_CRCI_NAND_CMD;
  1668. cmd->src = msm_virt_to_dma(chip,
  1669. &dma_buffer->data.cmd);
  1670. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  1671. cmd->len = 4;
  1672. cmd++;
  1673.  
  1674. /* kick the execute register for NC10 */
  1675. cmd->cmd = 0;
  1676. cmd->src = msm_virt_to_dma(chip,
  1677. &dma_buffer->data.exec);
  1678. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  1679. cmd->len = 4;
  1680. cmd++;
  1681.  
  1682. /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
  1683. cmd->cmd = 0;
  1684. cmd->src = msm_virt_to_dma(chip,
  1685. &dma_buffer->
  1686. data.adm_mux_data_ack_req_nc10);
  1687. cmd->dst = EBI2_NAND_ADM_MUX;
  1688. cmd->len = 4;
  1689. cmd++;
  1690.  
  1691. /* block on data ready from NC01, then
  1692. * read the status register
  1693. */
  1694. cmd->cmd = SRC_CRCI_NAND_DATA;
  1695. cmd->src = NC01(MSM_NAND_FLASH_STATUS);
  1696. cmd->dst = msm_virt_to_dma(chip,
  1697. &dma_buffer->data.result[n]);
  1698. /* MSM_NAND_FLASH_STATUS +
  1699. * MSM_NAND_BUFFER_STATUS
  1700. */
  1701. cmd->len = 8;
  1702. cmd++;
  1703.  
  1704. cmd->cmd = 0;
  1705. cmd->src = NC01(MSM_NAND_FLASH_BUFFER);
  1706. cmd->dst = data_dma_addr_curr;
  1707. data_dma_addr_curr += sectordatasize;
  1708. cmd->len = sectordatasize;
  1709. cmd++;
  1710. } else {
  1711. if (n != cwperpage - 1) {
  1712. /* MASK CMD ACK/REQ -->
  1713. * NC10 (0xF14)
  1714. */
  1715. cmd->cmd = 0;
  1716. cmd->src =
  1717. msm_virt_to_dma(chip,
  1718. &dma_buffer->
  1719. data.adm_mux_cmd_ack_req_nc10);
  1720. cmd->dst = EBI2_NAND_ADM_MUX;
  1721. cmd->len = 4;
  1722. cmd++;
  1723.  
  1724. /* CMD */
  1725. cmd->cmd = DST_CRCI_NAND_CMD;
  1726. cmd->src = msm_virt_to_dma(chip,
  1727. &dma_buffer->data.cmd);
  1728. cmd->dst =
  1729. NC01(MSM_NAND_FLASH_CMD);
  1730. cmd->len = 4;
  1731. cmd++;
  1732.  
  1733. /* EXEC */
  1734. cmd->cmd = 0;
  1735. cmd->src = msm_virt_to_dma(chip,
  1736. &dma_buffer->data.exec);
  1737. cmd->dst =
  1738. NC01(MSM_NAND_EXEC_CMD);
  1739. cmd->len = 4;
  1740. cmd++;
  1741.  
  1742. /* MASK DATA ACK/REQ -->
  1743. * NC01 (0xA3C)
  1744. */
  1745. cmd->cmd = 0;
  1746. cmd->src = msm_virt_to_dma(chip,
  1747. &dma_buffer->
  1748. data.adm_mux_data_ack_req_nc01);
  1749. cmd->dst = EBI2_NAND_ADM_MUX;
  1750. cmd->len = 4;
  1751. cmd++;
  1752.  
  1753. /* block on data ready from NC10
  1754. * then read the status register
  1755. */
  1756. cmd->cmd = SRC_CRCI_NAND_DATA;
  1757. cmd->src =
  1758. NC10(MSM_NAND_FLASH_STATUS);
  1759. cmd->dst = msm_virt_to_dma(chip,
  1760. &dma_buffer->data.result[n]);
  1761. /* MSM_NAND_FLASH_STATUS +
  1762. * MSM_NAND_BUFFER_STATUS
  1763. */
  1764. cmd->len = 8;
  1765. cmd++;
  1766. } else {
  1767. /* MASK DATA ACK/REQ ->
  1768. * NC01 (0xA3C)
  1769. */
  1770. cmd->cmd = 0;
  1771. cmd->src = msm_virt_to_dma(chip,
  1772. &dma_buffer->
  1773. data.adm_mux_data_ack_req_nc01);
  1774. cmd->dst = EBI2_NAND_ADM_MUX;
  1775. cmd->len = 4;
  1776. cmd++;
  1777.  
  1778. /* block on data ready from NC10
  1779. * then read the status register
  1780. */
  1781. cmd->cmd = SRC_CRCI_NAND_DATA;
  1782. cmd->src =
  1783. NC10(MSM_NAND_FLASH_STATUS);
  1784. cmd->dst = msm_virt_to_dma(chip,
  1785. &dma_buffer->data.result[n]);
  1786. /* MSM_NAND_FLASH_STATUS +
  1787. * MSM_NAND_BUFFER_STATUS
  1788. */
  1789. cmd->len = 8;
  1790. cmd++;
  1791. }
  1792. cmd->cmd = 0;
  1793. cmd->src = NC10(MSM_NAND_FLASH_BUFFER);
  1794. cmd->dst = data_dma_addr_curr;
  1795. data_dma_addr_curr += sectordatasize;
  1796. cmd->len = sectordatasize;
  1797. cmd++;
  1798. }
  1799. }
  1800.  
  1801. if (ops->oobbuf && (n == (cwperpage - 1)
  1802. || ops->mode != MTD_OOB_AUTO)) {
  1803. cmd->cmd = 0;
  1804. if (n == (cwperpage - 1)) {
  1805. /* Use NC10 for reading the
  1806. * last codeword!!!
  1807. */
  1808. cmd->src = NC10(MSM_NAND_FLASH_BUFFER) +
  1809. (512 - ((cwperpage - 1) << 2));
  1810. sectoroobsize = (cwperpage << 2);
  1811. if (ops->mode != MTD_OOB_AUTO)
  1812. sectoroobsize += 10;
  1813. } else {
  1814. if (n % 2 == 0) {
  1815. cmd->src =
  1816. NC01(MSM_NAND_FLASH_BUFFER)
  1817. + 516;
  1818. sectoroobsize = 10;
  1819. } else {
  1820. cmd->src =
  1821. NC10(MSM_NAND_FLASH_BUFFER)
  1822. + 516;
  1823. sectoroobsize = 10;
  1824. }
  1825. }
  1826. cmd->dst = oob_dma_addr_curr;
  1827. if (sectoroobsize < oob_len)
  1828. cmd->len = sectoroobsize;
  1829. else
  1830. cmd->len = oob_len;
  1831. oob_dma_addr_curr += cmd->len;
  1832. oob_len -= cmd->len;
  1833. if (cmd->len > 0)
  1834. cmd++;
  1835. }
  1836. }
  1837. /* ADM --> Default mux state (0xFC0) */
  1838. cmd->cmd = 0;
  1839. cmd->src = msm_virt_to_dma(chip,
  1840. &dma_buffer->data.adm_default_mux);
  1841. cmd->dst = EBI2_NAND_ADM_MUX;
  1842. cmd->len = 4;
  1843. cmd++;
  1844.  
  1845. if (!interleave_enable) {
  1846. cmd->cmd = 0;
  1847. cmd->src = msm_virt_to_dma(chip,
  1848. &dma_buffer->data.nc10_flash_dev_cmd_vld_default);
  1849. cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
  1850. cmd->len = 4;
  1851. cmd++;
  1852.  
  1853. cmd->cmd = 0;
  1854. cmd->src = msm_virt_to_dma(chip,
  1855. &dma_buffer->data.nc10_flash_dev_cmd1_default);
  1856. cmd->dst = NC10(MSM_NAND_DEV_CMD1);
  1857. cmd->len = 4;
  1858. cmd++;
  1859.  
  1860. cmd->cmd = 0;
  1861. cmd->src = msm_virt_to_dma(chip,
  1862. &dma_buffer->data.ebi2_cfg_default);
  1863. cmd->dst = EBI2_CFG_REG;
  1864. cmd->len = 4;
  1865. cmd++;
  1866. } else {
  1867. /* disable CS1 */
  1868. cmd->cmd = 0;
  1869. cmd->src = msm_virt_to_dma(chip,
  1870. &dma_buffer->data.default_ebi2_chip_select_cfg0);
  1871. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  1872. cmd->len = 4;
  1873. cmd++;
  1874. }
  1875.  
  1876. BUILD_BUG_ON(16 * 6 + 20 != ARRAY_SIZE(dma_buffer->cmd));
  1877. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  1878. dma_buffer->cmd[0].cmd |= CMD_OCB;
  1879. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  1880.  
  1881. dma_buffer->cmdptr =
  1882. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
  1883. | CMD_PTR_LP;
  1884.  
  1885. dsb();
  1886. msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
  1887. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  1888. &dma_buffer->cmdptr)));
  1889. dsb();
  1890.  
  1891. /* if any of the writes failed (0x10), or there
  1892. * was a protection violation (0x100), we lose
  1893. */
  1894. pageerr = rawerr = 0;
  1895. for (n = start_sector; n < cwperpage; n++) {
  1896. if (dma_buffer->data.result[n].flash_status & 0x110) {
  1897. rawerr = -EIO;
  1898. break;
  1899. }
  1900. }
  1901. if (rawerr) {
  1902. if (ops->datbuf && ops->mode != MTD_OOB_RAW) {
  1903. uint8_t *datbuf = ops->datbuf +
  1904. pages_read * mtd->writesize;
  1905.  
  1906. dma_sync_single_for_cpu(chip->dev,
  1907. data_dma_addr_curr-mtd->writesize,
  1908. mtd->writesize, DMA_BIDIRECTIONAL);
  1909.  
  1910. for (n = 0; n < mtd->writesize; n++) {
  1911. /* empty blocks read 0x54 at
  1912. * these offsets
  1913. */
  1914. if (n % 516 == 3 && datbuf[n] == 0x54)
  1915. datbuf[n] = 0xff;
  1916. if (datbuf[n] != 0xff) {
  1917. pageerr = rawerr;
  1918. break;
  1919. }
  1920. }
  1921.  
  1922. dma_sync_single_for_device(chip->dev,
  1923. data_dma_addr_curr-mtd->writesize,
  1924. mtd->writesize, DMA_BIDIRECTIONAL);
  1925.  
  1926. }
  1927. if (ops->oobbuf) {
  1928. for (n = 0; n < ops->ooblen; n++) {
  1929. if (ops->oobbuf[n] != 0xff) {
  1930. pageerr = rawerr;
  1931. break;
  1932. }
  1933. }
  1934. }
  1935. }
  1936. if (pageerr) {
  1937. for (n = start_sector; n < cwperpage; n++) {
  1938. if (dma_buffer->data.result[n].buffer_status
  1939. & 0x8) {
  1940. /* not thread safe */
  1941. mtd->ecc_stats.failed++;
  1942. pageerr = -EBADMSG;
  1943. break;
  1944. }
  1945. }
  1946. }
  1947. if (!rawerr) { /* check for corretable errors */
  1948. for (n = start_sector; n < cwperpage; n++) {
  1949. ecc_errors = dma_buffer->data.
  1950. result[n].buffer_status & 0x7;
  1951. if (ecc_errors) {
  1952. total_ecc_errors += ecc_errors;
  1953. /* not thread safe */
  1954. mtd->ecc_stats.corrected += ecc_errors;
  1955. if (ecc_errors > 1)
  1956. pageerr = -EUCLEAN;
  1957. }
  1958. }
  1959. }
  1960. if (pageerr && (pageerr != -EUCLEAN || err == 0))
  1961. err = pageerr;
  1962.  
  1963. #if VERBOSE
  1964. if (rawerr && !pageerr) {
  1965. pr_err("msm_nand_read_oob_dualnandc "
  1966. "%llx %x %x empty page\n",
  1967. (loff_t)page * mtd->writesize, ops->len,
  1968. ops->ooblen);
  1969. } else if (!interleave_enable) {
  1970. pr_info("status: %x %x %x %x %x %x %x %x %x \
  1971. %x %x %x %x %x %x %x \n",
  1972. dma_buffer->data.result[0].flash_status,
  1973. dma_buffer->data.result[0].buffer_status,
  1974. dma_buffer->data.result[1].flash_status,
  1975. dma_buffer->data.result[1].buffer_status,
  1976. dma_buffer->data.result[2].flash_status,
  1977. dma_buffer->data.result[2].buffer_status,
  1978. dma_buffer->data.result[3].flash_status,
  1979. dma_buffer->data.result[3].buffer_status,
  1980. dma_buffer->data.result[4].flash_status,
  1981. dma_buffer->data.result[4].buffer_status,
  1982. dma_buffer->data.result[5].flash_status,
  1983. dma_buffer->data.result[5].buffer_status,
  1984. dma_buffer->data.result[6].flash_status,
  1985. dma_buffer->data.result[6].buffer_status,
  1986. dma_buffer->data.result[7].flash_status,
  1987. dma_buffer->data.result[7].buffer_status);
  1988. } else {
  1989. pr_info("status: %x %x %x %x %x %x %x %x %x \
  1990. %x %x %x %x %x %x %x \
  1991. %x %x %x %x %x %x %x %x %x \
  1992. %x %x %x %x %x %x %x \n",
  1993. dma_buffer->data.result[0].flash_status,
  1994. dma_buffer->data.result[0].buffer_status,
  1995. dma_buffer->data.result[1].flash_status,
  1996. dma_buffer->data.result[1].buffer_status,
  1997. dma_buffer->data.result[2].flash_status,
  1998. dma_buffer->data.result[2].buffer_status,
  1999. dma_buffer->data.result[3].flash_status,
  2000. dma_buffer->data.result[3].buffer_status,
  2001. dma_buffer->data.result[4].flash_status,
  2002. dma_buffer->data.result[4].buffer_status,
  2003. dma_buffer->data.result[5].flash_status,
  2004. dma_buffer->data.result[5].buffer_status,
  2005. dma_buffer->data.result[6].flash_status,
  2006. dma_buffer->data.result[6].buffer_status,
  2007. dma_buffer->data.result[7].flash_status,
  2008. dma_buffer->data.result[7].buffer_status,
  2009. dma_buffer->data.result[8].flash_status,
  2010. dma_buffer->data.result[8].buffer_status,
  2011. dma_buffer->data.result[9].flash_status,
  2012. dma_buffer->data.result[9].buffer_status,
  2013. dma_buffer->data.result[10].flash_status,
  2014. dma_buffer->data.result[10].buffer_status,
  2015. dma_buffer->data.result[11].flash_status,
  2016. dma_buffer->data.result[11].buffer_status,
  2017. dma_buffer->data.result[12].flash_status,
  2018. dma_buffer->data.result[12].buffer_status,
  2019. dma_buffer->data.result[13].flash_status,
  2020. dma_buffer->data.result[13].buffer_status,
  2021. dma_buffer->data.result[14].flash_status,
  2022. dma_buffer->data.result[14].buffer_status,
  2023. dma_buffer->data.result[15].flash_status,
  2024. dma_buffer->data.result[15].buffer_status);
  2025.  
  2026. }
  2027. #endif
  2028. if (err && err != -EUCLEAN && err != -EBADMSG)
  2029. break;
  2030. pages_read++;
  2031. page++;
  2032. }
  2033.  
  2034. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  2035.  
  2036. if (ops->oobbuf) {
  2037. dma_unmap_page(chip->dev, oob_dma_addr,
  2038. ops->ooblen, DMA_FROM_DEVICE);
  2039. }
  2040. err_dma_map_oobbuf_failed:
  2041. if (ops->datbuf) {
  2042. dma_unmap_page(chip->dev, data_dma_addr,
  2043. ops->len, DMA_BIDIRECTIONAL);
  2044. }
  2045.  
  2046. if (ops->mode != MTD_OOB_RAW)
  2047. ops->retlen = mtd->writesize * pages_read;
  2048. else
  2049. ops->retlen = (mtd->writesize + mtd->oobsize) *
  2050. pages_read;
  2051. ops->oobretlen = ops->ooblen - oob_len;
  2052. if (err)
  2053. pr_err("msm_nand_read_oob_dualnandc "
  2054. "%llx %x %x failed %d, corrected %d\n",
  2055. from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
  2056. total_ecc_errors);
  2057. return err;
  2058. }
  2059.  
  2060. static int
  2061. msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
  2062. size_t *retlen, u_char *buf)
  2063. {
  2064. int ret;
  2065. struct mtd_oob_ops ops;
  2066. uint8_t *org_buf = NULL;
  2067. size_t org_len = len;
  2068. loff_t org_from = from;
  2069.  
  2070. /* printk("msm_nand_read %llx %x\n", from, len); */
  2071.  
  2072. ops.mode = MTD_OOB_PLACE;
  2073. ops.len = len;
  2074. ops.retlen = 0;
  2075. ops.ooblen = 0;
  2076. ops.datbuf = buf;
  2077. ops.oobbuf = NULL;
  2078.  
  2079. /* support for non page alligned read */
  2080. if (ops.datbuf != NULL && (ops.len % mtd->writesize) != 0) {
  2081. ops.len += mtd->writesize;
  2082. ops.len ^= ops.len & (mtd->writesize - 1);
  2083. org_buf = ops.datbuf;
  2084. from ^= from & (mtd->writesize - 1);
  2085. ops.datbuf = kmalloc(ops.len, GFP_KERNEL);
  2086. if (!ops.datbuf){
  2087. ops.datbuf = org_buf;
  2088. org_buf = NULL;
  2089. ops.len = org_len;
  2090. pr_err("%s: allocation of temporary buffer has failed",
  2091. __func__);
  2092. }
  2093. }
  2094.  
  2095.  
  2096. if (!dual_nand_ctlr_present)
  2097. ret = msm_nand_read_oob(mtd, from, &ops);
  2098. else
  2099. ret = msm_nand_read_oob_dualnandc(mtd, from, &ops);
  2100.  
  2101. if (org_buf)
  2102. {
  2103. memcpy(org_buf, ops.datbuf + (org_from - from), org_len);
  2104. ops.len = org_len;
  2105. kfree(ops.datbuf);
  2106. ops.datbuf = org_buf;
  2107. ops.retlen = org_len;
  2108. }
  2109.  
  2110. *retlen = ops.retlen;
  2111. return ret;
  2112. }
  2113.  
  2114. static int
  2115. msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
  2116. {
  2117. struct msm_nand_chip *chip = mtd->priv;
  2118. struct {
  2119. dmov_s cmd[8 * 7 + 2];
  2120. unsigned cmdptr;
  2121. struct {
  2122. uint32_t cmd;
  2123. uint32_t addr0;
  2124. uint32_t addr1;
  2125. uint32_t chipsel;
  2126. uint32_t cfg0;
  2127. uint32_t cfg1;
  2128. uint32_t exec;
  2129. uint32_t ecccfg;
  2130. uint32_t clrfstatus;
  2131. uint32_t clrrstatus;
  2132. uint32_t flash_status[8];
  2133. } data;
  2134. } *dma_buffer;
  2135. dmov_s *cmd;
  2136. unsigned n;
  2137. unsigned page = 0;
  2138. uint32_t oob_len;
  2139. uint32_t sectordatawritesize;
  2140. int err;
  2141. dma_addr_t data_dma_addr = 0;
  2142. dma_addr_t oob_dma_addr = 0;
  2143. dma_addr_t data_dma_addr_curr = 0;
  2144. dma_addr_t oob_dma_addr_curr = 0;
  2145. unsigned page_count;
  2146. unsigned pages_written = 0;
  2147. unsigned cwperpage;
  2148.  
  2149. if (mtd->writesize == 2048)
  2150. page = to >> 11;
  2151.  
  2152. if (mtd->writesize == 4096)
  2153. page = to >> 12;
  2154.  
  2155. oob_len = ops->ooblen;
  2156. cwperpage = (mtd->writesize >> 9);
  2157.  
  2158. if (to & (mtd->writesize - 1)) {
  2159. pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
  2160. return -EINVAL;
  2161. }
  2162.  
  2163. if (ops->mode != MTD_OOB_RAW) {
  2164. if (ops->ooblen != 0 && ops->mode != MTD_OOB_AUTO) {
  2165. pr_err("%s: unsupported ops->mode,%d\n",
  2166. __func__, ops->mode);
  2167. return -EINVAL;
  2168. }
  2169. if ((ops->len % mtd->writesize) != 0) {
  2170. pr_err("%s: unsupported ops->len, %d\n",
  2171. __func__, ops->len);
  2172. return -EINVAL;
  2173. }
  2174. } else {
  2175. if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  2176. pr_err("%s: unsupported ops->len, "
  2177. "%d for MTD_OOB_RAW mode\n",
  2178. __func__, ops->len);
  2179. return -EINVAL;
  2180. }
  2181. }
  2182.  
  2183. if (ops->datbuf == NULL) {
  2184. pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
  2185. return -EINVAL;
  2186. }
  2187. #if 0 /* yaffs writes more oob data than it needs */
  2188. if (ops->ooblen >= sectoroobsize * 4) {
  2189. pr_err("%s: unsupported ops->ooblen, %d\n",
  2190. __func__, ops->ooblen);
  2191. return -EINVAL;
  2192. }
  2193. #endif
  2194. if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  2195. pr_err("%s: unsupported ops->ooboffs, %d\n",
  2196. __func__, ops->ooboffs);
  2197. return -EINVAL;
  2198. }
  2199.  
  2200. if (ops->datbuf) {
  2201. data_dma_addr_curr = data_dma_addr =
  2202. msm_nand_dma_map(chip->dev, ops->datbuf,
  2203. ops->len, DMA_TO_DEVICE);
  2204. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  2205. pr_err("msm_nand_write_oob: failed to get dma addr "
  2206. "for %p\n", ops->datbuf);
  2207. return -EIO;
  2208. }
  2209. }
  2210. if (ops->oobbuf) {
  2211. oob_dma_addr_curr = oob_dma_addr =
  2212. msm_nand_dma_map(chip->dev, ops->oobbuf,
  2213. ops->ooblen, DMA_TO_DEVICE);
  2214. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  2215. pr_err("msm_nand_write_oob: failed to get dma addr "
  2216. "for %p\n", ops->oobbuf);
  2217. err = -EIO;
  2218. goto err_dma_map_oobbuf_failed;
  2219. }
  2220. }
  2221. if (ops->mode != MTD_OOB_RAW)
  2222. page_count = ops->len / mtd->writesize;
  2223. else
  2224. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  2225.  
  2226. wait_event(chip->wait_queue, (dma_buffer =
  2227. msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
  2228.  
  2229. while (page_count-- > 0) {
  2230. cmd = dma_buffer->cmd;
  2231.  
  2232. /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
  2233. if (ops->mode != MTD_OOB_RAW) {
  2234. dma_buffer->data.cfg0 = chip->CFG0;
  2235. dma_buffer->data.cfg1 = chip->CFG1;
  2236. } else {
  2237. dma_buffer->data.cfg0 = (MSM_NAND_CFG0_RAW &
  2238. ~(7U << 6)) | ((cwperpage-1) << 6);
  2239. dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW |
  2240. (chip->CFG1 & CFG1_WIDE_FLASH);
  2241. }
  2242.  
  2243. dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
  2244. dma_buffer->data.addr0 = page << 16;
  2245. dma_buffer->data.addr1 = (page >> 16) & 0xff;
  2246. dma_buffer->data.chipsel = 0 | 4; /* flash0 + undoc bit */
  2247.  
  2248.  
  2249. /* GO bit for the EXEC register */
  2250. dma_buffer->data.exec = 1;
  2251. dma_buffer->data.clrfstatus = 0x00000020;
  2252. dma_buffer->data.clrrstatus = 0x000000C0;
  2253.  
  2254. BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.flash_status));
  2255.  
  2256. for (n = 0; n < cwperpage ; n++) {
  2257. /* status return words */
  2258. dma_buffer->data.flash_status[n] = 0xeeeeeeee;
  2259. /* block on cmd ready, then
  2260. * write CMD / ADDR0 / ADDR1 / CHIPSEL regs in a burst
  2261. */
  2262. cmd->cmd = DST_CRCI_NAND_CMD;
  2263. cmd->src =
  2264. msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  2265. cmd->dst = MSM_NAND_FLASH_CMD;
  2266. if (n == 0)
  2267. cmd->len = 16;
  2268. else
  2269. cmd->len = 4;
  2270. cmd++;
  2271.  
  2272. if (n == 0) {
  2273. cmd->cmd = 0;
  2274. cmd->src = msm_virt_to_dma(chip,
  2275. &dma_buffer->data.cfg0);
  2276. cmd->dst = MSM_NAND_DEV0_CFG0;
  2277. cmd->len = 8;
  2278. cmd++;
  2279.  
  2280. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  2281. cmd->cmd = 0;
  2282. cmd->src = msm_virt_to_dma(chip,
  2283. &dma_buffer->data.ecccfg);
  2284. cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
  2285. cmd->len = 4;
  2286. cmd++;
  2287. }
  2288.  
  2289. /* write data block */
  2290. if (ops->mode != MTD_OOB_RAW)
  2291. sectordatawritesize = (n < (cwperpage - 1)) ?
  2292. 516 : (512 - ((cwperpage - 1) << 2));
  2293. else
  2294. sectordatawritesize = 528;
  2295.  
  2296. cmd->cmd = 0;
  2297. cmd->src = data_dma_addr_curr;
  2298. data_dma_addr_curr += sectordatawritesize;
  2299. cmd->dst = MSM_NAND_FLASH_BUFFER;
  2300. cmd->len = sectordatawritesize;
  2301. cmd++;
  2302.  
  2303. if (ops->oobbuf) {
  2304. if (n == (cwperpage - 1)) {
  2305. cmd->cmd = 0;
  2306. cmd->src = oob_dma_addr_curr;
  2307. cmd->dst = MSM_NAND_FLASH_BUFFER +
  2308. (512 - ((cwperpage - 1) << 2));
  2309. if ((cwperpage << 2) < oob_len)
  2310. cmd->len = (cwperpage << 2);
  2311. else
  2312. cmd->len = oob_len;
  2313. oob_dma_addr_curr += cmd->len;
  2314. oob_len -= cmd->len;
  2315. if (cmd->len > 0)
  2316. cmd++;
  2317. }
  2318. if (ops->mode != MTD_OOB_AUTO) {
  2319. /* skip ecc bytes in oobbuf */
  2320. if (oob_len < 10) {
  2321. oob_dma_addr_curr += 10;
  2322. oob_len -= 10;
  2323. } else {
  2324. oob_dma_addr_curr += oob_len;
  2325. oob_len = 0;
  2326. }
  2327. }
  2328. }
  2329.  
  2330. /* kick the execute register */
  2331. cmd->cmd = 0;
  2332. cmd->src =
  2333. msm_virt_to_dma(chip, &dma_buffer->data.exec);
  2334. cmd->dst = MSM_NAND_EXEC_CMD;
  2335. cmd->len = 4;
  2336. cmd++;
  2337.  
  2338. /* block on data ready, then
  2339. * read the status register
  2340. */
  2341. cmd->cmd = SRC_CRCI_NAND_DATA;
  2342. cmd->src = MSM_NAND_FLASH_STATUS;
  2343. cmd->dst = msm_virt_to_dma(chip,
  2344. &dma_buffer->data.flash_status[n]);
  2345. cmd->len = 4;
  2346. cmd++;
  2347.  
  2348. cmd->cmd = 0;
  2349. cmd->src = msm_virt_to_dma(chip,
  2350. &dma_buffer->data.clrfstatus);
  2351. cmd->dst = MSM_NAND_FLASH_STATUS;
  2352. cmd->len = 4;
  2353. cmd++;
  2354.  
  2355. cmd->cmd = 0;
  2356. cmd->src = msm_virt_to_dma(chip,
  2357. &dma_buffer->data.clrrstatus);
  2358. cmd->dst = MSM_NAND_READ_STATUS;
  2359. cmd->len = 4;
  2360. cmd++;
  2361.  
  2362. }
  2363.  
  2364. dma_buffer->cmd[0].cmd |= CMD_OCB;
  2365. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  2366. BUILD_BUG_ON(8 * 7 + 2 != ARRAY_SIZE(dma_buffer->cmd));
  2367. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  2368. dma_buffer->cmdptr =
  2369. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) |
  2370. CMD_PTR_LP;
  2371.  
  2372. dsb();
  2373. msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
  2374. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
  2375. msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  2376. dsb();
  2377.  
  2378. /* if any of the writes failed (0x10), or there was a
  2379. * protection violation (0x100), or the program success
  2380. * bit (0x80) is unset, we lose
  2381. */
  2382. err = 0;
  2383. for (n = 0; n < cwperpage; n++) {
  2384. if (dma_buffer->data.flash_status[n] & 0x110) {
  2385. err = -EIO;
  2386. break;
  2387. }
  2388. if (!(dma_buffer->data.flash_status[n] & 0x80)) {
  2389. err = -EIO;
  2390. break;
  2391. }
  2392. }
  2393.  
  2394. #if VERBOSE
  2395. pr_info("write pg %d: status: %x %x %x %x %x %x %x %x\n", page,
  2396. dma_buffer->data.flash_status[0],
  2397. dma_buffer->data.flash_status[1],
  2398. dma_buffer->data.flash_status[2],
  2399. dma_buffer->data.flash_status[3],
  2400. dma_buffer->data.flash_status[4],
  2401. dma_buffer->data.flash_status[5],
  2402. dma_buffer->data.flash_status[6],
  2403. dma_buffer->data.flash_status[7]);
  2404. #endif
  2405. if (err)
  2406. break;
  2407. pages_written++;
  2408. page++;
  2409. }
  2410. if (ops->mode != MTD_OOB_RAW)
  2411. ops->retlen = mtd->writesize * pages_written;
  2412. else
  2413. ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
  2414.  
  2415. ops->oobretlen = ops->ooblen - oob_len;
  2416.  
  2417. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  2418.  
  2419. if (ops->oobbuf)
  2420. dma_unmap_page(chip->dev, oob_dma_addr,
  2421. ops->ooblen, DMA_TO_DEVICE);
  2422. err_dma_map_oobbuf_failed:
  2423. if (ops->datbuf)
  2424. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  2425. DMA_TO_DEVICE);
  2426. if (err)
  2427. pr_err("msm_nand_write_oob %llx %x %x failed %d\n",
  2428. to, ops->len, ops->ooblen, err);
  2429. return err;
  2430. }
  2431.  
  2432. static int
  2433. msm_nand_write_oob_dualnandc(struct mtd_info *mtd, loff_t to,
  2434. struct mtd_oob_ops *ops)
  2435. {
  2436. struct msm_nand_chip *chip = mtd->priv;
  2437. struct {
  2438. dmov_s cmd[16 * 6 + 18];
  2439. unsigned cmdptr;
  2440. struct {
  2441. uint32_t cmd;
  2442. uint32_t nandc01_addr0;
  2443. uint32_t nandc10_addr0;
  2444. uint32_t nandc11_addr1;
  2445. uint32_t chipsel_cs0;
  2446. uint32_t chipsel_cs1;
  2447. uint32_t cfg0;
  2448. uint32_t cfg1;
  2449. uint32_t exec;
  2450. uint32_t ecccfg;
  2451. uint32_t ebi2_cfg;
  2452. uint32_t ebi2_chip_select_cfg0;
  2453. uint32_t adm_mux_data_ack_req_nc01;
  2454. uint32_t adm_mux_cmd_ack_req_nc01;
  2455. uint32_t adm_mux_data_ack_req_nc10;
  2456. uint32_t adm_mux_cmd_ack_req_nc10;
  2457. uint32_t adm_default_mux;
  2458. uint32_t default_ebi2_chip_select_cfg0;
  2459. uint32_t nc01_flash_dev_cmd_vld;
  2460. uint32_t nc10_flash_dev_cmd0;
  2461. uint32_t nc01_flash_dev_cmd_vld_default;
  2462. uint32_t nc10_flash_dev_cmd0_default;
  2463. uint32_t ebi2_cfg_default;
  2464. uint32_t flash_status[16];
  2465. uint32_t clrfstatus;
  2466. uint32_t clrrstatus;
  2467. } data;
  2468. } *dma_buffer;
  2469. dmov_s *cmd;
  2470. unsigned n;
  2471. unsigned page = 0;
  2472. uint32_t oob_len;
  2473. uint32_t sectordatawritesize;
  2474. int err;
  2475. dma_addr_t data_dma_addr = 0;
  2476. dma_addr_t oob_dma_addr = 0;
  2477. dma_addr_t data_dma_addr_curr = 0;
  2478. dma_addr_t oob_dma_addr_curr = 0;
  2479. unsigned page_count;
  2480. unsigned pages_written = 0;
  2481. unsigned cwperpage;
  2482.  
  2483. if (mtd->writesize == 2048)
  2484. page = to >> 11;
  2485.  
  2486. if (mtd->writesize == 4096)
  2487. page = to >> 12;
  2488.  
  2489. if (interleave_enable)
  2490. page = (to >> 1) >> 12;
  2491.  
  2492. oob_len = ops->ooblen;
  2493. cwperpage = (mtd->writesize >> 9);
  2494.  
  2495. if (to & (mtd->writesize - 1)) {
  2496. pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
  2497. return -EINVAL;
  2498. }
  2499.  
  2500. if (ops->mode != MTD_OOB_RAW) {
  2501. if (ops->ooblen != 0 && ops->mode != MTD_OOB_AUTO) {
  2502. pr_err("%s: unsupported ops->mode,%d\n",
  2503. __func__, ops->mode);
  2504. return -EINVAL;
  2505. }
  2506. if ((ops->len % mtd->writesize) != 0) {
  2507. pr_err("%s: unsupported ops->len, %d\n",
  2508. __func__, ops->len);
  2509. return -EINVAL;
  2510. }
  2511. } else {
  2512. if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  2513. pr_err("%s: unsupported ops->len, "
  2514. "%d for MTD_OOB_RAW mode\n",
  2515. __func__, ops->len);
  2516. return -EINVAL;
  2517. }
  2518. }
  2519.  
  2520. if (ops->datbuf == NULL) {
  2521. pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
  2522. return -EINVAL;
  2523. }
  2524.  
  2525. if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  2526. pr_err("%s: unsupported ops->ooboffs, %d\n",
  2527. __func__, ops->ooboffs);
  2528. return -EINVAL;
  2529. }
  2530.  
  2531. #if VERBOSE
  2532. pr_info("msm_nand_write_oob_dualnandc %llx %p %x %p %x\n",
  2533. to, ops->datbuf, ops->len, ops->oobbuf, ops->ooblen);
  2534. #endif
  2535.  
  2536. if (ops->datbuf) {
  2537. data_dma_addr_curr = data_dma_addr =
  2538. msm_nand_dma_map(chip->dev, ops->datbuf,
  2539. ops->len, DMA_TO_DEVICE);
  2540. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  2541. pr_err("msm_nand_write_oob_dualnandc:"
  2542. "failed to get dma addr "
  2543. "for %p\n", ops->datbuf);
  2544. return -EIO;
  2545. }
  2546. }
  2547. if (ops->oobbuf) {
  2548. oob_dma_addr_curr = oob_dma_addr =
  2549. msm_nand_dma_map(chip->dev, ops->oobbuf,
  2550. ops->ooblen, DMA_TO_DEVICE);
  2551. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  2552. pr_err("msm_nand_write_oob_dualnandc:"
  2553. "failed to get dma addr "
  2554. "for %p\n", ops->oobbuf);
  2555. err = -EIO;
  2556. goto err_dma_map_oobbuf_failed;
  2557. }
  2558. }
  2559. if (ops->mode != MTD_OOB_RAW)
  2560. page_count = ops->len / mtd->writesize;
  2561. else
  2562. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  2563.  
  2564. wait_event(chip->wait_queue, (dma_buffer =
  2565. msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
  2566.  
  2567. dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
  2568. dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
  2569. dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  2570. dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
  2571. dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  2572. dma_buffer->data.adm_default_mux = 0x00000FC0;
  2573. dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
  2574. dma_buffer->data.nc01_flash_dev_cmd_vld = 0x9;
  2575. dma_buffer->data.nc10_flash_dev_cmd0 = 0x1085D060;
  2576. dma_buffer->data.nc01_flash_dev_cmd_vld_default = 0x1D;
  2577. dma_buffer->data.nc10_flash_dev_cmd0_default = 0x1080D060;
  2578. dma_buffer->data.clrfstatus = 0x00000020;
  2579. dma_buffer->data.clrrstatus = 0x000000C0;
  2580.  
  2581. while (page_count-- > 0) {
  2582. cmd = dma_buffer->cmd;
  2583.  
  2584. if (ops->mode != MTD_OOB_RAW) {
  2585. dma_buffer->data.cfg0 = ((chip->CFG0 & ~(7U << 6))
  2586. | (1 << 4)) | ((((cwperpage >> 1)-1)) << 6);
  2587. dma_buffer->data.cfg1 = chip->CFG1;
  2588. } else {
  2589. dma_buffer->data.cfg0 = ((MSM_NAND_CFG0_RAW &
  2590. ~(7U << 6)) | (1<<4)) | (((cwperpage >> 1)-1) << 6);
  2591. dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW |
  2592. (chip->CFG1 & CFG1_WIDE_FLASH);
  2593. }
  2594.  
  2595. dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
  2596. dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
  2597. dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
  2598.  
  2599. /* GO bit for the EXEC register */
  2600. dma_buffer->data.exec = 1;
  2601.  
  2602. /* config ebi2 cfg reg ( 0xA000_0004 ) */
  2603. dma_buffer->data.ebi2_cfg = 0x4010080;
  2604. dma_buffer->data.ebi2_cfg_default = 0x4010000;
  2605.  
  2606. if (!interleave_enable) {
  2607. dma_buffer->data.nandc01_addr0 = (page << 16) | 0x0;
  2608. dma_buffer->data.nandc10_addr0 = (page << 16) | 0x108;
  2609. } else {
  2610. dma_buffer->data.nandc01_addr0 =
  2611. dma_buffer->data.nandc10_addr0 = (page << 16) | 0x0;
  2612. }
  2613. /* ADDR1 */
  2614. dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
  2615.  
  2616. BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.flash_status));
  2617.  
  2618. for (n = 0; n < cwperpage; n++) {
  2619. /* status return words */
  2620. dma_buffer->data.flash_status[n] = 0xeeeeeeee;
  2621.  
  2622. if (n == 0) {
  2623. if (!interleave_enable) {
  2624. /* config ebi2 cfg reg */
  2625. cmd->cmd = 0;
  2626. cmd->src = msm_virt_to_dma(chip,
  2627. &dma_buffer->data.ebi2_cfg);
  2628. cmd->dst = EBI2_CFG_REG;
  2629. cmd->len = 4;
  2630. cmd++;
  2631.  
  2632. cmd->cmd = 0;
  2633. cmd->src = msm_virt_to_dma(chip,
  2634. &dma_buffer->
  2635. data.nc01_flash_dev_cmd_vld);
  2636. cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
  2637. cmd->len = 4;
  2638. cmd++;
  2639.  
  2640. cmd->cmd = 0;
  2641. cmd->src = msm_virt_to_dma(chip,
  2642. &dma_buffer->data.nc10_flash_dev_cmd0);
  2643. cmd->dst = NC10(MSM_NAND_DEV_CMD0);
  2644. cmd->len = 4;
  2645. cmd++;
  2646.  
  2647. /* common settings for both NC01 & NC10
  2648. * NC01, NC10 --> ADDR1 / CHIPSEL
  2649. */
  2650. cmd->cmd = 0;
  2651. cmd->src = msm_virt_to_dma(chip,
  2652. &dma_buffer->data.nandc11_addr1);
  2653. cmd->dst = NC11(MSM_NAND_ADDR1);
  2654. cmd->len = 8;
  2655. cmd++;
  2656.  
  2657. cmd->cmd = 0;
  2658. cmd->src = msm_virt_to_dma(chip,
  2659. &dma_buffer->data.cfg0);
  2660. cmd->dst = NC11(MSM_NAND_DEV0_CFG0);
  2661. cmd->len = 8;
  2662. cmd++;
  2663. } else {
  2664. /* enable CS1 */
  2665. cmd->cmd = 0;
  2666. cmd->src = msm_virt_to_dma(chip,
  2667. &dma_buffer->
  2668. data.ebi2_chip_select_cfg0);
  2669. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  2670. cmd->len = 4;
  2671. cmd++;
  2672.  
  2673. /* NC11 --> ADDR1 */
  2674. cmd->cmd = 0;
  2675. cmd->src = msm_virt_to_dma(chip,
  2676. &dma_buffer->data.nandc11_addr1);
  2677. cmd->dst = NC11(MSM_NAND_ADDR1);
  2678. cmd->len = 4;
  2679. cmd++;
  2680.  
  2681. /* Enable CS0 for NC01 */
  2682. cmd->cmd = 0;
  2683. cmd->src = msm_virt_to_dma(chip,
  2684. &dma_buffer->data.chipsel_cs0);
  2685. cmd->dst =
  2686. NC01(MSM_NAND_FLASH_CHIP_SELECT);
  2687. cmd->len = 4;
  2688. cmd++;
  2689.  
  2690. /* Enable CS1 for NC10 */
  2691. cmd->cmd = 0;
  2692. cmd->src = msm_virt_to_dma(chip,
  2693. &dma_buffer->data.chipsel_cs1);
  2694. cmd->dst =
  2695. NC10(MSM_NAND_FLASH_CHIP_SELECT);
  2696. cmd->len = 4;
  2697. cmd++;
  2698.  
  2699. /* config DEV0_CFG0 & CFG1 for CS0 */
  2700. cmd->cmd = 0;
  2701. cmd->src = msm_virt_to_dma(chip,
  2702. &dma_buffer->data.cfg0);
  2703. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  2704. cmd->len = 8;
  2705. cmd++;
  2706.  
  2707. /* config DEV1_CFG0 & CFG1 for CS1 */
  2708. cmd->cmd = 0;
  2709. cmd->src = msm_virt_to_dma(chip,
  2710. &dma_buffer->data.cfg0);
  2711. cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
  2712. cmd->len = 8;
  2713. cmd++;
  2714. }
  2715.  
  2716. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  2717. cmd->cmd = 0;
  2718. cmd->src = msm_virt_to_dma(chip,
  2719. &dma_buffer->data.ecccfg);
  2720. cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
  2721. cmd->len = 4;
  2722. cmd++;
  2723.  
  2724. /* NC01 --> ADDR0 */
  2725. cmd->cmd = 0;
  2726. cmd->src = msm_virt_to_dma(chip,
  2727. &dma_buffer->data.nandc01_addr0);
  2728. cmd->dst = NC01(MSM_NAND_ADDR0);
  2729. cmd->len = 4;
  2730. cmd++;
  2731.  
  2732. /* NC10 --> ADDR0 */
  2733. cmd->cmd = 0;
  2734. cmd->src = msm_virt_to_dma(chip,
  2735. &dma_buffer->data.nandc10_addr0);
  2736. cmd->dst = NC10(MSM_NAND_ADDR0);
  2737. cmd->len = 4;
  2738. cmd++;
  2739. }
  2740.  
  2741. if (n % 2 == 0) {
  2742. /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
  2743. cmd->cmd = 0;
  2744. cmd->src = msm_virt_to_dma(chip,
  2745. &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
  2746. cmd->dst = EBI2_NAND_ADM_MUX;
  2747. cmd->len = 4;
  2748. cmd++;
  2749.  
  2750. /* CMD */
  2751. cmd->cmd = DST_CRCI_NAND_CMD;
  2752. cmd->src = msm_virt_to_dma(chip,
  2753. &dma_buffer->data.cmd);
  2754. cmd->dst = NC01(MSM_NAND_FLASH_CMD);
  2755. cmd->len = 4;
  2756. cmd++;
  2757. } else {
  2758. /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
  2759. cmd->cmd = 0;
  2760. cmd->src = msm_virt_to_dma(chip,
  2761. &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
  2762. cmd->dst = EBI2_NAND_ADM_MUX;
  2763. cmd->len = 4;
  2764. cmd++;
  2765.  
  2766. /* CMD */
  2767. cmd->cmd = DST_CRCI_NAND_CMD;
  2768. cmd->src = msm_virt_to_dma(chip,
  2769. &dma_buffer->data.cmd);
  2770. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  2771. cmd->len = 4;
  2772. cmd++;
  2773. }
  2774.  
  2775. if (ops->mode != MTD_OOB_RAW)
  2776. sectordatawritesize = (n < (cwperpage - 1)) ?
  2777. 516 : (512 - ((cwperpage - 1) << 2));
  2778. else
  2779. sectordatawritesize = 528;
  2780.  
  2781. cmd->cmd = 0;
  2782. cmd->src = data_dma_addr_curr;
  2783. data_dma_addr_curr += sectordatawritesize;
  2784.  
  2785. if (n % 2 == 0)
  2786. cmd->dst = NC01(MSM_NAND_FLASH_BUFFER);
  2787. else
  2788. cmd->dst = NC10(MSM_NAND_FLASH_BUFFER);
  2789. cmd->len = sectordatawritesize;
  2790. cmd++;
  2791.  
  2792. if (ops->oobbuf) {
  2793. if (n == (cwperpage - 1)) {
  2794. cmd->cmd = 0;
  2795. cmd->src = oob_dma_addr_curr;
  2796. cmd->dst = NC10(MSM_NAND_FLASH_BUFFER) +
  2797. (512 - ((cwperpage - 1) << 2));
  2798. if ((cwperpage << 2) < oob_len)
  2799. cmd->len = (cwperpage << 2);
  2800. else
  2801. cmd->len = oob_len;
  2802. oob_dma_addr_curr += cmd->len;
  2803. oob_len -= cmd->len;
  2804. if (cmd->len > 0)
  2805. cmd++;
  2806. }
  2807. if (ops->mode != MTD_OOB_AUTO) {
  2808. /* skip ecc bytes in oobbuf */
  2809. if (oob_len < 10) {
  2810. oob_dma_addr_curr += 10;
  2811. oob_len -= 10;
  2812. } else {
  2813. oob_dma_addr_curr += oob_len;
  2814. oob_len = 0;
  2815. }
  2816. }
  2817. }
  2818.  
  2819. if (n % 2 == 0) {
  2820. /* kick the NC01 execute register */
  2821. cmd->cmd = 0;
  2822. cmd->src = msm_virt_to_dma(chip,
  2823. &dma_buffer->data.exec);
  2824. cmd->dst = NC01(MSM_NAND_EXEC_CMD);
  2825. cmd->len = 4;
  2826. cmd++;
  2827. if (n != 0) {
  2828. /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
  2829. cmd->cmd = 0;
  2830. cmd->src = msm_virt_to_dma(chip,
  2831. &dma_buffer->
  2832. data.adm_mux_data_ack_req_nc01);
  2833. cmd->dst = EBI2_NAND_ADM_MUX;
  2834. cmd->len = 4;
  2835. cmd++;
  2836.  
  2837. /* block on data ready from NC10, then
  2838. * read the status register
  2839. */
  2840. cmd->cmd = SRC_CRCI_NAND_DATA;
  2841. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  2842. cmd->dst = msm_virt_to_dma(chip,
  2843. &dma_buffer->data.flash_status[n-1]);
  2844. cmd->len = 4;
  2845. cmd++;
  2846. }
  2847. } else {
  2848. /* kick the execute register */
  2849. cmd->cmd = 0;
  2850. cmd->src =
  2851. msm_virt_to_dma(chip, &dma_buffer->data.exec);
  2852. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  2853. cmd->len = 4;
  2854. cmd++;
  2855.  
  2856. /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
  2857. cmd->cmd = 0;
  2858. cmd->src = msm_virt_to_dma(chip,
  2859. &dma_buffer->data.adm_mux_data_ack_req_nc10);
  2860. cmd->dst = EBI2_NAND_ADM_MUX;
  2861. cmd->len = 4;
  2862. cmd++;
  2863.  
  2864. /* block on data ready from NC01, then
  2865. * read the status register
  2866. */
  2867. cmd->cmd = SRC_CRCI_NAND_DATA;
  2868. cmd->src = NC01(MSM_NAND_FLASH_STATUS);
  2869. cmd->dst = msm_virt_to_dma(chip,
  2870. &dma_buffer->data.flash_status[n-1]);
  2871. cmd->len = 4;
  2872. cmd++;
  2873. }
  2874. }
  2875.  
  2876. /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
  2877. cmd->cmd = 0;
  2878. cmd->src = msm_virt_to_dma(chip,
  2879. &dma_buffer->data.adm_mux_data_ack_req_nc01);
  2880. cmd->dst = EBI2_NAND_ADM_MUX;
  2881. cmd->len = 4;
  2882. cmd++;
  2883.  
  2884. /* we should process outstanding request */
  2885. /* block on data ready, then
  2886. * read the status register
  2887. */
  2888. cmd->cmd = SRC_CRCI_NAND_DATA;
  2889. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  2890. cmd->dst = msm_virt_to_dma(chip,
  2891. &dma_buffer->data.flash_status[n-1]);
  2892. cmd->len = 4;
  2893. cmd++;
  2894.  
  2895. cmd->cmd = 0;
  2896. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
  2897. cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
  2898. cmd->len = 4;
  2899. cmd++;
  2900.  
  2901. cmd->cmd = 0;
  2902. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
  2903. cmd->dst = NC11(MSM_NAND_READ_STATUS);
  2904. cmd->len = 4;
  2905. cmd++;
  2906.  
  2907. /* MASK DATA ACK/REQ --> NC01 (0xFC0)*/
  2908. cmd->cmd = 0;
  2909. cmd->src = msm_virt_to_dma(chip,
  2910. &dma_buffer->data.adm_default_mux);
  2911. cmd->dst = EBI2_NAND_ADM_MUX;
  2912. cmd->len = 4;
  2913. cmd++;
  2914.  
  2915. if (!interleave_enable) {
  2916. /* setting to defalut values back */
  2917. cmd->cmd = 0;
  2918. cmd->src = msm_virt_to_dma(chip,
  2919. &dma_buffer->data.nc01_flash_dev_cmd_vld_default);
  2920. cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
  2921. cmd->len = 4;
  2922. cmd++;
  2923.  
  2924. cmd->cmd = 0;
  2925. cmd->src = msm_virt_to_dma(chip,
  2926. &dma_buffer->data.nc10_flash_dev_cmd0_default);
  2927. cmd->dst = NC10(MSM_NAND_DEV_CMD0);
  2928. cmd->len = 4;
  2929. cmd++;
  2930.  
  2931. cmd->cmd = 0;
  2932. cmd->src = msm_virt_to_dma(chip,
  2933. &dma_buffer->data.ebi2_cfg_default);
  2934. cmd->dst = EBI2_CFG_REG;
  2935. cmd->len = 4;
  2936. cmd++;
  2937. } else {
  2938. /* disable CS1 */
  2939. cmd->cmd = 0;
  2940. cmd->src = msm_virt_to_dma(chip,
  2941. &dma_buffer->data.default_ebi2_chip_select_cfg0);
  2942. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  2943. cmd->len = 4;
  2944. cmd++;
  2945. }
  2946.  
  2947. dma_buffer->cmd[0].cmd |= CMD_OCB;
  2948. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  2949. BUILD_BUG_ON(16 * 6 + 18 != ARRAY_SIZE(dma_buffer->cmd));
  2950. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  2951. dma_buffer->cmdptr =
  2952. ((msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP);
  2953.  
  2954. dsb();
  2955. msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
  2956. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
  2957. msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  2958. dsb();
  2959.  
  2960. /* if any of the writes failed (0x10), or there was a
  2961. * protection violation (0x100), or the program success
  2962. * bit (0x80) is unset, we lose
  2963. */
  2964. err = 0;
  2965. for (n = 0; n < cwperpage; n++) {
  2966. if (dma_buffer->data.flash_status[n] & 0x110) {
  2967. err = -EIO;
  2968. break;
  2969. }
  2970. if (!(dma_buffer->data.flash_status[n] & 0x80)) {
  2971. err = -EIO;
  2972. break;
  2973. }
  2974. }
  2975. #if VERBOSE
  2976. if (!interleave_enable) {
  2977. pr_info("write pg %d: status: %x %x %x %x %x %x %x %x\n", page,
  2978. dma_buffer->data.flash_status[0],
  2979. dma_buffer->data.flash_status[1],
  2980. dma_buffer->data.flash_status[2],
  2981. dma_buffer->data.flash_status[3],
  2982. dma_buffer->data.flash_status[4],
  2983. dma_buffer->data.flash_status[5],
  2984. dma_buffer->data.flash_status[6],
  2985. dma_buffer->data.flash_status[7]);
  2986. } else {
  2987. pr_info("write pg %d: status: %x %x %x %x %x %x %x %x \
  2988. %x %x %x %x %x %x %x %x \n", page,
  2989. dma_buffer->data.flash_status[0],
  2990. dma_buffer->data.flash_status[1],
  2991. dma_buffer->data.flash_status[2],
  2992. dma_buffer->data.flash_status[3],
  2993. dma_buffer->data.flash_status[4],
  2994. dma_buffer->data.flash_status[5],
  2995. dma_buffer->data.flash_status[6],
  2996. dma_buffer->data.flash_status[7],
  2997. dma_buffer->data.flash_status[8],
  2998. dma_buffer->data.flash_status[9],
  2999. dma_buffer->data.flash_status[10],
  3000. dma_buffer->data.flash_status[11],
  3001. dma_buffer->data.flash_status[12],
  3002. dma_buffer->data.flash_status[13],
  3003. dma_buffer->data.flash_status[14],
  3004. dma_buffer->data.flash_status[15]);
  3005. }
  3006. #endif
  3007. if (err)
  3008. break;
  3009. pages_written++;
  3010. page++;
  3011. }
  3012. if (ops->mode != MTD_OOB_RAW)
  3013. ops->retlen = mtd->writesize * pages_written;
  3014. else
  3015. ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
  3016.  
  3017. ops->oobretlen = ops->ooblen - oob_len;
  3018.  
  3019. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  3020.  
  3021. if (ops->oobbuf)
  3022. dma_unmap_page(chip->dev, oob_dma_addr,
  3023. ops->ooblen, DMA_TO_DEVICE);
  3024. err_dma_map_oobbuf_failed:
  3025. if (ops->datbuf)
  3026. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  3027. DMA_TO_DEVICE);
  3028. if (err)
  3029. pr_err("msm_nand_write_oob_dualnandc %llx %x %x failed %d\n",
  3030. to, ops->len, ops->ooblen, err);
  3031. return err;
  3032. }
  3033.  
  3034. static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
  3035. size_t *retlen, const u_char *buf)
  3036. {
  3037. int ret;
  3038. struct mtd_oob_ops ops;
  3039.  
  3040. ops.mode = MTD_OOB_PLACE;
  3041. ops.len = len;
  3042. ops.retlen = 0;
  3043. ops.ooblen = 0;
  3044. ops.datbuf = (uint8_t *)buf;
  3045. ops.oobbuf = NULL;
  3046. if (!dual_nand_ctlr_present)
  3047. ret = msm_nand_write_oob(mtd, to, &ops);
  3048. else
  3049. ret = msm_nand_write_oob_dualnandc(mtd, to, &ops);
  3050. *retlen = ops.retlen;
  3051. return ret;
  3052. }
  3053.  
/*
 * msm_nand_erase - [MTD Interface] erase one block via the single
 * NAND controller.
 * @mtd:   MTD device structure
 * @instr: erase request; ->addr must be block-aligned and ->len must
 *         equal exactly one erasesize
 *
 * Drives the controller through a 6-entry ADM (data mover) command
 * list: program FLASH_CMD/ADDR/CHIP_SELECT, program DEV0_CFG0/CFG1,
 * kick EXEC_CMD, read back FLASH_STATUS, then clear the status
 * registers.  Returns 0 on success, -EIO if the status word shows an
 * op/MPU error or the erase-success bit is clear, -EINVAL on a
 * malformed request.
 */
static int
msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int err;
	struct msm_nand_chip *chip = mtd->priv;
	struct {
		dmov_s cmd[6];		/* ADM command list */
		unsigned cmdptr;	/* ADM command-pointer word */
		unsigned data[10];	/* register images / status readback */
	} *dma_buffer;
	unsigned page = 0;

	/* Byte address -> page number (2KB or 4KB page devices). */
	if (mtd->writesize == 2048)
		page = instr->addr >> 11;

	if (mtd->writesize == 4096)
		page = instr->addr >> 12;

	if (instr->addr & (mtd->erasesize - 1)) {
		pr_err("%s: unsupported erase address, 0x%llx\n",
		       __func__, instr->addr);
		return -EINVAL;
	}
	if (instr->len != mtd->erasesize) {
		pr_err("%s: unsupported erase len, %lld\n",
		       __func__, instr->len);
		return -EINVAL;
	}

	/* Sleep until a DMA bounce buffer for the command list is free. */
	wait_event(chip->wait_queue,
		   (dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer))));

	/* Register values consumed by the descriptors below. */
	dma_buffer->data[0] = MSM_NAND_CMD_BLOCK_ERASE;	/* FLASH_CMD */
	dma_buffer->data[1] = page;			/* ADDR0 */
	dma_buffer->data[2] = 0;			/* ADDR1 */
	dma_buffer->data[3] = 0 | 4;			/* CHIP_SELECT */
	dma_buffer->data[4] = 1;			/* EXEC_CMD kick */
	dma_buffer->data[5] = 0xeeeeeeee;		/* status poison */
	dma_buffer->data[6] = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
	dma_buffer->data[7] = chip->CFG1;
	dma_buffer->data[8] = 0x00000020;	/* FLASH_STATUS clear value */
	dma_buffer->data[9] = 0x000000C0;	/* READ_STATUS clear value */
	BUILD_BUG_ON(9 != ARRAY_SIZE(dma_buffer->data) - 1);

	/* 0: FLASH_CMD + ADDR0 + ADDR1 + CHIP_SELECT (4 words). */
	dma_buffer->cmd[0].cmd = DST_CRCI_NAND_CMD | CMD_OCB;
	dma_buffer->cmd[0].src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
	dma_buffer->cmd[0].dst = MSM_NAND_FLASH_CMD;
	dma_buffer->cmd[0].len = 16;

	/* 1: DEV0_CFG0 + DEV0_CFG1. */
	dma_buffer->cmd[1].cmd = 0;
	dma_buffer->cmd[1].src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
	dma_buffer->cmd[1].dst = MSM_NAND_DEV0_CFG0;
	dma_buffer->cmd[1].len = 8;

	/* 2: start the operation. */
	dma_buffer->cmd[2].cmd = 0;
	dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[4]);
	dma_buffer->cmd[2].dst = MSM_NAND_EXEC_CMD;
	dma_buffer->cmd[2].len = 4;

	/* 3: wait (CRCI flow control) and read back FLASH_STATUS. */
	dma_buffer->cmd[3].cmd = SRC_CRCI_NAND_DATA;
	dma_buffer->cmd[3].src = MSM_NAND_FLASH_STATUS;
	dma_buffer->cmd[3].dst = msm_virt_to_dma(chip, &dma_buffer->data[5]);
	dma_buffer->cmd[3].len = 4;

	/* 4-5: clear FLASH_STATUS and READ_STATUS for the next op. */
	dma_buffer->cmd[4].cmd = 0;
	dma_buffer->cmd[4].src = msm_virt_to_dma(chip, &dma_buffer->data[8]);
	dma_buffer->cmd[4].dst = MSM_NAND_FLASH_STATUS;
	dma_buffer->cmd[4].len = 4;

	dma_buffer->cmd[5].cmd = CMD_OCU | CMD_LC;
	dma_buffer->cmd[5].src = msm_virt_to_dma(chip, &dma_buffer->data[9]);
	dma_buffer->cmd[5].dst = MSM_NAND_READ_STATUS;
	dma_buffer->cmd[5].len = 4;

	BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd) - 1);

	dma_buffer->cmdptr =
		(msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;

	/* Barriers: make the buffer visible to the ADM, run the list
	 * synchronously, then make its writes visible to the CPU. */
	dsb();
	msm_dmov_exec_cmd(
		chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
	dsb();

	/* we fail if there was an operation error, a mpu error, or the
	 * erase success bit was not set.
	 */

	if (dma_buffer->data[5] & 0x110 || !(dma_buffer->data[5] & 0x80))
		err = -EIO;
	else
		err = 0;

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	if (err) {
		pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
		instr->fail_addr = instr->addr;
		instr->state = MTD_ERASE_FAILED;
	} else {
		instr->state = MTD_ERASE_DONE;
		instr->fail_addr = 0xffffffff;
		mtd_erase_callback(instr);
	}
	return err;
}
  3161.  
  3162. static int
  3163. msm_nand_erase_dualnandc(struct mtd_info *mtd, struct erase_info *instr)
  3164. {
  3165. int err;
  3166. struct msm_nand_chip *chip = mtd->priv;
  3167. struct {
  3168. dmov_s cmd[18];
  3169. unsigned cmdptr;
  3170. uint32_t ebi2_chip_select_cfg0;
  3171. uint32_t adm_mux_data_ack_req_nc01;
  3172. uint32_t adm_mux_cmd_ack_req_nc01;
  3173. uint32_t adm_mux_data_ack_req_nc10;
  3174. uint32_t adm_mux_cmd_ack_req_nc10;
  3175. uint32_t adm_default_mux;
  3176. uint32_t default_ebi2_chip_select_cfg0;
  3177. unsigned data[12];
  3178. } *dma_buffer;
  3179. unsigned page = 0;
  3180.  
  3181. if (mtd->writesize == 2048)
  3182. page = instr->addr >> 11;
  3183.  
  3184. if (mtd->writesize == 4096)
  3185. page = instr->addr >> 12;
  3186.  
  3187. if (mtd->writesize == 8192)
  3188. page = (instr->addr >> 1) >> 12;
  3189.  
  3190. if (instr->addr & (mtd->erasesize - 1)) {
  3191. pr_err("%s: unsupported erase address, 0x%llx\n",
  3192. __func__, instr->addr);
  3193. return -EINVAL;
  3194. }
  3195. if (instr->len != mtd->erasesize) {
  3196. pr_err("%s: unsupported erase len, %lld\n",
  3197. __func__, instr->len);
  3198. return -EINVAL;
  3199. }
  3200.  
  3201. wait_event(chip->wait_queue,
  3202. (dma_buffer = msm_nand_get_dma_buffer(
  3203. chip, sizeof(*dma_buffer))));
  3204.  
  3205. dma_buffer->data[0] = MSM_NAND_CMD_BLOCK_ERASE;
  3206. dma_buffer->data[1] = page;
  3207. dma_buffer->data[2] = 0;
  3208. dma_buffer->data[3] = (1<<4) | 4;
  3209. dma_buffer->data[4] = (1<<4) | 5;
  3210. dma_buffer->data[5] = 1;
  3211. dma_buffer->data[6] = 0xeeeeeeee;
  3212. dma_buffer->data[7] = 0xeeeeeeee;
  3213. dma_buffer->data[8] = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
  3214. dma_buffer->data[9] = chip->CFG1;
  3215. dma_buffer->data[10] = 0x00000020;
  3216. dma_buffer->data[11] = 0x000000C0;
  3217.  
  3218. dma_buffer->ebi2_chip_select_cfg0 = 0x00000805;
  3219. dma_buffer->adm_mux_data_ack_req_nc01 = 0x00000A3C;
  3220. dma_buffer->adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  3221. dma_buffer->adm_mux_data_ack_req_nc10 = 0x00000F28;
  3222. dma_buffer->adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  3223. dma_buffer->adm_default_mux = 0x00000FC0;
  3224. dma_buffer->default_ebi2_chip_select_cfg0 = 0x00000801;
  3225.  
  3226. BUILD_BUG_ON(11 != ARRAY_SIZE(dma_buffer->data) - 1);
  3227.  
  3228. /* enable CS1 */
  3229. dma_buffer->cmd[0].cmd = 0 | CMD_OCB;
  3230. dma_buffer->cmd[0].src = msm_virt_to_dma(chip,
  3231. &dma_buffer->ebi2_chip_select_cfg0);
  3232. dma_buffer->cmd[0].dst = EBI2_CHIP_SELECT_CFG0;
  3233. dma_buffer->cmd[0].len = 4;
  3234.  
  3235. /* erase CS0 block now !!! */
  3236. /* 0xF14 */
  3237. dma_buffer->cmd[1].cmd = 0;
  3238. dma_buffer->cmd[1].src = msm_virt_to_dma(chip,
  3239. &dma_buffer->adm_mux_cmd_ack_req_nc10);
  3240. dma_buffer->cmd[1].dst = EBI2_NAND_ADM_MUX;
  3241. dma_buffer->cmd[1].len = 4;
  3242.  
  3243. dma_buffer->cmd[2].cmd = DST_CRCI_NAND_CMD;
  3244. dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
  3245. dma_buffer->cmd[2].dst = NC01(MSM_NAND_FLASH_CMD);
  3246. dma_buffer->cmd[2].len = 16;
  3247.  
  3248. dma_buffer->cmd[3].cmd = 0;
  3249. dma_buffer->cmd[3].src = msm_virt_to_dma(chip, &dma_buffer->data[8]);
  3250. dma_buffer->cmd[3].dst = NC01(MSM_NAND_DEV0_CFG0);
  3251. dma_buffer->cmd[3].len = 8;
  3252.  
  3253. dma_buffer->cmd[4].cmd = 0;
  3254. dma_buffer->cmd[4].src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
  3255. dma_buffer->cmd[4].dst = NC01(MSM_NAND_EXEC_CMD);
  3256. dma_buffer->cmd[4].len = 4;
  3257.  
  3258. /* 0xF28 */
  3259. dma_buffer->cmd[5].cmd = 0;
  3260. dma_buffer->cmd[5].src = msm_virt_to_dma(chip,
  3261. &dma_buffer->adm_mux_data_ack_req_nc10);
  3262. dma_buffer->cmd[5].dst = EBI2_NAND_ADM_MUX;
  3263. dma_buffer->cmd[5].len = 4;
  3264.  
  3265. dma_buffer->cmd[6].cmd = SRC_CRCI_NAND_DATA;
  3266. dma_buffer->cmd[6].src = NC01(MSM_NAND_FLASH_STATUS);
  3267. dma_buffer->cmd[6].dst = msm_virt_to_dma(chip, &dma_buffer->data[6]);
  3268. dma_buffer->cmd[6].len = 4;
  3269.  
  3270. /* erase CS1 block now !!! */
  3271. /* 0x53C */
  3272. dma_buffer->cmd[7].cmd = 0;
  3273. dma_buffer->cmd[7].src = msm_virt_to_dma(chip,
  3274. &dma_buffer->adm_mux_cmd_ack_req_nc01);
  3275. dma_buffer->cmd[7].dst = EBI2_NAND_ADM_MUX;
  3276. dma_buffer->cmd[7].len = 4;
  3277.  
  3278. dma_buffer->cmd[8].cmd = DST_CRCI_NAND_CMD;
  3279. dma_buffer->cmd[8].src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
  3280. dma_buffer->cmd[8].dst = NC10(MSM_NAND_FLASH_CMD);
  3281. dma_buffer->cmd[8].len = 12;
  3282.  
  3283. dma_buffer->cmd[9].cmd = 0;
  3284. dma_buffer->cmd[9].src = msm_virt_to_dma(chip, &dma_buffer->data[4]);
  3285. dma_buffer->cmd[9].dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
  3286. dma_buffer->cmd[9].len = 4;
  3287.  
  3288. dma_buffer->cmd[10].cmd = 0;
  3289. dma_buffer->cmd[10].src = msm_virt_to_dma(chip, &dma_buffer->data[8]);
  3290. dma_buffer->cmd[10].dst = NC10(MSM_NAND_DEV1_CFG0);
  3291. dma_buffer->cmd[10].len = 8;
  3292.  
  3293. dma_buffer->cmd[11].cmd = 0;
  3294. dma_buffer->cmd[11].src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
  3295. dma_buffer->cmd[11].dst = NC10(MSM_NAND_EXEC_CMD);
  3296. dma_buffer->cmd[11].len = 4;
  3297.  
  3298. /* 0xA3C */
  3299. dma_buffer->cmd[12].cmd = 0;
  3300. dma_buffer->cmd[12].src = msm_virt_to_dma(chip,
  3301. &dma_buffer->adm_mux_data_ack_req_nc01);
  3302. dma_buffer->cmd[12].dst = EBI2_NAND_ADM_MUX;
  3303. dma_buffer->cmd[12].len = 4;
  3304.  
  3305. dma_buffer->cmd[13].cmd = SRC_CRCI_NAND_DATA;
  3306. dma_buffer->cmd[13].src = NC10(MSM_NAND_FLASH_STATUS);
  3307. dma_buffer->cmd[13].dst = msm_virt_to_dma(chip, &dma_buffer->data[7]);
  3308. dma_buffer->cmd[13].len = 4;
  3309.  
  3310. dma_buffer->cmd[14].cmd = 0;
  3311. dma_buffer->cmd[14].src = msm_virt_to_dma(chip, &dma_buffer->data[8]);
  3312. dma_buffer->cmd[14].dst = NC11(MSM_NAND_FLASH_STATUS);
  3313. dma_buffer->cmd[14].len = 4;
  3314.  
  3315. dma_buffer->cmd[15].cmd = 0;
  3316. dma_buffer->cmd[15].src = msm_virt_to_dma(chip, &dma_buffer->data[9]);
  3317. dma_buffer->cmd[15].dst = NC11(MSM_NAND_READ_STATUS);
  3318. dma_buffer->cmd[15].len = 4;
  3319.  
  3320. dma_buffer->cmd[16].cmd = 0;
  3321. dma_buffer->cmd[16].src = msm_virt_to_dma(chip,
  3322. &dma_buffer->adm_default_mux);
  3323. dma_buffer->cmd[16].dst = EBI2_NAND_ADM_MUX;
  3324. dma_buffer->cmd[16].len = 4;
  3325.  
  3326. /* disable CS1 */
  3327. dma_buffer->cmd[17].cmd = CMD_OCU | CMD_LC;
  3328. dma_buffer->cmd[17].src = msm_virt_to_dma(chip,
  3329. &dma_buffer->default_ebi2_chip_select_cfg0);
  3330. dma_buffer->cmd[17].dst = EBI2_CHIP_SELECT_CFG0;
  3331. dma_buffer->cmd[17].len = 4;
  3332.  
  3333. BUILD_BUG_ON(17 != ARRAY_SIZE(dma_buffer->cmd) - 1);
  3334.  
  3335. dma_buffer->cmdptr =
  3336. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
  3337.  
  3338. dsb();
  3339. msm_dmov_exec_cmd(
  3340. chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
  3341. DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  3342. dsb();
  3343.  
  3344. /* we fail if there was an operation error, a mpu error, or the
  3345. * erase success bit was not set.
  3346. */
  3347.  
  3348. if (dma_buffer->data[6] & 0x110 || !(dma_buffer->data[6] & 0x80)
  3349. || dma_buffer->data[6] & 0x110 || !(dma_buffer->data[6] & 0x80))
  3350. err = -EIO;
  3351. else
  3352. err = 0;
  3353.  
  3354. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  3355. if (err) {
  3356. pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
  3357. instr->fail_addr = instr->addr;
  3358. instr->state = MTD_ERASE_FAILED;
  3359. } else {
  3360. instr->state = MTD_ERASE_DONE;
  3361. instr->fail_addr = 0xffffffff;
  3362. mtd_erase_callback(instr);
  3363. }
  3364. return err;
  3365. }
  3366.  
/*
 * msm_nand_block_isbad - [MTD Interface] bad block check, single
 * NAND controller.
 * @mtd: MTD device structure
 * @ofs: block-aligned byte offset of the block to test
 *
 * Performs a raw (ECC-disabled) page read and examines 4 bytes at
 * the bad-block marker location in the last 528-byte codeword of the
 * block's first page.  Returns 1 if the marker is not 0xFF (block is
 * bad), 0 if it looks good, -EIO on a flash/MPU error and -EINVAL on
 * a malformed offset.
 */
static int
msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct msm_nand_chip *chip = mtd->priv;
	int ret;
	struct {
		dmov_s cmd[5];		/* ADM command list */
		unsigned cmdptr;	/* ADM command-pointer word */
		struct {
			uint32_t cmd;
			uint32_t addr0;
			uint32_t addr1;
			uint32_t chipsel;
			uint32_t cfg0;
			uint32_t cfg1;
			uint32_t exec;
			uint32_t ecccfg;
			struct {
				uint32_t flash_status;
				uint32_t buffer_status;
			} result;
		} data;
	} *dma_buffer;
	dmov_s *cmd;
	uint8_t *buf;	/* 4-byte marker landing area after dma_buffer */
	unsigned page = 0;
	unsigned cwperpage;

	/* Byte address -> page number (2KB or 4KB page devices). */
	if (mtd->writesize == 2048)
		page = ofs >> 11;

	if (mtd->writesize == 4096)
		page = ofs >> 12;

	cwperpage = (mtd->writesize >> 9);	/* 512B codewords per page */

	/* Check for invalid offset */
	/* NOTE(review): this admits ofs == mtd->size exactly; possibly
	 * '>=' was intended — same pattern in the sibling functions. */
	if (ofs > mtd->size)
		return -EINVAL;
	if (ofs & (mtd->erasesize - 1)) {
		pr_err("%s: unsupported block address, 0x%x ( & 0x%x )\n",
		       __func__, (uint32_t)ofs, mtd->erasesize - 1);
		return -EINVAL;
	}

	/* Allocate command list + 4 spare bytes for the marker read. */
	wait_event(chip->wait_queue,
		   (dma_buffer = msm_nand_get_dma_buffer(chip ,
				 sizeof(*dma_buffer) + 4)));
	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);

	/* Read 4 bytes starting from the bad block marker location
	 * in the last code word of the page
	 */

	cmd = dma_buffer->cmd;

	/* Raw read: ECC off, CW_PER_PAGE forced to 0 (one codeword). */
	dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
	dma_buffer->data.cfg0 = MSM_NAND_CFG0_RAW & ~(7U << 6);
	dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW |
			(chip->CFG1 & CFG1_WIDE_FLASH);

	/* Column address of the last codeword; halved for 16-bit flash
	 * because the column is in words there. */
	if (chip->CFG1 & CFG1_WIDE_FLASH)
		dma_buffer->data.addr0 = (page << 16) |
			((528*(cwperpage-1)) >> 1);
	else
		dma_buffer->data.addr0 = (page << 16) |
			(528*(cwperpage-1));

	dma_buffer->data.addr1 = (page >> 16) & 0xff;
	dma_buffer->data.chipsel = 0 | 4;

	dma_buffer->data.exec = 1;

	/* Poison the status words so stale data can't read as success. */
	dma_buffer->data.result.flash_status = 0xeeeeeeee;
	dma_buffer->data.result.buffer_status = 0xeeeeeeee;

	/* 1: FLASH_CMD + ADDR0 + ADDR1 + CHIP_SELECT (4 words). */
	cmd->cmd = DST_CRCI_NAND_CMD;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
	cmd->dst = MSM_NAND_FLASH_CMD;
	cmd->len = 16;
	cmd++;

	/* 2: DEV0_CFG0 + DEV0_CFG1 (raw, ECC disabled). */
	cmd->cmd = 0;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
	cmd->dst = MSM_NAND_DEV0_CFG0;
	cmd->len = 8;
	cmd++;

	/* 3: start the read. */
	cmd->cmd = 0;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
	cmd->dst = MSM_NAND_EXEC_CMD;
	cmd->len = 4;
	cmd++;

	/* 4: wait for completion, read FLASH_STATUS + BUFFER_STATUS. */
	cmd->cmd = SRC_CRCI_NAND_DATA;
	cmd->src = MSM_NAND_FLASH_STATUS;
	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result);
	cmd->len = 8;
	cmd++;

	/* 5: copy 4 bytes at the marker offset out of the flash buffer. */
	cmd->cmd = 0;
	cmd->src = MSM_NAND_FLASH_BUFFER +
		(mtd->writesize - (528*(cwperpage-1)));
	cmd->dst = msm_virt_to_dma(chip, buf);
	cmd->len = 4;
	cmd++;

	BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd));
	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->cmd[0].cmd |= CMD_OCB;
	cmd[-1].cmd |= CMD_OCU | CMD_LC;

	dma_buffer->cmdptr = (msm_virt_to_dma(chip,
			dma_buffer->cmd) >> 3) | CMD_PTR_LP;

	dsb();
	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
	dsb();

	/* Op error (0x10) or MPU error (0x100) means the read failed. */
	ret = 0;
	if (dma_buffer->data.result.flash_status & 0x110)
		ret = -EIO;

	if (!ret) {
		/* Check for bad block marker byte */
		if (chip->CFG1 & CFG1_WIDE_FLASH) {
			if (buf[0] != 0xFF || buf[1] != 0xFF)
				ret = 1;
		} else {
			if (buf[0] != 0xFF)
				ret = 1;
		}
	}

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
	return ret;
}
  3505.  
  3506. static int
  3507. msm_nand_block_isbad_dualnandc(struct mtd_info *mtd, loff_t ofs)
  3508. {
  3509. struct msm_nand_chip *chip = mtd->priv;
  3510. int ret;
  3511. struct {
  3512. dmov_s cmd[18];
  3513. unsigned cmdptr;
  3514. struct {
  3515. uint32_t cmd;
  3516. uint32_t addr0;
  3517. uint32_t addr1;
  3518. uint32_t chipsel_cs0;
  3519. uint32_t chipsel_cs1;
  3520. uint32_t cfg0;
  3521. uint32_t cfg1;
  3522. uint32_t exec;
  3523. uint32_t ecccfg;
  3524. uint32_t ebi2_chip_select_cfg0;
  3525. uint32_t adm_mux_data_ack_req_nc01;
  3526. uint32_t adm_mux_cmd_ack_req_nc01;
  3527. uint32_t adm_mux_data_ack_req_nc10;
  3528. uint32_t adm_mux_cmd_ack_req_nc10;
  3529. uint32_t adm_default_mux;
  3530. uint32_t default_ebi2_chip_select_cfg0;
  3531. struct {
  3532. uint32_t flash_status;
  3533. uint32_t buffer_status;
  3534. } result[2];
  3535. } data;
  3536. } *dma_buffer;
  3537. dmov_s *cmd;
  3538. uint8_t *buf01;
  3539. uint8_t *buf10;
  3540. unsigned page = 0;
  3541. unsigned cwperpage;
  3542.  
  3543. if (mtd->writesize == 2048)
  3544. page = ofs >> 11;
  3545.  
  3546. if (mtd->writesize == 4096)
  3547. page = ofs >> 12;
  3548.  
  3549. if (mtd->writesize == 8192)
  3550. page = (ofs >> 1) >> 12;
  3551.  
  3552. cwperpage = ((mtd->writesize >> 1) >> 9);
  3553.  
  3554. /* Check for invalid offset */
  3555. if (ofs > mtd->size)
  3556. return -EINVAL;
  3557. if (ofs & (mtd->erasesize - 1)) {
  3558. pr_err("%s: unsupported block address, 0x%x\n",
  3559. __func__, (uint32_t)ofs);
  3560. return -EINVAL;
  3561. }
  3562.  
  3563. wait_event(chip->wait_queue,
  3564. (dma_buffer = msm_nand_get_dma_buffer(chip ,
  3565. sizeof(*dma_buffer) + 8)));
  3566. buf01 = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
  3567. buf10 = buf01 + 4;
  3568.  
  3569. /* Read 4 bytes starting from the bad block marker location
  3570. * in the last code word of the page
  3571. */
  3572. cmd = dma_buffer->cmd;
  3573.  
  3574. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
  3575. dma_buffer->data.cfg0 = MSM_NAND_CFG0_RAW & ~(7U << 6);
  3576. dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW |
  3577. (chip->CFG1 & CFG1_WIDE_FLASH);
  3578.  
  3579. if (chip->CFG1 & CFG1_WIDE_FLASH)
  3580. dma_buffer->data.addr0 = (page << 16) |
  3581. ((528*(cwperpage-1)) >> 1);
  3582. else
  3583. dma_buffer->data.addr0 = (page << 16) |
  3584. (528*(cwperpage-1));
  3585.  
  3586. dma_buffer->data.addr1 = (page >> 16) & 0xff;
  3587. dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
  3588. dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
  3589.  
  3590. dma_buffer->data.exec = 1;
  3591.  
  3592. dma_buffer->data.result[0].flash_status = 0xeeeeeeee;
  3593. dma_buffer->data.result[0].buffer_status = 0xeeeeeeee;
  3594. dma_buffer->data.result[1].flash_status = 0xeeeeeeee;
  3595. dma_buffer->data.result[1].buffer_status = 0xeeeeeeee;
  3596.  
  3597. dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
  3598. dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
  3599. dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  3600. dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
  3601. dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  3602. dma_buffer->data.adm_default_mux = 0x00000FC0;
  3603. dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
  3604.  
  3605. /* Reading last code word from NC01 */
  3606. /* enable CS1 */
  3607. cmd->cmd = 0;
  3608. cmd->src = msm_virt_to_dma(chip,
  3609. &dma_buffer->data.ebi2_chip_select_cfg0);
  3610. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  3611. cmd->len = 4;
  3612. cmd++;
  3613.  
  3614. /* 0xF14 */
  3615. cmd->cmd = 0;
  3616. cmd->src = msm_virt_to_dma(chip,
  3617. &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
  3618. cmd->dst = EBI2_NAND_ADM_MUX;
  3619. cmd->len = 4;
  3620. cmd++;
  3621.  
  3622. cmd->cmd = DST_CRCI_NAND_CMD;
  3623. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  3624. cmd->dst = NC01(MSM_NAND_FLASH_CMD);
  3625. cmd->len = 16;
  3626. cmd++;
  3627.  
  3628. cmd->cmd = 0;
  3629. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  3630. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  3631. cmd->len = 8;
  3632. cmd++;
  3633.  
  3634. cmd->cmd = 0;
  3635. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  3636. cmd->dst = NC01(MSM_NAND_EXEC_CMD);
  3637. cmd->len = 4;
  3638. cmd++;
  3639.  
  3640. /* 0xF28 */
  3641. cmd->cmd = 0;
  3642. cmd->src = msm_virt_to_dma(chip,
  3643. &dma_buffer->data.adm_mux_data_ack_req_nc10);
  3644. cmd->dst = EBI2_NAND_ADM_MUX;
  3645. cmd->len = 4;
  3646. cmd++;
  3647.  
  3648. cmd->cmd = SRC_CRCI_NAND_DATA;
  3649. cmd->src = NC01(MSM_NAND_FLASH_STATUS);
  3650. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[0]);
  3651. cmd->len = 8;
  3652. cmd++;
  3653.  
  3654. cmd->cmd = 0;
  3655. cmd->src = NC01(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
  3656. (528*(cwperpage-1)));
  3657. cmd->dst = msm_virt_to_dma(chip, buf01);
  3658. cmd->len = 4;
  3659. cmd++;
  3660.  
  3661. /* Reading last code word from NC10 */
  3662. /* 0x53C */
  3663. cmd->cmd = 0;
  3664. cmd->src = msm_virt_to_dma(chip,
  3665. &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
  3666. cmd->dst = EBI2_NAND_ADM_MUX;
  3667. cmd->len = 4;
  3668. cmd++;
  3669.  
  3670. cmd->cmd = DST_CRCI_NAND_CMD;
  3671. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  3672. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  3673. cmd->len = 12;
  3674. cmd++;
  3675.  
  3676. cmd->cmd = 0;
  3677. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
  3678. cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
  3679. cmd->len = 4;
  3680. cmd++;
  3681.  
  3682. cmd->cmd = 0;
  3683. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  3684. cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
  3685. cmd->len = 8;
  3686. cmd++;
  3687.  
  3688. cmd->cmd = 0;
  3689. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  3690. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  3691. cmd->len = 4;
  3692. cmd++;
  3693.  
  3694. /* A3C */
  3695. cmd->cmd = 0;
  3696. cmd->src = msm_virt_to_dma(chip,
  3697. &dma_buffer->data.adm_mux_data_ack_req_nc01);
  3698. cmd->dst = EBI2_NAND_ADM_MUX;
  3699. cmd->len = 4;
  3700. cmd++;
  3701.  
  3702. cmd->cmd = SRC_CRCI_NAND_DATA;
  3703. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  3704. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[1]);
  3705. cmd->len = 8;
  3706. cmd++;
  3707.  
  3708. cmd->cmd = 0;
  3709. cmd->src = NC10(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
  3710. (528*(cwperpage-1)));
  3711. cmd->dst = msm_virt_to_dma(chip, buf10);
  3712. cmd->len = 4;
  3713. cmd++;
  3714.  
  3715. /* FC0 */
  3716. cmd->cmd = 0;
  3717. cmd->src = msm_virt_to_dma(chip,
  3718. &dma_buffer->data.adm_default_mux);
  3719. cmd->dst = EBI2_NAND_ADM_MUX;
  3720. cmd->len = 4;
  3721. cmd++;
  3722.  
  3723. /* disble CS1 */
  3724. cmd->cmd = 0;
  3725. cmd->src = msm_virt_to_dma(chip,
  3726. &dma_buffer->data.ebi2_chip_select_cfg0);
  3727. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  3728. cmd->len = 4;
  3729. cmd++;
  3730.  
  3731. BUILD_BUG_ON(18 != ARRAY_SIZE(dma_buffer->cmd));
  3732. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  3733. dma_buffer->cmd[0].cmd |= CMD_OCB;
  3734. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  3735.  
  3736. dma_buffer->cmdptr = (msm_virt_to_dma(chip,
  3737. dma_buffer->cmd) >> 3) | CMD_PTR_LP;
  3738.  
  3739. dsb();
  3740. msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST |
  3741. DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  3742. dsb();
  3743.  
  3744. ret = 0;
  3745. if ((dma_buffer->data.result[0].flash_status & 0x110) ||
  3746. (dma_buffer->data.result[1].flash_status & 0x110))
  3747. ret = -EIO;
  3748.  
  3749. if (!ret) {
  3750. /* Check for bad block marker byte for NC01 & NC10 */
  3751. if (chip->CFG1 & CFG1_WIDE_FLASH) {
  3752. if ((buf01[0] != 0xFF || buf01[1] != 0xFF) ||
  3753. (buf10[0] != 0xFF || buf10[1] != 0xFF))
  3754. ret = 1;
  3755. } else {
  3756. if (buf01[0] != 0xFF || buf10[0] != 0xFF)
  3757. ret = 1;
  3758. }
  3759. }
  3760.  
  3761. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 8);
  3762. return ret;
  3763. }
  3764.  
  3765. static int
  3766. msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
  3767. {
  3768. struct mtd_oob_ops ops;
  3769. int ret;
  3770. uint8_t *buf;
  3771.  
  3772. /* Check for invalid offset */
  3773. if (ofs > mtd->size)
  3774. return -EINVAL;
  3775. if (ofs & (mtd->erasesize - 1)) {
  3776. pr_err("%s: unsupported block address, 0x%x\n",
  3777. __func__, (uint32_t)ofs);
  3778. return -EINVAL;
  3779. }
  3780.  
  3781. /*
  3782. Write all 0s to the first page
  3783. This will set the BB marker to 0
  3784. */
  3785. buf = page_address(ZERO_PAGE());
  3786.  
  3787. ops.mode = MTD_OOB_RAW;
  3788. ops.len = mtd->writesize + mtd->oobsize;
  3789. ops.retlen = 0;
  3790. ops.ooblen = 0;
  3791. ops.datbuf = buf;
  3792. ops.oobbuf = NULL;
  3793. if (!interleave_enable)
  3794. ret = msm_nand_write_oob(mtd, ofs, &ops);
  3795. else
  3796. ret = msm_nand_write_oob_dualnandc(mtd, ofs, &ops);
  3797.  
  3798. return ret;
  3799. }
  3800.  
  3801. int msm_nand_read_dpram(char *mBuf, unsigned size)
  3802. {
  3803. char spare_buf[128] = { 0, };
  3804. struct mtd_oob_ops ops = { 0, };
  3805. unsigned offset = 0;
  3806.  
  3807. if(NULL == current_mtd)
  3808. {
  3809. printk("[msm_nand_read_dpram] MTD not initialized\n");
  3810. return -1;
  3811. }
  3812. if(size < current_mtd->writesize)
  3813. {
  3814. printk("[msm_nand_read_dpram] given buffer has invalid size\n");
  3815. return -1;
  3816. }
  3817.  
  3818. /* needed data is hardcoded at 5th page of the last block */
  3819. offset = ((5 * current_mtd->writesize) + (current_mtd->erasesize * (((unsigned )current_mtd->size / (unsigned )current_mtd->erasesize) - 1)));
  3820.  
  3821. ops.mode = MTD_OOB_RAW;
  3822. ops.len = current_mtd->writesize + current_mtd->oobsize;
  3823. ops.ooblen = current_mtd->oobsize;
  3824. ops.datbuf = mBuf;
  3825. ops.oobbuf = spare_buf;
  3826.  
  3827. printk("[msm_nand_read_dpram] number of blocks = %u, offset = %u, page size = %u, block size = %u\n", (unsigned )current_mtd->size / (unsigned )current_mtd->erasesize, offset, current_mtd->writesize, current_mtd->erasesize);
  3828.  
  3829. return msm_nand_read_oob(current_mtd, offset, &ops);
  3830. }
  3831. EXPORT_SYMBOL(msm_nand_read_dpram);
  3832.  
/*
 * msm_read_param - read the first good page of the parameter
 * partition into @mBuf.
 *
 * Scans forward from the global param_start_block, skipping blocks
 * that msm_nand_block_isbad() flags (the scan permanently advances
 * param_start_block), then does a raw data+spare read of that block's
 * first page and copies the 4096-byte data buffer to @mBuf.
 *
 * NOTE(review): data_buf + spare_buf place ~4.2KB on the kernel
 * stack — confirm this fits the stack budget of every caller.
 * NOTE(review): sizeof(data_buf) (4096) is always copied, so @mBuf
 * must be at least 4096 bytes even on 2KB-page devices — verify.
 * NOTE(review): the bad-block scan loop has no upper bound while it
 * runs; param_start_block is only clamped to param_end_block - 1
 * afterwards — confirm a fully-bad partition cannot scan past the
 * partition/device end.
 */
void msm_read_param(char *mBuf)
{
	char data_buf[4096] = { 0, };
	char spare_buf[128] = { 0, };
	struct mtd_oob_ops ops = { 0, };
	int data_size = 0;

	/* Page data size inferred from OOB size: 64B OOB -> 2KB page,
	 * 128B OOB -> 4KB page. */
	if (current_mtd->oobsize == 64) {
		data_size = 2048;
	}
	else if (current_mtd->oobsize == 128) {
		data_size = 4096;
	}

	ops.mode = MTD_OOB_RAW;
	ops.len = data_size+current_mtd->oobsize;
	ops.retlen = 0;
	ops.ooblen = current_mtd->oobsize;
	ops.datbuf = data_buf;
	ops.oobbuf = spare_buf;

	/* erasize == size of entire block == page size * pages per block */
	while(msm_nand_block_isbad(current_mtd, (param_start_block * current_mtd->erasesize)))
	{
		printk("msm_read_param: bad block\n");
		param_start_block++;
	}

	if ( param_start_block >= param_end_block) {
		param_start_block = param_end_block - 1;
		printk("All nand block in param partition has been crashed\n");
	}

	msm_nand_read_oob(current_mtd, (param_start_block * current_mtd->erasesize), &ops);
	memcpy(mBuf,data_buf,sizeof(data_buf));
}
EXPORT_SYMBOL(msm_read_param);
  3870.  
  3871. void msm_write_param(char *mBuf)
  3872. {
  3873. char data_buf[4096] = { 0, };
  3874. char spare_buf[128] = { 0, };
  3875. struct mtd_oob_ops ops = { 0, };
  3876. struct erase_info *param_erase_info = 0;
  3877. int data_size = 0;
  3878.  
  3879. if (current_mtd->oobsize == 64) {
  3880. data_size = 2048;
  3881. }
  3882. else if (current_mtd->oobsize == 128) {
  3883. data_size = 4096;
  3884. }
  3885.  
  3886. param_erase_info = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
  3887. if(0 == param_erase_info)
  3888. {
  3889. printk("msm_write_param: memory allocation error\n");
  3890. return;
  3891. }
  3892. param_erase_info->mtd = current_mtd;
  3893. /* erasize == size of entire block == page size * pages per block */
  3894. param_erase_info->addr = param_start_block * current_mtd->erasesize;
  3895. param_erase_info->len = current_mtd->erasesize;
  3896. if(!msm_nand_erase(current_mtd, param_erase_info)) {
  3897. pr_info("parameter block erase success\n");
  3898. }
  3899.  
  3900. memset(spare_buf,0xFF,current_mtd->oobsize);
  3901. memcpy(data_buf,mBuf,sizeof(data_buf));
  3902.  
  3903. ops.mode = MTD_OOB_RAW;
  3904. ops.len = data_size+current_mtd->oobsize;
  3905. ops.retlen = 0;
  3906. ops.ooblen = current_mtd->oobsize;
  3907. ops.datbuf = data_buf;
  3908. ops.oobbuf = spare_buf;
  3909.  
  3910. msm_nand_write_oob(current_mtd, param_erase_info->addr, &ops);
  3911.  
  3912. kfree(param_erase_info);
  3913. }
  3914. EXPORT_SYMBOL(msm_write_param);
  3915.  
  3916. /**
  3917. * msm_nand_suspend - [MTD Interface] Suspend the msm_nand flash
  3918. * @param mtd MTD device structure
  3919. */
  3920. static int msm_nand_suspend(struct mtd_info *mtd)
  3921. {
  3922. return 0;
  3923. }
  3924.  
  3925. /**
  3926. * msm_nand_resume - [MTD Interface] Resume the msm_nand flash
  3927. * @param mtd MTD device structure
  3928. */
  3929. static void msm_nand_resume(struct mtd_info *mtd)
  3930. {
  3931. }
  3932.  
/* Identification data gathered from a OneNAND device's ID registers
 * at probe time (see flash_onenand_probe: it reads the
 * ONENAND_MANUFACTURER_ID / ONENAND_DEVICE_ID / buffer-size
 * registers over the sflash controller). */
struct onenand_information {
	uint16_t manufacturer_id;	/* ONENAND_MANUFACTURER_ID register */
	uint16_t device_id;		/* ONENAND_DEVICE_ID register */
	uint16_t version_id;		/* device version register */
	uint16_t data_buf_size;		/* ONENAND_DATA_BUFFER_SIZE register */
	uint16_t boot_buf_size;		/* boot buffer size register */
	uint16_t num_of_buffers;	/* buffer count register */
	uint16_t technology;		/* process technology register */
};
  3942.  
/* Identification registers of the probed part, filled in by
 * flash_onenand_probe() and consulted by the read/write paths. */
static struct onenand_information onenand_info;
/* SFlash command mode latched at probe time: MSM_NAND_SFCMD_ASYNC or
 * MSM_NAND_SFCMD_BURST, folded into every SFLASH_PREPCMD() below. */
static uint32_t nand_sfcmd_mode;
  3945.  
/*
 * Probe for a supported OneNAND device behind the MSM SFlash controller.
 *
 * Latches the controller's async/burst mode from the current SFLASHC_CMD
 * register, then builds a single data-mover command list that reads the
 * seven OneNAND identification registers in one register-read
 * transaction, and finally validates the returned IDs against the one
 * supported configuration.
 *
 * Returns 0 when a supported device is detected, -EIO otherwise.
 */
uint32_t flash_onenand_probe(struct msm_nand_chip *chip)
{
	/* Data-mover command list plus the register images it moves;
	 * lives in the chip's DMA buffer pool so the ADM can see it. */
	struct {
		dmov_s cmd[7];
		unsigned cmdptr;
		struct {
			uint32_t bcfg;
			uint32_t cmd;
			uint32_t exec;
			uint32_t status;
			uint32_t addr0;
			uint32_t addr1;
			uint32_t addr2;
			uint32_t addr3;
			uint32_t addr4;	/* addr4..addr6 and data4..data6 are */
			uint32_t addr5;	/* not used by this function */
			uint32_t addr6;
			uint32_t data0;
			uint32_t data1;
			uint32_t data2;
			uint32_t data3;
			uint32_t data4;
			uint32_t data5;
			uint32_t data6;
		} data;
	} *dma_buffer;
	dmov_s *cmd;

	int err = 0;
	uint32_t initialsflashcmd = 0;

	initialsflashcmd = flash_rd_reg(chip, MSM_NAND_SFLASHC_CMD);

	/* Bit 4 of the CMD register reflects the async-mode setting */
	if ((initialsflashcmd & 0x10) == 0x10)
		nand_sfcmd_mode = MSM_NAND_SFCMD_ASYNC;
	else
		nand_sfcmd_mode = MSM_NAND_SFCMD_BURST;

	printk(KERN_INFO "SFLASHC Async Mode bit: %x \n", nand_sfcmd_mode);

	/* May sleep until a DMA buffer slot becomes available */
	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	cmd = dma_buffer->cmd;

	dma_buffer->data.bcfg = SFLASH_BCFG |
					(nand_sfcmd_mode ? 0 : (1 << 24));
	/* One REGRD transaction covering the 7 ID registers */
	dma_buffer->data.cmd = SFLASH_PREPCMD(7, 0, 0,
						MSM_NAND_SFCMD_DATXS,
						nand_sfcmd_mode,
						MSM_NAND_SFCMD_REGRD);
	dma_buffer->data.exec = 1;
	dma_buffer->data.status = CLEAN_DATA_32;
	/* Two OneNAND register addresses are packed into each ADDRn word */
	dma_buffer->data.addr0 = (ONENAND_DEVICE_ID << 16) |
						(ONENAND_MANUFACTURER_ID);
	dma_buffer->data.addr1 = (ONENAND_DATA_BUFFER_SIZE << 16) |
						(ONENAND_VERSION_ID);
	dma_buffer->data.addr2 = (ONENAND_AMOUNT_OF_BUFFERS << 16) |
						(ONENAND_BOOT_BUFFER_SIZE);
	dma_buffer->data.addr3 = (CLEAN_DATA_16 << 16) |
						(ONENAND_TECHNOLOGY << 0);
	dma_buffer->data.data0 = CLEAN_DATA_32;
	dma_buffer->data.data1 = CLEAN_DATA_32;
	dma_buffer->data.data2 = CLEAN_DATA_32;
	dma_buffer->data.data3 = CLEAN_DATA_32;

	/* Enable and configure the SFlash controller */
	cmd->cmd = 0;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.bcfg);
	cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
	cmd->len = 4;
	cmd++;

	/* Block on cmd ready and write CMD register */
	cmd->cmd = DST_CRCI_NAND_CMD;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
	cmd->dst = MSM_NAND_SFLASHC_CMD;
	cmd->len = 4;
	cmd++;

	/* Configure the ADDR0 and ADDR1 registers */
	cmd->cmd = 0;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
	cmd->dst = MSM_NAND_ADDR0;
	cmd->len = 8;
	cmd++;

	/* Configure the ADDR2 and ADDR3 registers */
	cmd->cmd = 0;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
	cmd->dst = MSM_NAND_ADDR2;
	cmd->len = 8;
	cmd++;

	/* Kick the execute command */
	cmd->cmd = 0;
	cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
	cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
	cmd->len = 4;
	cmd++;

	/* Block on data ready, and read the two status registers */
	cmd->cmd = SRC_CRCI_NAND_DATA;
	cmd->src = MSM_NAND_SFLASHC_STATUS;
	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.status);
	cmd->len = 4;
	cmd++;

	/* Read data registers - valid only if status says success */
	cmd->cmd = 0;
	cmd->src = MSM_NAND_GENP_REG0;
	cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data0);
	cmd->len = 16;
	cmd++;

	BUILD_BUG_ON(7 != ARRAY_SIZE(dma_buffer->cmd));
	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	/* Bracket the list: first command opens, last closes + interrupts */
	dma_buffer->cmd[0].cmd |= CMD_OCB;
	cmd[-1].cmd |= CMD_OCU | CMD_LC;

	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
			>> 3) | CMD_PTR_LP;

	dsb();
	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST
			| DMOV_CMD_ADDR(msm_virt_to_dma(chip,
			&dma_buffer->cmdptr)));
	dsb();

	/* Check for errors, protection violations etc */
	if (dma_buffer->data.status & 0x110) {
		pr_info("%s: MPU/OP error"
				"(0x%x) during Onenand probe\n",
				__func__, dma_buffer->data.status);
		err = -EIO;
	} else {

		/* Unpack the two 16-bit IDs from each 32-bit GENP word,
		 * mirroring the packing programmed into addr0..addr3 */
		onenand_info.manufacturer_id =
			(dma_buffer->data.data0 >> 0) & 0x0000FFFF;
		onenand_info.device_id =
			(dma_buffer->data.data0 >> 16) & 0x0000FFFF;
		onenand_info.version_id =
			(dma_buffer->data.data1 >> 0) & 0x0000FFFF;
		onenand_info.data_buf_size =
			(dma_buffer->data.data1 >> 16) & 0x0000FFFF;
		onenand_info.boot_buf_size =
			(dma_buffer->data.data2 >> 0) & 0x0000FFFF;
		onenand_info.num_of_buffers =
			(dma_buffer->data.data2 >> 16) & 0x0000FFFF;
		onenand_info.technology =
			(dma_buffer->data.data3 >> 0) & 0x0000FFFF;


		pr_info("======================================="
				"==========================\n");

		pr_info("%s: manufacturer_id = 0x%x\n"
				, __func__, onenand_info.manufacturer_id);
		pr_info("%s: device_id = 0x%x\n"
				, __func__, onenand_info.device_id);
		pr_info("%s: version_id = 0x%x\n"
				, __func__, onenand_info.version_id);
		pr_info("%s: data_buf_size = 0x%x\n"
				, __func__, onenand_info.data_buf_size);
		pr_info("%s: boot_buf_size = 0x%x\n"
				, __func__, onenand_info.boot_buf_size);
		pr_info("%s: num_of_buffers = 0x%x\n"
				, __func__, onenand_info.num_of_buffers);
		pr_info("%s: technology = 0x%x\n"
				, __func__, onenand_info.technology);

		pr_info("======================================="
				"==========================\n");

		/* Accept only the one known-good configuration:
		 * Samsung (0x00EC), device family 0x0050, 2KiB data
		 * buffer, 512B boot buffer, 1+1 buffers, NAND tech 0 */
		if ((onenand_info.manufacturer_id != 0x00EC)
			|| ((onenand_info.device_id & 0x0050) != 0x0050)
			|| (onenand_info.data_buf_size != 0x0800)
			|| (onenand_info.boot_buf_size != 0x0200)
			|| (onenand_info.num_of_buffers != 0x0101)
			|| (onenand_info.technology != 0)) {

			pr_info("%s: Detected an unsupported device\n"
				, __func__);
			err = -EIO;
		}
	}

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));

	return err;
}
  4137.  
  4138. int msm_onenand_read_oob(struct mtd_info *mtd,
  4139. loff_t from, struct mtd_oob_ops *ops)
  4140. {
  4141. struct msm_nand_chip *chip = mtd->priv;
  4142.  
  4143. struct {
  4144. dmov_s cmd[73];
  4145. unsigned cmdptr;
  4146. struct {
  4147. uint32_t sfbcfg;
  4148. uint32_t sfcmd[13];
  4149. uint32_t sfexec;
  4150. uint32_t sfstat[13];
  4151. uint32_t addr0;
  4152. uint32_t addr1;
  4153. uint32_t addr2;
  4154. uint32_t addr3;
  4155. uint32_t addr4;
  4156. uint32_t addr5;
  4157. uint32_t addr6;
  4158. uint32_t data0;
  4159. uint32_t data1;
  4160. uint32_t data2;
  4161. uint32_t data3;
  4162. uint32_t data4;
  4163. uint32_t data5;
  4164. uint32_t data6;
  4165. uint32_t macro[9];
  4166. } data;
  4167. } *dma_buffer;
  4168. dmov_s *cmd;
  4169.  
  4170. int err = 0;
  4171. int i;
  4172. dma_addr_t data_dma_addr = 0;
  4173. dma_addr_t oob_dma_addr = 0;
  4174. dma_addr_t data_dma_addr_curr = 0;
  4175. dma_addr_t oob_dma_addr_curr = 0;
  4176.  
  4177. loff_t from_curr = 0;
  4178. unsigned page_count;
  4179. unsigned pages_read = 0;
  4180.  
  4181. uint16_t onenand_startaddr1;
  4182. uint16_t onenand_startaddr8;
  4183. uint16_t onenand_startaddr2;
  4184. uint16_t onenand_startbuffer;
  4185. uint16_t onenand_sysconfig1;
  4186. uint16_t controller_status;
  4187. uint16_t interrupt_status;
  4188. uint16_t ecc_status;
  4189. #if VERBOSE
  4190. pr_info("================================================="
  4191. "================\n");
  4192. pr_info("%s: from 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
  4193. "\noobbuf 0x%p ooblen 0x%x\n",
  4194. __func__, from, ops->mode, ops->datbuf, ops->len,
  4195. ops->oobbuf, ops->ooblen);
  4196. #endif
  4197. if (!mtd) {
  4198. pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
  4199. (uint32_t)mtd);
  4200. return -EINVAL;
  4201. }
  4202. if (from & (mtd->writesize - 1)) {
  4203. pr_err("%s: unsupported from, 0x%llx\n", __func__,
  4204. from);
  4205. return -EINVAL;
  4206. }
  4207.  
  4208. if ((ops->mode != MTD_OOB_PLACE) && (ops->mode != MTD_OOB_AUTO) &&
  4209. (ops->mode != MTD_OOB_RAW)) {
  4210. pr_err("%s: unsupported ops->mode, %d\n", __func__,
  4211. ops->mode);
  4212. return -EINVAL;
  4213. }
  4214.  
  4215. if (((ops->datbuf == NULL) || (ops->len == 0)) &&
  4216. ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
  4217. pr_err("%s: incorrect ops fields - nothing to do\n",
  4218. __func__);
  4219. return -EINVAL;
  4220. }
  4221.  
  4222. if ((ops->datbuf != NULL) && (ops->len == 0)) {
  4223. pr_err("%s: data buffer passed but length 0\n",
  4224. __func__);
  4225. return -EINVAL;
  4226. }
  4227.  
  4228. if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
  4229. pr_err("%s: oob buffer passed but length 0\n",
  4230. __func__);
  4231. return -EINVAL;
  4232. }
  4233.  
  4234. if (ops->mode != MTD_OOB_RAW) {
  4235. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  4236. /* when ops->datbuf is NULL, ops->len can be ooblen */
  4237. pr_err("%s: unsupported ops->len, %d\n", __func__,
  4238. ops->len);
  4239. return -EINVAL;
  4240. }
  4241. } else {
  4242. if (ops->datbuf != NULL &&
  4243. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  4244. pr_err("%s: unsupported ops->len,"
  4245. " %d for MTD_OOB_RAW\n", __func__, ops->len);
  4246. return -EINVAL;
  4247. }
  4248. }
  4249.  
  4250. if ((ops->mode == MTD_OOB_RAW) && (ops->oobbuf)) {
  4251. pr_err("%s: unsupported operation, oobbuf pointer "
  4252. "passed in for RAW mode, %x\n", __func__,
  4253. (uint32_t)ops->oobbuf);
  4254. return -EINVAL;
  4255. }
  4256.  
  4257. if (ops->oobbuf && !ops->datbuf)
  4258. page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
  4259. mtd->oobavail : mtd->oobsize);
  4260. else if (ops->mode != MTD_OOB_RAW)
  4261. page_count = ops->len / mtd->writesize;
  4262. else
  4263. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  4264.  
  4265. if ((ops->mode == MTD_OOB_AUTO) && (ops->oobbuf != NULL)) {
  4266. if (page_count * mtd->oobavail > ops->ooblen) {
  4267. pr_err("%s: unsupported ops->ooblen for "
  4268. "AUTO, %d\n", __func__, ops->ooblen);
  4269. return -EINVAL;
  4270. }
  4271. }
  4272.  
  4273. if ((ops->mode == MTD_OOB_PLACE) && (ops->oobbuf != NULL)) {
  4274. if (page_count * mtd->oobsize > ops->ooblen) {
  4275. pr_err("%s: unsupported ops->ooblen for "
  4276. "PLACE, %d\n", __func__, ops->ooblen);
  4277. return -EINVAL;
  4278. }
  4279. }
  4280.  
  4281. if ((ops->mode == MTD_OOB_PLACE) && (ops->ooblen != 0) &&
  4282. (ops->ooboffs != 0)) {
  4283. pr_err("%s: unsupported ops->ooboffs, %d\n", __func__,
  4284. ops->ooboffs);
  4285. return -EINVAL;
  4286. }
  4287.  
  4288. if (ops->datbuf) {
  4289. memset(ops->datbuf, 0x55, ops->len);
  4290. data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
  4291. ops->datbuf, ops->len, DMA_FROM_DEVICE);
  4292. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  4293. pr_err("%s: failed to get dma addr for %p\n",
  4294. __func__, ops->datbuf);
  4295. return -EIO;
  4296. }
  4297. }
  4298. if (ops->oobbuf) {
  4299. memset(ops->oobbuf, 0x55, ops->ooblen);
  4300. oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
  4301. ops->oobbuf, ops->ooblen, DMA_FROM_DEVICE);
  4302. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  4303. pr_err("%s: failed to get dma addr for %p\n",
  4304. __func__, ops->oobbuf);
  4305. err = -EIO;
  4306. goto err_dma_map_oobbuf_failed;
  4307. }
  4308. }
  4309.  
  4310. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  4311. (chip, sizeof(*dma_buffer))));
  4312.  
  4313. from_curr = from;
  4314.  
  4315. while (page_count-- > 0) {
  4316.  
  4317. cmd = dma_buffer->cmd;
  4318.  
  4319. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  4320. && (from_curr >= (mtd->size>>1))) { /* DDP Device */
  4321. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  4322. (((uint32_t)(from_curr-(mtd->size>>1))
  4323. / mtd->erasesize));
  4324. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  4325. } else {
  4326. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  4327. ((uint32_t)from_curr / mtd->erasesize) ;
  4328. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  4329. }
  4330.  
  4331. onenand_startaddr8 = (((uint32_t)from_curr &
  4332. (mtd->erasesize - 1)) / mtd->writesize) << 2;
  4333. onenand_startbuffer = DATARAM0_0 << 8;
  4334. onenand_sysconfig1 = (ops->mode == MTD_OOB_RAW) ?
  4335. ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
  4336. ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
  4337.  
  4338. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  4339. (nand_sfcmd_mode ? 0 : (1 << 24));
  4340. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
  4341. MSM_NAND_SFCMD_CMDXS,
  4342. nand_sfcmd_mode,
  4343. MSM_NAND_SFCMD_REGWR);
  4344. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
  4345. MSM_NAND_SFCMD_CMDXS,
  4346. nand_sfcmd_mode,
  4347. MSM_NAND_SFCMD_INTHI);
  4348. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
  4349. MSM_NAND_SFCMD_DATXS,
  4350. nand_sfcmd_mode,
  4351. MSM_NAND_SFCMD_REGRD);
  4352. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
  4353. MSM_NAND_SFCMD_DATXS,
  4354. nand_sfcmd_mode,
  4355. MSM_NAND_SFCMD_DATRD);
  4356. dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
  4357. MSM_NAND_SFCMD_DATXS,
  4358. nand_sfcmd_mode,
  4359. MSM_NAND_SFCMD_DATRD);
  4360. dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(256, 0, 0,
  4361. MSM_NAND_SFCMD_DATXS,
  4362. nand_sfcmd_mode,
  4363. MSM_NAND_SFCMD_DATRD);
  4364. dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(256, 0, 0,
  4365. MSM_NAND_SFCMD_DATXS,
  4366. nand_sfcmd_mode,
  4367. MSM_NAND_SFCMD_DATRD);
  4368. dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(256, 0, 0,
  4369. MSM_NAND_SFCMD_DATXS,
  4370. nand_sfcmd_mode,
  4371. MSM_NAND_SFCMD_DATRD);
  4372. dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(256, 0, 0,
  4373. MSM_NAND_SFCMD_DATXS,
  4374. nand_sfcmd_mode,
  4375. MSM_NAND_SFCMD_DATRD);
  4376. dma_buffer->data.sfcmd[9] = SFLASH_PREPCMD(256, 0, 0,
  4377. MSM_NAND_SFCMD_DATXS,
  4378. nand_sfcmd_mode,
  4379. MSM_NAND_SFCMD_DATRD);
  4380. dma_buffer->data.sfcmd[10] = SFLASH_PREPCMD(256, 0, 0,
  4381. MSM_NAND_SFCMD_DATXS,
  4382. nand_sfcmd_mode,
  4383. MSM_NAND_SFCMD_DATRD);
  4384. dma_buffer->data.sfcmd[11] = SFLASH_PREPCMD(32, 0, 0,
  4385. MSM_NAND_SFCMD_DATXS,
  4386. nand_sfcmd_mode,
  4387. MSM_NAND_SFCMD_DATRD);
  4388. dma_buffer->data.sfcmd[12] = SFLASH_PREPCMD(4, 10, 0,
  4389. MSM_NAND_SFCMD_CMDXS,
  4390. nand_sfcmd_mode,
  4391. MSM_NAND_SFCMD_REGWR);
  4392. dma_buffer->data.sfexec = 1;
  4393. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  4394. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  4395. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  4396. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  4397. dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
  4398. dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
  4399. dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
  4400. dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
  4401. dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
  4402. dma_buffer->data.sfstat[9] = CLEAN_DATA_32;
  4403. dma_buffer->data.sfstat[10] = CLEAN_DATA_32;
  4404. dma_buffer->data.sfstat[11] = CLEAN_DATA_32;
  4405. dma_buffer->data.sfstat[12] = CLEAN_DATA_32;
  4406. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  4407. (ONENAND_SYSTEM_CONFIG_1);
  4408. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  4409. (ONENAND_START_ADDRESS_1);
  4410. dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
  4411. (ONENAND_START_ADDRESS_2);
  4412. dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
  4413. (ONENAND_COMMAND);
  4414. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  4415. (ONENAND_INTERRUPT_STATUS);
  4416. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  4417. (ONENAND_SYSTEM_CONFIG_1);
  4418. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  4419. (ONENAND_START_ADDRESS_1);
  4420. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  4421. (onenand_sysconfig1);
  4422. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  4423. (onenand_startaddr1);
  4424. dma_buffer->data.data2 = (onenand_startbuffer << 16) |
  4425. (onenand_startaddr2);
  4426. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  4427. (ONENAND_CMDLOADSPARE);
  4428. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  4429. (CLEAN_DATA_16);
  4430. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  4431. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  4432. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  4433. (ONENAND_STARTADDR1_RES);
  4434. dma_buffer->data.macro[0] = 0x0200;
  4435. dma_buffer->data.macro[1] = 0x0300;
  4436. dma_buffer->data.macro[2] = 0x0400;
  4437. dma_buffer->data.macro[3] = 0x0500;
  4438. dma_buffer->data.macro[4] = 0x0600;
  4439. dma_buffer->data.macro[5] = 0x0700;
  4440. dma_buffer->data.macro[6] = 0x0800;
  4441. dma_buffer->data.macro[7] = 0x0900;
  4442. dma_buffer->data.macro[8] = 0x8010;
  4443.  
  4444. /*************************************************************/
  4445. /* Write necessary address registers in the onenand device */
  4446. /*************************************************************/
  4447.  
  4448. /* Enable and configure the SFlash controller */
  4449. cmd->cmd = 0;
  4450. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  4451. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  4452. cmd->len = 4;
  4453. cmd++;
  4454.  
  4455. /* Block on cmd ready and write CMD register */
  4456. cmd->cmd = DST_CRCI_NAND_CMD;
  4457. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  4458. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4459. cmd->len = 4;
  4460. cmd++;
  4461.  
  4462. /* Write the ADDR0 and ADDR1 registers */
  4463. cmd->cmd = 0;
  4464. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  4465. cmd->dst = MSM_NAND_ADDR0;
  4466. cmd->len = 8;
  4467. cmd++;
  4468.  
  4469. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  4470. cmd->cmd = 0;
  4471. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  4472. cmd->dst = MSM_NAND_ADDR2;
  4473. cmd->len = 16;
  4474. cmd++;
  4475.  
  4476. /* Write the ADDR6 registers */
  4477. cmd->cmd = 0;
  4478. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  4479. cmd->dst = MSM_NAND_ADDR6;
  4480. cmd->len = 4;
  4481. cmd++;
  4482.  
  4483. /* Write the GENP0, GENP1, GENP2, GENP3 registers */
  4484. cmd->cmd = 0;
  4485. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  4486. cmd->dst = MSM_NAND_GENP_REG0;
  4487. cmd->len = 16;
  4488. cmd++;
  4489.  
  4490. /* Write the FLASH_DEV_CMD4,5,6 registers */
  4491. cmd->cmd = 0;
  4492. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  4493. cmd->dst = MSM_NAND_DEV_CMD4;
  4494. cmd->len = 12;
  4495. cmd++;
  4496.  
  4497. /* Kick the execute command */
  4498. cmd->cmd = 0;
  4499. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4500. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4501. cmd->len = 4;
  4502. cmd++;
  4503.  
  4504. /* Block on data ready, and read the status register */
  4505. cmd->cmd = SRC_CRCI_NAND_DATA;
  4506. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4507. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  4508. cmd->len = 4;
  4509. cmd++;
  4510.  
  4511. /*************************************************************/
  4512. /* Wait for the interrupt from the Onenand device controller */
  4513. /*************************************************************/
  4514.  
  4515. /* Block on cmd ready and write CMD register */
  4516. cmd->cmd = DST_CRCI_NAND_CMD;
  4517. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
  4518. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4519. cmd->len = 4;
  4520. cmd++;
  4521.  
  4522. /* Kick the execute command */
  4523. cmd->cmd = 0;
  4524. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4525. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4526. cmd->len = 4;
  4527. cmd++;
  4528.  
  4529. /* Block on data ready, and read the status register */
  4530. cmd->cmd = SRC_CRCI_NAND_DATA;
  4531. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4532. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
  4533. cmd->len = 4;
  4534. cmd++;
  4535.  
  4536. /*************************************************************/
  4537. /* Read necessary status registers from the onenand device */
  4538. /*************************************************************/
  4539.  
  4540. /* Block on cmd ready and write CMD register */
  4541. cmd->cmd = DST_CRCI_NAND_CMD;
  4542. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
  4543. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4544. cmd->len = 4;
  4545. cmd++;
  4546.  
  4547. /* Kick the execute command */
  4548. cmd->cmd = 0;
  4549. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4550. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4551. cmd->len = 4;
  4552. cmd++;
  4553.  
  4554. /* Block on data ready, and read the status register */
  4555. cmd->cmd = SRC_CRCI_NAND_DATA;
  4556. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4557. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
  4558. cmd->len = 4;
  4559. cmd++;
  4560.  
  4561. /* Read the GENP3 register */
  4562. cmd->cmd = 0;
  4563. cmd->src = MSM_NAND_GENP_REG3;
  4564. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  4565. cmd->len = 4;
  4566. cmd++;
  4567.  
  4568. /* Read the DEVCMD4 register */
  4569. cmd->cmd = 0;
  4570. cmd->src = MSM_NAND_DEV_CMD4;
  4571. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  4572. cmd->len = 4;
  4573. cmd++;
  4574.  
  4575. /*************************************************************/
  4576. /* Read the data ram area from the onenand buffer ram */
  4577. /*************************************************************/
  4578.  
  4579. if (ops->datbuf) {
  4580.  
  4581. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  4582. (ONENAND_CMDLOAD);
  4583.  
  4584. for (i = 0; i < 8; i++) {
  4585.  
  4586. /* Block on cmd ready and write CMD register */
  4587. cmd->cmd = DST_CRCI_NAND_CMD;
  4588. cmd->src = msm_virt_to_dma(chip,
  4589. &dma_buffer->data.sfcmd[3+i]);
  4590. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4591. cmd->len = 4;
  4592. cmd++;
  4593.  
  4594. /* Write the MACRO1 register */
  4595. cmd->cmd = 0;
  4596. cmd->src = msm_virt_to_dma(chip,
  4597. &dma_buffer->data.macro[i]);
  4598. cmd->dst = MSM_NAND_MACRO1_REG;
  4599. cmd->len = 4;
  4600. cmd++;
  4601.  
  4602. /* Kick the execute command */
  4603. cmd->cmd = 0;
  4604. cmd->src = msm_virt_to_dma(chip,
  4605. &dma_buffer->data.sfexec);
  4606. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4607. cmd->len = 4;
  4608. cmd++;
  4609.  
  4610. /* Block on data rdy, & read status register */
  4611. cmd->cmd = SRC_CRCI_NAND_DATA;
  4612. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4613. cmd->dst = msm_virt_to_dma(chip,
  4614. &dma_buffer->data.sfstat[3+i]);
  4615. cmd->len = 4;
  4616. cmd++;
  4617.  
  4618. /* Transfer nand ctlr buf contents to usr buf */
  4619. cmd->cmd = 0;
  4620. cmd->src = MSM_NAND_FLASH_BUFFER;
  4621. cmd->dst = data_dma_addr_curr;
  4622. cmd->len = 512;
  4623. data_dma_addr_curr += 512;
  4624. cmd++;
  4625. }
  4626. }
  4627.  
  4628. if ((ops->oobbuf) || (ops->mode == MTD_OOB_RAW)) {
  4629.  
  4630. /* Block on cmd ready and write CMD register */
  4631. cmd->cmd = DST_CRCI_NAND_CMD;
  4632. cmd->src = msm_virt_to_dma(chip,
  4633. &dma_buffer->data.sfcmd[11]);
  4634. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4635. cmd->len = 4;
  4636. cmd++;
  4637.  
  4638. /* Write the MACRO1 register */
  4639. cmd->cmd = 0;
  4640. cmd->src = msm_virt_to_dma(chip,
  4641. &dma_buffer->data.macro[8]);
  4642. cmd->dst = MSM_NAND_MACRO1_REG;
  4643. cmd->len = 4;
  4644. cmd++;
  4645.  
  4646. /* Kick the execute command */
  4647. cmd->cmd = 0;
  4648. cmd->src = msm_virt_to_dma(chip,
  4649. &dma_buffer->data.sfexec);
  4650. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4651. cmd->len = 4;
  4652. cmd++;
  4653.  
  4654. /* Block on data ready, and read status register */
  4655. cmd->cmd = SRC_CRCI_NAND_DATA;
  4656. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4657. cmd->dst = msm_virt_to_dma(chip,
  4658. &dma_buffer->data.sfstat[11]);
  4659. cmd->len = 4;
  4660. cmd++;
  4661.  
  4662. /* Transfer nand ctlr buffer contents into usr buf */
  4663. if (ops->mode == MTD_OOB_AUTO) {
  4664. for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
  4665. cmd->cmd = 0;
  4666. cmd->src = MSM_NAND_FLASH_BUFFER +
  4667. mtd->ecclayout->oobfree[i].offset;
  4668. cmd->dst = oob_dma_addr_curr;
  4669. cmd->len =
  4670. mtd->ecclayout->oobfree[i].length;
  4671. oob_dma_addr_curr +=
  4672. mtd->ecclayout->oobfree[i].length;
  4673. cmd++;
  4674. }
  4675. }
  4676. if (ops->mode == MTD_OOB_PLACE) {
  4677. cmd->cmd = 0;
  4678. cmd->src = MSM_NAND_FLASH_BUFFER;
  4679. cmd->dst = oob_dma_addr_curr;
  4680. cmd->len = mtd->oobsize;
  4681. oob_dma_addr_curr += mtd->oobsize;
  4682. cmd++;
  4683. }
  4684. if (ops->mode == MTD_OOB_RAW) {
  4685. cmd->cmd = 0;
  4686. cmd->src = MSM_NAND_FLASH_BUFFER;
  4687. cmd->dst = data_dma_addr_curr;
  4688. cmd->len = mtd->oobsize;
  4689. data_dma_addr_curr += mtd->oobsize;
  4690. cmd++;
  4691. }
  4692. }
  4693.  
  4694. /*************************************************************/
  4695. /* Restore the necessary registers to proper values */
  4696. /*************************************************************/
  4697.  
  4698. /* Block on cmd ready and write CMD register */
  4699. cmd->cmd = DST_CRCI_NAND_CMD;
  4700. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[12]);
  4701. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4702. cmd->len = 4;
  4703. cmd++;
  4704.  
  4705. /* Kick the execute command */
  4706. cmd->cmd = 0;
  4707. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4708. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4709. cmd->len = 4;
  4710. cmd++;
  4711.  
  4712. /* Block on data ready, and read the status register */
  4713. cmd->cmd = SRC_CRCI_NAND_DATA;
  4714. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4715. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[12]);
  4716. cmd->len = 4;
  4717. cmd++;
  4718.  
  4719.  
  4720. BUILD_BUG_ON(100 != ARRAY_SIZE(dma_buffer->cmd));
  4721. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  4722. dma_buffer->cmd[0].cmd |= CMD_OCB;
  4723. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  4724.  
  4725. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  4726. >> 3) | CMD_PTR_LP;
  4727.  
  4728. dsb();
  4729. msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
  4730. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  4731. &dma_buffer->cmdptr)));
  4732. dsb();
  4733.  
  4734. ecc_status = (dma_buffer->data.data3 >> 16) &
  4735. 0x0000FFFF;
  4736. interrupt_status = (dma_buffer->data.data4 >> 0) &
  4737. 0x0000FFFF;
  4738. controller_status = (dma_buffer->data.data4 >> 16) &
  4739. 0x0000FFFF;
  4740.  
  4741. #if VERBOSE
  4742. pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
  4743. "%x %x\n", __func__,
  4744. dma_buffer->data.sfstat[0],
  4745. dma_buffer->data.sfstat[1],
  4746. dma_buffer->data.sfstat[2],
  4747. dma_buffer->data.sfstat[3],
  4748. dma_buffer->data.sfstat[4],
  4749. dma_buffer->data.sfstat[5],
  4750. dma_buffer->data.sfstat[6],
  4751. dma_buffer->data.sfstat[7],
  4752. dma_buffer->data.sfstat[8],
  4753. dma_buffer->data.sfstat[9],
  4754. dma_buffer->data.sfstat[10],
  4755. dma_buffer->data.sfstat[11],
  4756. dma_buffer->data.sfstat[12]);
  4757.  
  4758. pr_info("%s: controller_status = %x\n", __func__,
  4759. controller_status);
  4760. pr_info("%s: interrupt_status = %x\n", __func__,
  4761. interrupt_status);
  4762. pr_info("%s: ecc_status = %x\n", __func__,
  4763. ecc_status);
  4764. #endif
  4765. /* Check for errors, protection violations etc */
  4766. if ((controller_status != 0)
  4767. || (dma_buffer->data.sfstat[0] & 0x110)
  4768. || (dma_buffer->data.sfstat[1] & 0x110)
  4769. || (dma_buffer->data.sfstat[2] & 0x110)
  4770. || (dma_buffer->data.sfstat[12] & 0x110)
  4771. || ((dma_buffer->data.sfstat[3] & 0x110) &&
  4772. (ops->datbuf))
  4773. || ((dma_buffer->data.sfstat[4] & 0x110) &&
  4774. (ops->datbuf))
  4775. || ((dma_buffer->data.sfstat[5] & 0x110) &&
  4776. (ops->datbuf))
  4777. || ((dma_buffer->data.sfstat[6] & 0x110) &&
  4778. (ops->datbuf))
  4779. || ((dma_buffer->data.sfstat[7] & 0x110) &&
  4780. (ops->datbuf))
  4781. || ((dma_buffer->data.sfstat[8] & 0x110) &&
  4782. (ops->datbuf))
  4783. || ((dma_buffer->data.sfstat[9] & 0x110) &&
  4784. (ops->datbuf))
  4785. || ((dma_buffer->data.sfstat[10] & 0x110) &&
  4786. (ops->datbuf))
  4787. || ((dma_buffer->data.sfstat[11] & 0x110) &&
  4788. ((ops->oobbuf)
  4789. || (ops->mode == MTD_OOB_RAW)))) {
  4790. pr_info("%s: ECC/MPU/OP error\n", __func__);
  4791. err = -EIO;
  4792. }
  4793.  
  4794. if (err)
  4795. break;
  4796. pages_read++;
  4797. from_curr += mtd->writesize;
  4798. }
  4799.  
  4800. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  4801.  
  4802. if (ops->oobbuf) {
  4803. dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
  4804. DMA_FROM_DEVICE);
  4805. }
  4806. err_dma_map_oobbuf_failed:
  4807. if (ops->datbuf) {
  4808. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  4809. DMA_FROM_DEVICE);
  4810. }
  4811.  
  4812. if (err) {
  4813. pr_err("%s: %llx %x %x failed\n", __func__, from_curr,
  4814. ops->datbuf ? ops->len : 0, ops->ooblen);
  4815. } else {
  4816. ops->retlen = ops->oobretlen = 0;
  4817. if (ops->datbuf != NULL) {
  4818. if (ops->mode != MTD_OOB_RAW)
  4819. ops->retlen = mtd->writesize * pages_read;
  4820. else
  4821. ops->retlen = (mtd->writesize + mtd->oobsize)
  4822. * pages_read;
  4823. }
  4824. if (ops->oobbuf != NULL) {
  4825. if (ops->mode == MTD_OOB_AUTO)
  4826. ops->oobretlen = mtd->oobavail * pages_read;
  4827. else
  4828. ops->oobretlen = mtd->oobsize * pages_read;
  4829. }
  4830. }
  4831.  
  4832. #if VERBOSE
  4833. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  4834. __func__, err, ops->retlen, ops->oobretlen);
  4835.  
  4836. pr_info("==================================================="
  4837. "==============\n");
  4838. #endif
  4839. return err;
  4840. }
  4841.  
  4842. int msm_onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
  4843. size_t *retlen, u_char *buf)
  4844. {
  4845. int ret;
  4846. struct mtd_oob_ops ops;
  4847.  
  4848. ops.mode = MTD_OOB_PLACE;
  4849. ops.datbuf = buf;
  4850. ops.len = len;
  4851. ops.retlen = 0;
  4852. ops.oobbuf = NULL;
  4853. ops.ooblen = 0;
  4854. ops.oobretlen = 0;
  4855. ret = msm_onenand_read_oob(mtd, from, &ops);
  4856. *retlen = ops.retlen;
  4857.  
  4858. return ret;
  4859. }
  4860.  
  4861. static int msm_onenand_write_oob(struct mtd_info *mtd, loff_t to,
  4862. struct mtd_oob_ops *ops)
  4863. {
  4864. struct msm_nand_chip *chip = mtd->priv;
  4865.  
  4866. struct {
  4867. dmov_s cmd[73];
  4868. unsigned cmdptr;
  4869. struct {
  4870. uint32_t sfbcfg;
  4871. uint32_t sfcmd[14];
  4872. uint32_t sfexec;
  4873. uint32_t sfstat[14];
  4874. uint32_t addr0;
  4875. uint32_t addr1;
  4876. uint32_t addr2;
  4877. uint32_t addr3;
  4878. uint32_t addr4;
  4879. uint32_t addr5;
  4880. uint32_t addr6;
  4881. uint32_t data0;
  4882. uint32_t data1;
  4883. uint32_t data2;
  4884. uint32_t data3;
  4885. uint32_t data4;
  4886. uint32_t data5;
  4887. uint32_t data6;
  4888. uint32_t macro[9];
  4889. } data;
  4890. } *dma_buffer;
  4891. dmov_s *cmd;
  4892.  
  4893. int err = 0;
  4894. int i, j, k;
  4895. dma_addr_t data_dma_addr = 0;
  4896. dma_addr_t oob_dma_addr = 0;
  4897. dma_addr_t init_dma_addr = 0;
  4898. dma_addr_t data_dma_addr_curr = 0;
  4899. dma_addr_t oob_dma_addr_curr = 0;
  4900. uint8_t *init_spare_bytes;
  4901.  
  4902. loff_t to_curr = 0;
  4903. unsigned page_count;
  4904. unsigned pages_written = 0;
  4905.  
  4906. uint16_t onenand_startaddr1;
  4907. uint16_t onenand_startaddr8;
  4908. uint16_t onenand_startaddr2;
  4909. uint16_t onenand_startbuffer;
  4910. uint16_t onenand_sysconfig1;
  4911.  
  4912. uint16_t controller_status;
  4913. uint16_t interrupt_status;
  4914. uint16_t ecc_status;
  4915.  
  4916. #if VERBOSE
  4917. pr_info("================================================="
  4918. "================\n");
  4919. pr_info("%s: to 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
  4920. "\noobbuf 0x%p ooblen 0x%x\n",
  4921. __func__, to, ops->mode, ops->datbuf, ops->len,
  4922. ops->oobbuf, ops->ooblen);
  4923. #endif
  4924. if (!mtd) {
  4925. pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
  4926. (uint32_t)mtd);
  4927. return -EINVAL;
  4928. }
  4929. if (to & (mtd->writesize - 1)) {
  4930. pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
  4931. return -EINVAL;
  4932. }
  4933.  
  4934. if ((ops->mode != MTD_OOB_PLACE) && (ops->mode != MTD_OOB_AUTO) &&
  4935. (ops->mode != MTD_OOB_RAW)) {
  4936. pr_err("%s: unsupported ops->mode, %d\n", __func__,
  4937. ops->mode);
  4938. return -EINVAL;
  4939. }
  4940.  
  4941. if (((ops->datbuf == NULL) || (ops->len == 0)) &&
  4942. ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
  4943. pr_err("%s: incorrect ops fields - nothing to do\n",
  4944. __func__);
  4945. return -EINVAL;
  4946. }
  4947.  
  4948. if ((ops->datbuf != NULL) && (ops->len == 0)) {
  4949. pr_err("%s: data buffer passed but length 0\n",
  4950. __func__);
  4951. return -EINVAL;
  4952. }
  4953.  
  4954. if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
  4955. pr_err("%s: oob buffer passed but length 0\n",
  4956. __func__);
  4957. return -EINVAL;
  4958. }
  4959.  
  4960. if (ops->mode != MTD_OOB_RAW) {
  4961. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  4962. /* when ops->datbuf is NULL, ops->len can be ooblen */
  4963. pr_err("%s: unsupported ops->len, %d\n", __func__,
  4964. ops->len);
  4965. return -EINVAL;
  4966. }
  4967. } else {
  4968. if (ops->datbuf != NULL &&
  4969. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  4970. pr_err("%s: unsupported ops->len,"
  4971. " %d for MTD_OOB_RAW\n", __func__, ops->len);
  4972. return -EINVAL;
  4973. }
  4974. }
  4975.  
  4976. if ((ops->mode == MTD_OOB_RAW) && (ops->oobbuf)) {
  4977. pr_err("%s: unsupported operation, oobbuf pointer "
  4978. "passed in for RAW mode, %x\n", __func__,
  4979. (uint32_t)ops->oobbuf);
  4980. return -EINVAL;
  4981. }
  4982.  
  4983. if (ops->oobbuf && !ops->datbuf)
  4984. page_count = ops->ooblen / ((ops->mode == MTD_OOB_AUTO) ?
  4985. mtd->oobavail : mtd->oobsize);
  4986. else if (ops->mode != MTD_OOB_RAW)
  4987. page_count = ops->len / mtd->writesize;
  4988. else
  4989. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  4990.  
  4991. if ((ops->mode == MTD_OOB_AUTO) && (ops->oobbuf != NULL)) {
  4992. if (page_count > 1) {
  4993. pr_err("%s: unsupported ops->ooblen for"
  4994. "AUTO, %d\n", __func__, ops->ooblen);
  4995. return -EINVAL;
  4996. }
  4997. }
  4998.  
  4999. if ((ops->mode == MTD_OOB_PLACE) && (ops->oobbuf != NULL)) {
  5000. if (page_count * mtd->oobsize > ops->ooblen) {
  5001. pr_err("%s: unsupported ops->ooblen for"
  5002. "PLACE, %d\n", __func__, ops->ooblen);
  5003. return -EINVAL;
  5004. }
  5005. }
  5006.  
  5007. if ((ops->mode == MTD_OOB_PLACE) && (ops->ooblen != 0) &&
  5008. (ops->ooboffs != 0)) {
  5009. pr_err("%s: unsupported ops->ooboffs, %d\n",
  5010. __func__, ops->ooboffs);
  5011. return -EINVAL;
  5012. }
  5013.  
  5014. init_spare_bytes = kmalloc(mtd->oobsize, GFP_KERNEL);
  5015. if (!init_spare_bytes) {
  5016. pr_err("%s: failed to alloc init_spare_bytes buffer\n",
  5017. __func__);
  5018. return -ENOMEM;
  5019. }
  5020. for (i = 0; i < mtd->oobsize; i++)
  5021. init_spare_bytes[i] = 0xFF;
  5022.  
  5023. if ((ops->oobbuf) && (ops->mode == MTD_OOB_AUTO)) {
  5024. for (i = 0, k = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++)
  5025. for (j = 0; j < mtd->ecclayout->oobfree[i].length;
  5026. j++) {
  5027. init_spare_bytes[j +
  5028. mtd->ecclayout->oobfree[i].offset]
  5029. = (ops->oobbuf)[k];
  5030. k++;
  5031. }
  5032. }
  5033.  
  5034. if (ops->datbuf) {
  5035. data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
  5036. ops->datbuf, ops->len, DMA_TO_DEVICE);
  5037. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  5038. pr_err("%s: failed to get dma addr for %p\n",
  5039. __func__, ops->datbuf);
  5040. return -EIO;
  5041. }
  5042. }
  5043. if (ops->oobbuf) {
  5044. oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
  5045. ops->oobbuf, ops->ooblen, DMA_TO_DEVICE);
  5046. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  5047. pr_err("%s: failed to get dma addr for %p\n",
  5048. __func__, ops->oobbuf);
  5049. err = -EIO;
  5050. goto err_dma_map_oobbuf_failed;
  5051. }
  5052. }
  5053.  
  5054. init_dma_addr = msm_nand_dma_map(chip->dev, init_spare_bytes, mtd->oobsize,
  5055. DMA_TO_DEVICE);
  5056. if (dma_mapping_error(chip->dev, init_dma_addr)) {
  5057. pr_err("%s: failed to get dma addr for %p\n",
  5058. __func__, init_spare_bytes);
  5059. err = -EIO;
  5060. goto err_dma_map_initbuf_failed;
  5061. }
  5062.  
  5063.  
  5064. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  5065. (chip, sizeof(*dma_buffer))));
  5066.  
  5067. to_curr = to;
  5068.  
  5069. while (page_count-- > 0) {
  5070. cmd = dma_buffer->cmd;
  5071.  
  5072. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  5073. && (to_curr >= (mtd->size>>1))) { /* DDP Device */
  5074. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  5075. (((uint32_t)(to_curr-(mtd->size>>1))
  5076. / mtd->erasesize));
  5077. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  5078. } else {
  5079. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  5080. ((uint32_t)to_curr / mtd->erasesize) ;
  5081. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  5082. }
  5083.  
  5084. onenand_startaddr8 = (((uint32_t)to_curr &
  5085. (mtd->erasesize - 1)) / mtd->writesize) << 2;
  5086. onenand_startbuffer = DATARAM0_0 << 8;
  5087. onenand_sysconfig1 = (ops->mode == MTD_OOB_RAW) ?
  5088. ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
  5089. ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
  5090.  
  5091. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  5092. (nand_sfcmd_mode ? 0 : (1 << 24));
  5093. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(6, 0, 0,
  5094. MSM_NAND_SFCMD_CMDXS,
  5095. nand_sfcmd_mode,
  5096. MSM_NAND_SFCMD_REGWR);
  5097. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(256, 0, 0,
  5098. MSM_NAND_SFCMD_CMDXS,
  5099. nand_sfcmd_mode,
  5100. MSM_NAND_SFCMD_DATWR);
  5101. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(256, 0, 0,
  5102. MSM_NAND_SFCMD_CMDXS,
  5103. nand_sfcmd_mode,
  5104. MSM_NAND_SFCMD_DATWR);
  5105. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
  5106. MSM_NAND_SFCMD_CMDXS,
  5107. nand_sfcmd_mode,
  5108. MSM_NAND_SFCMD_DATWR);
  5109. dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
  5110. MSM_NAND_SFCMD_CMDXS,
  5111. nand_sfcmd_mode,
  5112. MSM_NAND_SFCMD_DATWR);
  5113. dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(256, 0, 0,
  5114. MSM_NAND_SFCMD_CMDXS,
  5115. nand_sfcmd_mode,
  5116. MSM_NAND_SFCMD_DATWR);
  5117. dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(256, 0, 0,
  5118. MSM_NAND_SFCMD_CMDXS,
  5119. nand_sfcmd_mode,
  5120. MSM_NAND_SFCMD_DATWR);
  5121. dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(256, 0, 0,
  5122. MSM_NAND_SFCMD_CMDXS,
  5123. nand_sfcmd_mode,
  5124. MSM_NAND_SFCMD_DATWR);
  5125. dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(256, 0, 0,
  5126. MSM_NAND_SFCMD_CMDXS,
  5127. nand_sfcmd_mode,
  5128. MSM_NAND_SFCMD_DATWR);
  5129. dma_buffer->data.sfcmd[9] = SFLASH_PREPCMD(32, 0, 0,
  5130. MSM_NAND_SFCMD_CMDXS,
  5131. nand_sfcmd_mode,
  5132. MSM_NAND_SFCMD_DATWR);
  5133. dma_buffer->data.sfcmd[10] = SFLASH_PREPCMD(1, 6, 0,
  5134. MSM_NAND_SFCMD_CMDXS,
  5135. nand_sfcmd_mode,
  5136. MSM_NAND_SFCMD_REGWR);
  5137. dma_buffer->data.sfcmd[11] = SFLASH_PREPCMD(0, 0, 32,
  5138. MSM_NAND_SFCMD_CMDXS,
  5139. nand_sfcmd_mode,
  5140. MSM_NAND_SFCMD_INTHI);
  5141. dma_buffer->data.sfcmd[12] = SFLASH_PREPCMD(3, 7, 0,
  5142. MSM_NAND_SFCMD_DATXS,
  5143. nand_sfcmd_mode,
  5144. MSM_NAND_SFCMD_REGRD);
  5145. dma_buffer->data.sfcmd[13] = SFLASH_PREPCMD(4, 10, 0,
  5146. MSM_NAND_SFCMD_CMDXS,
  5147. nand_sfcmd_mode,
  5148. MSM_NAND_SFCMD_REGWR);
  5149. dma_buffer->data.sfexec = 1;
  5150. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  5151. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  5152. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  5153. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  5154. dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
  5155. dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
  5156. dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
  5157. dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
  5158. dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
  5159. dma_buffer->data.sfstat[9] = CLEAN_DATA_32;
  5160. dma_buffer->data.sfstat[10] = CLEAN_DATA_32;
  5161. dma_buffer->data.sfstat[11] = CLEAN_DATA_32;
  5162. dma_buffer->data.sfstat[12] = CLEAN_DATA_32;
  5163. dma_buffer->data.sfstat[13] = CLEAN_DATA_32;
  5164. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  5165. (ONENAND_SYSTEM_CONFIG_1);
  5166. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  5167. (ONENAND_START_ADDRESS_1);
  5168. dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
  5169. (ONENAND_START_ADDRESS_2);
  5170. dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
  5171. (ONENAND_COMMAND);
  5172. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  5173. (ONENAND_INTERRUPT_STATUS);
  5174. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  5175. (ONENAND_SYSTEM_CONFIG_1);
  5176. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  5177. (ONENAND_START_ADDRESS_1);
  5178. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  5179. (onenand_sysconfig1);
  5180. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  5181. (onenand_startaddr1);
  5182. dma_buffer->data.data2 = (onenand_startbuffer << 16) |
  5183. (onenand_startaddr2);
  5184. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  5185. (ONENAND_CMDPROGSPARE);
  5186. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  5187. (CLEAN_DATA_16);
  5188. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  5189. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  5190. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  5191. (ONENAND_STARTADDR1_RES);
  5192. dma_buffer->data.macro[0] = 0x0200;
  5193. dma_buffer->data.macro[1] = 0x0300;
  5194. dma_buffer->data.macro[2] = 0x0400;
  5195. dma_buffer->data.macro[3] = 0x0500;
  5196. dma_buffer->data.macro[4] = 0x0600;
  5197. dma_buffer->data.macro[5] = 0x0700;
  5198. dma_buffer->data.macro[6] = 0x0800;
  5199. dma_buffer->data.macro[7] = 0x0900;
  5200. dma_buffer->data.macro[8] = 0x8010;
  5201.  
  5202.  
  5203. /*************************************************************/
  5204. /* Write necessary address registers in the onenand device */
  5205. /*************************************************************/
  5206.  
  5207. /* Enable and configure the SFlash controller */
  5208. cmd->cmd = 0;
  5209. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  5210. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  5211. cmd->len = 4;
  5212. cmd++;
  5213.  
  5214. /* Block on cmd ready and write CMD register */
  5215. cmd->cmd = DST_CRCI_NAND_CMD;
  5216. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  5217. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5218. cmd->len = 4;
  5219. cmd++;
  5220.  
  5221. /* Write the ADDR0 and ADDR1 registers */
  5222. cmd->cmd = 0;
  5223. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  5224. cmd->dst = MSM_NAND_ADDR0;
  5225. cmd->len = 8;
  5226. cmd++;
  5227.  
  5228. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  5229. cmd->cmd = 0;
  5230. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  5231. cmd->dst = MSM_NAND_ADDR2;
  5232. cmd->len = 16;
  5233. cmd++;
  5234.  
  5235. /* Write the ADDR6 registers */
  5236. cmd->cmd = 0;
  5237. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  5238. cmd->dst = MSM_NAND_ADDR6;
  5239. cmd->len = 4;
  5240. cmd++;
  5241.  
  5242. /* Write the GENP0, GENP1, GENP2, GENP3 registers */
  5243. cmd->cmd = 0;
  5244. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  5245. cmd->dst = MSM_NAND_GENP_REG0;
  5246. cmd->len = 16;
  5247. cmd++;
  5248.  
  5249. /* Write the FLASH_DEV_CMD4,5,6 registers */
  5250. cmd->cmd = 0;
  5251. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5252. cmd->dst = MSM_NAND_DEV_CMD4;
  5253. cmd->len = 12;
  5254. cmd++;
  5255.  
  5256. /* Kick the execute command */
  5257. cmd->cmd = 0;
  5258. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5259. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5260. cmd->len = 4;
  5261. cmd++;
  5262.  
  5263. /* Block on data ready, and read the status register */
  5264. cmd->cmd = SRC_CRCI_NAND_DATA;
  5265. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5266. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  5267. cmd->len = 4;
  5268. cmd++;
  5269.  
  5270. /*************************************************************/
  5271. /* Write the data ram area in the onenand buffer ram */
  5272. /*************************************************************/
  5273.  
  5274. if (ops->datbuf) {
  5275. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  5276. (ONENAND_CMDPROG);
  5277.  
  5278. for (i = 0; i < 8; i++) {
  5279.  
  5280. /* Block on cmd ready and write CMD register */
  5281. cmd->cmd = DST_CRCI_NAND_CMD;
  5282. cmd->src = msm_virt_to_dma(chip,
  5283. &dma_buffer->data.sfcmd[1+i]);
  5284. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5285. cmd->len = 4;
  5286. cmd++;
  5287.  
  5288. /* Trnsfr usr buf contents to nand ctlr buf */
  5289. cmd->cmd = 0;
  5290. cmd->src = data_dma_addr_curr;
  5291. cmd->dst = MSM_NAND_FLASH_BUFFER;
  5292. cmd->len = 512;
  5293. data_dma_addr_curr += 512;
  5294. cmd++;
  5295.  
  5296. /* Write the MACRO1 register */
  5297. cmd->cmd = 0;
  5298. cmd->src = msm_virt_to_dma(chip,
  5299. &dma_buffer->data.macro[i]);
  5300. cmd->dst = MSM_NAND_MACRO1_REG;
  5301. cmd->len = 4;
  5302. cmd++;
  5303.  
  5304. /* Kick the execute command */
  5305. cmd->cmd = 0;
  5306. cmd->src = msm_virt_to_dma(chip,
  5307. &dma_buffer->data.sfexec);
  5308. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5309. cmd->len = 4;
  5310. cmd++;
  5311.  
  5312. /* Block on data rdy, & read status register */
  5313. cmd->cmd = SRC_CRCI_NAND_DATA;
  5314. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5315. cmd->dst = msm_virt_to_dma(chip,
  5316. &dma_buffer->data.sfstat[1+i]);
  5317. cmd->len = 4;
  5318. cmd++;
  5319.  
  5320. }
  5321. }
  5322.  
  5323. /* Block on cmd ready and write CMD register */
  5324. cmd->cmd = DST_CRCI_NAND_CMD;
  5325. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[9]);
  5326. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5327. cmd->len = 4;
  5328. cmd++;
  5329.  
  5330. if ((ops->oobbuf) || (ops->mode == MTD_OOB_RAW)) {
  5331.  
  5332. /* Transfer user buf contents into nand ctlr buffer */
  5333. if (ops->mode == MTD_OOB_AUTO) {
  5334. cmd->cmd = 0;
  5335. cmd->src = init_dma_addr;
  5336. cmd->dst = MSM_NAND_FLASH_BUFFER;
  5337. cmd->len = mtd->oobsize;
  5338. cmd++;
  5339. }
  5340. if (ops->mode == MTD_OOB_PLACE) {
  5341. cmd->cmd = 0;
  5342. cmd->src = oob_dma_addr_curr;
  5343. cmd->dst = MSM_NAND_FLASH_BUFFER;
  5344. cmd->len = mtd->oobsize;
  5345. oob_dma_addr_curr += mtd->oobsize;
  5346. cmd++;
  5347. }
  5348. if (ops->mode == MTD_OOB_RAW) {
  5349. cmd->cmd = 0;
  5350. cmd->src = data_dma_addr_curr;
  5351. cmd->dst = MSM_NAND_FLASH_BUFFER;
  5352. cmd->len = mtd->oobsize;
  5353. data_dma_addr_curr += mtd->oobsize;
  5354. cmd++;
  5355. }
  5356. } else {
  5357. cmd->cmd = 0;
  5358. cmd->src = init_dma_addr;
  5359. cmd->dst = MSM_NAND_FLASH_BUFFER;
  5360. cmd->len = mtd->oobsize;
  5361. cmd++;
  5362. }
  5363.  
  5364. /* Write the MACRO1 register */
  5365. cmd->cmd = 0;
  5366. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.macro[8]);
  5367. cmd->dst = MSM_NAND_MACRO1_REG;
  5368. cmd->len = 4;
  5369. cmd++;
  5370.  
  5371. /* Kick the execute command */
  5372. cmd->cmd = 0;
  5373. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5374. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5375. cmd->len = 4;
  5376. cmd++;
  5377.  
  5378. /* Block on data ready, and read the status register */
  5379. cmd->cmd = SRC_CRCI_NAND_DATA;
  5380. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5381. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[9]);
  5382. cmd->len = 4;
  5383. cmd++;
  5384.  
  5385. /*********************************************************/
  5386. /* Issuing write command */
  5387. /*********************************************************/
  5388.  
  5389. /* Block on cmd ready and write CMD register */
  5390. cmd->cmd = DST_CRCI_NAND_CMD;
  5391. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[10]);
  5392. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5393. cmd->len = 4;
  5394. cmd++;
  5395.  
  5396. /* Kick the execute command */
  5397. cmd->cmd = 0;
  5398. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5399. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5400. cmd->len = 4;
  5401. cmd++;
  5402.  
  5403. /* Block on data ready, and read the status register */
  5404. cmd->cmd = SRC_CRCI_NAND_DATA;
  5405. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5406. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[10]);
  5407. cmd->len = 4;
  5408. cmd++;
  5409.  
  5410. /*************************************************************/
  5411. /* Wait for the interrupt from the Onenand device controller */
  5412. /*************************************************************/
  5413.  
  5414. /* Block on cmd ready and write CMD register */
  5415. cmd->cmd = DST_CRCI_NAND_CMD;
  5416. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[11]);
  5417. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5418. cmd->len = 4;
  5419. cmd++;
  5420.  
  5421. /* Kick the execute command */
  5422. cmd->cmd = 0;
  5423. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5424. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5425. cmd->len = 4;
  5426. cmd++;
  5427.  
  5428. /* Block on data ready, and read the status register */
  5429. cmd->cmd = SRC_CRCI_NAND_DATA;
  5430. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5431. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[11]);
  5432. cmd->len = 4;
  5433. cmd++;
  5434.  
  5435. /*************************************************************/
  5436. /* Read necessary status registers from the onenand device */
  5437. /*************************************************************/
  5438.  
  5439. /* Block on cmd ready and write CMD register */
  5440. cmd->cmd = DST_CRCI_NAND_CMD;
  5441. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[12]);
  5442. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5443. cmd->len = 4;
  5444. cmd++;
  5445.  
  5446. /* Kick the execute command */
  5447. cmd->cmd = 0;
  5448. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5449. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5450. cmd->len = 4;
  5451. cmd++;
  5452.  
  5453. /* Block on data ready, and read the status register */
  5454. cmd->cmd = SRC_CRCI_NAND_DATA;
  5455. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5456. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[12]);
  5457. cmd->len = 4;
  5458. cmd++;
  5459.  
  5460. /* Read the GENP3 register */
  5461. cmd->cmd = 0;
  5462. cmd->src = MSM_NAND_GENP_REG3;
  5463. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  5464. cmd->len = 4;
  5465. cmd++;
  5466.  
  5467. /* Read the DEVCMD4 register */
  5468. cmd->cmd = 0;
  5469. cmd->src = MSM_NAND_DEV_CMD4;
  5470. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5471. cmd->len = 4;
  5472. cmd++;
  5473.  
  5474. /*************************************************************/
  5475. /* Restore the necessary registers to proper values */
  5476. /*************************************************************/
  5477.  
  5478. /* Block on cmd ready and write CMD register */
  5479. cmd->cmd = DST_CRCI_NAND_CMD;
  5480. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[13]);
  5481. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5482. cmd->len = 4;
  5483. cmd++;
  5484.  
  5485. /* Kick the execute command */
  5486. cmd->cmd = 0;
  5487. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5488. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5489. cmd->len = 4;
  5490. cmd++;
  5491.  
  5492. /* Block on data ready, and read the status register */
  5493. cmd->cmd = SRC_CRCI_NAND_DATA;
  5494. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5495. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[13]);
  5496. cmd->len = 4;
  5497. cmd++;
  5498.  
  5499.  
  5500. BUILD_BUG_ON(73 != ARRAY_SIZE(dma_buffer->cmd));
  5501. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  5502. dma_buffer->cmd[0].cmd |= CMD_OCB;
  5503. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  5504.  
  5505. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  5506. >> 3) | CMD_PTR_LP;
  5507.  
  5508. dsb();
  5509. msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
  5510. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  5511. &dma_buffer->cmdptr)));
  5512. dsb();
  5513.  
  5514. ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
  5515. interrupt_status = (dma_buffer->data.data4 >> 0)&0x0000FFFF;
  5516. controller_status = (dma_buffer->data.data4 >> 16)&0x0000FFFF;
  5517.  
  5518. #if VERBOSE
  5519. pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
  5520. " %x %x %x\n", __func__,
  5521. dma_buffer->data.sfstat[0],
  5522. dma_buffer->data.sfstat[1],
  5523. dma_buffer->data.sfstat[2],
  5524. dma_buffer->data.sfstat[3],
  5525. dma_buffer->data.sfstat[4],
  5526. dma_buffer->data.sfstat[5],
  5527. dma_buffer->data.sfstat[6],
  5528. dma_buffer->data.sfstat[7],
  5529. dma_buffer->data.sfstat[8],
  5530. dma_buffer->data.sfstat[9],
  5531. dma_buffer->data.sfstat[10],
  5532. dma_buffer->data.sfstat[11],
  5533. dma_buffer->data.sfstat[12],
  5534. dma_buffer->data.sfstat[13]);
  5535.  
  5536. pr_info("%s: controller_status = %x\n", __func__,
  5537. controller_status);
  5538. pr_info("%s: interrupt_status = %x\n", __func__,
  5539. interrupt_status);
  5540. pr_info("%s: ecc_status = %x\n", __func__,
  5541. ecc_status);
  5542. #endif
  5543. /* Check for errors, protection violations etc */
  5544. if ((controller_status != 0)
  5545. || (dma_buffer->data.sfstat[0] & 0x110)
  5546. || (dma_buffer->data.sfstat[10] & 0x110)
  5547. || (dma_buffer->data.sfstat[11] & 0x110)
  5548. || (dma_buffer->data.sfstat[12] & 0x110)
  5549. || (dma_buffer->data.sfstat[13] & 0x110)
  5550. || ((dma_buffer->data.sfstat[1] & 0x110) &&
  5551. (ops->datbuf))
  5552. || ((dma_buffer->data.sfstat[2] & 0x110) &&
  5553. (ops->datbuf))
  5554. || ((dma_buffer->data.sfstat[3] & 0x110) &&
  5555. (ops->datbuf))
  5556. || ((dma_buffer->data.sfstat[4] & 0x110) &&
  5557. (ops->datbuf))
  5558. || ((dma_buffer->data.sfstat[5] & 0x110) &&
  5559. (ops->datbuf))
  5560. || ((dma_buffer->data.sfstat[6] & 0x110) &&
  5561. (ops->datbuf))
  5562. || ((dma_buffer->data.sfstat[7] & 0x110) &&
  5563. (ops->datbuf))
  5564. || ((dma_buffer->data.sfstat[8] & 0x110) &&
  5565. (ops->datbuf))
  5566. || ((dma_buffer->data.sfstat[9] & 0x110) &&
  5567. ((ops->oobbuf)
  5568. || (ops->mode == MTD_OOB_RAW)))) {
  5569. pr_info("%s: ECC/MPU/OP error\n", __func__);
  5570. err = -EIO;
  5571. }
  5572.  
  5573. if (err)
  5574. break;
  5575. pages_written++;
  5576. to_curr += mtd->writesize;
  5577. }
  5578.  
  5579. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  5580.  
  5581. dma_unmap_page(chip->dev, init_dma_addr, mtd->oobsize, DMA_TO_DEVICE);
  5582.  
  5583. err_dma_map_initbuf_failed:
  5584. if (ops->oobbuf) {
  5585. dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
  5586. DMA_TO_DEVICE);
  5587. }
  5588. err_dma_map_oobbuf_failed:
  5589. if (ops->datbuf) {
  5590. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  5591. DMA_TO_DEVICE);
  5592. }
  5593.  
  5594. if (err) {
  5595. pr_err("%s: %llx %x %x failed\n", __func__, to_curr,
  5596. ops->datbuf ? ops->len : 0, ops->ooblen);
  5597. } else {
  5598. ops->retlen = ops->oobretlen = 0;
  5599. if (ops->datbuf != NULL) {
  5600. if (ops->mode != MTD_OOB_RAW)
  5601. ops->retlen = mtd->writesize * pages_written;
  5602. else
  5603. ops->retlen = (mtd->writesize + mtd->oobsize)
  5604. * pages_written;
  5605. }
  5606. if (ops->oobbuf != NULL) {
  5607. if (ops->mode == MTD_OOB_AUTO)
  5608. ops->oobretlen = mtd->oobavail * pages_written;
  5609. else
  5610. ops->oobretlen = mtd->oobsize * pages_written;
  5611. }
  5612. }
  5613.  
  5614. #if VERBOSE
  5615. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  5616. __func__, err, ops->retlen, ops->oobretlen);
  5617.  
  5618. pr_info("================================================="
  5619. "================\n");
  5620. #endif
  5621. kfree(init_spare_bytes);
  5622. return err;
  5623. }
  5624.  
  5625. static int msm_onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
  5626. size_t *retlen, const u_char *buf)
  5627. {
  5628. int ret;
  5629. struct mtd_oob_ops ops;
  5630.  
  5631. ops.mode = MTD_OOB_PLACE;
  5632. ops.datbuf = (uint8_t *)buf;
  5633. ops.len = len;
  5634. ops.retlen = 0;
  5635. ops.oobbuf = NULL;
  5636. ops.ooblen = 0;
  5637. ops.oobretlen = 0;
  5638. ret = msm_onenand_write_oob(mtd, to, &ops);
  5639. *retlen = ops.retlen;
  5640.  
  5641. return ret;
  5642. }
  5643.  
  5644. static int msm_onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
  5645. {
  5646. struct msm_nand_chip *chip = mtd->priv;
  5647.  
  5648. struct {
  5649. dmov_s cmd[20];
  5650. unsigned cmdptr;
  5651. struct {
  5652. uint32_t sfbcfg;
  5653. uint32_t sfcmd[4];
  5654. uint32_t sfexec;
  5655. uint32_t sfstat[4];
  5656. uint32_t addr0;
  5657. uint32_t addr1;
  5658. uint32_t addr2;
  5659. uint32_t addr3;
  5660. uint32_t addr4;
  5661. uint32_t addr5;
  5662. uint32_t addr6;
  5663. uint32_t data0;
  5664. uint32_t data1;
  5665. uint32_t data2;
  5666. uint32_t data3;
  5667. uint32_t data4;
  5668. uint32_t data5;
  5669. uint32_t data6;
  5670. } data;
  5671. } *dma_buffer;
  5672. dmov_s *cmd;
  5673.  
  5674. int err = 0;
  5675.  
  5676. uint16_t onenand_startaddr1;
  5677. uint16_t onenand_startaddr8;
  5678. uint16_t onenand_startaddr2;
  5679. uint16_t onenand_startbuffer;
  5680.  
  5681. uint16_t controller_status;
  5682. uint16_t interrupt_status;
  5683. uint16_t ecc_status;
  5684.  
  5685. uint64_t temp;
  5686.  
  5687. #if VERBOSE
  5688. pr_info("================================================="
  5689. "================\n");
  5690. pr_info("%s: addr 0x%llx len 0x%llx\n",
  5691. __func__, instr->addr, instr->len);
  5692. #endif
  5693. if (instr->addr & (mtd->erasesize - 1)) {
  5694. pr_err("%s: Unsupported erase address, 0x%llx\n",
  5695. __func__, instr->addr);
  5696. return -EINVAL;
  5697. }
  5698. if (instr->len != mtd->erasesize) {
  5699. pr_err("%s: Unsupported erase len, %lld\n",
  5700. __func__, instr->len);
  5701. return -EINVAL;
  5702. }
  5703.  
  5704. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  5705. (chip, sizeof(*dma_buffer))));
  5706.  
  5707. cmd = dma_buffer->cmd;
  5708.  
  5709. temp = instr->addr;
  5710.  
  5711. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  5712. && (temp >= (mtd->size>>1))) { /* DDP Device */
  5713. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  5714. (((uint32_t)(temp-(mtd->size>>1))
  5715. / mtd->erasesize));
  5716. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  5717. } else {
  5718. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  5719. ((uint32_t)temp / mtd->erasesize) ;
  5720. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  5721. }
  5722.  
  5723. onenand_startaddr8 = 0x0000;
  5724. onenand_startbuffer = DATARAM0_0 << 8;
  5725.  
  5726. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  5727. (nand_sfcmd_mode ? 0 : (1 << 24));
  5728. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
  5729. MSM_NAND_SFCMD_CMDXS,
  5730. nand_sfcmd_mode,
  5731. MSM_NAND_SFCMD_REGWR);
  5732. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
  5733. MSM_NAND_SFCMD_CMDXS,
  5734. nand_sfcmd_mode,
  5735. MSM_NAND_SFCMD_INTHI);
  5736. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
  5737. MSM_NAND_SFCMD_DATXS,
  5738. nand_sfcmd_mode,
  5739. MSM_NAND_SFCMD_REGRD);
  5740. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
  5741. MSM_NAND_SFCMD_CMDXS,
  5742. nand_sfcmd_mode,
  5743. MSM_NAND_SFCMD_REGWR);
  5744. dma_buffer->data.sfexec = 1;
  5745. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  5746. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  5747. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  5748. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  5749. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  5750. (ONENAND_SYSTEM_CONFIG_1);
  5751. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  5752. (ONENAND_START_ADDRESS_1);
  5753. dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
  5754. (ONENAND_START_ADDRESS_2);
  5755. dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
  5756. (ONENAND_COMMAND);
  5757. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  5758. (ONENAND_INTERRUPT_STATUS);
  5759. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  5760. (ONENAND_SYSTEM_CONFIG_1);
  5761. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  5762. (ONENAND_START_ADDRESS_1);
  5763. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  5764. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  5765. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  5766. (onenand_startaddr1);
  5767. dma_buffer->data.data2 = (onenand_startbuffer << 16) |
  5768. (onenand_startaddr2);
  5769. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  5770. (ONENAND_CMDERAS);
  5771. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  5772. (CLEAN_DATA_16);
  5773. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  5774. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  5775. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  5776. (ONENAND_STARTADDR1_RES);
  5777.  
  5778. /***************************************************************/
  5779. /* Write the necessary address registers in the onenand device */
  5780. /***************************************************************/
  5781.  
  5782. /* Enable and configure the SFlash controller */
  5783. cmd->cmd = 0;
  5784. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  5785. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  5786. cmd->len = 4;
  5787. cmd++;
  5788.  
  5789. /* Block on cmd ready and write CMD register */
  5790. cmd->cmd = DST_CRCI_NAND_CMD;
  5791. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  5792. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5793. cmd->len = 4;
  5794. cmd++;
  5795.  
  5796. /* Write the ADDR0 and ADDR1 registers */
  5797. cmd->cmd = 0;
  5798. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  5799. cmd->dst = MSM_NAND_ADDR0;
  5800. cmd->len = 8;
  5801. cmd++;
  5802.  
  5803. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  5804. cmd->cmd = 0;
  5805. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  5806. cmd->dst = MSM_NAND_ADDR2;
  5807. cmd->len = 16;
  5808. cmd++;
  5809.  
  5810. /* Write the ADDR6 registers */
  5811. cmd->cmd = 0;
  5812. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  5813. cmd->dst = MSM_NAND_ADDR6;
  5814. cmd->len = 4;
  5815. cmd++;
  5816.  
  5817. /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
  5818. cmd->cmd = 0;
  5819. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  5820. cmd->dst = MSM_NAND_GENP_REG0;
  5821. cmd->len = 16;
  5822. cmd++;
  5823.  
  5824. /* Write the FLASH_DEV_CMD4,5,6 registers */
  5825. cmd->cmd = 0;
  5826. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5827. cmd->dst = MSM_NAND_DEV_CMD4;
  5828. cmd->len = 12;
  5829. cmd++;
  5830.  
  5831. /* Kick the execute command */
  5832. cmd->cmd = 0;
  5833. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5834. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5835. cmd->len = 4;
  5836. cmd++;
  5837.  
  5838. /* Block on data ready, and read the status register */
  5839. cmd->cmd = SRC_CRCI_NAND_DATA;
  5840. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5841. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  5842. cmd->len = 4;
  5843. cmd++;
  5844.  
  5845. /***************************************************************/
  5846. /* Wait for the interrupt from the Onenand device controller */
  5847. /***************************************************************/
  5848.  
  5849. /* Block on cmd ready and write CMD register */
  5850. cmd->cmd = DST_CRCI_NAND_CMD;
  5851. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
  5852. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5853. cmd->len = 4;
  5854. cmd++;
  5855.  
  5856. /* Kick the execute command */
  5857. cmd->cmd = 0;
  5858. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5859. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5860. cmd->len = 4;
  5861. cmd++;
  5862.  
  5863. /* Block on data ready, and read the status register */
  5864. cmd->cmd = SRC_CRCI_NAND_DATA;
  5865. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5866. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
  5867. cmd->len = 4;
  5868. cmd++;
  5869.  
  5870. /***************************************************************/
  5871. /* Read the necessary status registers from the onenand device */
  5872. /***************************************************************/
  5873.  
  5874. /* Block on cmd ready and write CMD register */
  5875. cmd->cmd = DST_CRCI_NAND_CMD;
  5876. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
  5877. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5878. cmd->len = 4;
  5879. cmd++;
  5880.  
  5881. /* Kick the execute command */
  5882. cmd->cmd = 0;
  5883. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5884. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5885. cmd->len = 4;
  5886. cmd++;
  5887.  
  5888. /* Block on data ready, and read the status register */
  5889. cmd->cmd = SRC_CRCI_NAND_DATA;
  5890. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5891. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
  5892. cmd->len = 4;
  5893. cmd++;
  5894.  
  5895. /* Read the GENP3 register */
  5896. cmd->cmd = 0;
  5897. cmd->src = MSM_NAND_GENP_REG3;
  5898. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  5899. cmd->len = 4;
  5900. cmd++;
  5901.  
  5902. /* Read the DEVCMD4 register */
  5903. cmd->cmd = 0;
  5904. cmd->src = MSM_NAND_DEV_CMD4;
  5905. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5906. cmd->len = 4;
  5907. cmd++;
  5908.  
  5909. /***************************************************************/
  5910. /* Restore the necessary registers to proper values */
  5911. /***************************************************************/
  5912.  
  5913. /* Block on cmd ready and write CMD register */
  5914. cmd->cmd = DST_CRCI_NAND_CMD;
  5915. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
  5916. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5917. cmd->len = 4;
  5918. cmd++;
  5919.  
  5920. /* Kick the execute command */
  5921. cmd->cmd = 0;
  5922. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5923. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5924. cmd->len = 4;
  5925. cmd++;
  5926.  
  5927. /* Block on data ready, and read the status register */
  5928. cmd->cmd = SRC_CRCI_NAND_DATA;
  5929. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5930. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
  5931. cmd->len = 4;
  5932. cmd++;
  5933.  
  5934.  
  5935. BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
  5936. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  5937. dma_buffer->cmd[0].cmd |= CMD_OCB;
  5938. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  5939.  
  5940. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  5941. >> 3) | CMD_PTR_LP;
  5942.  
  5943. dsb();
  5944. msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST
  5945. | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  5946. &dma_buffer->cmdptr)));
  5947. dsb();
  5948.  
  5949. ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
  5950. interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
  5951. controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
  5952.  
  5953. #if VERBOSE
  5954. pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
  5955. dma_buffer->data.sfstat[0],
  5956. dma_buffer->data.sfstat[1],
  5957. dma_buffer->data.sfstat[2],
  5958. dma_buffer->data.sfstat[3]);
  5959.  
  5960. pr_info("%s: controller_status = %x\n", __func__,
  5961. controller_status);
  5962. pr_info("%s: interrupt_status = %x\n", __func__,
  5963. interrupt_status);
  5964. pr_info("%s: ecc_status = %x\n", __func__,
  5965. ecc_status);
  5966. #endif
  5967. /* Check for errors, protection violations etc */
  5968. if ((controller_status != 0)
  5969. || (dma_buffer->data.sfstat[0] & 0x110)
  5970. || (dma_buffer->data.sfstat[1] & 0x110)
  5971. || (dma_buffer->data.sfstat[2] & 0x110)
  5972. || (dma_buffer->data.sfstat[3] & 0x110)) {
  5973. pr_err("%s: ECC/MPU/OP error\n", __func__);
  5974. err = -EIO;
  5975. }
  5976.  
  5977. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  5978.  
  5979. if (err) {
  5980. pr_err("%s: Erase failed, 0x%llx\n", __func__,
  5981. instr->addr);
  5982. instr->fail_addr = instr->addr;
  5983. instr->state = MTD_ERASE_FAILED;
  5984. } else {
  5985. instr->state = MTD_ERASE_DONE;
  5986. instr->fail_addr = 0xffffffff;
  5987. mtd_erase_callback(instr);
  5988. }
  5989.  
  5990. #if VERBOSE
  5991. pr_info("\n%s: ret %d\n", __func__, err);
  5992. pr_info("===================================================="
  5993. "=============\n");
  5994. #endif
  5995. return err;
  5996. }
  5997.  
  5998. static int msm_onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
  5999. {
  6000. struct mtd_oob_ops ops;
  6001. int rval, i;
  6002. int ret = 0;
  6003. uint8_t *buffer;
  6004. uint8_t *oobptr;
  6005.  
  6006. if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
  6007. pr_err("%s: unsupported block address, 0x%x\n",
  6008. __func__, (uint32_t)ofs);
  6009. return -EINVAL;
  6010. }
  6011.  
  6012. buffer = kmalloc(4224, GFP_KERNEL|GFP_DMA);
  6013. if (buffer == 0) {
  6014. pr_err("%s: Could not kmalloc for buffer\n",
  6015. __func__);
  6016. return -ENOMEM;
  6017. }
  6018.  
  6019. memset(buffer, 0x00, 4224);
  6020. oobptr = &(buffer[4096]);
  6021.  
  6022. ops.mode = MTD_OOB_RAW;
  6023. ops.len = 4224;
  6024. ops.retlen = 0;
  6025. ops.ooblen = 0;
  6026. ops.oobretlen = 0;
  6027. ops.ooboffs = 0;
  6028. ops.datbuf = buffer;
  6029. ops.oobbuf = NULL;
  6030.  
  6031. for (i = 0; i < 2; i++) {
  6032. ofs = ofs + i*mtd->writesize;
  6033. rval = msm_onenand_read_oob(mtd, ofs, &ops);
  6034. if (rval) {
  6035. pr_err("%s: Error in reading bad blk info\n",
  6036. __func__);
  6037. ret = rval;
  6038. break;
  6039. }
  6040. if ((oobptr[0] != 0xFF) || (oobptr[1] != 0xFF) ||
  6041. (oobptr[16] != 0xFF) || (oobptr[17] != 0xFF) ||
  6042. (oobptr[32] != 0xFF) || (oobptr[33] != 0xFF) ||
  6043. (oobptr[48] != 0xFF) || (oobptr[49] != 0xFF) ||
  6044. (oobptr[64] != 0xFF) || (oobptr[65] != 0xFF) ||
  6045. (oobptr[80] != 0xFF) || (oobptr[81] != 0xFF) ||
  6046. (oobptr[96] != 0xFF) || (oobptr[97] != 0xFF) ||
  6047. (oobptr[112] != 0xFF) || (oobptr[113] != 0xFF)
  6048. ) {
  6049. ret = 1;
  6050. break;
  6051. }
  6052. }
  6053.  
  6054. kfree(buffer);
  6055.  
  6056. #if VERBOSE
  6057. if (ret == 1)
  6058. pr_info("%s : Block containing 0x%x is bad\n",
  6059. __func__, (unsigned int)ofs);
  6060. #endif
  6061. return ret;
  6062. }
  6063.  
  6064. static int msm_onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
  6065. {
  6066. struct mtd_oob_ops ops;
  6067. int rval, i;
  6068. int ret = 0;
  6069. uint8_t *buffer;
  6070.  
  6071. if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
  6072. pr_err("%s: unsupported block address, 0x%x\n",
  6073. __func__, (uint32_t)ofs);
  6074. return -EINVAL;
  6075. }
  6076.  
  6077. buffer = page_address(ZERO_PAGE());
  6078.  
  6079. ops.mode = MTD_OOB_RAW;
  6080. ops.len = 4224;
  6081. ops.retlen = 0;
  6082. ops.ooblen = 0;
  6083. ops.oobretlen = 0;
  6084. ops.ooboffs = 0;
  6085. ops.datbuf = buffer;
  6086. ops.oobbuf = NULL;
  6087.  
  6088. for (i = 0; i < 2; i++) {
  6089. ofs = ofs + i*mtd->writesize;
  6090. rval = msm_onenand_write_oob(mtd, ofs, &ops);
  6091. if (rval) {
  6092. pr_err("%s: Error in writing bad blk info\n",
  6093. __func__);
  6094. ret = rval;
  6095. break;
  6096. }
  6097. }
  6098.  
  6099. return ret;
  6100. }
  6101.  
/*
 * Unlock (clear write protection on) a range of OneNAND blocks.
 *
 * @ofs: start offset, must be erase-block aligned
 * @len: length in bytes, must be a multiple of the erase block size
 *
 * For each block in the range, a DMA (datamover) command list is built
 * that drives the SFlash controller through four phases:
 *   1. write the block address registers and issue ONENAND_CMD_UNLOCK,
 *   2. wait for the interrupt from the OneNAND device controller,
 *   3. read back the write-protection, interrupt and controller status,
 *   4. restore the address/config registers to their reset values.
 * After execution, the block must report ONENAND_WP_US (unlocked) in
 * its write-protection status or the function fails.
 *
 * Returns 0 on success, -EINVAL for misaligned/out-of-range arguments,
 * or -EIO on a controller/protection error (stops at the first failing
 * block).
 */
static int msm_onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct msm_nand_chip *chip = mtd->priv;

	/*
	 * DMA-visible scratch area: the datamover command list plus the
	 * register images it reads from / writes back into.  The layout of
	 * 'data' must match the multi-register transfers below (e.g. the
	 * 16-byte write starting at addr2 covers addr2..addr5).
	 */
	struct {
		dmov_s cmd[20];
		unsigned cmdptr;
		struct {
			uint32_t sfbcfg;
			uint32_t sfcmd[4];
			uint32_t sfexec;
			uint32_t sfstat[4];
			uint32_t addr0;
			uint32_t addr1;
			uint32_t addr2;
			uint32_t addr3;
			uint32_t addr4;
			uint32_t addr5;
			uint32_t addr6;
			uint32_t data0;
			uint32_t data1;
			uint32_t data2;
			uint32_t data3;
			uint32_t data4;
			uint32_t data5;
			uint32_t data6;
		} data;
	} *dma_buffer;
	dmov_s *cmd;

	int err = 0;

	/* OneNAND start-address register values for the current block */
	uint16_t onenand_startaddr1;
	uint16_t onenand_startaddr8;
	uint16_t onenand_startaddr2;
	uint16_t onenand_startblock;

	/* Status values read back from the device after the command */
	uint16_t controller_status;
	uint16_t interrupt_status;
	uint16_t write_prot_status;

	uint64_t start_ofs;

#if VERBOSE
	pr_info("===================================================="
			"=============\n");
	pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
#endif
	/* 'ofs' & 'len' should align to block size */
	if (ofs&(mtd->erasesize - 1)) {
		pr_err("%s: Unsupported ofs address, 0x%llx\n",
				__func__, ofs);
		return -EINVAL;
	}

	if (len&(mtd->erasesize - 1)) {
		pr_err("%s: Unsupported len, %lld\n",
				__func__, len);
		return -EINVAL;
	}

	if (ofs+len > mtd->size) {
		pr_err("%s: Maximum chip size exceeded\n", __func__);
		return -EINVAL;
	}

	/* Sleep until a DMA scratch buffer slot becomes available */
	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	/* Unlock one erase block per iteration */
	for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
#if VERBOSE
		pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
#endif

		cmd = dma_buffer->cmd;
		/*
		 * On dual-die (DDP) parts the upper half of the address
		 * space lives on flash core 1; addresses are rebased to
		 * that core's block numbering.
		 */
		if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
			&& (ofs >= (mtd->size>>1))) { /* DDP Device */
			onenand_startaddr1 = DEVICE_FLASHCORE_1 |
				(((uint32_t)(ofs - (mtd->size>>1))
						/ mtd->erasesize));
			onenand_startaddr2 = DEVICE_BUFFERRAM_1;
			onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
						/ mtd->erasesize);
		} else {
			onenand_startaddr1 = DEVICE_FLASHCORE_0 |
				((uint32_t)ofs / mtd->erasesize) ;
			onenand_startaddr2 = DEVICE_BUFFERRAM_0;
			onenand_startblock = ((uint32_t)ofs
						/ mtd->erasesize);
		}

		onenand_startaddr8 = 0x0000;
		dma_buffer->data.sfbcfg = SFLASH_BCFG |
					(nand_sfcmd_mode ? 0 : (1 << 24));
		/* Phase 1: write 7 registers (address setup + command) */
		dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
							MSM_NAND_SFCMD_CMDXS,
							nand_sfcmd_mode,
							MSM_NAND_SFCMD_REGWR);
		/* Phase 2: wait for the device interrupt */
		dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
							MSM_NAND_SFCMD_CMDXS,
							nand_sfcmd_mode,
							MSM_NAND_SFCMD_INTHI);
		/* Phase 3: read 3 status registers */
		dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
							MSM_NAND_SFCMD_DATXS,
							nand_sfcmd_mode,
							MSM_NAND_SFCMD_REGRD);
		/* Phase 4: write 4 registers to restore defaults */
		dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
							MSM_NAND_SFCMD_CMDXS,
							nand_sfcmd_mode,
							MSM_NAND_SFCMD_REGWR);
		dma_buffer->data.sfexec = 1;
		dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
		dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
		dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
		dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
		/* Each addrN packs two 16-bit OneNAND register addresses */
		dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
						(ONENAND_SYSTEM_CONFIG_1);
		dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
						(ONENAND_START_ADDRESS_1);
		dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
						(ONENAND_START_ADDRESS_2);
		dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
						(ONENAND_COMMAND);
		dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
						(ONENAND_INTERRUPT_STATUS);
		dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
						(ONENAND_SYSTEM_CONFIG_1);
		dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
						(ONENAND_START_ADDRESS_1);
		/* Matching data words for the register writes above */
		dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
		dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
						(onenand_startaddr1);
		dma_buffer->data.data2 = (onenand_startblock << 16) |
						(onenand_startaddr2);
		/* The actual unlock command for this block */
		dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
						(ONENAND_CMD_UNLOCK);
		dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
						(CLEAN_DATA_16);
		dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
				(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
		dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
						(ONENAND_STARTADDR1_RES);

		/*************************************************************/
		/* Write the necessary address reg in the onenand device     */
		/*************************************************************/

		/* Enable and configure the SFlash controller */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
		cmd->len = 4;
		cmd++;

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Write the ADDR0 and ADDR1 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
		cmd->dst = MSM_NAND_ADDR0;
		cmd->len = 8;
		cmd++;

		/* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
		cmd->dst = MSM_NAND_ADDR2;
		cmd->len = 16;
		cmd++;

		/* Write the ADDR6 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
		cmd->dst = MSM_NAND_ADDR6;
		cmd->len = 4;
		cmd++;

		/* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
		cmd->dst = MSM_NAND_GENP_REG0;
		cmd->len = 16;
		cmd++;

		/* Write the FLASH_DEV_CMD4,5,6 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
		cmd->dst = MSM_NAND_DEV_CMD4;
		cmd->len = 12;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
		cmd->len = 4;
		cmd++;

		/*************************************************************/
		/* Wait for the interrupt from the Onenand device controller */
		/*************************************************************/

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
		cmd->len = 4;
		cmd++;

		/*********************************************************/
		/* Read the necessary status reg from the onenand device */
		/*********************************************************/

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
		cmd->len = 4;
		cmd++;

		/* Read the GENP3 register (write-protect status, high half) */
		cmd->cmd = 0;
		cmd->src = MSM_NAND_GENP_REG3;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
		cmd->len = 4;
		cmd++;

		/* Read the DEVCMD4 register (interrupt + controller status) */
		cmd->cmd = 0;
		cmd->src = MSM_NAND_DEV_CMD4;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
		cmd->len = 4;
		cmd++;

		/************************************************************/
		/* Restore the necessary registers to proper values         */
		/************************************************************/

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
		cmd->len = 4;
		cmd++;


		/* The whole list must fit in the 20 preallocated slots */
		BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
		/* Mark first/last commands for datamover chaining semantics */
		dma_buffer->cmd[0].cmd |= CMD_OCB;
		cmd[-1].cmd |= CMD_OCU | CMD_LC;

		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
				>> 3) | CMD_PTR_LP;

		/* Barriers ensure the command list is visible to the DMA
		 * engine before kickoff, and results visible after. */
		dsb();
		msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
			&dma_buffer->cmdptr)));
		dsb();

		/* Unpack the 16-bit status halves read back via GENP3/DEVCMD4 */
		write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
		interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
		controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;

#if VERBOSE
		pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
					dma_buffer->data.sfstat[0],
					dma_buffer->data.sfstat[1],
					dma_buffer->data.sfstat[2],
					dma_buffer->data.sfstat[3]);

		pr_info("%s: controller_status = %x\n", __func__,
					controller_status);
		pr_info("%s: interrupt_status = %x\n", __func__,
					interrupt_status);
		pr_info("%s: write_prot_status = %x\n", __func__,
					write_prot_status);
#endif
		/* Check for errors, protection violations etc */
		if ((controller_status != 0)
				|| (dma_buffer->data.sfstat[0] & 0x110)
				|| (dma_buffer->data.sfstat[1] & 0x110)
				|| (dma_buffer->data.sfstat[2] & 0x110)
				|| (dma_buffer->data.sfstat[3] & 0x110)) {
			pr_err("%s: ECC/MPU/OP error\n", __func__);
			err = -EIO;
		}

		/* The block must now report "unlocked" write-protect status */
		if (!(write_prot_status & ONENAND_WP_US)) {
			pr_err("%s: Unexpected status ofs = 0x%llx,"
				"wp_status = %x\n",
				__func__, ofs, write_prot_status);
			err = -EIO;
		}

		if (err)
			break;
	}

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));

#if VERBOSE
	pr_info("\n%s: ret %d\n", __func__, err);
	pr_info("===================================================="
			"=============\n");
#endif
	return err;
}
  6465.  
  6466. static int msm_onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  6467. {
  6468. struct msm_nand_chip *chip = mtd->priv;
  6469.  
  6470. struct {
  6471. dmov_s cmd[20];
  6472. unsigned cmdptr;
  6473. struct {
  6474. uint32_t sfbcfg;
  6475. uint32_t sfcmd[4];
  6476. uint32_t sfexec;
  6477. uint32_t sfstat[4];
  6478. uint32_t addr0;
  6479. uint32_t addr1;
  6480. uint32_t addr2;
  6481. uint32_t addr3;
  6482. uint32_t addr4;
  6483. uint32_t addr5;
  6484. uint32_t addr6;
  6485. uint32_t data0;
  6486. uint32_t data1;
  6487. uint32_t data2;
  6488. uint32_t data3;
  6489. uint32_t data4;
  6490. uint32_t data5;
  6491. uint32_t data6;
  6492. } data;
  6493. } *dma_buffer;
  6494. dmov_s *cmd;
  6495.  
  6496. int err = 0;
  6497.  
  6498. uint16_t onenand_startaddr1;
  6499. uint16_t onenand_startaddr8;
  6500. uint16_t onenand_startaddr2;
  6501. uint16_t onenand_startblock;
  6502.  
  6503. uint16_t controller_status;
  6504. uint16_t interrupt_status;
  6505. uint16_t write_prot_status;
  6506.  
  6507. uint64_t start_ofs;
  6508.  
  6509. #if VERBOSE
  6510. pr_info("===================================================="
  6511. "=============\n");
  6512. pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
  6513. #endif
  6514. /* 'ofs' & 'len' should align to block size */
  6515. if (ofs&(mtd->erasesize - 1)) {
  6516. pr_err("%s: Unsupported ofs address, 0x%llx\n",
  6517. __func__, ofs);
  6518. return -EINVAL;
  6519. }
  6520.  
  6521. if (len&(mtd->erasesize - 1)) {
  6522. pr_err("%s: Unsupported len, %lld\n",
  6523. __func__, len);
  6524. return -EINVAL;
  6525. }
  6526.  
  6527. if (ofs+len > mtd->size) {
  6528. pr_err("%s: Maximum chip size exceeded\n", __func__);
  6529. return -EINVAL;
  6530. }
  6531.  
  6532. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  6533. (chip, sizeof(*dma_buffer))));
  6534.  
  6535. for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
  6536. #if VERBOSE
  6537. pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
  6538. #endif
  6539.  
  6540. cmd = dma_buffer->cmd;
  6541. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  6542. && (ofs >= (mtd->size>>1))) { /* DDP Device */
  6543. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  6544. (((uint32_t)(ofs - (mtd->size>>1))
  6545. / mtd->erasesize));
  6546. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  6547. onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
  6548. / mtd->erasesize);
  6549. } else {
  6550. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  6551. ((uint32_t)ofs / mtd->erasesize) ;
  6552. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  6553. onenand_startblock = ((uint32_t)ofs
  6554. / mtd->erasesize);
  6555. }
  6556.  
  6557. onenand_startaddr8 = 0x0000;
  6558. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  6559. (nand_sfcmd_mode ? 0 : (1 << 24));
  6560. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
  6561. MSM_NAND_SFCMD_CMDXS,
  6562. nand_sfcmd_mode,
  6563. MSM_NAND_SFCMD_REGWR);
  6564. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
  6565. MSM_NAND_SFCMD_CMDXS,
  6566. nand_sfcmd_mode,
  6567. MSM_NAND_SFCMD_INTHI);
  6568. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
  6569. MSM_NAND_SFCMD_DATXS,
  6570. nand_sfcmd_mode,
  6571. MSM_NAND_SFCMD_REGRD);
  6572. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
  6573. MSM_NAND_SFCMD_CMDXS,
  6574. nand_sfcmd_mode,
  6575. MSM_NAND_SFCMD_REGWR);
  6576. dma_buffer->data.sfexec = 1;
  6577. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  6578. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  6579. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  6580. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  6581. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  6582. (ONENAND_SYSTEM_CONFIG_1);
  6583. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  6584. (ONENAND_START_ADDRESS_1);
  6585. dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
  6586. (ONENAND_START_ADDRESS_2);
  6587. dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
  6588. (ONENAND_COMMAND);
  6589. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  6590. (ONENAND_INTERRUPT_STATUS);
  6591. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  6592. (ONENAND_SYSTEM_CONFIG_1);
  6593. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  6594. (ONENAND_START_ADDRESS_1);
  6595. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  6596. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  6597. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  6598. (onenand_startaddr1);
  6599. dma_buffer->data.data2 = (onenand_startblock << 16) |
  6600. (onenand_startaddr2);
  6601. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  6602. (ONENAND_CMD_LOCK);
  6603. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  6604. (CLEAN_DATA_16);
  6605. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  6606. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  6607. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  6608. (ONENAND_STARTADDR1_RES);
  6609.  
  6610. /*************************************************************/
  6611. /* Write the necessary address reg in the onenand device */
  6612. /*************************************************************/
  6613.  
  6614. /* Enable and configure the SFlash controller */
  6615. cmd->cmd = 0;
  6616. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  6617. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  6618. cmd->len = 4;
  6619. cmd++;
  6620.  
  6621. /* Block on cmd ready and write CMD register */
  6622. cmd->cmd = DST_CRCI_NAND_CMD;
  6623. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  6624. cmd->dst = MSM_NAND_SFLASHC_CMD;
  6625. cmd->len = 4;
  6626. cmd++;
  6627.  
  6628. /* Write the ADDR0 and ADDR1 registers */
  6629. cmd->cmd = 0;
  6630. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  6631. cmd->dst = MSM_NAND_ADDR0;
  6632. cmd->len = 8;
  6633. cmd++;
  6634.  
  6635. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  6636. cmd->cmd = 0;
  6637. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  6638. cmd->dst = MSM_NAND_ADDR2;
  6639. cmd->len = 16;
  6640. cmd++;
  6641.  
  6642. /* Write the ADDR6 registers */
  6643. cmd->cmd = 0;
  6644. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  6645. cmd->dst = MSM_NAND_ADDR6;
  6646. cmd->len = 4;
  6647. cmd++;
  6648.  
  6649. /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
  6650. cmd->cmd = 0;
  6651. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  6652. cmd->dst = MSM_NAND_GENP_REG0;
  6653. cmd->len = 16;
  6654. cmd++;
  6655.  
  6656. /* Write the FLASH_DEV_CMD4,5,6 registers */
  6657. cmd->cmd = 0;
  6658. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  6659. cmd->dst = MSM_NAND_DEV_CMD4;
  6660. cmd->len = 12;
  6661. cmd++;
  6662.  
  6663. /* Kick the execute command */
  6664. cmd->cmd = 0;
  6665. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  6666. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  6667. cmd->len = 4;
  6668. cmd++;
  6669.  
  6670. /* Block on data ready, and read the status register */
  6671. cmd->cmd = SRC_CRCI_NAND_DATA;
  6672. cmd->src = MSM_NAND_SFLASHC_STATUS;
  6673. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  6674. cmd->len = 4;
  6675. cmd++;
  6676.  
  6677. /*************************************************************/
  6678. /* Wait for the interrupt from the Onenand device controller */
  6679. /*************************************************************/
  6680.  
  6681. /* Block on cmd ready and write CMD register */
  6682. cmd->cmd = DST_CRCI_NAND_CMD;
  6683. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
  6684. cmd->dst = MSM_NAND_SFLASHC_CMD;
  6685. cmd->len = 4;
  6686. cmd++;
  6687.  
  6688. /* Kick the execute command */
  6689. cmd->cmd = 0;
  6690. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  6691. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  6692. cmd->len = 4;
  6693. cmd++;
  6694.  
  6695. /* Block on data ready, and read the status register */
  6696. cmd->cmd = SRC_CRCI_NAND_DATA;
  6697. cmd->src = MSM_NAND_SFLASHC_STATUS;
  6698. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
  6699. cmd->len = 4;
  6700. cmd++;
  6701.  
  6702. /*********************************************************/
  6703. /* Read the necessary status reg from the onenand device */
  6704. /*********************************************************/
  6705.  
  6706. /* Block on cmd ready and write CMD register */
  6707. cmd->cmd = DST_CRCI_NAND_CMD;
  6708. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
  6709. cmd->dst = MSM_NAND_SFLASHC_CMD;
  6710. cmd->len = 4;
  6711. cmd++;
  6712.  
  6713. /* Kick the execute command */
  6714. cmd->cmd = 0;
  6715. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  6716. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  6717. cmd->len = 4;
  6718. cmd++;
  6719.  
  6720. /* Block on data ready, and read the status register */
  6721. cmd->cmd = SRC_CRCI_NAND_DATA;
  6722. cmd->src = MSM_NAND_SFLASHC_STATUS;
  6723. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
  6724. cmd->len = 4;
  6725. cmd++;
  6726.  
  6727. /* Read the GENP3 register */
  6728. cmd->cmd = 0;
  6729. cmd->src = MSM_NAND_GENP_REG3;
  6730. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  6731. cmd->len = 4;
  6732. cmd++;
  6733.  
  6734. /* Read the DEVCMD4 register */
  6735. cmd->cmd = 0;
  6736. cmd->src = MSM_NAND_DEV_CMD4;
  6737. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  6738. cmd->len = 4;
  6739. cmd++;
  6740.  
  6741. /************************************************************/
  6742. /* Restore the necessary registers to proper values */
  6743. /************************************************************/
  6744.  
  6745. /* Block on cmd ready and write CMD register */
  6746. cmd->cmd = DST_CRCI_NAND_CMD;
  6747. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
  6748. cmd->dst = MSM_NAND_SFLASHC_CMD;
  6749. cmd->len = 4;
  6750. cmd++;
  6751.  
  6752. /* Kick the execute command */
  6753. cmd->cmd = 0;
  6754. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  6755. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  6756. cmd->len = 4;
  6757. cmd++;
  6758.  
  6759. /* Block on data ready, and read the status register */
  6760. cmd->cmd = SRC_CRCI_NAND_DATA;
  6761. cmd->src = MSM_NAND_SFLASHC_STATUS;
  6762. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
  6763. cmd->len = 4;
  6764. cmd++;
  6765.  
  6766.  
  6767. BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
  6768. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  6769. dma_buffer->cmd[0].cmd |= CMD_OCB;
  6770. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  6771.  
  6772. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  6773. >> 3) | CMD_PTR_LP;
  6774.  
  6775. dsb();
  6776. msm_dmov_exec_cmd(chip->dma_channel, crci_mask,
  6777. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  6778. &dma_buffer->cmdptr)));
  6779. dsb();
  6780.  
  6781. write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
  6782. interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
  6783. controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
  6784.  
  6785. #if VERBOSE
  6786. pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
  6787. dma_buffer->data.sfstat[0],
  6788. dma_buffer->data.sfstat[1],
  6789. dma_buffer->data.sfstat[2],
  6790. dma_buffer->data.sfstat[3]);
  6791.  
  6792. pr_info("%s: controller_status = %x\n", __func__,
  6793. controller_status);
  6794. pr_info("%s: interrupt_status = %x\n", __func__,
  6795. interrupt_status);
  6796. pr_info("%s: write_prot_status = %x\n", __func__,
  6797. write_prot_status);
  6798. #endif
  6799. /* Check for errors, protection violations etc */
  6800. if ((controller_status != 0)
  6801. || (dma_buffer->data.sfstat[0] & 0x110)
  6802. || (dma_buffer->data.sfstat[1] & 0x110)
  6803. || (dma_buffer->data.sfstat[2] & 0x110)
  6804. || (dma_buffer->data.sfstat[3] & 0x110)) {
  6805. pr_err("%s: ECC/MPU/OP error\n", __func__);
  6806. err = -EIO;
  6807. }
  6808.  
  6809. if (!(write_prot_status & ONENAND_WP_LS)) {
  6810. pr_err("%s: Unexpected status ofs = 0x%llx,"
  6811. "wp_status = %x\n",
  6812. __func__, ofs, write_prot_status);
  6813. err = -EIO;
  6814. }
  6815.  
  6816. if (err)
  6817. break;
  6818. }
  6819.  
  6820. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  6821.  
  6822. #if VERBOSE
  6823. pr_info("\n%s: ret %d\n", __func__, err);
  6824. pr_info("===================================================="
  6825. "=============\n");
  6826. #endif
  6827. return err;
  6828. }
  6829.  
  6830. static int msm_onenand_suspend(struct mtd_info *mtd)
  6831. {
  6832. return 0;
  6833. }
  6834.  
  6835. static void msm_onenand_resume(struct mtd_info *mtd)
  6836. {
  6837. }
  6838.  
  6839. int msm_onenand_scan(struct mtd_info *mtd, int maxchips)
  6840. {
  6841. struct msm_nand_chip *chip = mtd->priv;
  6842.  
  6843. /* Probe and check whether onenand device is present */
  6844. if (flash_onenand_probe(chip))
  6845. return -ENODEV;
  6846.  
  6847. mtd->size = 0x1000000 << ((onenand_info.device_id & 0xF0) >> 4);
  6848. mtd->writesize = onenand_info.data_buf_size << 1;
  6849. mtd->oobsize = mtd->writesize >> 5;
  6850. mtd->erasesize = mtd->writesize << 6;
  6851. mtd->oobavail = msm_onenand_oob_128.oobavail;
  6852. mtd->ecclayout = &msm_onenand_oob_128;
  6853.  
  6854. mtd->type = MTD_NANDFLASH;
  6855. mtd->flags = MTD_CAP_NANDFLASH;
  6856. mtd->erase = msm_onenand_erase;
  6857. mtd->point = NULL;
  6858. mtd->unpoint = NULL;
  6859. mtd->read = msm_onenand_read;
  6860. mtd->write = msm_onenand_write;
  6861. mtd->read_oob = msm_onenand_read_oob;
  6862. mtd->write_oob = msm_onenand_write_oob;
  6863. mtd->lock = msm_onenand_lock;
  6864. mtd->unlock = msm_onenand_unlock;
  6865. mtd->suspend = msm_onenand_suspend;
  6866. mtd->resume = msm_onenand_resume;
  6867. mtd->block_isbad = msm_onenand_block_isbad;
  6868. mtd->block_markbad = msm_onenand_block_markbad;
  6869. mtd->owner = THIS_MODULE;
  6870.  
  6871. pr_info("Found a supported onenand device\n");
  6872.  
  6873. return 0;
  6874. }
  6875.  
  6876. /**
  6877. * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
  6878. * @param mtd MTD device structure
  6879. * @param maxchips Number of chips to scan for
  6880. *
  6881. * This fills out all the not initialized function pointers
  6882. * with the defaults.
  6883. * The flash ID is read and the mtd/chip structures are
  6884. * filled with the appropriate values.
  6885. */
  6886. int msm_nand_scan(struct mtd_info *mtd, int maxchips)
  6887. {
  6888. struct msm_nand_chip *chip = mtd->priv;
  6889. uint32_t flash_id = 0, i = 1, mtd_writesize;
  6890. uint8_t dev_found = 0;
  6891. uint8_t wide_bus;
  6892. uint8_t index;
  6893.  
  6894. /* Probe the Flash device for ONFI compliance */
  6895. #if defined (CONFIG_MACH_COOPER) || defined (CONFIG_MACH_EUROPA)
  6896. /* Read the Flash ID from the Nand Flash Device */
  6897. flash_id = flash_read_id(chip);
  6898. for (index = 1; index < ARRAY_SIZE(supported_flash); index++)
  6899. if ((flash_id & supported_flash[index].mask) ==
  6900. (supported_flash[index].flash_id &
  6901. (supported_flash[index].mask))) {
  6902. dev_found = 1;
  6903. break;
  6904. }
  6905. #else
  6906. if (!flash_onfi_probe(chip)) {
  6907. index = 0;
  6908. dev_found = 1;
  6909. } else {
  6910. /* Read the Flash ID from the Nand Flash Device */
  6911. flash_id = flash_read_id(chip);
  6912. for (index = 1; index < ARRAY_SIZE(supported_flash); index++)
  6913. if ((flash_id & supported_flash[index].mask) ==
  6914. (supported_flash[index].flash_id &
  6915. (supported_flash[index].mask))) {
  6916. dev_found = 1;
  6917. break;
  6918. }
  6919. }
  6920. #endif
  6921.  
  6922. if (dev_found) {
  6923. (!interleave_enable) ? (i = 1) : (i = 2);
  6924. wide_bus = supported_flash[index].widebus;
  6925. mtd->size = supported_flash[index].density * i;
  6926. mtd->writesize = supported_flash[index].pagesize * i;
  6927. mtd->oobsize = supported_flash[index].oobsize * i;
  6928. mtd->erasesize = supported_flash[index].blksize * i;
  6929.  
  6930. if (!interleave_enable)
  6931. mtd_writesize = mtd->writesize;
  6932. else
  6933. mtd_writesize = mtd->writesize >> 1;
  6934.  
  6935. pr_info("Found a supported NAND device\n");
  6936. pr_info("NAND Id : 0x%x\n", supported_flash[index].
  6937. flash_id);
  6938. pr_info("Buswidth : %d Bits \n", (wide_bus) ? 16 : 8);
  6939. pr_info("Density : %lld MByte\n", (mtd->size>>20));
  6940. pr_info("Pagesize : %d Bytes\n", mtd->writesize);
  6941. pr_info("Erasesize: %d Bytes\n", mtd->erasesize);
  6942. pr_info("Oobsize : %d Bytes\n", mtd->oobsize);
  6943. } else {
  6944. pr_err("Unsupported Nand,Id: 0x%x \n", flash_id);
  6945. return -ENODEV;
  6946. }
  6947.  
  6948. chip->CFG0 = (((mtd_writesize >> 9)-1) << 6) /* 4/8 cw/pg for 2/4k */
  6949. | (516 << 9) /* 516 user data bytes */
  6950. | (10 << 19) /* 10 parity bytes */
  6951. | (5 << 27) /* 5 address cycles */
  6952. | (0 << 30) /* Do not read status before data */
  6953. | (1 << 31) /* Send read cmd */
  6954. /* 0 spare bytes for 16 bit nand or 1 spare bytes for 8 bit */
  6955. | ((wide_bus) ? (0 << 23) : (1 << 23));
  6956.  
  6957. chip->CFG1 = (0 << 0) /* Enable ecc */
  6958. | (7 << 2) /* 8 recovery cycles */
  6959. | (0 << 5) /* Allow CS deassertion */
  6960. | ((mtd_writesize - (528 * ((mtd_writesize >> 9) - 1)) + 1)
  6961. << 6) /* Bad block marker location */
  6962. | (0 << 16) /* Bad block in user data area */
  6963. | (2 << 17) /* 6 cycle tWB/tRB */
  6964. | (wide_bus << 1); /* Wide flash bit */
  6965.  
  6966. chip->ecc_buf_cfg = 0x203;
  6967.  
  6968. pr_info("CFG0 Init : 0x%08x \n", chip->CFG0);
  6969. pr_info("CFG1 Init : 0x%08x \n", chip->CFG1);
  6970. pr_info("ECCBUFCFG : 0x%08x \n", chip->ecc_buf_cfg);
  6971.  
  6972. if (mtd->oobsize == 64) {
  6973. mtd->oobavail = msm_nand_oob_64.oobavail;
  6974. mtd->ecclayout = &msm_nand_oob_64;
  6975. } else if (mtd->oobsize == 128) {
  6976. mtd->oobavail = msm_nand_oob_128.oobavail;
  6977. mtd->ecclayout = &msm_nand_oob_128;
  6978. } else if (mtd->oobsize == 256) {
  6979. mtd->oobavail = msm_nand_oob_256.oobavail;
  6980. mtd->ecclayout = &msm_nand_oob_256;
  6981. } else {
  6982. pr_err("Unsupported Nand, oobsize: 0x%x \n",
  6983. mtd->oobsize);
  6984. return -ENODEV;
  6985. }
  6986.  
  6987. /* Fill in remaining MTD driver data */
  6988. mtd->type = MTD_NANDFLASH;
  6989. mtd->flags = MTD_CAP_NANDFLASH;
  6990. /* mtd->ecctype = MTD_ECC_SW; */
  6991. mtd->erase = msm_nand_erase;
  6992. mtd->block_isbad = msm_nand_block_isbad;
  6993. mtd->block_markbad = msm_nand_block_markbad;
  6994. mtd->point = NULL;
  6995. mtd->unpoint = NULL;
  6996. mtd->read = msm_nand_read;
  6997. mtd->write = msm_nand_write;
  6998. mtd->read_oob = msm_nand_read_oob;
  6999. mtd->write_oob = msm_nand_write_oob;
  7000. if (dual_nand_ctlr_present) {
  7001. mtd->read_oob = msm_nand_read_oob_dualnandc;
  7002. mtd->write_oob = msm_nand_write_oob_dualnandc;
  7003. if (interleave_enable) {
  7004. mtd->erase = msm_nand_erase_dualnandc;
  7005. mtd->block_isbad = msm_nand_block_isbad_dualnandc;
  7006. }
  7007. }
  7008.  
  7009. /* mtd->sync = msm_nand_sync; */
  7010. mtd->lock = NULL;
  7011. /* mtd->unlock = msm_nand_unlock; */
  7012. mtd->suspend = msm_nand_suspend;
  7013. mtd->resume = msm_nand_resume;
  7014. mtd->owner = THIS_MODULE;
  7015.  
  7016. /* Unlock whole block */
  7017. /* msm_nand_unlock_all(mtd); */
  7018.  
  7019. /* return this->scan_bbt(mtd); */
  7020.  
  7021. current_mtd = mtd; // for PARAMETER block
  7022.  
  7023. for ( i = 0 ; i < msm_nand_data.nr_parts ; i++) {
  7024. if (!strcmp(msm_nand_data.parts[i].name , "parameter")) {
  7025. param_start_block = msm_nand_data.parts[i].offset;
  7026. param_end_block = msm_nand_data.parts[i].offset + msm_nand_data.parts[i].size; // should match with bootloader
  7027. }
  7028. }
  7029.  
  7030. return 0;
  7031. }
  7032. EXPORT_SYMBOL_GPL(msm_nand_scan);
  7033.  
  7034. /**
  7035. * msm_nand_release - [msm_nand Interface] Free resources held by the msm_nand device
  7036. * @param mtd MTD device structure
  7037. */
  7038. void msm_nand_release(struct mtd_info *mtd)
  7039. {
  7040. /* struct msm_nand_chip *this = mtd->priv; */
  7041.  
  7042. #ifdef CONFIG_MTD_PARTITIONS
  7043. /* Deregister partitions */
  7044. del_mtd_partitions(mtd);
  7045. #endif
  7046. /* Deregister the device */
  7047. del_mtd_device(mtd);
  7048. }
  7049. EXPORT_SYMBOL_GPL(msm_nand_release);
  7050.  
#ifdef CONFIG_MTD_PARTITIONS
/* Partition parsers tried, in order, by parse_mtd_partitions() */
static const char *part_probes[] = { "cmdlinepart", NULL, };
#endif
  7054.  
/* Per-probe driver state: the MTD device, the partition table returned
 * by the command-line parser (if any), and the controller state. */
struct msm_nand_info {
	struct mtd_info mtd;
	struct mtd_partition *parts;	/* set by parse_mtd_partitions() */
	struct msm_nand_chip msm_nand;
};
  7060.  
/* duplicating the NC01 XFR contents to NC10 */
/*
 * Builds a one-entry data-mover command list that block-copies the XFR
 * step registers from controller NC01 to NC10, runs it synchronously,
 * and releases the bounce buffer.  Always returns 0.
 */
static int msm_nand_nc10_xfr_settings(struct mtd_info *mtd)
{
	struct msm_nand_chip *chip = mtd->priv;

	/* DMA descriptor list plus its command pointer, carved out of
	 * the driver's coherent bounce buffer */
	struct {
		dmov_s cmd[2];
		unsigned cmdptr;
	} *dma_buffer;
	dmov_s *cmd;

	/* Sleep until a large-enough slot in the shared DMA buffer frees up */
	wait_event(chip->wait_queue,
		(dma_buffer = msm_nand_get_dma_buffer(
			chip, sizeof(*dma_buffer))));

	cmd = dma_buffer->cmd;

	/* Copying XFR register contents from NC01 --> NC10 */
	cmd->cmd = 0;
	cmd->src = NC01(MSM_NAND_XFR_STEP1);
	cmd->dst = NC10(MSM_NAND_XFR_STEP1);
	cmd->len = 28;	/* 28 bytes: presumably XFR_STEP1..7 — verify
			 * against the controller register map */
	cmd++;

	BUILD_BUG_ON(2 != ARRAY_SIZE(dma_buffer->cmd));
	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	/* Mark first/last descriptors for the data mover */
	dma_buffer->cmd[0].cmd |= CMD_OCB;
	cmd[-1].cmd |= CMD_OCU | CMD_LC;
	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
			| CMD_PTR_LP;

	/* Barriers around the transfer so the descriptor writes are
	 * visible to the data mover before, and its results after */
	dsb();
	msm_dmov_exec_cmd(chip->dma_channel, crci_mask, DMOV_CMD_PTR_LIST
			| DMOV_CMD_ADDR(msm_virt_to_dma(chip,
			&dma_buffer->cmdptr)));
	dsb();
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return 0;
}
  7100.  
  7101. #ifdef CONFIG_MTD_PARTITIONS
  7102. static void setup_mtd_device(struct platform_device *pdev,
  7103. struct msm_nand_info *info)
  7104. {
  7105. int i, nr_parts;
  7106. struct flash_platform_data *pdata = pdev->dev.platform_data;
  7107.  
  7108. for (i = 0; i < pdata->nr_parts; i++) {
  7109. pdata->parts[i].offset = pdata->parts[i].offset
  7110. * info->mtd.erasesize;
  7111. pdata->parts[i].size = pdata->parts[i].size
  7112. * info->mtd.erasesize;
  7113. }
  7114.  
  7115. nr_parts = parse_mtd_partitions(&info->mtd, part_probes, &info->parts,
  7116. 0);
  7117. if (nr_parts > 0)
  7118. add_mtd_partitions(&info->mtd, info->parts, nr_parts);
  7119. else if (nr_parts <= 0 && pdata && pdata->parts)
  7120. add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
  7121. else
  7122. add_mtd_device(&info->mtd);
  7123. }
  7124. #else
  7125. static void setup_mtd_device(struct platform_device *pdev,
  7126. struct msm_nand_info *info)
  7127. {
  7128. add_mtd_device(&info->mtd);
  7129. }
  7130. #endif
  7131.  
/*
 * Platform-driver probe: maps the controller resources, allocates the
 * shared DMA bounce buffer, detects the flash (raw NAND first, then
 * OneNAND) and registers the resulting MTD device.
 */
static int __devinit msm_nand_probe(struct platform_device *pdev)
{
	struct msm_nand_info *info;
	struct resource *res;
	int err;
	struct flash_platform_data *plat_data;

	plat_data = pdev->dev.platform_data;

	/* The base controller register window is mandatory */
	res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "msm_nand_phys");
	if (!res || !res->start) {
		pr_err("%s: msm_nand_phys resource invalid/absent\n",
			__func__);
		return -ENODEV;
	}
	msm_nand_phys = res->start;
	pr_info("%s: phys addr 0x%lx \n", __func__, msm_nand_phys);

	/*
	 * The NC01/NC10/NC11/EBI2 windows exist only on dual-controller
	 * hardware; any one missing means single-controller mode
	 * (dual_nand_ctlr_present stays 0).
	 */
	res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "msm_nandc01_phys");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	msm_nandc01_phys = res->start;

	res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "msm_nandc10_phys");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	msm_nandc10_phys = res->start;

	res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "msm_nandc11_phys");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	msm_nandc11_phys = res->start;

	res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "ebi2_reg_base");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	ebi2_register_base = res->start;

#if defined (CONFIG_MACH_COOPER)
	/* Cooper boards force single-controller, non-interleaved mode */
	dual_nand_ctlr_present = 0;
	interleave_enable = 0;
#else
	dual_nand_ctlr_present = 1;
	if (plat_data != NULL)
		interleave_enable = plat_data->interleave;
	else
		interleave_enable = 0;
#endif

	if (!interleave_enable)
		pr_info("%s: Dual Nand Ctrl in ping-pong mode\n", __func__);
	else
		pr_info("%s: Dual Nand Ctrl in interleave mode\n", __func__);

no_dual_nand_ctlr_support:
	/* DMA channel used for all controller accesses */
	res = platform_get_resource_byname(pdev,
		IORESOURCE_DMA, "msm_nand_dmac");
	if (!res || !res->start) {
		pr_err("%s: invalid msm_nand_dmac resource\n", __func__);
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct msm_nand_info), GFP_KERNEL);
	if (!info) {
		pr_err("%s: No memory for msm_nand_info\n", __func__);
		return -ENOMEM;
	}

	info->msm_nand.dev = &pdev->dev;

	init_waitqueue_head(&info->msm_nand.wait_queue);

	info->msm_nand.dma_channel = res->start;
	pr_info("%s: dmac 0x%x\n", __func__, info->msm_nand.dma_channel);

	/* this currently fails if dev is passed in */
	info->msm_nand.dma_buffer =
		dma_alloc_coherent(/*dev*/ NULL, MSM_NAND_DMA_BUFFER_SIZE,
			&info->msm_nand.dma_addr, GFP_KERNEL);
	if (info->msm_nand.dma_buffer == NULL) {
		pr_err("%s: No memory for msm_nand.dma_buffer\n", __func__);
		err = -ENOMEM;
		goto out_free_info;
	}

	pr_info("%s: allocated dma buffer at %p, dma_addr %x\n",
		__func__, info->msm_nand.dma_buffer, info->msm_nand.dma_addr);

	/* CRCI flow-control mask shared by all DMA command lists */
	crci_mask = msm_dmov_build_crci_mask(2,
			DMOV_NAND_CRCI_DATA, DMOV_NAND_CRCI_CMD);

	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.priv = &info->msm_nand;
	info->mtd.owner = THIS_MODULE;

	/* NC10 must mirror NC01's transfer-step settings */
	if (dual_nand_ctlr_present)
		msm_nand_nc10_xfr_settings(&info->mtd);

	/* Try raw NAND first, then fall back to OneNAND */
	if (msm_nand_scan(&info->mtd, 1))
		if (msm_onenand_scan(&info->mtd, 1)) {
			pr_err("%s: No nand device found\n", __func__);
			err = -ENXIO;
			goto out_free_dma_buffer;
		}

	setup_mtd_device(pdev, info);
	dev_set_drvdata(&pdev->dev, info);

	return 0;

out_free_dma_buffer:
	dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
			info->msm_nand.dma_buffer,
			info->msm_nand.dma_addr);
out_free_info:
	kfree(info);

	return err;
}
  7256.  
  7257. static int __devexit msm_nand_remove(struct platform_device *pdev)
  7258. {
  7259. struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
  7260.  
  7261. dev_set_drvdata(&pdev->dev, NULL);
  7262.  
  7263. if (info) {
  7264. #ifdef CONFIG_MTD_PARTITIONS
  7265. if (info->parts)
  7266. del_mtd_partitions(&info->mtd);
  7267. else
  7268. #endif
  7269. del_mtd_device(&info->mtd);
  7270.  
  7271. msm_nand_release(&info->mtd);
  7272. dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
  7273. info->msm_nand.dma_buffer,
  7274. info->msm_nand.dma_addr);
  7275. kfree(info);
  7276. }
  7277.  
  7278. return 0;
  7279. }
  7280.  
#define DRIVER_NAME "msm_nand"

/* Binds to the "msm_nand" platform device registered by board code */
static struct platform_driver msm_nand_driver = {
	.probe		= msm_nand_probe,
	.remove		= __devexit_p(msm_nand_remove),
	.driver = {
		.name		= DRIVER_NAME,
	}
};

MODULE_ALIAS(DRIVER_NAME);
  7292.  
/* Module entry point: register the platform driver */
static int __init msm_nand_init(void)
{
	return platform_driver_register(&msm_nand_driver);
}
  7297.  
/* Module exit point: unregister the platform driver */
static void __exit msm_nand_exit(void)
{
	platform_driver_unregister(&msm_nand_driver);
}
  7302.  
  7303. module_init(msm_nand_init);
  7304. module_exit(msm_nand_exit);
  7305.  
  7306. MODULE_LICENSE("GPL");
  7307. MODULE_DESCRIPTION("msm_nand flash driver code");
Add Comment
Please, Sign In to add comment