xartin

VT8251.ahci-2.6.17.diff

Nov 14th, 2023
--- linux-2.6.17/drivers/scsi/ahci.c 2006-06-17 20:49:35.000000000 -0500
+++ linux-2.6.17-rc6-mm2/drivers/scsi/ahci.c 2006-06-12 23:44:25.000000000 -0500
@@ -48,7 +48,7 @@
#include <asm/io.h>

#define DRV_NAME "ahci"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "1.3"


enum {
@@ -56,12 +56,15 @@
AHCI_MAX_SG = 168, /* hardware max is 64K */
AHCI_DMA_BOUNDARY = 0xffffffff,
AHCI_USE_CLUSTERING = 0,
- AHCI_CMD_SLOT_SZ = 32 * 32,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
AHCI_RX_FIS_SZ = 256,
- AHCI_CMD_TBL_HDR = 0x80,
AHCI_CMD_TBL_CDB = 0x40,
- AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
- AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
AHCI_RX_FIS_SZ,
AHCI_IRQ_ON_SG = (1 << 31),
AHCI_CMD_ATAPI = (1 << 5),
@@ -71,8 +74,10 @@
AHCI_CMD_CLR_BUSY = (1 << 10),

RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */

board_ahci = 0,
+ board_ahci_vt8251 = 1,

/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
@@ -87,8 +92,9 @@
HOST_AHCI_EN = (1 << 31), /* AHCI enabled */

/* HOST_CAP bits */
- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */

/* registers for each SATA port */
PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -127,15 +133,16 @@
PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */

- PORT_IRQ_FATAL = PORT_IRQ_TF_ERR |
- PORT_IRQ_HBUS_ERR |
- PORT_IRQ_HBUS_DATA_ERR |
- PORT_IRQ_IF_ERR,
- DEF_PORT_IRQ = PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
- PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
- PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
- PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
- PORT_IRQ_D2H_REG_FIS,
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_UNK_FIS,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

/* PORT_CMD bits */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
@@ -153,6 +160,9 @@

/* hpriv->flags bits */
AHCI_FLAG_MSI = (1 << 0),
+
+ /* ap->flags bits */
+ AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
};

struct ahci_cmd_hdr {
@@ -181,7 +191,6 @@
dma_addr_t cmd_slot_dma;
void *cmd_tbl;
dma_addr_t cmd_tbl_dma;
- struct ahci_sg *cmd_tbl_sg;
void *rx_fis;
dma_addr_t rx_fis_dma;
};
@@ -193,13 +202,15 @@
static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
static void ahci_irq_clear(struct ata_port *ap);
-static void ahci_eng_timeout(struct ata_port *ap);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static u8 ahci_check_status(struct ata_port *ap);
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static void ahci_remove_one (struct pci_dev *pdev);

static struct scsi_host_template ahci_sht = {
@@ -207,7 +218,8 @@
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
- .can_queue = ATA_DEF_QUEUE,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .can_queue = AHCI_MAX_CMDS - 1,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = AHCI_MAX_SG,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -233,14 +245,18 @@
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,

- .eng_timeout = ahci_eng_timeout,
-
.irq_handler = ahci_interrupt,
.irq_clear = ahci_irq_clear,

.scr_read = ahci_scr_read,
.scr_write = ahci_scr_write,

+ .freeze = ahci_freeze,
+ .thaw = ahci_thaw,
+
+ .error_handler = ahci_error_handler,
+ .post_internal_cmd = ahci_post_internal_cmd,
+
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
};
@@ -255,6 +271,16 @@
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &ahci_ops,
},
+ /* board_ahci_vt8251 */
+ {
+ .sht = &ahci_sht,
+ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ AHCI_FLAG_RESET_NEEDS_CLO,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &ahci_ops,
+ },
};

static const struct pci_device_id ahci_pci_tbl[] = {
@@ -296,6 +322,8 @@
board_ahci }, /* ATI SB600 non-raid */
{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ATI SB600 raid */
+ { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci_vt8251 }, /* VIA VT8251 */
{ } /* terminate list */
};

@@ -374,8 +402,6 @@
pp->cmd_tbl = mem;
pp->cmd_tbl_dma = mem_dma;

- pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;
-
ap->private_data = pp;

if (hpriv->cap & HOST_CAP_64)
@@ -508,46 +534,60 @@
return ata_dev_classify(&tf);
}

-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts)
{
- pp->cmd_slot[0].opts = cpu_to_le32(opts);
- pp->cmd_slot[0].status = 0;
- pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
- pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+ dma_addr_t cmd_tbl_dma;
+
+ cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+ pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+ pp->cmd_slot[tag].status = 0;
+ pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+ pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}

-static int ahci_poll_register(void __iomem *reg, u32 mask, u32 val,
- unsigned long interval_msec,
- unsigned long timeout_msec)
+static int ahci_clo(struct ata_port *ap)
{
- unsigned long timeout;
+ void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+ struct ahci_host_priv *hpriv = ap->host_set->private_data;
u32 tmp;

- timeout = jiffies + (timeout_msec * HZ) / 1000;
- do {
- tmp = readl(reg);
- if ((tmp & mask) == val)
- return 0;
- msleep(interval_msec);
- } while (time_before(jiffies, timeout));
+ if (!(hpriv->cap & HOST_CAP_CLO))
+ return -EOPNOTSUPP;

- return -1;
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_CLO;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+ if (tmp & PORT_CMD_CLO)
+ return -EIO;
+
+ return 0;
}

-static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_softreset(struct ata_port *ap, unsigned int *class)
{
- struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const u32 cmd_fis_len = 5; /* five dwords */
const char *reason = NULL;
struct ata_taskfile tf;
+ u32 tmp;
u8 *fis;
int rc;

DPRINTK("ENTER\n");

+ if (ata_port_offline(ap)) {
+ DPRINTK("PHY reports no device\n");
+ *class = ATA_DEV_NONE;
+ return 0;
+ }
+
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_stop_engine(ap);
if (rc) {
@@ -558,23 +598,13 @@
/* check BUSY/DRQ, perform Command List Override if necessary */
ahci_tf_read(ap, &tf);
if (tf.command & (ATA_BUSY | ATA_DRQ)) {
- u32 tmp;
+ rc = ahci_clo(ap);

- if (!(hpriv->cap & HOST_CAP_CLO)) {
- rc = -EIO;
- reason = "port busy but no CLO";
+ if (rc == -EOPNOTSUPP) {
+ reason = "port busy but CLO unavailable";
goto fail_restart;
- }
-
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_CLO;
- writel(tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
-
- if (ahci_poll_register(port_mmio + PORT_CMD, PORT_CMD_CLO, 0x0,
- 1, 500)) {
- rc = -EIO;
- reason = "CLO failed";
+ } else if (rc) {
+ reason = "port busy but CLO failed";
goto fail_restart;
}
}
@@ -582,20 +612,21 @@
/* restart engine */
ahci_start_engine(ap);

- ata_tf_init(ap, &tf, 0);
+ ata_tf_init(ap->device, &tf);
fis = pp->cmd_tbl;

/* issue the first D2H Register FIS */
- ahci_fill_cmd_slot(pp, cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+ ahci_fill_cmd_slot(pp, 0,
+ cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);

tf.ctl |= ATA_SRST;
ata_tf_to_fis(&tf, fis, 0);
fis[1] &= ~(1 << 7); /* turn off Command FIS bit */

writel(1, port_mmio + PORT_CMD_ISSUE);
- readl(port_mmio + PORT_CMD_ISSUE); /* flush */

- if (ahci_poll_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x0, 1, 500)) {
+ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
+ if (tmp & 0x1) {
rc = -EIO;
reason = "1st FIS failed";
goto fail;
@@ -605,7 +636,7 @@
msleep(1);

/* issue the second D2H Register FIS */
- ahci_fill_cmd_slot(pp, cmd_fis_len);
+ ahci_fill_cmd_slot(pp, 0, cmd_fis_len);

tf.ctl &= ~ATA_SRST;
ata_tf_to_fis(&tf, fis, 0);
@@ -625,7 +656,7 @@
msleep(150);

*class = ATA_DEV_NONE;
- if (sata_dev_present(ap)) {
+ if (ata_port_online(ap)) {
if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
rc = -EIO;
reason = "device not ready";
@@ -640,25 +671,21 @@
fail_restart:
ahci_start_engine(ap);
fail:
- if (verbose)
- printk(KERN_ERR "ata%u: softreset failed (%s)\n",
- ap->id, reason);
- else
- DPRINTK("EXIT, rc=%d reason=\"%s\"\n", rc, reason);
+ ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
return rc;
}

-static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
{
int rc;

DPRINTK("ENTER\n");

ahci_stop_engine(ap);
- rc = sata_std_hardreset(ap, verbose, class);
+ rc = sata_std_hardreset(ap, class);
ahci_start_engine(ap);

- if (rc == 0)
+ if (rc == 0 && ata_port_online(ap))
*class = ahci_dev_classify(ap);
if (*class == ATA_DEV_UNKNOWN)
*class = ATA_DEV_NONE;
@@ -688,6 +715,12 @@

static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
{
+ if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
+ (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
+ /* ATA_BUSY hasn't cleared, so send a CLO */
+ ahci_clo(ap);
+ }
+
return ata_drive_probe_reset(ap, ata_std_probeinit,
ahci_softreset, ahci_hardreset,
ahci_postreset, classes);
@@ -708,9 +741,8 @@
ata_tf_from_fis(d2h_fis, tf);
}

-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
- struct ahci_port_priv *pp = qc->ap->private_data;
struct scatterlist *sg;
struct ahci_sg *ahci_sg;
unsigned int n_sg = 0;
@@ -720,7 +752,7 @@
/*
* Next, the S/G list.
*/
- ahci_sg = pp->cmd_tbl_sg;
+ ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
ata_for_each_sg(sg, qc) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
@@ -741,6 +773,7 @@
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
int is_atapi = is_atapi_taskfile(&qc->tf);
+ void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
unsigned int n_elem;
@@ -749,16 +782,17 @@
* Fill in command table information. First, the header,
* a SATA Register - Host to Device command FIS.
*/
- ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
+ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+ ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
if (is_atapi) {
- memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
- memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
- qc->dev->cdb_len);
+ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}

n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = ahci_fill_sg(qc);
+ n_elem = ahci_fill_sg(qc, cmd_tbl);

/*
* Fill in command slot information.
@@ -769,112 +803,123 @@
if (is_atapi)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

- ahci_fill_cmd_slot(pp, opts);
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
}

-static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
- void __iomem *mmio = ap->host_set->mmio_base;
- void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
- u32 tmp;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_eh_info *ehi = &ap->eh_info;
+ unsigned int err_mask = 0, action = 0;
+ struct ata_queued_cmd *qc;
+ u32 serror;

- if ((ap->device[0].class != ATA_DEV_ATAPI) ||
- ((irq_stat & PORT_IRQ_TF_ERR) == 0))
- printk(KERN_WARNING "ata%u: port reset, "
- "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
- ap->id,
- irq_stat,
- readl(mmio + HOST_IRQ_STAT),
- readl(port_mmio + PORT_IRQ_STAT),
- readl(port_mmio + PORT_CMD),
- readl(port_mmio + PORT_TFDATA),
- readl(port_mmio + PORT_SCR_STAT),
- readl(port_mmio + PORT_SCR_ERR));
+ ata_ehi_clear_desc(ehi);

- /* stop DMA */
- ahci_stop_engine(ap);
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ serror = ahci_scr_read(ap, SCR_ERROR);
+ ahci_scr_write(ap, SCR_ERROR, serror);

- /* clear SATA phy error, if any */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- writel(tmp, port_mmio + PORT_SCR_ERR);
+ /* analyze @irq_stat */
+ ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

- /* if DRQ/BSY is set, device needs to be reset.
- * if so, issue COMRESET
- */
- tmp = readl(port_mmio + PORT_TFDATA);
- if (tmp & (ATA_BUSY | ATA_DRQ)) {
- writel(0x301, port_mmio + PORT_SCR_CTL);
- readl(port_mmio + PORT_SCR_CTL); /* flush */
- udelay(10);
- writel(0x300, port_mmio + PORT_SCR_CTL);
- readl(port_mmio + PORT_SCR_CTL); /* flush */
+ if (irq_stat & PORT_IRQ_TF_ERR)
+ err_mask |= AC_ERR_DEV;
+
+ if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+ err_mask |= AC_ERR_HOST_BUS;
+ action |= ATA_EH_SOFTRESET;
}

- /* re-start DMA */
- ahci_start_engine(ap);
-}
+ if (irq_stat & PORT_IRQ_IF_ERR) {
+ err_mask |= AC_ERR_ATA_BUS;
+ action |= ATA_EH_SOFTRESET;
+ ata_ehi_push_desc(ehi, ", interface fatal error");
+ }

-static void ahci_eng_timeout(struct ata_port *ap)
-{
- struct ata_host_set *host_set = ap->host_set;
- void __iomem *mmio = host_set->mmio_base;
- void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
- struct ata_queued_cmd *qc;
- unsigned long flags;
+ if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+ err_mask |= AC_ERR_ATA_BUS;
+ action |= ATA_EH_SOFTRESET;
+ ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+ "connection status changed" : "PHY RDY changed");
+ }

- printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+ if (irq_stat & PORT_IRQ_UNK_FIS) {
+ u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

- spin_lock_irqsave(&host_set->lock, flags);
+ err_mask |= AC_ERR_HSM;
+ action |= ATA_EH_SOFTRESET;
+ ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+ unk[0], unk[1], unk[2], unk[3]);
+ }

- ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
- qc = ata_qc_from_tag(ap, ap->active_tag);
- qc->err_mask |= AC_ERR_TIMEOUT;
+ /* okay, let's hand over to EH */
+ ehi->serror |= serror;
+ ehi->action |= action;

- spin_unlock_irqrestore(&host_set->lock, flags);
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;

- ata_eh_qc_complete(qc);
+ if (irq_stat & PORT_IRQ_FREEZE)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
}

-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void ahci_host_intr(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
- u32 status, serr, ci;
-
- serr = readl(port_mmio + PORT_SCR_ERR);
- writel(serr, port_mmio + PORT_SCR_ERR);
+ struct ata_eh_info *ehi = &ap->eh_info;
+ u32 status, qc_active;
+ int rc;

status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);

- ci = readl(port_mmio + PORT_CMD_ISSUE);
- if (likely((ci & 0x1) == 0)) {
- if (qc) {
- WARN_ON(qc->err_mask);
- ata_qc_complete(qc);
- qc = NULL;
- }
+ if (unlikely(status & PORT_IRQ_ERROR)) {
+ ahci_error_intr(ap, status);
+ return;
}

- if (status & PORT_IRQ_FATAL) {
- unsigned int err_mask;
- if (status & PORT_IRQ_TF_ERR)
- err_mask = AC_ERR_DEV;
- else if (status & PORT_IRQ_IF_ERR)
- err_mask = AC_ERR_ATA_BUS;
- else
- err_mask = AC_ERR_HOST_BUS;
-
- /* command processing has stopped due to error; restart */
- ahci_restart_port(ap, status);
-
- if (qc) {
- qc->err_mask |= err_mask;
- ata_qc_complete(qc);
- }
+ if (ap->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+ rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+ if (rc > 0)
+ return;
+ if (rc < 0) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_SOFTRESET;
+ ata_port_freeze(ap);
+ return;
}

- return 1;
+ /* hmmm... a spurious interupt */
+
+ /* some devices send D2H reg with I bit set during NCQ command phase */
+ if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
+ return;
+
+ /* ignore interim PIO setup fis interrupts */
+ if (ata_tag_valid(ap->active_tag)) {
+ struct ata_queued_cmd *qc =
+ ata_qc_from_tag(ap, ap->active_tag);
+
+ if (qc && qc->tf.protocol == ATA_PROT_PIO &&
+ (status & PORT_IRQ_PIOS_FIS))
+ return;
+ }
+
+ if (ata_ratelimit())
+ ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+ "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
+ status, ap->active_tag, ap->sactive);
}

static void ahci_irq_clear(struct ata_port *ap)
@@ -882,7 +927,7 @@
/* TODO */
}

-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ahci_host_priv *hpriv;
@@ -911,14 +956,7 @@

ap = host_set->ports[i];
if (ap) {
- struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->active_tag);
- if (!ahci_host_intr(ap, qc))
- if (ata_ratelimit())
- dev_printk(KERN_WARNING, host_set->dev,
- "unhandled interrupt on port %u\n",
- i);
-
+ ahci_host_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
@@ -935,7 +973,7 @@
handled = 1;
}

- spin_unlock(&host_set->lock);
+ spin_unlock(&host_set->lock);

VPRINTK("EXIT\n");

@@ -947,12 +985,64 @@
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;

- writel(1, port_mmio + PORT_CMD_ISSUE);
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
readl(port_mmio + PORT_CMD_ISSUE); /* flush */

return 0;
}

+static void ahci_freeze(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host_set->mmio_base;
+ void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+ /* turn IRQ off */
+ writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host_set->mmio_base;
+ void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+ u32 tmp;
+
+ /* clear IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+ writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+
+ /* turn IRQ back on */
+ writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+ if (!(ap->flags & ATA_FLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+ ahci_start_engine(ap);
+ }
+
+ /* perform recovery */
+ ata_do_eh(ap, ahci_softreset, ahci_hardreset, ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (qc->err_mask) {
+ /* make DMA engine forget about the failed command */
+ ahci_stop_engine(ap);
+ ahci_start_engine(ap);
+ }
+}
+
static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
unsigned int port_idx)
{
@@ -1097,9 +1187,6 @@
writel(tmp, port_mmio + PORT_IRQ_STAT);

writel(1 << i, mmio + HOST_IRQ_STAT);
-
- /* set irq mask (enables interrupts) */
- writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
}

tmp = readl(mmio + HOST_CTL);
@@ -1197,6 +1284,8 @@

VPRINTK("ENTER\n");

+ WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

@@ -1264,6 +1353,9 @@
if (rc)
goto err_out_hpriv;

+ if (hpriv->cap & HOST_CAP_NCQ)
+ probe_ent->host_flags |= ATA_FLAG_NCQ;
+
ahci_print_info(probe_ent);

/* FIXME: check ata_device_add return value */
  760.  