/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise. Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	return intel_dp->is_pch_edp;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

/**
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 * @encoder: DRM encoder
 *
 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
 * by intel_display.c.
 */
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (!encoder)
		return false;

	intel_dp = enc_to_intel_dp(encoder);

	return is_pch_edp(intel_dp);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

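/*
 * A worked sketch of how the two helpers above combine, using a
 * hypothetical 154000 kHz mode: at the worst-case 18bpp it needs
 * intel_dp_link_required(154000, 18) == 277200 decakilobits, while four
 * lanes at 2.7GHz supply intel_dp_max_data_rate(270000, 4) == 864000,
 * so the mode fits with room to spare.
 */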
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

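/*
 * pack_aux()/unpack_aux() convert between a byte stream and the 32-bit
 * AUX channel data registers, which hold up to four bytes MSB-first:
 * e.g. pack_aux("\x12\x34", 2) yields 0x12340000.
 */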
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

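/*
 * Panel power sequencer access: Valleyview exposes the sequencer through
 * the PIPEA_PP_* registers, while PCH platforms use PCH_PP_*; the helpers
 * below pick the right register bank before poking it.
 */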
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_ctrl_reg;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(pp_stat_reg),
			      I915_READ(pp_ctrl_reg));
	}
}

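/*
 * Wait for a pending AUX transfer to complete, either by sleeping on the
 * AUX/GMBUS interrupt or, on platforms without a usable AUX irq, by
 * polling the SEND_BUSY bit; returns the final channel status register.
 */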
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that.
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (HAS_DDI(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		aux_clock_divider = 74;
	} else if (HAS_PCH_SPLIT(dev)) {
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	} else {
		aux_clock_divider = intel_hrawclk(dev) / 2;
	}

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}

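/*
 * Native AUX messages as built below carry a four byte header: the
 * request type in the high nibble of byte 0, the 16-bit DPCD address in
 * bytes 1-2, and the payload length minus one in byte 3; any payload
 * follows the header.
 */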
/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

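/*
 * I2C-over-AUX transfers move one byte per AUX transaction. The MOT
 * (middle-of-transaction) bit is set on every message except the final
 * STOP so the sink keeps the emulated I2C transaction open, and both the
 * native AUX reply and the nested I2C reply must ack before a byte is
 * considered transferred.
 */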
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						 struct intel_dp,
						 adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct drm_display_mode *mode = &pipe_config->requested_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int target_clock, link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}
	/* We need to take the panel's fixed mode into account. */
	target_clock = adjusted_mode->clock;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
	if (is_edp(intel_dp) && dev_priv->edp.bpp)
		bpp = min_t(int, bpp, dev_priv->edp.bpp);

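	/* Search order: highest bpp first, stepping down two bits per
	 * channel (bpp -= 2*3) at a time; within each bpp, walk the link
	 * clocks and lane counts in increasing order, so the first
	 * configuration that carries the mode is also the cheapest one.
	 */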
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(target_clock, bpp);

		for (clock = 0; clock <= max_clock; clock++) {
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
	pipe_config->pipe_bpp = bpp;
	pipe_config->pixel_target_clock = target_clock;

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      adjusted_mode->clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       target_clock, adjusted_mode->clock,
			       &pipe_config->dp_m_n);

	return true;
}

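/*
 * The link_configuration array mirrors the DPCD link configuration block
 * starting at 0x100: byte 0 is LINK_BW_SET, byte 1 is LANE_COUNT_SET
 * (plus the enhanced framing flag), and byte 8 is
 * MAIN_LINK_CHANNEL_CODING_SET, hence DP_SET_ANSI_8B10B below.
 */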
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version 1.1 or newer and enhanced framing support.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}

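/*
 * Mask/value pairs for waiting on the panel power sequencer: PP_ON and
 * the sequencer state bits in PP_STATUS tell us whether the panel has
 * fully powered up, fully powered off, or finished its mandatory power
 * cycle delay.
 */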
#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0 | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0 | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;
	u32 pp_ctrl_reg;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
	control = I915_READ(pp_ctrl_reg);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
		msleep(intel_dp->panel_power_down_delay);
	}
}

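/*
 * VDD off is deferred: ironlake_edp_panel_vdd_off() normally just clears
 * want_panel_vdd and schedules this work far in the future, so a burst of
 * AUX transactions doesn't bounce the panel's VDD rail on and off.
 */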
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(dev, pipe);
}

void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	msleep(intel_dp->backlight_off_delay);
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail). */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}

static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		if (!IS_VALLEYVIEW(dev))
			ironlake_edp_pll_off(intel_dp);
	}
}

static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}

static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;

	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
		ironlake_edp_pll_on(intel_dp);
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_DDI(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

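/*
 * Fold the per-lane voltage swing and pre-emphasis adjustment requests
 * from the sink into a single worst-case value, clamp it to what the
 * source hardware supports, and mark when the maximum has been reached
 * so link training knows it can't go any higher.
 */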
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

static uint32_t
intel_gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}

/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

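	/*
	 * Merge the freshly computed drive levels into the caller's port
	 * register value while preserving every other bit. Illustrative
	 * example only: on gen6 CPU eDP, a train_set of
	 * DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5 selects
	 * EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B above, and only the bits
	 * inside EDP_LINK_TRAIN_VOL_EMP_MASK_SNB are rewritten here.
	 */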
	*DP = (*DP & ~mask) | signal_levels;
}

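/**
 * intel_dp_set_link_train - program a link training pattern
 * @intel_dp: DP struct
 * @dp_reg_value: value for the port register (non-DDI platforms)
 * @dp_train_pat: DP_TRAINING_PATTERN_* value, optionally with
 *		  DP_LINK_SCRAMBLING_DISABLE or'ed in
 *
 * Programs the requested training pattern on the source side (via
 * DP_TP_CTL on DDI platforms, or the port register otherwise) and then
 * mirrors it to the sink through the DPCD TRAINING_PATTERN_SET register,
 * followed by the per-lane drive settings from intel_dp->train_set.
 * Returns false if writing the lane settings over AUX fails.
 */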
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

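/**
 * intel_dp_set_idle_link_train - switch a DDI port to the idle pattern
 * @intel_dp: DP struct
 *
 * On DDI platforms, puts the port into idle-pattern transmission between
 * the end of link training and the start of normal pixel output, waiting
 * for the idle pattern to complete except on PORT_A. No-op elsewhere.
 */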
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is a requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}

/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}

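/**
 * intel_dp_complete_link_train - channel equalization phase
 * @intel_dp: DP struct
 *
 * Second phase of link training: transmit training pattern 2 and adjust
 * the drive levels until the sink reports successful channel
 * equalization, restarting full clock recovery a limited number of times
 * if the clock is lost or equalization keeps failing.
 */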
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}

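/**
 * intel_dp_stop_link_train - end link training
 * @intel_dp: DP struct
 *
 * Disables the training pattern on both source and sink, switching the
 * link over to normal pixel transmission.
 */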
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}

static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is only called in two cases in the DDI code:
	 * - Link train failed while doing crtc_enable, in which case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port in this case,
	 *   so when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

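/**
 * intel_dp_get_dpcd - read and validate the sink's DPCD
 * @intel_dp: DP struct
 *
 * Reads the receiver capability block into intel_dp->dpcd and, when a
 * downstream (branch) port is present and the DPCD revision is new
 * enough to carry it, the per-port downstream info as well. Returns
 * false if the AUX transfers fail or no DPCD is present.
 */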
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

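/**
 * intel_dp_probe_oui - log the sink and branch device OUIs
 * @intel_dp: DP struct
 *
 * Purely informational: if the sink advertises OUI support, read and log
 * the IEEE OUIs of the sink and branch devices for debugging.
 */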
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec section 5.1.2:
 * 1. Read DPCD
 * 2. Configure link according to Receiver Capabilities
 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
 * 4. Check link status on receipt of hot-plug interrupt
 */

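/**
 * intel_dp_check_link_status - hot-plug/IRQ link maintenance
 * @intel_dp: DP struct
 *
 * Called in response to a hot-plug interrupt while the output is active:
 * re-reads the link status and DPCD (dropping the link if either fails),
 * services any pending sink IRQ vector (automated test requests are
 * NAKed), and retrains the link if channel equalization has been lost.
 */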
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	switch (intel_dig_port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

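/**
 * intel_dp_get_edid - fetch the sink's EDID
 * @connector: DRM connector
 * @adapter: i2c adapter for the DDC bus
 *
 * Returns a freshly allocated copy of the cached EDID if one exists
 * (e.g. the fixed eDP panel EDID read at init time), otherwise probes
 * DDC. The caller owns the returned EDID and must kfree() it.
 */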
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		struct edid *edid;
		int size;

		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
		edid = kmalloc(size, GFP_KERNEL);
		if (!edid)
			return NULL;

		memcpy(edid, intel_connector->edid, size);
		return edid;
	}

	return drm_get_edid(connector, adapter);
}

static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return 0;

		return intel_connector_update_modes(connector,
						    intel_connector->edid);
	}

	return intel_ddc_get_modes(connector, adapter);
}

static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	return connector_status_connected;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink */
	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret)
		return ret;

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	return has_audio;
}

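/**
 * intel_dp_set_property - set a DP connector property
 * @connector: DRM connector
 * @property: property being changed
 * @val: new value
 *
 * Handles the force-audio and broadcast-RGB properties, plus the panel
 * fitting (scaling mode) property on eDP. A successful change that
 * affects the output triggers a mode set restore on the attached crtc.
 */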
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}

static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	if (is_edp(intel_dp))
		intel_panel_fini(&intel_connector->panel);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}

static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_set = intel_dp_mode_set,
};

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	return -1;
}

/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	if (!dev_priv->child_dev_num)
		return false;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		p_child = dev_priv->child_dev + i;

		if (p_child->dvo_port == PORT_IDPD &&
		    p_child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_control_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		pp_control_reg = PIPEA_PP_CONTROL;
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_control_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}

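/*
 * Worked example for the function above (illustrative numbers only):
 * with no BIOS or VBT programming, cur.t1_t3 == 0 and vbt.t1_t3 == 0,
 * so assign_final() falls back to the spec limit of 2100 (210 ms in the
 * hw's 100 us units), and get_delay() then yields a 210 ms
 * panel_power_up_delay, since DIV_ROUND_UP(2100, 10) == 210.
 */
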
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		pp_on_reg = PIPEA_PP_ON_DELAYS;
		pp_off_reg = PIPEA_PP_OFF_DELAYS;
		pp_div_reg = PIPEA_PP_DIVISOR;
	}

	if (IS_VALLEYVIEW(dev))
		port_sel = I915_READ(pp_on_reg) & 0xc0000000;

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div) / 2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
		   << PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (is_cpu_edp(intel_dp))
			port_sel = PANEL_POWER_PORT_DP_A;
		else
			port_sel = PANEL_POWER_PORT_DP_D;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
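
/*
 * Illustrative arithmetic for the Bspec formula above, using an assumed
 * raw clock value of 125 from intel_pch_rawclk(): the reference divider
 * field becomes (100 * 125) / 2 - 1 = 6249, and the spec-fallback
 * t11_t12 of 6100 (100 us units) is rounded up to a power cycle delay
 * field of DIV_ROUND_UP(6100, 1000) = 7.
 */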

void
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME: We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup.
	 */
	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
		 * rewrite it.
		 */
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache DPCD and EDID for eDP. */
	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_encoder->base);
			intel_dp_destroy(connector);
			return;
		}

		/* We now know it's not a ghost, init power sequence regs. */
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);

		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}

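/**
 * intel_dp_init - create a DP digital port
 * @dev: DRM device
 * @output_reg: port control register for this DP output
 * @port: which port (A-D) is being initialized
 *
 * Allocates the encoder/connector pair for a DP (or eDP) output, wires
 * up the encoder hooks, and hands off to intel_dp_init_connector().
 */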
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->enable = intel_enable_dp;
	intel_encoder->pre_enable = intel_pre_enable_dp;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->post_disable = intel_post_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	intel_encoder->cloneable = false;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	intel_dp_init_connector(intel_dig_port, intel_connector);
}