/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

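/*
 * Async QP event handler registered via ib_qp_init_attr.event_handler in
 * isert_conn_setup_qp() below: IB_EVENT_COMM_EST is forwarded to the RDMA CM
 * with rdma_notify() so establishment can make progress even if payload
 * arrives before the RTU; other events are only logged.
 */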
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

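/*
 * Build the RC QP for a new connection.  The completion vector is chosen by
 * scanning device->cq_active_qps[] for the CQ pair with the fewest active
 * QPs (a simple least-loaded spread across the CQs created in
 * isert_create_device_ib_res()); the counter is rolled back on failure.
 */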
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
err:
	mutex_lock(&device_list_mutex);
	device->cq_active_qps[min_index]--;
	mutex_unlock(&device_list_mutex);

	return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

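/*
 * Allocate the fixed ring of ISERT_QP_MAX_RECV_DTOS RX descriptors and DMA
 * map each one for ISER_RX_PAYLOAD_SIZE bytes.  Any mapping failure unwinds
 * the descriptors mapped so far and frees the array, returning -ENOMEM.
 */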
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

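/*
 * One-time IB resource setup per struct ib_device: query device attributes,
 * pick fast registration (FRWR) or plain DMA MR RDMA handlers based on
 * IB_DEVICE_MEM_MGT_EXTENSIONS, and create one RX/TX CQ pair per completion
 * vector (capped by num_online_cpus() and ISERT_MAX_CQ), each armed with
 * ib_req_notify_cq().  The out_cq path tears down whatever was created.
 */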
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;
	int max_rx_cqe, max_tx_cqe;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors support "
		 "Fast registration %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_fastreg);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_rx_cqe, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_tx_cqe, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	kfree(device->cq_desc);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

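/*
 * Look up (or lazily create) the isert_device for the ib_device behind a new
 * cma_id, matching on node_guid.  device_list_mutex covers both the list walk
 * and the refcount bump; the counterpart is isert_device_try_release().
 */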
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	pr_debug("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		pr_err("Failed to allocate data frpl err=%ld\n",
		       PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		pr_err("Failed to allocate data frmr err=%ld\n",
		       PTR_ERR(fr_desc->data_mr));
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		return PTR_ERR(fr_desc->data_mr);
	}
	pr_debug("Create fr_desc %p page_list %p\n",
		 fr_desc, fr_desc->data_frpl->page_list);

	fr_desc->valid = true;

	return 0;
}

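/*
 * Pre-allocate the per-connection pool of fast registration descriptors,
 * sized from the session queue depth established in
 * iscsi_target_locate_portal(): (tag_num * 2) + ISCSIT_EXTRA_TAGS entries.
 * On any allocation failure the partially built pool is torn down via
 * isert_conn_free_fastreg_pool().
 */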
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc);
		if (ret) {
			pr_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			/* not yet on conn_fr_pool, free it here */
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	pr_debug("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}

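/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler.  Rejects the request if the iscsi_np
 * is not enabled, otherwise allocates the isert_conn, the login request/
 * response bounce buffers (DMA mapped separately per direction), the PD,
 * DMA MR and QP, posts the initial login receive and calls
 * isert_rdma_accept(), then queues the connection on np_accept_list and
 * wakes the login thread via up(&isert_np->np_sem).
 */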
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	mutex_init(&isert_conn->conn_comp_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_pd;
	}

	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_mr;
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	ib_dereg_mr(isert_conn->conn_mr);
out_mr:
	ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		mutex_lock(&device_list_mutex);
		isert_conn->conn_device->cq_active_qps[cq_index]--;
		mutex_unlock(&device_list_mutex);

		ib_destroy_qp(isert_conn->conn_qp);
	}

	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	pr_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
		pr_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->conn_mutex);
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		/*
		 * No flush completions will occur as we didn't
		 * get to ISER_CONN_FULL_FEATURE yet, complete
		 * to allow teardown progress.
		 */
		complete(&isert_conn->conn_wait_comp_err);
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		pr_info("Terminating conn %p state %d\n",
			isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			pr_warn("Failed rdma_disconnect isert_conn %p\n",
				isert_conn);
		break;
	default:
		pr_warn("conn %p terminating in state %d\n",
			isert_conn, isert_conn->state);
	}
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	pr_debug("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			pr_err("isert np %p setup id failed: %ld\n",
			       isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		pr_err("isert np %p Unexpected event %d\n",
		       isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
}

static void
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_put_conn(isert_conn);
}

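/*
 * Single entry point for all RDMA CM events, on both the listening cm_id and
 * the per-connection cm_ids: connect requests, established notifications,
 * the disconnect/removal family (routed through isert_disconnected_handler())
 * and reject/unreachable/connect errors, which drop the connection reference
 * via isert_put_conn().
 */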
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		isert_connect_error(cma_id);
		break;
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

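/*
 * Post 'count' receive work requests, chained through rx_wr->next, reusing
 * descriptors from the circular conn_rx_descs ring starting at
 * conn_rx_desc_head.  post_recv_buf_count is bumped optimistically and rolled
 * back if ib_post_recv() fails; the head index only advances on success.
 */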
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}

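/*
 * Fill in the SEND work request for a response PDU.  When 'coalesce' is set,
 * only every ISERT_COMP_BATCH_COUNT-th descriptor is posted with
 * IB_SEND_SIGNALED; the unsignaled ones are parked on conn_comp_llist and
 * handed to the signaled descriptor via comp_llnode_batch, presumably so the
 * TX completion path (not shown in this section) can retire the whole batch
 * from a single interrupt.
 */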
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr, bool coalesce)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	/*
	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
	 */
	mutex_lock(&isert_conn->conn_comp_mutex);
	if (coalesce &&
	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
		mutex_unlock(&isert_conn->conn_comp_mutex);
		return;
	}
	isert_conn->conn_comp_batch = 0;
	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
	mutex_unlock(&isert_conn->conn_comp_mutex);

	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}

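/*
 * Transmit an iSCSI login response.  The response header is copied into the
 * per-connection login TX descriptor and, for a non-empty payload, the
 * pre-mapped login_rsp_buf is attached as a second SGE.  Once login
 * completes, the fastreg pool (if used) and the RX descriptor ring are set
 * up, ISERT_MIN_POSTED_RX receives are posted, and the connection moves to
 * ISER_CONN_FULL_FEATURE before the final response is sent.
 */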
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (isert_conn->conn_device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					pr_err("Conn: %p failed to create fastreg pool\n",
					       isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	pr_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			>> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, gfp);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

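/*
 * Demultiplex a received iSCSI PDU by opcode and hand it to the matching
 * iscsit_* handler, stashing the iSER read/write STags and VAs on the newly
 * allocated isert_cmd for commands that may need RDMA later.  For discovery
 * sessions the intent is to accept only TEXT and LOGOUT and ignore anything
 * else with an error message.
 */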
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	    (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}

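/*
 * Handle a receive completion.  The login descriptor is special-cased (it
 * uses the separately mapped login_req_dma buffer and wakes the login path
 * through login_req_comp); ordinary descriptors are dispatched via
 * isert_rx_do_work().  Afterwards, if at least ISERT_MIN_POSTED_RX slots of
 * the RX ring are free again, a fresh batch of receives is reposted with
 * isert_post_recv().
 */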
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
  1512.  
  1513. static void
  1514. isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
  1515. {
  1516. struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
  1517. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  1518.  
  1519. pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
  1520. if (wr->sge) {
  1521. pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
  1522. ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
  1523. (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
  1524. DMA_TO_DEVICE : DMA_FROM_DEVICE);
  1525. wr->sge = NULL;
  1526. }
  1527.  
  1528. if (wr->send_wr) {
  1529. pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
  1530. kfree(wr->send_wr);
  1531. wr->send_wr = NULL;
  1532. }
  1533.  
  1534. if (wr->ib_sge) {
  1535. pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
  1536. kfree(wr->ib_sge);
  1537. wr->ib_sge = NULL;
  1538. }
  1539. }
  1540.  
  1541. static void
  1542. isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
  1543. {
  1544. struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
  1545. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  1546. LIST_HEAD(unmap_list);
  1547.  
  1548. pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
  1549.  
  1550. if (wr->fr_desc) {
  1551. pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
  1552. isert_cmd, wr->fr_desc);
  1553. spin_lock_bh(&isert_conn->conn_lock);
  1554. list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
  1555. spin_unlock_bh(&isert_conn->conn_lock);
  1556. wr->fr_desc = NULL;
  1557. }
  1558.  
  1559. if (wr->sge) {
  1560. pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
  1561. ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
  1562. (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
  1563. DMA_TO_DEVICE : DMA_FROM_DEVICE);
  1564. wr->sge = NULL;
  1565. }
  1566.  
  1567. wr->ib_sge = NULL;
  1568. wr->send_wr = NULL;
  1569. }
  1570.  
  1571. static void
  1572. isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
  1573. {
  1574. struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
  1575. struct isert_conn *isert_conn = isert_cmd->conn;
  1576. struct iscsi_conn *conn = isert_conn->conn;
  1577. struct isert_device *device = isert_conn->conn_device;
  1578.  
  1579. pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
  1580.  
  1581. switch (cmd->iscsi_opcode) {
  1582. case ISCSI_OP_SCSI_CMD:
  1583. spin_lock_bh(&conn->cmd_lock);
  1584. if (!list_empty(&cmd->i_conn_node))
  1585. list_del_init(&cmd->i_conn_node);
  1586. spin_unlock_bh(&conn->cmd_lock);
  1587.  
  1588. if (cmd->data_direction == DMA_TO_DEVICE) {
  1589. iscsit_stop_dataout_timer(cmd);
  1590. /*
  1591. * Check for special case during comp_err where
  1592. * WRITE_PENDING has been handed off from core,
  1593. * but requires an extra target_put_sess_cmd()
  1594. * before transport_generic_free_cmd() below.
  1595. */
  1596. if (comp_err &&
  1597. cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
  1598. struct se_cmd *se_cmd = &cmd->se_cmd;
  1599.  
  1600. target_put_sess_cmd(se_cmd->se_sess, se_cmd);
  1601. }
  1602. }
  1603.  
  1604. device->unreg_rdma_mem(isert_cmd, isert_conn);
  1605. transport_generic_free_cmd(&cmd->se_cmd, 0);
  1606. break;
  1607. case ISCSI_OP_SCSI_TMFUNC:
  1608. spin_lock_bh(&conn->cmd_lock);
  1609. if (!list_empty(&cmd->i_conn_node))
  1610. list_del_init(&cmd->i_conn_node);
  1611. spin_unlock_bh(&conn->cmd_lock);
  1612.  
  1613. transport_generic_free_cmd(&cmd->se_cmd, 0);
  1614. break;
  1615. case ISCSI_OP_REJECT:
  1616. case ISCSI_OP_NOOP_OUT:
  1617. case ISCSI_OP_TEXT:
  1618. spin_lock_bh(&conn->cmd_lock);
  1619. if (!list_empty(&cmd->i_conn_node))
  1620. list_del_init(&cmd->i_conn_node);
  1621. spin_unlock_bh(&conn->cmd_lock);
  1622.  
  1623. /*
  1624. * Handle special case for REJECT when iscsi_add_reject*() has
  1625. * overwritten the original iscsi_opcode assignment, and the
  1626. * associated cmd->se_cmd needs to be released.
  1627. */
  1628. if (cmd->se_cmd.se_tfo != NULL) {
  1629. pr_debug("Calling transport_generic_free_cmd from"
  1630. " isert_put_cmd for 0x%02x\n",
  1631. cmd->iscsi_opcode);
  1632. transport_generic_free_cmd(&cmd->se_cmd, 0);
  1633. break;
  1634. }
  1635. /*
  1636. * Fall-through
  1637. */
  1638. default:
  1639. iscsit_release_cmd(cmd);
  1640. break;
  1641. }
  1642. }
  1643.  
  1644. static void
  1645. isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
  1646. {
  1647. if (tx_desc->dma_addr != 0) {
  1648. pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
  1649. ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
  1650. ISER_HEADERS_LEN, DMA_TO_DEVICE);
  1651. tx_desc->dma_addr = 0;
  1652. }
  1653. }
  1654.  
  1655. static void
  1656. isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
  1657. struct ib_device *ib_dev, bool comp_err)
  1658. {
  1659. if (isert_cmd->pdu_buf_dma != 0) {
  1660. pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
  1661. ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
  1662. isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
  1663. isert_cmd->pdu_buf_dma = 0;
  1664. }
  1665.  
  1666. isert_unmap_tx_desc(tx_desc, ib_dev);
  1667. isert_put_cmd(isert_cmd, comp_err);
  1668. }
  1669.  
  1670. static void
  1671. isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
  1672. struct isert_cmd *isert_cmd)
  1673. {
  1674. struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
  1675. struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
  1676. struct se_cmd *se_cmd = &cmd->se_cmd;
  1677. struct isert_conn *isert_conn = isert_cmd->conn;
  1678. struct isert_device *device = isert_conn->conn_device;
  1679.  
  1680. iscsit_stop_dataout_timer(cmd);
  1681. device->unreg_rdma_mem(isert_cmd, isert_conn);
  1682. cmd->write_data_done = wr->cur_rdma_length;
  1683. wr->send_wr_num = 0;
  1684.  
  1685. pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
  1686. spin_lock_bh(&cmd->istate_lock);
  1687. cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
  1688. cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
  1689. spin_unlock_bh(&cmd->istate_lock);
  1690.  
  1691. target_execute_cmd(se_cmd);
  1692. }
  1693.  
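/*
 * Deferred completion work for control PDUs (TMR, Reject, Logout and
 * Text responses), queued onto isert_comp_wq so the iscsit post
 * handlers run in process context instead of the CQ completion path.
 */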
  1694. static void
  1695. isert_do_control_comp(struct work_struct *work)
  1696. {
  1697. struct isert_cmd *isert_cmd = container_of(work,
  1698. struct isert_cmd, comp_work);
  1699. struct isert_conn *isert_conn = isert_cmd->conn;
  1700. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  1701. struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
  1702.  
  1703. switch (cmd->i_state) {
  1704. case ISTATE_SEND_TASKMGTRSP:
  1705. pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
  1706.  
  1707. atomic_dec(&isert_conn->post_send_buf_count);
  1708. iscsit_tmr_post_handler(cmd, cmd->conn);
  1709.  
  1710. cmd->i_state = ISTATE_SENT_STATUS;
  1711. isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
  1712. break;
  1713. case ISTATE_SEND_REJECT:
  1714. pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
  1715. atomic_dec(&isert_conn->post_send_buf_count);
  1716.  
  1717. cmd->i_state = ISTATE_SENT_STATUS;
  1718. isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
  1719. break;
  1720. case ISTATE_SEND_LOGOUTRSP:
  1721. pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
  1722.  
  1723. atomic_dec(&isert_conn->post_send_buf_count);
  1724. iscsit_logout_post_handler(cmd, cmd->conn);
  1725. break;
  1726. case ISTATE_SEND_TEXTRSP:
  1727. atomic_dec(&isert_conn->post_send_buf_count);
  1728. cmd->i_state = ISTATE_SENT_STATUS;
  1729. isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
  1730. break;
  1731. default:
  1732. pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
  1733. dump_stack();
  1734. break;
  1735. }
  1736. }
  1737.  
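/*
 * IB_SEND completion: control PDU responses are deferred to
 * isert_do_control_comp(), while SCSI responses drop the outstanding
 * send count and release the command immediately.
 */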
  1738. static void
  1739. isert_response_completion(struct iser_tx_desc *tx_desc,
  1740. struct isert_cmd *isert_cmd,
  1741. struct isert_conn *isert_conn,
  1742. struct ib_device *ib_dev)
  1743. {
  1744. struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
  1745. struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
  1746.  
  1747. if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
  1748. cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
  1749. cmd->i_state == ISTATE_SEND_REJECT ||
  1750. cmd->i_state == ISTATE_SEND_TEXTRSP) {
  1751. isert_unmap_tx_desc(tx_desc, ib_dev);
  1752.  
  1753. INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
  1754. queue_work(isert_comp_wq, &isert_cmd->comp_work);
  1755. return;
  1756. }
  1757. atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
  1758.  
  1759. cmd->i_state = ISTATE_SENT_STATUS;
  1760. isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
  1761. }
  1762.  
  1763. static void
  1764. __isert_send_completion(struct iser_tx_desc *tx_desc,
  1765. struct isert_conn *isert_conn)
  1766. {
  1767. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  1768. struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
  1769. struct isert_rdma_wr *wr;
  1770.  
  1771. if (!isert_cmd) {
  1772. atomic_dec(&isert_conn->post_send_buf_count);
  1773. isert_unmap_tx_desc(tx_desc, ib_dev);
  1774. return;
  1775. }
  1776. wr = &isert_cmd->rdma_wr;
  1777.  
  1778. switch (wr->iser_ib_op) {
  1779. case ISER_IB_RECV:
  1780. pr_err("isert_send_completion: Got ISER_IB_RECV\n");
  1781. dump_stack();
  1782. break;
  1783. case ISER_IB_SEND:
  1784. pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
  1785. isert_response_completion(tx_desc, isert_cmd,
  1786. isert_conn, ib_dev);
  1787. break;
  1788. case ISER_IB_RDMA_WRITE:
  1789. pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
  1790. dump_stack();
  1791. break;
  1792. case ISER_IB_RDMA_READ:
  1793. pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
  1794.  
  1795. atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
  1796. isert_completion_rdma_read(tx_desc, isert_cmd);
  1797. break;
  1798. default:
  1799. pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
  1800. dump_stack();
  1801. break;
  1802. }
  1803. }
  1804.  
  1805. static void
  1806. isert_send_completion(struct iser_tx_desc *tx_desc,
  1807. struct isert_conn *isert_conn)
  1808. {
  1809. struct llist_node *llnode = tx_desc->comp_llnode_batch;
  1810. struct iser_tx_desc *t;
  1811. /*
  1812. * Drain coalesced completion llist starting from comp_llnode_batch
  1813. * setup in isert_init_send_wr(), and then complete trailing tx_desc.
  1814. */
  1815. while (llnode) {
  1816. t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
  1817. llnode = llist_next(llnode);
  1818. __isert_send_completion(t, isert_conn);
  1819. }
  1820. __isert_send_completion(tx_desc, isert_conn);
  1821. }
  1822.  
  1823. static void
1824. isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
  1825. {
  1826. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  1827. struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
  1828.  
  1829. if (!isert_cmd)
  1830. isert_unmap_tx_desc(tx_desc, ib_dev);
  1831. else
  1832. isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
  1833. }
  1834.  
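/*
 * RX completion error path: once all posted receives have flushed,
 * wait for outstanding session commands and sends, terminate the
 * RDMA connection and trigger iSCSI connection reinstatement.
 */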
  1835. static void
  1836. isert_cq_rx_comp_err(struct isert_conn *isert_conn)
  1837. {
  1838. struct iscsi_conn *conn = isert_conn->conn;
  1839.  
  1840. if (isert_conn->post_recv_buf_count)
  1841. return;
  1842.  
  1843. if (conn->sess) {
  1844. target_sess_cmd_list_set_waiting(conn->sess->se_sess);
  1845. target_wait_for_sess_cmds(conn->sess->se_sess);
  1846. }
  1847.  
  1848. while (atomic_read(&isert_conn->post_send_buf_count))
  1849. msleep(3000);
  1850.  
  1851. mutex_lock(&isert_conn->conn_mutex);
  1852. isert_conn_terminate(isert_conn);
  1853. mutex_unlock(&isert_conn->conn_mutex);
  1854.  
  1855. iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
  1856.  
  1857. complete(&isert_conn->conn_wait_comp_err);
  1858. }
  1859.  
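/*
 * Drain the TX completion queue from workqueue context, then re-arm
 * it for the next completion notification.
 */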
  1860. static void
  1861. isert_cq_tx_work(struct work_struct *work)
  1862. {
  1863. struct isert_cq_desc *cq_desc = container_of(work,
  1864. struct isert_cq_desc, cq_tx_work);
  1865. struct isert_device *device = cq_desc->device;
  1866. int cq_index = cq_desc->cq_index;
  1867. struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
  1868. struct isert_conn *isert_conn;
  1869. struct iser_tx_desc *tx_desc;
  1870. struct ib_wc wc;
  1871.  
  1872. while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
  1873. tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
  1874. isert_conn = wc.qp->qp_context;
  1875.  
  1876. if (wc.status == IB_WC_SUCCESS) {
  1877. isert_send_completion(tx_desc, isert_conn);
  1878. } else {
  1879. pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
  1880. pr_debug("TX wc.status: 0x%08x\n", wc.status);
  1881. pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
  1882.  
  1883. if (wc.wr_id != ISER_FASTREG_LI_WRID) {
  1884. atomic_dec(&isert_conn->post_send_buf_count);
  1885. isert_cq_tx_comp_err(tx_desc, isert_conn);
  1886. }
  1887. }
  1888. }
  1889.  
  1890. ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
  1891. }
  1892.  
  1893. static void
  1894. isert_cq_tx_callback(struct ib_cq *cq, void *context)
  1895. {
  1896. struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
  1897.  
  1898. queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
  1899. }
  1900.  
  1901. static void
  1902. isert_cq_rx_work(struct work_struct *work)
  1903. {
  1904. struct isert_cq_desc *cq_desc = container_of(work,
  1905. struct isert_cq_desc, cq_rx_work);
  1906. struct isert_device *device = cq_desc->device;
  1907. int cq_index = cq_desc->cq_index;
  1908. struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
  1909. struct isert_conn *isert_conn;
  1910. struct iser_rx_desc *rx_desc;
  1911. struct ib_wc wc;
  1912. unsigned long xfer_len;
  1913.  
  1914. while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
  1915. rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
  1916. isert_conn = wc.qp->qp_context;
  1917.  
  1918. if (wc.status == IB_WC_SUCCESS) {
  1919. xfer_len = (unsigned long)wc.byte_len;
  1920. isert_rx_completion(rx_desc, isert_conn, xfer_len);
  1921. } else {
  1922. pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
  1923. if (wc.status != IB_WC_WR_FLUSH_ERR) {
  1924. pr_debug("RX wc.status: 0x%08x\n", wc.status);
  1925. pr_debug("RX wc.vendor_err: 0x%08x\n",
  1926. wc.vendor_err);
  1927. }
  1928. isert_conn->post_recv_buf_count--;
  1929. isert_cq_rx_comp_err(isert_conn);
  1930. }
  1931. }
  1932.  
  1933. ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
  1934. }
  1935.  
  1936. static void
  1937. isert_cq_rx_callback(struct ib_cq *cq, void *context)
  1938. {
  1939. struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
  1940.  
  1941. queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
  1942. }
  1943.  
  1944. static int
  1945. isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
  1946. {
  1947. struct ib_send_wr *wr_failed;
  1948. int ret;
  1949.  
  1950. atomic_inc(&isert_conn->post_send_buf_count);
  1951.  
  1952. ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
  1953. &wr_failed);
  1954. if (ret) {
  1955. pr_err("ib_post_send failed with %d\n", ret);
  1956. atomic_dec(&isert_conn->post_send_buf_count);
  1957. return ret;
  1958. }
  1959. return ret;
  1960. }
  1961.  
  1962. static int
  1963. isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
  1964. {
  1965. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  1966. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  1967. struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
  1968. struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
  1969. &isert_cmd->tx_desc.iscsi_header;
  1970.  
  1971. isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
  1972. iscsit_build_rsp_pdu(cmd, conn, true, hdr);
  1973. isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
  1974. /*
  1975. * Attach SENSE DATA payload to iSCSI Response PDU
  1976. */
  1977. if (cmd->se_cmd.sense_buffer &&
  1978. ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
  1979. (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
  1980. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  1981. struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
  1982. u32 padding, pdu_len;
  1983.  
  1984. put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
  1985. cmd->sense_buffer);
  1986. cmd->se_cmd.scsi_sense_length += sizeof(__be16);
  1987.  
  1988. padding = -(cmd->se_cmd.scsi_sense_length) & 3;
  1989. hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
  1990. pdu_len = cmd->se_cmd.scsi_sense_length + padding;
  1991.  
  1992. isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
  1993. (void *)cmd->sense_buffer, pdu_len,
  1994. DMA_TO_DEVICE);
  1995.  
  1996. isert_cmd->pdu_buf_len = pdu_len;
  1997. tx_dsg->addr = isert_cmd->pdu_buf_dma;
  1998. tx_dsg->length = pdu_len;
  1999. tx_dsg->lkey = isert_conn->conn_mr->lkey;
  2000. isert_cmd->tx_desc.num_sge = 2;
  2001. }
  2002.  
  2003. isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
  2004.  
  2005. pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  2006.  
  2007. return isert_post_response(isert_conn, isert_cmd);
  2008. }
  2009.  
  2010. static int
  2011. isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
  2012. bool nopout_response)
  2013. {
  2014. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2015. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2016. struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
  2017.  
  2018. isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
  2019. iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
  2020. &isert_cmd->tx_desc.iscsi_header,
  2021. nopout_response);
  2022. isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
  2023. isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  2024.  
  2025. pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  2026.  
  2027. return isert_post_response(isert_conn, isert_cmd);
  2028. }
  2029.  
  2030. static int
  2031. isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
  2032. {
  2033. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2034. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2035. struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
  2036.  
  2037. isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
  2038. iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
  2039. &isert_cmd->tx_desc.iscsi_header);
  2040. isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
  2041. isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  2042.  
  2043. pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  2044.  
  2045. return isert_post_response(isert_conn, isert_cmd);
  2046. }
  2047.  
  2048. static int
  2049. isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
  2050. {
  2051. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2052. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2053. struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
  2054.  
  2055. isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
  2056. iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
  2057. &isert_cmd->tx_desc.iscsi_header);
  2058. isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
  2059. isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  2060.  
  2061. pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  2062.  
  2063. return isert_post_response(isert_conn, isert_cmd);
  2064. }
  2065.  
  2066. static int
  2067. isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
  2068. {
  2069. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2070. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2071. struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
  2072. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  2073. struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
  2074. struct iscsi_reject *hdr =
  2075. (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
  2076.  
  2077. isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
  2078. iscsit_build_reject(cmd, conn, hdr);
  2079. isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
  2080.  
  2081. hton24(hdr->dlength, ISCSI_HDR_LEN);
  2082. isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
  2083. (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
  2084. DMA_TO_DEVICE);
  2085. isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
  2086. tx_dsg->addr = isert_cmd->pdu_buf_dma;
  2087. tx_dsg->length = ISCSI_HDR_LEN;
  2088. tx_dsg->lkey = isert_conn->conn_mr->lkey;
  2089. isert_cmd->tx_desc.num_sge = 2;
  2090.  
  2091. isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  2092.  
  2093. pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  2094.  
  2095. return isert_post_response(isert_conn, isert_cmd);
  2096. }
  2097.  
  2098. static int
  2099. isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
  2100. {
  2101. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2102. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2103. struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
  2104. struct iscsi_text_rsp *hdr =
  2105. (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
  2106. u32 txt_rsp_len;
  2107. int rc;
  2108.  
  2109. isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
  2110. rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
  2111. if (rc < 0)
  2112. return rc;
  2113.  
  2114. txt_rsp_len = rc;
  2115. isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
  2116.  
  2117. if (txt_rsp_len) {
  2118. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  2119. struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
  2120. void *txt_rsp_buf = cmd->buf_ptr;
  2121.  
  2122. isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
  2123. txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
  2124.  
  2125. isert_cmd->pdu_buf_len = txt_rsp_len;
  2126. tx_dsg->addr = isert_cmd->pdu_buf_dma;
  2127. tx_dsg->length = txt_rsp_len;
  2128. tx_dsg->lkey = isert_conn->conn_mr->lkey;
  2129. isert_cmd->tx_desc.num_sge = 2;
  2130. }
  2131. isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  2132.  
  2133. pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  2134.  
  2135. return isert_post_response(isert_conn, isert_cmd);
  2136. }
  2137.  
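/*
 * Build the ib_sge list for a single RDMA work request from the
 * command's DMA-mapped scatterlist, starting at the given byte offset
 * and bounded by data_left and the connection's max_sge.
 */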
  2138. static int
  2139. isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
  2140. struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
  2141. u32 data_left, u32 offset)
  2142. {
  2143. struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
  2144. struct scatterlist *sg_start, *tmp_sg;
  2145. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  2146. u32 sg_off, page_off;
  2147. int i = 0, sg_nents;
  2148.  
  2149. sg_off = offset / PAGE_SIZE;
  2150. sg_start = &cmd->se_cmd.t_data_sg[sg_off];
  2151. sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
  2152. page_off = offset % PAGE_SIZE;
  2153.  
  2154. send_wr->sg_list = ib_sge;
  2155. send_wr->num_sge = sg_nents;
  2156. send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
  2157. /*
  2158. * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
  2159. */
  2160. for_each_sg(sg_start, tmp_sg, sg_nents, i) {
  2161. pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
  2162. (unsigned long long)tmp_sg->dma_address,
  2163. tmp_sg->length, page_off);
  2164.  
  2165. ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
  2166. ib_sge->length = min_t(u32, data_left,
  2167. ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
  2168. ib_sge->lkey = isert_conn->conn_mr->lkey;
  2169.  
  2170. pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
  2171. ib_sge->addr, ib_sge->length, ib_sge->lkey);
  2172. page_off = 0;
  2173. data_left -= ib_sge->length;
  2174. ib_sge++;
  2175. pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
  2176. }
  2177.  
  2178. pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
  2179. send_wr->sg_list, send_wr->num_sge);
  2180.  
  2181. return sg_nents;
  2182. }
  2183.  
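/*
 * reg_rdma_mem backend using the global DMA lkey (conn_mr): map the
 * command scatterlist and slice it into one or more RDMA_WRITE /
 * RDMA_READ work requests via isert_build_rdma_wr().
 */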
  2184. static int
  2185. isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
  2186. struct isert_rdma_wr *wr)
  2187. {
  2188. struct se_cmd *se_cmd = &cmd->se_cmd;
  2189. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2190. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2191. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  2192. struct ib_send_wr *send_wr;
  2193. struct ib_sge *ib_sge;
  2194. struct scatterlist *sg_start;
  2195. u32 sg_off = 0, sg_nents;
  2196. u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
  2197. int ret = 0, count, i, ib_sge_cnt;
  2198.  
  2199. if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
  2200. data_left = se_cmd->data_length;
  2201. } else {
  2202. sg_off = cmd->write_data_done / PAGE_SIZE;
  2203. data_left = se_cmd->data_length - cmd->write_data_done;
  2204. offset = cmd->write_data_done;
  2205. isert_cmd->tx_desc.isert_cmd = isert_cmd;
  2206. }
  2207.  
  2208. sg_start = &cmd->se_cmd.t_data_sg[sg_off];
  2209. sg_nents = se_cmd->t_data_nents - sg_off;
  2210.  
  2211. count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
  2212. (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
  2213. DMA_TO_DEVICE : DMA_FROM_DEVICE);
  2214. if (unlikely(!count)) {
  2215. pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
  2216. return -EINVAL;
  2217. }
  2218. wr->sge = sg_start;
  2219. wr->num_sge = sg_nents;
  2220. wr->cur_rdma_length = data_left;
  2221. pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
  2222. isert_cmd, count, sg_start, sg_nents, data_left);
  2223.  
  2224. ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
  2225. if (!ib_sge) {
  2226. pr_warn("Unable to allocate ib_sge\n");
  2227. ret = -ENOMEM;
  2228. goto unmap_sg;
  2229. }
  2230. wr->ib_sge = ib_sge;
  2231.  
  2232. wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
  2233. wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
  2234. GFP_KERNEL);
  2235. if (!wr->send_wr) {
  2236. pr_debug("Unable to allocate wr->send_wr\n");
  2237. ret = -ENOMEM;
  2238. goto unmap_sg;
  2239. }
  2240.  
  2241. wr->isert_cmd = isert_cmd;
  2242. rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
  2243.  
  2244. for (i = 0; i < wr->send_wr_num; i++) {
  2245. send_wr = &isert_cmd->rdma_wr.send_wr[i];
  2246. data_len = min(data_left, rdma_write_max);
  2247.  
  2248. send_wr->send_flags = 0;
  2249. if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
  2250. send_wr->opcode = IB_WR_RDMA_WRITE;
  2251. send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
  2252. send_wr->wr.rdma.rkey = isert_cmd->read_stag;
  2253. if (i + 1 == wr->send_wr_num)
  2254. send_wr->next = &isert_cmd->tx_desc.send_wr;
  2255. else
  2256. send_wr->next = &wr->send_wr[i + 1];
  2257. } else {
  2258. send_wr->opcode = IB_WR_RDMA_READ;
  2259. send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
  2260. send_wr->wr.rdma.rkey = isert_cmd->write_stag;
  2261. if (i + 1 == wr->send_wr_num)
  2262. send_wr->send_flags = IB_SEND_SIGNALED;
  2263. else
  2264. send_wr->next = &wr->send_wr[i + 1];
  2265. }
  2266.  
  2267. ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
  2268. send_wr, data_len, offset);
  2269. ib_sge += ib_sge_cnt;
  2270.  
  2271. offset += data_len;
  2272. va_offset += data_len;
  2273. data_left -= data_len;
  2274. }
  2275.  
  2276. return 0;
  2277. unmap_sg:
  2278. ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
  2279. (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
  2280. DMA_TO_DEVICE : DMA_FROM_DEVICE);
  2281. return ret;
  2282. }
  2283.  
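/*
 * Translate a DMA-mapped scatterlist into the fast registration page
 * list, coalescing contiguous entries and returning the number of
 * pages mapped.
 */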
  2284. static int
  2285. isert_map_fr_pagelist(struct ib_device *ib_dev,
  2286. struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
  2287. {
  2288. u64 start_addr, end_addr, page, chunk_start = 0;
  2289. struct scatterlist *tmp_sg;
  2290. int i = 0, new_chunk, last_ent, n_pages;
  2291.  
  2292. n_pages = 0;
  2293. new_chunk = 1;
  2294. last_ent = sg_nents - 1;
  2295. for_each_sg(sg_start, tmp_sg, sg_nents, i) {
  2296. start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
  2297. if (new_chunk)
  2298. chunk_start = start_addr;
  2299. end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
  2300.  
  2301. pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
  2302. i, (unsigned long long)tmp_sg->dma_address,
  2303. tmp_sg->length);
  2304.  
  2305. if ((end_addr & ~PAGE_MASK) && i < last_ent) {
  2306. new_chunk = 0;
  2307. continue;
  2308. }
  2309. new_chunk = 1;
  2310.  
  2311. page = chunk_start & PAGE_MASK;
  2312. do {
  2313. fr_pl[n_pages++] = page;
  2314. pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
  2315. n_pages - 1, page);
  2316. page += PAGE_SIZE;
  2317. } while (page < end_addr);
  2318. }
  2319.  
  2320. return n_pages;
  2321. }
  2322.  
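/*
 * Register the scatterlist pages through a fast registration MR: if
 * the descriptor is not marked valid, chain an IB_WR_LOCAL_INV ahead
 * of the IB_WR_FAST_REG_MR and bump the rkey, then return a single
 * ib_sge covering the registered region.
 */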
  2323. static int
  2324. isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
  2325. struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
  2326. struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
  2327. {
  2328. struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
  2329. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  2330. struct scatterlist *sg_start;
  2331. u32 sg_off, page_off;
  2332. struct ib_send_wr fr_wr, inv_wr;
  2333. struct ib_send_wr *bad_wr, *wr = NULL;
  2334. u8 key;
  2335. int ret, sg_nents, pagelist_len;
  2336.  
  2337. sg_off = offset / PAGE_SIZE;
  2338. sg_start = &cmd->se_cmd.t_data_sg[sg_off];
  2339. sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
  2340. ISCSI_ISER_SG_TABLESIZE);
  2341. page_off = offset % PAGE_SIZE;
  2342.  
  2343. pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
  2344. isert_cmd, fr_desc, sg_nents, sg_off, offset);
  2345.  
  2346. pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
  2347. &fr_desc->data_frpl->page_list[0]);
  2348.  
  2349. if (!fr_desc->valid) {
  2350. memset(&inv_wr, 0, sizeof(inv_wr));
  2351. inv_wr.wr_id = ISER_FASTREG_LI_WRID;
  2352. inv_wr.opcode = IB_WR_LOCAL_INV;
  2353. inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
  2354. wr = &inv_wr;
  2355. /* Bump the key */
  2356. key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
  2357. ib_update_fast_reg_key(fr_desc->data_mr, ++key);
  2358. }
  2359.  
  2360. /* Prepare FASTREG WR */
  2361. memset(&fr_wr, 0, sizeof(fr_wr));
  2362. fr_wr.wr_id = ISER_FASTREG_LI_WRID;
  2363. fr_wr.opcode = IB_WR_FAST_REG_MR;
  2364. fr_wr.wr.fast_reg.iova_start =
  2365. fr_desc->data_frpl->page_list[0] + page_off;
  2366. fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
  2367. fr_wr.wr.fast_reg.page_list_len = pagelist_len;
  2368. fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
  2369. fr_wr.wr.fast_reg.length = data_len;
  2370. fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
  2371. fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
  2372.  
  2373. if (!wr)
  2374. wr = &fr_wr;
  2375. else
  2376. wr->next = &fr_wr;
  2377.  
  2378. ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
  2379. if (ret) {
  2380. pr_err("fast registration failed, ret:%d\n", ret);
  2381. return ret;
  2382. }
  2383. fr_desc->valid = false;
  2384.  
  2385. ib_sge->lkey = fr_desc->data_mr->lkey;
  2386. ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
  2387. ib_sge->length = data_len;
  2388.  
  2389. pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
  2390. ib_sge->addr, ib_sge->length, ib_sge->lkey);
  2391.  
  2392. return ret;
  2393. }
  2394.  
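/*
 * reg_rdma_mem backend using fast registration: map the scatterlist
 * and describe it with a single ib_sge, either directly via the DMA
 * lkey when only one mapping is needed, or through a
 * fast_reg_descriptor taken from the connection's pool.
 */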
  2395. static int
  2396. isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
  2397. struct isert_rdma_wr *wr)
  2398. {
  2399. struct se_cmd *se_cmd = &cmd->se_cmd;
  2400. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2401. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2402. struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
  2403. struct ib_send_wr *send_wr;
  2404. struct ib_sge *ib_sge;
  2405. struct scatterlist *sg_start;
  2406. struct fast_reg_descriptor *fr_desc;
  2407. u32 sg_off = 0, sg_nents;
  2408. u32 offset = 0, data_len, data_left, rdma_write_max;
  2409. int ret = 0, count;
  2410. unsigned long flags;
  2411.  
  2412. if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
  2413. data_left = se_cmd->data_length;
  2414. } else {
  2415. sg_off = cmd->write_data_done / PAGE_SIZE;
  2416. data_left = se_cmd->data_length - cmd->write_data_done;
  2417. offset = cmd->write_data_done;
  2418. isert_cmd->tx_desc.isert_cmd = isert_cmd;
  2419. }
  2420.  
  2421. sg_start = &cmd->se_cmd.t_data_sg[sg_off];
  2422. sg_nents = se_cmd->t_data_nents - sg_off;
  2423.  
  2424. count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
  2425. (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
  2426. DMA_TO_DEVICE : DMA_FROM_DEVICE);
  2427. if (unlikely(!count)) {
  2428. pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
  2429. return -EINVAL;
  2430. }
  2431. wr->sge = sg_start;
  2432. wr->num_sge = sg_nents;
  2433. pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
  2434. isert_cmd, count, sg_start, sg_nents, data_left);
  2435.  
  2436. memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
  2437. ib_sge = &wr->s_ib_sge;
  2438. wr->ib_sge = ib_sge;
  2439.  
  2440. wr->send_wr_num = 1;
  2441. memset(&wr->s_send_wr, 0, sizeof(*send_wr));
  2442. wr->send_wr = &wr->s_send_wr;
  2443.  
  2444. wr->isert_cmd = isert_cmd;
  2445. rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
  2446.  
  2447. send_wr = &isert_cmd->rdma_wr.s_send_wr;
  2448. send_wr->sg_list = ib_sge;
  2449. send_wr->num_sge = 1;
  2450. send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
  2451. if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
  2452. send_wr->opcode = IB_WR_RDMA_WRITE;
  2453. send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
  2454. send_wr->wr.rdma.rkey = isert_cmd->read_stag;
  2455. send_wr->send_flags = 0;
  2456. send_wr->next = &isert_cmd->tx_desc.send_wr;
  2457. } else {
  2458. send_wr->opcode = IB_WR_RDMA_READ;
  2459. send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
  2460. send_wr->wr.rdma.rkey = isert_cmd->write_stag;
  2461. send_wr->send_flags = IB_SEND_SIGNALED;
  2462. }
  2463.  
  2464. data_len = min(data_left, rdma_write_max);
  2465. wr->cur_rdma_length = data_len;
  2466.  
  2467. /* if there is a single dma entry, dma mr is sufficient */
  2468. if (count == 1) {
  2469. ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
  2470. ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
  2471. ib_sge->lkey = isert_conn->conn_mr->lkey;
  2472. wr->fr_desc = NULL;
  2473. } else {
  2474. spin_lock_irqsave(&isert_conn->conn_lock, flags);
  2475. fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
  2476. struct fast_reg_descriptor, list);
  2477. list_del(&fr_desc->list);
  2478. spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
  2479. wr->fr_desc = fr_desc;
  2480.  
  2481. ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
  2482. ib_sge, offset, data_len);
  2483. if (ret) {
  2484. list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
  2485. goto unmap_sg;
  2486. }
  2487. }
  2488.  
  2489. return 0;
  2490.  
  2491. unmap_sg:
  2492. ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
  2493. (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
  2494. DMA_TO_DEVICE : DMA_FROM_DEVICE);
  2495. return ret;
  2496. }
  2497.  
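/*
 * queue_data_in callback: register the READ payload and post the
 * RDMA_WRITE chain with the SCSI response send chained behind it.
 */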
  2498. static int
  2499. isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
  2500. {
  2501. struct se_cmd *se_cmd = &cmd->se_cmd;
  2502. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2503. struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
  2504. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2505. struct isert_device *device = isert_conn->conn_device;
  2506. struct ib_send_wr *wr_failed;
  2507. int rc;
  2508.  
  2509. pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
  2510. isert_cmd, se_cmd->data_length);
  2511. wr->iser_ib_op = ISER_IB_RDMA_WRITE;
  2512. rc = device->reg_rdma_mem(conn, cmd, wr);
  2513. if (rc) {
  2514. pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
  2515. return rc;
  2516. }
  2517.  
  2518. /*
  2519. * Build isert_conn->tx_desc for iSCSI response PDU and attach
  2520. */
  2521. isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
  2522. iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
  2523. &isert_cmd->tx_desc.iscsi_header);
  2524. isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
  2525. isert_init_send_wr(isert_conn, isert_cmd,
  2526. &isert_cmd->tx_desc.send_wr, true);
  2527.  
  2528. atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
  2529.  
  2530. rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
  2531. if (rc) {
  2532. pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
  2533. atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
  2534. }
  2535. pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
  2536. isert_cmd);
  2537.  
  2538. return 1;
  2539. }
  2540.  
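/*
 * get_dataout callback: register the remaining WRITE payload and post
 * the RDMA_READ to pull Data-Out from the initiator; completion is
 * handled in isert_completion_rdma_read().
 */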
  2541. static int
  2542. isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
  2543. {
  2544. struct se_cmd *se_cmd = &cmd->se_cmd;
  2545. struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
  2546. struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
  2547. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2548. struct isert_device *device = isert_conn->conn_device;
  2549. struct ib_send_wr *wr_failed;
  2550. int rc;
  2551.  
  2552. pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
  2553. isert_cmd, se_cmd->data_length, cmd->write_data_done);
  2554. wr->iser_ib_op = ISER_IB_RDMA_READ;
  2555. rc = device->reg_rdma_mem(conn, cmd, wr);
  2556. if (rc) {
  2557. pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
  2558. return rc;
  2559. }
  2560.  
  2561. atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
  2562.  
  2563. rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
  2564. if (rc) {
  2565. pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
  2566. atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
  2567. }
  2568. pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
  2569. isert_cmd);
  2570.  
  2571. return 0;
  2572. }
  2573.  
  2574. static int
  2575. isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
  2576. {
  2577. int ret;
  2578.  
  2579. switch (state) {
  2580. case ISTATE_SEND_NOPIN_WANT_RESPONSE:
  2581. ret = isert_put_nopin(cmd, conn, false);
  2582. break;
  2583. default:
  2584. pr_err("Unknown immediate state: 0x%02x\n", state);
  2585. ret = -EINVAL;
  2586. break;
  2587. }
  2588.  
  2589. return ret;
  2590. }
  2591.  
  2592. static int
  2593. isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
  2594. {
  2595. int ret;
  2596.  
  2597. switch (state) {
  2598. case ISTATE_SEND_LOGOUTRSP:
  2599. ret = isert_put_logout_rsp(cmd, conn);
  2600. if (!ret) {
  2601. pr_debug("Returning iSER Logout -EAGAIN\n");
  2602. ret = -EAGAIN;
  2603. }
  2604. break;
  2605. case ISTATE_SEND_NOPIN:
  2606. ret = isert_put_nopin(cmd, conn, true);
  2607. break;
  2608. case ISTATE_SEND_TASKMGTRSP:
  2609. ret = isert_put_tm_rsp(cmd, conn);
  2610. break;
  2611. case ISTATE_SEND_REJECT:
  2612. ret = isert_put_reject(cmd, conn);
  2613. break;
  2614. case ISTATE_SEND_TEXTRSP:
  2615. ret = isert_put_text_rsp(cmd, conn);
  2616. break;
  2617. case ISTATE_SEND_STATUS:
  2618. /*
2619. * Special case for sending non-GOOD SCSI status from TX thread
2620. * context during pre se_cmd execution failure.
  2621. */
  2622. ret = isert_put_response(conn, cmd);
  2623. break;
  2624. default:
  2625. pr_err("Unknown response state: 0x%02x\n", state);
  2626. ret = -EINVAL;
  2627. break;
  2628. }
  2629.  
  2630. return ret;
  2631. }
  2632.  
  2633. struct rdma_cm_id *
  2634. isert_setup_id(struct isert_np *isert_np)
  2635. {
  2636. struct iscsi_np *np = isert_np->np;
  2637. struct rdma_cm_id *id;
  2638. struct sockaddr *sa;
  2639. int ret;
  2640.  
  2641. sa = (struct sockaddr *)&np->np_sockaddr;
  2642. pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
  2643.  
  2644. id = rdma_create_id(isert_cma_handler, isert_np,
  2645. RDMA_PS_TCP, IB_QPT_RC);
  2646. if (IS_ERR(id)) {
  2647. pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
  2648. ret = PTR_ERR(id);
  2649. goto out;
  2650. }
  2651. pr_debug("id %p context %p\n", id, id->context);
  2652.  
  2653. ret = rdma_bind_addr(id, sa);
  2654. if (ret) {
  2655. pr_err("rdma_bind_addr() failed: %d\n", ret);
  2656. goto out_id;
  2657. }
  2658.  
  2659. ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
  2660. if (ret) {
  2661. pr_err("rdma_listen() failed: %d\n", ret);
  2662. goto out_id;
  2663. }
  2664.  
  2665. return id;
  2666. out_id:
  2667. rdma_destroy_id(id);
  2668. out:
  2669. return ERR_PTR(ret);
  2670. }
  2671.  
  2672. static int
  2673. isert_setup_np(struct iscsi_np *np,
  2674. struct __kernel_sockaddr_storage *ksockaddr)
  2675. {
  2676. struct isert_np *isert_np;
  2677. struct rdma_cm_id *isert_lid;
  2678. int ret;
  2679.  
  2680. isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
  2681. if (!isert_np) {
  2682. pr_err("Unable to allocate struct isert_np\n");
  2683. return -ENOMEM;
  2684. }
  2685. sema_init(&isert_np->np_sem, 0);
  2686. mutex_init(&isert_np->np_accept_mutex);
  2687. INIT_LIST_HEAD(&isert_np->np_accept_list);
  2688. init_completion(&isert_np->np_login_comp);
  2689. isert_np->np = np;
  2690.  
  2691. /*
2692. * Setup np->np_sockaddr from the sockaddr passed in from
2693. * the iscsi_target_configfs.c code.
  2694. */
  2695. memcpy(&np->np_sockaddr, ksockaddr,
  2696. sizeof(struct __kernel_sockaddr_storage));
  2697.  
  2698. isert_lid = isert_setup_id(isert_np);
  2699. if (IS_ERR(isert_lid)) {
  2700. ret = PTR_ERR(isert_lid);
  2701. goto out;
  2702. }
  2703.  
  2704. isert_np->np_cm_id = isert_lid;
  2705. np->np_context = isert_np;
  2706.  
  2707. return 0;
  2708.  
  2709. out:
  2710. kfree(isert_np);
  2711.  
  2712. return ret;
  2713. }
  2714.  
  2715. static int
  2716. isert_rdma_accept(struct isert_conn *isert_conn)
  2717. {
  2718. struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
  2719. struct rdma_conn_param cp;
  2720. int ret;
  2721.  
  2722. memset(&cp, 0, sizeof(struct rdma_conn_param));
  2723. cp.responder_resources = isert_conn->responder_resources;
  2724. cp.initiator_depth = isert_conn->initiator_depth;
  2725. cp.retry_count = 7;
  2726. cp.rnr_retry_count = 7;
  2727.  
  2728. pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
  2729.  
  2730. ret = rdma_accept(cm_id, &cp);
  2731. if (ret) {
  2732. pr_err("rdma_accept() failed with: %d\n", ret);
  2733. return ret;
  2734. }
  2735.  
  2736. pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
  2737.  
  2738. return 0;
  2739. }
  2740.  
  2741. static int
  2742. isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
  2743. {
  2744. struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
  2745. int ret;
  2746.  
  2747. pr_info("before login_req comp conn: %p\n", isert_conn);
  2748. ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
  2749. if (ret) {
  2750. pr_err("isert_conn %p interrupted before got login req\n",
  2751. isert_conn);
  2752. return ret;
  2753. }
  2754. INIT_COMPLETION(isert_conn->login_req_comp);
  2755.  
  2756. /*
  2757. * For login requests after the first PDU, isert_rx_login_req() will
  2758. * kick schedule_delayed_work(&conn->login_work) as the packet is
  2759. * received, which turns this callback from iscsi_target_do_login_rx()
  2760. * into a NOP.
  2761. */
  2762. if (!login->first_request)
  2763. return 0;
  2764.  
  2765. isert_rx_login_req(isert_conn);
  2766.  
  2767. pr_info("before conn_login_comp conn: %p\n", conn);
  2768. ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
  2769. if (ret)
  2770. return ret;
  2771.  
  2772. pr_info("processing login->req: %p\n", login->req);
  2773.  
  2774. return 0;
  2775. }
  2776.  
  2777. static void
  2778. isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
  2779. struct isert_conn *isert_conn)
  2780. {
  2781. struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
  2782. struct rdma_route *cm_route = &cm_id->route;
  2783. struct sockaddr_in *sock_in;
  2784. struct sockaddr_in6 *sock_in6;
  2785.  
  2786. conn->login_family = np->np_sockaddr.ss_family;
  2787.  
  2788. if (np->np_sockaddr.ss_family == AF_INET6) {
  2789. sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
  2790. snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
  2791. &sock_in6->sin6_addr.in6_u);
  2792. conn->login_port = ntohs(sock_in6->sin6_port);
  2793.  
  2794. sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
  2795. snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
  2796. &sock_in6->sin6_addr.in6_u);
  2797. conn->local_port = ntohs(sock_in6->sin6_port);
  2798. } else {
  2799. sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
  2800. sprintf(conn->login_ip, "%pI4",
  2801. &sock_in->sin_addr.s_addr);
  2802. conn->login_port = ntohs(sock_in->sin_port);
  2803.  
  2804. sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
  2805. sprintf(conn->local_ip, "%pI4",
  2806. &sock_in->sin_addr.s_addr);
  2807. conn->local_port = ntohs(sock_in->sin_port);
  2808. }
  2809. }
  2810.  
  2811. static int
  2812. isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
  2813. {
  2814. struct isert_np *isert_np = (struct isert_np *)np->np_context;
  2815. struct isert_conn *isert_conn;
  2816. int max_accept = 0, ret;
  2817.  
  2818. accept_wait:
  2819. ret = down_interruptible(&isert_np->np_sem);
  2820. if (max_accept > 5)
  2821. return -ENODEV;
  2822.  
  2823. spin_lock_bh(&np->np_thread_lock);
  2824. if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
  2825. spin_unlock_bh(&np->np_thread_lock);
  2826. pr_debug("np_thread_state %d for isert_accept_np\n",
  2827. np->np_thread_state);
  2828. /**
  2829. * No point in stalling here when np_thread
  2830. * is in state RESET/SHUTDOWN/EXIT - bail
  2831. **/
  2832. return -ENODEV;
  2833. }
  2834. spin_unlock_bh(&np->np_thread_lock);
  2835.  
  2836. mutex_lock(&isert_np->np_accept_mutex);
  2837. if (list_empty(&isert_np->np_accept_list)) {
  2838. mutex_unlock(&isert_np->np_accept_mutex);
  2839. max_accept++;
  2840. goto accept_wait;
  2841. }
  2842. isert_conn = list_first_entry(&isert_np->np_accept_list,
  2843. struct isert_conn, conn_accept_node);
  2844. list_del_init(&isert_conn->conn_accept_node);
  2845. mutex_unlock(&isert_np->np_accept_mutex);
  2846.  
  2847. conn->context = isert_conn;
  2848. isert_conn->conn = conn;
  2849. max_accept = 0;
  2850.  
  2851. isert_set_conn_info(np, conn, isert_conn);
  2852.  
  2853. pr_debug("Processing isert_conn: %p\n", isert_conn);
  2854.  
  2855. return 0;
  2856. }
  2857.  
  2858. static void
  2859. isert_free_np(struct iscsi_np *np)
  2860. {
  2861. struct isert_np *isert_np = (struct isert_np *)np->np_context;
  2862.  
  2863. if (isert_np->np_cm_id)
  2864. rdma_destroy_id(isert_np->np_cm_id);
  2865.  
  2866. np->np_context = NULL;
  2867. kfree(isert_np);
  2868. }
  2869.  
  2870. static void isert_release_work(struct work_struct *work)
  2871. {
  2872. struct isert_conn *isert_conn = container_of(work,
  2873. struct isert_conn,
  2874. release_work);
  2875.  
  2876. pr_info("Starting release conn %p\n", isert_conn);
  2877.  
  2878. wait_for_completion(&isert_conn->conn_wait);
  2879.  
  2880. mutex_lock(&isert_conn->conn_mutex);
  2881. isert_conn->state = ISER_CONN_DOWN;
  2882. mutex_unlock(&isert_conn->conn_mutex);
  2883.  
  2884. pr_info("Destroying conn %p\n", isert_conn);
  2885. isert_put_conn(isert_conn);
  2886. }
  2887.  
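/*
 * iscsit_wait_conn callback: terminate the connection once it has
 * left ISER_CONN_INIT, wait for the completion-error path to finish,
 * then defer final teardown to isert_release_wq.
 */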
  2888. static void isert_wait_conn(struct iscsi_conn *conn)
  2889. {
  2890. struct isert_conn *isert_conn = conn->context;
  2891.  
  2892. pr_debug("isert_wait_conn: Starting \n");
  2893.  
  2894. mutex_lock(&isert_conn->conn_mutex);
  2895. /*
  2896. * Only wait for conn_wait_comp_err if the isert_conn made it
2897. * into full feature phase.
  2898. */
  2899. if (isert_conn->state == ISER_CONN_INIT) {
  2900. mutex_unlock(&isert_conn->conn_mutex);
  2901. return;
  2902. }
  2903. isert_conn_terminate(isert_conn);
  2904. mutex_unlock(&isert_conn->conn_mutex);
  2905.  
  2906. wait_for_completion(&isert_conn->conn_wait_comp_err);
  2907.  
  2908. INIT_WORK(&isert_conn->release_work, isert_release_work);
  2909. queue_work(isert_release_wq, &isert_conn->release_work);
  2910. }
  2911.  
  2912. static void isert_free_conn(struct iscsi_conn *conn)
  2913. {
  2914. struct isert_conn *isert_conn = conn->context;
  2915.  
  2916. isert_put_conn(isert_conn);
  2917. }
  2918.  
  2919. static struct iscsit_transport iser_target_transport = {
  2920. .name = "IB/iSER",
  2921. .transport_type = ISCSI_INFINIBAND,
  2922. .priv_size = sizeof(struct isert_cmd),
  2923. .owner = THIS_MODULE,
  2924. .iscsit_setup_np = isert_setup_np,
  2925. .iscsit_accept_np = isert_accept_np,
  2926. .iscsit_free_np = isert_free_np,
  2927. .iscsit_wait_conn = isert_wait_conn,
  2928. .iscsit_free_conn = isert_free_conn,
  2929. .iscsit_get_login_rx = isert_get_login_rx,
  2930. .iscsit_put_login_tx = isert_put_login_tx,
  2931. .iscsit_immediate_queue = isert_immediate_queue,
  2932. .iscsit_response_queue = isert_response_queue,
  2933. .iscsit_get_dataout = isert_get_dataout,
  2934. .iscsit_queue_data_in = isert_put_datain,
  2935. .iscsit_queue_status = isert_put_response,
  2936. };
  2937.  
  2938. static int __init isert_init(void)
  2939. {
  2940. int ret;
  2941.  
  2942. isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
  2943. if (!isert_rx_wq) {
  2944. pr_err("Unable to allocate isert_rx_wq\n");
  2945. return -ENOMEM;
  2946. }
  2947.  
  2948. isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
  2949. if (!isert_comp_wq) {
  2950. pr_err("Unable to allocate isert_comp_wq\n");
  2951. ret = -ENOMEM;
  2952. goto destroy_rx_wq;
  2953. }
  2954.  
  2955. isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
  2956. WQ_UNBOUND_MAX_ACTIVE);
  2957. if (!isert_release_wq) {
  2958. pr_err("Unable to allocate isert_release_wq\n");
  2959. ret = -ENOMEM;
  2960. goto destroy_comp_wq;
  2961. }
  2962.  
  2963. iscsit_register_transport(&iser_target_transport);
  2964. pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
  2965.  
  2966. return 0;
  2967.  
  2968. destroy_comp_wq:
  2969. destroy_workqueue(isert_comp_wq);
  2970. destroy_rx_wq:
  2971. destroy_workqueue(isert_rx_wq);
  2972. return ret;
  2973. }
  2974.  
  2975. static void __exit isert_exit(void)
  2976. {
  2977. flush_scheduled_work();
  2978. destroy_workqueue(isert_release_wq);
  2979. destroy_workqueue(isert_comp_wq);
  2980. destroy_workqueue(isert_rx_wq);
  2981. iscsit_unregister_transport(&iser_target_transport);
  2982. pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
  2983. }
  2984.  
  2985. MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
  2986. MODULE_VERSION("0.1");
  2987. MODULE_AUTHOR("nab@Linux-iSCSI.org");
  2988. MODULE_LICENSE("GPL");
  2989.  
  2990. module_init(isert_init);
  2991. module_exit(isert_exit);