Backport Rockchip 3288 Crypto patchset into Armbian 4.4

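What follows is the mainline three-commit series that adds the RK3288 crypto engine driver (commits 433cd2c617bf, ac7c8e6b6dc9 and bfd927ffa219), pasted as a single diff so it can be carried in an Armbian 4.4 kernel build. To use it, save everything from the first "commit" line onward without the paste's line numbering, apply it from the kernel source root with: patch -p1 < rk3288-crypto.patch (the filename is arbitrary), then enable CONFIG_CRYPTO_DEV_ROCKCHIP under Cryptographic API -> Hardware crypto devices.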
  1. commit 433cd2c617bfbac27a02e40fbcce1713c84ce441
  2. Author: Zain Wang <zain.wang@rock-chips.com>
  3. Date:   Wed Nov 25 13:43:32 2015 +0800
  4.  
  5.     crypto: rockchip - add crypto driver for rk3288
  6.    
  7.     Crypto driver support:
  8.          ecb(aes) cbc(aes) ecb(des) cbc(des) ecb(des3_ede) cbc(des3_ede)
  9.     You can allocate any of the tags listed above through the kernel crypto API.
  10.    
  11.     And other algorithms and platforms will be added later on.
  12.    
  13.     Signed-off-by: Zain Wang <zain.wang@rock-chips.com>
  14.     Tested-by: Heiko Stuebner <heiko@sntech.de>
  15.     Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  16.  
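For reference, here is a minimal sketch (not part of the patch; the demo_* name is invented) of how a 4.4 kernel consumer would pick up one of the tags listed above. Once the driver is bound, a generic "cbc(aes)" lookup resolves to its cbc-aes-rk implementation, whose cra_priority of 300 outranks the software fallback:

#include <linux/crypto.h>
#include <linux/err.h>

static int demo_alloc_rk_cipher(const u8 *key, unsigned int keylen)
{
        struct crypto_ablkcipher *tfm;
        int err;

        /* 4.4 still uses the ablkcipher interface this driver registers */
        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_ablkcipher_setkey(tfm, key, keylen); /* 16/24/32 bytes */

        /* ... build an ablkcipher_request over DMA-able scatterlists and
         * call crypto_ablkcipher_encrypt()/_decrypt() here ... */

        crypto_free_ablkcipher(tfm);
        return err;
}

A quick sanity check after boot is reading /proc/crypto and looking for the -rk driver names.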
  17. diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
  18. index 5357bc1..95dccde 100644
  19. --- a/drivers/crypto/Kconfig
  20. +++ b/drivers/crypto/Kconfig
  21. @@ -497,4 +497,15 @@ config CRYPTO_DEV_SUN4I_SS
  22.       To compile this driver as a module, choose M here: the module
  23.       will be called sun4i-ss.
  24.  
  25. +config CRYPTO_DEV_ROCKCHIP
  26. +   tristate "Rockchip's Cryptographic Engine driver"
  27. +   depends on OF && ARCH_ROCKCHIP
  28. +   select CRYPTO_AES
  29. +   select CRYPTO_DES
  30. +   select CRYPTO_BLKCIPHER
  31. +
  32. +   help
  33. +     This driver interfaces with the hardware crypto accelerator.
  34. +     Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
  35. +
  36.  endif # CRYPTO_HW
  37. diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
  38. index c3ced6fb..713de9d 100644
  39. --- a/drivers/crypto/Makefile
  40. +++ b/drivers/crypto/Makefile
  41. @@ -29,3 +29,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
  42.  obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
  43.  obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
  44.  obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
  45. +obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
  46. diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
  47. new file mode 100644
  48. index 0000000..7051c6c
  49. --- /dev/null
  50. +++ b/drivers/crypto/rockchip/Makefile
  51. @@ -0,0 +1,3 @@
  52. +obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
  53. +rk_crypto-objs := rk3288_crypto.o \
  54. +         rk3288_crypto_ablkcipher.o \
  55. diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
  56. new file mode 100644
  57. index 0000000..6b72f8d
  58. --- /dev/null
  59. +++ b/drivers/crypto/rockchip/rk3288_crypto.c
  60. @@ -0,0 +1,393 @@
  61. +/*
  62. + * Crypto acceleration support for Rockchip RK3288
  63. + *
  64. + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
  65. + *
  66. + * Author: Zain Wang <zain.wang@rock-chips.com>
  67. + *
  68. + * This program is free software; you can redistribute it and/or modify it
  69. + * under the terms and conditions of the GNU General Public License,
  70. + * version 2, as published by the Free Software Foundation.
  71. + *
  72. + * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
  73. + */
  74. +
  75. +#include "rk3288_crypto.h"
  76. +#include <linux/module.h>
  77. +#include <linux/platform_device.h>
  78. +#include <linux/of.h>
  79. +#include <linux/clk.h>
  80. +#include <linux/crypto.h>
  81. +#include <linux/reset.h>
  82. +
  83. +static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
  84. +{
  85. +   int err;
  86. +
  87. +   err = clk_prepare_enable(dev->sclk);
  88. +   if (err) {
  89. +       dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
  90. +           __func__, __LINE__);
  91. +       goto err_return;
  92. +   }
  93. +   err = clk_prepare_enable(dev->aclk);
  94. +   if (err) {
  95. +       dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
  96. +           __func__, __LINE__);
  97. +       goto err_aclk;
  98. +   }
  99. +   err = clk_prepare_enable(dev->hclk);
  100. +   if (err) {
  101. +       dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
  102. +           __func__, __LINE__);
  103. +       goto err_hclk;
  104. +   }
  105. +   err = clk_prepare_enable(dev->dmaclk);
  106. +   if (err) {
  107. +       dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
  108. +           __func__, __LINE__);
  109. +       goto err_dmaclk;
  110. +   }
  111. +   return err;
  112. +err_dmaclk:
  113. +   clk_disable_unprepare(dev->hclk);
  114. +err_hclk:
  115. +   clk_disable_unprepare(dev->aclk);
  116. +err_aclk:
  117. +   clk_disable_unprepare(dev->sclk);
  118. +err_return:
  119. +   return err;
  120. +}
  121. +
  122. +static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
  123. +{
  124. +   clk_disable_unprepare(dev->dmaclk);
  125. +   clk_disable_unprepare(dev->hclk);
  126. +   clk_disable_unprepare(dev->aclk);
  127. +   clk_disable_unprepare(dev->sclk);
  128. +}
  129. +
  130. +static int check_alignment(struct scatterlist *sg_src,
  131. +              struct scatterlist *sg_dst,
  132. +              int align_mask)
  133. +{
  134. +   int in, out, align;
  135. +
  136. +   in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
  137. +        IS_ALIGNED((uint32_t)sg_src->length, align_mask);
  138. +   if (!sg_dst)
  139. +       return in;
  140. +   out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
  141. +         IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
  142. +   align = in && out;
  143. +
  144. +   return (align && (sg_src->length == sg_dst->length));
  145. +}
  146. +
  147. +static int rk_load_data(struct rk_crypto_info *dev,
  148. +           struct scatterlist *sg_src,
  149. +           struct scatterlist *sg_dst)
  150. +{
  151. +   unsigned int count;
  152. +
  153. +   dev->aligned = dev->aligned ?
  154. +       check_alignment(sg_src, sg_dst, dev->align_size) :
  155. +       dev->aligned;
  156. +   if (dev->aligned) {
  157. +       count = min(dev->left_bytes, sg_src->length);
  158. +       dev->left_bytes -= count;
  159. +
  160. +       if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
  161. +           dev_err(dev->dev, "[%s:%d] dma_map_sg(src)  error\n",
  162. +               __func__, __LINE__);
  163. +           return -EINVAL;
  164. +       }
  165. +       dev->addr_in = sg_dma_address(sg_src);
  166. +
  167. +       if (sg_dst) {
  168. +           if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
  169. +               dev_err(dev->dev,
  170. +                   "[%s:%d] dma_map_sg(dst)  error\n",
  171. +                   __func__, __LINE__);
  172. +               dma_unmap_sg(dev->dev, sg_src, 1,
  173. +                        DMA_TO_DEVICE);
  174. +               return -EINVAL;
  175. +           }
  176. +           dev->addr_out = sg_dma_address(sg_dst);
  177. +       }
  178. +   } else {
  179. +       count = (dev->left_bytes > PAGE_SIZE) ?
  180. +           PAGE_SIZE : dev->left_bytes;
  181. +
  182. +       if (!sg_pcopy_to_buffer(dev->first, dev->nents,
  183. +                   dev->addr_vir, count,
  184. +                   dev->total - dev->left_bytes)) {
  185. +           dev_err(dev->dev, "[%s:%d] pcopy err\n",
  186. +               __func__, __LINE__);
  187. +           return -EINVAL;
  188. +       }
  189. +       dev->left_bytes -= count;
  190. +       sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
  191. +       if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
  192. +           dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp)  error\n",
  193. +               __func__, __LINE__);
  194. +           return -ENOMEM;
  195. +       }
  196. +       dev->addr_in = sg_dma_address(&dev->sg_tmp);
  197. +
  198. +       if (sg_dst) {
  199. +           if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
  200. +                   DMA_FROM_DEVICE)) {
  201. +               dev_err(dev->dev,
  202. +                   "[%s:%d] dma_map_sg(sg_tmp)  error\n",
  203. +                   __func__, __LINE__);
  204. +               dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
  205. +                        DMA_TO_DEVICE);
  206. +               return -ENOMEM;
  207. +           }
  208. +           dev->addr_out = sg_dma_address(&dev->sg_tmp);
  209. +       }
  210. +   }
  211. +   dev->count = count;
  212. +   return 0;
  213. +}
  214. +
  215. +static void rk_unload_data(struct rk_crypto_info *dev)
  216. +{
  217. +   struct scatterlist *sg_in, *sg_out;
  218. +
  219. +   sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
  220. +   dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
  221. +
  222. +   if (dev->sg_dst) {
  223. +       sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
  224. +       dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
  225. +   }
  226. +}
  227. +
  228. +static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
  229. +{
  230. +   struct rk_crypto_info *dev  = platform_get_drvdata(dev_id);
  231. +   u32 interrupt_status;
  232. +   int err = 0;
  233. +
  234. +   spin_lock(&dev->lock);
  235. +   interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
  236. +   CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
  237. +   if (interrupt_status & 0x0a) {
  238. +       dev_warn(dev->dev, "DMA Error\n");
  239. +       err = -EFAULT;
  240. +   } else if (interrupt_status & 0x05) {
  241. +       err = dev->update(dev);
  242. +   }
  243. +   if (err)
  244. +       dev->complete(dev, err);
  245. +   spin_unlock(&dev->lock);
  246. +   return IRQ_HANDLED;
  247. +}
  248. +
  249. +static void rk_crypto_tasklet_cb(unsigned long data)
  250. +{
  251. +   struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
  252. +   struct crypto_async_request *async_req, *backlog;
  253. +   int err = 0;
  254. +
  255. +   spin_lock(&dev->lock);
  256. +   backlog   = crypto_get_backlog(&dev->queue);
  257. +   async_req = crypto_dequeue_request(&dev->queue);
  258. +   spin_unlock(&dev->lock);
  259. +   if (!async_req) {
  260. +       dev_err(dev->dev, "async_req is NULL !!\n");
  261. +       return;
  262. +   }
  263. +   if (backlog) {
  264. +       backlog->complete(backlog, -EINPROGRESS);
  265. +       backlog = NULL;
  266. +   }
  267. +
  268. +   if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
  269. +       dev->ablk_req = ablkcipher_request_cast(async_req);
  270. +   err = dev->start(dev);
  271. +   if (err)
  272. +       dev->complete(dev, err);
  273. +}
  274. +
  275. +static struct rk_crypto_tmp *rk_cipher_algs[] = {
  276. +   &rk_ecb_aes_alg,
  277. +   &rk_cbc_aes_alg,
  278. +   &rk_ecb_des_alg,
  279. +   &rk_cbc_des_alg,
  280. +   &rk_ecb_des3_ede_alg,
  281. +   &rk_cbc_des3_ede_alg,
  282. +};
  283. +
  284. +static int rk_crypto_register(struct rk_crypto_info *crypto_info)
  285. +{
  286. +   unsigned int i, k;
  287. +   int err = 0;
  288. +
  289. +   for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
  290. +       rk_cipher_algs[i]->dev = crypto_info;
  291. +       err = crypto_register_alg(&rk_cipher_algs[i]->alg);
  292. +       if (err)
  293. +           goto err_cipher_algs;
  294. +   }
  295. +   return 0;
  296. +
  297. +err_cipher_algs:
  298. +   for (k = 0; k < i; k++)
  299. +       crypto_unregister_alg(&rk_cipher_algs[k]->alg);
  300. +   return err;
  301. +}
  302. +
  303. +static void rk_crypto_unregister(void)
  304. +{
  305. +   unsigned int i;
  306. +
  307. +   for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
  308. +       crypto_unregister_alg(&rk_cipher_algs[i]->alg);
  309. +}
  310. +
  311. +static void rk_crypto_action(void *data)
  312. +{
  313. +   struct rk_crypto_info *crypto_info = data;
  314. +
  315. +   reset_control_assert(crypto_info->rst);
  316. +}
  317. +
  318. +static const struct of_device_id crypto_of_id_table[] = {
  319. +   { .compatible = "rockchip,rk3288-crypto" },
  320. +   {}
  321. +};
  322. +MODULE_DEVICE_TABLE(of, crypto_of_id_table);
  323. +
  324. +static int rk_crypto_probe(struct platform_device *pdev)
  325. +{
  326. +   struct resource *res;
  327. +   struct device *dev = &pdev->dev;
  328. +   struct rk_crypto_info *crypto_info;
  329. +   int err = 0;
  330. +
  331. +   crypto_info = devm_kzalloc(&pdev->dev,
  332. +                  sizeof(*crypto_info), GFP_KERNEL);
  333. +   if (!crypto_info) {
  334. +       err = -ENOMEM;
  335. +       goto err_crypto;
  336. +   }
  337. +
  338. +   crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
  339. +   if (IS_ERR(crypto_info->rst)) {
  340. +       err = PTR_ERR(crypto_info->rst);
  341. +       goto err_crypto;
  342. +   }
  343. +
  344. +   reset_control_assert(crypto_info->rst);
  345. +   usleep_range(10, 20);
  346. +   reset_control_deassert(crypto_info->rst);
  347. +
  348. +   err = devm_add_action(dev, rk_crypto_action, crypto_info);
  349. +   if (err) {
  350. +       reset_control_assert(crypto_info->rst);
  351. +       goto err_crypto;
  352. +   }
  353. +
  354. +   spin_lock_init(&crypto_info->lock);
  355. +
  356. +   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  357. +   crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
  358. +   if (IS_ERR(crypto_info->reg)) {
  359. +       err = PTR_ERR(crypto_info->reg);
  360. +       goto err_crypto;
  361. +   }
  362. +
  363. +   crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
  364. +   if (IS_ERR(crypto_info->aclk)) {
  365. +       err = PTR_ERR(crypto_info->aclk);
  366. +       goto err_crypto;
  367. +   }
  368. +
  369. +   crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
  370. +   if (IS_ERR(crypto_info->hclk)) {
  371. +       err = PTR_ERR(crypto_info->hclk);
  372. +       goto err_crypto;
  373. +   }
  374. +
  375. +   crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
  376. +   if (IS_ERR(crypto_info->sclk)) {
  377. +       err = PTR_ERR(crypto_info->sclk);
  378. +       goto err_crypto;
  379. +   }
  380. +
  381. +   crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
  382. +   if (IS_ERR(crypto_info->dmaclk)) {
  383. +       err = PTR_ERR(crypto_info->dmaclk);
  384. +       goto err_crypto;
  385. +   }
  386. +
  387. +   crypto_info->irq = platform_get_irq(pdev, 0);
  388. +   if (crypto_info->irq < 0) {
  389. +       dev_warn(crypto_info->dev,
  390. +            "control Interrupt is not available.\n");
  391. +       err = crypto_info->irq;
  392. +       goto err_crypto;
  393. +   }
  394. +
  395. +   err = devm_request_irq(&pdev->dev, crypto_info->irq,
  396. +                  rk_crypto_irq_handle, IRQF_SHARED,
  397. +                  "rk-crypto", pdev);
  398. +
  399. +   if (err) {
  400. +       dev_err(crypto_info->dev, "irq request failed.\n");
  401. +       goto err_crypto;
  402. +   }
  403. +
  404. +   crypto_info->dev = &pdev->dev;
  405. +   platform_set_drvdata(pdev, crypto_info);
  406. +
  407. +   tasklet_init(&crypto_info->crypto_tasklet,
  408. +            rk_crypto_tasklet_cb, (unsigned long)crypto_info);
  409. +   crypto_init_queue(&crypto_info->queue, 50);
  410. +
  411. +   crypto_info->enable_clk = rk_crypto_enable_clk;
  412. +   crypto_info->disable_clk = rk_crypto_disable_clk;
  413. +   crypto_info->load_data = rk_load_data;
  414. +   crypto_info->unload_data = rk_unload_data;
  415. +
  416. +   err = rk_crypto_register(crypto_info);
  417. +   if (err) {
  418. +       dev_err(dev, "err in register alg");
  419. +       goto err_register_alg;
  420. +   }
  421. +
  422. +   dev_info(dev, "Crypto Accelerator successfully registered\n");
  423. +   return 0;
  424. +
  425. +err_register_alg:
  426. +   tasklet_kill(&crypto_info->crypto_tasklet);
  427. +err_crypto:
  428. +   return err;
  429. +}
  430. +
  431. +static int rk_crypto_remove(struct platform_device *pdev)
  432. +{
  433. +   struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
  434. +
  435. +   rk_crypto_unregister();
  436. +   tasklet_kill(&crypto_tmp->crypto_tasklet);
  437. +   return 0;
  438. +}
  439. +
  440. +static struct platform_driver crypto_driver = {
  441. +   .probe      = rk_crypto_probe,
  442. +   .remove     = rk_crypto_remove,
  443. +   .driver     = {
  444. +       .name   = "rk3288-crypto",
  445. +       .of_match_table = crypto_of_id_table,
  446. +   },
  447. +};
  448. +
  449. +module_platform_driver(crypto_driver);
  450. +
  451. +MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
  452. +MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
  453. +MODULE_LICENSE("GPL");
  454. diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
  455. new file mode 100644
  456. index 0000000..e499c2c
  457. --- /dev/null
  458. +++ b/drivers/crypto/rockchip/rk3288_crypto.h
  459. @@ -0,0 +1,216 @@
  460. +#ifndef __RK3288_CRYPTO_H__
  461. +#define __RK3288_CRYPTO_H__
  462. +
  463. +#include <crypto/aes.h>
  464. +#include <crypto/des.h>
  465. +#include <crypto/algapi.h>
  466. +#include <linux/interrupt.h>
  467. +#include <linux/delay.h>
  468. +
  469. +#define _SBF(v, f)         ((v) << (f))
  470. +
  471. +/* Crypto control registers*/
  472. +#define RK_CRYPTO_INTSTS       0x0000
  473. +#define RK_CRYPTO_PKA_DONE_INT     BIT(5)
  474. +#define RK_CRYPTO_HASH_DONE_INT        BIT(4)
  475. +#define RK_CRYPTO_HRDMA_ERR_INT        BIT(3)
  476. +#define RK_CRYPTO_HRDMA_DONE_INT   BIT(2)
  477. +#define RK_CRYPTO_BCDMA_ERR_INT        BIT(1)
  478. +#define RK_CRYPTO_BCDMA_DONE_INT   BIT(0)
  479. +
  480. +#define RK_CRYPTO_INTENA       0x0004
  481. +#define RK_CRYPTO_PKA_DONE_ENA     BIT(5)
  482. +#define RK_CRYPTO_HASH_DONE_ENA        BIT(4)
  483. +#define RK_CRYPTO_HRDMA_ERR_ENA        BIT(3)
  484. +#define RK_CRYPTO_HRDMA_DONE_ENA   BIT(2)
  485. +#define RK_CRYPTO_BCDMA_ERR_ENA        BIT(1)
  486. +#define RK_CRYPTO_BCDMA_DONE_ENA   BIT(0)
  487. +
  488. +#define RK_CRYPTO_CTRL         0x0008
  489. +#define RK_CRYPTO_WRITE_MASK       _SBF(0xFFFF, 16)
  490. +#define RK_CRYPTO_TRNG_FLUSH       BIT(9)
  491. +#define RK_CRYPTO_TRNG_START       BIT(8)
  492. +#define RK_CRYPTO_PKA_FLUSH        BIT(7)
  493. +#define RK_CRYPTO_HASH_FLUSH       BIT(6)
  494. +#define RK_CRYPTO_BLOCK_FLUSH      BIT(5)
  495. +#define RK_CRYPTO_PKA_START        BIT(4)
  496. +#define RK_CRYPTO_HASH_START       BIT(3)
  497. +#define RK_CRYPTO_BLOCK_START      BIT(2)
  498. +#define RK_CRYPTO_TDES_START       BIT(1)
  499. +#define RK_CRYPTO_AES_START        BIT(0)
  500. +
  501. +#define RK_CRYPTO_CONF         0x000c
  502. +/* HASH Receive DMA Address Mode:   fix | increment */
  503. +#define RK_CRYPTO_HR_ADDR_MODE     BIT(8)
  504. +/* Block Transmit DMA Address Mode: fix | increment */
  505. +#define RK_CRYPTO_BT_ADDR_MODE     BIT(7)
  506. +/* Block Receive DMA Address Mode:  fix | increment */
  507. +#define RK_CRYPTO_BR_ADDR_MODE     BIT(6)
  508. +#define RK_CRYPTO_BYTESWAP_HRFIFO  BIT(5)
  509. +#define RK_CRYPTO_BYTESWAP_BTFIFO  BIT(4)
  510. +#define RK_CRYPTO_BYTESWAP_BRFIFO  BIT(3)
  511. +/* AES = 0 OR DES = 1 */
  512. +#define RK_CRYPTO_DESSEL               BIT(2)
  513. +#define RK_CYYPTO_HASHINSEL_INDEPENDENT_SOURCE     _SBF(0x00, 0)
  514. +#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_INPUT     _SBF(0x01, 0)
  515. +#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT        _SBF(0x02, 0)
  516. +
  517. +/* Block Receiving DMA Start Address Register */
  518. +#define RK_CRYPTO_BRDMAS       0x0010
  519. +/* Block Transmitting DMA Start Address Register */
  520. +#define RK_CRYPTO_BTDMAS       0x0014
  521. +/* Block Receiving DMA Length Register */
  522. +#define RK_CRYPTO_BRDMAL       0x0018
  523. +/* Hash Receiving DMA Start Address Register */
  524. +#define RK_CRYPTO_HRDMAS       0x001c
  525. +/* Hash Receiving DMA Length Register */
  526. +#define RK_CRYPTO_HRDMAL       0x0020
  527. +
  528. +/* AES registers */
  529. +#define RK_CRYPTO_AES_CTRL           0x0080
  530. +#define RK_CRYPTO_AES_BYTESWAP_CNT BIT(11)
  531. +#define RK_CRYPTO_AES_BYTESWAP_KEY BIT(10)
  532. +#define RK_CRYPTO_AES_BYTESWAP_IV  BIT(9)
  533. +#define RK_CRYPTO_AES_BYTESWAP_DO  BIT(8)
  534. +#define RK_CRYPTO_AES_BYTESWAP_DI  BIT(7)
  535. +#define RK_CRYPTO_AES_KEY_CHANGE   BIT(6)
  536. +#define RK_CRYPTO_AES_ECB_MODE     _SBF(0x00, 4)
  537. +#define RK_CRYPTO_AES_CBC_MODE     _SBF(0x01, 4)
  538. +#define RK_CRYPTO_AES_CTR_MODE     _SBF(0x02, 4)
  539. +#define RK_CRYPTO_AES_128BIT_key   _SBF(0x00, 2)
  540. +#define RK_CRYPTO_AES_192BIT_key   _SBF(0x01, 2)
  541. +#define RK_CRYPTO_AES_256BIT_key   _SBF(0x02, 2)
  542. +/* Slave = 0 / fifo = 1 */
  543. +#define RK_CRYPTO_AES_FIFO_MODE        BIT(1)
  544. +/* Encryption = 0 , Decryption = 1 */
  545. +#define RK_CRYPTO_AES_DEC      BIT(0)
  546. +
  547. +#define RK_CRYPTO_AES_STS      0x0084
  548. +#define RK_CRYPTO_AES_DONE     BIT(0)
  549. +
  550. +/* AES Input Data 0-3 Register */
  551. +#define RK_CRYPTO_AES_DIN_0        0x0088
  552. +#define RK_CRYPTO_AES_DIN_1        0x008c
  553. +#define RK_CRYPTO_AES_DIN_2        0x0090
  554. +#define RK_CRYPTO_AES_DIN_3        0x0094
  555. +
  556. +/* AES output Data 0-3 Register */
  557. +#define RK_CRYPTO_AES_DOUT_0       0x0098
  558. +#define RK_CRYPTO_AES_DOUT_1       0x009c
  559. +#define RK_CRYPTO_AES_DOUT_2       0x00a0
  560. +#define RK_CRYPTO_AES_DOUT_3       0x00a4
  561. +
  562. +/* AES IV Data 0-3 Register */
  563. +#define RK_CRYPTO_AES_IV_0     0x00a8
  564. +#define RK_CRYPTO_AES_IV_1     0x00ac
  565. +#define RK_CRYPTO_AES_IV_2     0x00b0
  566. +#define RK_CRYPTO_AES_IV_3     0x00b4
  567. +
  568. +/* AES Key Data 0-3 Register */
  569. +#define RK_CRYPTO_AES_KEY_0        0x00b8
  570. +#define RK_CRYPTO_AES_KEY_1        0x00bc
  571. +#define RK_CRYPTO_AES_KEY_2        0x00c0
  572. +#define RK_CRYPTO_AES_KEY_3        0x00c4
  573. +#define RK_CRYPTO_AES_KEY_4        0x00c8
  574. +#define RK_CRYPTO_AES_KEY_5        0x00cc
  575. +#define RK_CRYPTO_AES_KEY_6        0x00d0
  576. +#define RK_CRYPTO_AES_KEY_7        0x00d4
  577. +
  578. +/* des/tdes */
  579. +#define RK_CRYPTO_TDES_CTRL        0x0100
  580. +#define RK_CRYPTO_TDES_BYTESWAP_KEY    BIT(8)
  581. +#define RK_CRYPTO_TDES_BYTESWAP_IV BIT(7)
  582. +#define RK_CRYPTO_TDES_BYTESWAP_DO BIT(6)
  583. +#define RK_CRYPTO_TDES_BYTESWAP_DI BIT(5)
  584. +/* 0: ECB, 1: CBC */
  585. +#define RK_CRYPTO_TDES_CHAINMODE_CBC   BIT(4)
  586. +/* TDES Key Mode, 0 : EDE, 1 : EEE */
  587. +#define RK_CRYPTO_TDES_EEE     BIT(3)
  588. +/* 0: DES, 1:TDES */
  589. +#define RK_CRYPTO_TDES_SELECT      BIT(2)
  590. +/* 0: Slave, 1:Fifo */
  591. +#define RK_CRYPTO_TDES_FIFO_MODE   BIT(1)
  592. +/* Encryption = 0 , Decryption = 1 */
  593. +#define RK_CRYPTO_TDES_DEC     BIT(0)
  594. +
  595. +#define RK_CRYPTO_TDES_STS     0x0104
  596. +#define RK_CRYPTO_TDES_DONE        BIT(0)
  597. +
  598. +#define RK_CRYPTO_TDES_DIN_0       0x0108
  599. +#define RK_CRYPTO_TDES_DIN_1       0x010c
  600. +#define RK_CRYPTO_TDES_DOUT_0      0x0110
  601. +#define RK_CRYPTO_TDES_DOUT_1      0x0114
  602. +#define RK_CRYPTO_TDES_IV_0        0x0118
  603. +#define RK_CRYPTO_TDES_IV_1        0x011c
  604. +#define RK_CRYPTO_TDES_KEY1_0      0x0120
  605. +#define RK_CRYPTO_TDES_KEY1_1      0x0124
  606. +#define RK_CRYPTO_TDES_KEY2_0      0x0128
  607. +#define RK_CRYPTO_TDES_KEY2_1      0x012c
  608. +#define RK_CRYPTO_TDES_KEY3_0      0x0130
  609. +#define RK_CRYPTO_TDES_KEY3_1      0x0134
  610. +
  611. +#define CRYPTO_READ(dev, offset)         \
  612. +       readl_relaxed(((dev)->reg + (offset)))
  613. +#define CRYPTO_WRITE(dev, offset, val)   \
  614. +       writel_relaxed((val), ((dev)->reg + (offset)))
  615. +
  616. +struct rk_crypto_info {
  617. +   struct device           *dev;
  618. +   struct clk          *aclk;
  619. +   struct clk          *hclk;
  620. +   struct clk          *sclk;
  621. +   struct clk          *dmaclk;
  622. +   struct reset_control        *rst;
  623. +   void __iomem            *reg;
  624. +   int             irq;
  625. +   struct crypto_queue     queue;
  626. +   struct tasklet_struct       crypto_tasklet;
  627. +   struct ablkcipher_request   *ablk_req;
  628. +   /* device lock */
  629. +   spinlock_t          lock;
  630. +
  631. +   /* the public variable */
  632. +   struct scatterlist      *sg_src;
  633. +   struct scatterlist      *sg_dst;
  634. +   struct scatterlist      sg_tmp;
  635. +   struct scatterlist      *first;
  636. +   unsigned int            left_bytes;
  637. +   void                *addr_vir;
  638. +   int             aligned;
  639. +   int             align_size;
  640. +   size_t              nents;
  641. +   unsigned int            total;
  642. +   unsigned int            count;
  643. +   u32             mode;
  644. +   dma_addr_t          addr_in;
  645. +   dma_addr_t          addr_out;
  646. +   int (*start)(struct rk_crypto_info *dev);
  647. +   int (*update)(struct rk_crypto_info *dev);
  648. +   void (*complete)(struct rk_crypto_info *dev, int err);
  649. +   int (*enable_clk)(struct rk_crypto_info *dev);
  650. +   void (*disable_clk)(struct rk_crypto_info *dev);
  651. +   int (*load_data)(struct rk_crypto_info *dev,
  652. +            struct scatterlist *sg_src,
  653. +            struct scatterlist *sg_dst);
  654. +   void (*unload_data)(struct rk_crypto_info *dev);
  655. +};
  656. +
  657. +/* the private variable of cipher */
  658. +struct rk_cipher_ctx {
  659. +   struct rk_crypto_info       *dev;
  660. +   unsigned int            keylen;
  661. +};
  662. +
  663. +struct rk_crypto_tmp {
  664. +   struct rk_crypto_info *dev;
  665. +   struct crypto_alg alg;
  666. +};
  667. +
  668. +extern struct rk_crypto_tmp rk_ecb_aes_alg;
  669. +extern struct rk_crypto_tmp rk_cbc_aes_alg;
  670. +extern struct rk_crypto_tmp rk_ecb_des_alg;
  671. +extern struct rk_crypto_tmp rk_cbc_des_alg;
  672. +extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
  673. +extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
  674. +
  675. +#endif
  676. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  677. new file mode 100644
  678. index 0000000..4a8f9de
  679. --- /dev/null
  680. +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  681. @@ -0,0 +1,503 @@
  682. +/*
  683. + * Crypto acceleration support for Rockchip RK3288
  684. + *
  685. + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
  686. + *
  687. + * Author: Zain Wang <zain.wang@rock-chips.com>
  688. + *
  689. + * This program is free software; you can redistribute it and/or modify it
  690. + * under the terms and conditions of the GNU General Public License,
  691. + * version 2, as published by the Free Software Foundation.
  692. + *
  693. + * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
  694. + */
  695. +#include "rk3288_crypto.h"
  696. +
  697. +#define RK_CRYPTO_DEC          BIT(0)
  698. +
  699. +static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
  700. +{
  701. +   if (dev->ablk_req->base.complete)
  702. +       dev->ablk_req->base.complete(&dev->ablk_req->base, err);
  703. +}
  704. +
  705. +static int rk_handle_req(struct rk_crypto_info *dev,
  706. +            struct ablkcipher_request *req)
  707. +{
  708. +   int err;
  709. +
  710. +   if (!IS_ALIGNED(req->nbytes, dev->align_size))
  711. +       return -EINVAL;
  712. +
  713. +   dev->left_bytes = req->nbytes;
  714. +   dev->total = req->nbytes;
  715. +   dev->sg_src = req->src;
  716. +   dev->first = req->src;
  717. +   dev->nents = sg_nents(req->src);
  718. +   dev->sg_dst = req->dst;
  719. +   dev->aligned = 1;
  720. +   dev->ablk_req = req;
  721. +
  722. +   spin_lock(&dev->lock);
  723. +   err = ablkcipher_enqueue_request(&dev->queue, req);
  724. +   spin_unlock(&dev->lock);
  725. +   tasklet_schedule(&dev->crypto_tasklet);
  726. +   return err;
  727. +}
  728. +
  729. +static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
  730. +            const u8 *key, unsigned int keylen)
  731. +{
  732. +   struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  733. +   struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
  734. +
  735. +   if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
  736. +       keylen != AES_KEYSIZE_256) {
  737. +       crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  738. +       return -EINVAL;
  739. +   }
  740. +   ctx->keylen = keylen;
  741. +   memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
  742. +   return 0;
  743. +}
  744. +
  745. +static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
  746. +             const u8 *key, unsigned int keylen)
  747. +{
  748. +   struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  749. +   struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
  750. +   u32 tmp[DES_EXPKEY_WORDS];
  751. +
  752. +   if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
  753. +       crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  754. +       return -EINVAL;
  755. +   }
  756. +
  757. +   if (keylen == DES_KEY_SIZE) {
  758. +       if (!des_ekey(tmp, key) &&
  759. +           (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
  760. +           tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
  761. +           return -EINVAL;
  762. +       }
  763. +   }
  764. +
  765. +   ctx->keylen = keylen;
  766. +   memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
  767. +   return 0;
  768. +}
  769. +
  770. +static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
  771. +{
  772. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  773. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  774. +   struct rk_crypto_info *dev = ctx->dev;
  775. +
  776. +   dev->mode = RK_CRYPTO_AES_ECB_MODE;
  777. +   return rk_handle_req(dev, req);
  778. +}
  779. +
  780. +static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
  781. +{
  782. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  783. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  784. +   struct rk_crypto_info *dev = ctx->dev;
  785. +
  786. +   dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
  787. +   return rk_handle_req(dev, req);
  788. +}
  789. +
  790. +static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
  791. +{
  792. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  793. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  794. +   struct rk_crypto_info *dev = ctx->dev;
  795. +
  796. +   dev->mode = RK_CRYPTO_AES_CBC_MODE;
  797. +   return rk_handle_req(dev, req);
  798. +}
  799. +
  800. +static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
  801. +{
  802. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  803. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  804. +   struct rk_crypto_info *dev = ctx->dev;
  805. +
  806. +   dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
  807. +   return rk_handle_req(dev, req);
  808. +}
  809. +
  810. +static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
  811. +{
  812. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  813. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  814. +   struct rk_crypto_info *dev = ctx->dev;
  815. +
  816. +   dev->mode = 0;
  817. +   return rk_handle_req(dev, req);
  818. +}
  819. +
  820. +static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
  821. +{
  822. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  823. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  824. +   struct rk_crypto_info *dev = ctx->dev;
  825. +
  826. +   dev->mode = RK_CRYPTO_DEC;
  827. +   return rk_handle_req(dev, req);
  828. +}
  829. +
  830. +static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
  831. +{
  832. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  833. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  834. +   struct rk_crypto_info *dev = ctx->dev;
  835. +
  836. +   dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
  837. +   return rk_handle_req(dev, req);
  838. +}
  839. +
  840. +static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
  841. +{
  842. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  843. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  844. +   struct rk_crypto_info *dev = ctx->dev;
  845. +
  846. +   dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
  847. +   return rk_handle_req(dev, req);
  848. +}
  849. +
  850. +static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
  851. +{
  852. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  853. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  854. +   struct rk_crypto_info *dev = ctx->dev;
  855. +
  856. +   dev->mode = RK_CRYPTO_TDES_SELECT;
  857. +   return rk_handle_req(dev, req);
  858. +}
  859. +
  860. +static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
  861. +{
  862. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  863. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  864. +   struct rk_crypto_info *dev = ctx->dev;
  865. +
  866. +   dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
  867. +   return rk_handle_req(dev, req);
  868. +}
  869. +
  870. +static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
  871. +{
  872. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  873. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  874. +   struct rk_crypto_info *dev = ctx->dev;
  875. +
  876. +   dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
  877. +   return rk_handle_req(dev, req);
  878. +}
  879. +
  880. +static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
  881. +{
  882. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  883. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  884. +   struct rk_crypto_info *dev = ctx->dev;
  885. +
  886. +   dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
  887. +           RK_CRYPTO_DEC;
  888. +   return rk_handle_req(dev, req);
  889. +}
  890. +
  891. +static void rk_ablk_hw_init(struct rk_crypto_info *dev)
  892. +{
  893. +   struct crypto_ablkcipher *cipher =
  894. +       crypto_ablkcipher_reqtfm(dev->ablk_req);
  895. +   struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  896. +   struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  897. +   u32 ivsize, block, conf_reg = 0;
  898. +
  899. +   block = crypto_tfm_alg_blocksize(tfm);
  900. +   ivsize = crypto_ablkcipher_ivsize(cipher);
  901. +
  902. +   if (block == DES_BLOCK_SIZE) {
  903. +       dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
  904. +                RK_CRYPTO_TDES_BYTESWAP_KEY |
  905. +                RK_CRYPTO_TDES_BYTESWAP_IV;
  906. +       CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
  907. +       memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
  908. +               dev->ablk_req->info, ivsize);
  909. +       conf_reg = RK_CRYPTO_DESSEL;
  910. +   } else {
  911. +       dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
  912. +                RK_CRYPTO_AES_KEY_CHANGE |
  913. +                RK_CRYPTO_AES_BYTESWAP_KEY |
  914. +                RK_CRYPTO_AES_BYTESWAP_IV;
  915. +       if (ctx->keylen == AES_KEYSIZE_192)
  916. +           dev->mode |= RK_CRYPTO_AES_192BIT_key;
  917. +       else if (ctx->keylen == AES_KEYSIZE_256)
  918. +           dev->mode |= RK_CRYPTO_AES_256BIT_key;
  919. +       CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
  920. +       memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
  921. +               dev->ablk_req->info, ivsize);
  922. +   }
  923. +   conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
  924. +           RK_CRYPTO_BYTESWAP_BRFIFO;
  925. +   CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
  926. +   CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
  927. +            RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
  928. +}
  929. +
  930. +static void crypto_dma_start(struct rk_crypto_info *dev)
  931. +{
  932. +   CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
  933. +   CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
  934. +   CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
  935. +   CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
  936. +            _SBF(RK_CRYPTO_BLOCK_START, 16));
  937. +}
  938. +
  939. +static int rk_set_data_start(struct rk_crypto_info *dev)
  940. +{
  941. +   int err;
  942. +
  943. +   err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
  944. +   if (!err)
  945. +       crypto_dma_start(dev);
  946. +   return err;
  947. +}
  948. +
  949. +static int rk_ablk_start(struct rk_crypto_info *dev)
  950. +{
  951. +   int err;
  952. +
  953. +   spin_lock(&dev->lock);
  954. +   rk_ablk_hw_init(dev);
  955. +   err = rk_set_data_start(dev);
  956. +   spin_unlock(&dev->lock);
  957. +   return err;
  958. +}
  959. +
  960. +static void rk_iv_copyback(struct rk_crypto_info *dev)
  961. +{
  962. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
  963. +   u32 ivsize = crypto_ablkcipher_ivsize(tfm);
  964. +
  965. +   if (ivsize == DES_BLOCK_SIZE)
  966. +       memcpy_fromio(dev->ablk_req->info,
  967. +                 dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
  968. +   else if (ivsize == AES_BLOCK_SIZE)
  969. +       memcpy_fromio(dev->ablk_req->info,
  970. +                 dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
  971. +}
  972. +
  973. +/* return:
  974. + * true    an error occurred
  975. + * false   no error, continue
  976. + */
  977. +static int rk_ablk_rx(struct rk_crypto_info *dev)
  978. +{
  979. +   int err = 0;
  980. +
  981. +   dev->unload_data(dev);
  982. +   if (!dev->aligned) {
  983. +       if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
  984. +                     dev->addr_vir, dev->count,
  985. +                     dev->total - dev->left_bytes -
  986. +                     dev->count)) {
  987. +           err = -EINVAL;
  988. +           goto out_rx;
  989. +       }
  990. +   }
  991. +   if (dev->left_bytes) {
  992. +       if (dev->aligned) {
  993. +           if (sg_is_last(dev->sg_src)) {
  994. +               dev_err(dev->dev, "[%s:%d] Lack of data\n",
  995. +                   __func__, __LINE__);
  996. +               err = -ENOMEM;
  997. +               goto out_rx;
  998. +           }
  999. +           dev->sg_src = sg_next(dev->sg_src);
  1000. +           dev->sg_dst = sg_next(dev->sg_dst);
  1001. +       }
  1002. +       err = rk_set_data_start(dev);
  1003. +   } else {
  1004. +       rk_iv_copyback(dev);
  1005. +       /* the whole calculation finished without any error */
  1006. +       dev->complete(dev, 0);
  1007. +   }
  1008. +out_rx:
  1009. +   return err;
  1010. +}
  1011. +
  1012. +static int rk_ablk_cra_init(struct crypto_tfm *tfm)
  1013. +{
  1014. +   struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
  1015. +   struct crypto_alg *alg = tfm->__crt_alg;
  1016. +   struct rk_crypto_tmp *algt;
  1017. +
  1018. +   algt = container_of(alg, struct rk_crypto_tmp, alg);
  1019. +
  1020. +   ctx->dev = algt->dev;
  1021. +   ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
  1022. +   ctx->dev->start = rk_ablk_start;
  1023. +   ctx->dev->update = rk_ablk_rx;
  1024. +   ctx->dev->complete = rk_crypto_complete;
  1025. +   ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
  1026. +
  1027. +   return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
  1028. +}
  1029. +
  1030. +static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
  1031. +{
  1032. +   struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
  1033. +
  1034. +   free_page((unsigned long)ctx->dev->addr_vir);
  1035. +   ctx->dev->disable_clk(ctx->dev);
  1036. +}
  1037. +
  1038. +struct rk_crypto_tmp rk_ecb_aes_alg = {
  1039. +   .alg = {
  1040. +       .cra_name       = "ecb(aes)",
  1041. +       .cra_driver_name    = "ecb-aes-rk",
  1042. +       .cra_priority       = 300,
  1043. +       .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1044. +                     CRYPTO_ALG_ASYNC,
  1045. +       .cra_blocksize      = AES_BLOCK_SIZE,
  1046. +       .cra_ctxsize        = sizeof(struct rk_cipher_ctx),
  1047. +       .cra_alignmask      = 0x0f,
  1048. +       .cra_type       = &crypto_ablkcipher_type,
  1049. +       .cra_module     = THIS_MODULE,
  1050. +       .cra_init       = rk_ablk_cra_init,
  1051. +       .cra_exit       = rk_ablk_cra_exit,
  1052. +       .cra_u.ablkcipher   = {
  1053. +           .min_keysize    = AES_MIN_KEY_SIZE,
  1054. +           .max_keysize    = AES_MAX_KEY_SIZE,
  1055. +           .setkey     = rk_aes_setkey,
  1056. +           .encrypt    = rk_aes_ecb_encrypt,
  1057. +           .decrypt    = rk_aes_ecb_decrypt,
  1058. +       }
  1059. +   }
  1060. +};
  1061. +
  1062. +struct rk_crypto_tmp rk_cbc_aes_alg = {
  1063. +   .alg = {
  1064. +       .cra_name       = "cbc(aes)",
  1065. +       .cra_driver_name    = "cbc-aes-rk",
  1066. +       .cra_priority       = 300,
  1067. +       .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1068. +                     CRYPTO_ALG_ASYNC,
  1069. +       .cra_blocksize      = AES_BLOCK_SIZE,
  1070. +       .cra_ctxsize        = sizeof(struct rk_cipher_ctx),
  1071. +       .cra_alignmask      = 0x0f,
  1072. +       .cra_type       = &crypto_ablkcipher_type,
  1073. +       .cra_module     = THIS_MODULE,
  1074. +       .cra_init       = rk_ablk_cra_init,
  1075. +       .cra_exit       = rk_ablk_cra_exit,
  1076. +       .cra_u.ablkcipher   = {
  1077. +           .min_keysize    = AES_MIN_KEY_SIZE,
  1078. +           .max_keysize    = AES_MAX_KEY_SIZE,
  1079. +           .ivsize     = AES_BLOCK_SIZE,
  1080. +           .setkey     = rk_aes_setkey,
  1081. +           .encrypt    = rk_aes_cbc_encrypt,
  1082. +           .decrypt    = rk_aes_cbc_decrypt,
  1083. +       }
  1084. +   }
  1085. +};
  1086. +
  1087. +struct rk_crypto_tmp rk_ecb_des_alg = {
  1088. +   .alg = {
  1089. +       .cra_name       = "ecb(des)",
  1090. +       .cra_driver_name    = "ecb-des-rk",
  1091. +       .cra_priority       = 300,
  1092. +       .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1093. +                     CRYPTO_ALG_ASYNC,
  1094. +       .cra_blocksize      = DES_BLOCK_SIZE,
  1095. +       .cra_ctxsize        = sizeof(struct rk_cipher_ctx),
  1096. +       .cra_alignmask      = 0x07,
  1097. +       .cra_type       = &crypto_ablkcipher_type,
  1098. +       .cra_module     = THIS_MODULE,
  1099. +       .cra_init       = rk_ablk_cra_init,
  1100. +       .cra_exit       = rk_ablk_cra_exit,
  1101. +       .cra_u.ablkcipher   = {
  1102. +           .min_keysize    = DES_KEY_SIZE,
  1103. +           .max_keysize    = DES_KEY_SIZE,
  1104. +           .setkey     = rk_tdes_setkey,
  1105. +           .encrypt    = rk_des_ecb_encrypt,
  1106. +           .decrypt    = rk_des_ecb_decrypt,
  1107. +       }
  1108. +   }
  1109. +};
  1110. +
  1111. +struct rk_crypto_tmp rk_cbc_des_alg = {
  1112. +   .alg = {
  1113. +       .cra_name       = "cbc(des)",
  1114. +       .cra_driver_name    = "cbc-des-rk",
  1115. +       .cra_priority       = 300,
  1116. +       .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1117. +                     CRYPTO_ALG_ASYNC,
  1118. +       .cra_blocksize      = DES_BLOCK_SIZE,
  1119. +       .cra_ctxsize        = sizeof(struct rk_cipher_ctx),
  1120. +       .cra_alignmask      = 0x07,
  1121. +       .cra_type       = &crypto_ablkcipher_type,
  1122. +       .cra_module     = THIS_MODULE,
  1123. +       .cra_init       = rk_ablk_cra_init,
  1124. +       .cra_exit       = rk_ablk_cra_exit,
  1125. +       .cra_u.ablkcipher   = {
  1126. +           .min_keysize    = DES_KEY_SIZE,
  1127. +           .max_keysize    = DES_KEY_SIZE,
  1128. +           .ivsize     = DES_BLOCK_SIZE,
  1129. +           .setkey     = rk_tdes_setkey,
  1130. +           .encrypt    = rk_des_cbc_encrypt,
  1131. +           .decrypt    = rk_des_cbc_decrypt,
  1132. +       }
  1133. +   }
  1134. +};
  1135. +
  1136. +struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
  1137. +   .alg = {
  1138. +       .cra_name       = "ecb(des3_ede)",
  1139. +       .cra_driver_name    = "ecb-des3-ede-rk",
  1140. +       .cra_priority       = 300,
  1141. +       .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1142. +                     CRYPTO_ALG_ASYNC,
  1143. +       .cra_blocksize      = DES_BLOCK_SIZE,
  1144. +       .cra_ctxsize        = sizeof(struct rk_cipher_ctx),
  1145. +       .cra_alignmask      = 0x07,
  1146. +       .cra_type       = &crypto_ablkcipher_type,
  1147. +       .cra_module     = THIS_MODULE,
  1148. +       .cra_init       = rk_ablk_cra_init,
  1149. +       .cra_exit       = rk_ablk_cra_exit,
  1150. +       .cra_u.ablkcipher   = {
  1151. +           .min_keysize    = DES3_EDE_KEY_SIZE,
  1152. +           .max_keysize    = DES3_EDE_KEY_SIZE,
  1153. +           .ivsize     = DES_BLOCK_SIZE,
  1154. +           .setkey     = rk_tdes_setkey,
  1155. +           .encrypt    = rk_des3_ede_ecb_encrypt,
  1156. +           .decrypt    = rk_des3_ede_ecb_decrypt,
  1157. +       }
  1158. +   }
  1159. +};
  1160. +
  1161. +struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
  1162. +   .alg = {
  1163. +       .cra_name       = "cbc(des3_ede)",
  1164. +       .cra_driver_name    = "cbc-des3-ede-rk",
  1165. +       .cra_priority       = 300,
  1166. +       .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1167. +                     CRYPTO_ALG_ASYNC,
  1168. +       .cra_blocksize      = DES_BLOCK_SIZE,
  1169. +       .cra_ctxsize        = sizeof(struct rk_cipher_ctx),
  1170. +       .cra_alignmask      = 0x07,
  1171. +       .cra_type       = &crypto_ablkcipher_type,
  1172. +       .cra_module     = THIS_MODULE,
  1173. +       .cra_init       = rk_ablk_cra_init,
  1174. +       .cra_exit       = rk_ablk_cra_exit,
  1175. +       .cra_u.ablkcipher   = {
  1176. +           .min_keysize    = DES3_EDE_KEY_SIZE,
  1177. +           .max_keysize    = DES3_EDE_KEY_SIZE,
  1178. +           .ivsize     = DES_BLOCK_SIZE,
  1179. +           .setkey     = rk_tdes_setkey,
  1180. +           .encrypt    = rk_des3_ede_cbc_encrypt,
  1181. +           .decrypt    = rk_des3_ede_cbc_decrypt,
  1182. +       }
  1183. +   }
  1184. +};
  1185. commit ac7c8e6b6dc959d285382c7e9cdfe608205f0c68
  1186. Author: Heiko Stuebner <heiko@sntech.de>
  1187. Date:   Sat Nov 28 13:27:48 2015 +0100
  1188.  
  1189.     crypto: rockchip - fix possible deadlock
  1190.    
  1191.     Lockdep warns about a possible deadlock resulting from the use of regular
  1192.     spin_locks:
  1193.    
  1194.     =================================
  1195.     [ INFO: inconsistent lock state ]
  1196.     4.4.0-rc2+ #2724 Not tainted
  1197.     ---------------------------------
  1198.     inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
  1199.     ksoftirqd/0/3 [HC0[0]:SC1[1]:HE1:SE0] takes:
  1200.     (&(&crypto_info->lock)->rlock){+.?...}, at: [<bf14a65c>] rk_crypto_tasklet_cb+0x24/0xb4 [rk_crypto]
  1201.     {SOFTIRQ-ON-W} state was registered at:
  1202.       [<c007f4ac>] lock_acquire+0x178/0x218
  1203.       [<c0759bac>] _raw_spin_lock+0x54/0x64
  1204.       [<bf14af88>] rk_handle_req+0x7c/0xbc [rk_crypto]
  1205.       [<bf14b040>] rk_des_ecb_encrypt+0x2c/0x30 [rk_crypto]
  1206.       [<bf14b05c>] rk_aes_ecb_encrypt+0x18/0x1c [rk_crypto]
  1207.       [<c028c820>] skcipher_encrypt_ablkcipher+0x64/0x68
  1208.       [<c0290770>] __test_skcipher+0x2a8/0x8dc
  1209.       [<c0292e94>] test_skcipher+0x38/0xc4
  1210.       [<c0292fb0>] alg_test_skcipher+0x90/0xb0
  1211.       [<c0292158>] alg_test+0x1e8/0x280
  1212.       [<c028f6f4>] cryptomgr_test+0x34/0x54
  1213.       [<c004bbe8>] kthread+0xf4/0x10c
  1214.       [<c0010010>] ret_from_fork+0x14/0x24
  1215.     irq event stamp: 10672
  1216.     hardirqs last  enabled at (10672): [<c002fac8>] tasklet_action+0x48/0x104
  1217.     hardirqs last disabled at (10671): [<c002faa0>] tasklet_action+0x20/0x104
  1218.     softirqs last  enabled at (10658): [<c002ef84>] __do_softirq+0x358/0x49c
  1219.     softirqs last disabled at (10669): [<c002f108>] run_ksoftirqd+0x40/0x80
  1220.    
  1221.     other info that might help us debug this:
  1222.     Possible unsafe locking scenario:
  1223.    
  1224.         CPU0
  1225.         ----
  1226.       lock(&(&crypto_info->lock)->rlock);
  1227.       <Interrupt>
  1228.         lock(&(&crypto_info->lock)->rlock);
  1229.    
  1230.      *** DEADLOCK ***
  1231.    
  1232.     Fix this by moving to irq-disabling spinlocks.
  1233.    
  1234.     Signed-off-by: Heiko Stuebner <heiko@sntech.de>
  1235.     Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  1236.  
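In short: crypto_info->lock is taken both in process context (rk_handle_req) and from the tasklet, so a softirq firing on a CPU that already holds the lock spins forever. An illustrative sketch of the pattern, with invented demo_* names standing in for the driver's functions:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);              /* stands in for crypto_info->lock */

static void demo_enqueue(void)                  /* process context, cf. rk_handle_req */
{
        unsigned long flags;

        /* Before the fix this was plain spin_lock(), which leaves softirqs
         * enabled; the tasklet below could then run on this CPU while the
         * lock is held and spin on it forever. */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... enqueue a request ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_tasklet_cb(unsigned long data) /* softirq context */
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... dequeue and start a request ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}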
  1237. diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
  1238. index 6b72f8d..da9c73d 100644
  1239. --- a/drivers/crypto/rockchip/rk3288_crypto.c
  1240. +++ b/drivers/crypto/rockchip/rk3288_crypto.c
  1241. @@ -190,12 +190,13 @@ static void rk_crypto_tasklet_cb(unsigned long data)
  1242.  {
  1243.     struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
  1244.     struct crypto_async_request *async_req, *backlog;
  1245. +   unsigned long flags;
  1246.     int err = 0;
  1247.  
  1248. -   spin_lock(&dev->lock);
  1249. +   spin_lock_irqsave(&dev->lock, flags);
  1250.     backlog   = crypto_get_backlog(&dev->queue);
  1251.     async_req = crypto_dequeue_request(&dev->queue);
  1252. -   spin_unlock(&dev->lock);
  1253. +   spin_unlock_irqrestore(&dev->lock, flags);
  1254.     if (!async_req) {
  1255.         dev_err(dev->dev, "async_req is NULL !!\n");
  1256.         return;
  1257. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  1258. index 4a8f9de..d98b681 100644
  1259. --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  1260. +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  1261. @@ -24,6 +24,7 @@ static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
  1262.  static int rk_handle_req(struct rk_crypto_info *dev,
  1263.              struct ablkcipher_request *req)
  1264.  {
  1265. +   unsigned long flags;
  1266.     int err;
  1267.  
  1268.     if (!IS_ALIGNED(req->nbytes, dev->align_size))
  1269. @@ -38,9 +39,9 @@ static int rk_handle_req(struct rk_crypto_info *dev,
  1270.     dev->aligned = 1;
  1271.     dev->ablk_req = req;
  1272.  
  1273. -   spin_lock(&dev->lock);
  1274. +   spin_lock_irqsave(&dev->lock, flags);
  1275.     err = ablkcipher_enqueue_request(&dev->queue, req);
  1276. -   spin_unlock(&dev->lock);
  1277. +   spin_unlock_irqrestore(&dev->lock, flags);
  1278.     tasklet_schedule(&dev->crypto_tasklet);
  1279.     return err;
  1280.  }
  1281. @@ -267,12 +268,13 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
  1282.  
  1283.  static int rk_ablk_start(struct rk_crypto_info *dev)
  1284.  {
  1285. +   unsigned long flags;
  1286.     int err;
  1287.  
  1288. -   spin_lock(&dev->lock);
  1289. +   spin_lock_irqsave(&dev->lock, flags);
  1290.     rk_ablk_hw_init(dev);
  1291.     err = rk_set_data_start(dev);
  1292. -   spin_unlock(&dev->lock);
  1293. +   spin_unlock_irqrestore(&dev->lock, flags);
  1294.     return err;
  1295.  }
  1296.  
  1297. commit bfd927ffa219ac81082b2dcc61a1c4037869befc
  1298. Author: Zain Wang <zain.wang@rock-chips.com>
  1299. Date:   Tue Feb 16 10:15:01 2016 +0800
  1300.  
  1301.     crypto: rockchip - add hash support for crypto engine in rk3288
  1302.    
  1303.     Add md5 sha1 sha256 support for crypto engine in rk3288.
  1304.    
  1305.     Signed-off-by: Zain Wang <zain.wang@rock-chips.com>
  1306.     Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  1307.  
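After this patch the engine also exposes "md5", "sha1" and "sha256" through the ahash interface. A minimal sketch of driving it from kernel code (invented demo name; data must live in DMA-able memory such as a kmalloc'ed buffer, and out must be large enough for the digest, 32 bytes for sha256):

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_sha256(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, 0, NULL, NULL);
        ahash_request_set_crypt(req, &sg, out, len);

        /* An async tfm may return -EINPROGRESS here; a real caller passes a
         * completion callback above and waits for it instead. */
        err = crypto_ahash_digest(req);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err;
}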
  1308. diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
  1309. index fed3ffb..477fffd 100644
  1310. --- a/drivers/crypto/Kconfig
  1311. +++ b/drivers/crypto/Kconfig
  1312. @@ -508,6 +508,10 @@ config CRYPTO_DEV_ROCKCHIP
  1313.     depends on OF && ARCH_ROCKCHIP
  1314.     select CRYPTO_AES
  1315.     select CRYPTO_DES
  1316. +   select CRYPTO_MD5
  1317. +   select CRYPTO_SHA1
  1318. +   select CRYPTO_SHA256
  1319. +   select CRYPTO_HASH
  1320.     select CRYPTO_BLKCIPHER
  1321.  
  1322.     help
  1323. diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
  1324. index 7051c6c..30f9129 100644
  1325. --- a/drivers/crypto/rockchip/Makefile
  1326. +++ b/drivers/crypto/rockchip/Makefile
  1327. @@ -1,3 +1,4 @@
  1328.  obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
  1329.  rk_crypto-objs := rk3288_crypto.o \
  1330.           rk3288_crypto_ablkcipher.o \
  1331. +         rk3288_crypto_ahash.o
  1332. diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
  1333. index da9c73d..af50825 100644
  1334. --- a/drivers/crypto/rockchip/rk3288_crypto.c
  1335. +++ b/drivers/crypto/rockchip/rk3288_crypto.c
  1336. @@ -208,6 +208,8 @@ static void rk_crypto_tasklet_cb(unsigned long data)
  1337.  
  1338.     if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
  1339.         dev->ablk_req = ablkcipher_request_cast(async_req);
  1340. +   else
  1341. +       dev->ahash_req = ahash_request_cast(async_req);
  1342.     err = dev->start(dev);
  1343.     if (err)
  1344.         dev->complete(dev, err);
  1345. @@ -220,6 +222,9 @@ static void rk_crypto_tasklet_cb(unsigned long data)
  1346.     &rk_cbc_des_alg,
  1347.     &rk_ecb_des3_ede_alg,
  1348.     &rk_cbc_des3_ede_alg,
  1349. +   &rk_ahash_sha1,
  1350. +   &rk_ahash_sha256,
  1351. +   &rk_ahash_md5,
  1352.  };
  1353.  
  1354.  static int rk_crypto_register(struct rk_crypto_info *crypto_info)
  1355. @@ -229,15 +234,24 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
  1356.  
  1357.     for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
  1358.         rk_cipher_algs[i]->dev = crypto_info;
  1359. -       err = crypto_register_alg(&rk_cipher_algs[i]->alg);
  1360. +       if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
  1361. +           err = crypto_register_alg(
  1362. +                   &rk_cipher_algs[i]->alg.crypto);
  1363. +       else
  1364. +           err = crypto_register_ahash(
  1365. +                   &rk_cipher_algs[i]->alg.hash);
  1366.         if (err)
  1367.             goto err_cipher_algs;
  1368.     }
  1369.     return 0;
  1370.  
  1371.  err_cipher_algs:
  1372. -   for (k = 0; k < i; k++)
  1373. -       crypto_unregister_alg(&rk_cipher_algs[k]->alg);
  1374. +   for (k = 0; k < i; k++) {
  1375. +       if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
  1376. +           crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
  1377. +       else
  1378. +           crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
  1379. +   }
  1380.     return err;
  1381.  }
  1382.  
  1383. @@ -245,8 +259,12 @@ static void rk_crypto_unregister(void)
  1384.  {
  1385.     unsigned int i;
  1386.  
  1387. -   for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
  1388. -       crypto_unregister_alg(&rk_cipher_algs[i]->alg);
  1389. +   for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
  1390. +       if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
  1391. +           crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
  1392. +       else
  1393. +           crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
  1394. +   }
  1395.  }
  1396.  
  1397.  static void rk_crypto_action(void *data)
  1398. diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
  1399. index e499c2c..d7b71fe 100644
  1400. --- a/drivers/crypto/rockchip/rk3288_crypto.h
  1401. +++ b/drivers/crypto/rockchip/rk3288_crypto.h
  1402. @@ -6,6 +6,10 @@
  1403.  #include <crypto/algapi.h>
  1404.  #include <linux/interrupt.h>
  1405.  #include <linux/delay.h>
  1406. +#include <crypto/internal/hash.h>
  1407. +
  1408. +#include <crypto/md5.h>
  1409. +#include <crypto/sha.h>
  1410.  
  1411.  #define _SBF(v, f)         ((v) << (f))
  1412.  
  1413. @@ -149,6 +153,28 @@
  1414.  #define RK_CRYPTO_TDES_KEY3_0      0x0130
  1415.  #define RK_CRYPTO_TDES_KEY3_1      0x0134
  1416.  
  1417. +/* HASH */
  1418. +#define RK_CRYPTO_HASH_CTRL        0x0180
  1419. +#define RK_CRYPTO_HASH_SWAP_DO     BIT(3)
  1420. +#define RK_CRYPTO_HASH_SWAP_DI     BIT(2)
  1421. +#define RK_CRYPTO_HASH_SHA1        _SBF(0x00, 0)
  1422. +#define RK_CRYPTO_HASH_MD5     _SBF(0x01, 0)
  1423. +#define RK_CRYPTO_HASH_SHA256      _SBF(0x02, 0)
  1424. +#define RK_CRYPTO_HASH_PRNG        _SBF(0x03, 0)
  1425. +
  1426. +#define RK_CRYPTO_HASH_STS     0x0184
  1427. +#define RK_CRYPTO_HASH_DONE        BIT(0)
  1428. +
  1429. +#define RK_CRYPTO_HASH_MSG_LEN     0x0188
  1430. +#define RK_CRYPTO_HASH_DOUT_0      0x018c
  1431. +#define RK_CRYPTO_HASH_DOUT_1      0x0190
  1432. +#define RK_CRYPTO_HASH_DOUT_2      0x0194
  1433. +#define RK_CRYPTO_HASH_DOUT_3      0x0198
  1434. +#define RK_CRYPTO_HASH_DOUT_4      0x019c
  1435. +#define RK_CRYPTO_HASH_DOUT_5      0x01a0
  1436. +#define RK_CRYPTO_HASH_DOUT_6      0x01a4
  1437. +#define RK_CRYPTO_HASH_DOUT_7      0x01a8
  1438. +
  1439.  #define CRYPTO_READ(dev, offset)         \
  1440.         readl_relaxed(((dev)->reg + (offset)))
  1441.  #define CRYPTO_WRITE(dev, offset, val)   \
  1442. @@ -166,6 +192,7 @@ struct rk_crypto_info {
  1443.     struct crypto_queue     queue;
  1444.     struct tasklet_struct       crypto_tasklet;
  1445.     struct ablkcipher_request   *ablk_req;
  1446. +   struct ahash_request        *ahash_req;
  1447.     /* device lock */
  1448.     spinlock_t          lock;
  1449.  
  1450. @@ -195,15 +222,36 @@ struct rk_crypto_info {
  1451.     void (*unload_data)(struct rk_crypto_info *dev);
  1452.  };
  1453.  
  1454. +/* the private variable of hash */
  1455. +struct rk_ahash_ctx {
  1456. +   struct rk_crypto_info       *dev;
  1457. +   /* for fallback */
  1458. +   struct crypto_ahash     *fallback_tfm;
  1459. +};
  1460. +
  1461. +/* the private variable of hash for fallback */
  1462. +struct rk_ahash_rctx {
  1463. +   struct ahash_request        fallback_req;
  1464. +};
  1465. +
  1466.  /* the private variable of cipher */
  1467.  struct rk_cipher_ctx {
  1468.     struct rk_crypto_info       *dev;
  1469.     unsigned int            keylen;
  1470.  };
  1471.  
  1472. +enum alg_type {
  1473. +   ALG_TYPE_HASH,
  1474. +   ALG_TYPE_CIPHER,
  1475. +};
  1476. +
  1477.  struct rk_crypto_tmp {
  1478. -   struct rk_crypto_info *dev;
  1479. -   struct crypto_alg alg;
  1480. +   struct rk_crypto_info       *dev;
  1481. +   union {
  1482. +       struct crypto_alg   crypto;
  1483. +       struct ahash_alg    hash;
  1484. +   } alg;
  1485. +   enum alg_type           type;
  1486.  };
  1487.  
  1488.  extern struct rk_crypto_tmp rk_ecb_aes_alg;
  1489. @@ -213,4 +261,8 @@ struct rk_crypto_tmp {
  1490.  extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
  1491.  extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
  1492.  
  1493. +extern struct rk_crypto_tmp rk_ahash_sha1;
  1494. +extern struct rk_crypto_tmp rk_ahash_sha256;
  1495. +extern struct rk_crypto_tmp rk_ahash_md5;
  1496. +
  1497.  #endif
  1498. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  1499. index d98b681..b5a3afe 100644
  1500. --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  1501. +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  1502. @@ -336,7 +336,7 @@ static int rk_ablk_cra_init(struct crypto_tfm *tfm)
  1503.     struct crypto_alg *alg = tfm->__crt_alg;
  1504.     struct rk_crypto_tmp *algt;
  1505.  
  1506. -   algt = container_of(alg, struct rk_crypto_tmp, alg);
  1507. +   algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
  1508.  
  1509.     ctx->dev = algt->dev;
  1510.     ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
  1511. @@ -357,7 +357,8 @@ static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
  1512.  }
  1513.  
  1514.  struct rk_crypto_tmp rk_ecb_aes_alg = {
  1515. -   .alg = {
  1516. +   .type = ALG_TYPE_CIPHER,
  1517. +   .alg.crypto = {
  1518.         .cra_name       = "ecb(aes)",
  1519.         .cra_driver_name    = "ecb-aes-rk",
  1520.         .cra_priority       = 300,
  1521. @@ -381,7 +382,8 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
  1522.  };
  1523.  
  1524.  struct rk_crypto_tmp rk_cbc_aes_alg = {
  1525. -   .alg = {
  1526. +   .type = ALG_TYPE_CIPHER,
  1527. +   .alg.crypto = {
  1528.         .cra_name       = "cbc(aes)",
  1529.         .cra_driver_name    = "cbc-aes-rk",
  1530.         .cra_priority       = 300,
  1531. @@ -406,7 +408,8 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
  1532.  };
  1533.  
  1534.  struct rk_crypto_tmp rk_ecb_des_alg = {
  1535. -   .alg = {
  1536. +   .type = ALG_TYPE_CIPHER,
  1537. +   .alg.crypto = {
  1538.         .cra_name       = "ecb(des)",
  1539.         .cra_driver_name    = "ecb-des-rk",
  1540.         .cra_priority       = 300,
  1541. @@ -430,7 +433,8 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
  1542.  };
  1543.  
  1544.  struct rk_crypto_tmp rk_cbc_des_alg = {
  1545. -   .alg = {
  1546. +   .type = ALG_TYPE_CIPHER,
  1547. +   .alg.crypto = {
  1548.         .cra_name       = "cbc(des)",
  1549.         .cra_driver_name    = "cbc-des-rk",
  1550.         .cra_priority       = 300,
  1551. @@ -455,7 +459,8 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
  1552.  };
  1553.  
  1554.  struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
  1555. -   .alg = {
  1556. +   .type = ALG_TYPE_CIPHER,
  1557. +   .alg.crypto = {
  1558.         .cra_name       = "ecb(des3_ede)",
  1559.         .cra_driver_name    = "ecb-des3-ede-rk",
  1560.         .cra_priority       = 300,
  1561. @@ -480,7 +485,8 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
  1562.  };
  1563.  
  1564.  struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
  1565. -   .alg = {
  1566. +   .type = ALG_TYPE_CIPHER,
  1567. +   .alg.crypto = {
  1568.         .cra_name       = "cbc(des3_ede)",
  1569.         .cra_driver_name    = "cbc-des3-ede-rk",
  1570.         .cra_priority       = 300,
  1571. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  1572. new file mode 100644
  1573. index 0000000..7185882
  1574. --- /dev/null
  1575. +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  1576. @@ -0,0 +1,404 @@
  1577. +/*
  1578. + * Crypto acceleration support for Rockchip RK3288
  1579. + *
  1580. + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
  1581. + *
  1582. + * Author: Zain Wang <zain.wang@rock-chips.com>
  1583. + *
  1584. + * This program is free software; you can redistribute it and/or modify it
  1585. + * under the terms and conditions of the GNU General Public License,
  1586. + * version 2, as published by the Free Software Foundation.
  1587. + *
  1588. + * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
  1589. + */
  1590. +#include "rk3288_crypto.h"
  1591. +
  1592. +/*
  1593. + * The IC cannot hash a zero-length message,
  1594. + * so we return the precalculated hash when a zero-length message is seen.
  1595. + */
  1596. +
  1597. +static int zero_message_process(struct ahash_request *req)
  1598. +{
  1599. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1600. +   int rk_digest_size = crypto_ahash_digestsize(tfm);
  1601. +
  1602. +   switch (rk_digest_size) {
  1603. +   case SHA1_DIGEST_SIZE:
  1604. +       memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
  1605. +       break;
  1606. +   case SHA256_DIGEST_SIZE:
  1607. +       memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
  1608. +       break;
  1609. +   case MD5_DIGEST_SIZE:
  1610. +       memcpy(req->result, md5_zero_message_hash, rk_digest_size);
  1611. +       break;
  1612. +   default:
  1613. +       return -EINVAL;
  1614. +   }
  1615. +
  1616. +   return 0;
  1617. +}
  1618. +
  1619. +static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
  1620. +{
  1621. +   if (dev->ahash_req->base.complete)
  1622. +       dev->ahash_req->base.complete(&dev->ahash_req->base, err);
  1623. +}
  1624. +
  1625. +static void rk_ahash_reg_init(struct rk_crypto_info *dev)
  1626. +{
  1627. +   int reg_status = 0;
  1628. +
  1629. +   reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
  1630. +            RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
  1631. +   CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
  1632. +
  1633. +   reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
  1634. +   reg_status &= (~RK_CRYPTO_HASH_FLUSH);
  1635. +   reg_status |= _SBF(0xffff, 16);
  1636. +   CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
  1637. +
  1638. +   memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);
  1639. +
  1640. +   CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
  1641. +                       RK_CRYPTO_HRDMA_DONE_ENA);
  1642. +
  1643. +   CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
  1644. +                       RK_CRYPTO_HRDMA_DONE_INT);
  1645. +
  1646. +   CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
  1647. +                          RK_CRYPTO_HASH_SWAP_DO);
  1648. +
  1649. +   CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
  1650. +                     RK_CRYPTO_BYTESWAP_BRFIFO |
  1651. +                     RK_CRYPTO_BYTESWAP_BTFIFO);
  1652. +
  1653. +   CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
  1654. +}
  1655. +
  1656. +static int rk_ahash_init(struct ahash_request *req)
  1657. +{
  1658. +   struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
  1659. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1660. +   struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
  1661. +
  1662. +   ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  1663. +   rctx->fallback_req.base.flags = req->base.flags &
  1664. +                   CRYPTO_TFM_REQ_MAY_SLEEP;
  1665. +
  1666. +   return crypto_ahash_init(&rctx->fallback_req);
  1667. +}
  1668. +
  1669. +static int rk_ahash_update(struct ahash_request *req)
  1670. +{
  1671. +   struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
  1672. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1673. +   struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
  1674. +
  1675. +   ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  1676. +   rctx->fallback_req.base.flags = req->base.flags &
  1677. +                   CRYPTO_TFM_REQ_MAY_SLEEP;
  1678. +   rctx->fallback_req.nbytes = req->nbytes;
  1679. +   rctx->fallback_req.src = req->src;
  1680. +
  1681. +   return crypto_ahash_update(&rctx->fallback_req);
  1682. +}
  1683. +
  1684. +static int rk_ahash_final(struct ahash_request *req)
  1685. +{
  1686. +   struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
  1687. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1688. +   struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
  1689. +
  1690. +   ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  1691. +   rctx->fallback_req.base.flags = req->base.flags &
  1692. +                   CRYPTO_TFM_REQ_MAY_SLEEP;
  1693. +   rctx->fallback_req.result = req->result;
  1694. +
  1695. +   return crypto_ahash_final(&rctx->fallback_req);
  1696. +}
  1697. +
  1698. +static int rk_ahash_finup(struct ahash_request *req)
  1699. +{
  1700. +   struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
  1701. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1702. +   struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
  1703. +
  1704. +   ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  1705. +   rctx->fallback_req.base.flags = req->base.flags &
  1706. +                   CRYPTO_TFM_REQ_MAY_SLEEP;
  1707. +
  1708. +   rctx->fallback_req.nbytes = req->nbytes;
  1709. +   rctx->fallback_req.src = req->src;
  1710. +   rctx->fallback_req.result = req->result;
  1711. +
  1712. +   return crypto_ahash_finup(&rctx->fallback_req);
  1713. +}
  1714. +
  1715. +static int rk_ahash_import(struct ahash_request *req, const void *in)
  1716. +{
  1717. +   struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
  1718. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1719. +   struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
  1720. +
  1721. +   ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  1722. +   rctx->fallback_req.base.flags = req->base.flags &
  1723. +                   CRYPTO_TFM_REQ_MAY_SLEEP;
  1724. +
  1725. +   return crypto_ahash_import(&rctx->fallback_req, in);
  1726. +}
  1727. +
  1728. +static int rk_ahash_export(struct ahash_request *req, void *out)
  1729. +{
  1730. +   struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
  1731. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1732. +   struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
  1733. +
  1734. +   ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  1735. +   rctx->fallback_req.base.flags = req->base.flags &
  1736. +                   CRYPTO_TFM_REQ_MAY_SLEEP;
  1737. +
  1738. +   return crypto_ahash_export(&rctx->fallback_req, out);
  1739. +}
  1740. +
  1741. +static int rk_ahash_digest(struct ahash_request *req)
  1742. +{
  1743. +   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1744. +   struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  1745. +   struct rk_crypto_info *dev = NULL;
  1746. +   unsigned long flags;
  1747. +   int ret;
  1748. +
  1749. +   if (!req->nbytes)
  1750. +       return zero_message_process(req);
  1751. +
  1752. +   dev = tctx->dev;
  1753. +   dev->total = req->nbytes;
  1754. +   dev->left_bytes = req->nbytes;
  1755. +   dev->aligned = 0;
  1756. +   dev->mode = 0;
  1757. +   dev->align_size = 4;
  1758. +   dev->sg_dst = NULL;
  1759. +   dev->sg_src = req->src;
  1760. +   dev->first = req->src;
  1761. +   dev->nents = sg_nents(req->src);
  1762. +
  1763. +   switch (crypto_ahash_digestsize(tfm)) {
  1764. +   case SHA1_DIGEST_SIZE:
  1765. +       dev->mode = RK_CRYPTO_HASH_SHA1;
  1766. +       break;
  1767. +   case SHA256_DIGEST_SIZE:
  1768. +       dev->mode = RK_CRYPTO_HASH_SHA256;
  1769. +       break;
  1770. +   case MD5_DIGEST_SIZE:
  1771. +       dev->mode = RK_CRYPTO_HASH_MD5;
  1772. +       break;
  1773. +   default:
  1774. +       return -EINVAL;
  1775. +   }
  1776. +
  1777. +   rk_ahash_reg_init(dev);
  1778. +
  1779. +   spin_lock_irqsave(&dev->lock, flags);
  1780. +   ret = crypto_enqueue_request(&dev->queue, &req->base);
  1781. +   spin_unlock_irqrestore(&dev->lock, flags);
  1782. +
  1783. +   tasklet_schedule(&dev->crypto_tasklet);
  1784. +
  1785. +   /*
  1786. +    * it will take some time to process data after the last dma transmission.
  1787. +    *
  1788. +    * the waiting time depends on the length of the last data block,
  1789. +    * so we cannot set a fixed time here.
  1790. +    * 10-50 keeps the system from polling here too frequently and
  1791. +    * wasting cycles, while still responding quickly when the dma
  1792. +    * completes.
  1793. +    */
  1794. +   while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
  1795. +       usleep_range(10, 50);
  1796. +
  1797. +   memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
  1798. +             crypto_ahash_digestsize(tfm));
  1799. +
  1800. +   return 0;
  1801. +}
  1802. +
  1803. +static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
  1804. +{
  1805. +   CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
  1806. +   CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
  1807. +   CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
  1808. +                     (RK_CRYPTO_HASH_START << 16));
  1809. +}
  1810. +
  1811. +static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
  1812. +{
  1813. +   int err;
  1814. +
  1815. +   err = dev->load_data(dev, dev->sg_src, NULL);
  1816. +   if (!err)
  1817. +       crypto_ahash_dma_start(dev);
  1818. +   return err;
  1819. +}
  1820. +
  1821. +static int rk_ahash_start(struct rk_crypto_info *dev)
  1822. +{
  1823. +   return rk_ahash_set_data_start(dev);
  1824. +}
  1825. +
  1826. +static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
  1827. +{
  1828. +   int err = 0;
  1829. +
  1830. +   dev->unload_data(dev);
  1831. +   if (dev->left_bytes) {
  1832. +       if (dev->aligned) {
  1833. +           if (sg_is_last(dev->sg_src)) {
  1834. +               dev_warn(dev->dev, "[%s:%d], Lack of data\n",
  1835. +                    __func__, __LINE__);
  1836. +               err = -ENOMEM;
  1837. +               goto out_rx;
  1838. +           }
  1839. +           dev->sg_src = sg_next(dev->sg_src);
  1840. +       }
  1841. +       err = rk_ahash_set_data_start(dev);
  1842. +   } else {
  1843. +       dev->complete(dev, 0);
  1844. +   }
  1845. +
  1846. +out_rx:
  1847. +   return err;
  1848. +}
  1849. +
  1850. +static int rk_cra_hash_init(struct crypto_tfm *tfm)
  1851. +{
  1852. +   struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
  1853. +   struct rk_crypto_tmp *algt;
  1854. +   struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
  1855. +
  1856. +   const char *alg_name = crypto_tfm_alg_name(tfm);
  1857. +
  1858. +   algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
  1859. +
  1860. +   tctx->dev = algt->dev;
  1861. +   tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
  1862. +   if (!tctx->dev->addr_vir) {
  1863. +       dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
  1864. +       return -ENOMEM;
  1865. +   }
  1866. +   tctx->dev->start = rk_ahash_start;
  1867. +   tctx->dev->update = rk_ahash_crypto_rx;
  1868. +   tctx->dev->complete = rk_ahash_crypto_complete;
  1869. +
  1870. +   /* for fallback */
  1871. +   tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
  1872. +                          CRYPTO_ALG_NEED_FALLBACK);
  1873. +   if (IS_ERR(tctx->fallback_tfm)) {
  1874. +       dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
  1875. +       return PTR_ERR(tctx->fallback_tfm);
  1876. +   }
  1877. +   crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1878. +                sizeof(struct rk_ahash_rctx) +
  1879. +                crypto_ahash_reqsize(tctx->fallback_tfm));
  1880. +
  1881. +   return tctx->dev->enable_clk(tctx->dev);
  1882. +}
  1883. +
  1884. +static void rk_cra_hash_exit(struct crypto_tfm *tfm)
  1885. +{
  1886. +   struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
  1887. +
  1888. +   free_page((unsigned long)tctx->dev->addr_vir);
  1889. +   return tctx->dev->disable_clk(tctx->dev);
  1890. +}
  1891. +
  1892. +struct rk_crypto_tmp rk_ahash_sha1 = {
  1893. +   .type = ALG_TYPE_HASH,
  1894. +   .alg.hash = {
  1895. +       .init = rk_ahash_init,
  1896. +       .update = rk_ahash_update,
  1897. +       .final = rk_ahash_final,
  1898. +       .finup = rk_ahash_finup,
  1899. +       .export = rk_ahash_export,
  1900. +       .import = rk_ahash_import,
  1901. +       .digest = rk_ahash_digest,
  1902. +       .halg = {
  1903. +            .digestsize = SHA1_DIGEST_SIZE,
  1904. +            .statesize = sizeof(struct sha1_state),
  1905. +            .base = {
  1906. +                 .cra_name = "sha1",
  1907. +                 .cra_driver_name = "rk-sha1",
  1908. +                 .cra_priority = 300,
  1909. +                 .cra_flags = CRYPTO_ALG_ASYNC |
  1910. +                          CRYPTO_ALG_NEED_FALLBACK,
  1911. +                 .cra_blocksize = SHA1_BLOCK_SIZE,
  1912. +                 .cra_ctxsize = sizeof(struct rk_ahash_ctx),
  1913. +                 .cra_alignmask = 3,
  1914. +                 .cra_init = rk_cra_hash_init,
  1915. +                 .cra_exit = rk_cra_hash_exit,
  1916. +                 .cra_module = THIS_MODULE,
  1917. +                 }
  1918. +            }
  1919. +   }
  1920. +};
  1921. +
  1922. +struct rk_crypto_tmp rk_ahash_sha256 = {
  1923. +   .type = ALG_TYPE_HASH,
  1924. +   .alg.hash = {
  1925. +       .init = rk_ahash_init,
  1926. +       .update = rk_ahash_update,
  1927. +       .final = rk_ahash_final,
  1928. +       .finup = rk_ahash_finup,
  1929. +       .export = rk_ahash_export,
  1930. +       .import = rk_ahash_import,
  1931. +       .digest = rk_ahash_digest,
  1932. +       .halg = {
  1933. +            .digestsize = SHA256_DIGEST_SIZE,
  1934. +            .statesize = sizeof(struct sha256_state),
  1935. +            .base = {
  1936. +                 .cra_name = "sha256",
  1937. +                 .cra_driver_name = "rk-sha256",
  1938. +                 .cra_priority = 300,
  1939. +                 .cra_flags = CRYPTO_ALG_ASYNC |
  1940. +                          CRYPTO_ALG_NEED_FALLBACK,
  1941. +                 .cra_blocksize = SHA256_BLOCK_SIZE,
  1942. +                 .cra_ctxsize = sizeof(struct rk_ahash_ctx),
  1943. +                 .cra_alignmask = 3,
  1944. +                 .cra_init = rk_cra_hash_init,
  1945. +                 .cra_exit = rk_cra_hash_exit,
  1946. +                 .cra_module = THIS_MODULE,
  1947. +                 }
  1948. +            }
  1949. +   }
  1950. +};
  1951. +
  1952. +struct rk_crypto_tmp rk_ahash_md5 = {
  1953. +   .type = ALG_TYPE_HASH,
  1954. +   .alg.hash = {
  1955. +       .init = rk_ahash_init,
  1956. +       .update = rk_ahash_update,
  1957. +       .final = rk_ahash_final,
  1958. +       .finup = rk_ahash_finup,
  1959. +       .export = rk_ahash_export,
  1960. +       .import = rk_ahash_import,
  1961. +       .digest = rk_ahash_digest,
  1962. +       .halg = {
  1963. +            .digestsize = MD5_DIGEST_SIZE,
  1964. +            .statesize = sizeof(struct md5_state),
  1965. +            .base = {
  1966. +                 .cra_name = "md5",
  1967. +                 .cra_driver_name = "rk-md5",
  1968. +                 .cra_priority = 300,
  1969. +                 .cra_flags = CRYPTO_ALG_ASYNC |
  1970. +                          CRYPTO_ALG_NEED_FALLBACK,
  1971. +                 .cra_blocksize = SHA1_BLOCK_SIZE,
  1972. +                 .cra_ctxsize = sizeof(struct rk_ahash_ctx),
  1973. +                 .cra_alignmask = 3,
  1974. +                 .cra_init = rk_cra_hash_init,
  1975. +                 .cra_exit = rk_cra_hash_exit,
  1976. +                 .cra_module = THIS_MODULE,
  1977. +                 }
  1978. +           }
  1979. +   }
  1980. +};
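Once this patch is applied and the module is loaded, the new rk-sha1/rk-sha256/rk-md5 ahash implementations can be exercised from userspace through the AF_ALG hash interface. A minimal smoke-test sketch follows (illustrative only, not part of the patch set; it assumes CONFIG_CRYPTO_USER_API_HASH is enabled, and that the kernel resolves "sha1" to the highest-priority registered implementation):

    /* Hypothetical AF_ALG smoke test for the new ahash algorithms. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "sha1",   /* matched against cra_name */
        };
        unsigned char digest[20];
        const char msg[] = "hello";
        int tfmfd, opfd, i;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        opfd = accept(tfmfd, NULL, 0);

        write(opfd, msg, strlen(msg));      /* feed the message */
        read(opfd, digest, sizeof(digest)); /* read back the SHA-1 digest */

        for (i = 0; i < 20; i++)
            printf("%02x", digest[i]);
        printf("\n");
        close(opfd);
        close(tfmfd);
        return 0;
    }

Whether the hardware implementation was actually selected can be checked in /proc/crypto: the driver registers rk-sha1 at priority 300, above the generic software hashes.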
  1981. commit 641eacd15696a65b08880985701c8082872da136
  1982. Author: Zain Wang <wzz@rock-chips.com>
  1983. Date:   Mon Jul 24 09:23:13 2017 +0800
  1984.  
  1985.     crypto: rockchip - move the crypto completion from interrupt context
  1986.    
  1987.     It's illegal to call the completion function from hardirq context;
  1988.     doing so causes the runtime self-tests to fail. Let's add a new tasklet
  1989.     (done_task) to move the update operation out of hardirq context.
  1990.    
  1991.     Signed-off-by: zain wang <wzz@rock-chips.com>
  1992.     Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  1993.  
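In isolation, the deferral pattern this patch adopts — latch the status in the hardirq handler, then let a tasklet run the completion — reduces to something like the following sketch (hypothetical names, not the driver's exact code):

    #include <linux/interrupt.h>

    struct demo_dev {                       /* hypothetical device struct */
        int err;
        struct tasklet_struct done_task;
        int (*update)(struct demo_dev *dev);
        void (*complete)(struct demo_dev *dev, int err);
    };

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
        struct demo_dev *dev = dev_id;

        /* hardirq context: only record what happened and ack the IRQ */
        if (demo_read_and_clear_status(dev))    /* hypothetical helper */
            dev->err = -EFAULT;
        tasklet_schedule(&dev->done_task);
        return IRQ_HANDLED;
    }

    static void demo_done_task_cb(unsigned long data)
    {
        struct demo_dev *dev = (struct demo_dev *)data;

        /* softirq context: now it is legal to run the completion */
        if (dev->err)
            dev->complete(dev, dev->err);
        else
            dev->err = dev->update(dev);
    }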
  1994. diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
  1995. index d0f80c6..c2b1dd7 100644
  1996. --- a/drivers/crypto/rockchip/rk3288_crypto.c
  1997. +++ b/drivers/crypto/rockchip/rk3288_crypto.c
  1998. @@ -169,24 +169,22 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
  1999.  {
  2000.     struct rk_crypto_info *dev  = platform_get_drvdata(dev_id);
  2001.     u32 interrupt_status;
  2002. -   int err = 0;
  2003.  
  2004.     spin_lock(&dev->lock);
  2005.     interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
  2006.     CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
  2007. +
  2008.     if (interrupt_status & 0x0a) {
  2009.         dev_warn(dev->dev, "DMA Error\n");
  2010. -       err = -EFAULT;
  2011. -   } else if (interrupt_status & 0x05) {
  2012. -       err = dev->update(dev);
  2013. +       dev->err = -EFAULT;
  2014.     }
  2015. -   if (err)
  2016. -       dev->complete(dev, err);
  2017. +   tasklet_schedule(&dev->done_task);
  2018. +
  2019.     spin_unlock(&dev->lock);
  2020.     return IRQ_HANDLED;
  2021.  }
  2022.  
  2023. -static void rk_crypto_tasklet_cb(unsigned long data)
  2024. +static void rk_crypto_queue_task_cb(unsigned long data)
  2025.  {
  2026.     struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
  2027.     struct crypto_async_request *async_req, *backlog;
  2028. @@ -210,11 +208,26 @@ static void rk_crypto_tasklet_cb(unsigned long data)
  2029.         dev->ablk_req = ablkcipher_request_cast(async_req);
  2030.     else
  2031.         dev->ahash_req = ahash_request_cast(async_req);
  2032. +   dev->err = 0;
  2033.     err = dev->start(dev);
  2034.     if (err)
  2035.         dev->complete(dev, err);
  2036.  }
  2037.  
  2038. +static void rk_crypto_done_task_cb(unsigned long data)
  2039. +{
  2040. +   struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
  2041. +
  2042. +   if (dev->err) {
  2043. +       dev->complete(dev, dev->err);
  2044. +       return;
  2045. +   }
  2046. +
  2047. +   dev->err = dev->update(dev);
  2048. +   if (dev->err)
  2049. +       dev->complete(dev, dev->err);
  2050. +}
  2051. +
  2052.  static struct rk_crypto_tmp *rk_cipher_algs[] = {
  2053.     &rk_ecb_aes_alg,
  2054.     &rk_cbc_aes_alg,
  2055. @@ -361,8 +374,10 @@ static int rk_crypto_probe(struct platform_device *pdev)
  2056.     crypto_info->dev = &pdev->dev;
  2057.     platform_set_drvdata(pdev, crypto_info);
  2058.  
  2059. -   tasklet_init(&crypto_info->crypto_tasklet,
  2060. -            rk_crypto_tasklet_cb, (unsigned long)crypto_info);
  2061. +   tasklet_init(&crypto_info->queue_task,
  2062. +            rk_crypto_queue_task_cb, (unsigned long)crypto_info);
  2063. +   tasklet_init(&crypto_info->done_task,
  2064. +            rk_crypto_done_task_cb, (unsigned long)crypto_info);
  2065.     crypto_init_queue(&crypto_info->queue, 50);
  2066.  
  2067.     crypto_info->enable_clk = rk_crypto_enable_clk;
  2068. @@ -380,7 +395,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
  2069.     return 0;
  2070.  
  2071.  err_register_alg:
  2072. -   tasklet_kill(&crypto_info->crypto_tasklet);
  2073. +   tasklet_kill(&crypto_info->queue_task);
  2074. +   tasklet_kill(&crypto_info->done_task);
  2075.  err_crypto:
  2076.     return err;
  2077.  }
  2078. @@ -390,7 +406,8 @@ static int rk_crypto_remove(struct platform_device *pdev)
  2079.     struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
  2080.  
  2081.     rk_crypto_unregister();
  2082. -   tasklet_kill(&crypto_tmp->crypto_tasklet);
  2083. +   tasklet_kill(&crypto_tmp->done_task);
  2084. +   tasklet_kill(&crypto_tmp->queue_task);
  2085.     return 0;
  2086.  }
  2087.  
  2088. diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
  2089. index d7b71fe..65ad1c2 100644
  2090. --- a/drivers/crypto/rockchip/rk3288_crypto.h
  2091. +++ b/drivers/crypto/rockchip/rk3288_crypto.h
  2092. @@ -190,9 +190,11 @@ struct rk_crypto_info {
  2093.     void __iomem            *reg;
  2094.     int             irq;
  2095.     struct crypto_queue     queue;
  2096. -   struct tasklet_struct       crypto_tasklet;
  2097. +   struct tasklet_struct       queue_task;
  2098. +   struct tasklet_struct       done_task;
  2099.     struct ablkcipher_request   *ablk_req;
  2100.     struct ahash_request        *ahash_req;
  2101. +   int                 err;
  2102.     /* device lock */
  2103.     spinlock_t          lock;
  2104.  
  2105. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2106. index b5a3afe..8787e44 100644
  2107. --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2108. +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2109. @@ -42,7 +42,7 @@ static int rk_handle_req(struct rk_crypto_info *dev,
  2110.     spin_lock_irqsave(&dev->lock, flags);
  2111.     err = ablkcipher_enqueue_request(&dev->queue, req);
  2112.     spin_unlock_irqrestore(&dev->lock, flags);
  2113. -   tasklet_schedule(&dev->crypto_tasklet);
  2114. +   tasklet_schedule(&dev->queue_task);
  2115.     return err;
  2116.  }
  2117.  
  2118. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2119. index 7185882..9b55585 100644
  2120. --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2121. +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2122. @@ -204,7 +204,7 @@ static int rk_ahash_digest(struct ahash_request *req)
  2123.     ret = crypto_enqueue_request(&dev->queue, &req->base);
  2124.     spin_unlock_irqrestore(&dev->lock, flags);
  2125.  
  2126. -   tasklet_schedule(&dev->crypto_tasklet);
  2127. +   tasklet_schedule(&dev->queue_task);
  2128.  
  2129.     /*
  2130.      * it will take some time to process data after the last dma transmission.
  2131. commit 9a42e4eed3fcd7ba8dff6622384cd08bfe5ef707
  2132. Author: Zain Wang <wzz@rock-chips.com>
  2133. Date:   Mon Jul 24 09:23:14 2017 +0800
  2134.  
  2135.     crypto: rockchip - return the err code when unable to dequeue the crypto request
  2136.    
  2137.     Sometimes we are unable to dequeue the crypto request; in this case,
  2138.     we should finish the crypto operation and return the err code.
  2139.    
  2140.     Signed-off-by: zain wang <wzz@rock-chips.com>
  2141.     Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  2142.  
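The key point is the return value: crypto_enqueue_request() reports -EINPROGRESS for a queued request (or -EBUSY for a backlogged one), and that code must reach the caller even when the immediate dequeue yields nothing. Distilled, the pattern this patch installs in both request paths looks like this (a condensed sketch using the driver's field names):

    spin_lock_irqsave(&dev->lock, flags);
    err = crypto_enqueue_request(&dev->queue, &req->base); /* -EINPROGRESS or -EBUSY */
    backlog   = crypto_get_backlog(&dev->queue);
    async_req = crypto_dequeue_request(&dev->queue);
    spin_unlock_irqrestore(&dev->lock, flags);

    if (!async_req)
        return err;        /* nothing runnable: still report the enqueue result */
    if (backlog)
        backlog->complete(backlog, -EINPROGRESS); /* promote a backlogged request */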
  2143. diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
  2144. index c2b1dd7..57c3783 100644
  2145. --- a/drivers/crypto/rockchip/rk3288_crypto.c
  2146. +++ b/drivers/crypto/rockchip/rk3288_crypto.c
  2147. @@ -187,27 +187,8 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
  2148.  static void rk_crypto_queue_task_cb(unsigned long data)
  2149.  {
  2150.     struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
  2151. -   struct crypto_async_request *async_req, *backlog;
  2152. -   unsigned long flags;
  2153.     int err = 0;
  2154.  
  2155. -   spin_lock_irqsave(&dev->lock, flags);
  2156. -   backlog   = crypto_get_backlog(&dev->queue);
  2157. -   async_req = crypto_dequeue_request(&dev->queue);
  2158. -   spin_unlock_irqrestore(&dev->lock, flags);
  2159. -   if (!async_req) {
  2160. -       dev_err(dev->dev, "async_req is NULL !!\n");
  2161. -       return;
  2162. -   }
  2163. -   if (backlog) {
  2164. -       backlog->complete(backlog, -EINPROGRESS);
  2165. -       backlog = NULL;
  2166. -   }
  2167. -
  2168. -   if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
  2169. -       dev->ablk_req = ablkcipher_request_cast(async_req);
  2170. -   else
  2171. -       dev->ahash_req = ahash_request_cast(async_req);
  2172.     dev->err = 0;
  2173.     err = dev->start(dev);
  2174.     if (err)
  2175. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2176. index 8787e44..dbe78de 100644
  2177. --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2178. +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2179. @@ -25,6 +25,7 @@ static int rk_handle_req(struct rk_crypto_info *dev,
  2180.              struct ablkcipher_request *req)
  2181.  {
  2182.     unsigned long flags;
  2183. +   struct crypto_async_request *async_req, *backlog;
  2184.     int err;
  2185.  
  2186.     if (!IS_ALIGNED(req->nbytes, dev->align_size))
  2187. @@ -41,7 +42,21 @@ static int rk_handle_req(struct rk_crypto_info *dev,
  2188.  
  2189.     spin_lock_irqsave(&dev->lock, flags);
  2190.     err = ablkcipher_enqueue_request(&dev->queue, req);
  2191. +   backlog   = crypto_get_backlog(&dev->queue);
  2192. +   async_req = crypto_dequeue_request(&dev->queue);
  2193.     spin_unlock_irqrestore(&dev->lock, flags);
  2194. +
  2195. +   if (!async_req) {
  2196. +       dev_err(dev->dev, "async_req is NULL !!\n");
  2197. +       return err;
  2198. +   }
  2199. +   if (backlog) {
  2200. +       backlog->complete(backlog, -EINPROGRESS);
  2201. +       backlog = NULL;
  2202. +   }
  2203. +
  2204. +   dev->ablk_req = ablkcipher_request_cast(async_req);
  2205. +
  2206.     tasklet_schedule(&dev->queue_task);
  2207.     return err;
  2208.  }
  2209. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2210. index 9b55585..ebc46e0 100644
  2211. --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2212. +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2213. @@ -166,6 +166,7 @@ static int rk_ahash_digest(struct ahash_request *req)
  2214.  {
  2215.     struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  2216.     struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  2217. +   struct crypto_async_request *async_req, *backlog;
  2218.     struct rk_crypto_info *dev = NULL;
  2219.     unsigned long flags;
  2220.     int ret;
  2221. @@ -202,8 +203,21 @@ static int rk_ahash_digest(struct ahash_request *req)
  2222.  
  2223.     spin_lock_irqsave(&dev->lock, flags);
  2224.     ret = crypto_enqueue_request(&dev->queue, &req->base);
  2225. +   backlog   = crypto_get_backlog(&dev->queue);
  2226. +   async_req = crypto_dequeue_request(&dev->queue);
  2227.     spin_unlock_irqrestore(&dev->lock, flags);
  2228.  
  2229. +   if (!async_req) {
  2230. +       dev_err(dev->dev, "async_req is NULL !!\n");
  2231. +       return ret;
  2232. +   }
  2233. +   if (backlog) {
  2234. +       backlog->complete(backlog, -EINPROGRESS);
  2235. +       backlog = NULL;
  2236. +   }
  2237. +
  2238. +   dev->ahash_req = ahash_request_cast(async_req);
  2239. +
  2240.     tasklet_schedule(&dev->queue_task);
  2241.  
  2242.     /*
  2243. commit 5a7801f6634b1e2888bcb1a85bedc50e46dcd757
  2244. Author: Zain Wang <wzz@rock-chips.com>
  2245. Date:   Tue Aug 15 15:48:15 2017 +0800
  2246.  
  2247.     crypto: rockchip - Don't dequeue the request when device is busy
  2248.    
  2249.     The device can only process one request at a time. So if multiple
  2250.     requests arrive at the same time, we enqueue them first and
  2251.     dequeue them one by one when the device is idle.
  2252.    
  2253.     Signed-off-by: zain wang <wzz@rock-chips.com>
  2254.     Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  2255.  
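Conceptually the patch turns dispatch into a busy-gated queue: every request is enqueued, but the queue tasklet is only kicked when the engine is idle, and it re-arms itself after each completed request. The gate itself boils down to this (same field names as the patch, shown as a sketch):

    spin_lock_irqsave(&dev->lock, flags);
    ret = crypto_enqueue_request(&dev->queue, async_req);
    if (dev->busy) {                  /* engine already working: leave it queued */
        spin_unlock_irqrestore(&dev->lock, flags);
        return ret;
    }
    dev->busy = true;                 /* engine idle: claim it ...      */
    spin_unlock_irqrestore(&dev->lock, flags);
    tasklet_schedule(&dev->queue_task); /* ... and start dispatching    */

When the queue runs dry, rk_crypto_queue_task_cb() clears dev->busy again under the lock, so the next enqueue restarts the tasklet.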
  2256. diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
  2257. index 57c3783..c9d622a 100644
  2258. --- a/drivers/crypto/rockchip/rk3288_crypto.c
  2259. +++ b/drivers/crypto/rockchip/rk3288_crypto.c
  2260. @@ -184,15 +184,53 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
  2261.     return IRQ_HANDLED;
  2262.  }
  2263.  
  2264. +static int rk_crypto_enqueue(struct rk_crypto_info *dev,
  2265. +                 struct crypto_async_request *async_req)
  2266. +{
  2267. +   unsigned long flags;
  2268. +   int ret;
  2269. +
  2270. +   spin_lock_irqsave(&dev->lock, flags);
  2271. +   ret = crypto_enqueue_request(&dev->queue, async_req);
  2272. +   if (dev->busy) {
  2273. +       spin_unlock_irqrestore(&dev->lock, flags);
  2274. +       return ret;
  2275. +   }
  2276. +   dev->busy = true;
  2277. +   spin_unlock_irqrestore(&dev->lock, flags);
  2278. +   tasklet_schedule(&dev->queue_task);
  2279. +
  2280. +   return ret;
  2281. +}
  2282. +
  2283.  static void rk_crypto_queue_task_cb(unsigned long data)
  2284.  {
  2285.     struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
  2286. +   struct crypto_async_request *async_req, *backlog;
  2287. +   unsigned long flags;
  2288.     int err = 0;
  2289.  
  2290.     dev->err = 0;
  2291. +   spin_lock_irqsave(&dev->lock, flags);
  2292. +   backlog   = crypto_get_backlog(&dev->queue);
  2293. +   async_req = crypto_dequeue_request(&dev->queue);
  2294. +
  2295. +   if (!async_req) {
  2296. +       dev->busy = false;
  2297. +       spin_unlock_irqrestore(&dev->lock, flags);
  2298. +       return;
  2299. +   }
  2300. +   spin_unlock_irqrestore(&dev->lock, flags);
  2301. +
  2302. +   if (backlog) {
  2303. +       backlog->complete(backlog, -EINPROGRESS);
  2304. +       backlog = NULL;
  2305. +   }
  2306. +
  2307. +   dev->async_req = async_req;
  2308.     err = dev->start(dev);
  2309.     if (err)
  2310. -       dev->complete(dev, err);
  2311. +       dev->complete(dev->async_req, err);
  2312.  }
  2313.  
  2314.  static void rk_crypto_done_task_cb(unsigned long data)
  2315. @@ -200,13 +238,13 @@ static void rk_crypto_done_task_cb(unsigned long data)
  2316.     struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
  2317.  
  2318.     if (dev->err) {
  2319. -       dev->complete(dev, dev->err);
  2320. +       dev->complete(dev->async_req, dev->err);
  2321.         return;
  2322.     }
  2323.  
  2324.     dev->err = dev->update(dev);
  2325.     if (dev->err)
  2326. -       dev->complete(dev, dev->err);
  2327. +       dev->complete(dev->async_req, dev->err);
  2328.  }
  2329.  
  2330.  static struct rk_crypto_tmp *rk_cipher_algs[] = {
  2331. @@ -365,6 +403,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
  2332.     crypto_info->disable_clk = rk_crypto_disable_clk;
  2333.     crypto_info->load_data = rk_load_data;
  2334.     crypto_info->unload_data = rk_unload_data;
  2335. +   crypto_info->enqueue = rk_crypto_enqueue;
  2336. +   crypto_info->busy = false;
  2337.  
  2338.     err = rk_crypto_register(crypto_info);
  2339.     if (err) {
  2340. diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
  2341. index 65ad1c2..ab6a1b4 100644
  2342. --- a/drivers/crypto/rockchip/rk3288_crypto.h
  2343. +++ b/drivers/crypto/rockchip/rk3288_crypto.h
  2344. @@ -192,8 +192,7 @@ struct rk_crypto_info {
  2345.     struct crypto_queue     queue;
  2346.     struct tasklet_struct       queue_task;
  2347.     struct tasklet_struct       done_task;
  2348. -   struct ablkcipher_request   *ablk_req;
  2349. -   struct ahash_request        *ahash_req;
  2350. +   struct crypto_async_request *async_req;
  2351.     int                 err;
  2352.     /* device lock */
  2353.     spinlock_t          lock;
  2354. @@ -210,18 +209,20 @@ struct rk_crypto_info {
  2355.     size_t              nents;
  2356.     unsigned int            total;
  2357.     unsigned int            count;
  2358. -   u32             mode;
  2359.     dma_addr_t          addr_in;
  2360.     dma_addr_t          addr_out;
  2361. +   bool                busy;
  2362.     int (*start)(struct rk_crypto_info *dev);
  2363.     int (*update)(struct rk_crypto_info *dev);
  2364. -   void (*complete)(struct rk_crypto_info *dev, int err);
  2365. +   void (*complete)(struct crypto_async_request *base, int err);
  2366.     int (*enable_clk)(struct rk_crypto_info *dev);
  2367.     void (*disable_clk)(struct rk_crypto_info *dev);
  2368.     int (*load_data)(struct rk_crypto_info *dev,
  2369.              struct scatterlist *sg_src,
  2370.              struct scatterlist *sg_dst);
  2371.     void (*unload_data)(struct rk_crypto_info *dev);
  2372. +   int (*enqueue)(struct rk_crypto_info *dev,
  2373. +              struct crypto_async_request *async_req);
  2374.  };
  2375.  
  2376.  /* the private variable of hash */
  2377. @@ -234,12 +235,14 @@ struct rk_ahash_ctx {
  2378.  /* the private variable of hash for fallback */
  2379.  struct rk_ahash_rctx {
  2380.     struct ahash_request        fallback_req;
  2381. +   u32             mode;
  2382.  };
  2383.  
  2384.  /* the private variable of cipher */
  2385.  struct rk_cipher_ctx {
  2386.     struct rk_crypto_info       *dev;
  2387.     unsigned int            keylen;
  2388. +   u32             mode;
  2389.  };
  2390.  
  2391.  enum alg_type {
  2392. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2393. index dbe78de..639c15c 100644
  2394. --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2395. +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
  2396. @@ -15,50 +15,19 @@
  2397.  
  2398.  #define RK_CRYPTO_DEC          BIT(0)
  2399.  
  2400. -static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
  2401. +static void rk_crypto_complete(struct crypto_async_request *base, int err)
  2402.  {
  2403. -   if (dev->ablk_req->base.complete)
  2404. -       dev->ablk_req->base.complete(&dev->ablk_req->base, err);
  2405. +   if (base->complete)
  2406. +       base->complete(base, err);
  2407.  }
  2408.  
  2409.  static int rk_handle_req(struct rk_crypto_info *dev,
  2410.              struct ablkcipher_request *req)
  2411.  {
  2412. -   unsigned long flags;
  2413. -   struct crypto_async_request *async_req, *backlog;
  2414. -   int err;
  2415. -
  2416.     if (!IS_ALIGNED(req->nbytes, dev->align_size))
  2417.         return -EINVAL;
  2418. -
  2419. -   dev->left_bytes = req->nbytes;
  2420. -   dev->total = req->nbytes;
  2421. -   dev->sg_src = req->src;
  2422. -   dev->first = req->src;
  2423. -   dev->nents = sg_nents(req->src);
  2424. -   dev->sg_dst = req->dst;
  2425. -   dev->aligned = 1;
  2426. -   dev->ablk_req = req;
  2427. -
  2428. -   spin_lock_irqsave(&dev->lock, flags);
  2429. -   err = ablkcipher_enqueue_request(&dev->queue, req);
  2430. -   backlog   = crypto_get_backlog(&dev->queue);
  2431. -   async_req = crypto_dequeue_request(&dev->queue);
  2432. -   spin_unlock_irqrestore(&dev->lock, flags);
  2433. -
  2434. -   if (!async_req) {
  2435. -       dev_err(dev->dev, "async_req is NULL !!\n");
  2436. -       return err;
  2437. -   }
  2438. -   if (backlog) {
  2439. -       backlog->complete(backlog, -EINPROGRESS);
  2440. -       backlog = NULL;
  2441. -   }
  2442. -
  2443. -   dev->ablk_req = ablkcipher_request_cast(async_req);
  2444. -
  2445. -   tasklet_schedule(&dev->queue_task);
  2446. -   return err;
  2447. +   else
  2448. +       return dev->enqueue(dev, &req->base);
  2449.  }
  2450.  
  2451.  static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
  2452. @@ -108,7 +77,7 @@ static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
  2453.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2454.     struct rk_crypto_info *dev = ctx->dev;
  2455.  
  2456. -   dev->mode = RK_CRYPTO_AES_ECB_MODE;
  2457. +   ctx->mode = RK_CRYPTO_AES_ECB_MODE;
  2458.     return rk_handle_req(dev, req);
  2459.  }
  2460.  
  2461. @@ -118,7 +87,7 @@ static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
  2462.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2463.     struct rk_crypto_info *dev = ctx->dev;
  2464.  
  2465. -   dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
  2466. +   ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
  2467.     return rk_handle_req(dev, req);
  2468.  }
  2469.  
  2470. @@ -128,7 +97,7 @@ static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
  2471.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2472.     struct rk_crypto_info *dev = ctx->dev;
  2473.  
  2474. -   dev->mode = RK_CRYPTO_AES_CBC_MODE;
  2475. +   ctx->mode = RK_CRYPTO_AES_CBC_MODE;
  2476.     return rk_handle_req(dev, req);
  2477.  }
  2478.  
  2479. @@ -138,7 +107,7 @@ static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
  2480.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2481.     struct rk_crypto_info *dev = ctx->dev;
  2482.  
  2483. -   dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
  2484. +   ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
  2485.     return rk_handle_req(dev, req);
  2486.  }
  2487.  
  2488. @@ -148,7 +117,7 @@ static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
  2489.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2490.     struct rk_crypto_info *dev = ctx->dev;
  2491.  
  2492. -   dev->mode = 0;
  2493. +   ctx->mode = 0;
  2494.     return rk_handle_req(dev, req);
  2495.  }
  2496.  
  2497. @@ -158,7 +127,7 @@ static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
  2498.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2499.     struct rk_crypto_info *dev = ctx->dev;
  2500.  
  2501. -   dev->mode = RK_CRYPTO_DEC;
  2502. +   ctx->mode = RK_CRYPTO_DEC;
  2503.     return rk_handle_req(dev, req);
  2504.  }
  2505.  
  2506. @@ -168,7 +137,7 @@ static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
  2507.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2508.     struct rk_crypto_info *dev = ctx->dev;
  2509.  
  2510. -   dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
  2511. +   ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
  2512.     return rk_handle_req(dev, req);
  2513.  }
  2514.  
  2515. @@ -178,7 +147,7 @@ static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
  2516.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2517.     struct rk_crypto_info *dev = ctx->dev;
  2518.  
  2519. -   dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
  2520. +   ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
  2521.     return rk_handle_req(dev, req);
  2522.  }
  2523.  
  2524. @@ -188,7 +157,7 @@ static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
  2525.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2526.     struct rk_crypto_info *dev = ctx->dev;
  2527.  
  2528. -   dev->mode = RK_CRYPTO_TDES_SELECT;
  2529. +   ctx->mode = RK_CRYPTO_TDES_SELECT;
  2530.     return rk_handle_req(dev, req);
  2531.  }
  2532.  
  2533. @@ -198,7 +167,7 @@ static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
  2534.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2535.     struct rk_crypto_info *dev = ctx->dev;
  2536.  
  2537. -   dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
  2538. +   ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
  2539.     return rk_handle_req(dev, req);
  2540.  }
  2541.  
  2542. @@ -208,7 +177,7 @@ static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
  2543.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2544.     struct rk_crypto_info *dev = ctx->dev;
  2545.  
  2546. -   dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
  2547. +   ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
  2548.     return rk_handle_req(dev, req);
  2549.  }
  2550.  
  2551. @@ -218,15 +187,16 @@ static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
  2552.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  2553.     struct rk_crypto_info *dev = ctx->dev;
  2554.  
  2555. -   dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
  2556. +   ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
  2557.             RK_CRYPTO_DEC;
  2558.     return rk_handle_req(dev, req);
  2559.  }
  2560.  
  2561.  static void rk_ablk_hw_init(struct rk_crypto_info *dev)
  2562.  {
  2563. -   struct crypto_ablkcipher *cipher =
  2564. -       crypto_ablkcipher_reqtfm(dev->ablk_req);
  2565. +   struct ablkcipher_request *req =
  2566. +       ablkcipher_request_cast(dev->async_req);
  2567. +   struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
  2568.     struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  2569.     struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
  2570.     u32 ivsize, block, conf_reg = 0;
  2571. @@ -235,25 +205,23 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
  2572.     ivsize = crypto_ablkcipher_ivsize(cipher);
  2573.  
  2574.     if (block == DES_BLOCK_SIZE) {
  2575. -       dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
  2576. +       ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
  2577.                  RK_CRYPTO_TDES_BYTESWAP_KEY |
  2578.                  RK_CRYPTO_TDES_BYTESWAP_IV;
  2579. -       CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
  2580. -       memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
  2581. -               dev->ablk_req->info, ivsize);
  2582. +       CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
  2583. +       memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
  2584.         conf_reg = RK_CRYPTO_DESSEL;
  2585.     } else {
  2586. -       dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
  2587. +       ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
  2588.                  RK_CRYPTO_AES_KEY_CHANGE |
  2589.                  RK_CRYPTO_AES_BYTESWAP_KEY |
  2590.                  RK_CRYPTO_AES_BYTESWAP_IV;
  2591.         if (ctx->keylen == AES_KEYSIZE_192)
  2592. -           dev->mode |= RK_CRYPTO_AES_192BIT_key;
  2593. +           ctx->mode |= RK_CRYPTO_AES_192BIT_key;
  2594.         else if (ctx->keylen == AES_KEYSIZE_256)
  2595. -           dev->mode |= RK_CRYPTO_AES_256BIT_key;
  2596. -       CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
  2597. -       memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
  2598. -               dev->ablk_req->info, ivsize);
  2599. +           ctx->mode |= RK_CRYPTO_AES_256BIT_key;
  2600. +       CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
  2601. +       memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
  2602.     }
  2603.     conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
  2604.             RK_CRYPTO_BYTESWAP_BRFIFO;
  2605. @@ -283,8 +251,18 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
  2606.  
  2607.  static int rk_ablk_start(struct rk_crypto_info *dev)
  2608.  {
  2609. +   struct ablkcipher_request *req =
  2610. +       ablkcipher_request_cast(dev->async_req);
  2611.     unsigned long flags;
  2612. -   int err;
  2613. +   int err = 0;
  2614. +
  2615. +   dev->left_bytes = req->nbytes;
  2616. +   dev->total = req->nbytes;
  2617. +   dev->sg_src = req->src;
  2618. +   dev->first = req->src;
  2619. +   dev->nents = sg_nents(req->src);
  2620. +   dev->sg_dst = req->dst;
  2621. +   dev->aligned = 1;
  2622.  
  2623.     spin_lock_irqsave(&dev->lock, flags);
  2624.     rk_ablk_hw_init(dev);
  2625. @@ -295,15 +273,16 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
  2626.  
  2627.  static void rk_iv_copyback(struct rk_crypto_info *dev)
  2628.  {
  2629. -   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
  2630. +   struct ablkcipher_request *req =
  2631. +       ablkcipher_request_cast(dev->async_req);
  2632. +   struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  2633.     u32 ivsize = crypto_ablkcipher_ivsize(tfm);
  2634.  
  2635.     if (ivsize == DES_BLOCK_SIZE)
  2636. -       memcpy_fromio(dev->ablk_req->info,
  2637. -                 dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
  2638. +       memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
  2639. +                 ivsize);
  2640.     else if (ivsize == AES_BLOCK_SIZE)
  2641. -       memcpy_fromio(dev->ablk_req->info,
  2642. -                 dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
  2643. +       memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
  2644.  }
  2645.  
  2646.  /* return:
  2647. @@ -313,10 +292,12 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
  2648.  static int rk_ablk_rx(struct rk_crypto_info *dev)
  2649.  {
  2650.     int err = 0;
  2651. +   struct ablkcipher_request *req =
  2652. +       ablkcipher_request_cast(dev->async_req);
  2653.  
  2654.     dev->unload_data(dev);
  2655.     if (!dev->aligned) {
  2656. -       if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
  2657. +       if (!sg_pcopy_from_buffer(req->dst, dev->nents,
  2658.                       dev->addr_vir, dev->count,
  2659.                       dev->total - dev->left_bytes -
  2660.                       dev->count)) {
  2661. @@ -339,7 +320,8 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
  2662.     } else {
  2663.         rk_iv_copyback(dev);
  2664.         /* here show the calculation is over without any err */
  2665. -       dev->complete(dev, 0);
  2666. +       dev->complete(dev->async_req, 0);
  2667. +       tasklet_schedule(&dev->queue_task);
  2668.     }
  2669.  out_rx:
  2670.     return err;
  2671. diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2672. index ebc46e0..821a506 100644
  2673. --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2674. +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
  2675. @@ -40,14 +40,16 @@ static int zero_message_process(struct ahash_request *req)
  2676.     return 0;
  2677.  }
  2678.  
  2679. -static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
  2680. +static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
  2681.  {
  2682. -   if (dev->ahash_req->base.complete)
  2683. -       dev->ahash_req->base.complete(&dev->ahash_req->base, err);
  2684. +   if (base->complete)
  2685. +       base->complete(base, err);
  2686.  }
  2687.  
  2688.  static void rk_ahash_reg_init(struct rk_crypto_info *dev)
  2689.  {
  2690. +   struct ahash_request *req = ahash_request_cast(dev->async_req);
  2691. +   struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
  2692.     int reg_status = 0;
  2693.  
  2694.     reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
  2695. @@ -67,7 +69,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
  2696.     CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
  2697.                         RK_CRYPTO_HRDMA_DONE_INT);
  2698.  
  2699. -   CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
  2700. +   CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
  2701.                            RK_CRYPTO_HASH_SWAP_DO);
  2702.  
  2703.     CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
  2704. @@ -164,78 +166,13 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
  2705.  
  2706.  static int rk_ahash_digest(struct ahash_request *req)
  2707.  {
  2708. -   struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  2709.     struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
  2710. -   struct crypto_async_request *async_req, *backlog;
  2711. -   struct rk_crypto_info *dev = NULL;
  2712. -   unsigned long flags;
  2713. -   int ret;
  2714. +   struct rk_crypto_info *dev = tctx->dev;
  2715.  
  2716.     if (!req->nbytes)
  2717.         return zero_message_process(req);
  2718. -
  2719. -   dev = tctx->dev;
  2720. -   dev->total = req->nbytes;
  2721. -   dev->left_bytes = req->nbytes;
  2722. -   dev->aligned = 0;
  2723. -   dev->mode = 0;
  2724. -   dev->align_size = 4;
  2725. -   dev->sg_dst = NULL;
  2726. -   dev->sg_src = req->src;
  2727. -   dev->first = req->src;
  2728. -   dev->nents = sg_nents(req->src);
  2729. -
  2730. -   switch (crypto_ahash_digestsize(tfm)) {
  2731. -   case SHA1_DIGEST_SIZE:
  2732. -       dev->mode = RK_CRYPTO_HASH_SHA1;
  2733. -       break;
  2734. -   case SHA256_DIGEST_SIZE:
  2735. -       dev->mode = RK_CRYPTO_HASH_SHA256;
  2736. -       break;
  2737. -   case MD5_DIGEST_SIZE:
  2738. -       dev->mode = RK_CRYPTO_HASH_MD5;
  2739. -       break;
  2740. -   default:
  2741. -       return -EINVAL;
  2742. -   }
  2743. -
  2744. -   rk_ahash_reg_init(dev);
  2745. -
  2746. -   spin_lock_irqsave(&dev->lock, flags);
  2747. -   ret = crypto_enqueue_request(&dev->queue, &req->base);
  2748. -   backlog   = crypto_get_backlog(&dev->queue);
  2749. -   async_req = crypto_dequeue_request(&dev->queue);
  2750. -   spin_unlock_irqrestore(&dev->lock, flags);
  2751. -
  2752. -   if (!async_req) {
  2753. -       dev_err(dev->dev, "async_req is NULL !!\n");
  2754. -       return ret;
  2755. -   }
  2756. -   if (backlog) {
  2757. -       backlog->complete(backlog, -EINPROGRESS);
  2758. -       backlog = NULL;
  2759. -   }
  2760. -
  2761. -   dev->ahash_req = ahash_request_cast(async_req);
  2762. -
  2763. -   tasklet_schedule(&dev->queue_task);
  2764. -
  2765. -   /*
  2766. -    * it will take some time to process data after the last dma transmission.
  2767. -    *
  2768. -    * the waiting time depends on the length of the last data block,
  2769. -    * so we cannot set a fixed time here.
  2770. -    * 10-50 keeps the system from polling here too frequently and
  2771. -    * wasting cycles, while still responding quickly when the dma
  2772. -    * completes.
  2773. -    */
  2774. -   while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
  2775. -       usleep_range(10, 50);
  2776. -
  2777. -   memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
  2778. -             crypto_ahash_digestsize(tfm));
  2779. -
  2780. -   return 0;
  2781. +   else
  2782. +       return dev->enqueue(dev, &req->base);
  2783.  }
  2784.  
  2785.  static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
  2786. @@ -258,12 +195,45 @@ static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
  2787.  
  2788.  static int rk_ahash_start(struct rk_crypto_info *dev)
  2789.  {
  2790. +   struct ahash_request *req = ahash_request_cast(dev->async_req);
  2791. +   struct crypto_ahash *tfm;
  2792. +   struct rk_ahash_rctx *rctx;
  2793. +
  2794. +   dev->total = req->nbytes;
  2795. +   dev->left_bytes = req->nbytes;
  2796. +   dev->aligned = 0;
  2797. +   dev->align_size = 4;
  2798. +   dev->sg_dst = NULL;
  2799. +   dev->sg_src = req->src;
  2800. +   dev->first = req->src;
  2801. +   dev->nents = sg_nents(req->src);
  2802. +   rctx = ahash_request_ctx(req);
  2803. +   rctx->mode = 0;
  2804. +
  2805. +   tfm = crypto_ahash_reqtfm(req);
  2806. +   switch (crypto_ahash_digestsize(tfm)) {
  2807. +   case SHA1_DIGEST_SIZE:
  2808. +       rctx->mode = RK_CRYPTO_HASH_SHA1;
  2809. +       break;
  2810. +   case SHA256_DIGEST_SIZE:
  2811. +       rctx->mode = RK_CRYPTO_HASH_SHA256;
  2812. +       break;
  2813. +   case MD5_DIGEST_SIZE:
  2814. +       rctx->mode = RK_CRYPTO_HASH_MD5;
  2815. +       break;
  2816. +   default:
  2817. +       return -EINVAL;
  2818. +   }
  2819. +
  2820. +   rk_ahash_reg_init(dev);
  2821.     return rk_ahash_set_data_start(dev);
  2822.  }
  2823.  
  2824.  static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
  2825.  {
  2826.     int err = 0;
  2827. +   struct ahash_request *req = ahash_request_cast(dev->async_req);
  2828. +   struct crypto_ahash *tfm;
  2829.  
  2830.     dev->unload_data(dev);
  2831.     if (dev->left_bytes) {
  2832. @@ -278,7 +248,24 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
  2833.         }
  2834.         err = rk_ahash_set_data_start(dev);
  2835.     } else {
  2836. -       dev->complete(dev, 0);
  2837. +       /*
  2838. +        * it will take some time to process data after the last
  2839. +        * dma transmission.
  2840. +        *
  2841. +        * the waiting time depends on the length of that last data,
  2842. +        * so a fixed timeout cannot be used here.
  2843. +        * polling every 10us keeps the cpu from spinning here too
  2844. +        * often, while still letting us respond quickly once the
  2845. +        * dma has completed.
  2846. +        */
  2847. +       while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
  2848. +           udelay(10);
  2849. +
  2850. +       tfm = crypto_ahash_reqtfm(req);
  2851. +       memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
  2852. +                 crypto_ahash_digestsize(tfm));
  2853. +       dev->complete(dev->async_req, 0);
  2854. +       tasklet_schedule(&dev->queue_task);
  2855.     }
  2856.  
  2857.  out_rx:
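
Taken together, the hunks above move the hash mode out of the shared
rk_crypto_info and into the per-request rk_ahash_rctx, and rework
rk_ahash_digest() so it only short-circuits empty messages and enqueues
everything else through dev->enqueue(); the final status poll and the
digest copy-out now happen in rk_ahash_crypto_rx() once DMA is done. A
minimal sketch of how a kernel caller could drive this asynchronous
ahash path is shown below; the "sha1" algorithm name and the demo_*
helpers are illustrative assumptions, not part of the patchset.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* Completion callback: the driver invokes this via dev->complete(). */
static void demo_done(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* request only moved off the backlog */
	res->err = err;
	complete(&res->completion);
}

/* buf must be sg-able memory (e.g. kmalloc'd), not stack or vmalloc. */
static int demo_sha1(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct demo_result res;
	int ret;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_done, &res);
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* hardware is busy hashing; wait for the callback */
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
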
  2858. commit 0c4c78de0417ced1da92351a3013e631860ea576
  2859. Author: LABBE Corentin <clabbe.montjoie@gmail.com>
  2860. Date:   Thu Dec 17 13:45:39 2015 +0100
  2861.  
  2862.     crypto: hash - add zero length message hash for shax and md5
  2863.    
  2864.     Some crypto drivers cannot process an empty data message and instead
  2865.     return a precalculated hash for md5/sha1/sha224/sha256.
  2866.    
  2867.     This patch adds those precalculated hashes in include/crypto.
  2868.    
  2869.     Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
  2870.     Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  2871.  
  2872. diff --git a/crypto/md5.c b/crypto/md5.c
  2873. index 33d17e9..2355a7c 100644
  2874. --- a/crypto/md5.c
  2875. +++ b/crypto/md5.c
  2876. @@ -24,6 +24,12 @@
  2877.  #include <linux/cryptohash.h>
  2878.  #include <asm/byteorder.h>
  2879.  
  2880. +const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
  2881. +   0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
  2882. +   0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
  2883. +};
  2884. +EXPORT_SYMBOL_GPL(md5_zero_message_hash);
  2885. +
  2886.  /* XXX: this stuff can be optimized */
  2887.  static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
  2888.  {
  2889. diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
  2890. index 39e3acc..6877cbb 100644
  2891. --- a/crypto/sha1_generic.c
  2892. +++ b/crypto/sha1_generic.c
  2893. @@ -26,6 +26,13 @@
  2894.  #include <crypto/sha1_base.h>
  2895.  #include <asm/byteorder.h>
  2896.  
  2897. +const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
  2898. +   0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
  2899. +   0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
  2900. +   0xaf, 0xd8, 0x07, 0x09
  2901. +};
  2902. +EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
  2903. +
  2904.  static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
  2905.                   int blocks)
  2906.  {
  2907. diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
  2908. index 7843116..8f9c47e 100644
  2909. --- a/crypto/sha256_generic.c
  2910. +++ b/crypto/sha256_generic.c
  2911. @@ -27,6 +27,22 @@
  2912.  #include <asm/byteorder.h>
  2913.  #include <asm/unaligned.h>
  2914.  
  2915. +const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
  2916. +   0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
  2917. +   0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
  2918. +   0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
  2919. +   0x2f
  2920. +};
  2921. +EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
  2922. +
  2923. +const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
  2924. +   0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
  2925. +   0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
  2926. +   0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
  2927. +   0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
  2928. +};
  2929. +EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
  2930. +
  2931.  static inline u32 Ch(u32 x, u32 y, u32 z)
  2932.  {
  2933.     return z ^ (x & (y ^ z));
  2934. diff --git a/include/crypto/md5.h b/include/crypto/md5.h
  2935. index 146af825..327deac 100644
  2936. --- a/include/crypto/md5.h
  2937. +++ b/include/crypto/md5.h
  2938. @@ -13,6 +13,8 @@
  2939.  #define MD5_H2 0x98badcfeUL
  2940.  #define MD5_H3 0x10325476UL
  2941.  
  2942. +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
  2943. +
  2944.  struct md5_state {
  2945.     u32 hash[MD5_HASH_WORDS];
  2946.     u32 block[MD5_BLOCK_WORDS];
  2947. diff --git a/include/crypto/sha.h b/include/crypto/sha.h
  2948. index dd7905a..c94d3eb 100644
  2949. --- a/include/crypto/sha.h
  2950. +++ b/include/crypto/sha.h
  2951. @@ -64,6 +64,12 @@
  2952.  #define SHA512_H6  0x1f83d9abfb41bd6bULL
  2953.  #define SHA512_H7  0x5be0cd19137e2179ULL
  2954.  
  2955. +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
  2956. +
  2957. +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
  2958. +
  2959. +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
  2960. +
  2961.  struct sha1_state {
  2962.     u32 state[SHA1_DIGEST_SIZE / 4];
  2963.     u64 count;
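
These exported digests let a hardware driver answer zero-length requests
without touching the engine at all, which is exactly what the
zero_message_process() call in the rk3288 patch above relies on. The
pattern looks roughly like the following; this body is a reconstruction
for illustration, not quoted from the patchset.

/* needs <crypto/hash.h>, <crypto/md5.h> and <crypto/sha.h> */
static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(tfm);

	/* pick the precalculated empty-message digest by output size */
	switch (digestsize) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, digestsize);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, digestsize);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, digestsize);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
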