/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#include <mach/cpu.h>
#include <mach/dma.h>

/* #define L4_34XX_BASE 0x48000000 */
#define OMAP34XX_SEC_BASE       (L4_34XX_BASE + 0xA0000)
#define OMAP34XX_SEC_AES_BASE   (OMAP34XX_SEC_BASE + 0x25000)

/*
 * OMAP TRM gives bitfields as start:end, where start is the higher bit
 * number. For example 7:0
 */
#define FLD_MASK(start, end)    (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

#define AES_REG_KEY(x)          (0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x)           (0x20 + ((x) * 0x04))

#define AES_REG_CTRL            0x30
#define AES_REG_CTRL_CTR_WIDTH  (1 << 7)
#define AES_REG_CTRL_CTR        (1 << 6)
#define AES_REG_CTRL_CBC        (1 << 5)
#define AES_REG_CTRL_KEY_SIZE   (3 << 3)
#define AES_REG_CTRL_DIRECTION  (1 << 2)
#define AES_REG_CTRL_INPUT_READY  (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY (1 << 0)

#define AES_REG_DATA            0x34
#define AES_REG_DATA_N(x)       (0x34 + ((x) * 0x04))

#define AES_REG_REV             0x44
#define AES_REG_REV_MAJOR       0xF0
#define AES_REG_REV_MINOR       0x0F

#define AES_REG_MASK            0x48
#define AES_REG_MASK_SIDLE      (1 << 6)
#define AES_REG_MASK_START      (1 << 5)
#define AES_REG_MASK_DMA_OUT_EN (1 << 3)
#define AES_REG_MASK_DMA_IN_EN  (1 << 2)
#define AES_REG_MASK_SOFTRESET  (1 << 1)
#define AES_REG_AUTOIDLE        (1 << 0)

#define AES_REG_SYSSTATUS       0x4C
#define AES_REG_SYSSTATUS_RESETDONE (1 << 0)

#ifdef CONFIG_ARCH_OMAP24XX
#define AES_ICLK        "aes_ick"
#endif
#ifdef CONFIG_ARCH_OMAP34XX
#define AES_ICLK        "aes2_ick"
#endif

#define DEFAULT_TIMEOUT         (5 * HZ)

#define FLAGS_MODE_MASK         0x000f
#define FLAGS_ENCRYPT           BIT(0)
#define FLAGS_CBC               BIT(1)
#define FLAGS_GIV               BIT(2)

#define FLAGS_NEW_KEY           BIT(4)
#define FLAGS_NEW_IV            BIT(5)
#define FLAGS_INIT              BIT(6)
#define FLAGS_FAST              BIT(7)
#define FLAGS_BUSY              8

struct omap_aes_ctx {
        struct omap_aes_dev *dd;

        int keylen;
        u32 key[AES_KEYSIZE_256 / sizeof(u32)];
        unsigned long flags;
};

struct omap_aes_reqctx {
        unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH   1
#define OMAP_AES_CACHE_SIZE     0

struct omap_aes_dev {
        struct list_head list;
        unsigned long phys_base;
        void __iomem *io_base;
        struct clk *iclk;
        struct omap_aes_ctx *ctx;
        struct device *dev;
        unsigned long flags;

        u32 *iv;
        u32 ctrl;

        spinlock_t lock;
        struct crypto_queue queue;

        struct tasklet_struct task;

        struct ablkcipher_request *req;
        size_t total;
        struct scatterlist *in_sg;
        size_t in_offset;
        struct scatterlist *out_sg;
        size_t out_offset;

        size_t buflen;
        void *buf_in;
        size_t dma_size;
        int dma_in;
        int dma_lch_in;
        dma_addr_t dma_addr_in;
        void *buf_out;
        int dma_out;
        int dma_lch_out;
        dma_addr_t dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                                  u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                       u32 value, u32 mask)
{
        u32 val;

        val = omap_aes_read(dd, offset);
        val &= ~mask;
        val |= value;
        omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                             u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                omap_aes_write(dd, offset, *value);
}

static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
{
        unsigned long timeout = jiffies + DEFAULT_TIMEOUT;

        while (!(omap_aes_read(dd, offset) & bit)) {
                if (time_is_before_jiffies(timeout)) {
                        dev_err(dd->dev, "omap-aes timeout\n");
                        return -ETIMEDOUT;
                }
        }
        return 0;
}

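/*
 * Enable the interface clock and, on first use only, soft-reset the AES
 * module and wait for SYSSTATUS to report that the reset has completed.
 */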
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
        int err = 0;

        clk_enable(dd->iclk);
        if (!(dd->flags & FLAGS_INIT)) {
                /* is it necessary to reset before every operation? */
                omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
                                    AES_REG_MASK_SOFTRESET);
                /*
                 * prevent OCP bus error (SRESP) in case an access to the module
                 * is performed while the module is coming out of soft reset
                 */
                __asm__ __volatile__("nop");
                __asm__ __volatile__("nop");

                err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
                                    AES_REG_SYSSTATUS_RESETDONE);
                if (!err)
                        dd->flags |= FLAGS_INIT;
        }

        return err;
}

static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
{
        clk_disable(dd->iclk);
}

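/*
 * Program the key, IV and control register for the current request, then
 * set the START bit. The key, IV and CTRL writes are skipped when neither
 * the key, the IV nor the mode has changed since the previous request.
 */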
static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
        unsigned int key32;
        int i;
        u32 val, mask;

        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
        if (dd->flags & FLAGS_CBC)
                val |= AES_REG_CTRL_CBC;
        if (dd->flags & FLAGS_ENCRYPT)
                val |= AES_REG_CTRL_DIRECTION;

        if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
            !(dd->ctx->flags & FLAGS_NEW_KEY))
                goto out;

        /* only need to write control registers for new settings */

        dd->ctrl = val;

        val = 0;
        if (dd->dma_lch_out >= 0)
                val |= AES_REG_MASK_DMA_OUT_EN;
        if (dd->dma_lch_in >= 0)
                val |= AES_REG_MASK_DMA_IN_EN;

        mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

        omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

        pr_debug("Set key\n");
        key32 = dd->ctx->keylen / sizeof(u32);
        /* set a key */
        for (i = 0; i < key32; i++) {
                omap_aes_write(dd, AES_REG_KEY(i),
                               __le32_to_cpu(dd->ctx->key[i]));
        }
        dd->ctx->flags &= ~FLAGS_NEW_KEY;

        if (dd->flags & FLAGS_NEW_IV) {
                pr_debug("Set IV\n");
                omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
                dd->flags &= ~FLAGS_NEW_IV;
        }

        mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
               AES_REG_CTRL_KEY_SIZE;

        omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);

out:
        /* start DMA or disable idle mode */
        omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
                            AES_REG_MASK_START);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
        struct omap_aes_dev *dd = NULL, *tmp;

        spin_lock_bh(&list_lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &dev_list, list) {
                        /* FIXME: take the first available aes core */
                        dd = tmp;
                        break;
                }
                ctx->dd = dd;
        } else {
                /* already found before */
                dd = ctx->dd;
        }
        spin_unlock_bh(&list_lock);

        return dd;
}

static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
{
        struct omap_aes_dev *dd = data;

        if (lch == dd->dma_lch_out)
                tasklet_schedule(&dd->task);
}

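/*
 * Allocate the bounce buffers used by the "slow" path, map them for DMA
 * and request the RX/TX DMA channels. The device end of both channels is
 * fixed at the AES_REG_DATA FIFO using a constant address mode.
 */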
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
        int err = -ENOMEM;

        dd->dma_lch_out = -1;
        dd->dma_lch_in = -1;

        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
        dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf_in || !dd->buf_out) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }

        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
                                         DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
                                          DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }

        err = omap_request_dma(dd->dma_in, "omap-aes-rx",
                               omap_aes_dma_callback, dd, &dd->dma_lch_in);
        if (err) {
                dev_err(dd->dev, "Unable to request DMA channel\n");
                goto err_dma_in;
        }
        err = omap_request_dma(dd->dma_out, "omap-aes-tx",
                               omap_aes_dma_callback, dd, &dd->dma_lch_out);
        if (err) {
                dev_err(dd->dev, "Unable to request DMA channel\n");
                goto err_dma_out;
        }

        omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
                                 dd->phys_base + AES_REG_DATA, 0, 4);

        omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
        omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);

        omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
                                dd->phys_base + AES_REG_DATA, 0, 4);

        omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
        omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);

        return 0;

err_dma_out:
        omap_free_dma(dd->dma_lch_in);
err_dma_in:
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
        if (err)
                pr_err("error: %d\n", err);
        return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
        omap_free_dma(dd->dma_lch_out);
        omap_free_dma(dd->dma_lch_in);
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
        free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
        free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
                        unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;

        if (!nbytes)
                return;

        scatterwalk_start(&walk, sg);
        scatterwalk_advance(&walk, start);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}

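/*
 * Copy up to buflen bytes between a scatterlist and a linear buffer,
 * advancing *sg/*offset so that the next call continues where this one
 * stopped. Returns the number of bytes actually copied.
 */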
static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
                   size_t buflen, size_t total, int out)
{
        unsigned int count, off = 0;

        while (buflen && total) {
                count = min((*sg)->length - *offset, total);
                count = min(count, buflen);

                if (!count)
                        return off;

                sg_copy_buf(buf + off, *sg, *offset, count, out);

                off += count;
                buflen -= count;
                *offset += count;
                total -= count;

                if (*offset == (*sg)->length) {
                        *sg = sg_next(*sg);
                        if (*sg)
                                *offset = 0;
                        else
                                total = 0;
                }
        }

        return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
                              dma_addr_t dma_addr_out, int length)
{
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct omap_aes_dev *dd = ctx->dd;
        int len32;

        pr_debug("len: %d\n", length);

        dd->dma_size = length;

        if (!(dd->flags & FLAGS_FAST))
                dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                           DMA_TO_DEVICE);

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        /* IN */
        omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
                                     len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
                                     OMAP_DMA_DST_SYNC);

        omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_addr_in, 0, 0);

        /* OUT */
        omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
                                     len32, 1, OMAP_DMA_SYNC_PACKET,
                                     dd->dma_out, OMAP_DMA_SRC_SYNC);

        omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_addr_out, 0, 0);

        omap_start_dma(dd->dma_lch_in);
        omap_start_dma(dd->dma_lch_out);

        omap_aes_write_ctrl(dd);

        return 0;
}

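/*
 * Prepare one DMA transfer for the current request. Single-entry,
 * word-aligned scatterlists are mapped directly ("fast" path); anything
 * else is staged through the pre-allocated bounce buffers.
 */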
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
                                        crypto_ablkcipher_reqtfm(dd->req));
        int err, fast = 0, in, out;
        size_t count;
        dma_addr_t addr_in, addr_out;

        pr_debug("total: %zu\n", dd->total);

        if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
                /* check for alignment */
                in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
                out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

                fast = in && out;
        }

        if (fast) {
                count = min(dd->total, sg_dma_len(dd->in_sg));
                count = min(count, sg_dma_len(dd->out_sg));

                if (count != dd->total)
                        return -EINVAL;

                pr_debug("fast\n");

                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                        return -EINVAL;
                }

                addr_in = sg_dma_address(dd->in_sg);
                addr_out = sg_dma_address(dd->out_sg);

                dd->flags |= FLAGS_FAST;

        } else {
                /* use cache buffers */
                count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
                                dd->buflen, dd->total, 0);

                addr_in = dd->dma_addr_in;
                addr_out = dd->dma_addr_out;

                dd->flags &= ~FLAGS_FAST;

        }

        dd->total -= count;

        err = omap_aes_hw_init(dd);

        err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);

        return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
        struct omap_aes_ctx *ctx;

        pr_debug("err: %d\n", err);

        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));

        if (!dd->total)
                dd->req->base.complete(&dd->req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
        int err = 0;
        size_t count;

        pr_debug("total: %zu\n", dd->total);

        omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

        omap_aes_hw_cleanup(dd);

        omap_stop_dma(dd->dma_lch_in);
        omap_stop_dma(dd->dma_lch_out);

        if (dd->flags & FLAGS_FAST) {
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
        } else {
                /* make the DMA output visible to the CPU before copying out */
                dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                                        dd->dma_size, DMA_FROM_DEVICE);

                /* copy data */
                count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
                                dd->buflen, dd->dma_size, 1);
                if (count != dd->dma_size) {
                        err = -EINVAL;
                        pr_err("not all data converted: %zu\n", count);
                }
        }

        if (err || !dd->total)
                omap_aes_finish_req(dd, err);

        return err;
}

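/*
 * Dequeue the next request (unless a transfer for the current one is
 * still pending), load its parameters into the device struct and kick
 * off the first DMA transfer.
 */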
static int omap_aes_handle_req(struct omap_aes_dev *dd)
{
        struct crypto_async_request *async_req, *backlog;
        struct omap_aes_ctx *ctx;
        struct omap_aes_reqctx *rctx;
        struct ablkcipher_request *req;
        unsigned long flags;

        if (dd->total)
                goto start;

        spin_lock_irqsave(&dd->lock, flags);
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (!async_req)
                clear_bit(FLAGS_BUSY, &dd->flags);
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return 0;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        pr_debug("get new req\n");

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        dd->iv = req->info;
        if ((dd->flags & FLAGS_CBC) && dd->iv)
                dd->flags |= FLAGS_NEW_IV;
        else
                dd->flags &= ~FLAGS_NEW_IV;

        ctx->dd = dd;
        if (dd->ctx != ctx) {
                /* assign new context to device */
                dd->ctx = ctx;
                ctx->flags |= FLAGS_NEW_KEY;
        }

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
                pr_err("request size is not a multiple of the AES block size\n");

start:
        return omap_aes_crypt_dma_start(dd);
}

static void omap_aes_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
        int err;

        pr_debug("enter\n");

        err = omap_aes_crypt_dma_stop(dd);

        err = omap_aes_handle_req(dd);

        pr_debug("exit\n");
}

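/*
 * Common entry point for all encrypt/decrypt calls: record the requested
 * mode, queue the request and start processing if the device is idle.
 */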
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct omap_aes_dev *dd;
        unsigned long flags;
        int err;

        pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
                 !!(mode & FLAGS_ENCRYPT),
                 !!(mode & FLAGS_CBC));

        dd = omap_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        spin_lock_irqsave(&dd->lock, flags);
        err = ablkcipher_enqueue_request(&dd->queue, req);
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
                omap_aes_handle_req(dd);

        pr_debug("exit\n");

        return err;
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        pr_debug("enter, keylen: %d\n", keylen);

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;
        ctx->flags |= FLAGS_NEW_KEY;

        return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
        pr_debug("enter\n");

        tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

        return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
        pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-omap",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_ecb_encrypt,
                .decrypt        = omap_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-omap",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_cbc_encrypt,
                .decrypt        = omap_aes_cbc_decrypt,
        }
}
};

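/*
 * Platform driver glue: map the register space, grab the interface clock
 * and DMA channels, then register the ECB/CBC ablkcipher algorithms.
 */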
static int omap_aes_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct omap_aes_dev *dd;
        struct resource *res;
        int err = -ENOMEM, i, j;
        u32 reg;

        dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                goto err_data;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        spin_lock_init(&dd->lock);
        crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

        /* Get the base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "invalid resource type\n");
                err = -ENODEV;
                goto err_res;
        }
        dd->phys_base = res->start;

        /* Get the TX DMA channel */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!res)
                dev_info(dev, "no DMA info\n");
        else
                dd->dma_out = res->start;

        /* Get the RX DMA channel */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!res)
                dev_info(dev, "no DMA info\n");
        else
                dd->dma_in = res->start;

        /* Initializing the clock */
        dd->iclk = clk_get(NULL, AES_ICLK);
        if (IS_ERR(dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(dd->iclk);
                goto err_res;
        }

        dd->io_base = ioremap(dd->phys_base, SZ_4K);
        if (!dd->io_base) {
                dev_err(dev, "can't ioremap\n");
                err = -ENOMEM;
                goto err_io;
        }

        clk_enable(dd->iclk);
        reg = omap_aes_read(dd, AES_REG_REV);
        dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
                 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
        clk_disable(dd->iclk);

        tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);

        err = omap_aes_dma_init(dd);
        if (err)
                goto err_dma;

        INIT_LIST_HEAD(&dd->list);
        spin_lock(&list_lock);
        list_add_tail(&dd->list, &dev_list);
        spin_unlock(&list_lock);

        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                pr_debug("i: %d\n", i);
                INIT_LIST_HEAD(&algs[i].cra_list);
                err = crypto_register_alg(&algs[i]);
                if (err)
                        goto err_algs;
        }

        pr_info("probe() done\n");

        return 0;
err_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);
        omap_aes_dma_cleanup(dd);
err_dma:
        tasklet_kill(&dd->task);
        iounmap(dd->io_base);
err_io:
        clk_put(dd->iclk);
err_res:
        kfree(dd);
        dd = NULL;
err_data:
        dev_err(dev, "initialization failed.\n");
        return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
        int i;

        if (!dd)
                return -ENODEV;

        spin_lock(&list_lock);
        list_del(&dd->list);
        spin_unlock(&list_lock);

        for (i = 0; i < ARRAY_SIZE(algs); i++)
                crypto_unregister_alg(&algs[i]);

        tasklet_kill(&dd->task);
        omap_aes_dma_cleanup(dd);
        iounmap(dd->io_base);
        clk_put(dd->iclk);
        kfree(dd);
        dd = NULL;

        return 0;
}

#ifdef CONFIG_ARCH_OMAP24XX
static struct resource aes_resources[] = {
        {
                .start  = OMAP24XX_SEC_AES_BASE,
                .end    = OMAP24XX_SEC_AES_BASE + 0x4C,
                .flags  = IORESOURCE_MEM,
        },
        {
                .start  = OMAP24XX_DMA_AES_TX,
                .flags  = IORESOURCE_DMA,
        },
        {
                .start  = OMAP24XX_DMA_AES_RX,
                .flags  = IORESOURCE_DMA,
        }
};
#endif
#ifdef CONFIG_ARCH_OMAP34XX
static struct resource aes_resources[] = {
        {
                .start  = OMAP34XX_SEC_AES_BASE,
                .end    = OMAP34XX_SEC_AES_BASE + 0x4C,
                .flags  = IORESOURCE_MEM,
        },
        {
                .start  = OMAP34XX_DMA_AES2_TX,
                .flags  = IORESOURCE_DMA,
        },
        {
                .start  = OMAP34XX_DMA_AES2_RX,
                .flags  = IORESOURCE_DMA,
        }
};
#endif

static void omap_aes_release(struct device *dev)
{
}

static struct platform_device aes_device = {
        .name           = "omap-aes",
        .id             = -1,
        .num_resources  = ARRAY_SIZE(aes_resources),
        .resource       = aes_resources,
        .dev.release    = omap_aes_release,
};

static struct platform_driver omap_aes_driver = {
        .probe  = omap_aes_probe,
        .remove = omap_aes_remove,
        .driver = {
                .name   = "omap-aes",
                .owner  = THIS_MODULE,
        },
};

static int __init omap_aes_mod_init(void)
{
        int ret;

        pr_info("loading %s driver\n", "omap-aes");

        if (!cpu_class_is_omap2() ||
            omap_type() != OMAP2_DEVICE_TYPE_SEC) {
                pr_err("Unsupported cpu\n");
                return -ENODEV;
        }

        ret = platform_driver_register(&omap_aes_driver);
        if (ret)
                return ret;

        ret = platform_device_register(&aes_device);
        if (ret)
                goto err;

        return 0;

err:
        platform_driver_unregister(&omap_aes_driver);

        return ret;
}

static void __exit omap_aes_mod_exit(void)
{
        platform_device_unregister(&aes_device);
        platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");