/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#include <plat/cpu.h>
#include <plat/dma.h>

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

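/*
 * Example: FLD_MASK(7, 0) == 0xff and FLD_VAL(0x3, 4, 3) == 0x18,
 * i.e. the value 0x3 placed into bits 4:3.
 */
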
#define AES_REG_KEY(x)		(0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x)		(0x20 + ((x) * 0x04))

#define AES_REG_CTRL		0x30
#define AES_REG_CTRL_CTR_WIDTH	(1 << 7)
#define AES_REG_CTRL_CTR	(1 << 6)
#define AES_REG_CTRL_CBC	(1 << 5)
#define AES_REG_CTRL_KEY_SIZE	(3 << 3)
#define AES_REG_CTRL_DIRECTION	(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA		0x34
#define AES_REG_DATA_N(x)	(0x34 + ((x) * 0x04))

#define AES_REG_REV		0x44
#define AES_REG_REV_MAJOR	0xF0
#define AES_REG_REV_MINOR	0x0F

#define AES_REG_MASK		0x48
#define AES_REG_MASK_SIDLE	(1 << 6)
#define AES_REG_MASK_START	(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN	(1 << 3)
#define AES_REG_MASK_DMA_IN_EN	(1 << 2)
#define AES_REG_MASK_SOFTRESET	(1 << 1)
#define AES_REG_AUTOIDLE	(1 << 0)

#define AES_REG_SYSSTATUS	0x4C
#define AES_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)

#define FLAGS_NEW_KEY		BIT(4)
#define FLAGS_NEW_IV		BIT(5)
#define FLAGS_INIT		BIT(6)
#define FLAGS_FAST		BIT(7)
#define FLAGS_BUSY		8

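/*
 * Note: FLAGS_BUSY is a bit number, used with test_and_set_bit()/clear_bit()
 * on dd->flags, while the BIT() flags above are masks ORed into the same word.
 */
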
struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct clk		*iclk;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;

	u32			*iv;
	u32			ctrl;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	int			dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	int			dma_lch_out;
	dma_addr_t		dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

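/* Busy-wait until @bit in the register at @offset goes high, up to DEFAULT_TIMEOUT. */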
static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT;

	while (!(omap_aes_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout)) {
			dev_err(dd->dev, "omap-aes timeout\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	int err = 0;

	clk_enable(dd->iclk);
	if (!(dd->flags & FLAGS_INIT)) {
		/* is it necessary to reset before every operation? */
		omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
					AES_REG_MASK_SOFTRESET);
		/*
		 * prevent OCP bus error (SRESP) in case an access to the module
		 * is performed while the module is coming out of soft reset
		 */
		__asm__ __volatile__("nop");
		__asm__ __volatile__("nop");

		err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
				AES_REG_SYSSTATUS_RESETDONE);
		if (!err)
			dd->flags |= FLAGS_INIT;
	}

	return err;
}

static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
{
	clk_disable(dd->iclk);
}

static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i;
	u32 val, mask;

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
	    !(dd->ctx->flags & FLAGS_NEW_KEY))
		goto out;

	/* only need to write control registers for new settings */

	dd->ctrl = val;

	val = 0;
	if (dd->dma_lch_out >= 0)
		val |= AES_REG_MASK_DMA_OUT_EN;
	if (dd->dma_lch_in >= 0)
		val |= AES_REG_MASK_DMA_IN_EN;

	mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

	pr_debug("Set key\n");
	key32 = dd->ctx->keylen / sizeof(u32);
	/* set a key */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(i),
			       __le32_to_cpu(dd->ctx->key[i]));
	}
	dd->ctx->flags &= ~FLAGS_NEW_KEY;

	if (dd->flags & FLAGS_NEW_IV) {
		pr_debug("Set IV\n");
		omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
		dd->flags &= ~FLAGS_NEW_IV;
	}

	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
	       AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);

out:
	/* start DMA or disable idle mode */
	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
			    AES_REG_MASK_START);
}

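/*
 * Pick the AES core to use for this context: the first registered device is
 * taken and cached in ctx->dd, so subsequent requests reuse the same core.
 */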
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_aes_dev *dd = data;

	if (lch == dd->dma_lch_out)
		tasklet_schedule(&dd->task);
}

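/*
 * Allocate the bounce buffers, map them for DMA and request the RX/TX DMA
 * channels; both channels use constant addressing on the AES data register.
 */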
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->dma_lch_out = -1;
	dd->dma_lch_in = -1;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	err = omap_request_dma(dd->dma_in, "omap-aes-rx",
			       omap_aes_dma_callback, dd, &dd->dma_lch_in);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		goto err_dma_in;
	}
	err = omap_request_dma(dd->dma_out, "omap-aes-tx",
			       omap_aes_dma_callback, dd, &dd->dma_lch_out);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		goto err_dma_out;
	}

	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
				 dd->phys_base + AES_REG_DATA, 0, 4);

	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);

	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
				dd->phys_base + AES_REG_DATA, 0, 4);

	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);

	return 0;

err_dma_out:
	omap_free_dma(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	omap_free_dma(dd->dma_lch_out);
	omap_free_dma(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

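/*
 * Copy up to @buflen bytes between the scatterlist and a linear buffer
 * (direction selected by @out), advancing *sg and *offset across entries.
 * Returns the number of bytes actually copied.
 */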
static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
			      dma_addr_t dma_addr_out, int length)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	int len32;

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	/* IN */
	omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
				     len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
				     OMAP_DMA_DST_SYNC);

	omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr_in, 0, 0);

	/* OUT */
	omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
				     len32, 1, OMAP_DMA_SYNC_PACKET,
				     dd->dma_out, OMAP_DMA_SRC_SYNC);

	omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_addr_out, 0, 0);

	omap_start_dma(dd->dma_lch_in);
	omap_start_dma(dd->dma_lch_out);

	omap_aes_write_ctrl(dd);

	return 0;
}

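/*
 * If both src and dst scatterlists consist of a single, word-aligned entry,
 * DMA directly to/from the caller's buffers ("fast" path); otherwise bounce
 * the data through the pre-allocated, pre-mapped buf_in/buf_out pages.
 */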
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	pr_debug("total: %d\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total)
			return -EINVAL;

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;

	}

	dd->total -= count;

	err = omap_aes_hw_init(dd);

	err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct omap_aes_ctx *ctx;

	pr_debug("err: %d\n", err);

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));

	if (!dd->total)
		dd->req->base.complete(&dd->req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %d\n", dd->total);

	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

	omap_aes_hw_cleanup(dd);

	omap_stop_dma(dd->dma_lch_in);
	omap_stop_dma(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %u\n", count);
		}
	}

	if (err || !dd->total)
		omap_aes_finish_req(dd, err);

	return err;
}

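/*
 * Dequeue the next request from the crypto queue (or resume the current one
 * if data remains), bind it and its context/IV to the device and start DMA.
 */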
static int omap_aes_handle_req(struct omap_aes_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;

	if (dd->total)
		goto start;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return 0;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	pr_debug("get new req\n");

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->iv = req->info;
	if ((dd->flags & FLAGS_CBC) && dd->iv)
		dd->flags |= FLAGS_NEW_IV;
	else
		dd->flags &= ~FLAGS_NEW_IV;

	ctx->dd = dd;
	if (dd->ctx != ctx) {
		/* assign new context to device */
		dd->ctx = ctx;
		ctx->flags |= FLAGS_NEW_KEY;
	}

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
		pr_err("request size is not exact amount of AES blocks\n");

start:
	return omap_aes_crypt_dma_start(dd);
}

static void omap_aes_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	pr_debug("enter\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = omap_aes_handle_req(dd);

	pr_debug("exit\n");
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;
	unsigned long flags;
	int err;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
		omap_aes_handle_req(dd);

	pr_debug("exit\n");

	return err;
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	ctx->flags |= FLAGS_NEW_KEY;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type\n");
		err = -ENODEV;
		goto err_res;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res)
		dev_info(dev, "no DMA info\n");
	else
		dd->dma_out = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!res)
		dev_info(dev, "no DMA info\n");
	else
		dd->dma_in = res->start;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto err_res;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto err_io;
	}

	clk_enable(dd->iclk);
	reg = omap_aes_read(dd, AES_REG_REV);
	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
	clk_disable(dd->iclk);

	tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		pr_debug("i: %d\n", i);
		INIT_LIST_HEAD(&algs[i].cra_list);
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	pr_info("probe() done\n");

	return 0;
err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->task);
	iounmap(dd->io_base);
err_io:
	clk_put(dd->iclk);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&dd->task);
	omap_aes_dma_cleanup(dd);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	kfree(dd);
	dd = NULL;

	return 0;
}

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
	},
};

static int __init omap_aes_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-aes");

	if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
	platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");