/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#include <mach/cpu.h>
#include <mach/dma.h>

#define OMAP34XX_SEC_BASE   (L4_34XX_BASE + 0xA0000)
#define OMAP34XX_SEC_AES_BASE   (OMAP34XX_SEC_BASE + 0x25000)

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)    (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

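/*
 * FLD_VAL() example: the KEY_SIZE field occupies bits 4:3, so
 * FLD_MASK(4, 3) == 0x18 and FLD_VAL(1, 4, 3) == 0x08.
 *
 * AES_REG_KEY(x) maps key word x to its register offset: the key
 * registers end at 0x1C and are filled downwards, with each adjacent
 * pair of words swapped by the (x ^ 0x01) term.
 */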
#define AES_REG_KEY(x)          (0x1C - (((x) ^ 0x01) * 0x04))
#define AES_REG_IV(x)           (0x20 + ((x) * 0x04))

#define AES_REG_CTRL            0x30
#define AES_REG_CTRL_CTR_WIDTH      (1 << 7)
#define AES_REG_CTRL_CTR        (1 << 6)
#define AES_REG_CTRL_CBC        (1 << 5)
#define AES_REG_CTRL_KEY_SIZE       (3 << 3)
#define AES_REG_CTRL_DIRECTION      (1 << 2)
#define AES_REG_CTRL_INPUT_READY    (1 << 1)
#define AES_REG_CTRL_OUTPUT_READY   (1 << 0)

#define AES_REG_DATA            0x34
#define AES_REG_DATA_N(x)       (0x34 + ((x) * 0x04))

#define AES_REG_REV         0x44
#define AES_REG_REV_MAJOR       0xF0
#define AES_REG_REV_MINOR       0x0F

#define AES_REG_MASK            0x48
#define AES_REG_MASK_SIDLE      (1 << 6)
#define AES_REG_MASK_START      (1 << 5)
#define AES_REG_MASK_DMA_OUT_EN     (1 << 3)
#define AES_REG_MASK_DMA_IN_EN      (1 << 2)
#define AES_REG_MASK_SOFTRESET      (1 << 1)
#define AES_REG_AUTOIDLE        (1 << 0)

#define AES_REG_SYSSTATUS       0x4C
#define AES_REG_SYSSTATUS_RESETDONE (1 << 0)

#define DEFAULT_TIMEOUT     (5*HZ)

#define FLAGS_MODE_MASK     0x000f
#define FLAGS_ENCRYPT       BIT(0)
#define FLAGS_CBC       BIT(1)
#define FLAGS_GIV       BIT(2)

#define FLAGS_NEW_KEY       BIT(4)
#define FLAGS_NEW_IV        BIT(5)
#define FLAGS_INIT      BIT(6)
#define FLAGS_FAST      BIT(7)
#define FLAGS_BUSY      8   /* bit number for test_and_set_bit(), not a mask */

#ifdef CONFIG_ARCH_OMAP24XX
#define AES_ICLK    "aes_ick"
#endif
#ifdef CONFIG_ARCH_OMAP34XX
#define AES_ICLK    "aes2_ick"
#endif

struct omap_aes_ctx {
    struct omap_aes_dev *dd;

    int     keylen;
    u32     key[AES_KEYSIZE_256 / sizeof(u32)];
    unsigned long   flags;
};

struct omap_aes_reqctx {
    unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH   1
#define OMAP_AES_CACHE_SIZE 0

struct omap_aes_dev {
    struct list_head    list;
    unsigned long       phys_base;
    void __iomem        *io_base;
    struct clk      *iclk;
    struct omap_aes_ctx *ctx;
    struct device       *dev;
    unsigned long       flags;

    u32         *iv;
    u32         ctrl;

    spinlock_t          lock;
    struct crypto_queue     queue;

    struct tasklet_struct       task;

    struct ablkcipher_request   *req;
    size_t              total;
    struct scatterlist      *in_sg;
    size_t              in_offset;
    struct scatterlist      *out_sg;
    size_t              out_offset;

    size_t          buflen;
    void            *buf_in;
    size_t          dma_size;
    int         dma_in;
    int         dma_lch_in;
    dma_addr_t      dma_addr_in;
    void            *buf_out;
    int         dma_out;
    int         dma_lch_out;
    dma_addr_t      dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

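/*
 * MMIO accessors. omap_aes_write_mask() does a read-modify-write so that
 * only the bits in 'mask' are touched.
 */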
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
    return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                  u32 value)
{
    __raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                    u32 value, u32 mask)
{
    u32 val;

    val = omap_aes_read(dd, offset);
    val &= ~mask;
    val |= value;
    omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                    u32 *value, int count)
{
    for (; count--; value++, offset += 4)
        omap_aes_write(dd, offset, *value);
}

static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
{
    unsigned long timeout = jiffies + DEFAULT_TIMEOUT;

    while (!(omap_aes_read(dd, offset) & bit)) {
        if (time_is_before_jiffies(timeout)) {
            dev_err(dd->dev, "omap-aes timeout\n");
            return -ETIMEDOUT;
        }
    }
    return 0;
}

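/*
 * Enable the interface clock and, on first use, soft-reset the module and
 * wait for SYSSTATUS.RESETDONE. The reset is done only once (FLAGS_INIT);
 * subsequent calls just re-enable the clock.
 */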
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
    int err = 0;

    clk_enable(dd->iclk);
    if (!(dd->flags & FLAGS_INIT)) {
        /* is it necessary to reset before every operation? */
        omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
                    AES_REG_MASK_SOFTRESET);
        /*
         * prevent OCP bus error (SRESP) in case an access to the module
         * is performed while the module is coming out of soft reset
         */
        __asm__ __volatile__("nop");
        __asm__ __volatile__("nop");

        err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
                AES_REG_SYSSTATUS_RESETDONE);
        if (!err)
            dd->flags |= FLAGS_INIT;
    }

    return err;
}

static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
{
    clk_disable(dd->iclk);
}

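/*
 * Program CTRL, key and IV. The writes are skipped entirely when the
 * control value is unchanged and neither a new key nor a new IV is
 * pending; the START bit is (re)armed in either case.
 */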
static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
    unsigned int key32;
    int i;
    u32 val, mask;

    val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
    if (dd->flags & FLAGS_CBC)
        val |= AES_REG_CTRL_CBC;
    if (dd->flags & FLAGS_ENCRYPT)
        val |= AES_REG_CTRL_DIRECTION;

#if 0
    /* it will not affect DMA error but might solve crypto issue */
    dd->ctx->flags |= FLAGS_NEW_KEY;
    dd->ctx->flags |= FLAGS_NEW_IV;
#endif

    if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
           !(dd->ctx->flags & FLAGS_NEW_KEY))
        goto out;

    /* only need to write control registers for new settings */

    dd->ctrl = val;

    val = 0;
    if (dd->dma_lch_out >= 0)
        val |= AES_REG_MASK_DMA_OUT_EN;
    if (dd->dma_lch_in >= 0)
        val |= AES_REG_MASK_DMA_IN_EN;

    mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

    omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

    key32 = dd->ctx->keylen / sizeof(u32);
    /* set a key */
    for (i = 0; i < key32; i++) {
        omap_aes_write(dd, AES_REG_KEY(i),
            __le32_to_cpu(dd->ctx->key[i]));
    }
    dd->ctx->flags &= ~FLAGS_NEW_KEY;

    if (dd->flags & FLAGS_NEW_IV) {
        omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
        dd->flags &= ~FLAGS_NEW_IV;
    }

    mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
            AES_REG_CTRL_KEY_SIZE;

    omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);

out:
    /* start DMA or disable idle mode */
    omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
                AES_REG_MASK_START);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
    struct omap_aes_dev *dd = NULL, *tmp;

    spin_lock_bh(&list_lock);
    if (!ctx->dd) {
        list_for_each_entry(tmp, &dev_list, list) {
            /* FIXME: take first available aes core */
            dd = tmp;
            break;
        }
        ctx->dd = dd;
    } else {
        /* already found before */
        dd = ctx->dd;
    }
    spin_unlock_bh(&list_lock);

    return dd;
}

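/*
 * Both DMA channels share this callback; only completion of the OUT
 * channel (data fully read back from the engine) schedules the tasklet.
 */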
static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
{
    struct omap_aes_dev *dd = data;

    if (lch == dd->dma_lch_out)
        tasklet_schedule(&dd->task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
    int err = -ENOMEM;

    dd->dma_lch_out = -1;
    dd->dma_lch_in = -1;

    dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
    dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
    dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
    dd->buflen &= ~(AES_BLOCK_SIZE - 1);

    if (!dd->buf_in || !dd->buf_out) {
        dev_err(dd->dev, "unable to alloc pages.\n");
        goto err_alloc;
    }

    /* MAP here */
    dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
                     DMA_TO_DEVICE);
    if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
        dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
        err = -EINVAL;
        goto err_map_in;
    }

    dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
                      DMA_FROM_DEVICE);
    if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
        dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
        err = -EINVAL;
        goto err_map_out;
    }

    err = omap_request_dma(dd->dma_in, "omap-aes-rx",
                   omap_aes_dma_callback, dd, &dd->dma_lch_in);
    if (err) {
        dev_err(dd->dev, "Unable to request DMA channel\n");
        goto err_dma_in;
    }
    err = omap_request_dma(dd->dma_out, "omap-aes-tx",
                   omap_aes_dma_callback, dd, &dd->dma_lch_out);
    if (err) {
        dev_err(dd->dev, "Unable to request DMA channel\n");
        goto err_dma_out;
    }

    return 0;

err_dma_out:
    omap_free_dma(dd->dma_lch_in);
err_dma_in:
    dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
             DMA_FROM_DEVICE);
err_map_out:
    dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
    free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
    free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
    if (err)
        pr_err("error: %d\n", err);
    return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
    omap_free_dma(dd->dma_lch_out);
    omap_free_dma(dd->dma_lch_in);
    dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
             DMA_FROM_DEVICE);
    dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
    free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
    free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

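/*
 * Helpers for the slow path: data is bounced through the buf_in/buf_out
 * pages via scatterwalk, with sg_copy() advancing across scatterlist
 * entries until either the buffer or the request is exhausted.
 */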
static void sg_copy_buf(void *buf, struct scatterlist *sg,
                  unsigned int start, unsigned int nbytes, int out)
{
    struct scatter_walk walk;

    if (!nbytes)
        return;

    scatterwalk_start(&walk, sg);
    scatterwalk_advance(&walk, start);
    scatterwalk_copychunks(buf, &walk, nbytes, out);
    scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
           size_t buflen, size_t total, int out)
{
    unsigned int count, off = 0;

    while (buflen && total) {
        count = min((*sg)->length - *offset, total);
        count = min(count, buflen);

        if (!count)
            return off;

        sg_copy_buf(buf + off, *sg, *offset, count, out);

        off += count;
        buflen -= count;
        *offset += count;
        total -= count;

        if (*offset == (*sg)->length) {
            *sg = sg_next(*sg);
            if (*sg)
                *offset = 0;
            else
                total = 0;
        }
    }

    return off;
}

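/*
 * Set up and start both DMA channels for one chunk: the IN channel copies
 * 32-bit words from memory to the AES_REG_DATA FIFO (constant destination
 * address), the OUT channel copies results back, both packet-synchronized
 * on the AES DMA requests with 4-word bursts.
 */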
static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
                   dma_addr_t dma_addr_out, int length)
{
    struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
    struct omap_aes_dev *dd = ctx->dd;
    int len32;

    dd->dma_size = length;

    if (!(dd->flags & FLAGS_FAST))
        dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                       DMA_TO_DEVICE);

    len32 = DIV_ROUND_UP(length, sizeof(u32));

    /* IN */
    omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
                     len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
                    OMAP_DMA_DST_SYNC);

    omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
                dma_addr_in, 0, 0);

    /* OUT */
    omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
                     len32, 1, OMAP_DMA_SYNC_PACKET,
                    dd->dma_out, OMAP_DMA_SRC_SYNC);

    omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
                 dma_addr_out, 0, 0);

    omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
                 dd->phys_base + AES_REG_DATA, 0, 4);

    omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
    omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);

    omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
                dd->phys_base + AES_REG_DATA, 0, 4);

    omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
    omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);

    omap_aes_write_ctrl(dd);

    omap_start_dma(dd->dma_lch_in);
    omap_start_dma(dd->dma_lch_out);

    return 0;
}

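/*
 * Pick a transfer strategy: if both src and dst are single, word-aligned
 * scatterlist entries, DMA directly from/to them (FLAGS_FAST, zero copy);
 * otherwise stage as much as fits through the bounce buffers.
 */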
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
    struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
                    crypto_ablkcipher_reqtfm(dd->req));
    int err, fast = 0, in, out;
    size_t count;
    dma_addr_t addr_in, addr_out;

    if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
        /* check for alignment */
        in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
        out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

        fast = in && out;
    }

    if (fast)  {
        count = min(dd->total, sg_dma_len(dd->in_sg));
        count = min(count, sg_dma_len(dd->out_sg));

        if (count != dd->total)
            return -EINVAL;

        err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
        if (!err) {
            dev_err(dd->dev, "dma_map_sg() error\n");
            return -EINVAL;
        }

        err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
        if (!err) {
            dev_err(dd->dev, "dma_map_sg() error\n");
            dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
            return -EINVAL;
        }

        addr_in = sg_dma_address(dd->in_sg);
        addr_out = sg_dma_address(dd->out_sg);

        dd->flags |= FLAGS_FAST;

    } else {
        /* use cache buffers */
        count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
                 dd->buflen, dd->total, 0);

        addr_in = dd->dma_addr_in;
        addr_out = dd->dma_addr_out;

        dd->flags &= ~FLAGS_FAST;

    }

    dd->total -= count;

    err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);

    return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
    if (!dd->total)
        dd->req->base.complete(&dd->req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
    int err = 0;
    size_t count;

    omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

    omap_stop_dma(dd->dma_lch_in);
    omap_stop_dma(dd->dma_lch_out);

    if (dd->flags & FLAGS_FAST) {
        dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
    } else {
        /* hand the bounce buffer back to the CPU before reading it */
        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                    dd->dma_size, DMA_FROM_DEVICE);

        /* copy data */
        count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
                 dd->buflen, dd->dma_size, 1);
        if (count != dd->dma_size) {
            err = -EINVAL;
            pr_err("not all data converted: %zu\n", count);
        }
    }

    if (err || !dd->total)
        omap_aes_finish_req(dd, err);

    return err;
}

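/*
 * Dequeue the next request (if no transfer is in progress), bind it to
 * the device and kick the first DMA chunk. Called from omap_aes_crypt()
 * and from the tasklet after each completed chunk.
 */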
static int omap_aes_handle_req(struct omap_aes_dev *dd)
{
    struct crypto_async_request *async_req, *backlog;
    struct omap_aes_ctx *ctx;
    struct omap_aes_reqctx *rctx;
    struct ablkcipher_request *req;
    unsigned long flags;

    if (dd->total)
        goto start;

    spin_lock_irqsave(&dd->lock, flags);
    backlog = crypto_get_backlog(&dd->queue);
    async_req = crypto_dequeue_request(&dd->queue);
    if (!async_req) {
        omap_aes_hw_cleanup(dd);
        clear_bit(FLAGS_BUSY, &dd->flags);
    }
    spin_unlock_irqrestore(&dd->lock, flags);

    if (!async_req)
        return 0;

    if (backlog)
        backlog->complete(backlog, -EINPROGRESS);

    req = ablkcipher_request_cast(async_req);

    /* assign new request to device */
    dd->req = req;
    dd->total = req->nbytes;
    dd->in_offset = 0;
    dd->in_sg = req->src;
    dd->out_offset = 0;
    dd->out_sg = req->dst;

    rctx = ablkcipher_request_ctx(req);
    ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
    rctx->mode &= FLAGS_MODE_MASK;
    dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

    dd->iv = req->info;
    if ((dd->flags & FLAGS_CBC) && dd->iv)
        dd->flags |= FLAGS_NEW_IV;
    else
        dd->flags &= ~FLAGS_NEW_IV;

    ctx->dd = dd;
    if (dd->ctx != ctx) {
        /* assign new context to device */
        dd->ctx = ctx;
        ctx->flags |= FLAGS_NEW_KEY;
    }

    if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
        pr_err("request size is not an exact number of AES blocks\n");

start:
    return omap_aes_crypt_dma_start(dd);
}

static void omap_aes_task(unsigned long data)
{
    struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

    omap_aes_crypt_dma_stop(dd);
    omap_aes_handle_req(dd);
}

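/*
 * Common entry point for all four operations: stash the mode in the
 * request context, enqueue, and start processing unless the device is
 * already busy (FLAGS_BUSY), in which case the tasklet will pick it up.
 */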
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
    struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
            crypto_ablkcipher_reqtfm(req));
    struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
    struct omap_aes_dev *dd;
    unsigned long flags;
    int err;

    dd = omap_aes_find_dev(ctx);
    if (!dd)
        return -ENODEV;

    rctx->mode = mode;

    spin_lock_irqsave(&dd->lock, flags);
    err = ablkcipher_enqueue_request(&dd->queue, req);
    spin_unlock_irqrestore(&dd->lock, flags);

    if (!test_and_set_bit(FLAGS_BUSY, &dd->flags)) {
        omap_aes_hw_init(dd);
        omap_aes_handle_req(dd);
    }

    return err;
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
               unsigned int keylen)
{
    struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

    if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
           keylen != AES_KEYSIZE_256)
        return -EINVAL;

    memcpy(ctx->key, key, keylen);
    ctx->keylen = keylen;
    ctx->flags |= FLAGS_NEW_KEY;

    return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
    return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
    return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
    return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
    return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
    tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

    return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
}

/* ********************** ALGS ************************************ */

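/*
 * Two async block ciphers are registered: "ecb(aes)" and "cbc(aes)".
 * A minimal usage sketch with the ablkcipher API of this era (error
 * handling and a real completion callback omitted; 'key', 'sg' and 'iv'
 * are the caller's):
 *
 *   struct crypto_ablkcipher *tfm =
 *           crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *   struct ablkcipher_request *req =
 *           ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *   crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *   ablkcipher_request_set_crypt(req, sg, sg, AES_BLOCK_SIZE, iv);
 *   crypto_ablkcipher_encrypt(req);
 *
 * encrypt() returns 0, -EINPROGRESS or -EBUSY since the algs are
 * CRYPTO_ALG_ASYNC.
 */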
static struct crypto_alg ecb_aes_alg = {
    .cra_name       = "ecb(aes)",
    .cra_driver_name    = "ecb-aes-omap",
    .cra_priority       = 100,
    .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
    .cra_blocksize      = AES_BLOCK_SIZE,
    .cra_ctxsize        = sizeof(struct omap_aes_ctx),
    .cra_alignmask      = 0,
    .cra_type       = &crypto_ablkcipher_type,
    .cra_module     = THIS_MODULE,
    .cra_list       = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
    .cra_init       = omap_aes_cra_init,
    .cra_exit       = omap_aes_cra_exit,
    .cra_u = {
        .ablkcipher = {
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            .setkey     = omap_aes_setkey,
            .encrypt    = omap_aes_ecb_encrypt,
            .decrypt    = omap_aes_ecb_decrypt,
        }
    }
};

static struct crypto_alg cbc_aes_alg = {
    .cra_name       = "cbc(aes)",
    .cra_driver_name    = "cbc-aes-omap",
    .cra_priority       = 100,
    .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
    .cra_blocksize      = AES_BLOCK_SIZE,
    .cra_ctxsize        = sizeof(struct omap_aes_ctx),
    .cra_alignmask      = 0,
    .cra_type       = &crypto_ablkcipher_type,
    .cra_module     = THIS_MODULE,
    .cra_list       = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
    .cra_init       = omap_aes_cra_init,
    .cra_exit       = omap_aes_cra_exit,
    .cra_u = {
        .ablkcipher = {
            .min_keysize    = AES_MIN_KEY_SIZE,
            .max_keysize    = AES_MAX_KEY_SIZE,
            .ivsize     = AES_BLOCK_SIZE,
            .setkey     = omap_aes_setkey,
            .encrypt    = omap_aes_cbc_encrypt,
            .decrypt    = omap_aes_cbc_decrypt,
        }
    }
};

static int omap_aes_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct omap_aes_dev *dd;
    struct resource *res;
    int err = -ENOMEM;
    u32 reg;

    dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
    if (dd == NULL) {
        dev_err(dev, "unable to alloc data struct.\n");
        goto err_data;
    }
    dd->dev = dev;
    platform_set_drvdata(pdev, dd);

    spin_lock_init(&dd->lock);
    crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

    /* Get the base address */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(dev, "invalid resource type\n");
        err = -ENODEV;
        goto err_res;
    }
    dd->phys_base = res->start;

    /* Get the DMA out channel */
    res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
    if (!res)
        dev_info(dev, "no DMA info\n");
    else
        dd->dma_out = res->start;

    /* Get the DMA in channel */
    res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
    if (!res)
        dev_info(dev, "no DMA info\n");
    else
        dd->dma_in = res->start;

    /* Initializing the clock */
    dd->iclk = clk_get(NULL, AES_ICLK);
    if (IS_ERR(dd->iclk)) {
        dev_err(dev, "clock initialization failed.\n");
        err = PTR_ERR(dd->iclk);
        goto err_res;
    }

    dd->io_base = ioremap(dd->phys_base, SZ_4K);
    if (!dd->io_base) {
        dev_err(dev, "can't ioremap\n");
        err = -ENOMEM;
        goto err_io;
    }

    clk_enable(dd->iclk);
    reg = omap_aes_read(dd, AES_REG_REV);
    dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
         (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
    clk_disable(dd->iclk);

    tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);

    err = omap_aes_dma_init(dd);
    if (err)
        goto err_dma;

    INIT_LIST_HEAD(&dd->list);
    spin_lock(&list_lock);
    list_add_tail(&dd->list, &dev_list);
    spin_unlock(&list_lock);

    /* now register the algorithms with the crypto API */
    err = crypto_register_alg(&ecb_aes_alg);
    if (err)
        goto ecb_err;
    err = crypto_register_alg(&cbc_aes_alg);
    if (err)
        goto cbc_err;

    pr_info("probe() done\n");

    return 0;
cbc_err:
    crypto_unregister_alg(&ecb_aes_alg);
ecb_err:
    omap_aes_dma_cleanup(dd);
err_dma:
    tasklet_kill(&dd->task);
    iounmap(dd->io_base);
err_io:
    clk_put(dd->iclk);
err_res:
    kfree(dd);
    dd = NULL;
err_data:
    dev_err(dev, "initialization failed.\n");
    return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
    struct omap_aes_dev *dd = platform_get_drvdata(pdev);

    if (!dd)
        return -ENODEV;

    spin_lock(&list_lock);
    list_del(&dd->list);
    spin_unlock(&list_lock);

    crypto_unregister_alg(&ecb_aes_alg);
    crypto_unregister_alg(&cbc_aes_alg);

    tasklet_kill(&dd->task);
    omap_aes_dma_cleanup(dd);
    iounmap(dd->io_base);
    clk_put(dd->iclk);
    kfree(dd);
    dd = NULL;

    return 0;
}

#ifdef CONFIG_ARCH_OMAP24XX
static struct resource aes_resources[] = {
    {
        .start  = OMAP24XX_SEC_AES_BASE,
        .end    = OMAP24XX_SEC_AES_BASE + 0x4C,
        .flags  = IORESOURCE_MEM,
    },
    {
        .start  = OMAP24XX_DMA_AES_TX,
        .flags  = IORESOURCE_DMA,
    },
    {
        .start  = OMAP24XX_DMA_AES_RX,
        .flags  = IORESOURCE_DMA,
    }
};
#endif
#ifdef CONFIG_ARCH_OMAP34XX
static struct resource aes_resources[] = {
    {
        .start  = OMAP34XX_SEC_AES_BASE,
        .end    = OMAP34XX_SEC_AES_BASE + 0x4C,
        .flags  = IORESOURCE_MEM,
    },
    {
        .start  = OMAP34XX_DMA_AES2_TX,
        .flags  = IORESOURCE_DMA,
    },
    {
        .start  = OMAP34XX_DMA_AES2_RX,
        .flags  = IORESOURCE_DMA,
    }
};
#endif

static void omap_aes_release(struct device *dev)
{
}

static struct platform_device aes_device = {
    .name       = "omap-aes",
    .id     = -1,
    .num_resources  = ARRAY_SIZE(aes_resources),
    .resource   = aes_resources,
    .dev.release    = omap_aes_release,
};

static struct platform_driver omap_aes_driver = {
    .probe  = omap_aes_probe,
    .remove = omap_aes_remove,
    .driver = {
        .name   = "omap-aes",
        .owner  = THIS_MODULE,
    },
};

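/*
 * The driver instantiates its own platform device here, so loading the
 * module on a supported security-enabled (SEC type) OMAP2/3 is enough;
 * registration order is driver first, then device.
 */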
static int __init omap_aes_mod_init(void)
{
    int ret;

    pr_info("loading %s driver\n", "omap-aes");

    if (!cpu_class_is_omap2() ||
        omap_type() != OMAP2_DEVICE_TYPE_SEC) {
        pr_err("Unsupported cpu\n");
        return -ENODEV;
    }

    ret = platform_driver_register(&omap_aes_driver);
    if (ret)
        return ret;

    ret = platform_device_register(&aes_device);
    if (ret)
        goto err;

    return 0;

err:
    platform_driver_unregister(&omap_aes_driver);

    return ret;
}

static void __exit omap_aes_mod_exit(void)
{
    platform_device_unregister(&aes_device);
    platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");