SHARE
TWEET

Untitled

a guest Jul 22nd, 2019 68 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. // Public domain
  2. // Based on public domain 7zip implementation by Igor Pavlov and Samuel Neves
  3. #include "blake2sp.h"
  4.  
  5. // Set this if your processor is unaligned little endian
  6. #define LITTLE_ENDIAN_UNALIGNED
  7.  
  8. #ifdef LITTLE_ENDIAN_UNALIGNED
  9.  
  10. #define GetUi32(p) (*(const uint32_t *)(const void *)(p))
  11. #define SetUi32(p, v) { *(uint32_t *)(p) = (v); }
  12.  
  13. #else
  14.  
  15. #define GetUi32(p) ( \
  16.              ((const uint8_t *)(p))[0]        | \
  17.     ((uint32_t)((const uint8_t *)(p))[1] <<  8) | \
  18.     ((uint32_t)((const uint8_t *)(p))[2] << 16) | \
  19.     ((uint32_t)((const uint8_t *)(p))[3] << 24))
  20.  
  21. #define SetUi32(p, v) { uint8_t *_ppp_ = (uint8_t *)(p); uint32_t _vvv_ = (v); \
  22.     _ppp_[0] = (uint8_t)_vvv_; \
  23.     _ppp_[1] = (uint8_t)(_vvv_ >> 8); \
  24.     _ppp_[2] = (uint8_t)(_vvv_ >> 16); \
  25.     _ppp_[3] = (uint8_t)(_vvv_ >> 24); }
  26.  
  27. #endif
  28.  
#ifdef _MSC_VER

/* don't use _rotl with MINGW. It can insert slow call to function. */

/* #if (_MSC_VER >= 1200) */
#pragma intrinsic(_rotl)
#pragma intrinsic(_rotr)
/* #endif */

#define rotlFixed(x, n) _rotl((x), (n))
#define rotrFixed(x, n) _rotr((x), (n))

#else

/* new compilers can translate these macros to fast commands. */
/* NOTE: (n) must be in 1..31 — a shift by 32 is undefined behavior in C.
   All rotation counts used by the compression function below are
   constants in that range (16, 12, 8, 7). */

#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

#endif

/* BLAKE2s uses only right rotations. */
#define rotr32 rotrFixed
  51.  
#define BLAKE2S_NUM_ROUNDS 10
/* Written into f[0] (and into f[1] for the last node) to mark the final block. */
#define BLAKE2S_FINAL_FLAG (~(uint32_t)0)

/* BLAKE2s initialization vector (RFC 7693). */
static const uint32_t k_Blake2s_IV[8] =
{
  0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
  0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
};
  60.  
/* Message-word schedule (RFC 7693): k_Blake2s_Sigma[r][i] selects which
   of the 16 message words feeds position i of round r. */
static const uint8_t k_Blake2s_Sigma[BLAKE2S_NUM_ROUNDS][16] =
{
  {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 } ,
  { 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 } ,
  { 11,  8, 12,  0,  5,  2, 15, 13, 10, 14,  3,  6,  7,  1,  9,  4 } ,
  {  7,  9,  3,  1, 13, 12, 11, 14,  2,  6,  5, 10,  4,  0, 15,  8 } ,
  {  9,  0,  5,  7,  2,  4, 10, 15, 14,  1, 11, 12,  6,  8,  3, 13 } ,
  {  2, 12,  6, 10,  0, 11,  8,  3,  4, 13,  7,  5, 15, 14,  1,  9 } ,
  { 12,  5,  1, 15, 14, 13,  4, 10,  0,  7,  6,  3,  9,  2,  8, 11 } ,
  { 13, 11,  7, 14, 12,  1,  3,  9,  5,  0, 15,  4,  8,  6,  2, 10 } ,
  {  6, 15, 14,  9, 11,  3,  0,  8, 12,  2, 13,  7,  1,  4, 10,  5 } ,
  { 10,  2,  8,  4,  7,  6,  1,  5, 15, 11,  9, 14,  3, 12, 13 , 0 } ,
};
  74.  
  75.  
  76. void Blake2s_Init0(CBlake2s *p)
  77. {
  78.   unsigned i;
  79.   for (i = 0; i < 8; i++)
  80.     p->h[i] = k_Blake2s_IV[i];
  81.   p->t[0] = 0;
  82.   p->t[1] = 0;
  83.   p->f[0] = 0;
  84.   p->f[1] = 0;
  85.   p->bufPos = 0;
  86.   p->lastNode_f1 = 0;
  87. }
  88.  
  89.  
  90. static void Blake2s_Compress(CBlake2s *p)
  91. {
  92.   uint32_t m[16];
  93.   uint32_t v[16];
  94.  
  95.   {
  96.     unsigned i;
  97.    
  98.     for (i = 0; i < 16; i++)
  99.       m[i] = *(const uint32_t*)(p->buf + i * sizeof(m[i]));
  100.    
  101.     for (i = 0; i < 8; i++)
  102.       v[i] = p->h[i];
  103.   }
  104.  
  105.   v[ 8] = k_Blake2s_IV[0];
  106.   v[ 9] = k_Blake2s_IV[1];
  107.   v[10] = k_Blake2s_IV[2];
  108.   v[11] = k_Blake2s_IV[3];
  109.  
  110.   v[12] = p->t[0] ^ k_Blake2s_IV[4];
  111.   v[13] = p->t[1] ^ k_Blake2s_IV[5];
  112.   v[14] = p->f[0] ^ k_Blake2s_IV[6];
  113.   v[15] = p->f[1] ^ k_Blake2s_IV[7];
  114.  
  115.   #define G(r,i,a,b,c,d) \
  116.     a += b + m[sigma[2*i+0]];  d ^= a; d = rotr32(d, 16);  c += d;  b ^= c; b = rotr32(b, 12); \
  117.     a += b + m[sigma[2*i+1]];  d ^= a; d = rotr32(d,  8);  c += d;  b ^= c; b = rotr32(b,  7); \
  118.  
  119.   #define R(r) \
  120.     G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
  121.     G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
  122.     G(r,2,v[ 2],v[ 6],v[10],v[14]); \
  123.     G(r,3,v[ 3],v[ 7],v[11],v[15]); \
  124.     G(r,4,v[ 0],v[ 5],v[10],v[15]); \
  125.     G(r,5,v[ 1],v[ 6],v[11],v[12]); \
  126.     G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
  127.     G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
  128.  
  129.   {
  130.     unsigned r;
  131.     for (r = 0; r < BLAKE2S_NUM_ROUNDS; r++)
  132.     {
  133.       const uint8_t *sigma = k_Blake2s_Sigma[r];
  134.       R(r);
  135.     }
  136.     /* R(0); R(1); R(2); R(3); R(4); R(5); R(6); R(7); R(8); R(9); */
  137.   }
  138.  
  139.   #undef G
  140.   #undef R
  141.  
  142.   {
  143.     unsigned i;
  144.     for (i = 0; i < 8; i++)
  145.       p->h[i] ^= v[i] ^ v[i + 8];
  146.   }
  147. }
  148.  
  149.  
/* Adds 'inc' bytes to the 64-bit message counter kept as two 32-bit
   halves t[0] (low) and t[1] (high), with carry.
   NOTE(review): the first macro argument is ignored — the macro operates
   on the variable 'p' captured from the enclosing scope, and callers pass
   a bare token 'S' that is never expanded.  Left as-is because renaming
   the parameter would break the existing call sites. */
#define Blake2s_Increment_Counter(S, inc) \
  { p->t[0] += (inc); p->t[1] += (p->t[0] < (inc)); }

/* Marks the buffered block as final: f[0] gets the final flag, and f[1]
   is additionally set when this lane is the last node of its tree layer. */
#define Blake2s_Set_LastBlock(p) \
  { p->f[0] = BLAKE2S_FINAL_FLAG; p->f[1] = p->lastNode_f1; }
  155.  
  156.  
  157. static void Blake2s_Update(CBlake2s *p, const uint8_t *data, size_t size)
  158. {
  159.   while (size != 0)
  160.   {
  161.     unsigned pos = (unsigned)p->bufPos;
  162.     unsigned rem = BLAKE2S_BLOCK_SIZE - pos;
  163.  
  164.     if (size <= rem)
  165.     {
  166.       memcpy(p->buf + pos, data, size);
  167.       p->bufPos += (uint32_t)size;
  168.       return;
  169.     }
  170.  
  171.     memcpy(p->buf + pos, data, rem);
  172.     Blake2s_Increment_Counter(S, BLAKE2S_BLOCK_SIZE);
  173.     Blake2s_Compress(p);
  174.     p->bufPos = 0;
  175.     data += rem;
  176.     size -= rem;
  177.   }
  178. }
  179.  
  180.  
  181. static void Blake2s_Final(CBlake2s *p, uint8_t *digest)
  182. {
  183.   unsigned i;
  184.  
  185.   Blake2s_Increment_Counter(S, (uint32_t)p->bufPos);
  186.   Blake2s_Set_LastBlock(p);
  187.   memset(p->buf + p->bufPos, 0, BLAKE2S_BLOCK_SIZE - p->bufPos);
  188.   Blake2s_Compress(p);
  189.  
  190.   for (i = 0; i < 8; i++)
  191.     SetUi32(digest + sizeof(p->h[i]) * i, p->h[i]);
  192. }
  193.  
  194.  
  195. /* ---------- BLAKE2s ---------- */
  196.  
  197. /* we need to xor CBlake2s::h[i] with input parameter block after Blake2s_Init0() */
  198. /*
  199. typedef struct
  200. {
  201.   uint8_t  digest_length;
  202.   uint8_t  key_length;
  203.   uint8_t  fanout;
  204.   uint8_t  depth;
  205.   uint32_t leaf_length;
  206.   uint8_t  node_offset[6];
  207.   uint8_t  node_depth;
  208.   uint8_t  inner_length;
  uint8_t  salt[BLAKE2S_SALTBYTES];
  uint8_t  personal[BLAKE2S_PERSONALBYTES];
  211. } CBlake2sParam;
  212. */
  213.  
  214.  
  215. static void Blake2sp_Init_Spec(CBlake2s *p, unsigned node_offset, unsigned node_depth)
  216. {
  217.   Blake2s_Init0(p);
  218.  
  219.   p->h[0] ^= (BLAKE2S_DIGEST_SIZE | ((uint32_t)BLAKE2SP_PARALLEL_DEGREE << 16) | ((uint32_t)2 << 24));
  220.   p->h[2] ^= ((uint32_t)node_offset);
  221.   p->h[3] ^= ((uint32_t)node_depth << 16) | ((uint32_t)BLAKE2S_DIGEST_SIZE << 24);
  222.   /*
  223.   P->digest_length = BLAKE2S_DIGEST_SIZE;
  224.   P->key_length = 0;
  225.   P->fanout = BLAKE2SP_PARALLEL_DEGREE;
  226.   P->depth = 2;
  227.   P->leaf_length = 0;
  228.   store48(P->node_offset, node_offset);
  229.   P->node_depth = node_depth;
  230.   P->inner_length = BLAKE2S_DIGEST_SIZE;
  231.   */
  232. }
  233.  
  234.  
  235. void Blake2sp_Init(CBlake2sp *p)
  236. {
  237.   unsigned i;
  238.  
  239.   p->bufPos = 0;
  240.  
  241.   for (i = 0; i < BLAKE2SP_PARALLEL_DEGREE; i++)
  242.     Blake2sp_Init_Spec(&p->S[i], i, 0);
  243.  
  244.   p->S[BLAKE2SP_PARALLEL_DEGREE - 1].lastNode_f1 = BLAKE2S_FINAL_FLAG;
  245. }
  246.  
  247.  
  248. void Blake2sp_Update(CBlake2sp *p, const uint8_t *data, size_t size)
  249. {
  250.   unsigned pos = p->bufPos;
  251.   while (size != 0)
  252.   {
  253.     unsigned index = pos / BLAKE2S_BLOCK_SIZE;
  254.     unsigned rem = BLAKE2S_BLOCK_SIZE - (pos & (BLAKE2S_BLOCK_SIZE - 1));
  255.     if (rem > size)
  256.       rem = (unsigned)size;
  257.     Blake2s_Update(&p->S[index], data, rem);
  258.     size -= rem;
  259.     data += rem;
  260.     pos += rem;
  261.     pos &= (BLAKE2S_BLOCK_SIZE * BLAKE2SP_PARALLEL_DEGREE - 1);
  262.   }
  263.   p->bufPos = pos;
  264. }
  265.  
  266.  
  267. void Blake2sp_Final(CBlake2sp *p, uint8_t *digest)
  268. {
  269.   CBlake2s R;
  270.   unsigned i;
  271.  
  272.   Blake2sp_Init_Spec(&R, 0, 1);
  273.   R.lastNode_f1 = BLAKE2S_FINAL_FLAG;
  274.  
  275.   for (i = 0; i < BLAKE2SP_PARALLEL_DEGREE; i++)
  276.   {
  277.     uint8_t hash[BLAKE2S_DIGEST_SIZE];
  278.     Blake2s_Final(&p->S[i], hash);
  279.     Blake2s_Update(&R, hash, BLAKE2S_DIGEST_SIZE);
  280.   }
  281.  
  282.   Blake2s_Final(&R, digest);
  283. }
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
 
Top