Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
//This kernel file expects LUTSTEP to be defined via compiler options (e.g. -DLUTSTEP=20);
//LUTSTEP is the number of low bits consumed per look-up-table jump.
//assuming SIEVESTEP = 32
//-cl-finite-math-only and -cl-no-signed-zeros may be used for faster floating point calculations
//constants
#define MAXSTEP 0xfffffffu               //step-count ceiling; doubles as the "error / give up" marker
#define BITDIFF (32-LUTSTEP)             //complementary shift when moving bits across 32-bit limbs
#define TABLEMASK ((1u << LUTSTEP) - 1)  //mask for the low LUTSTEP bits (= look-up table index)
#define ODDSCORE 1.5849625f              //log2(3): score gained by one odd (3n+1) step, before the -1 per halving
//compareUint4() : compares two 128-bit unsigned integers stored little-endian
//in a uint4 (s0 = least significant limb).
//returns negative number if a < b, positive number if a > b, 0 if a==b;
//OpenCL vector relational operators yield -1 per true component, so
//(a<b)-(a>b) is -1 / +1 / 0 per limb. Weighting limb i by 2^i lets the most
//significant differing limb dominate the sum (|lower terms| <= 1+2+4 = 7 < 8).
inline int compareUint4(uint4 a, uint4 b){
	const int4 perLimb = (a < b) - (a > b); //-1, +1 or 0 for each limb
	return perLimb.s0 + 2 * perLimb.s1 + 4 * perLimb.s2 + 8 * perLimb.s3;
} //compareUint4()
//compareUint() : compares two 32-bit unsigned integers
//returns negative number if a < b, positive number if a > b, 0 if a==b;
//FIX: scalar relational operators in OpenCL C return 1 for true (vector
//operators return -1), so the original (a<b)-(a>b) returned +1 when a < b —
//the opposite of the documented contract, and inconsistent with
//compareUint4(), whose weighted result this value is added to when deciding
//whether the transformed value fell below its original. The subtraction
//order must therefore be (a>b) - (a<b).
inline int compareUint(uint a, uint b){
	return (a>b) - (a<b); //-1 if a<b, +1 if a>b, 0 if equal
} //compareUint()
//Table of 3^N for N = 0..20, indexed by the number of odd steps in a jump.
//3^20 = 3486784401 is the largest power of 3 that fits in 32 bits, so 21
//entries cover every representable multiplier.
__constant uint power3[21] =
{1u,3u,9u,27u,81u,243u,729u,2187u,6561u,19683u,
59049u,177147u,531441u,1594323u,4782969u,14348907u,43046721u,129140163u,387420489u,1162261467u,
3486784401u
};
//mul64() : full 64-bit product of two 32-bit uints, returned as
//(low word, high word) in a uint2.
inline uint2 mul64(const uint a, const uint b){
	uint2 product;
	product.s0 = a * b;        //low 32 bits (wrap-around is the intended truncation)
	product.s1 = mul_hi(a, b); //high 32 bits
	return product;
}
//clearRes() : zero-fill the result buffer, one uint4 per work-item.
//clEnqueueFillBuffer() can be used instead on OpenCL 1.2+.
__kernel void clearRes(__global uint4 *results){
	const size_t idx = get_global_id(0);
	results[idx] = (uint4)(0u, 0u, 0u, 0u);
}
//collatzGlide() : count collatz glide and store the result of maximum glide.
//The "glide" of n is the number of Collatz steps until the trajectory first
//drops below n. Phase 1 advances LUTSTEP steps per iteration through the
//look-up table while an approximate score = log2(val'/val) proves the value
//is still above its start; Phase 2 then single-steps to locate the exact
//drop position, applies the remaining affine transform, and verifies the
//drop with an exact 192-bit comparison.
//-----parameters-----
//results : s0 : max glide step. s1 : 'offset' of max glide. s23 : sum of steps (64-bit). Length = global work size
//sieve : Numbers needed to compute for every 2^32 numbers. Length = global work size
//table : look-up table for jumping. s0: x of 3^x mulVal. s1 : transformed b. Length = 2^(LUTSTEP)
//negNadir : negative nadir score during a 'jump' by look-up table. Score is an approximation of log2(val'/val). Length = 2^(LUTSTEP)
//start : starting value (128-bit uint) as uint4, s0 = least significant limb
//offset32 : actual offset = offset32 * (2^32)
__kernel void collatzGlide( __global uint4 *results,
                            __global const uint *sieve,
                            __global const uint2 *table,
                            __global const float *negNadir,
                            uint4 start,
                            const uint offset32){
	const uint gid = get_global_id(0);
	//Assemble the 192-bit working value: val = low 128 bits, val_h = high 64.
	//NOTE(review): start.s0 is replaced by sieve[gid]; presumably the host
	//always passes start with s0 == 0 — confirm against the host code.
	uint4 val = (uint4)(sieve[gid], offset32 + start.s1, start.s23); //val : first 128-bit of val
	val.s2 += (val.s1 < start.s1); //carry operation of val
	val.s3 += (val.s2 < start.s2);
	uint2 val_h = (uint2)(val.s3 < start.s3 , 0u ); //val_h : higher 64-bit of val
	//record original value (start is reused as the 128-bit snapshot)
	start = val;
	const uint originalVal_h = val_h.s0;
	uint carryIdx = val.s0 & TABLEMASK; //carryIdx : 1. carry bits, 2. look-up table index
	uint stepCnt = 0; //stepCnt : count glide steps
	uint mulVal = 0; //mulVal : multiply value (a power of 3)
	uint2 lut; //lut : look-up table item, also reused for mul64() partial products
	float score = 0.0f; //score : running approximation of log2(val'/val); starts at 0
	//The main loop, jump (LUTSTEP) iterations at a time, until the score hits
	//the nadir => the value might fall below its original during the next jump.
	//The first iteration, val will always be greater than its original because
	//the sieve is applied; do{}while is used.
	do{
		lut = table[carryIdx]; //retrieve 'jump' table item
		stepCnt += LUTSTEP + lut.s0; //accumulate glide steps: LUTSTEP halvings + lut.s0 odd steps
		mulVal = power3[lut.s0]; //multiply value = 3^(# odd encountered in the jump table)
		carryIdx = lut.s1; //b: additive part of the affine jump n -> a*n + b
		score += convert_float(lut.s0)* ODDSCORE - convert_float(LUTSTEP); //accumulate score, may use fma()
		//Do n = (n>>LUTSTEP)*a + b, limb by limb with manual carry propagation;
		//each mul64 low word is added to the incoming carry, and the compare
		//(sum < addend) detects the carry-out into the next limb.
		lut = mul64((val.s0 >> LUTSTEP) | (val.s1 << BITDIFF), mulVal); //bit 0-31
		val.s0 = lut.s0 + carryIdx;
		carryIdx = lut.s1 + (val.s0 < lut.s0); //carry bits
		lut = mul64((val.s1 >> LUTSTEP) | (val.s2 << BITDIFF), mulVal);//bit 32-63
		val.s1 = lut.s0 + carryIdx;
		carryIdx = lut.s1 + (val.s1 < lut.s0);
		lut = mul64((val.s2 >> LUTSTEP) | (val.s3 << BITDIFF), mulVal);//bit 64-95
		val.s2 = lut.s0 + carryIdx;
		carryIdx = lut.s1 + (val.s2 < lut.s0);
		lut = mul64((val.s3 >> LUTSTEP) | (val_h.s0 << BITDIFF), mulVal);//bit 96-127
		val.s3 = lut.s0 + carryIdx;
		carryIdx = lut.s1 + (val.s3 < lut.s0);
		lut = mul64((val_h.s0 >> LUTSTEP) | (val_h.s1 << BITDIFF), mulVal);//bit 128-159
		val_h.s0 = lut.s0 + carryIdx;
		carryIdx = lut.s1 + (val_h.s0 < lut.s0);
		lut = mul64((val_h.s1 >> LUTSTEP), mulVal);//bit 160-191 and overflow detection
		val_h.s1 = lut.s0 + carryIdx;
		lut.s1 += (val_h.s1 < lut.s0); //lut.s1 acts as overflow bits beyond 192 bits
		carryIdx = val.s0 & TABLEMASK; //next table index
		//the loop should continue when all of the below are true
		//1. next nadir + score > 0 (val will always stay above its original in
		//   the next jump, so we can safely jump 'LUTSTEP' iterations)
		//2. no overflow
		//3. step < maxStep
		lut.s0 = (score > negNadir[carryIdx]) && (lut.s1 == 0) && (stepCnt < MAXSTEP);
	}while(lut.s0);
	//Phase2 : find the position where the val falls under its original.
	//carryIdx currently holds the low LUTSTEP bits of val; the loop runs the
	//Collatz map on those low bits alone (accumulating the additive part b in
	//carryIdx and the odd-step count in mulVal) until the score goes negative.
	uint iterations = 0; //count iterations to where the fall position is
	mulVal = 0; //mulVal : counting the exponent x of 3^x
	do{
		const uint odd = carryIdx & 1u;
		mulVal += odd;
		carryIdx += select((uint)0, 1 + (carryIdx << 1), odd); //odd: carryIdx = 3*carryIdx + 1
		carryIdx >>= 1; //one halving per iteration
		score += convert_float(odd) * ODDSCORE - 1.0f; //each step: +log2(3) if odd, -1 for the halving
		++iterations;
	}while(signbit(score) == 0); //while(score >= 0)
	stepCnt += iterations + mulVal; //halvings + odd steps taken in phase 2
	mulVal = power3[mulVal];
	//NOTE(review): assumes 0 < iterations < 32 (score drops within one table
	//width); iterations >= 32 would make the shifts below undefined — confirm
	//negNadir guarantees this when phase 1 exits on the score condition.
	const uint newBitDiff = 32 - iterations;
	//Do n = (n >> iterations)*a + b, same limb-wise carry chain as phase 1
	lut = mul64((val.s0 >> iterations) | (val.s1 << newBitDiff), mulVal); //bit 0-31
	val.s0 = lut.s0 + carryIdx;
	carryIdx = lut.s1 + (val.s0 < lut.s0); //carry bits
	lut = mul64((val.s1 >> iterations) | (val.s2 << newBitDiff), mulVal); //bit 32-63
	val.s1 = lut.s0 + carryIdx;
	carryIdx = lut.s1 + (val.s1 < lut.s0);
	lut = mul64((val.s2 >> iterations) | (val.s3 << newBitDiff), mulVal); //bit 64-95
	val.s2 = lut.s0 + carryIdx;
	carryIdx = lut.s1 + (val.s2 < lut.s0);
	lut = mul64((val.s3 >> iterations) | (val_h.s0 <<newBitDiff), mulVal); //bit 96-127
	val.s3 = lut.s0 + carryIdx;
	carryIdx = lut.s1 + (val.s3 < lut.s0);
	lut = mul64((val_h.s0 >> iterations) | (val_h.s1 << newBitDiff), mulVal); //bit 128-159
	val_h.s0 = lut.s0 + carryIdx;
	carryIdx = lut.s1 + (val_h.s0 < lut.s0);
	lut = mul64((val_h.s1 >> iterations), mulVal); //bit 160-191
	val_h.s1 = lut.s0 + carryIdx;
	//If transformed value >= original value, raise an error in the steps (set
	//it to maxStep). The three comparison terms are weighted so each wider
	//field dominates: |compareUint4| <= 15, the 64-95 limb compare is +/-16,
	//and any bits above 128 force +32. comp >= 0 <=> val >= original.
	int comp = compareUint4(val, start)+ (compareUint(val_h.s0, originalVal_h) << 4) + ((val_h.s1>0) << 5);
	stepCnt = select(stepCnt, MAXSTEP, comp >= 0);
	//compare and store the results from this kernel to the previous results
	val = results[gid]; //Use val as the result to save space as it's no longer used
	//If stepCnt > that of the buffer, replace the buffer's record (step & offset) with this kernel's.
	comp = stepCnt > val.s0;
	val.s0 = select(val.s0, stepCnt, comp);
	val.s1 = select(val.s1, offset32, comp);
	val.s2 += stepCnt; //accumulate the step total (64-bit sum in s2:s3)
	val.s3 += val.s2 < stepCnt; //carry if the sum overflows 32-bit
	results[gid] = val; //save result to output array
} //collatzGlide()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement