static const char float64_source[] = "/*\n" " * The implementations contained in this file are heavily based on the\n" " * implementations found in the Berkeley SoftFloat library. As such, they are\n" " * licensed under the same 3-clause BSD license:\n" " *\n" " * License for Berkeley SoftFloat Release 3e\n" " *\n" " * John R. Hauser\n" " * 2018 January 20\n" " *\n" " * The following applies to the whole of SoftFloat Release 3e as well as to\n" " * each source file individually.\n" " *\n" " * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the\n" " * University of California. All rights reserved.\n" " *\n" " * Redistribution and use in source and binary forms, with or without\n" " * modification, are permitted provided that the following conditions are met:\n" " *\n" " * 1. Redistributions of source code must retain the above copyright notice,\n" " * this list of conditions, and the following disclaimer.\n" " *\n" " * 2. Redistributions in binary form must reproduce the above copyright\n" " * notice, this list of conditions, and the following disclaimer in the\n" " * documentation and/or other materials provided with the distribution.\n" " *\n" " * 3. Neither the name of the University nor the names of its contributors\n" " * may be used to endorse or promote products derived from this software\n" " * without specific prior written permission.\n" " *\n" " * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS \"AS IS\", AND ANY\n" " * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n" " * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE\n" " * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY\n" " * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n" " * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n" " * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n" " * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n" " * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n" " * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n" "*/\n" "\n" "#version 430\n" "#extension GL_ARB_gpu_shader_int64 : enable\n" "#extension GL_ARB_shader_bit_encoding : enable\n" "#extension GL_EXT_shader_integer_mix : enable\n" "#extension GL_MESA_shader_integer_functions : enable\n" "\n" "#pragma warning(off)\n" "\n" "/* Software IEEE floating-point rounding mode.\n" " * GLSL spec section \"4.7.1 Range and Precision\":\n" " * The rounding mode cannot be set and is undefined.\n" " * But here, we are able to define the rounding mode at the compilation time.\n" " */\n" "#define FLOAT_ROUND_NEAREST_EVEN 0\n" "#define FLOAT_ROUND_TO_ZERO 1\n" "#define FLOAT_ROUND_DOWN 2\n" "#define FLOAT_ROUND_UP 3\n" "#define FLOAT_ROUNDING_MODE FLOAT_ROUND_NEAREST_EVEN\n" "\n" "/* Absolute value of a Float64 :\n" " * Clear the sign bit\n" " */\n" "uint64_t\n" "__fabs64(uint64_t __a)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " a.y &= 0x7FFFFFFFu;\n" " return packUint2x32(a);\n" "}\n" "\n" "/* Returns 1 if the double-precision floating-point value `a' is a NaN;\n" " * otherwise returns 0.\n" " */\n" "bool\n" "__is_nan(uint64_t __a)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " return (0xFFE00000u <= (a.y<<1)) &&\n" " ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));\n" "}\n" "\n" "/* Negate value of a Float64 :\n" " * Toggle the sign bit\n" " */\n" "uint64_t\n" "__fneg64(uint64_t __a)\n" 
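"/* Implementation note: a NaN operand is returned with its bit pattern\n" " * unchanged; the mix() below keeps the original high word in that case.\n" " */\n"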
"{\n" " uvec2 a = unpackUint2x32(__a);\n" " uint t = a.y;\n" "\n" " t ^= (1u << 31);\n" " a.y = mix(t, a.y, __is_nan(__a));\n" " return packUint2x32(a);\n" "}\n" "\n" "uint64_t\n" "__fsign64(uint64_t __a)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " uvec2 retval;\n" " retval.x = 0u;\n" " retval.y = mix((a.y & 0x80000000u) | 0x3FF00000u, 0u, (a.y << 1 | a.x) == 0u);\n" " return packUint2x32(retval);\n" "}\n" "\n" "/* Returns the fraction bits of the double-precision floating-point value `a'.*/\n" "uint\n" "__extractFloat64FracLo(uint64_t a)\n" "{\n" " return unpackUint2x32(a).x;\n" "}\n" "\n" "uint\n" "__extractFloat64FracHi(uint64_t a)\n" "{\n" " return unpackUint2x32(a).y & 0x000FFFFFu;\n" "}\n" "\n" "/* Returns the exponent bits of the double-precision floating-point value `a'.*/\n" "int\n" "__extractFloat64Exp(uint64_t __a)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " return int((a.y>>20) & 0x7FFu);\n" "}\n" "\n" "bool\n" "__feq64_nonnan(uint64_t __a, uint64_t __b)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " uvec2 b = unpackUint2x32(__b);\n" " return (a.x == b.x) &&\n" " ((a.y == b.y) || ((a.x == 0u) && (((a.y | b.y)<<1) == 0u)));\n" "}\n" "\n" "/* Returns true if the double-precision floating-point value `a' is equal to the\n" " * corresponding value `b', and false otherwise. The comparison is performed\n" " * according to the IEEE Standard for Floating-Point Arithmetic.\n" " */\n" "bool\n" "__feq64(uint64_t a, uint64_t b)\n" "{\n" " if (__is_nan(a) || __is_nan(b))\n" " return false;\n" "\n" " return __feq64_nonnan(a, b);\n" "}\n" "\n" "/* Returns true if the double-precision floating-point value `a' is not equal\n" " * to the corresponding value `b', and false otherwise. The comparison is\n" " * performed according to the IEEE Standard for Floating-Point Arithmetic.\n" " */\n" "bool\n" "__fne64(uint64_t a, uint64_t b)\n" "{\n" " if (__is_nan(a) || __is_nan(b))\n" " return true;\n" "\n" " return !__feq64_nonnan(a, b);\n" "}\n" "\n" "/* Returns the sign bit of the double-precision floating-point value `a'.*/\n" "uint\n" "__extractFloat64Sign(uint64_t a)\n" "{\n" " return unpackUint2x32(a).y >> 31;\n" "}\n" "\n" "/* Returns true if the 64-bit value formed by concatenating `a0' and `a1' is less\n" " * than the 64-bit value formed by concatenating `b0' and `b1'. Otherwise,\n" " * returns false.\n" " */\n" "bool\n" "lt64(uint a0, uint a1, uint b0, uint b1)\n" "{\n" " return (a0 < b0) || ((a0 == b0) && (a1 < b1));\n" "}\n" "\n" "bool\n" "__flt64_nonnan(uint64_t __a, uint64_t __b)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " uvec2 b = unpackUint2x32(__b);\n" " uint aSign = __extractFloat64Sign(__a);\n" " uint bSign = __extractFloat64Sign(__b);\n" " if (aSign != bSign)\n" " return (aSign != 0u) && ((((a.y | b.y)<<1) | a.x | b.x) != 0u);\n" "\n" " return mix(lt64(a.y, a.x, b.y, b.x), lt64(b.y, b.x, a.y, a.x), aSign != 0u);\n" "}\n" "\n" "/* Returns true if the double-precision floating-point value `a' is less than\n" " * the corresponding value `b', and false otherwise. The comparison is performed\n" " * according to the IEEE Standard for Floating-Point Arithmetic.\n" " */\n" "bool\n" "__flt64(uint64_t a, uint64_t b)\n" "{\n" " if (__is_nan(a) || __is_nan(b))\n" " return false;\n" "\n" " return __flt64_nonnan(a, b);\n" "}\n" "\n" "/* Returns true if the double-precision floating-point value `a' is greater\n" " * than or equal to * the corresponding value `b', and false otherwise. 
The\n" " * comparison is performed according to the IEEE Standard for Floating-Point\n" " * Arithmetic.\n" " */\n" "bool\n" "__fge64(uint64_t a, uint64_t b)\n" "{\n" " if (__is_nan(a) || __is_nan(b))\n" " return false;\n" "\n" " return !__flt64_nonnan(a, b);\n" "}\n" "\n" "/* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit\n" " * value formed by concatenating `b0' and `b1'. Addition is modulo 2^64, so\n" " * any carry out is lost. The result is broken into two 32-bit pieces which\n" " * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.\n" " */\n" "void\n" "__add64(uint a0, uint a1, uint b0, uint b1,\n" " out uint z0Ptr,\n" " out uint z1Ptr)\n" "{\n" " uint z1 = a1 + b1;\n" " z1Ptr = z1;\n" " z0Ptr = a0 + b0 + uint(z1 < a1);\n" "}\n" "\n" "\n" "/* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the\n" " * 64-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo\n" " * 2^64, so any borrow out (carry out) is lost. The result is broken into two\n" " * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and\n" " * `z1Ptr'.\n" " */\n" "void\n" "__sub64(uint a0, uint a1, uint b0, uint b1,\n" " out uint z0Ptr,\n" " out uint z1Ptr)\n" "{\n" " z1Ptr = a1 - b1;\n" " z0Ptr = a0 - b0 - uint(a1 < b1);\n" "}\n" "\n" "/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the\n" " * number of bits given in `count'. If any nonzero bits are shifted off, they\n" " * are \"jammed\" into the least significant bit of the result by setting the\n" " * least significant bit to 1. The value of `count' can be arbitrarily large;\n" " * in particular, if `count' is greater than 64, the result will be either 0\n" " * or 1, depending on whether the concatenation of `a0' and `a1' is zero or\n" " * nonzero. The result is broken into two 32-bit pieces which are stored at\n" " * the locations pointed to by `z0Ptr' and `z1Ptr'.\n" " */\n" "void\n" "__shift64RightJamming(uint a0,\n" " uint a1,\n" " int count,\n" " out uint z0Ptr,\n" " out uint z1Ptr)\n" "{\n" " uint z0;\n" " uint z1;\n" " int negCount = (-count) & 31;\n" "\n" " z0 = mix(0u, a0, count == 0);\n" " z0 = mix(z0, (a0 >> count), count < 32);\n" "\n" " z1 = uint((a0 | a1) != 0u); /* count >= 64 */\n" " uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);\n" " z1 = mix(z1, z1_lt64, count < 64);\n" " z1 = mix(z1, (a0<<negCount) | (a1>>count) | uint ((a1<<negCount) != 0u), count < 32);\n" " z1 = mix(z1, a1, count == 0);\n" " z1Ptr = z1;\n" " z0Ptr = z0;\n" "}\n" "\n" "/* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right\n" " * by 64 _plus_ the number of bits given in `count'. The shifted result is at\n" " * most 64 nonzero bits; these are broken into two 32-bit pieces which are\n" " * stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted\n" " * off form a third 32-bit piece which is stored at the location pointed to by\n" " * `z2Ptr'; its least significant bit is \"jammed\" to 1 if any nonzero bits\n" " * were shifted off.\n" " */\n" "void\n" "__shift64ExtraRightJamming(uint a0, uint a1, uint a2,\n" " int count,\n" " out uint z0Ptr,\n" " out uint z1Ptr,\n" " out uint z2Ptr)\n" "{\n" " uint z0 = 0u;\n" " uint z1;\n" " uint z2;\n" " int negCount = (-count) & 31;\n" "\n" " z2 = mix(uint(a0 != 0u), a0, count == 64);\n" " z2 = mix(z2, a0 << negCount, count < 64);\n" " z2 = mix(z2, a1 << negCount, count < 32);\n" "\n" " z1 = mix(0u, (a0 >> (count & 31)), count < 64);\n" " z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);\n" "\n" " a2 = mix(a2 | a1, a2, count < 32);\n" " z0 = mix(z0, a0 >> count, count < 32);\n" " z2 |= uint(a2 != 0u);\n" "\n" " z0 = mix(z0, 0u, (count == 32));\n" " z1 = mix(z1, a0, (count == 32));\n" " z2 = mix(z2, a1, (count == 32));\n" " z0 = mix(z0, a0, (count == 0));\n" " z1 = mix(z1, a1, (count == 0));\n" " z2 = mix(z2, a2, (count == 0));\n" " z2Ptr = z2;\n" " z1Ptr = z1;\n" " z0Ptr = z0;\n" "}\n" "\n" "/* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the\n" " * number of bits given in `count'. Any bits shifted off are lost. The value\n" " * of `count' must be less than 32. The result is broken into two 32-bit\n" " * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.\n" " */\n" "void\n" "__shortShift64Left(uint a0, uint a1,\n" " int count,\n" " out uint z0Ptr,\n" " out uint z1Ptr)\n" "{\n" " z1Ptr = a1<<count;\n" " z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);\n" "}\n" "\n" "/* Packs the sign `zSign', the exponent `zExp', and the significand formed by\n" " * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-\n" " * point value, returning the result. 
After being shifted into the proper\n" " * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added\n" " * together to form the most significant 32 bits of the result. This means\n" " * that any integer portion of `zFrac0' will be added into the exponent. Since\n" " * a properly normalized significand will have an integer portion equal to 1,\n" " * the `zExp' input should be 1 less than the desired result exponent whenever\n" " * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.\n" " */\n" "uint64_t\n" "__packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)\n" "{\n" " uvec2 z;\n" "\n" " z.y = (zSign << 31) + (uint(zExp) << 20) + zFrac0;\n" " z.x = zFrac1;\n" " return packUint2x32(z);\n" "}\n" "\n" "/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',\n" " * and extended significand formed by the concatenation of `zFrac0', `zFrac1',\n" " * and `zFrac2', and returns the proper double-precision floating-point value\n" " * corresponding to the abstract input. Ordinarily, the abstract value is\n" " * simply rounded and packed into the double-precision format, with the inexact\n" " * exception raised if the abstract input cannot be represented exactly.\n" " * However, if the abstract value is too large, the overflow and inexact\n" " * exceptions are raised and an infinity or maximal finite value is returned.\n" " * If the abstract value is too small, the input value is rounded to a\n" " * subnormal number, and the underflow and inexact exceptions are raised if the\n" " * abstract input cannot be represented exactly as a subnormal double-precision\n" " * floating-point number.\n" " * The input significand must be normalized or smaller. If the input\n" " * significand is not normalized, `zExp' must be 0; in that case, the result\n" " * returned is a subnormal number, and it must not require rounding. In the\n" " * usual case that the input significand is normalized, `zExp' must be 1 less\n" " * than the \"true\" floating-point exponent. 
The handling of underflow and\n" " * overflow follows the IEEE Standard for Floating-Point Arithmetic.\n" " */\n" "uint64_t\n" "__roundAndPackFloat64(uint zSign,\n" " int zExp,\n" " uint zFrac0,\n" " uint zFrac1,\n" " uint zFrac2)\n" "{\n" " bool roundNearestEven;\n" " bool increment;\n" "\n" " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n" " increment = int(zFrac2) < 0;\n" " if (!roundNearestEven) {\n" " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {\n" " increment = false;\n" " } else {\n" " if (zSign != 0u) {\n" " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&\n" " (zFrac2 != 0u);\n" " } else {\n" " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n" " (zFrac2 != 0u);\n" " }\n" " }\n" " }\n" " if (0x7FD <= zExp) {\n" " if ((0x7FD < zExp) ||\n" " ((zExp == 0x7FD) &&\n" " (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&\n" " increment)) {\n" " if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||\n" " ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||\n" " ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {\n" " return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);\n" " }\n" " return __packFloat64(zSign, 0x7FF, 0u, 0u);\n" " }\n" " if (zExp < 0) {\n" " __shift64ExtraRightJamming(\n" " zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);\n" " zExp = 0;\n" " if (roundNearestEven) {\n" " increment = zFrac2 < 0u;\n" " } else {\n" " if (zSign != 0u) {\n" " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&\n" " (zFrac2 != 0u);\n" " } else {\n" " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n" " (zFrac2 != 0u);\n" " }\n" " }\n" " }\n" " }\n" " if (increment) {\n" " __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);\n" " zFrac1 &= ~((zFrac2 + uint(zFrac2 == 0u)) & uint(roundNearestEven));\n" " } else {\n" " zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);\n" " }\n" " return __packFloat64(zSign, zExp, zFrac0, zFrac1);\n" "}\n" "\n" "uint64_t\n" "__roundAndPackUInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)\n" "{\n" " bool roundNearestEven;\n" " bool increment;\n" " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n" "\n" " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n" "\n" " if (zFrac2 >= 0x80000000u)\n" " increment = false;\n" "\n" " if (!roundNearestEven) {\n" " if (zSign != 0u) {\n" " if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) && (zFrac2 != 0u)) {\n" " increment = false;\n" " }\n" " } else {\n" " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n" " (zFrac2 != 0u);\n" " }\n" " }\n" "\n" " if (increment) {\n" " __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);\n" " if ((zFrac0 | zFrac1) != 0u)\n" " zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);\n" " }\n" " return mix(packUint2x32(uvec2(zFrac1, zFrac0)), default_nan,\n" " (zSign !=0u && (zFrac0 | zFrac1) != 0u));\n" "}\n" "\n" "int64_t\n" "__roundAndPackInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)\n" "{\n" " bool roundNearestEven;\n" " bool increment;\n" " int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;\n" " int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;\n" "\n" " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n" "\n" " if (zFrac2 >= 0x80000000u)\n" " increment = false;\n" "\n" " if (!roundNearestEven) {\n" " if (zSign != 0u) {\n" " increment = ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&\n" " (zFrac2 != 0u));\n" " } else {\n" " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n" " (zFrac2 != 0u);\n" " }\n" " }\n" "\n" " if (increment) {\n" " __add64(zFrac0, zFrac1, 
0u, 1u, zFrac0, zFrac1);\n" " if ((zFrac0 | zFrac1) != 0u)\n" " zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);\n" " }\n" "\n" " int64_t absZ = mix(int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),\n" " -int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),\n" " (zSign != 0u));\n" " int64_t nan = mix(default_PosNaN, default_NegNaN, bool(zSign));\n" " return mix(absZ, nan, bool(zSign ^ uint(absZ < 0)) && bool(absZ));\n" "}\n" "\n" "/* Returns the number of leading 0 bits before the most-significant 1 bit of\n" " * `a'. If `a' is zero, 32 is returned.\n" " */\n" "int\n" "__countLeadingZeros32(uint a)\n" "{\n" " int shiftCount;\n" " shiftCount = mix(31 - findMSB(a), 32, a == 0u);\n" " return shiftCount;\n" "}\n" "\n" "/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',\n" " * and significand formed by the concatenation of `zSig0' and `zSig1', and\n" " * returns the proper double-precision floating-point value corresponding\n" " * to the abstract input. This routine is just like `__roundAndPackFloat64'\n" " * except that the input significand has fewer bits and does not have to be\n" " * normalized. In all cases, `zExp' must be 1 less than the \"true\" floating-\n" " * point exponent.\n" " */\n" "uint64_t\n" "__normalizeRoundAndPackFloat64(uint zSign,\n" " int zExp,\n" " uint zFrac0,\n" " uint zFrac1)\n" "{\n" " int shiftCount;\n" " uint zFrac2;\n" "\n" " if (zFrac0 == 0u) {\n" " zExp -= 32;\n" " zFrac0 = zFrac1;\n" " zFrac1 = 0u;\n" " }\n" "\n" " shiftCount = __countLeadingZeros32(zFrac0) - 11;\n" " if (0 <= shiftCount) {\n" " zFrac2 = 0u;\n" " __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);\n" " } else {\n" " __shift64ExtraRightJamming(\n" " zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);\n" " }\n" " zExp -= shiftCount;\n" " return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);\n" "}\n" "\n" "/* Takes two double-precision floating-point values `a' and `b', one of which\n" " * is a NaN, and returns the appropriate NaN result.\n" " */\n" "uint64_t\n" "__propagateFloat64NaN(uint64_t __a, uint64_t __b)\n" "{\n" " bool aIsNaN = __is_nan(__a);\n" " bool bIsNaN = __is_nan(__b);\n" " uvec2 a = unpackUint2x32(__a);\n" " uvec2 b = unpackUint2x32(__b);\n" " a.y |= 0x00080000u;\n" " b.y |= 0x00080000u;\n" "\n" " return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));\n" "}\n" "\n" "/* Returns the result of adding the double-precision floating-point values\n" " * `a' and `b'. 
The operation is performed according to the IEEE Standard for\n" " * Floating-Point Arithmetic.\n" " */\n" "uint64_t\n" "__fadd64(uint64_t a, uint64_t b)\n" "{\n" " uint aSign = __extractFloat64Sign(a);\n" " uint bSign = __extractFloat64Sign(b);\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" " uint bFracLo = __extractFloat64FracLo(b);\n" " uint bFracHi = __extractFloat64FracHi(b);\n" " int aExp = __extractFloat64Exp(a);\n" " int bExp = __extractFloat64Exp(b);\n" " uint zFrac0 = 0u;\n" " uint zFrac1 = 0u;\n" " int expDiff = aExp - bExp;\n" " if (aSign == bSign) {\n" " uint zFrac2 = 0u;\n" " int zExp;\n" " bool orig_exp_diff_is_zero = (expDiff == 0);\n" "\n" " if (orig_exp_diff_is_zero) {\n" " if (aExp == 0x7FF) {\n" " bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;\n" " return mix(a, __propagateFloat64NaN(a, b), propagate);\n" " }\n" " __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n" " if (aExp == 0)\n" " return __packFloat64(aSign, 0, zFrac0, zFrac1);\n" " zFrac2 = 0u;\n" " zFrac0 |= 0x00200000u;\n" " zExp = aExp;\n" " __shift64ExtraRightJamming(\n" " zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);\n" " } else if (0 < expDiff) {\n" " if (aExp == 0x7FF) {\n" " bool propagate = (aFracHi | aFracLo) != 0u;\n" " return mix(a, __propagateFloat64NaN(a, b), propagate);\n" " }\n" "\n" " expDiff = mix(expDiff, expDiff - 1, bExp == 0);\n" " bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);\n" " __shift64ExtraRightJamming(\n" " bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);\n" " zExp = aExp;\n" " } else if (expDiff < 0) {\n" " if (bExp == 0x7FF) {\n" " bool propagate = (bFracHi | bFracLo) != 0u;\n" " return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);\n" " }\n" " expDiff = mix(expDiff, expDiff + 1, aExp == 0);\n" " aFracHi = mix(aFracHi | 0x00100000u, aFracHi, aExp == 0);\n" " __shift64ExtraRightJamming(\n" " aFracHi, aFracLo, 0u, - expDiff, aFracHi, aFracLo, zFrac2);\n" " zExp = bExp;\n" " }\n" " if (!orig_exp_diff_is_zero) {\n" " aFracHi |= 0x00100000u;\n" " __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n" " --zExp;\n" " if (!(zFrac0 < 0x00200000u)) {\n" " __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);\n" " ++zExp;\n" " }\n" " }\n" " return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);\n" "\n" " } else {\n" " int zExp;\n" "\n" " __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);\n" " __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);\n" " if (0 < expDiff) {\n" " if (aExp == 0x7FF) {\n" " bool propagate = (aFracHi | aFracLo) != 0u;\n" " return mix(a, __propagateFloat64NaN(a, b), propagate);\n" " }\n" " expDiff = mix(expDiff, expDiff - 1, bExp == 0);\n" " bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);\n" " __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);\n" " aFracHi |= 0x40000000u;\n" " __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n" " zExp = aExp;\n" " --zExp;\n" " return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);\n" " }\n" " if (expDiff < 0) {\n" " if (bExp == 0x7FF) {\n" " bool propagate = (bFracHi | bFracLo) != 0u;\n" " return mix(__packFloat64(aSign ^ 1u, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);\n" " }\n" " expDiff = mix(expDiff, expDiff + 1, aExp == 0);\n" " aFracHi = mix(aFracHi | 0x40000000u, aFracHi, aExp == 0);\n" " __shift64RightJamming(aFracHi, aFracLo, - expDiff, aFracHi, aFracLo);\n" " 
bFracHi |= 0x40000000u;\n" " __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);\n" " zExp = bExp;\n" " aSign ^= 1u;\n" " --zExp;\n" " return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);\n" " }\n" " if (aExp == 0x7FF) {\n" " bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;\n" " return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);\n" " }\n" " bExp = mix(bExp, 1, aExp == 0);\n" " aExp = mix(aExp, 1, aExp == 0);\n" " bool zexp_normal = false;\n" " bool blta = true;\n" " if (bFracHi < aFracHi) {\n" " __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n" " zexp_normal = true;\n" " }\n" " else if (aFracHi < bFracHi) {\n" " __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);\n" " blta = false;\n" " zexp_normal = true;\n" " }\n" " else if (bFracLo < aFracLo) {\n" " __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n" " zexp_normal = true;\n" " }\n" " else if (aFracLo < bFracLo) {\n" " __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);\n" " blta = false;\n" " zexp_normal = true;\n" " }\n" " zExp = mix(bExp, aExp, blta);\n" " aSign = mix(aSign ^ 1u, aSign, blta);\n" " uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN), 0, 0u, 0u);\n" " uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);\n" " return mix(retval_0, retval_1, zexp_normal);\n" " }\n" "}\n" "\n" "/* Multiplies `a' by `b' to obtain a 64-bit product. The product is broken\n" " * into two 32-bit pieces which are stored at the locations pointed to by\n" " * `z0Ptr' and `z1Ptr'.\n" " */\n" "void\n" "__mul32To64(uint a, uint b, out uint z0Ptr, out uint z1Ptr)\n" "{\n" " uint aLow = a & 0x0000FFFFu;\n" " uint aHigh = a>>16;\n" " uint bLow = b & 0x0000FFFFu;\n" " uint bHigh = b>>16;\n" " uint z1 = aLow * bLow;\n" " uint zMiddleA = aLow * bHigh;\n" " uint zMiddleB = aHigh * bLow;\n" " uint z0 = aHigh * bHigh;\n" " zMiddleA += zMiddleB;\n" " z0 += ((uint(zMiddleA < zMiddleB)) << 16) + (zMiddleA >> 16);\n" " zMiddleA <<= 16;\n" " z1 += zMiddleA;\n" " z0 += uint(z1 < zMiddleA);\n" " z1Ptr = z1;\n" " z0Ptr = z0;\n" "}\n" "\n" "/* Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the\n" " * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit\n" " * product. The product is broken into four 32-bit pieces which are stored at\n" " * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.\n" " */\n" "void\n" "__mul64To128(uint a0, uint a1, uint b0, uint b1,\n" " out uint z0Ptr,\n" " out uint z1Ptr,\n" " out uint z2Ptr,\n" " out uint z3Ptr)\n" "{\n" " uint z0 = 0u;\n" " uint z1 = 0u;\n" " uint z2 = 0u;\n" " uint z3 = 0u;\n" " uint more1 = 0u;\n" " uint more2 = 0u;\n" "\n" " __mul32To64(a1, b1, z2, z3);\n" " __mul32To64(a1, b0, z1, more2);\n" " __add64(z1, more2, 0u, z2, z1, z2);\n" " __mul32To64(a0, b0, z0, more1);\n" " __add64(z0, more1, 0u, z1, z0, z1);\n" " __mul32To64(a0, b1, more1, more2);\n" " __add64(more1, more2, 0u, z2, more1, z2);\n" " __add64(z0, z1, 0u, more1, z0, z1);\n" " z3Ptr = z3;\n" " z2Ptr = z2;\n" " z1Ptr = z1;\n" " z0Ptr = z0;\n" "}\n" "\n" "/* Normalizes the subnormal double-precision floating-point value represented\n" " * by the denormalized significand formed by the concatenation of `aFrac0' and\n" " * `aFrac1'. The normalized exponent is stored at the location pointed to by\n" " * `zExpPtr'. 
The most significant 21 bits of the normalized significand are\n" " * stored at the location pointed to by `zFrac0Ptr', and the least significant\n" " * 32 bits of the normalized significand are stored at the location pointed to\n" " * by `zFrac1Ptr'.\n" " */\n" "void\n" "__normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,\n" " out int zExpPtr,\n" " out uint zFrac0Ptr,\n" " out uint zFrac1Ptr)\n" "{\n" " int shiftCount;\n" " uint temp_zfrac0, temp_zfrac1;\n" " shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;\n" " zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);\n" "\n" " temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);\n" " temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);\n" "\n" " __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);\n" "\n" " zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0);\n" " zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0);\n" "}\n" "\n" "/* Returns the result of multiplying the double-precision floating-point values\n" " * `a' and `b'. The operation is performed according to the IEEE Standard for\n" " * Floating-Point Arithmetic.\n" " */\n" "uint64_t\n" "__fmul64(uint64_t a, uint64_t b)\n" "{\n" " uint zFrac0 = 0u;\n" " uint zFrac1 = 0u;\n" " uint zFrac2 = 0u;\n" " uint zFrac3 = 0u;\n" " int zExp;\n" "\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" " uint bFracLo = __extractFloat64FracLo(b);\n" " uint bFracHi = __extractFloat64FracHi(b);\n" " int aExp = __extractFloat64Exp(a);\n" " uint aSign = __extractFloat64Sign(a);\n" " int bExp = __extractFloat64Exp(b);\n" " uint bSign = __extractFloat64Sign(b);\n" " uint zSign = aSign ^ bSign;\n" " if (aExp == 0x7FF) {\n" " if (((aFracHi | aFracLo) != 0u) ||\n" " ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {\n" " return __propagateFloat64NaN(a, b);\n" " }\n" " if ((uint(bExp) | bFracHi | bFracLo) == 0u)\n" " return 0xFFFFFFFFFFFFFFFFUL;\n" " return __packFloat64(zSign, 0x7FF, 0u, 0u);\n" " }\n" " if (bExp == 0x7FF) {\n" " if ((bFracHi | bFracLo) != 0u)\n" " return __propagateFloat64NaN(a, b);\n" " if ((uint(aExp) | aFracHi | aFracLo) == 0u)\n" " return 0xFFFFFFFFFFFFFFFFUL;\n" " return __packFloat64(zSign, 0x7FF, 0u, 0u);\n" " }\n" " if (aExp == 0) {\n" " if ((aFracHi | aFracLo) == 0u)\n" " return __packFloat64(zSign, 0, 0u, 0u);\n" " __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);\n" " }\n" " if (bExp == 0) {\n" " if ((bFracHi | bFracLo) == 0u)\n" " return __packFloat64(zSign, 0, 0u, 0u);\n" " __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);\n" " }\n" " zExp = aExp + bExp - 0x400;\n" " aFracHi |= 0x00100000u;\n" " __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);\n" " __mul64To128(\n" " aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);\n" " __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);\n" " zFrac2 |= uint(zFrac3 != 0u);\n" " if (0x00200000u <= zFrac0) {\n" " __shift64ExtraRightJamming(\n" " zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);\n" " ++zExp;\n" " }\n" " return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);\n" "}\n" "\n" "uint64_t\n" "__ffma64(uint64_t a, uint64_t b, uint64_t c)\n" "{\n" " return __fadd64(__fmul64(a, b), c);\n" "}\n" "\n" "/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the\n" " * number of bits given in `count'. Any bits shifted off are lost. 
The value\n" " * of `count' can be arbitrarily large; in particular, if `count' is greater\n" " * than 64, the result will be 0. The result is broken into two 32-bit pieces\n" " * which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.\n" " */\n" "void\n" "__shift64Right(uint a0, uint a1,\n" " int count,\n" " out uint z0Ptr,\n" " out uint z1Ptr)\n" "{\n" " uint z0;\n" " uint z1;\n" " int negCount = (-count) & 31;\n" "\n" " z0 = 0u;\n" " z0 = mix(z0, (a0 >> count), count < 32);\n" " z0 = mix(z0, a0, count == 0);\n" "\n" " z1 = mix(0u, (a0 >> (count & 31)), count < 64);\n" " z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);\n" " z1 = mix(z1, a0, count == 0);\n" "\n" " z1Ptr = z1;\n" " z0Ptr = z0;\n" "}\n" "\n" "/* Returns the result of converting the double-precision floating-point value\n" " * `a' to the unsigned integer format. The conversion is performed according\n" " * to the IEEE Standard for Floating-Point Arithmetic.\n" " */\n" "uint\n" "__fp64_to_uint(uint64_t a)\n" "{\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" " int aExp = __extractFloat64Exp(a);\n" " uint aSign = __extractFloat64Sign(a);\n" "\n" " if ((aExp == 0x7FF) && ((aFracHi | aFracLo) != 0u))\n" " return 0xFFFFFFFFu;\n" "\n" " aFracHi |= mix(0u, 0x00100000u, aExp != 0);\n" "\n" " int shiftDist = 0x427 - aExp;\n" " if (0 < shiftDist)\n" " __shift64RightJamming(aFracHi, aFracLo, shiftDist, aFracHi, aFracLo);\n" "\n" " if ((aFracHi & 0xFFFFF000u) != 0u)\n" " return mix(~0u, 0u, (aSign != 0u));\n" "\n" " uint z = 0u;\n" " uint zero = 0u;\n" " __shift64Right(aFracHi, aFracLo, 12, zero, z);\n" "\n" " uint expt = mix(~0u, 0u, (aSign != 0u));\n" "\n" " return mix(z, expt, (aSign != 0u) && (z != 0u));\n" "}\n" "\n" "uint64_t\n" "__uint_to_fp64(uint a)\n" "{\n" " if (a == 0u)\n" " return 0ul;\n" "\n" " int shiftDist = __countLeadingZeros32(a) + 21;\n" "\n" " uint aHigh = 0u;\n" " uint aLow = 0u;\n" " int negCount = (- shiftDist) & 31;\n" "\n" " aHigh = mix(0u, a<< shiftDist - 32, shiftDist < 64);\n" " aLow = 0u;\n" " aHigh = mix(aHigh, 0u, shiftDist == 0);\n" " aLow = mix(aLow, a, shiftDist ==0);\n" " aHigh = mix(aHigh, a >> negCount, shiftDist < 32);\n" " aLow = mix(aLow, a << shiftDist, shiftDist < 32);\n" "\n" " return __packFloat64(0u, 0x432 - shiftDist, aHigh, aLow);\n" "}\n" "\n" "uint64_t\n" "__uint64_to_fp64(uint64_t a)\n" "{\n" " if (a == 0u)\n" " return 0ul;\n" "\n" " uvec2 aFrac = unpackUint2x32(a);\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" "\n" " if ((aFracHi & 0x80000000u) != 0u) {\n" " __shift64RightJamming(aFracHi, aFracLo, 1, aFracHi, aFracLo);\n" " return __roundAndPackFloat64(0, 0x433, aFracHi, aFracLo, 0u);\n" " } else {\n" " return __normalizeRoundAndPackFloat64(0, 0x432, aFrac.y, aFrac.x);\n" " }\n" "}\n" "\n" "uint64_t\n" "__fp64_to_uint64(uint64_t a)\n" "{\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" " int aExp = __extractFloat64Exp(a);\n" " uint aSign = __extractFloat64Sign(a);\n" " uint zFrac2 = 0u;\n" " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n" "\n" " aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);\n" " int shiftCount = 0x433 - aExp;\n" "\n" " if ( shiftCount <= 0 ) {\n" " if (shiftCount < -11 && aExp == 0x7FF) {\n" " if ((aFracHi | aFracLo) != 0u)\n" " return __propagateFloat64NaN(a, a);\n" " return mix(default_nan, a, aSign == 0u);\n" " }\n" " __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);\n" " } else {\n" " 
__shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,\n" " aFracHi, aFracLo, zFrac2);\n" " }\n" " return __roundAndPackUInt64(aSign, aFracHi, aFracLo, zFrac2);\n" "}\n" "\n" "int64_t\n" "__fp64_to_int64(uint64_t a)\n" "{\n" " uint zFrac2 = 0u;\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" " int aExp = __extractFloat64Exp(a);\n" " uint aSign = __extractFloat64Sign(a);\n" " int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;\n" " int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;\n" "\n" " aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);\n" " int shiftCount = 0x433 - aExp;\n" "\n" " if (shiftCount <= 0) {\n" " if (shiftCount < -11 && aExp == 0x7FF) {\n" " if ((aFracHi | aFracLo) != 0u)\n" " return default_NegNaN;\n" " return mix(default_NegNaN, default_PosNaN, aSign == 0u);\n" " }\n" " __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);\n" " } else {\n" " __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,\n" " aFracHi, aFracLo, zFrac2);\n" " }\n" "\n" " return __roundAndPackInt64(aSign, aFracHi, aFracLo, zFrac2);\n" "}\n" "\n" "uint64_t\n" "__fp32_to_uint64(float f)\n" "{\n" " uint a = floatBitsToUint(f);\n" " uint aFrac = a & 0x007FFFFFu;\n" " int aExp = int((a>>23) & 0xFFu);\n" " uint aSign = a>>31;\n" " uint zFrac0 = 0u;\n" " uint zFrac1 = 0u;\n" " uint zFrac2 = 0u;\n" " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n" " int shiftCount = 0xBE - aExp;\n" "\n" " if (shiftCount <0) {\n" " if (aExp == 0xFF)\n" " return default_nan;\n" " }\n" "\n" " aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);\n" " __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);\n" "\n" " if (shiftCount != 0) {\n" " __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,\n" " zFrac0, zFrac1, zFrac2);\n" " }\n" "\n" " return __roundAndPackUInt64(aSign, zFrac0, zFrac1, zFrac2);\n" "}\n" "\n" "int64_t\n" "__fp32_to_int64(float f)\n" "{\n" " uint a = floatBitsToUint(f);\n" " uint aFrac = a & 0x007FFFFFu;\n" " int aExp = int((a>>23) & 0xFFu);\n" " uint aSign = a>>31;\n" " uint zFrac0 = 0u;\n" " uint zFrac1 = 0u;\n" " uint zFrac2 = 0u;\n" " int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;\n" " int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;\n" " int shiftCount = 0xBE - aExp;\n" "\n" " if (shiftCount <0) {\n" " if (aExp == 0xFF && aFrac != 0u)\n" " return default_NegNaN;\n" " return mix(default_NegNaN, default_PosNaN, aSign == 0u);\n" " }\n" "\n" " aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);\n" " __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);\n" "\n" " if (shiftCount != 0) {\n" " __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,\n" " zFrac0, zFrac1, zFrac2);\n" " }\n" "\n" " return __roundAndPackInt64(aSign, zFrac0, zFrac1, zFrac2);\n" "}\n" "\n" "uint64_t\n" "__int64_to_fp64(int64_t a)\n" "{\n" " if (a==0)\n" " return 0ul;\n" "\n" " uint64_t absA = mix(uint64_t(a), uint64_t(-a), a < 0);\n" " uint aFracHi = __extractFloat64FracHi(absA);\n" " uvec2 aFrac = unpackUint2x32(absA);\n" " uint zSign = uint(a < 0);\n" "\n" " if ((aFracHi & 0x80000000u) != 0u) {\n" " return mix(0ul, __packFloat64(1, 0x434, 0u, 0u), a < 0);\n" " }\n" "\n" " return __normalizeRoundAndPackFloat64(zSign, 0x432, aFrac.y, aFrac.x);\n" "}\n" "\n" "/* Returns the result of converting the double-precision floating-point value\n" " * `a' to the 32-bit two's complement integer format. 
The conversion is\n" " * performed according to the IEEE Standard for Floating-Point Arithmetic---\n" " * which means in particular that the conversion is rounded according to the\n" " * current rounding mode. If `a' is a NaN, the largest positive integer is\n" " * returned. Otherwise, if the conversion overflows, the largest integer with\n" " * the same sign as `a' is returned.\n" " */\n" "int\n" "__fp64_to_int(uint64_t a)\n" "{\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" " int aExp = __extractFloat64Exp(a);\n" " uint aSign = __extractFloat64Sign(a);\n" "\n" " uint absZ = 0u;\n" " uint aFracExtra = 0u;\n" " int shiftCount = aExp - 0x413;\n" "\n" " if (0 <= shiftCount) {\n" " if (0x41E < aExp) {\n" " if ((aExp == 0x7FF) && bool(aFracHi | aFracLo))\n" " aSign = 0u;\n" " return mix(0x7FFFFFFF, 0x80000000, bool(aSign));\n" " }\n" " __shortShift64Left(aFracHi | 0x00100000u, aFracLo, shiftCount, absZ, aFracExtra);\n" " } else {\n" " if (aExp < 0x3FF)\n" " return 0;\n" "\n" " aFracHi |= 0x00100000u;\n" " aFracExtra = ( aFracHi << (shiftCount & 31)) | aFracLo;\n" " absZ = aFracHi >> (- shiftCount);\n" " }\n" "\n" " int z = mix(int(absZ), -int(absZ), (aSign != 0u));\n" " int nan = mix(0x7FFFFFFF, 0x80000000, bool(aSign));\n" " return mix(z, nan, bool(aSign ^ uint(z < 0)) && bool(z));\n" "}\n" "\n" "/* Returns the result of converting the 32-bit two's complement integer `a'\n" " * to the double-precision floating-point format. The conversion is performed\n" " * according to the IEEE Standard for Floating-Point Arithmetic.\n" " */\n" "uint64_t\n" "__int_to_fp64(int a)\n" "{\n" " uint zFrac0 = 0u;\n" " uint zFrac1 = 0u;\n" " if (a==0)\n" " return __packFloat64(0u, 0, 0u, 0u);\n" " uint zSign = uint(a < 0);\n" " uint absA = mix(uint(a), uint(-a), a < 0);\n" " int shiftCount = __countLeadingZeros32(absA) - 11;\n" " if (0 <= shiftCount) {\n" " zFrac0 = absA << shiftCount;\n" " zFrac1 = 0u;\n" " } else {\n" " __shift64Right(absA, 0u, -shiftCount, zFrac0, zFrac1);\n" " }\n" " return __packFloat64(zSign, 0x412 - shiftCount, zFrac0, zFrac1);\n" "}\n" "\n" "bool\n" "__fp64_to_bool(uint64_t a)\n" "{\n" " return !__feq64_nonnan(__fabs64(a), 0ul);\n" "}\n" "\n" "uint64_t\n" "__bool_to_fp64(bool a)\n" "{\n" " return __int_to_fp64(int(a));\n" "}\n" "\n" "/* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a\n" " * single-precision floating-point value, returning the result. After being\n" " * shifted into the proper positions, the three fields are simply added\n" " * together to form the result. This means that any integer portion of `zSig'\n" " * will be added into the exponent. Since a properly normalized significand\n" " * will have an integer portion equal to 1, the `zExp' input should be 1 less\n" " * than the desired result exponent whenever `zFrac' is a complete, normalized\n" " * significand.\n" " */\n" "float\n" "__packFloat32(uint zSign, int zExp, uint zFrac)\n" "{\n" " return uintBitsToFloat((zSign<<31) + (uint(zExp)<<23) + zFrac);\n" "}\n" "\n" "/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',\n" " * and significand `zFrac', and returns the proper single-precision floating-\n" " * point value corresponding to the abstract input. Ordinarily, the abstract\n" " * value is simply rounded and packed into the single-precision format, with\n" " * the inexact exception raised if the abstract input cannot be represented\n" " * exactly. 
However, if the abstract value is too large, the overflow and\n" " * inexact exceptions are raised and an infinity or maximal finite value is\n" " * returned. If the abstract value is too small, the input value is rounded to\n" " * a subnormal number, and the underflow and inexact exceptions are raised if\n" " * the abstract input cannot be represented exactly as a subnormal single-\n" " * precision floating-point number.\n" " * The input significand `zFrac' has its binary point between bits 30\n" " * and 29, which is 7 bits to the left of the usual location. This shifted\n" " * significand must be normalized or smaller. If `zFrac' is not normalized,\n" " * `zExp' must be 0; in that case, the result returned is a subnormal number,\n" " * and it must not require rounding. In the usual case that `zFrac' is\n" " * normalized, `zExp' must be 1 less than the \"true\" floating-point exponent.\n" " * The handling of underflow and overflow follows the IEEE Standard for\n" " * Floating-Point Arithmetic.\n" " */\n" "float\n" "__roundAndPackFloat32(uint zSign, int zExp, uint zFrac)\n" "{\n" " bool roundNearestEven;\n" " int roundIncrement;\n" " int roundBits;\n" "\n" " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n" " roundIncrement = 0x40;\n" " if (!roundNearestEven) {\n" " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {\n" " roundIncrement = 0;\n" " } else {\n" " roundIncrement = 0x7F;\n" " if (zSign != 0u) {\n" " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)\n" " roundIncrement = 0;\n" " } else {\n" " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)\n" " roundIncrement = 0;\n" " }\n" " }\n" " }\n" " roundBits = int(zFrac & 0x7Fu);\n" " if (0xFDu <= uint(zExp)) {\n" " if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))\n" " return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);\n" " int count = -zExp;\n" " bool zexp_lt0 = zExp < 0;\n" " uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);\n" " zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);\n" " roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);\n" " zExp = mix(zExp, 0, zexp_lt0);\n" " }\n" " zFrac = (zFrac + uint(roundIncrement))>>7;\n" " zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);\n" "\n" " return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);\n" "}\n" "\n" "/* Returns the result of converting the double-precision floating-point value\n" " * `a' to the single-precision floating-point format. 
The conversion is\n" " * performed according to the IEEE Standard for Floating-Point Arithmetic.\n" " */\n" "float\n" "__fp64_to_fp32(uint64_t __a)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " uint zFrac = 0u;\n" " uint allZero = 0u;\n" "\n" " uint aFracLo = __extractFloat64FracLo(__a);\n" " uint aFracHi = __extractFloat64FracHi(__a);\n" " int aExp = __extractFloat64Exp(__a);\n" " uint aSign = __extractFloat64Sign(__a);\n" " if (aExp == 0x7FF) {\n" " __shortShift64Left(a.y, a.x, 12, a.y, a.x);\n" " float rval = uintBitsToFloat((aSign<<31) | 0x7FC00000u | (a.y>>9));\n" " rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);\n" " return rval;\n" " }\n" " __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);\n" " zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);\n" " return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);\n" "}\n" "\n" "float\n" "__uint64_to_fp32(uint64_t __a)\n" "{\n" " uint zFrac = 0u;\n" " uvec2 aFrac = unpackUint2x32(__a);\n" " int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));\n" " shiftCount -= mix(40, 8, aFrac.y == 0u);\n" "\n" " if (0 <= shiftCount) {\n" " __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);\n" " bool is_zero = (aFrac.y | aFrac.x) == 0u;\n" " return mix(__packFloat32(0u, 0x95 - shiftCount, aFrac.x), 0, is_zero);\n" " }\n" "\n" " shiftCount += 7;\n" " __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);\n" " zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);\n" " return __roundAndPackFloat32(0u, 0x9C - shiftCount, zFrac);\n" "}\n" "\n" "uint64_t\n" "__fp32_to_fp64(float f)\n" "{\n" " uint a = floatBitsToUint(f);\n" " uint aFrac = a & 0x007FFFFFu;\n" " int aExp = int((a>>23) & 0xFFu);\n" " uint aSign = a>>31;\n" " uint zFrac0 = 0u;\n" " uint zFrac1 = 0u;\n" "\n" " if (aExp == 0xFF) {\n" " if (aFrac != 0u) {\n" " uint nanLo = 0u;\n" " uint nanHi = a<<9;\n" " __shift64Right(nanHi, nanLo, 12, nanHi, nanLo);\n" " nanHi |= ((aSign<<31) | 0x7FF80000u);\n" " return packUint2x32(uvec2(nanLo, nanHi));\n" " }\n" " return __packFloat64(aSign, 0x7FF, 0u, 0u);\n" " }\n" "\n" " if (aExp == 0) {\n" " if (aFrac == 0u)\n" " return __packFloat64(aSign, 0, 0u, 0u);\n" " /* Normalize subnormal */\n" " int shiftCount = __countLeadingZeros32(aFrac) - 8;\n" " aFrac <<= shiftCount;\n" " aExp = 1 - shiftCount;\n" " --aExp;\n" " }\n" "\n" " __shift64Right(aFrac, 0u, 3, zFrac0, zFrac1);\n" " return __packFloat64(aSign, aExp + 0x380, zFrac0, zFrac1);\n" "}\n" "\n" "/* Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the\n" " * 96-bit value formed by concatenating `b0', `b1', and `b2'. Addition is\n" " * modulo 2^96, so any carry out is lost. The result is broken into three\n" " * 32-bit pieces which are stored at the locations pointed to by `z0Ptr',\n" " * `z1Ptr', and `z2Ptr'.\n" " */\n" "void\n" "__add96(uint a0, uint a1, uint a2,\n" " uint b0, uint b1, uint b2,\n" " out uint z0Ptr,\n" " out uint z1Ptr,\n" " out uint z2Ptr)\n" "{\n" " uint z2 = a2 + b2;\n" " uint carry1 = uint(z2 < a2);\n" " uint z1 = a1 + b1;\n" " uint carry0 = uint(z1 < a1);\n" " uint z0 = a0 + b0;\n" " z1 += carry1;\n" " z0 += uint(z1 < carry1);\n" " z0 += carry0;\n" " z2Ptr = z2;\n" " z1Ptr = z1;\n" " z0Ptr = z0;\n" "}\n" "\n" "/* Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2' from\n" " * the 96-bit value formed by concatenating `a0', `a1', and `a2'. Subtraction\n" " * is modulo 2^96, so any borrow out (carry out) is lost. 
The result is broken\n" " * into three 32-bit pieces which are stored at the locations pointed to by\n" " * `z0Ptr', `z1Ptr', and `z2Ptr'.\n" " */\n" "void\n" "__sub96(uint a0, uint a1, uint a2,\n" " uint b0, uint b1, uint b2,\n" " out uint z0Ptr,\n" " out uint z1Ptr,\n" " out uint z2Ptr)\n" "{\n" " uint z2 = a2 - b2;\n" " uint borrow1 = uint(a2 < b2);\n" " uint z1 = a1 - b1;\n" " uint borrow0 = uint(a1 < b1);\n" " uint z0 = a0 - b0;\n" " z0 -= uint(z1 < borrow1);\n" " z1 -= borrow1;\n" " z0 -= borrow0;\n" " z2Ptr = z2;\n" " z1Ptr = z1;\n" " z0Ptr = z0;\n" "}\n" "\n" "/* Returns an approximation to the 32-bit integer quotient obtained by dividing\n" " * `b' into the 64-bit value formed by concatenating `a0' and `a1'. The\n" " * divisor `b' must be at least 2^31. If q is the exact quotient truncated\n" " * toward zero, the approximation returned lies between q and q + 2 inclusive.\n" " * If the exact quotient q is larger than 32 bits, the maximum positive 32-bit\n" " * unsigned integer is returned.\n" " */\n" "uint\n" "__estimateDiv64To32(uint a0, uint a1, uint b)\n" "{\n" " uint b0;\n" " uint b1;\n" " uint rem0 = 0u;\n" " uint rem1 = 0u;\n" " uint term0 = 0u;\n" " uint term1 = 0u;\n" " uint z;\n" "\n" " if (b <= a0)\n" " return 0xFFFFFFFFu;\n" " b0 = b>>16;\n" " z = (b0<<16 <= a0) ? 0xFFFF0000u : (a0 / b0)<<16;\n" " __mul32To64(b, z, term0, term1);\n" " __sub64(a0, a1, term0, term1, rem0, rem1);\n" " while (int(rem0) < 0) {\n" " z -= 0x10000u;\n" " b1 = b<<16;\n" " __add64(rem0, rem1, b0, b1, rem0, rem1);\n" " }\n" " rem0 = (rem0<<16) | (rem1>>16);\n" " z |= (b0<<16 <= rem0) ? 0xFFFFu : rem0 / b0;\n" " return z;\n" "}\n" "\n" "uint\n" "__sqrtOddAdjustments(int index)\n" "{\n" " uint res = 0u;\n" " if (index == 0)\n" " res = 0x0004u;\n" " if (index == 1)\n" " res = 0x0022u;\n" " if (index == 2)\n" " res = 0x005Du;\n" " if (index == 3)\n" " res = 0x00B1u;\n" " if (index == 4)\n" " res = 0x011Du;\n" " if (index == 5)\n" " res = 0x019Fu;\n" " if (index == 6)\n" " res = 0x0236u;\n" " if (index == 7)\n" " res = 0x02E0u;\n" " if (index == 8)\n" " res = 0x039Cu;\n" " if (index == 9)\n" " res = 0x0468u;\n" " if (index == 10)\n" " res = 0x0545u;\n" " if (index == 11)\n" " res = 0x631u;\n" " if (index == 12)\n" " res = 0x072Bu;\n" " if (index == 13)\n" " res = 0x0832u;\n" " if (index == 14)\n" " res = 0x0946u;\n" " if (index == 15)\n" " res = 0x0A67u;\n" "\n" " return res;\n" "}\n" "\n" "uint\n" "__sqrtEvenAdjustments(int index)\n" "{\n" " uint res = 0u;\n" " if (index == 0)\n" " res = 0x0A2Du;\n" " if (index == 1)\n" " res = 0x08AFu;\n" " if (index == 2)\n" " res = 0x075Au;\n" " if (index == 3)\n" " res = 0x0629u;\n" " if (index == 4)\n" " res = 0x051Au;\n" " if (index == 5)\n" " res = 0x0429u;\n" " if (index == 6)\n" " res = 0x0356u;\n" " if (index == 7)\n" " res = 0x029Eu;\n" " if (index == 8)\n" " res = 0x0200u;\n" " if (index == 9)\n" " res = 0x0179u;\n" " if (index == 10)\n" " res = 0x0109u;\n" " if (index == 11)\n" " res = 0x00AFu;\n" " if (index == 12)\n" " res = 0x0068u;\n" " if (index == 13)\n" " res = 0x0034u;\n" " if (index == 14)\n" " res = 0x0012u;\n" " if (index == 15)\n" " res = 0x0002u;\n" "\n" " return res;\n" "}\n" "\n" "/* Returns an approximation to the square root of the 32-bit significand given\n" " * by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of\n" " * `aExp' (the least significant bit) is 1, the integer returned approximates\n" " * 2^31*sqrt(`a'/2^31), where `a' is considered an integer. 
If bit 0 of `aExp'\n" " * is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either\n" " * case, the approximation returned lies strictly within +/-2 of the exact\n" " * value.\n" " */\n" "uint\n" "__estimateSqrt32(int aExp, uint a)\n" "{\n" " uint z;\n" "\n" " int index = int(a>>27 & 15u);\n" " if ((aExp & 1) != 0) {\n" " z = 0x4000u + (a>>17) - __sqrtOddAdjustments(index);\n" " z = ((a / z)<<14) + (z<<15);\n" " a >>= 1;\n" " } else {\n" " z = 0x8000u + (a>>17) - __sqrtEvenAdjustments(index);\n" " z = a / z + z;\n" " z = (0x20000u <= z) ? 0xFFFF8000u : (z<<15);\n" " if (z <= a)\n" " return uint(int(a)>>1);\n" " }\n" " return ((__estimateDiv64To32(a, 0u, z))>>1) + (z>>1);\n" "}\n" "\n" "/* Returns the square root of the double-precision floating-point value `a'.\n" " * The operation is performed according to the IEEE Standard for Floating-Point\n" " * Arithmetic.\n" " */\n" "uint64_t\n" "__fsqrt64(uint64_t a)\n" "{\n" " uint zFrac0 = 0u;\n" " uint zFrac1 = 0u;\n" " uint zFrac2 = 0u;\n" " uint doubleZFrac0 = 0u;\n" " uint rem0 = 0u;\n" " uint rem1 = 0u;\n" " uint rem2 = 0u;\n" " uint rem3 = 0u;\n" " uint term0 = 0u;\n" " uint term1 = 0u;\n" " uint term2 = 0u;\n" " uint term3 = 0u;\n" " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n" "\n" " uint aFracLo = __extractFloat64FracLo(a);\n" " uint aFracHi = __extractFloat64FracHi(a);\n" " int aExp = __extractFloat64Exp(a);\n" " uint aSign = __extractFloat64Sign(a);\n" " if (aExp == 0x7FF) {\n" " if ((aFracHi | aFracLo) != 0u)\n" " return __propagateFloat64NaN(a, a);\n" " if (aSign == 0u)\n" " return a;\n" " return default_nan;\n" " }\n" " if (aSign != 0u) {\n" " if ((uint(aExp) | aFracHi | aFracLo) == 0u)\n" " return a;\n" " return default_nan;\n" " }\n" " if (aExp == 0) {\n" " if ((aFracHi | aFracLo) == 0u)\n" " return __packFloat64(0u, 0, 0u, 0u);\n" " __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);\n" " }\n" " int zExp = ((aExp - 0x3FF)>>1) + 0x3FE;\n" " aFracHi |= 0x00100000u;\n" " __shortShift64Left(aFracHi, aFracLo, 11, term0, term1);\n" " zFrac0 = (__estimateSqrt32(aExp, term0)>>1) + 1u;\n" " if (zFrac0 == 0u)\n" " zFrac0 = 0x7FFFFFFFu;\n" " doubleZFrac0 = zFrac0 + zFrac0;\n" " __shortShift64Left(aFracHi, aFracLo, 9 - (aExp & 1), aFracHi, aFracLo);\n" " __mul32To64(zFrac0, zFrac0, term0, term1);\n" " __sub64(aFracHi, aFracLo, term0, term1, rem0, rem1);\n" " while (int(rem0) < 0) {\n" " --zFrac0;\n" " doubleZFrac0 -= 2u;\n" " __add64(rem0, rem1, 0u, doubleZFrac0 | 1u, rem0, rem1);\n" " }\n" " zFrac1 = __estimateDiv64To32(rem1, 0u, doubleZFrac0);\n" " if ((zFrac1 & 0x1FFu) <= 5u) {\n" " if (zFrac1 == 0u)\n" " zFrac1 = 1u;\n" " __mul32To64(doubleZFrac0, zFrac1, term1, term2);\n" " __sub64(rem1, 0u, term1, term2, rem1, rem2);\n" " __mul32To64(zFrac1, zFrac1, term2, term3);\n" " __sub96(rem1, rem2, 0u, 0u, term2, term3, rem1, rem2, rem3);\n" " while (int(rem1) < 0) {\n" " --zFrac1;\n" " __shortShift64Left(0u, zFrac1, 1, term2, term3);\n" " term3 |= 1u;\n" " term2 |= doubleZFrac0;\n" " __add96(rem1, rem2, rem3, 0u, term2, term3, rem1, rem2, rem3);\n" " }\n" " zFrac1 |= uint((rem1 | rem2 | rem3) != 0u);\n" " }\n" " __shift64ExtraRightJamming(zFrac0, zFrac1, 0u, 10, zFrac0, zFrac1, zFrac2);\n" " return __roundAndPackFloat64(0u, zExp, zFrac0, zFrac1, zFrac2);\n" "}\n" "\n" "uint64_t\n" "__ftrunc64(uint64_t __a)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " int aExp = __extractFloat64Exp(__a);\n" " uint zLo;\n" " uint zHi;\n" "\n" " int unbiasedExp = aExp - 1023;\n" " int fracBits = 52 - unbiasedExp;\n" " uint 
maskLo = mix(~0u << fracBits, 0u, fracBits >= 32);\n" " uint maskHi = mix(~0u << (fracBits - 32), ~0u, fracBits < 33);\n" " zLo = maskLo & a.x;\n" " zHi = maskHi & a.y;\n" "\n" " zLo = mix(zLo, 0u, unbiasedExp < 0);\n" " zHi = mix(zHi, 0u, unbiasedExp < 0);\n" " zLo = mix(zLo, a.x, unbiasedExp > 52);\n" " zHi = mix(zHi, a.y, unbiasedExp > 52);\n" " return packUint2x32(uvec2(zLo, zHi));\n" "}\n" "\n" "uint64_t\n" "__ffloor64(uint64_t a)\n" "{\n" " bool is_positive = __fge64(a, 0ul);\n" " uint64_t tr = __ftrunc64(a);\n" "\n" " if (is_positive || __feq64(tr, a)) {\n" " return tr;\n" " } else {\n" " return __fadd64(tr, 0xbff0000000000000ul /* -1.0 */);\n" " }\n" "}\n" "\n" "uint64_t\n" "__fround64(uint64_t __a)\n" "{\n" " uvec2 a = unpackUint2x32(__a);\n" " int unbiasedExp = __extractFloat64Exp(__a) - 1023;\n" " uint aHi = a.y;\n" " uint aLo = a.x;\n" "\n" " if (unbiasedExp < 20) {\n" " if (unbiasedExp < 0) {\n" " if ((aHi & 0x80000000u) != 0u && aLo == 0u) {\n" " return 0;\n" " }\n" " aHi &= 0x80000000u;\n" " if ((a.y & 0x000FFFFFu) == 0u && a.x == 0u) {\n" " aLo = 0u;\n" " return packUint2x32(uvec2(aLo, aHi));\n" " }\n" " aHi = mix(aHi, (aHi | 0x3FF00000u), unbiasedExp == -1);\n" " aLo = 0u;\n" " } else {\n" " uint maskExp = 0x000FFFFFu >> unbiasedExp;\n" " uint lastBit = maskExp + 1;\n" " aHi += 0x00080000u >> unbiasedExp;\n" " if ((aHi & maskExp) == 0u)\n" " aHi &= ~lastBit;\n" " aHi &= ~maskExp;\n" " aLo = 0u;\n" " }\n" " } else if (unbiasedExp > 51 || unbiasedExp == 1024) {\n" " return __a;\n" " } else {\n" " uint maskExp = 0xFFFFFFFFu >> (unbiasedExp - 20);\n" " if ((aLo & maskExp) == 0u)\n" " return __a;\n" " uint tmp = aLo + (1u << (51 - unbiasedExp));\n" " if(tmp < aLo)\n" " aHi += 1u;\n" " aLo = tmp;\n" " aLo &= ~maskExp;\n" " }\n" "\n" " return packUint2x32(uvec2(aLo, aHi));\n" "}\n" "\n" "uint64_t\n" "__fmin64(uint64_t a, uint64_t b)\n" "{\n" " if (__is_nan(a)) return b;\n" " if (__is_nan(b)) return a;\n" "\n" " if (__flt64_nonnan(a, b)) return a;\n" " return b;\n" "}\n" "\n" "uint64_t\n" "__fmax64(uint64_t a, uint64_t b)\n" "{\n" " if (__is_nan(a)) return b;\n" " if (__is_nan(b)) return a;\n" "\n" " if (__flt64_nonnan(a, b)) return b;\n" " return a;\n" "}\n" "\n" "uint64_t\n" "__ffract64(uint64_t a)\n" "{\n" " return __fadd64(a, __fneg64(__ffloor64(a)));\n" "}\n" "" ;
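/* Illustrative usage sketch (not part of the generated GLSL above): one
 * plausible way to use float64_source is to prepend it to a shader and lower
 * double-precision operations into calls to these helpers, which operate on
 * doubles stored as uint64_t bit patterns, e.g.:
 *
 *     uint64_t x_bits, y_bits, z_bits;   // hypothetical packed doubles
 *     uint64_t r  = __fadd64(x_bits, __fmul64(y_bits, z_bits)); // x + y*z
 *     bool     lt = __flt64(r, 0ul);     // IEEE "less than" against +0.0
 *
 * Only the helper names and signatures come from the source string above; the
 * variable names and the lowering mechanism are assumptions.
 */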