summaryrefslogtreecommitdiff
path: root/libfixmath
diff options
context:
space:
mode:
authorXavi Del Campo <xavi.dcr@tutanota.com>2020-01-31 10:32:23 +0100
committerXavi Del Campo <xavi.dcr@tutanota.com>2020-01-31 10:32:23 +0100
commit7c24e9a9b02b04dcaf9507acb94091ea70a2c02d (patch)
treec28d0748652ad4b4222309e46e6cfc82c0906220 /libfixmath
parenta2b7b6bb1cc2f4a3258b7b2dbc92399d151f864d (diff)
Imported pristine psxsdk-20190410 from official repo
Diffstat (limited to 'libfixmath')
-rwxr-xr-xlibfixmath/Makefile50
-rwxr-xr-xlibfixmath/fix16.c484
-rwxr-xr-xlibfixmath/fix16_exp.c48
-rwxr-xr-xlibfixmath/fix16_sqrt.c83
-rwxr-xr-xlibfixmath/fix16_trig.c171
-rwxr-xr-xlibfixmath/fixmath.h23
-rwxr-xr-xlibfixmath/fixmath/fix16.h167
-rwxr-xr-xlibfixmath/fixmath/fract32.h38
-rwxr-xr-xlibfixmath/fixmath/int64.h162
-rwxr-xr-xlibfixmath/fixmath/uint32.h19
-rwxr-xr-xlibfixmath/fract32.c27
-rwxr-xr-xlibfixmath/uint32.c15
12 files changed, 1287 insertions, 0 deletions
diff --git a/libfixmath/Makefile b/libfixmath/Makefile
new file mode 100755
index 0000000..e490e92
--- /dev/null
+++ b/libfixmath/Makefile
@@ -0,0 +1,50 @@
+include ../Makefile.cfg
+
+#Project settings
+PROJECT = libfixmath
+LIB =
+SRC = .
+INC =
+
+#Compiler settings
+AR = mipsel-unknown-elf-ar
+CPP_FLAGS = $(CFLAGS) -O2 $(INC) -c
+CC_FLAGS = $(CFLAGS) -O2 $(INC) -c
+AS_FLAGS = $(CC_FLAGS) -D_ASSEMBLER_
+LD_FLAGS = -Wall
+
+# Find all source files
+SRC_CPP = $(foreach dir, $(SRC), $(wildcard $(dir)/*.cpp))
+SRC_C = $(foreach dir, $(SRC), $(wildcard $(dir)/*.c))
+SRC_S = $(foreach dir, $(SRC), $(wildcard $(dir)/*.S))
+OBJ_CPP = $(patsubst %.cpp, %.o, $(SRC_CPP))
+OBJ_C = $(patsubst %.c, %.o, $(SRC_C))
+OBJ_S = $(patsubst %.S, %.o, $(SRC_S))
+OBJ = $(OBJ_CPP) $(OBJ_C) $(OBJ_S)
+
+# Compile rules.
+.PHONY : all
+all: $(PROJECT).a
+
+$(PROJECT).a: $(OBJ)
+ $(AR) rcs $(PROJECT).a $(OBJ)
+
+$(OBJ_CPP) : %.o : %.cpp
+ $(CPP) $(CPP_FLAGS) -o $@ $<
+
+$(OBJ_C) : %.o : %.c
+ $(CC) $(CC_FLAGS) -Ifixmath -o $@ $<
+
+$(OBJ_S) : %.o : %.S
+ $(AS) $(AS_FLAGS) -o $@ $<
+
+install:
+ cp libfixmath.a $(TOOLCHAIN_PREFIX)/lib
+ mkdir -p $(TOOLCHAIN_PREFIX)/include/fixmath
+ cp fixmath/*.h $(TOOLCHAIN_PREFIX)/include/fixmath
+ cp fixmath.h $(TOOLCHAIN_PREFIX)/include
+
+# Clean rules
+.PHONY : clean
+clean:
+ rm -f $(PROJECT).a $(OBJ)
diff --git a/libfixmath/fix16.c b/libfixmath/fix16.c
new file mode 100755
index 0000000..72e1b4a
--- /dev/null
+++ b/libfixmath/fix16.c
@@ -0,0 +1,484 @@
+#include "fix16.h"
+#include "int64.h"
+
+
+/* Subtraction and addition with overflow detection.
+ * The versions without overflow detection are inlined in the header.
+ */
+#ifndef FIXMATH_NO_OVERFLOW
+fix16_t fix16_add(fix16_t a, fix16_t b)
+{
+ // Use unsigned integers because overflow with signed integers is
+ // an undefined operation (http://www.airs.com/blog/archives/120).
+ uint32_t _a = a, _b = b;
+ uint32_t sum = _a + _b;
+
+ // Overflow can only happen if sign of a == sign of b, and then
+ // it causes sign of sum != sign of a.
+ if (!((_a ^ _b) & 0x80000000) && ((_a ^ sum) & 0x80000000))
+ return fix16_overflow;
+
+ return sum;
+}
+
+fix16_t fix16_sub(fix16_t a, fix16_t b)
+{
+ uint32_t _a = a, _b = b;
+ uint32_t diff = _a - _b;
+
+ // Overflow can only happen if sign of a != sign of b, and then
+ // it causes sign of diff != sign of a.
+ if (((_a ^ _b) & 0x80000000) && ((_a ^ diff) & 0x80000000))
+ return fix16_overflow;
+
+ return diff;
+}
+
+/* Saturating arithmetic */
+fix16_t fix16_sadd(fix16_t a, fix16_t b)
+{
+ fix16_t result = fix16_add(a, b);
+
+ if (result == fix16_overflow)
+ return (a > 0) ? fix16_max : fix16_min;
+
+ return result;
+}
+
+fix16_t fix16_ssub(fix16_t a, fix16_t b)
+{
+ fix16_t result = fix16_sub(a, b);
+
+ if (result == fix16_overflow)
+ return (a > 0) ? fix16_max : fix16_min;
+
+ return result;
+}
+#endif
+
+
+
+/* 64-bit implementation for fix16_mul. Fastest version for e.g. ARM Cortex M3.
+ * Performs a 32*32 -> 64bit multiplication. The middle 32 bits are the result,
+ * bottom 16 bits are used for rounding, and upper 16 bits are used for overflow
+ * detection.
+ */
+
+#if !defined(FIXMATH_NO_64BIT) && !defined(FIXMATH_OPTIMIZE_8BIT)
/* Q16.16 multiply using a single 32x32 -> 64-bit product.
 * The middle 32 bits of the product are the result, the bottom 16 bits
 * are used for rounding, and the top 16 bits (plus the sign bit) are
 * used for overflow detection. */
fix16_t fix16_mul(fix16_t inArg0, fix16_t inArg1)
{
	int64_t product = (int64_t)inArg0 * inArg1;

	#ifndef FIXMATH_NO_OVERFLOW
	// The upper 17 bits should all be the same (the sign).
	uint32_t upper = (product >> 47);
	#endif

	if (product < 0)
	{
		#ifndef FIXMATH_NO_OVERFLOW
		// Negative product: the top 17 bits must all be ones, i.e.
		// 'upper' must be 0xFFFFFFFF, making ~upper zero.
		if (~upper)
			return fix16_overflow;
		#endif

		#ifndef FIXMATH_NO_ROUNDING
		// This adjustment is required in order to round -1/2 correctly
		product--;
		#endif
	}
	else
	{
		#ifndef FIXMATH_NO_OVERFLOW
		// Non-negative product: the top 17 bits must all be zero.
		if (upper)
			return fix16_overflow;
		#endif
	}

	#ifdef FIXMATH_NO_ROUNDING
	return product >> 16;
	#else
	// Round to nearest by adding bit 15 of the product to the
	// truncated result (combined with the product-- above, this rounds
	// halves away from the truncation direction correctly).
	fix16_t result = product >> 16;
	result += (product & 0x8000) >> 15;

	return result;
	#endif
}
+#endif
+
+/* 32-bit implementation of fix16_mul. Potentially fast on 16-bit processors,
+ * and this is a relatively good compromise for compilers that do not support
+ * uint64_t. Uses 16*16->32bit multiplications.
+ */
+#if defined(FIXMATH_NO_64BIT) && !defined(FIXMATH_OPTIMIZE_8BIT)
/* Q16.16 multiply built from 16x16 -> 32-bit partial products, for
 * compilers/targets without 64-bit support.  The full 64-bit product is
 * assembled in (product_hi, product_lo). */
fix16_t fix16_mul(fix16_t inArg0, fix16_t inArg1)
{
	// Each argument is divided to 16-bit parts.
	//					AB
	//			*	 CD
	// -----------
	//					BD	16 * 16 -> 32 bit products
	//				 CB
	//				 AD
	//				AC
	//			 |----| 64 bit product
	int32_t A = (inArg0 >> 16), C = (inArg1 >> 16);
	uint32_t B = (inArg0 & 0xFFFF), D = (inArg1 & 0xFFFF);

	// NOTE(review): A*D and C*B mix signed and unsigned 32-bit operands;
	// the wrap-around behavior appears intentional (two's-complement
	// partial products) -- confirm against the reference implementation.
	int32_t AC = A*C;
	int32_t AD_CB = A*D + C*B;
	uint32_t BD = B*D;

	int32_t product_hi = AC + (AD_CB >> 16);

	// Handle carry from lower 32 bits to upper part of result.
	uint32_t ad_cb_temp = AD_CB << 16;
	uint32_t product_lo = BD + ad_cb_temp;
	// Unsigned wrap check: the addition carried iff the sum is smaller
	// than one of its operands.
	if (product_lo < BD)
		product_hi++;

#ifndef FIXMATH_NO_OVERFLOW
	// The upper 17 bits should all be the same (the sign).
	if (product_hi >> 31 != product_hi >> 15)
		return fix16_overflow;
#endif

#ifdef FIXMATH_NO_ROUNDING
	return (product_hi << 16) | (product_lo >> 16);
#else
	// Subtracting 0x8000 (= 0.5) and then using signed right shift
	// achieves proper rounding to result-1, except in the corner
	// case of negative numbers and lowest word = 0x8000.
	// To handle that, we also have to subtract 1 for negative numbers.
	uint32_t product_lo_tmp = product_lo;
	product_lo -= 0x8000;
	product_lo -= (uint32_t)product_hi >> 31;
	// Borrow out of the low word propagates into product_hi.
	if (product_lo > product_lo_tmp)
		product_hi--;

	// Discard the lowest 16 bits. Note that this is not exactly the same
	// as dividing by 0x10000. For example if product = -1, result will
	// also be -1 and not 0. This is compensated by adding +1 to the result
	// and compensating this in turn in the rounding above.
	fix16_t result = (product_hi << 16) | (product_lo >> 16);
	result += 1;
	return result;
#endif
}
+#endif
+
+/* 8-bit implementation of fix16_mul. Fastest on e.g. Atmel AVR.
+ * Uses 8*8->16bit multiplications, and also skips any bytes that
+ * are zero.
+ */
+#if defined(FIXMATH_OPTIMIZE_8BIT)
/* Q16.16 multiply built from 8x8 -> 16-bit partial products (fast on
 * 8-bit MCUs).  Works on magnitudes and re-applies the sign at the end;
 * zero bytes are skipped entirely. */
fix16_t fix16_mul(fix16_t inArg0, fix16_t inArg1)
{
	// NOTE(review): -inArg0 / -inArg1 overflows for fix16_min input;
	// presumably callers avoid fix16_min here -- confirm.
	uint32_t _a = (inArg0 >= 0) ? inArg0 : (-inArg0);
	uint32_t _b = (inArg1 >= 0) ? inArg1 : (-inArg1);

	// Split each magnitude into 4 bytes, least significant first.
	uint8_t va[4] = {_a, (_a >> 8), (_a >> 16), (_a >> 24)};
	uint8_t vb[4] = {_b, (_b >> 8), (_b >> 16), (_b >> 24)};

	// 'low' accumulates byte-columns 0-1 (below the Q16.16 window),
	// 'mid' accumulates columns 2-5 (the result plus overflow bytes).
	uint32_t low = 0;
	uint32_t mid = 0;

	// Result column i depends on va[0..i] and vb[i..0]

	#ifndef FIXMATH_NO_OVERFLOW
	// i = 6
	if (va[3] && vb[3]) return fix16_overflow;
	#endif

	// i = 5
	if (va[2] && vb[3]) mid += (uint16_t)va[2] * vb[3];
	if (va[3] && vb[2]) mid += (uint16_t)va[3] * vb[2];
	mid <<= 8;

	// i = 4
	if (va[1] && vb[3]) mid += (uint16_t)va[1] * vb[3];
	if (va[2] && vb[2]) mid += (uint16_t)va[2] * vb[2];
	if (va[3] && vb[1]) mid += (uint16_t)va[3] * vb[1];

	#ifndef FIXMATH_NO_OVERFLOW
	// Anything left in the top byte after this column cannot fit.
	if (mid & 0xFF000000) return fix16_overflow;
	#endif
	mid <<= 8;

	// i = 3
	if (va[0] && vb[3]) mid += (uint16_t)va[0] * vb[3];
	if (va[1] && vb[2]) mid += (uint16_t)va[1] * vb[2];
	if (va[2] && vb[1]) mid += (uint16_t)va[2] * vb[1];
	if (va[3] && vb[0]) mid += (uint16_t)va[3] * vb[0];

	#ifndef FIXMATH_NO_OVERFLOW
	if (mid & 0xFF000000) return fix16_overflow;
	#endif
	mid <<= 8;

	// i = 2
	if (va[0] && vb[2]) mid += (uint16_t)va[0] * vb[2];
	if (va[1] && vb[1]) mid += (uint16_t)va[1] * vb[1];
	if (va[2] && vb[0]) mid += (uint16_t)va[2] * vb[0];

	// i = 1
	if (va[0] && vb[1]) low += (uint16_t)va[0] * vb[1];
	if (va[1] && vb[0]) low += (uint16_t)va[1] * vb[0];
	low <<= 8;

	// i = 0
	if (va[0] && vb[0]) low += (uint16_t)va[0] * vb[0];

	#ifndef FIXMATH_NO_ROUNDING
	// Round to nearest by adding 0.5 ulp before discarding low bits.
	low += 0x8000;
	#endif
	// Carry the top 16 bits of 'low' into the result window.
	mid += (low >> 16);

	#ifndef FIXMATH_NO_OVERFLOW
	// Magnitude must fit in 31 bits so the sign can be applied below.
	if (mid & 0x80000000)
		return fix16_overflow;
	#endif

	fix16_t result = mid;

	/* Figure out the sign of result */
	if ((inArg0 >= 0) != (inArg1 >= 0))
	{
		result = -result;
	}

	return result;
}
+#endif
+
+#ifndef FIXMATH_NO_OVERFLOW
+/* Wrapper around fix16_mul to add saturating arithmetic. */
+fix16_t fix16_smul(fix16_t inArg0, fix16_t inArg1) {
+ fix16_t result = fix16_mul(inArg0, inArg1);
+
+ if (result == fix16_overflow)
+ {
+ if ((inArg0 >= 0) == (inArg1 >= 0))
+ return fix16_max;
+ else
+ return fix16_min;
+ }
+
+ return result;
+}
+#endif
+
+/* 32-bit implementation of fix16_div. Fastest version for e.g. ARM Cortex M3.
+ * Performs 32-bit divisions repeatedly to reduce the remainder. For this to
+ * be efficient, the processor has to have 32-bit hardware division.
+ */
+#if !defined(FIXMATH_OPTIMIZE_8BIT)
#ifdef __GNUC__
// Count leading zeros of a 32-bit value, using a processor-specific
// instruction if available.
// BUG FIX: __builtin_clzl operates on unsigned long, which is 64 bits
// on LP64 targets; without correction every count came out 32 too high
// there, breaking the shift clamping in fix16_div.  Subtract the extra
// width so the result is always relative to 32 bits.  (As with the
// builtin itself, the result for x == 0 is undefined; fix16_div only
// calls this with a non-zero remainder.)
#define clz(x) (__builtin_clzl(x) - (8 * sizeof(long) - 32))
#else
static uint8_t clz(uint32_t x)
{
	uint8_t result = 0;
	if (x == 0) return 32;
	// Narrow in nibble steps, then bit steps.
	while (!(x & 0xF0000000)) { result += 4; x <<= 4; }
	while (!(x & 0x80000000)) { result += 1; x <<= 1; }
	return result;
}
#endif
+
/* Q16.16 division using repeated 32/32-bit hardware divisions.
 * Returns fix16_min when b == 0 (error sentinel) and, unless
 * FIXMATH_NO_OVERFLOW is defined, fix16_overflow when the quotient
 * does not fit.  Rounds to nearest unless FIXMATH_NO_ROUNDING. */
fix16_t fix16_div(fix16_t a, fix16_t b)
{
	// This uses a hardware 32/32 bit division multiple times, until we have
	// computed all the bits in (a<<17)/b. Usually this takes 1-3 iterations.

	if (b == 0)
		return fix16_min;

	// Work on magnitudes; the sign is re-applied at the end.
	uint32_t remainder = (a >= 0) ? a : (-a);
	uint32_t divider = (b >= 0) ? b : (-b);
	uint32_t quotient = 0;
	// 17 = 16 fractional bits plus 1 guard bit used for rounding below.
	int bit_pos = 17;

	// Kick-start the division a bit.
	// This improves speed in the worst-case scenarios where N and D are large
	// It gets a lower estimate for the result by N/(D >> 17 + 1).
	if (divider & 0xFFF00000)
	{
		uint32_t shifted_div = ((divider >> 17) + 1);
		quotient = remainder / shifted_div;
		remainder -= ((uint64_t)quotient * divider) >> 17;
	}

	// If the divider is divisible by 2^n, take advantage of it.
	while (!(divider & 0xF) && bit_pos >= 4)
	{
		divider >>= 4;
		bit_pos -= 4;
	}

	while (remainder && bit_pos >= 0)
	{
		// Shift remainder as much as we can without overflowing
		int shift = clz(remainder);
		if (shift > bit_pos) shift = bit_pos;
		remainder <<= shift;
		bit_pos -= shift;

		uint32_t div = remainder / divider;
		remainder = remainder % divider;
		quotient += div << bit_pos;

		#ifndef FIXMATH_NO_OVERFLOW
		// Quotient bits that would be shifted past bit 31 mean overflow.
		if (div & ~(0xFFFFFFFF >> bit_pos))
			return fix16_overflow;
		#endif

		remainder <<= 1;
		bit_pos--;
	}

	#ifndef FIXMATH_NO_ROUNDING
	// Quotient is always positive so rounding is easy
	quotient++;
	#endif

	// Drop the guard bit (consumed by the rounding increment above).
	fix16_t result = quotient >> 1;

	// Figure out the sign of the result
	if ((a ^ b) & 0x80000000)
	{
		#ifndef FIXMATH_NO_OVERFLOW
		// A magnitude of 0x80000000 cannot be negated into range;
		// report it as overflow.
		if (result == fix16_min)
			return fix16_overflow;
		#endif

		result = -result;
	}

	return result;
}
+#endif
+
+/* Alternative 32-bit implementation of fix16_div. Fastest on e.g. Atmel AVR.
+ * This does the division manually, and is therefore good for processors that
+ * do not have hardware division.
+ */
+#if defined(FIXMATH_OPTIMIZE_8BIT)
/* Q16.16 division via binary restoring long division -- no hardware
 * divide needed (fast on 8-bit MCUs).  Returns fix16_min for b == 0
 * and, unless FIXMATH_NO_OVERFLOW is defined, fix16_overflow when the
 * quotient does not fit. */
fix16_t fix16_div(fix16_t a, fix16_t b)
{
	// This uses the basic binary restoring division algorithm.
	// It appears to be faster to do the whole division manually than
	// trying to compose a 64-bit divide out of 32-bit divisions on
	// platforms without hardware divide.

	if (b == 0)
		return fix16_min;

	// Work on magnitudes; the sign is re-applied at the end.
	uint32_t remainder = (a >= 0) ? a : (-a);
	uint32_t divider = (b >= 0) ? b : (-b);

	uint32_t quotient = 0;
	// 'bit' starts at 2^16 because the quotient carries 16 fractional bits.
	uint32_t bit = 0x10000;

	/* The algorithm requires D >= R */
	while (divider < remainder)
	{
		divider <<= 1;
		bit <<= 1;
	}

	#ifndef FIXMATH_NO_OVERFLOW
	// 'bit' was shifted all the way out: quotient exceeds 32 bits.
	if (!bit)
		return fix16_overflow;
	#endif

	if (divider & 0x80000000)
	{
		// Perform one step manually to avoid overflows later.
		// We know that divider's bottom bit is 0 here.
		if (remainder >= divider)
		{
			quotient |= bit;
			remainder -= divider;
		}
		divider >>= 1;
		bit >>= 1;
	}

	/* Main division loop */
	while (bit && remainder)
	{
		if (remainder >= divider)
		{
			quotient |= bit;
			remainder -= divider;
		}

		remainder <<= 1;
		bit >>= 1;
	}

	#ifndef FIXMATH_NO_ROUNDING
	// Round up when the remaining fraction is at least one half.
	if (remainder >= divider)
	{
		quotient++;
	}
	#endif

	fix16_t result = quotient;

	/* Figure out the sign of result */
	if ((a ^ b) & 0x80000000)
	{
		#ifndef FIXMATH_NO_OVERFLOW
		// A magnitude of 0x80000000 cannot be negated into range;
		// report it as overflow.
		if (result == fix16_min)
			return fix16_overflow;
		#endif

		result = -result;
	}

	return result;
}
+#endif
+
+#ifndef FIXMATH_NO_OVERFLOW
+/* Wrapper around fix16_div to add saturating arithmetic. */
+fix16_t fix16_sdiv(fix16_t inArg0, fix16_t inArg1) {
+ fix16_t result = fix16_div(inArg0, inArg1);
+
+ if (result == fix16_overflow)
+ {
+ if ((inArg0 >= 0) == (inArg1 >= 0))
+ return fix16_max;
+ else
+ return fix16_min;
+ }
+
+ return result;
+}
+#endif
+
+fix16_t fix16_lerp8(fix16_t inArg0, fix16_t inArg1, uint8_t inFract) {
+ int64_t tempOut = int64_mul_i32_i32(inArg0, ((1 << 8) - inFract));
+ tempOut = int64_add(tempOut, int64_mul_i32_i32(inArg1, inFract));
+ tempOut = int64_shift(tempOut, -8);
+ return (fix16_t)int64_lo(tempOut);
+}
+
+fix16_t fix16_lerp16(fix16_t inArg0, fix16_t inArg1, uint16_t inFract) {
+ int64_t tempOut = int64_mul_i32_i32(inArg0, ((1 << 16) - inFract));
+ tempOut = int64_add(tempOut, int64_mul_i32_i32(inArg1, inFract));
+ tempOut = int64_shift(tempOut, -16);
+ return (fix16_t)int64_lo(tempOut);
+}
+
+#ifndef FIXMATH_NO_64BIT
+fix16_t fix16_lerp32(fix16_t inArg0, fix16_t inArg1, uint32_t inFract) {
+ int64_t tempOut;
+ tempOut = ((int64_t)inArg0 * (0 - inFract));
+ tempOut += ((int64_t)inArg1 * inFract);
+ tempOut >>= 32;
+ return (fix16_t)tempOut;
+}
+#endif
diff --git a/libfixmath/fix16_exp.c b/libfixmath/fix16_exp.c
new file mode 100755
index 0000000..e7e92e4
--- /dev/null
+++ b/libfixmath/fix16_exp.c
@@ -0,0 +1,48 @@
+#include "fix16.h"
+#include "int64.h"
+
+
+
+#ifndef FIXMATH_NO_CACHE
+static fix16_t _fix16_exp_cache_index[4096] = { 0 };
+static fix16_t _fix16_exp_cache_value[4096] = { 0 };
+#endif
+
+
+
+fix16_t fix16_exp(fix16_t inValue) {
+ if(inValue == 0)
+ return fix16_one;
+ if(inValue == fix16_one)
+ return fix16_e;
+ if(inValue > 681391)
+ return fix16_max;
+ if(inValue < -726817)
+ return 0;
+
+ #ifndef FIXMATH_NO_CACHE
+ fix16_t tempIndex = (inValue ^ (inValue >> 16));
+ tempIndex = (inValue ^ (inValue >> 4)) & 0x0FFF;
+ if(_fix16_exp_cache_index[tempIndex] == inValue)
+ return _fix16_exp_cache_value[tempIndex];
+ #endif
+
+ int64_t tempOut = int64_add(int64_from_int32(fix16_one), int64_from_int32(inValue));
+ int64_t tempValue = int64_from_int32(inValue);
+ uint32_t i, n;
+ for(i = 3, n = 2; i < 13; n *= i, i++) {
+ tempValue = int64_mul_i64_i32(tempValue, inValue);
+ #ifndef FIXMATH_NO_ROUNDING
+ tempValue = int64_add(tempValue, int64_from_int32(fix16_one >> 1));
+ #endif
+ tempValue = int64_shift(tempValue, -16);
+ tempOut = int64_add(tempOut, int64_div_i64_i32(tempValue, n));
+ }
+
+ #ifndef FIXMATH_NO_CACHE
+ _fix16_exp_cache_index[tempIndex] = inValue;
+ _fix16_exp_cache_value[tempIndex] = int64_lo(tempOut);
+ #endif
+
+ return int64_lo(tempOut);
+}
diff --git a/libfixmath/fix16_sqrt.c b/libfixmath/fix16_sqrt.c
new file mode 100755
index 0000000..13d31a8
--- /dev/null
+++ b/libfixmath/fix16_sqrt.c
@@ -0,0 +1,83 @@
+#include "fix16.h"
+
+/* The square root algorithm is quite directly from
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Binary_numeral_system_.28base_2.29
+ * An important difference is that it is split to two parts
+ * in order to use only 32-bit operations.
+ *
+ * Note that for negative numbers we return -sqrt(-inValue).
+ * Not sure if someone relies on this behaviour, but not going
+ * to break it for now. It doesn't slow the code much overall.
+ */
/* Q16.16 square root using the classic binary digit-by-digit method,
 * run in two 32-bit passes (integer bits first, then the 16 fractional
 * bits) so no 64-bit arithmetic is needed.  Negative inputs return
 * -sqrt(-inValue), as noted in the comment block above. */
fix16_t fix16_sqrt(fix16_t inValue) {
	uint8_t neg = (inValue < 0);
	uint32_t num = (neg ? -inValue : inValue);
	uint32_t result = 0;
	uint32_t bit;
	uint8_t n;

	// Many numbers will be less than 15, so
	// this gives a good balance between time spent
	// in if vs. time spent in the while loop
	// when searching for the starting value.
	if (num & 0xFFF00000)
		bit = (uint32_t)1 << 30;
	else
		bit = (uint32_t)1 << 18;

	// Find the highest power of four that is <= num.
	while (bit > num) bit >>= 2;

	// The main part is executed twice, in order to avoid
	// using 64 bit values in computations.
	for (n = 0; n < 2; n++)
	{
		// First we get the top 24 bits of the answer.
		while (bit)
		{
			// Standard digit-by-digit step: try setting the next
			// result bit and keep it if it does not exceed num.
			if (num >= result + bit)
			{
				num -= result + bit;
				result = (result >> 1) + bit;
			}
			else
			{
				result = (result >> 1);
			}
			bit >>= 2;
		}

		if (n == 0)
		{
			// Then process it again to get the lowest 8 bits.
			if (num > 65535)
			{
				// The remainder 'num' is too large to be shifted left
				// by 16, so we have to add 1 to result manually and
				// adjust 'num' accordingly.
				// num = a - (result + 0.5)^2
				//     = num + result^2 - (result + 0.5)^2
				//     = num - result - 0.5
				num -= result;
				num = (num << 16) - 0x8000;
				result = (result << 16) + 0x8000;
			}
			else
			{
				// Rescale remainder and partial result for the
				// fractional-bit pass.
				num <<= 16;
				result <<= 16;
			}

			bit = 1 << 14;
		}
	}

#ifndef FIXMATH_NO_ROUNDING
	// Finally, if next bit would have been 1, round the result upwards.
	if (num > result)
	{
		result++;
	}
#endif

	return (neg ? -result : result);
}
diff --git a/libfixmath/fix16_trig.c b/libfixmath/fix16_trig.c
new file mode 100755
index 0000000..6b53682
--- /dev/null
+++ b/libfixmath/fix16_trig.c
@@ -0,0 +1,171 @@
+#include <limits.h>
+#include "fix16.h"
+
+#if defined(FIXMATH_SIN_LUT)
+#include "fix16_trig_sin_lut.h"
+#elif !defined(FIXMATH_NO_CACHE)
+static fix16_t _fix16_sin_cache_index[4096] = { 0 };
+static fix16_t _fix16_sin_cache_value[4096] = { 0 };
+#endif
+
+#ifndef FIXMATH_NO_CACHE
+static fix16_t _fix16_atan_cache_index[2][4096] = { { 0 }, { 0 } };
+static fix16_t _fix16_atan_cache_value[4096] = { 0 };
+#endif
+
+
/* Fast parabolic approximation of sin() for angles in [-pi, pi]
 * (per the derivation comment below).  Cheaper but less accurate than
 * fix16_sin. */
fix16_t fix16_sin_parabola(fix16_t inAngle)
{
	fix16_t abs_inAngle, abs_retval, retval;
	fix16_t mask;

	/* Absolute function */
	// Branchless abs: mask is 0 for non-negative, -1 for negative input.
	mask = (inAngle >> (sizeof(fix16_t)*CHAR_BIT-1));
	abs_inAngle = (inAngle + mask) ^ mask;

	/* On 0->PI, sin looks like x² that is :
	- centered on PI/2,
	- equals 1 on PI/2,
	- equals 0 on 0 and PI
	that means : 4/PI * x - 4/PI² * x²
	Use abs(x) to handle (-PI) -> 0 zone.
	*/
	retval = fix16_mul(FOUR_DIV_PI, inAngle) + fix16_mul( fix16_mul(_FOUR_DIV_PI2, inAngle), abs_inAngle );
	/* At this point, retval equals sin(inAngle) on important points ( -PI, -PI/2, 0, PI/2, PI),
	but is not very precise between these points
	*/
	#ifndef FIXMATH_FAST_SIN
	/* Absolute value of retval */
	mask = (retval >> (sizeof(fix16_t)*CHAR_BIT-1));
	abs_retval = (retval + mask) ^ mask;
	/* So improve its precision by adding some x^4 component to retval */
	// X4_CORRECTION_COMPONENT is Q16.16 0.225 (see fix16.h).
	retval += fix16_mul(X4_CORRECTION_COMPONENT, fix16_mul(retval, abs_retval) - retval );
	#endif
	return retval;
}
+
/* Sine of a Q16.16 angle (radians).  Three build variants:
 * FIXMATH_SIN_LUT uses a quarter-wave lookup table; otherwise a
 * polynomial is used (Taylor series by default, or a cheaper
 * polynomial with FIXMATH_FAST_SIN), optionally fronted by a
 * 4096-entry cache. */
fix16_t fix16_sin(fix16_t inAngle) {
	// Reduce the angle modulo 2*pi (result in (-2*pi, 2*pi)).
	fix16_t tempAngle = inAngle % (fix16_pi << 1);

	#ifdef FIXMATH_SIN_LUT
	// Normalize to [0, 2*pi) for the table lookup.
	if(tempAngle < 0)
		tempAngle += (fix16_pi << 1);

	fix16_t tempOut;
	if(tempAngle >= fix16_pi) {
		// sin(x) = -sin(x - pi): fold [pi, 2*pi) down and negate.
		tempAngle -= fix16_pi;
		if(tempAngle >= (fix16_pi >> 1))
			tempAngle = fix16_pi - tempAngle;
		tempOut = -(tempAngle >= _fix16_sin_lut_count ? fix16_one : _fix16_sin_lut[tempAngle]);
	} else {
		// Fold [pi/2, pi) onto [0, pi/2] by the sin(pi - x) symmetry.
		if(tempAngle >= (fix16_pi >> 1))
			tempAngle = fix16_pi - tempAngle;
		tempOut = (tempAngle >= _fix16_sin_lut_count ? fix16_one : _fix16_sin_lut[tempAngle]);
	}
	#else
	// Reduce further to [-pi, pi], where the polynomials are accurate.
	if(tempAngle > fix16_pi)
		tempAngle -= (fix16_pi << 1);
	else if(tempAngle < -fix16_pi)
		tempAngle += (fix16_pi << 1);

	#ifndef FIXMATH_NO_CACHE
	// Cache keyed by a 12-bit slice of the raw input angle; the full
	// angle is stored, so collisions only cause misses, never wrong hits.
	fix16_t tempIndex = ((inAngle >> 5) & 0x00000FFF);
	if(_fix16_sin_cache_index[tempIndex] == inAngle)
		return _fix16_sin_cache_value[tempIndex];
	#endif

	fix16_t tempAngleSq = fix16_mul(tempAngle, tempAngle);

	#ifndef FIXMATH_FAST_SIN // Most accurate version, accurate to ~2.1%
	// Taylor series: x - x^3/3! + x^5/5! - x^7/7! + x^9/9! - x^11/11!.
	// tempAngle is reused to hold the running odd power of the angle.
	fix16_t tempOut = tempAngle;
	tempAngle = fix16_mul(tempAngle, tempAngleSq);
	tempOut -= (tempAngle / 6);
	tempAngle = fix16_mul(tempAngle, tempAngleSq);
	tempOut += (tempAngle / 120);
	tempAngle = fix16_mul(tempAngle, tempAngleSq);
	tempOut -= (tempAngle / 5040);
	tempAngle = fix16_mul(tempAngle, tempAngleSq);
	tempOut += (tempAngle / 362880);
	tempAngle = fix16_mul(tempAngle, tempAngleSq);
	tempOut -= (tempAngle / 39916800);
	#else // Fast implementation, runs at 159% the speed of above 'accurate' version with an slightly lower accuracy of ~2.3%
	// Horner evaluation of a degree-7 odd polynomial in the angle,
	// with precomputed Q16.16 coefficients.
	fix16_t tempOut;
	tempOut = fix16_mul(-13, tempAngleSq) + 546;
	tempOut = fix16_mul(tempOut, tempAngleSq) - 10923;
	tempOut = fix16_mul(tempOut, tempAngleSq) + 65536;
	tempOut = fix16_mul(tempOut, tempAngle);
	#endif

	#ifndef FIXMATH_NO_CACHE
	_fix16_sin_cache_index[tempIndex] = inAngle;
	_fix16_sin_cache_value[tempIndex] = tempOut;
	#endif
	#endif

	return tempOut;
}
+
+fix16_t fix16_cos(fix16_t inAngle) {
+ return fix16_sin(inAngle + (fix16_pi >> 1));
+}
+
+fix16_t fix16_tan(fix16_t inAngle) {
+ return fix16_sdiv(fix16_sin(inAngle), fix16_cos(inAngle));
+}
+
/* Arcsine via the identity asin(x) = atan(x / sqrt(1 - x^2)).
 * Out-of-domain inputs (|x| > 1) return 0.
 * NOTE(review): for x == +/-fix16_one the sqrt argument is 0 and
 * fix16_div divides by zero (returning fix16_min) -- confirm callers
 * avoid the exact domain endpoints. */
fix16_t fix16_asin(fix16_t inValue) {
	if((inValue > fix16_one) || (inValue < -fix16_one))
		return 0;
	fix16_t tempOut;
	tempOut = (fix16_one - fix16_mul(inValue, inValue));
	tempOut = fix16_div(inValue, fix16_sqrt(tempOut));
	tempOut = fix16_atan(tempOut);
	return tempOut;
}
+
+fix16_t fix16_acos(fix16_t inValue) {
+ return ((fix16_pi >> 1) - fix16_asin(inValue));
+}
+
/* Four-quadrant arctangent of inY/inX in Q16.16 radians.
 * Uses a cubic polynomial approximation of atan on a normalized ratio
 * r in [-1, 1] (coefficients 0x3240 ~ 0.1963 and 0xFB50 ~ 0.9817),
 * fronted by a 4096-entry cache keyed by a hash of both inputs. */
fix16_t fix16_atan2(fix16_t inY , fix16_t inX) {
	fix16_t abs_inY, mask, angle, r, r_3;

	#ifndef FIXMATH_NO_CACHE
	uintptr_t hash = (inX ^ inY);
	hash ^= hash >> 20;
	hash &= 0x0FFF;
	// Both inputs are stored with the result, so a hash collision can
	// only cause a cache miss, never a wrong value.
	if((_fix16_atan_cache_index[0][hash] == inX) && (_fix16_atan_cache_index[1][hash] == inY))
		return _fix16_atan_cache_value[hash];
	#endif

	/* Absolute inY */
	// Branchless abs: mask is 0 for non-negative, -1 for negative inY.
	mask = (inY >> (sizeof(fix16_t)*CHAR_BIT-1));
	abs_inY = (inY + mask) ^ mask;

	if (inX >= 0)
	{
		// Right half-plane: r in [-1, 1], expand around pi/4.
		r = fix16_div( (inX - abs_inY), (inX + abs_inY));
		r_3 = fix16_mul(fix16_mul(r, r),r);
		angle = fix16_mul(0x00003240 , r_3) - fix16_mul(0x0000FB50,r) + PI_DIV_4;
	} else {
		// Left half-plane: expand around 3*pi/4.
		r = fix16_div( (inX + abs_inY), (abs_inY - inX));
		r_3 = fix16_mul(fix16_mul(r, r),r);
		angle = fix16_mul(0x00003240 , r_3) - fix16_mul(0x0000FB50,r) + THREE_PI_DIV_4;
	}
	// The approximation above used |inY|; mirror for negative inY.
	if (inY < 0)
	{
		angle = -angle;
	}

	#ifndef FIXMATH_NO_CACHE
	_fix16_atan_cache_index[0][hash] = inX;
	_fix16_atan_cache_index[1][hash] = inY;
	_fix16_atan_cache_value[hash] = angle;
	#endif

	return angle;
}
+
+fix16_t fix16_atan(fix16_t inValue) {
+ return fix16_atan2(inValue, fix16_one);
+}
diff --git a/libfixmath/fixmath.h b/libfixmath/fixmath.h
new file mode 100755
index 0000000..3f3cc74
--- /dev/null
+++ b/libfixmath/fixmath.h
@@ -0,0 +1,23 @@
+#ifndef __libfixmath_fixmath_h__
+#define __libfixmath_fixmath_h__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*!
+ \file fixmath.h
+ \brief Functions to perform fast accurate fixed-point math operations.
+*/
+
+#include <fixmath/uint32.h>
+#include <fixmath/int64.h>
+#include <fixmath/fract32.h>
+#include <fixmath/fix16.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libfixmath/fixmath/fix16.h b/libfixmath/fixmath/fix16.h
new file mode 100755
index 0000000..a2a49b1
--- /dev/null
+++ b/libfixmath/fixmath/fix16.h
@@ -0,0 +1,167 @@
+#ifndef __libfixmath_fix16_h__
+#define __libfixmath_fix16_h__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* These options may let the optimizer to remove some calls to the functions.
+ * Refer to http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+ */
+#ifndef FIXMATH_FUNC_ATTRS
+# ifdef __GNUC__
+# define FIXMATH_FUNC_ATTRS __attribute__((leaf, nothrow, pure))
+# else
+# define FIXMATH_FUNC_ATTRS
+# endif
+#endif
+
+#include <stdint.h>
+
+typedef int32_t fix16_t;
+
+static const fix16_t FOUR_DIV_PI = 0x145F3; /*!< Fix16 value of 4/PI */
+static const fix16_t _FOUR_DIV_PI2 = 0xFFFF9840; /*!< Fix16 value of -4/PI² */
+static const fix16_t X4_CORRECTION_COMPONENT = 0x399A; /*!< Fix16 value of 0.225 */
+static const fix16_t PI_DIV_4 = 0x0000C90F; /*!< Fix16 value of PI/4 */
+static const fix16_t THREE_PI_DIV_4 = 0x00025B2F; /*!< Fix16 value of 3PI/4 */
+
+static const fix16_t fix16_max = 0x7FFFFFFF; /*!< the maximum value of fix16_t */
+static const fix16_t fix16_min = 0x80000000; /*!< the minimum value of fix16_t */
+static const fix16_t fix16_overflow = 0x80000000; /*!< the value used to indicate overflows when FIXMATH_NO_OVERFLOW is not specified */
+
+static const fix16_t fix16_pi = 205887; /*!< fix16_t value of pi */
+static const fix16_t fix16_e = 178145; /*!< fix16_t value of e */
+static const fix16_t fix16_one = 0x00010000; /*!< fix16_t value of 1 */
+
+/* Conversion functions between fix16_t and float/integer.
+ * These are inlined to allow compiler to optimize away constant numbers
+ */
/* Integer -> Q16.16: scale by 65536 (fix16_one). */
static inline fix16_t fix16_from_int(int a) { return a * fix16_one; }
/* Q16.16 -> float: divide out the 16 fractional bits. */
static inline float fix16_to_float(fix16_t a) { return (float)a / fix16_one; }
/* Q16.16 -> double: divide out the 16 fractional bits. */
static inline double fix16_to_dbl(fix16_t a) { return (double)a / fix16_one; }
+
+static inline int fix16_to_int(fix16_t a)
+{
+#ifdef FIXMATH_NO_ROUNDING
+ return a >> 16;
+#else
+ if (a >= 0)
+ return (a + fix16_one / 2) / fix16_one;
+ else
+ return (a - fix16_one / 2) / fix16_one;
+#endif
+}
+
+static inline fix16_t fix16_from_float(float a)
+{
+ float temp = a * fix16_one;
+#ifndef FIXMATH_NO_ROUNDING
+ temp += (temp >= 0) ? 0.5f : -0.5f;
+#endif
+ return (fix16_t)temp;
+}
+
+static inline fix16_t fix16_from_dbl(double a)
+{
+ double temp = a * fix16_one;
+#ifndef FIXMATH_NO_ROUNDING
+ temp += (temp >= 0) ? 0.5f : -0.5f;
+#endif
+ return (fix16_t)temp;
+}
+
+/* Subtraction and addition with (optional) overflow detection. */
+#ifdef FIXMATH_NO_OVERFLOW
+
+static inline fix16_t fix16_add(fix16_t inArg0, fix16_t inArg1) { return (inArg0 + inArg1); }
+static inline fix16_t fix16_sub(fix16_t inArg0, fix16_t inArg1) { return (inArg0 - inArg1); }
+
+#else
+
+extern fix16_t fix16_add(fix16_t a, fix16_t b) FIXMATH_FUNC_ATTRS;
+extern fix16_t fix16_sub(fix16_t a, fix16_t b) FIXMATH_FUNC_ATTRS;
+
+/* Saturating arithmetic */
+extern fix16_t fix16_sadd(fix16_t a, fix16_t b) FIXMATH_FUNC_ATTRS;
+extern fix16_t fix16_ssub(fix16_t a, fix16_t b) FIXMATH_FUNC_ATTRS;
+
+#endif
+
+/*! Multiplies the two given fix16_t's and returns the result.
+*/
+extern fix16_t fix16_mul(fix16_t inArg0, fix16_t inArg1) FIXMATH_FUNC_ATTRS;
+
+/*! Divides the first given fix16_t by the second and returns the result.
+*/
+extern fix16_t fix16_div(fix16_t inArg0, fix16_t inArg1) FIXMATH_FUNC_ATTRS;
+
+#ifndef FIXMATH_NO_OVERFLOW
+/*! Performs a saturated multiplication (overflow-protected) of the two given fix16_t's and returns the result.
+*/
+extern fix16_t fix16_smul(fix16_t inArg0, fix16_t inArg1) FIXMATH_FUNC_ATTRS;
+
+/*! Performs a saturated division (overflow-protected) of the first fix16_t by the second and returns the result.
+*/
+extern fix16_t fix16_sdiv(fix16_t inArg0, fix16_t inArg1) FIXMATH_FUNC_ATTRS;
+#endif
+
+/*! Returns the linear interpolation: (inArg0 * (1 - inFract)) + (inArg1 * inFract)
+*/
+extern fix16_t fix16_lerp8(fix16_t inArg0, fix16_t inArg1, uint8_t inFract) FIXMATH_FUNC_ATTRS;
+extern fix16_t fix16_lerp16(fix16_t inArg0, fix16_t inArg1, uint16_t inFract) FIXMATH_FUNC_ATTRS;
+#ifndef FIXMATH_NO_64BIT
+extern fix16_t fix16_lerp32(fix16_t inArg0, fix16_t inArg1, uint32_t inFract) FIXMATH_FUNC_ATTRS;
+#endif
+
+/*! Returns the sine of the given fix16_t.
+*/
+extern fix16_t fix16_sin_parabola(fix16_t inAngle) FIXMATH_FUNC_ATTRS;
+
+/*! Returns the sine of the given fix16_t.
+*/
+extern fix16_t fix16_sin(fix16_t inAngle) FIXMATH_FUNC_ATTRS;
+
+/*! Returns the cosine of the given fix16_t.
+*/
+extern fix16_t fix16_cos(fix16_t inAngle) FIXMATH_FUNC_ATTRS;
+
+/*! Returns the tangent of the given fix16_t.
+*/
+extern fix16_t fix16_tan(fix16_t inAngle) FIXMATH_FUNC_ATTRS;
+
+/*! Returns the arcsine of the given fix16_t.
+*/
+extern fix16_t fix16_asin(fix16_t inValue) FIXMATH_FUNC_ATTRS;
+
+/*! Returns the arccosine of the given fix16_t.
+*/
+extern fix16_t fix16_acos(fix16_t inValue) FIXMATH_FUNC_ATTRS;
+
+/*! Returns the arctangent of the given fix16_t.
+*/
+extern fix16_t fix16_atan(fix16_t inValue) FIXMATH_FUNC_ATTRS;
+
+/*! Returns the arctangent of inY/inX.
+*/
+extern fix16_t fix16_atan2(fix16_t inY, fix16_t inX) FIXMATH_FUNC_ATTRS;
+
+
+
+/*! Returns the square root of the given fix16_t.
+*/
+extern fix16_t fix16_sqrt(fix16_t inValue) FIXMATH_FUNC_ATTRS;
+
+
+
+/*! Returns the exponent (e^) of the given fix16_t.
+*/
+extern fix16_t fix16_exp(fix16_t inValue) FIXMATH_FUNC_ATTRS;
+
+#ifdef __cplusplus
+}
+#include "fix16.hpp"
+#endif
+
+#endif
diff --git a/libfixmath/fixmath/fract32.h b/libfixmath/fixmath/fract32.h
new file mode 100755
index 0000000..ee1f1c6
--- /dev/null
+++ b/libfixmath/fixmath/fract32.h
@@ -0,0 +1,38 @@
+#ifndef __libfixmath_fract32_h__
+#define __libfixmath_fract32_h__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <stdint.h>
+
+typedef uint32_t fract32_t;
+
+/*! Creates a fraction using unsigned integers.
+ \param inNumerator the unsigned integer numerator
+ \param inDenominator the unsigned integer denominator
+ \return a fraction using the given numerator and denominator
+*/
+extern fract32_t fract32_create(uint32_t inNumerator, uint32_t inDenominator);
+
+/*! Inverts the given fraction, swapping the numerator and the denominator.
+*/
+extern fract32_t fract32_invert(fract32_t inFract);
+
+#ifndef FIXMATH_NO_64BIT
+/*! Performs unsigned saturated (overflow-protected) multiplication with the two given fractions and returns the result as an unsigned integer.
+*/
+extern uint32_t fract32_usmul(uint32_t inVal, fract32_t inFract);
+
+/*! Performs saturated (overflow-protected) multiplication with the two given fractions and returns the result as a signed integer.
+*/
+extern int32_t fract32_smul(int32_t inVal, fract32_t inFract);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libfixmath/fixmath/int64.h b/libfixmath/fixmath/int64.h
new file mode 100755
index 0000000..4d716bf
--- /dev/null
+++ b/libfixmath/fixmath/int64.h
@@ -0,0 +1,162 @@
+#ifndef __libfixmath_int64_h__
+#define __libfixmath_int64_h__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#ifndef FIXMATH_NO_64BIT
/* Native 64-bit build: thin wrappers over the compiler's int64_t.
 * Shift counts must be < 64 and division by zero is the caller's
 * responsibility, as with the raw operators. */
static inline int64_t int64_const(int32_t hi, uint32_t lo) {
	/* Assemble in unsigned arithmetic: left-shifting a negative hi is UB. */
	return (int64_t)(((uint64_t)(uint32_t)hi << 32) | lo);
}
static inline int64_t int64_from_int32(int32_t x) { return (int64_t)x; }
static inline int32_t int64_hi(int64_t x) { return (int32_t)(x >> 32); }
static inline uint32_t int64_lo(int64_t x) { return (uint32_t)(x & ((1ULL << 32) - 1)); }

static inline int64_t int64_add(int64_t x, int64_t y) { return (x + y); }
static inline int64_t int64_neg(int64_t x) { return (-x); }
static inline int64_t int64_sub(int64_t x, int64_t y) { return (x - y); }
static inline int64_t int64_shift(int64_t x, int8_t y) { return (y < 0 ? (x >> -y) : (x << y)); }

/* BUG FIX: widen BEFORE multiplying — the original computed x * y in
 * 32-bit arithmetic, overflowing (UB) for any product outside int32 range. */
static inline int64_t int64_mul_i32_i32(int32_t x, int32_t y) { return ((int64_t)x * y); }
static inline int64_t int64_mul_i64_i32(int64_t x, int32_t y) { return (x * y); }

static inline int64_t int64_div_i64_i32(int64_t x, int32_t y) { return (x / y); }

static inline int int64_cmp_eq(int64_t x, int64_t y) { return (x == y); }
static inline int int64_cmp_ne(int64_t x, int64_t y) { return (x != y); }
static inline int int64_cmp_gt(int64_t x, int64_t y) { return (x > y); }
static inline int int64_cmp_ge(int64_t x, int64_t y) { return (x >= y); }
static inline int int64_cmp_lt(int64_t x, int64_t y) { return (x < y); }
static inline int int64_cmp_le(int64_t x, int64_t y) { return (x <= y); }
+#else
+
/*
 * Software 64-bit arithmetic for targets built with FIXMATH_NO_64BIT.
 * A 64-bit value is an explicit two's-complement {hi, lo} pair:
 *     value = hi * 2^32 + lo    (hi signed, lo unsigned)
 *
 * NOTE(review): the struct was originally named __int64_t; identifiers
 * starting with a double underscore are reserved for the implementation
 * (and __int64_t collides with glibc's internal typedef), so it is
 * renamed.  The int64_t macro at the bottom keeps the public spelling.
 */
typedef struct {
	int32_t hi;   /* signed high word   */
	uint32_t lo;  /* unsigned low word  */
} fixmath_int64_t;

static inline fixmath_int64_t int64_const(int32_t hi, uint32_t lo) { return (fixmath_int64_t){ hi, lo }; }
/* Sign-extend: negative x gets hi = -1 (all ones); lo takes x mod 2^32. */
static inline fixmath_int64_t int64_from_int32(int32_t x) { return (fixmath_int64_t){ (x < 0 ? -1 : 0), x }; }
static inline int32_t int64_hi(fixmath_int64_t x) { return x.hi; }
static inline uint32_t int64_lo(fixmath_int64_t x) { return x.lo; }

/* Ordering: signed on the high word, unsigned on the low word on ties —
   exactly the two's-complement total order. */
static inline int int64_cmp_eq(fixmath_int64_t x, fixmath_int64_t y) { return ((x.hi == y.hi) && (x.lo == y.lo)); }
static inline int int64_cmp_ne(fixmath_int64_t x, fixmath_int64_t y) { return ((x.hi != y.hi) || (x.lo != y.lo)); }
static inline int int64_cmp_gt(fixmath_int64_t x, fixmath_int64_t y) { return ((x.hi > y.hi) || ((x.hi == y.hi) && (x.lo > y.lo))); }
static inline int int64_cmp_ge(fixmath_int64_t x, fixmath_int64_t y) { return ((x.hi > y.hi) || ((x.hi == y.hi) && (x.lo >= y.lo))); }
static inline int int64_cmp_lt(fixmath_int64_t x, fixmath_int64_t y) { return ((x.hi < y.hi) || ((x.hi == y.hi) && (x.lo < y.lo))); }
static inline int int64_cmp_le(fixmath_int64_t x, fixmath_int64_t y) { return ((x.hi < y.hi) || ((x.hi == y.hi) && (x.lo <= y.lo))); }

/* x + y.
   BUG FIX: the original also carried when (ret.hi < y.hi), which added a
   spurious 2^32 whenever x.hi was negative; the only true carry source is
   unsigned wrap-around of the low word.  High words are summed as uint32_t
   to avoid signed-overflow UB. */
static inline fixmath_int64_t int64_add(fixmath_int64_t x, fixmath_int64_t y) {
	fixmath_int64_t ret;
	uint32_t hi = (uint32_t)x.hi + (uint32_t)y.hi;
	ret.lo = x.lo + y.lo;
	if(ret.lo < x.lo)
		hi++;
	ret.hi = (int32_t)hi;
	return ret;
}

/* Two's-complement negate: invert both words, add 1.
   (Negating the most negative value {INT32_MIN, 0} yields itself, like
   native -INT64_MIN.) */
static inline fixmath_int64_t int64_neg(fixmath_int64_t x) {
	fixmath_int64_t ret;
	ret.hi = ~x.hi;
	ret.lo = ~x.lo + 1;
	if(ret.lo == 0)
		ret.hi++;
	return ret;
}

static inline fixmath_int64_t int64_sub(fixmath_int64_t x, fixmath_int64_t y) {
	return int64_add(x, int64_neg(y));
}

/* Shift left (y > 0) or right (y < 0) by |y| bits.
   BUG FIX: y == 0 made the original shift a word by 32 bits (UB), and
   shifts of 32..63 bits wrongly returned zero instead of moving one word
   into the other.  Right shifts replicate the sign bit (arithmetic). */
static inline fixmath_int64_t int64_shift(fixmath_int64_t x, int8_t y) {
	fixmath_int64_t ret;
	int n = y;  /* promote so negating is always safe */
	if(n == 0)
		return x;
	if(n > 0) {
		if(n >= 64)
			return (fixmath_int64_t){ 0, 0 };
		if(n >= 32)
			return (fixmath_int64_t){ (int32_t)(x.lo << (n - 32)), 0 };
		ret.hi = (int32_t)(((uint32_t)x.hi << n) | (x.lo >> (32 - n)));
		ret.lo = (x.lo << n);
	} else {
		int32_t sign = (x.hi < 0) ? -1 : 0;  /* fill word for arithmetic shift */
		n = -n;
		if(n >= 64)
			return (fixmath_int64_t){ sign, (uint32_t)sign };
		if(n >= 32)
			return (fixmath_int64_t){ sign, (uint32_t)(x.hi >> (n - 32)) };
		ret.lo = (x.lo >> n) | ((uint32_t)x.hi << (32 - n));
		ret.hi = (x.hi >> n);  /* >> on signed: arithmetic on all supported compilers */
	}
	return ret;
}

/* 32x32 -> 64 multiply via 16-bit limbs.
   BUG FIX: the original summed signed cross products (overflow UB) and
   dropped the carry from the low word into the high word.  We multiply
   magnitudes in unsigned arithmetic with explicit carries and negate at
   the end when the signs differ. */
static inline fixmath_int64_t int64_mul_i32_i32(int32_t x, int32_t y) {
	int neg = ((x ^ y) < 0);
	uint32_t ux = (x < 0) ? (0u - (uint32_t)x) : (uint32_t)x;  /* safe for INT32_MIN */
	uint32_t uy = (y < 0) ? (0u - (uint32_t)y) : (uint32_t)y;

	uint32_t xh = ux >> 16, xl = ux & 0xFFFF;
	uint32_t yh = uy >> 16, yl = uy & 0xFFFF;

	uint32_t lo = xl * yl;      /* weight 2^0  */
	uint32_t hi = xh * yh;      /* weight 2^32 */
	uint32_t mid = xl * yh;     /* weight 2^16 */
	uint32_t t = xh * yl;       /* weight 2^16 */

	mid += t;
	if(mid < t)                 /* carry out of mid is worth 2^48 */
		hi += 0x10000;

	lo += (mid << 16);
	if(lo < (mid << 16))
		hi++;
	hi += (mid >> 16);

	fixmath_int64_t ret = { (int32_t)hi, lo };
	return (neg ? int64_neg(ret) : ret);
}

/* 64x32 -> 64 multiply (truncated to 64 bits, like the native version).
   BUG FIX: the original never used the lowest 16-bit limb of x and
   assembled the partial products at the wrong weights; rewritten as a
   standard limb multiplication with explicit carry propagation. */
static inline fixmath_int64_t int64_mul_i64_i32(fixmath_int64_t x, int32_t y) {
	int neg = ((x.hi ^ y) < 0);
	if(x.hi < 0)
		x = int64_neg(x);
	uint32_t uy = (y < 0) ? (0u - (uint32_t)y) : (uint32_t)y;  /* safe for INT32_MIN */

	/* 16-bit limbs of |x| (a0 most significant) and |y|. */
	uint32_t a0 = (uint32_t)x.hi >> 16, a1 = (uint32_t)x.hi & 0xFFFF;
	uint32_t a2 = x.lo >> 16, a3 = x.lo & 0xFFFF;
	uint32_t b0 = uy >> 16, b1 = uy & 0xFFFF;

	uint32_t lo = a3 * b1;  /* weight 2^0 */
	uint32_t hi = 0;
	uint32_t t;

	t = a3 * b0;            /* weight 2^16 */
	lo += (t << 16);
	if(lo < (t << 16))
		hi++;
	hi += (t >> 16);

	t = a2 * b1;            /* weight 2^16 */
	lo += (t << 16);
	if(lo < (t << 16))
		hi++;
	hi += (t >> 16);

	hi += a2 * b0 + a1 * b1;          /* weight 2^32; wrap here is overflow past 2^64 */
	hi += (a1 * b0 + a0 * b1) << 16;  /* weight 2^48 */

	fixmath_int64_t ret = { (int32_t)hi, lo };
	return (neg ? int64_neg(ret) : ret);
}

/* x / y, truncating toward zero.  y must be non-zero.
   Strategy: divide each word by |y| separately, then reduce the cross
   remainder (x.hi % y)*2^32 + (x.lo % y) by shift-and-subtract. */
static inline fixmath_int64_t int64_div_i64_i32(fixmath_int64_t x, int32_t y) {
	int neg = ((x.hi ^ y) < 0);
	if(x.hi < 0)
		x = int64_neg(x);  /* NOTE(review): {INT32_MIN,0} stays negative, as in the original */
	uint32_t uy = (y < 0) ? (0u - (uint32_t)y) : (uint32_t)y;  /* safe for INT32_MIN */

	fixmath_int64_t ret = { (int32_t)((uint32_t)x.hi / uy), (x.lo / uy) };
	x.hi = (int32_t)((uint32_t)x.hi % uy);
	x.lo = x.lo % uy;

	fixmath_int64_t _y = int64_const(0, uy);

	/* Scale the divisor up to the remainder, then subtract it back down. */
	fixmath_int64_t i;
	for(i = int64_from_int32(1); int64_cmp_lt(_y, x); _y = int64_shift(_y, 1), i = int64_shift(i, 1));

	while(x.hi) {
		_y = int64_shift(_y, -1);
		i = int64_shift(i, -1);
		if(int64_cmp_ge(x, _y)) {
			x = int64_sub(x, _y);
			ret = int64_add(ret, i);
		}
	}

	ret = int64_add(ret, int64_from_int32(x.lo / uy));
	return (neg ? int64_neg(ret) : ret);
}

/* From here on, "int64_t" means the emulated struct. */
#define int64_t fixmath_int64_t
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libfixmath/fixmath/uint32.h b/libfixmath/fixmath/uint32.h
new file mode 100755
index 0000000..1303338
--- /dev/null
+++ b/libfixmath/fixmath/uint32.h
@@ -0,0 +1,19 @@
+#ifndef __libfixmath_uint32_h__
+#define __libfixmath_uint32_h__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <stdint.h>
+
+/*! Performs an unsigned log-base2 on the specified unsigned integer and returns the result.
+*/
+extern uint32_t uint32_log2(uint32_t inVal);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libfixmath/fract32.c b/libfixmath/fract32.c
new file mode 100755
index 0000000..397e000
--- /dev/null
+++ b/libfixmath/fract32.c
@@ -0,0 +1,27 @@
+#include "fract32.h"
+
+
+
/*! Creates a 0.32 fixed-point fraction from inNumerator / inDenominator.
 *  Returns 0xFFFFFFFF (saturated) when the fraction would be >= 1, i.e.
 *  inDenominator <= inNumerator — which also covers inDenominator == 0.
 *  (fract32_t is a typedef for uint32_t in fixmath/fract32.h.)
 */
uint32_t fract32_create(uint32_t inNumerator, uint32_t inDenominator) {
	if(inDenominator <= inNumerator)
		return 0xFFFFFFFF;
	/* BUG FIX: with inDenominator == 1 (numerator necessarily 0) the
	   original fell through and divided by (inDenominator - 1) == 0.
	   The fraction is exactly zero in that case. */
	if(inDenominator == 1)
		return 0;
	uint32_t tempMod = (inNumerator % inDenominator);
	uint32_t tempDiv = (0xFFFFFFFF / (inDenominator - 1));
	return (tempMod * tempDiv);
}
+
/*! Returns the complementary fraction (one minus inFract).
 *  Bitwise NOT is identical to 0xFFFFFFFF - inFract for a 32-bit value.
 *  (fract32_t is a typedef for uint32_t in fixmath/fract32.h.)
 */
uint32_t fract32_invert(uint32_t inFract) {
	return ~inFract;
}
+
+#ifndef FIXMATH_NO_64BIT
/*! Multiplies an unsigned integer by a 0.32 fraction: (inVal * inFract) >> 32,
 *  computed in 64-bit arithmetic so no overflow is possible.
 *  (fract32_t is a typedef for uint32_t in fixmath/fract32.h.)
 */
uint32_t fract32_usmul(uint32_t inVal, uint32_t inFract) {
	return (uint32_t)(((uint64_t)inVal * (uint64_t)inFract) >> 32);
}

/*! Signed variant: scales |inVal| and re-applies the sign, so the result
 *  rounds toward zero for either sign.
 */
int32_t fract32_smul(int32_t inVal, uint32_t inFract) {
	if(inVal < 0)
		/* BUG FIX: negate via int64_t first — the original's -inVal
		   overflows (UB) when inVal == INT32_MIN. */
		return -(int32_t)fract32_usmul((uint32_t)(-(int64_t)inVal), inFract);
	return (int32_t)fract32_usmul((uint32_t)inVal, inFract);
}
+#endif
diff --git a/libfixmath/uint32.c b/libfixmath/uint32.c
new file mode 100755
index 0000000..2980ab9
--- /dev/null
+++ b/libfixmath/uint32.c
@@ -0,0 +1,15 @@
+#include "uint32.h"
+
+
+
/*! Floor of the base-2 logarithm of inVal (index of the highest set bit).
 *  Returns 0 for an input of 0, where the logarithm is undefined.
 */
uint32_t uint32_log2(uint32_t inVal) {
	if(inVal == 0)
		return 0;
	uint32_t result = 0;
	/* Binary search over the bit positions: probe half-width chunks. */
	for(uint32_t step = 16; step > 0; step >>= 1) {
		if(inVal >> step) {
			inVal >>= step;
			result += step;
		}
	}
	return result;
}