forked from Telodendria/Telodendria
Add signed 64-bit integer support.
parent 92b1e60c4c
commit 63f835d006
6 changed files with 966 additions and 189 deletions
251 src/Int64.c
|
@@ -26,15 +26,28 @@
|
|||
#include <stddef.h>
|
||||
#include <signal.h>
|
||||
|
||||
#ifdef INT64_NATIVE
|
||||
#define Int64Sign(x) ((int) (((UInt64) (x)) >> 63))
|
||||
#else
|
||||
#define Int64Sign(x) ((int) ((x).i[1] >> 31))
|
||||
#endif
|
||||
|
||||
size_t
|
||||
UInt64Str(UInt64 x, int base, char *out, size_t len)
|
||||
Int64Str(Int64 x, int base, char *out, size_t len)
|
||||
{
|
||||
static const char symbols[] = "0123456789ABCDEF";
|
||||
|
||||
size_t i = len - 1;
|
||||
size_t j = 0;
|
||||
|
||||
UInt64 base64 = UInt64Create(0, base);
|
||||
int neg = Int64Sign(x);
|
||||
|
||||
Int64 base64 = Int64Create(0, base);
|
||||
|
||||
if (neg)
|
||||
{
|
||||
x = Int64Neg(x);
|
||||
}
|
||||
|
||||
/* We only have symbols up to base 16 */
|
||||
if (base < 2 || base > 16)
|
||||
|
@@ -44,13 +57,27 @@ UInt64Str(UInt64 x, int base, char *out, size_t len)
|
|||
|
||||
do
|
||||
{
|
||||
UInt64 mod = UInt64Rem(x, base64);
|
||||
UInt32 low = UInt64Low(mod);
|
||||
Int64 mod = Int64Rem(x, base64);
|
||||
Int32 low = Int64Low(mod);
|
||||
|
||||
out[i] = symbols[low];
|
||||
i--;
|
||||
x = UInt64Div(x, base64);
|
||||
} while (UInt64Gt(x, UInt64Create(0, 0)));
|
||||
x = Int64Div(x, base64);
|
||||
} while (Int64Gt(x, Int64Create(0, 0)));
|
||||
|
||||
/*
|
||||
* Binary, octal, and hexadecimal are known to
|
||||
* be bit representations. Everything else (notably
|
||||
* decimal) should include the negative sign.
|
||||
*/
|
||||
if (base != 2 && base != 8 && base != 16)
|
||||
{
|
||||
if (neg)
|
||||
{
|
||||
out[i] = '-';
|
||||
i--;
|
||||
}
|
||||
}
|
||||
|
||||
while (++i < len)
|
||||
{
|
||||
|
@@ -62,14 +89,14 @@ UInt64Str(UInt64 x, int base, char *out, size_t len)
|
|||
return j;
|
||||
}
|
||||
|
||||
#ifndef UINT64_NATIVE
|
||||
#ifndef INT64_NATIVE
|
||||
|
||||
/* No native 64-bit support, add our own */
|
||||
|
||||
UInt64
|
||||
UInt64Create(UInt32 high, UInt32 low)
|
||||
Int64
|
||||
Int64Create(UInt32 high, UInt32 low)
|
||||
{
|
||||
UInt64 x;
|
||||
Int64 x;
|
||||
|
||||
x.i[0] = low;
|
||||
x.i[1] = high;
|
||||
|
@@ -77,10 +104,10 @@ UInt64Create(UInt32 high, UInt32 low)
|
|||
return x;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Add(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64Add(Int64 x, Int64 y)
|
||||
{
|
||||
UInt64 z = UInt64Create(0, 0);
|
||||
Int64 z = Int64Create(0, 0);
|
||||
int carry;
|
||||
|
||||
z.i[0] = x.i[0] + y.i[0];
|
||||
|
@@ -90,30 +117,46 @@ UInt64Add(UInt64 x, UInt64 y)
|
|||
return z;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Sub(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64Sub(Int64 x, Int64 y)
|
||||
{
|
||||
UInt64 twosCompl = UInt64Add(UInt64Not(y), UInt64Create(0, 1));
|
||||
|
||||
return UInt64Add(x, twosCompl);
|
||||
return Int64Add(x, Int64Neg(y));
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Mul(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64Mul(Int64 x, Int64 y)
|
||||
{
|
||||
UInt64 z = UInt64Create(0, 0);
|
||||
Int64 z = Int64Create(0, 0);
|
||||
|
||||
/* while (y > 0) */
|
||||
while (UInt64Gt(y, UInt64Create(0, 0)))
|
||||
int xneg = Int64Sign(x);
|
||||
int yneg = Int64Sign(y);
|
||||
|
||||
if (xneg)
|
||||
{
|
||||
/* if (y & 1 != 0) */
|
||||
if (UInt64Neq(UInt64And(y, UInt64Create(0, 1)), UInt64Create(0, 0)))
|
||||
{
|
||||
z = UInt64Add(z, x);
|
||||
x = Int64Neg(x);
|
||||
}
|
||||
|
||||
x = UInt64Sll(x, 1);
|
||||
y = UInt64Srl(y, 1);
|
||||
if (yneg)
|
||||
{
|
||||
y = Int64Neg(y);
|
||||
}
|
||||
|
||||
/* while (y > 0) */
|
||||
while (Int64Gt(y, Int64Create(0, 0)))
|
||||
{
|
||||
/* if (y & 1 != 0) */
|
||||
if (Int64Neq(Int64And(y, Int64Create(0, 1)), Int64Create(0, 0)))
|
||||
{
|
||||
z = Int64Add(z, x);
|
||||
}
|
||||
|
||||
x = Int64Sll(x, 1);
|
||||
y = Int64Sra(y, 1);
|
||||
}
|
||||
|
||||
if (xneg != yneg)
|
||||
{
|
||||
z = Int64Neg(z);
|
||||
}
|
||||
|
||||
return z;
|
||||
|
@@ -121,65 +164,84 @@ UInt64Mul(UInt64 x, UInt64 y)
|
|||
|
||||
typedef struct
|
||||
{
|
||||
UInt64 q;
|
||||
UInt64 r;
|
||||
} UInt64Ldiv;
|
||||
Int64 q;
|
||||
Int64 r;
|
||||
} Int64Ldiv;
|
||||
|
||||
static UInt64Ldiv
|
||||
UInt64LongDivision(UInt64 n, UInt64 d)
|
||||
static Int64Ldiv
|
||||
Int64LongDivision(Int64 n, Int64 d)
|
||||
{
|
||||
UInt64Ldiv o;
|
||||
Int64Ldiv o;
|
||||
|
||||
int i;
|
||||
|
||||
o.q = UInt64Create(0, 0);
|
||||
o.r = UInt64Create(0, 0);
|
||||
int nneg = Int64Sign(n);
|
||||
int dneg = Int64Sign(d);
|
||||
|
||||
if (UInt64Eq(d, UInt64Create(0, 0)))
|
||||
o.q = Int64Create(0, 0);
|
||||
o.r = Int64Create(0, 0);
|
||||
|
||||
if (Int64Eq(d, Int64Create(0, 0)))
|
||||
{
|
||||
raise(SIGFPE);
|
||||
return o;
|
||||
}
|
||||
|
||||
if (nneg)
|
||||
{
|
||||
n = Int64Neg(n);
|
||||
}
|
||||
|
||||
if (dneg)
|
||||
{
|
||||
d = Int64Neg(d);
|
||||
}
|
||||
|
||||
for (i = 63; i >= 0; i--)
|
||||
{
|
||||
UInt64 bit = UInt64And(UInt64Srl(n, i), UInt64Create(0, 1));
|
||||
o.r = UInt64Sll(o.r, 1);
|
||||
o.r = UInt64Or(o.r, bit);
|
||||
Int64 bit = Int64And(Int64Sra(n, i), Int64Create(0, 1));
|
||||
o.r = Int64Sll(o.r, 1);
|
||||
o.r = Int64Or(o.r, bit);
|
||||
|
||||
if (UInt64Geq(o.r, d))
|
||||
if (Int64Geq(o.r, d))
|
||||
{
|
||||
o.r = UInt64Sub(o.r, d);
|
||||
o.q = UInt64Or(o.q, UInt64Sll(UInt64Create(0, 1), i));
|
||||
o.r = Int64Sub(o.r, d);
|
||||
o.q = Int64Or(o.q, Int64Sll(Int64Create(0, 1), i));
|
||||
}
|
||||
}
|
||||
|
||||
if (nneg != dneg)
|
||||
{
|
||||
o.r = Int64Neg(o.r);
|
||||
o.q = Int64Neg(o.q);
|
||||
}
|
||||
|
||||
return o;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Div(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64Div(Int64 x, Int64 y)
|
||||
{
|
||||
return UInt64LongDivision(x, y).q;
|
||||
return Int64LongDivision(x, y).q;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Rem(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64Rem(Int64 x, Int64 y)
|
||||
{
|
||||
return UInt64LongDivision(x, y).r;
|
||||
return Int64LongDivision(x, y).r;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Sll(UInt64 x, int y)
|
||||
Int64
|
||||
Int64Sll(Int64 x, int y)
|
||||
{
|
||||
UInt64 z;
|
||||
Int64 z;
|
||||
|
||||
if (!y)
|
||||
{
|
||||
return x;
|
||||
}
|
||||
|
||||
z = UInt64Create(0, 0);
|
||||
z = Int64Create(0, 0);
|
||||
|
||||
if (y < 32)
|
||||
{
|
||||
|
@@ -194,17 +256,19 @@ UInt64Sll(UInt64 x, int y)
|
|||
return z;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Srl(UInt64 x, int y)
|
||||
Int64
|
||||
Int64Sra(Int64 x, int y)
|
||||
{
|
||||
UInt64 z;
|
||||
Int64 z;
|
||||
|
||||
int neg = Int64Sign(x);
|
||||
|
||||
if (!y)
|
||||
{
|
||||
return x;
|
||||
}
|
||||
|
||||
z = UInt64Create(0, 0);
|
||||
z = Int64Create(0, 0);
|
||||
|
||||
if (y < 32)
|
||||
{
|
||||
|
@@ -216,49 +280,90 @@ UInt64Srl(UInt64 x, int y)
|
|||
z.i[0] = x.i[1] >> (y - 32);
|
||||
}
|
||||
|
||||
if (neg)
|
||||
{
|
||||
Int64 mask = Int64Create(0xFFFFFFFF, 0xFFFFFFFF);
|
||||
z = Int64Or(Int64Sll(mask, (64 - y)), z);
|
||||
}
|
||||
|
||||
return z;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64And(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64And(Int64 x, Int64 y)
|
||||
{
|
||||
return UInt64Create(x.i[1] & y.i[1], x.i[0] & y.i[0]);
|
||||
return Int64Create(x.i[1] & y.i[1], x.i[0] & y.i[0]);
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Or(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64Or(Int64 x, Int64 y)
|
||||
{
|
||||
return UInt64Create(x.i[1] | y.i[1], x.i[0] | y.i[0]);
|
||||
return Int64Create(x.i[1] | y.i[1], x.i[0] | y.i[0]);
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Xor(UInt64 x, UInt64 y)
|
||||
Int64
|
||||
Int64Xor(Int64 x, Int64 y)
|
||||
{
|
||||
return UInt64Create(x.i[1] ^ y.i[1], x.i[0] ^ y.i[0]);
|
||||
return Int64Create(x.i[1] ^ y.i[1], x.i[0] ^ y.i[0]);
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Not(UInt64 x)
|
||||
Int64
|
||||
Int64Not(Int64 x)
|
||||
{
|
||||
return UInt64Create(~(x.i[1]), ~(x.i[0]));
|
||||
return Int64Create(~(x.i[1]), ~(x.i[0]));
|
||||
}
|
||||
|
||||
int
|
||||
UInt64Eq(UInt64 x, UInt64 y)
|
||||
Int64Eq(Int64 x, Int64 y)
|
||||
{
|
||||
return x.i[0] == y.i[0] && x.i[1] == y.i[1];
|
||||
}
|
||||
|
||||
int
|
||||
UInt64Lt(UInt64 x, UInt64 y)
|
||||
Int64Lt(Int64 x, Int64 y)
|
||||
{
|
||||
int xneg = Int64Sign(x);
|
||||
int yneg = Int64Sign(y);
|
||||
|
||||
if (xneg != yneg)
|
||||
{
|
||||
return xneg > yneg;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (xneg) /* Both negative */
|
||||
{
|
||||
return x.i[1] > y.i[1] || (x.i[1] == y.i[1] && x.i[0] > y.i[0]);
|
||||
}
|
||||
else /* Both positive */
|
||||
{
|
||||
return x.i[1] < y.i[1] || (x.i[1] == y.i[1] && x.i[0] < y.i[0]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
UInt64Gt(UInt64 x, UInt64 y)
|
||||
Int64Gt(Int64 x, Int64 y)
|
||||
{
|
||||
int xneg = Int64Sign(x);
|
||||
int yneg = Int64Sign(y);
|
||||
|
||||
if (xneg != yneg)
|
||||
{
|
||||
return xneg < yneg;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (xneg) /* Both negative */
|
||||
{
|
||||
return x.i[1] < y.i[1] || (x.i[1] == y.i[1] && x.i[0] < y.i[0]);
|
||||
}
|
||||
else /* Both positive */
|
||||
{
|
||||
return x.i[1] > y.i[1] || (x.i[1] == y.i[1] && x.i[0] > y.i[0]);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
264 src/UInt64.c (new file)
|
@@ -0,0 +1,264 @@
|
|||
/*
|
||||
* Copyright (C) 2022-2023 Jordan Bancino <@jordan:bancino.net>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person
|
||||
* obtaining a copy of this software and associated documentation files
|
||||
* (the "Software"), to deal in the Software without restriction,
|
||||
* including without limitation the rights to use, copy, modify, merge,
|
||||
* publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
* and to permit persons to whom the Software is furnished to do so,
|
||||
* subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <UInt64.h>
|
||||
|
||||
#include <stddef.h>
|
||||
#include <signal.h>
|
||||
|
||||
size_t
|
||||
UInt64Str(UInt64 x, int base, char *out, size_t len)
|
||||
{
|
||||
static const char symbols[] = "0123456789ABCDEF";
|
||||
|
||||
size_t i = len - 1;
|
||||
size_t j = 0;
|
||||
|
||||
UInt64 base64 = UInt64Create(0, base);
|
||||
|
||||
/* We only have symbols up to base 16 */
|
||||
if (base < 2 || base > 16)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
do
|
||||
{
|
||||
UInt64 mod = UInt64Rem(x, base64);
|
||||
UInt32 low = UInt64Low(mod);
|
||||
|
||||
out[i] = symbols[low];
|
||||
i--;
|
||||
x = UInt64Div(x, base64);
|
||||
} while (UInt64Gt(x, UInt64Create(0, 0)));
|
||||
|
||||
while (++i < len)
|
||||
{
|
||||
out[j++] = out[i];
|
||||
}
|
||||
|
||||
out[j] = '\0';
|
||||
|
||||
return j;
|
||||
}
|
||||
|
||||
#ifndef UINT64_NATIVE
|
||||
|
||||
/* No native 64-bit support, add our own */
|
||||
|
||||
UInt64
|
||||
UInt64Create(UInt32 high, UInt32 low)
|
||||
{
|
||||
UInt64 x;
|
||||
|
||||
x.i[0] = low;
|
||||
x.i[1] = high;
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Add(UInt64 x, UInt64 y)
|
||||
{
|
||||
UInt64 z = UInt64Create(0, 0);
|
||||
int carry;
|
||||
|
||||
z.i[0] = x.i[0] + y.i[0];
|
||||
carry = z.i[0] < x.i[0];
|
||||
z.i[1] = x.i[1] + y.i[1] + carry;
|
||||
|
||||
return z;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Sub(UInt64 x, UInt64 y)
|
||||
{
|
||||
UInt64 twosCompl = UInt64Add(UInt64Not(y), UInt64Create(0, 1));
|
||||
|
||||
return UInt64Add(x, twosCompl);
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Mul(UInt64 x, UInt64 y)
|
||||
{
|
||||
UInt64 z = UInt64Create(0, 0);
|
||||
|
||||
/* while (y > 0) */
|
||||
while (UInt64Gt(y, UInt64Create(0, 0)))
|
||||
{
|
||||
/* if (y & 1 != 0) */
|
||||
if (UInt64Neq(UInt64And(y, UInt64Create(0, 1)), UInt64Create(0, 0)))
|
||||
{
|
||||
z = UInt64Add(z, x);
|
||||
}
|
||||
|
||||
x = UInt64Sll(x, 1);
|
||||
y = UInt64Srl(y, 1);
|
||||
}
|
||||
|
||||
return z;
|
||||
}
|
||||
|
||||
typedef struct
|
||||
{
|
||||
UInt64 q;
|
||||
UInt64 r;
|
||||
} UInt64Ldiv;
|
||||
|
||||
static UInt64Ldiv
|
||||
UInt64LongDivision(UInt64 n, UInt64 d)
|
||||
{
|
||||
UInt64Ldiv o;
|
||||
|
||||
int i;
|
||||
|
||||
o.q = UInt64Create(0, 0);
|
||||
o.r = UInt64Create(0, 0);
|
||||
|
||||
if (UInt64Eq(d, UInt64Create(0, 0)))
|
||||
{
|
||||
raise(SIGFPE);
|
||||
return o;
|
||||
}
|
||||
|
||||
for (i = 63; i >= 0; i--)
|
||||
{
|
||||
UInt64 bit = UInt64And(UInt64Srl(n, i), UInt64Create(0, 1));
|
||||
o.r = UInt64Sll(o.r, 1);
|
||||
o.r = UInt64Or(o.r, bit);
|
||||
|
||||
if (UInt64Geq(o.r, d))
|
||||
{
|
||||
o.r = UInt64Sub(o.r, d);
|
||||
o.q = UInt64Or(o.q, UInt64Sll(UInt64Create(0, 1), i));
|
||||
}
|
||||
}
|
||||
|
||||
return o;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Div(UInt64 x, UInt64 y)
|
||||
{
|
||||
return UInt64LongDivision(x, y).q;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Rem(UInt64 x, UInt64 y)
|
||||
{
|
||||
return UInt64LongDivision(x, y).r;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Sll(UInt64 x, int y)
|
||||
{
|
||||
UInt64 z;
|
||||
|
||||
if (!y)
|
||||
{
|
||||
return x;
|
||||
}
|
||||
|
||||
z = UInt64Create(0, 0);
|
||||
|
||||
if (y < 32)
|
||||
{
|
||||
z.i[1] = (x.i[0] >> (32 - y)) | (x.i[1] << y);
|
||||
z.i[0] = x.i[0] << y;
|
||||
}
|
||||
else
|
||||
{
|
||||
z.i[1] = x.i[0] << (y - 32);
|
||||
}
|
||||
|
||||
return z;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Srl(UInt64 x, int y)
|
||||
{
|
||||
UInt64 z;
|
||||
|
||||
if (!y)
|
||||
{
|
||||
return x;
|
||||
}
|
||||
|
||||
z = UInt64Create(0, 0);
|
||||
|
||||
if (y < 32)
|
||||
{
|
||||
z.i[0] = (x.i[1] << (32 - y)) | (x.i[0] >> y);
|
||||
z.i[1] = x.i[1] >> y;
|
||||
}
|
||||
else
|
||||
{
|
||||
z.i[0] = x.i[1] >> (y - 32);
|
||||
}
|
||||
|
||||
return z;
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64And(UInt64 x, UInt64 y)
|
||||
{
|
||||
return UInt64Create(x.i[1] & y.i[1], x.i[0] & y.i[0]);
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Or(UInt64 x, UInt64 y)
|
||||
{
|
||||
return UInt64Create(x.i[1] | y.i[1], x.i[0] | y.i[0]);
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Xor(UInt64 x, UInt64 y)
|
||||
{
|
||||
return UInt64Create(x.i[1] ^ y.i[1], x.i[0] ^ y.i[0]);
|
||||
}
|
||||
|
||||
UInt64
|
||||
UInt64Not(UInt64 x)
|
||||
{
|
||||
return UInt64Create(~(x.i[1]), ~(x.i[0]));
|
||||
}
|
||||
|
||||
int
|
||||
UInt64Eq(UInt64 x, UInt64 y)
|
||||
{
|
||||
return x.i[0] == y.i[0] && x.i[1] == y.i[1];
|
||||
}
|
||||
|
||||
int
|
||||
UInt64Lt(UInt64 x, UInt64 y)
|
||||
{
|
||||
return x.i[1] < y.i[1] || (x.i[1] == y.i[1] && x.i[0] < y.i[0]);
|
||||
}
|
||||
|
||||
int
|
||||
UInt64Gt(UInt64 x, UInt64 y)
|
||||
{
|
||||
return x.i[1] > y.i[1] || (x.i[1] == y.i[1] && x.i[0] > y.i[0]);
|
||||
}
|
||||
|
||||
#endif
|
|
src/include/Int64.h
@@ -65,57 +65,62 @@
|
|||
*/
|
||||
|
||||
#include <Int.h>
|
||||
#include <UInt64.h>
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#if 0 /* TODO REMOVE */
|
||||
|
||||
#define BIT64_MAX 18446744073709551615UL
|
||||
|
||||
/* TODO: Implement signed arithmetic */
|
||||
|
||||
#if UINT_MAX == BIT64_MAX
|
||||
/* typedef signed int Int64; */
|
||||
typedef unsigned int UInt64;
|
||||
typedef signed int Int64;
|
||||
|
||||
#define UINT64_NATIVE
|
||||
#define INT64_NATIVE
|
||||
|
||||
#elif ULONG_MAX == BIT64_MAX
|
||||
/* typedef signed int Int64; */
|
||||
typedef unsigned long UInt64;
|
||||
typedef signed long Int64;
|
||||
|
||||
#define UINT64_NATIVE
|
||||
#define INT64_NATIVE
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef UINT64_NATIVE
|
||||
#endif /* TODO REMOVE */
|
||||
|
||||
#define UInt64Create(high, low) (((UInt64) (high) << 32) | (low))
|
||||
#define UInt64Low(a) ((UInt32) ((a) & 0x00000000FFFFFFFF))
|
||||
#define UInt64High(a) ((UInt32) ((a) >> 32))
|
||||
#ifdef INT64_NATIVE
|
||||
|
||||
#define UInt64Add(a, b) ((a) + (b))
|
||||
#define UInt64Sub(a, b) ((a) - (b))
|
||||
#define UInt64Mul(a, b) ((a) * (b))
|
||||
#define UInt64Div(a, b) ((a) / (b))
|
||||
#define UInt64Rem(a, b) ((a) % (b))
|
||||
#define Int64Create(high, low) ((Int64) (((UInt64) (high) << 32) | (low)))
|
||||
#define Int64Neg(x) (-(x))
|
||||
|
||||
#define UInt64Bsl(a, b) ((a) << (b))
|
||||
#define UInt64Bsr(a, b) ((a) >> (b))
|
||||
#define Int64Low(a) ((UInt32) (a))
|
||||
#define Int64High(a) ((UInt32) ((a) >> 32))
|
||||
|
||||
#define UInt64And(a, b) ((a) & (b))
|
||||
#define UInt64Or(a, b) ((a) | (b))
|
||||
#define UInt64Xor(a, b) ((a) ^ (b))
|
||||
#define UInt64Not(a) (~(a))
|
||||
#define Int64Add(a, b) ((a) + (b))
|
||||
#define Int64Sub(a, b) ((a) - (b))
|
||||
#define Int64Mul(a, b) ((a) * (b))
|
||||
#define Int64Div(a, b) ((a) / (b))
|
||||
#define Int64Rem(a, b) ((a) % (b))
|
||||
|
||||
#define UInt64Eq(a, b) ((a) == (b))
|
||||
#define UInt64Lt(a, b) ((a) < (b))
|
||||
#define UInt64Gt(a, b) ((a) > (b))
|
||||
#define Int64Sll(a, b) ((a) << (b))
|
||||
#define Int64Sra(a, b) ((a) >> (b))
|
||||
|
||||
#define UInt64Neq(a, b) ((a) != (b))
|
||||
#define UInt64Leq(a, b) ((a) <= (b))
|
||||
#define UInt64Geq(a, b) ((a) >= (b))
|
||||
#define Int64And(a, b) ((a) & (b))
|
||||
#define Int64Or(a, b) ((a) | (b))
|
||||
#define Int64Xor(a, b) ((a) ^ (b))
|
||||
#define Int64Not(a) (~(a))
|
||||
|
||||
#define Int64Eq(a, b) ((a) == (b))
|
||||
#define Int64Lt(a, b) ((a) < (b))
|
||||
#define Int64Gt(a, b) ((a) > (b))
|
||||
|
||||
#define Int64Neq(a, b) ((a) != (b))
|
||||
#define Int64Leq(a, b) ((a) <= (b))
|
||||
#define Int64Geq(a, b) ((a) >= (b))
|
||||
|
||||
#else
|
||||
|
||||
#define Int64Neg(x) (Int64Add(Int64Not(x), Int64Create(0, 1)))
|
||||
|
||||
/**
|
||||
* For platforms that do not have a native integer large enough to
|
||||
* store a 64 bit integer, this struct is used. i[0] contains the low
|
||||
|
@@ -128,24 +133,30 @@ typedef unsigned long UInt64;
|
|||
*/
|
||||
typedef struct
|
||||
{
|
||||
/*
|
||||
* Unsigned, because we will deal with the sign bits ourselves.
|
||||
* This also allows well-defined casting between signed and
|
||||
* unsigned integers.
|
||||
*/
|
||||
UInt32 i[2];
|
||||
} UInt64;
|
||||
} Int64;
|
||||
|
||||
|
||||
/**
|
||||
* Create a new unsigned 64 bit integer using the given high and low
|
||||
* Create a new signed 64 bit integer using the given high and low
|
||||
* bits.
|
||||
*/
|
||||
extern UInt64 UInt64Create(UInt32, UInt32);
|
||||
extern Int64 Int64Create(UInt32, UInt32);
|
||||
|
||||
/**
|
||||
* Add two unsigned 64 bit integers together.
|
||||
* Add two signed 64 bit integers together.
|
||||
*/
|
||||
extern UInt64 UInt64Add(UInt64, UInt64);
|
||||
extern Int64 Int64Add(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Subtract the second 64 bit integer from the first.
|
||||
*/
|
||||
extern UInt64 UInt64Sub(UInt64, UInt64);
|
||||
extern Int64 Int64Sub(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Multiply two 64 bit integers together. The non-native version of
|
||||
|
@@ -154,7 +165,7 @@ extern UInt64 UInt64Sub(UInt64, UInt64);
|
|||
* addition, but it is still rather slow and depends on the size of
|
||||
* the integers being multiplied.
|
||||
*/
|
||||
extern UInt64 UInt64Mul(UInt64, UInt64);
|
||||
extern Int64 Int64Mul(Int64, Int64);
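The emulated Int64Mul in this commit's src/Int64.c reduces the signed case to the unsigned shift-and-add loop: negative operands are negated up front, and the product is negated at the end if exactly one operand was negative. A rough sketch of that sign-fixup pattern, written with native 64-bit types purely for illustration:

#include <stdint.h>

/* Illustrative only: multiply magnitudes, then restore the sign, the
 * same structure the emulated Int64Mul uses. Overflow of the true
 * 64-bit product is not handled here. */
static int64_t MulViaMagnitudes(int64_t a, int64_t b)
{
    int aneg = a < 0;
    int bneg = b < 0;
    uint64_t ua = aneg ? -(uint64_t) a : (uint64_t) a;
    uint64_t ub = bneg ? -(uint64_t) b : (uint64_t) b;
    uint64_t prod = ua * ub;

    /* Negate the result when exactly one operand was negative. */
    return (aneg != bneg) ? -(int64_t) prod : (int64_t) prod;
}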
|
||||
|
||||
/**
|
||||
* Divide the first 64 bit integer by the second and return the
|
||||
|
@@ -162,7 +173,7 @@ extern UInt64 UInt64Mul(UInt64, UInt64);
|
|||
* long division, which is slow, but guaranteed to finish in constant
|
||||
* time.
|
||||
*/
|
||||
extern UInt64 UInt64Div(UInt64, UInt64);
|
||||
extern Int64 Int64Div(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Divide the first 64 bit integer by the second and return the
|
||||
|
@@ -170,80 +181,86 @@ extern UInt64 UInt64Div(UInt64, UInt64);
|
|||
* long division, which is slow, but guaranteed to finish in constant
|
||||
* time.
|
||||
*/
|
||||
extern UInt64 UInt64Rem(UInt64, UInt64);
|
||||
extern Int64 Int64Rem(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Perform a left logical bit shift of a 64 bit integer. The second
|
||||
* parameter is how many places to shift, and is declared as a regular
|
||||
* integer because anything more than 64 does not make sense.
|
||||
*/
|
||||
extern UInt64 UInt64Sll(UInt64, int);
|
||||
extern Int64 Int64Sll(Int64, int);
|
||||
|
||||
/**
|
||||
* Perform a right logical bit shift of a 64 bit integer. The second
|
||||
* Perform a right arithmetic bit shift of a 64 bit integer. The second
|
||||
* parameter is how many places to shift, and is declared as a regular
|
||||
* integer because anything more than 64 does not make sense.
|
||||
* .Pp
|
||||
* Note that on platforms that use the native 64-bit implementation,
|
||||
* this is technically implementation-defined, and may in fact be a
|
||||
* logical shift instead of an arithmetic shift, which is why portable C
* code does not usually right-shift negative signed integers directly.
|
||||
*/
|
||||
extern UInt64 UInt64Srl(UInt64, int);
|
||||
extern Int64 Int64Sra(Int64, int);
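Concretely, an arithmetic shift keeps a negative result negative by filling the vacated high bits with copies of the sign bit, which the emulated Int64Sra does explicitly and the tests in tools/int64.c check. A tiny sketch with native types, relying (like the native macro) on the common arithmetic-shift behavior of >> on signed values:

#include <assert.h>
#include <stdint.h>

static void SraIllustration(void)
{
    int64_t x = (int64_t) 0xFF00000000000000u;             /* negative */

    /* Arithmetic shift: the sign bit is copied into the top bits. */
    assert((x >> 4) == (int64_t) 0xFFF0000000000000u);

    /* Logical shift on the unsigned type fills with zeroes instead. */
    assert(((uint64_t) x >> 4) == 0x0FF0000000000000u);
}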
|
||||
|
||||
/**
|
||||
* Perform a bitwise AND (&) of the provided 64 bit integers.
|
||||
*/
|
||||
extern UInt64 UInt64And(UInt64, UInt64);
|
||||
extern Int64 Int64And(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Perform a bitwise OR (|) of the provided 64 bit integers.
|
||||
*/
|
||||
extern UInt64 UInt64Or(UInt64, UInt64);
|
||||
extern Int64 Int64Or(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Perform a bitwise XOR (^) of the provided 64 bit integers.
|
||||
*/
|
||||
extern UInt64 UInt64Xor(UInt64, UInt64);
|
||||
extern Int64 Int64Xor(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Perform a bitwise NOT (~) of the provided 64 bit integer.
|
||||
*/
|
||||
extern UInt64 UInt64Not(UInt64);
|
||||
extern Int64 Int64Not(Int64);
|
||||
|
||||
/**
|
||||
* Perform a comparison of the provided 64 bit integers and return a C
|
||||
* boolean that is true if and only if they are equal.
|
||||
extern int UInt64Eq(UInt64, UInt64);
|
||||
*/
|
||||
extern int Int64Eq(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Perform a comparison of the provided 64 bit integers and return a C
|
||||
* boolean that is true if and only if the first operand is strictly
* less than the second.
|
||||
*/
|
||||
extern int UInt64Lt(UInt64, UInt64);
|
||||
extern int Int64Lt(Int64, Int64);
|
||||
|
||||
/**
|
||||
* Perform a comparison of the provided 64 bit integers and return a C
|
||||
* boolean that is true if and only if the first operand is strictly
* greater than the second.
|
||||
*/
|
||||
extern int UInt64Gt(UInt64, UInt64);
|
||||
extern int Int64Gt(Int64, Int64);
|
||||
|
||||
#define UInt64Low(a) ((a).i[0])
|
||||
#define UInt64High(a) ((a).i[1])
|
||||
#define Int64Low(a) ((a).i[0])
|
||||
#define Int64High(a) ((a).i[1])
|
||||
|
||||
#define UInt64Neq(a, b) (!UInt64Eq(a, b))
|
||||
#define UInt64Leq(a, b) (UInt64Eq(a, b) || UInt64Lt(a, b))
|
||||
#define UInt64Geq(a, b) (UInt64Eq(a, b) || UInt64Gt(a, b))
|
||||
#define Int64Neq(a, b) (!Int64Eq(a, b))
|
||||
#define Int64Leq(a, b) (Int64Eq(a, b) || Int64Lt(a, b))
|
||||
#define Int64Geq(a, b) (Int64Eq(a, b) || Int64Gt(a, b))
|
||||
|
||||
#endif
|
||||
|
||||
#define UINT64_STRBUF 65 /* Base 2 representation with '\0' */
|
||||
#define INT64_STRBUF 65 /* Base 2 representation with '\0' */
|
||||
|
||||
/**
|
||||
* Convert a 64 bit integer to a string in an arbitrary base
|
||||
* representation specified by the second parameter, using the provided
|
||||
* buffer and length specified by the third and fourth parameters. To
|
||||
* guarantee that the string will fit in the buffer, allocate it of
|
||||
* size UINT64_STRBUF or larger. Note that a buffer size smaller than
|
||||
* UINT64_STRBUF will invoke undefined behavior.
|
||||
* size INT64_STRBUF or larger. Note that a buffer size smaller than
|
||||
* INT64_STRBUF will invoke undefined behavior.
|
||||
*/
|
||||
extern size_t UInt64Str(UInt64, int, char *, size_t);
|
||||
extern size_t Int64Str(Int64, int, char *, size_t);
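A minimal usage sketch for the declaration above, assuming only what this header provides (and that it is included as <Int64.h>, matching the project's include style); the buffer is sized with INT64_STRBUF as the comment requires:

#include <Int64.h>

#include <stdio.h>

/* Print -240 in decimal. Illustrative sketch only. */
static void PrintNeg240(void)
{
    char buf[INT64_STRBUF];
    Int64 x = Int64Neg(Int64Create(0, 240));

    if (Int64Str(x, 10, buf, sizeof(buf)))
    {
        printf("%s\n", buf);    /* prints "-240" */
    }
}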
|
||||
|
||||
#endif /* CYTOPLASM_INT64_H */
|
||||
|
|
249 src/include/UInt64.h (new file)
|
@@ -0,0 +1,249 @@
|
|||
/*
|
||||
* Copyright (C) 2022-2023 Jordan Bancino <@jordan:bancino.net>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person
|
||||
* obtaining a copy of this software and associated documentation files
|
||||
* (the "Software"), to deal in the Software without restriction,
|
||||
* including without limitation the rights to use, copy, modify, merge,
|
||||
* publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
* and to permit persons to whom the Software is furnished to do so,
|
||||
* subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef CYTOPLASM_UINT64_H
|
||||
#define CYTOPLASM_UINT64_H
|
||||
|
||||
/***
|
||||
* @Nm UInt64
|
||||
* @Nd Fixed-width 64 bit integers.
|
||||
* @Dd August 11, 2023
|
||||
*
|
||||
* .Pp
|
||||
* ANSI C89 (or C99 for that matter) provides no required mechanism
|
||||
* for 64 bit integers. Nevertheless, many compilers provide them as
|
||||
* extensions. However, since this is not guaranteed, and to be fully
|
||||
* standards-compliant and thus portable, a platform-agnostic interface
|
||||
* is required. This header provides such an interface. If the platform
|
||||
* has a 64 bit integer type, that is used, and native operations are
|
||||
* performed by C preprocessor macro expansion. Otherwise, a
|
||||
* compatibility layer is provided, which implements 64-bit
|
||||
* arithmetic on an array of 2 32-bit numbers which are provided by
|
||||
* .Xr Int 3 .
|
||||
* .Pp
|
||||
* Note that 64-bit emulation is certainly not as performant as using
|
||||
* native 64-bit operations, so whenever possible, the native
|
||||
* operations should be preferred. However, since C provides no required
|
||||
* 64 bit integer on 32-bit and smaller platforms, this API can be used as
|
||||
* a "good enough" fallback mechanism.
|
||||
* .Pp
|
||||
* Also note that this implementation, both in the native and
|
||||
* non-native forms, makes some assumptions:
|
||||
* .Bl -bullet -width Ds
|
||||
* .It
|
||||
* When a cast from a larger integer to a smaller integer is performed,
|
||||
* the upper bits are truncated, not the lower bits.
|
||||
* .It
|
||||
* Negative numbers are represented in memory and in registers in two's
|
||||
* complement form.
|
||||
* .El
|
||||
* .Pp
|
||||
* This API may provide unexpected output if these assumptions are
|
||||
* false for a given platform.
|
||||
*
|
||||
* @ignore-typedefs
|
||||
*/
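Because the same names exist in both the native and emulated builds, calling code does not need to know which one it got. A small sketch of typical use, relying only on what this header declares:

#include <UInt64.h>

#include <stdio.h>

/* Build two values from explicit high/low halves, add them, and print
 * the sum in hexadecimal. Works identically with native or emulated
 * 64-bit support. Illustrative sketch only. */
static void AddAndPrint(void)
{
    char buf[UINT64_STRBUF];
    UInt64 a = UInt64Create(0x00000000, 0xF0000000);
    UInt64 b = UInt64Create(0x00000000, 0x10000000);
    UInt64 sum = UInt64Add(a, b);

    if (UInt64Str(sum, 16, buf, sizeof(buf)))
    {
        printf("0x%s\n", buf);  /* prints "0x100000000" */
    }
}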
|
||||
|
||||
#include <Int.h>
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#define BIT64_MAX 18446744073709551615UL
|
||||
|
||||
/* TODO: Implement signed arithmetic */
|
||||
|
||||
#if UINT_MAX == BIT64_MAX
|
||||
/* typedef signed int Int64; */
|
||||
typedef unsigned int UInt64;
|
||||
|
||||
#define UINT64_NATIVE
|
||||
|
||||
#elif ULONG_MAX == BIT64_MAX
|
||||
/* typedef signed int Int64; */
|
||||
typedef unsigned long UInt64;
|
||||
|
||||
#define UINT64_NATIVE
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef UINT64_NATIVE
|
||||
|
||||
#define UInt64Create(high, low) (((UInt64) (high) << 32) | (low))
|
||||
#define UInt64Low(a) ((UInt32) ((a) & 0x00000000FFFFFFFF))
|
||||
#define UInt64High(a) ((UInt32) ((a) >> 32))
|
||||
|
||||
#define UInt64Add(a, b) ((a) + (b))
|
||||
#define UInt64Sub(a, b) ((a) - (b))
|
||||
#define UInt64Mul(a, b) ((a) * (b))
|
||||
#define UInt64Div(a, b) ((a) / (b))
|
||||
#define UInt64Rem(a, b) ((a) % (b))
|
||||
|
||||
#define UInt64Sll(a, b) ((a) << (b))
|
||||
#define UInt64Srl(a, b) ((a) >> (b))
|
||||
|
||||
#define UInt64And(a, b) ((a) & (b))
|
||||
#define UInt64Or(a, b) ((a) | (b))
|
||||
#define UInt64Xor(a, b) ((a) ^ (b))
|
||||
#define UInt64Not(a) (~(a))
|
||||
|
||||
#define UInt64Eq(a, b) ((a) == (b))
|
||||
#define UInt64Lt(a, b) ((a) < (b))
|
||||
#define UInt64Gt(a, b) ((a) > (b))
|
||||
|
||||
#define UInt64Neq(a, b) ((a) != (b))
|
||||
#define UInt64Leq(a, b) ((a) <= (b))
|
||||
#define UInt64Geq(a, b) ((a) >= (b))
|
||||
|
||||
#else
|
||||
|
||||
/**
|
||||
* For platforms that do not have a native integer large enough to
|
||||
* store a 64 bit integer, this struct is used. i[0] contains the low
|
||||
* bits of integer, and i[1] contains the high bits of the integer.
|
||||
* .Pp
|
||||
* This struct should not be accessed directly, because UInt64 may not
|
||||
* actually be this struct, it might be an actual integer type. For
|
||||
* maximum portability, only use the functions defined here to
|
||||
* manipulate 64 bit integers.
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
UInt32 i[2];
|
||||
} UInt64;
|
||||
|
||||
/**
|
||||
* Create a new unsigned 64 bit integer using the given high and low
|
||||
* bits.
|
||||
*/
|
||||
extern UInt64 UInt64Create(UInt32, UInt32);
|
||||
|
||||
/**
|
||||
* Add two unsigned 64 bit integers together.
|
||||
*/
|
||||
extern UInt64 UInt64Add(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Subtract the second 64 bit integer from the first.
|
||||
*/
|
||||
extern UInt64 UInt64Sub(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Multiply two 64 bit integers together. The non-native version of
|
||||
* this function uses the Russian Peasant method of multiplication,
|
||||
* which should afford more performance than a naive multiplication by
|
||||
* addition, but it is still rather slow and depends on the size of
|
||||
* the integers being multiplied.
|
||||
*/
|
||||
extern UInt64 UInt64Mul(UInt64, UInt64);
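The Russian Peasant method named above is shift-and-add: whenever the low bit of y is set, the current x (a power-of-two multiple of the original x) is added to the accumulator, then x shifts left and y shifts right. A standalone sketch on native unsigned integers, purely to show the algorithm the emulated UInt64Mul runs on its two-word values:

#include <stdint.h>

/* Shift-and-add ("Russian Peasant") multiplication. */
static uint64_t PeasantMul(uint64_t x, uint64_t y)
{
    uint64_t z = 0;

    while (y > 0)
    {
        if (y & 1)      /* low bit set: this power of two contributes */
        {
            z += x;
        }
        x <<= 1;        /* next power of two */
        y >>= 1;
    }

    return z;
}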
|
||||
|
||||
/**
|
||||
* Divide the first 64 bit integer by the second and return the
|
||||
* quotient. The non-native version of this function uses naive binary
|
||||
* long division, which is slow, but guaranteed to finish in constant
|
||||
* time.
|
||||
*/
|
||||
extern UInt64 UInt64Div(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Divide the first 64 bit integer by the second and return the
|
||||
* remainder. The non-native version of this function uses naive binary
|
||||
* long division, which is slow, but guaranteed to finish in constant
|
||||
* time.
|
||||
*/
|
||||
extern UInt64 UInt64Rem(UInt64, UInt64);
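The binary long division used by both UInt64Div and UInt64Rem walks the dividend from bit 63 down to bit 0, shifting each bit into a running remainder and subtracting the divisor whenever it fits, setting the corresponding quotient bit when it does. A compact sketch with native types, illustrative only (the real UInt64LongDivision raises SIGFPE on a zero divisor):

#include <stdint.h>

/* Restoring binary long division: returns the quotient and stores the
 * remainder through *rem. */
static uint64_t LongDiv(uint64_t n, uint64_t d, uint64_t *rem)
{
    uint64_t q = 0;
    uint64_t r = 0;
    int i;

    if (d == 0)
    {
        *rem = 0;       /* the real implementation raises SIGFPE here */
        return 0;
    }

    for (i = 63; i >= 0; i--)
    {
        r = (r << 1) | ((n >> i) & 1);  /* bring down the next bit */
        if (r >= d)
        {
            r -= d;                     /* divisor fits: subtract it */
            q |= (uint64_t) 1 << i;     /* and record a quotient bit */
        }
    }

    *rem = r;
    return q;
}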
|
||||
|
||||
/**
|
||||
* Perform a left logical bit shift of a 64 bit integer. The second
|
||||
* parameter is how many places to shift, and is declared as a regular
|
||||
* integer because anything more than 64 does not make sense.
|
||||
*/
|
||||
extern UInt64 UInt64Sll(UInt64, int);
|
||||
|
||||
/**
|
||||
* Perform a right logical bit shift of a 64 bit integer. The second
|
||||
* parameter is how many places to shift, and is declared as a regular
|
||||
* integer because anything more than 64 does not make sense.
|
||||
*/
|
||||
extern UInt64 UInt64Srl(UInt64, int);
|
||||
|
||||
/**
|
||||
* Perform a bitwise AND (&) of the provided 64 bit integers.
|
||||
*/
|
||||
extern UInt64 UInt64And(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Perform a bitwise OR (|) of the provided 64 bit integers.
|
||||
*/
|
||||
extern UInt64 UInt64Or(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Perform a bitwise XOR (^) of the provided 64 bit integers.
|
||||
*/
|
||||
extern UInt64 UInt64Xor(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Perform a bitwise NOT (~) of the provided 64 bit integer.
|
||||
*/
|
||||
extern UInt64 UInt64Not(UInt64);
|
||||
|
||||
/**
|
||||
* Perform a comparison of the provided 64 bit integers and return a C
|
||||
* boolean that is true if and only if they are equal.
*/
extern int UInt64Eq(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Perform a comparison of the provided 64 bit integers and return a C
|
||||
* boolean that is true if and only if the first operand is strictly
* less than the second.
|
||||
*/
|
||||
extern int UInt64Lt(UInt64, UInt64);
|
||||
|
||||
/**
|
||||
* Perform a comparison of the provided 64 bit integers and return a C
|
||||
* boolean that is true if and only if the first operand is strictly
* greater than the second.
|
||||
*/
|
||||
extern int UInt64Gt(UInt64, UInt64);
|
||||
|
||||
#define UInt64Low(a) ((a).i[0])
|
||||
#define UInt64High(a) ((a).i[1])
|
||||
|
||||
#define UInt64Neq(a, b) (!UInt64Eq(a, b))
|
||||
#define UInt64Leq(a, b) (UInt64Eq(a, b) || UInt64Lt(a, b))
|
||||
#define UInt64Geq(a, b) (UInt64Eq(a, b) || UInt64Gt(a, b))
|
||||
|
||||
#endif
|
||||
|
||||
#define UINT64_STRBUF 65 /* Base 2 representation with '\0' */
|
||||
|
||||
/**
|
||||
* Convert a 64 bit integer to a string in an arbitrary base
|
||||
* representation specified by the second parameter, using the provided
|
||||
* buffer and length specified by the third and fourth parameters. To
|
||||
* guarantee that the string will fit in the buffer, allocate it of
|
||||
* size UINT64_STRBUF or larger. Note that a buffer size smaller than
|
||||
* UINT64_STRBUF will invoke undefined behavior.
|
||||
*/
|
||||
extern size_t UInt64Str(UInt64, int, char *, size_t);
|
||||
|
||||
#endif /* CYTOPLASM_UINT64_H */
|
145 tools/int64.c
|
@@ -3,13 +3,13 @@
|
|||
#include <Log.h>
|
||||
|
||||
/* AssertEquals(actual, expected) */
|
||||
int AssertEquals(char *msg, UInt64 x, UInt64 y)
|
||||
int AssertEquals(char *msg, Int64 x, Int64 y)
|
||||
{
|
||||
if (!UInt64Eq(x, y))
|
||||
if (!Int64Eq(x, y))
|
||||
{
|
||||
Log(LOG_ERR, "%s: Expected 0x%X 0x%X, got 0x%X 0x%X", msg,
|
||||
UInt64High(y), UInt64Low(y),
|
||||
UInt64High(x), UInt64Low(x));
|
||||
Int64High(y), Int64Low(y),
|
||||
Int64High(x), Int64Low(x));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -19,11 +19,11 @@ int AssertEquals(char *msg, UInt64 x, UInt64 y)
|
|||
|
||||
int Main(void)
|
||||
{
|
||||
UInt64 x, y;
|
||||
Int64 x, y;
|
||||
|
||||
Log(LOG_INFO, "sizeof(UInt64) = %lu", sizeof(UInt64));
|
||||
Log(LOG_INFO, "sizeof(Int64) = %lu", sizeof(Int64));
|
||||
|
||||
#ifdef UINT64_NATIVE
|
||||
#ifdef INT64_NATIVE
|
||||
Log(LOG_INFO, "Using native 64-bit integers.");
|
||||
#else
|
||||
Log(LOG_INFO, "Using emulated 64-bit integers.");
|
||||
|
@@ -31,87 +31,112 @@ int Main(void)
|
|||
|
||||
/* BSR Tests */
|
||||
|
||||
x = UInt64Create(0x000000FF, 0x00000000);
|
||||
x = Int64Create(0x000000FF, 0x00000000);
|
||||
|
||||
y = UInt64Bsr(x, 4);
|
||||
AssertEquals("x >> 4", y, UInt64Create(0x0000000F, 0xF0000000));
|
||||
y = Int64Sra(x, 4);
|
||||
AssertEquals("x >> 4", y, Int64Create(0x0000000F, 0xF0000000));
|
||||
|
||||
y = UInt64Bsr(x, 8);
|
||||
AssertEquals("x >> 8", y, UInt64Create(0x00000000, 0xFF000000));
|
||||
y = Int64Sra(x, 8);
|
||||
AssertEquals("x >> 8", y, Int64Create(0x00000000, 0xFF000000));
|
||||
|
||||
y = UInt64Bsr(x, 36);
|
||||
AssertEquals("x >> 36", y, UInt64Create(0x00000000, 0x0000000F));
|
||||
y = Int64Sra(x, 36);
|
||||
AssertEquals("x >> 36", y, Int64Create(0x00000000, 0x0000000F));
|
||||
|
||||
x = Int64Create(0xFF000000, 0x00000000);
|
||||
|
||||
y = Int64Sra(x, 4);
|
||||
AssertEquals("x >> 4", y, Int64Create(0xFFF00000, 0x00000000));
|
||||
|
||||
y = Int64Sra(x, 8);
|
||||
AssertEquals("x >> 8", y, Int64Create(0xFFFF0000, 0x00000000));
|
||||
|
||||
y = Int64Sra(x, 63);
|
||||
AssertEquals("x >> 63", y, Int64Create(0xFFFFFFFF, 0xFFFFFFFF));
|
||||
|
||||
/* BSL Tests */
|
||||
|
||||
x = UInt64Create(0x00000000, 0xFF000000);
|
||||
x = Int64Create(0x00000000, 0xFF000000);
|
||||
|
||||
y = UInt64Bsl(x, 4);
|
||||
AssertEquals("x << 4", y, UInt64Create(0x0000000F, 0xF0000000));
|
||||
y = Int64Sll(x, 4);
|
||||
AssertEquals("x << 4", y, Int64Create(0x0000000F, 0xF0000000));
|
||||
|
||||
y = UInt64Bsl(x, 8);
|
||||
AssertEquals("x << 8", y, UInt64Create(0x000000FF, 0x00000000));
|
||||
y = Int64Sll(x, 8);
|
||||
AssertEquals("x << 8", y, Int64Create(0x000000FF, 0x00000000));
|
||||
|
||||
y = UInt64Bsl(x, 36);
|
||||
AssertEquals("x << 36", y, UInt64Create(0xF0000000, 0x00000000));
|
||||
y = Int64Sll(x, 36);
|
||||
AssertEquals("x << 36", y, Int64Create(0xF0000000, 0x00000000));
|
||||
|
||||
/* ADD Tests */
|
||||
|
||||
x = UInt64Create(0x00000000, 0xF0000001);
|
||||
y = UInt64Create(0x00000000, 0x00000002);
|
||||
AssertEquals("0xF0000001 + 0x00000002", UInt64Add(x, y), UInt64Create(0x00000000, 0xF0000003));
|
||||
x = Int64Create(0x00000000, 0xF0000001);
|
||||
y = Int64Create(0x00000000, 0x00000002);
|
||||
AssertEquals("0xF0000001 + 0x00000002", Int64Add(x, y), Int64Create(0x00000000, 0xF0000003));
|
||||
|
||||
x = UInt64Create(0x00000000, 0xF0000000);
|
||||
y = UInt64Create(0x00000000, 0x10000000);
|
||||
AssertEquals("0xF0000000 + 0x10000000", UInt64Add(x, y), UInt64Create(0x00000001, 0x00000000));
|
||||
x = Int64Create(0x00000000, 0xF0000000);
|
||||
y = Int64Create(0x00000000, 0x10000000);
|
||||
AssertEquals("0xF0000000 + 0x10000000", Int64Add(x, y), Int64Create(0x00000001, 0x00000000));
|
||||
|
||||
x = Int64Create(0, 5);
|
||||
y = Int64Neg(Int64Create(0, 10));
|
||||
AssertEquals("5 + (-10)", Int64Add(x, y), Int64Neg(Int64Create(0, 5)));
|
||||
|
||||
/* SUB Tests */
|
||||
x = UInt64Create(0x00000000, 0x00000005);
|
||||
y = UInt64Create(0x00000000, 0x00000002);
|
||||
AssertEquals("0x00000005 - 0x00000002", UInt64Sub(x, y), UInt64Create(0x00000000, 0x00000003));
|
||||
x = Int64Create(0x00000000, 0x00000005);
|
||||
y = Int64Create(0x00000000, 0x00000002);
|
||||
AssertEquals("0x00000005 - 0x00000002", Int64Sub(x, y), Int64Create(0x00000000, 0x00000003));
|
||||
|
||||
x = UInt64Create(0x00000001, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000001);
|
||||
AssertEquals("0x00000001 0x00000000 - 0x00000001", UInt64Sub(x, y), UInt64Create(0x00000000, 0xFFFFFFFF));
|
||||
x = Int64Create(0x00000001, 0x00000000);
|
||||
y = Int64Create(0x00000000, 0x00000001);
|
||||
AssertEquals("0x00000001 0x00000000 - 0x00000001", Int64Sub(x, y), Int64Create(0x00000000, 0xFFFFFFFF));
|
||||
|
||||
x = Int64Create(0, 5);
|
||||
y = Int64Create(0, 10);
|
||||
AssertEquals("5 - 10", Int64Sub(x, y), Int64Neg(Int64Create(0, 5)));
|
||||
|
||||
x = Int64Create(0, 5);
|
||||
y = Int64Neg(Int64Create(0, 10));
|
||||
AssertEquals("5 - (-10)", Int64Sub(x, y), Int64Create(0, 15));
|
||||
|
||||
/* MUL Tests */
|
||||
x = UInt64Create(0, 18);
|
||||
y = UInt64Create(0, 1);
|
||||
AssertEquals("18 * 1", UInt64Mul(x, y), UInt64Create(0, 18));
|
||||
x = Int64Create(0, 18);
|
||||
y = Int64Create(0, 1);
|
||||
AssertEquals("18 * 1", Int64Mul(x, y), Int64Create(0, 18));
|
||||
|
||||
x = UInt64Create(0, 20);
|
||||
y = UInt64Create(0, 12);
|
||||
AssertEquals("20 * 12", UInt64Mul(x, y), UInt64Create(0, 240));
|
||||
x = Int64Create(0, 20);
|
||||
y = Int64Create(0, 12);
|
||||
AssertEquals("20 * 12", Int64Mul(x, y), Int64Create(0, 240));
|
||||
|
||||
x = UInt64Create(0x00000000, 0x00000005);
|
||||
y = UInt64Create(0x00000000, 0x00000005);
|
||||
AssertEquals("0x00000005 * 0x00000005", UInt64Mul(x, y), UInt64Create(0x00000000, 0x00000019));
|
||||
x = Int64Create(0x00000000, 0x00000005);
|
||||
y = Int64Create(0x00000000, 0x00000005);
|
||||
AssertEquals("0x00000005 * 0x00000005", Int64Mul(x, y), Int64Create(0x00000000, 0x00000019));
|
||||
|
||||
x = UInt64Create(0x00000001, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000005);
|
||||
AssertEquals("0x00000001 0x00000000 * 0x00000005", UInt64Mul(x, y), UInt64Create(0x00000005, 0x00000000));
|
||||
x = Int64Create(0x00000001, 0x00000000);
|
||||
y = Int64Create(0x00000000, 0x00000005);
|
||||
AssertEquals("0x00000001 0x00000000 * 0x00000005", Int64Mul(x, y), Int64Create(0x00000005, 0x00000000));
|
||||
|
||||
/* DIV Tests */
|
||||
x = UInt64Create(0, 12);
|
||||
y = UInt64Create(0, 4);
|
||||
AssertEquals("12 / 4", UInt64Div(x, y), UInt64Create(0, 3));
|
||||
x = Int64Create(0, 12);
|
||||
y = Int64Create(0, 4);
|
||||
AssertEquals("12 / 4", Int64Div(x, y), Int64Create(0, 3));
|
||||
|
||||
/* MOD Tests */
|
||||
x = UInt64Create(0x000000FF, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x000000FF 0x00000000 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
x = Int64Create(0x000000FF, 0x00000000);
|
||||
y = Int64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x000000FF 0x00000000 mod 0x00000010", Int64Rem(x, y), Int64Create(0, 0));
|
||||
|
||||
x = UInt64Create(0x00000000, 0xFF000000);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x00000000 0xFF000000 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
x = Int64Create(0x00000000, 0xFF000000);
|
||||
y = Int64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x00000000 0xFF000000 mod 0x00000010", Int64Rem(x, y), Int64Create(0, 0));
|
||||
|
||||
x = UInt64Create(0xFF000000, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0xFF000000 0x00000000 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
x = Int64Create(0xFF000000, 0x00000000);
|
||||
y = Int64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0xFF000000 0x00000000 mod 0x00000010", Int64Rem(x, y), Int64Create(0, 0));
|
||||
|
||||
x = UInt64Create(0x00000000, 0x000000F0);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x00000000 0x000000F0 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
x = Int64Create(0x00000000, 0x000000F0);
|
||||
y = Int64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x00000000 0x000000F0 mod 0x00000010", Int64Rem(x, y), Int64Create(0, 0));
|
||||
|
||||
/* TODO: Add more tests for negative multiplication, division, and mod */
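/*
 * Hypothetical examples of such cases (a sketch, not part of this
 * commit), following the existing AssertEquals pattern and assuming
 * the truncating division semantics of the emulated implementation:
 *
 *   x = Int64Neg(Int64Create(0, 20));
 *   y = Int64Create(0, 12);
 *   AssertEquals("-20 * 12", Int64Mul(x, y), Int64Neg(Int64Create(0, 240)));
 *
 *   x = Int64Neg(Int64Create(0, 12));
 *   y = Int64Create(0, 4);
 *   AssertEquals("-12 / 4", Int64Div(x, y), Int64Neg(Int64Create(0, 3)));
 *
 *   x = Int64Neg(Int64Create(0, 15));
 *   y = Int64Create(0, 4);
 *   AssertEquals("-15 mod 4", Int64Rem(x, y), Int64Neg(Int64Create(0, 3)));
 */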
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
117 tools/uint64.c (new file)
|
@@ -0,0 +1,117 @@
|
|||
#include <UInt64.h>
|
||||
|
||||
#include <Log.h>
|
||||
|
||||
/* AssertEquals(actual, expected) */
|
||||
int AssertEquals(char *msg, UInt64 x, UInt64 y)
|
||||
{
|
||||
if (!UInt64Eq(x, y))
|
||||
{
|
||||
Log(LOG_ERR, "%s: Expected 0x%X 0x%X, got 0x%X 0x%X", msg,
|
||||
UInt64High(y), UInt64Low(y),
|
||||
UInt64High(x), UInt64Low(x));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int Main(void)
|
||||
{
|
||||
UInt64 x, y;
|
||||
|
||||
Log(LOG_INFO, "sizeof(UInt64) = %lu", sizeof(UInt64));
|
||||
|
||||
#ifdef UINT64_NATIVE
|
||||
Log(LOG_INFO, "Using native 64-bit integers.");
|
||||
#else
|
||||
Log(LOG_INFO, "Using emulated 64-bit integers.");
|
||||
#endif
|
||||
|
||||
/* BSR Tests */
|
||||
|
||||
x = UInt64Create(0x000000FF, 0x00000000);
|
||||
|
||||
y = UInt64Srl(x, 4);
|
||||
AssertEquals("x >> 4", y, UInt64Create(0x0000000F, 0xF0000000));
|
||||
|
||||
y = UInt64Srl(x, 8);
|
||||
AssertEquals("x >> 8", y, UInt64Create(0x00000000, 0xFF000000));
|
||||
|
||||
y = UInt64Srl(x, 36);
|
||||
AssertEquals("x >> 36", y, UInt64Create(0x00000000, 0x0000000F));
|
||||
|
||||
/* BSL Tests */
|
||||
|
||||
x = UInt64Create(0x00000000, 0xFF000000);
|
||||
|
||||
y = UInt64Sll(x, 4);
|
||||
AssertEquals("x << 4", y, UInt64Create(0x0000000F, 0xF0000000));
|
||||
|
||||
y = UInt64Sll(x, 8);
|
||||
AssertEquals("x << 8", y, UInt64Create(0x000000FF, 0x00000000));
|
||||
|
||||
y = UInt64Sll(x, 36);
|
||||
AssertEquals("x << 36", y, UInt64Create(0xF0000000, 0x00000000));
|
||||
|
||||
/* ADD Tests */
|
||||
|
||||
x = UInt64Create(0x00000000, 0xF0000001);
|
||||
y = UInt64Create(0x00000000, 0x00000002);
|
||||
AssertEquals("0xF0000001 + 0x00000002", UInt64Add(x, y), UInt64Create(0x00000000, 0xF0000003));
|
||||
|
||||
x = UInt64Create(0x00000000, 0xF0000000);
|
||||
y = UInt64Create(0x00000000, 0x10000000);
|
||||
AssertEquals("0xF0000000 + 0x10000000", UInt64Add(x, y), UInt64Create(0x00000001, 0x00000000));
|
||||
|
||||
/* SUB Tests */
|
||||
x = UInt64Create(0x00000000, 0x00000005);
|
||||
y = UInt64Create(0x00000000, 0x00000002);
|
||||
AssertEquals("0x00000005 - 0x00000002", UInt64Sub(x, y), UInt64Create(0x00000000, 0x00000003));
|
||||
|
||||
x = UInt64Create(0x00000001, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000001);
|
||||
AssertEquals("0x00000001 0x00000000 - 0x00000001", UInt64Sub(x, y), UInt64Create(0x00000000, 0xFFFFFFFF));
|
||||
|
||||
/* MUL Tests */
|
||||
x = UInt64Create(0, 18);
|
||||
y = UInt64Create(0, 1);
|
||||
AssertEquals("18 * 1", UInt64Mul(x, y), UInt64Create(0, 18));
|
||||
|
||||
x = UInt64Create(0, 20);
|
||||
y = UInt64Create(0, 12);
|
||||
AssertEquals("20 * 12", UInt64Mul(x, y), UInt64Create(0, 240));
|
||||
|
||||
x = UInt64Create(0x00000000, 0x00000005);
|
||||
y = UInt64Create(0x00000000, 0x00000005);
|
||||
AssertEquals("0x00000005 * 0x00000005", UInt64Mul(x, y), UInt64Create(0x00000000, 0x00000019));
|
||||
|
||||
x = UInt64Create(0x00000001, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000005);
|
||||
AssertEquals("0x00000001 0x00000000 * 0x00000005", UInt64Mul(x, y), UInt64Create(0x00000005, 0x00000000));
|
||||
|
||||
/* DIV Tests */
|
||||
x = UInt64Create(0, 12);
|
||||
y = UInt64Create(0, 4);
|
||||
AssertEquals("12 / 4", UInt64Div(x, y), UInt64Create(0, 3));
|
||||
|
||||
/* MOD Tests */
|
||||
x = UInt64Create(0x000000FF, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x000000FF 0x00000000 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
|
||||
x = UInt64Create(0x00000000, 0xFF000000);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x00000000 0xFF000000 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
|
||||
x = UInt64Create(0xFF000000, 0x00000000);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0xFF000000 0x00000000 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
|
||||
x = UInt64Create(0x00000000, 0x000000F0);
|
||||
y = UInt64Create(0x00000000, 0x00000010);
|
||||
AssertEquals("0x00000000 0x000000F0 mod 0x00000010", UInt64Rem(x, y), UInt64Create(0, 0));
|
||||
|
||||
return 0;
|
||||
}
|