diff --git a/fmt/fmt_asn1dertag.c b/fmt/fmt_asn1dertag.c index c91f506..9d2d0cc 100644 --- a/fmt/fmt_asn1dertag.c +++ b/fmt/fmt_asn1dertag.c @@ -3,7 +3,7 @@ /* write int in least amount of bytes, return number of bytes */ /* as used in ASN.1 DER tag */ size_t fmt_asn1dertag(char* dest,unsigned long long l) { - /* encoding is either l%128 or (0x80+number of bytes,bytes) */ + /* high bit says if more bytes are coming, lower 7 bits are payload; big endian */ size_t n=0,i; unsigned long long t; for (t=l, n=1; t>0x7f; t>>=7) ++n; diff --git a/mult/add_of.3 b/mult/add_of.3 new file mode 100644 index 0000000..ebdcc7a --- /dev/null +++ b/mult/add_of.3 @@ -0,0 +1,21 @@ +.TH add_of 3 +.SH NAME +add_of \- add two integers, check for arithmetic overflow +.SH SYNTAX +.B #include <rangecheck.h> + +int \fBadd_of\fP(dest,a,b); +.SH DESCRIPTION +If calculating a+b is possible without causing undefined behavior or an +arithmetic overflow in C, and the sum fits into the destination integer +type, do dest=a+b and return 0. + +Otherwise, return 1. + +Note: This is a macro, so dest does not have to be a pointer. +.SH BUGS +In the multiplication functions, a return value of 1 signals success and +0 failure. In add_of, sub_of and assign it's the other way around. +.SH "SEE ALSO" +sub_of(3), assign(3), imult16(3), umult16(3), imult32(3), umult32(3), +imult64(3), umult64(3) diff --git a/mult/assign.3 b/mult/assign.3 new file mode 100644 index 0000000..143fbaf --- /dev/null +++ b/mult/assign.3 @@ -0,0 +1,20 @@ +.TH assign 3 +.SH NAME +assign \- assign an integer value, check for truncation +.SH SYNTAX +.B #include <rangecheck.h> + +int \fBassign\fP(dest,a); +.SH DESCRIPTION +If a and dest have the same type, or the value of a is representable in +the type of dest, do dest=a and return 0. + +Otherwise, return 1. + +Note: This is a macro, so dest does not have to be a pointer. +.SH BUGS +In the multiplication functions, a return value of 1 signals success and +0 failure. 
In add_of, sub_of and assign it's the other way around. +.SH "SEE ALSO" +add_of(3), sub_of(3), imult16(3), umult16(3), imult32(3), umult32(3), +imult64(3), umult64(3) diff --git a/mult/imult16.c b/mult/imult16.c index adfdd3d..13d86f4 100644 --- a/mult/imult16.c +++ b/mult/imult16.c @@ -1,3 +1,11 @@ +#if defined(__GNUC__) && (__GNUC__ >= 5) + +#include "uint16.h" + +int imult16( int16 a, int16 b, int16* c) { return !__builtin_mul_overflow(a,b,c); } + +#else + #include "safemult.h" int imult16(int16 a,int16 b,int16* c) { @@ -6,3 +14,5 @@ int imult16(int16 a,int16 b,int16* c) { *c=x; return 1; } + +#endif diff --git a/mult/imult32.c b/mult/imult32.c index 1751960..1073490 100644 --- a/mult/imult32.c +++ b/mult/imult32.c @@ -1,3 +1,11 @@ +#if defined(__GNUC__) && (__GNUC__ >= 5) + +#include "uint32.h" + +int imult32( int32 a, int32 b, int32* c) { return !__builtin_mul_overflow(a,b,c); } + +#else + #include "safemult.h" int imult32(int32 a,int32 b,int32* c) { @@ -6,3 +14,5 @@ int imult32(int32 a,int32 b,int32* c) { *c=x; return 1; } + +#endif diff --git a/mult/imult64.c b/mult/imult64.c index 231ca55..78fc919 100644 --- a/mult/imult64.c +++ b/mult/imult64.c @@ -1,3 +1,11 @@ +#if defined(__GNUC__) && (__GNUC__ >= 5) + +#include "uint64.h" + +int imult64( int64 a, int64 b, int64* c) { return !__builtin_mul_overflow(a,b,c); } + +#else + #if defined(__x86_64__) && defined(__OPTIMIZE__) /* WARNING: this only works if compiled with -fomit-frame-pointer */ @@ -46,3 +54,5 @@ int imult64(int64 a,int64 b,int64* c) { #endif #endif + +#endif diff --git a/mult/sub_of.3 b/mult/sub_of.3 new file mode 100644 index 0000000..9f54207 --- /dev/null +++ b/mult/sub_of.3 @@ -0,0 +1,21 @@ +.TH sub_of 3 +.SH NAME +sub_of \- subtract two integers, check for arithmetic overflow +.SH SYNTAX +.B #include <rangecheck.h> + +int \fBsub_of\fP(dest,a,b); +.SH DESCRIPTION +If calculating a-b is possible without causing undefined behavior or an +arithmetic overflow in C, and the result fits into the destination 
+integer type, do dest=a-b and return 0. + +Otherwise, return 1. + +Note: This is a macro, so dest does not have to be a pointer. +.SH BUGS +In the multiplication functions, a return value of 1 signals success and +0 failure. In add_of, sub_of and assign it's the other way around. +.SH "SEE ALSO" +add_of(3), assign(3), imult16(3), umult16(3), imult32(3), umult32(3), +imult64(3), umult64(3) diff --git a/mult/umult16.c b/mult/umult16.c index 8007f6e..eba66dd 100644 --- a/mult/umult16.c +++ b/mult/umult16.c @@ -1,3 +1,11 @@ +#if defined(__GNUC__) && (__GNUC__ >= 5) + +#include "uint16.h" + +int umult16(uint16 a,uint16 b,uint16* c) { return !__builtin_mul_overflow(a,b,c); } + +#else + #include "safemult.h" int umult16(uint16 a,uint16 b,uint16* c) { @@ -6,3 +14,5 @@ int umult16(uint16 a,uint16 b,uint16* c) { *c=x&0xffff; return 1; } + +#endif diff --git a/mult/umult32.c b/mult/umult32.c index 89ee9a6..948cf0f 100644 --- a/mult/umult32.c +++ b/mult/umult32.c @@ -1,3 +1,11 @@ +#if defined(__GNUC__) && (__GNUC__ >= 5) + +#include "uint32.h" + +int umult32(uint32 a,uint32 b,uint32* c) { return !__builtin_mul_overflow(a,b,c); } + +#else + #include "safemult.h" int umult32(uint32 a,uint32 b,uint32* c) { @@ -6,3 +14,5 @@ int umult32(uint32 a,uint32 b,uint32* c) { *c=x&0xffffffff; return 1; } + +#endif diff --git a/mult/umult64.c b/mult/umult64.c index 3c53f51..3e7071c 100644 --- a/mult/umult64.c +++ b/mult/umult64.c @@ -1,3 +1,11 @@ +#if defined(__GNUC__) && (__GNUC__ >= 5) + +#include "uint64.h" + +int umult64(uint64 a,uint64 b,uint64* c) { return !__builtin_mul_overflow(a,b,c); } + +#else + #include "haveuint128.h" #if defined(__x86_64__) && defined(__OPTIMIZE__) @@ -60,3 +68,5 @@ int umult64(uint64 a,uint64 b,uint64* c) { #endif #endif + +#endif diff --git a/safemult.h b/safemult.h index 3e81f29..e2a83ca 100644 --- a/safemult.h +++ b/safemult.h @@ -10,6 +10,22 @@ extern "C" { #endif +#if defined(__GNUC__) && (__GNUC__ >= 5) + +/* for historical reasons, the mult interface 
uses 0 for failure and 1 + * for success, while the builtins (and my own addition and + * subtraction routines in rangecheck.h) do it the other way around. */ +extern inline int umult16(uint16 a,uint16 b,uint16* c) { return !__builtin_mul_overflow(a,b,c); } +extern inline int imult16( int16 a, int16 b, int16* c) { return !__builtin_mul_overflow(a,b,c); } + +extern inline int umult32(uint32 a,uint32 b,uint32* c) { return !__builtin_mul_overflow(a,b,c); } +extern inline int imult32( int32 a, int32 b, int32* c) { return !__builtin_mul_overflow(a,b,c); } + +extern inline int umult64(uint64 a,uint64 b,uint64* c) { return !__builtin_mul_overflow(a,b,c); } +extern inline int imult64( int64 a, int64 b, int64* c) { return !__builtin_mul_overflow(a,b,c); } + +#else + /* return 0 for overflow, 1 for ok */ int umult16(uint16 a,uint16 b,uint16* c); int imult16( int16 a, int16 b, int16* c); @@ -20,6 +36,8 @@ int imult32( int32 a, int32 b, int32* c); int umult64(uint64 a,uint64 b,uint64* c); int imult64( int64 a, int64 b, int64* c); +#endif + #ifdef __cplusplus } #endif