another gcc 6 warning fix

compile on osx again
remove asm version of i?mult64 (was needed only for clang because it
didn't have __uint128_t, but it now has it)
branch master · leitner, 9 years ago
parent d26b8082d8, commit d020e6d88b

@@ -5,10 +5,18 @@ size_t fmt_strn(char *out,const char *in,size_t limit) {
   register const char* t=in;
   register const char* u=in+limit;
   for (;;) {
-    if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
-    if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
-    if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
-    if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
+    if (!*t || t==u) break;
+    if (s) { *s=*t; ++s; }
+    ++t;
+    if (!*t || t==u) break;
+    if (s) { *s=*t; ++s; }
+    ++t;
+    if (!*t || t==u) break;
+    if (s) { *s=*t; ++s; }
+    ++t;
+    if (!*t || t==u) break;
+    if (s) { *s=*t; ++s; }
+    ++t;
   }
   return (size_t)(t-in);
 }
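
The hunk above only splits the unrolled copy loop so that each statement gets its own line; that is the usual fix for gcc 6's new -Wmisleading-indentation warning and does not change behaviour. For orientation, libowfat's fmt_* functions by convention return the number of bytes (that would be) written, treat a NULL destination as "just measure", and never append a terminating NUL. A usage sketch under those assumptions (the helper name and the limit of 8 are made up for illustration):

#include <stdlib.h>
#include "fmt.h"

/* copy at most 8 bytes of src into a freshly allocated, NUL-terminated buffer */
char* copy_prefix(const char* src) {
  size_t n = fmt_strn(NULL, src, 8);   /* measuring pass: NULL dest only returns the length */
  char* buf = malloc(n + 1);
  if (!buf) return NULL;
  fmt_strn(buf, src, 8);               /* writing pass */
  buf[n] = 0;                          /* fmt_* never writes the trailing NUL itself */
  return buf;
}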

@@ -15,4 +15,10 @@ size_t fmt_varint(char* dest,unsigned long long l) {
   return i;
 }
+#ifdef __ELF__
 size_t fmt_pb_type0_int(char* dest,unsigned long long l) __attribute__((alias("fmt_varint")));
+#else
+size_t fmt_pb_type0_int(char* dest,unsigned long long l) {
+  return fmt_varint(dest,l);
+}
+#endif
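
Protobuf wire type 0 is a base-128 varint, so fmt_pb_type0_int can simply be another name for fmt_varint. __attribute__((alias(...))) only works when targeting ELF, though, so the non-ELF case (e.g. Mach-O on OS X) now gets a real wrapper function instead, which is what makes this compile on osx again. For orientation, a sketch of the encoding that fmt_varint performs; not the library's exact code, and again assuming the NULL-destination-means-measure convention:

#include <stddef.h>

/* base-128 varint: 7 payload bits per byte, high bit set on every byte
   except the last; returns the number of bytes (that would be) written */
static size_t varint_sketch(char* dest, unsigned long long l) {
  size_t i = 0;
  do {
    unsigned char b = l & 0x7f;
    l >>= 7;
    if (l) b |= 0x80;          /* more bytes follow */
    if (dest) dest[i] = (char)b;
    ++i;
  } while (l);
  return i;
}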

@@ -4,28 +4,6 @@
 int imult64( int64 a, int64 b, int64* c) { return !__builtin_mul_overflow(a,b,c); }
-#else
-#if defined(__x86_64__) && defined(__OPTIMIZE__)
-/* WARNING: this only works if compiled with -fomit-frame-pointer */
-void imult64() {
-  asm volatile(
-    "xchgq %rdx,%rsi\n"
-    "movq %rdi,%rax\n"
-    "imulq %rdx\n"
-    "jc 1f\n"			/* overflow */
-    "movq %rax,(%rsi)\n"
-    "xorq %rax,%rax\n"
-    "inc %rax\n"
-    "ret\n"
-    "1:\n"
-    "xorq %rax,%rax\n"
-    /* the closing ret is generated by gcc */
-    );
-}
 #else
 #include "safemult.h"
@@ -54,5 +32,3 @@ int imult64(int64 a,int64 b,int64* c) {
 #endif
 #endif
-#endif
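
With the hand-written x86-64 asm dropped, imult64 comes either from the __builtin_mul_overflow one-liner above (gcc 5+ and current clang) or from the safemult.h fallback. The 128-bit-integer form of that overflow check looks roughly like this; a sketch of the idea, not necessarily the exact code behind the HAVE_UINT128 branch:

#include <stdint.h>

typedef int64_t int64;   /* stand-in for libowfat's int64 typedef */

/* signed 64x64->64 multiply; returns 1 on success, 0 on overflow,
   matching the imult64 contract above (needs gcc/clang __int128) */
static int imult64_sketch(int64 a, int64 b, int64* c) {
  __int128 x = (__int128)a * b;
  if (x != (int64)x) return 0;   /* truncation would change the value: overflow */
  *c = (int64)x;
  return 1;
}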

@@ -8,27 +8,6 @@ int umult64(uint64 a,uint64 b,uint64* c) { return !__builtin_mul_overflow(a,b,c); }
 #include "haveuint128.h"
-#if defined(__x86_64__) && defined(__OPTIMIZE__)
-/* WARNING: this only works if compiled with -fomit-frame-pointer */
-void umult64() {
-  asm volatile(
-    "xchgq %rdx,%rsi\n"
-    "movq %rdi,%rax\n"
-    "mulq %rdx\n"
-    "jc 1f\n"			/* overflow */
-    "movq %rax,(%rsi)\n"
-    "xorq %rax,%rax\n"
-    "inc %rax\n"
-    "ret\n"
-    "1:\n"
-    "xorq %rax,%rax\n"
-    /* the closing ret is generated by gcc */
-    );
-}
-#else
 #include "safemult.h"
 #if defined(HAVE_UINT128)
@@ -68,5 +47,3 @@ int umult64(uint64 a,uint64 b,uint64* c) {
 #endif
 #endif
-#endif
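
The unsigned case is what the commit message refers to: the only reason for the asm version was that clang used to lack __uint128_t, and now that both compilers have it, the check reduces to looking at the upper 64 bits of the full product. A sketch under that assumption:

#include <stdint.h>

typedef uint64_t uint64;   /* stand-in for libowfat's uint64 typedef */

/* unsigned 64x64->64 multiply; overflows exactly when the upper 64 bits
   of the 128-bit product are nonzero (needs gcc/clang __uint128_t) */
static int umult64_sketch(uint64 a, uint64 b, uint64* c) {
  __uint128_t x = (__uint128_t)a * b;
  if (x >> 64) return 0;   /* overflow */
  *c = (uint64)x;
  return 1;
}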
