diff --git a/src/pkg/runtime/vlop_386.s b/src/pkg/runtime/vlop_386.s
index edc659b848..9783fdc936 100644
--- a/src/pkg/runtime/vlop_386.s
+++ b/src/pkg/runtime/vlop_386.s
@@ -29,6 +29,8 @@
  * C runtime for 64-bit divide.
  */
 
+// _mul64by32(r *uint64, a uint64, b uint32)
+// sets *r = low 64 bits of 96-bit product a*b; returns high 32 bits.
 TEXT _mul64by32(SB), NOSPLIT, $0
 	MOVL	r+0(FP), CX
 	MOVL	a+4(FP), AX
@@ -38,7 +40,9 @@ TEXT _mul64by32(SB), NOSPLIT, $0
 	MOVL	a+8(FP), AX
 	MULL	b+12(FP)
 	ADDL	AX, BX
+	ADCL	$0, DX
 	MOVL	BX, 4(CX)
+	MOVL	DX, AX
 	RET
 
 TEXT _div64by32(SB), NOSPLIT, $0
diff --git a/src/pkg/runtime/vlrt_386.c b/src/pkg/runtime/vlrt_386.c
index d8bc94bd94..8d965c086e 100644
--- a/src/pkg/runtime/vlrt_386.c
+++ b/src/pkg/runtime/vlrt_386.c
@@ -147,7 +147,7 @@ _v2f(Vlong x)
 }
 
 ulong	_div64by32(Vlong, ulong, ulong*);
-void	_mul64by32(Vlong*, Vlong, ulong);
+int	_mul64by32(Vlong*, Vlong, ulong);
 
 static void
 slowdodiv(Vlong num, Vlong den, Vlong *q, Vlong *r)
@@ -232,8 +232,7 @@ dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
 	if(den.hi != 0){
 		q.hi = 0;
 		n = num.hi/den.hi;
-		_mul64by32(&x, den, n);
-		if(x.hi > num.hi || (x.hi == num.hi && x.lo > num.lo))
+		if(_mul64by32(&x, den, n) || x.hi > num.hi || (x.hi == num.hi && x.lo > num.lo))
 			slowdodiv(num, den, &q, &r);
 		else {
 			q.lo = n;