diff --git a/src/strconv/extfloat.go b/src/strconv/extfloat.go
index 2a2dd7a408..793a34d83f 100644
--- a/src/strconv/extfloat.go
+++ b/src/strconv/extfloat.go
@@ -231,8 +231,30 @@ var uint64pow10 = [...]uint64{
 // float32 depending on flt.
 func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) {
 	const uint64digits = 19
+
+	// Errors (in the "numerical approximation" sense, not the "Go's error
+	// type" sense) in this function are measured as multiples of 1/8 of a ULP,
+	// so that "1/2 of a ULP" can be represented in integer arithmetic.
+	//
+	// The C++ double-conversion library also uses this 8x scaling factor:
+	// https://github.com/google/double-conversion/blob/f4cb2384/double-conversion/strtod.cc#L291
+	// but this Go implementation has a bug, where it forgets to scale other
+	// calculations (further below in this function) by the same number. The
+	// C++ implementation does not forget:
+	// https://github.com/google/double-conversion/blob/f4cb2384/double-conversion/strtod.cc#L366
+	//
+	// Scaling the "errors" in the "is mant_extra in the range (halfway ±
+	// errors)" check, but not scaling the other values, means that we return
+	// ok=false (and fall back to a slower atof code path) more often than we
+	// could. This affects performance but not correctness.
+	//
+	// Longer term, we could fix the forgot-to-scale bug (and look carefully
+	// for correctness regressions; https://codereview.appspot.com/5494068
+	// landed in 2011), or replace this atof algorithm with a faster one (e.g.
+	// Ryu). Shorter term, this comment will suffice.
 	const errorscale = 8
-	errors := 0 // An upper bound for error, computed in errorscale*ulp.
+
+	errors := 0 // An upper bound for error, computed in ULP/errorscale.
 	if trunc {
 		// the decimal number was truncated.
 		errors += errorscale / 2
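
For readers outside the strconv codebase, here is a minimal, standalone sketch of the error-accounting idea the new comment describes: counting an error bound in units of ULP/errorscale so that "half a ULP" stays exact in integer arithmetic, then giving up on the fast path whenever the result lands within that bound of a rounding halfway point. The identifiers (mantExtra, halfway) and the concrete numbers below are hypothetical illustrations, not the actual strconv code.

package main

import "fmt"

// errorscale is the 8x factor discussed above: error bounds are counted in
// units of 1/8 of a ULP, so that 1/2 ULP is the exact integer errorscale/2.
const errorscale = 8

func main() {
	errors := 0 // upper bound on accumulated error, in units of ULP/errorscale

	truncated := true
	if truncated {
		// A truncated decimal mantissa contributes up to 1/2 ULP of error.
		errors += errorscale / 2
	}
	// Multiplying by an approximate (cached) power of ten can add up to
	// another 1/2 ULP.
	errors += errorscale / 2

	// Hypothetical values: the low "extra" bits of the scaled mantissa and
	// the halfway point between two adjacent representable floats, both
	// expressed in the same ULP/errorscale units as errors.
	mantExtra := 1030
	halfway := 1024

	// If mantExtra lies within errors of halfway, the rounding direction is
	// ambiguous and a fast path must return ok=false and fall back to a
	// slower, exact algorithm.
	ambiguous := mantExtra >= halfway-errors && mantExtra <= halfway+errors
	fmt.Printf("errors = %d (in ULP/%d), ambiguous rounding: %t\n",
		errors, errorscale, ambiguous)
}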