From e1a6cfea17cab8f0671bc5a07c642185852fdda1 Mon Sep 17 00:00:00 2001 From: Eric Lagergren Date: Wed, 31 Jan 2018 21:30:58 -0800 Subject: [PATCH] *: minor fixes to Log, Sqrt, Int64, ... Fixes: #76 Updates: #46 Former-commit-id: 1d6ba9e5a075981548170bece262de4675f72ebd [formerly 719ede172a7182301fc1a4b8326732e7e80054be] Former-commit-id: a74d44905a61cb8fda72f0325b31cb37efea66ba --- .gitignore | 1 + big.go | 13 +++---- big_ctx.go | 5 +-- internal/c/{const_386.go => const32.go} | 2 +- internal/c/{const_amd64.go => const64.go} | 2 +- math/const.go | 18 ++++++---- math/continued_frac.go | 4 +++ math/log.go | 44 +++++++++++------------ math/sqrt.go | 35 +++--------------- scan.go | 2 ++ 10 files changed, 55 insertions(+), 71 deletions(-) rename internal/c/{const_386.go => const32.go} (70%) rename internal/c/{const_amd64.go => const64.go} (56%) diff --git a/.gitignore b/.gitignore index 9a48d97..879951c 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,4 @@ benchmarks/decbench *.class *.gz internal/nat/ +x.bash diff --git a/big.go b/big.go index 4601104..d1b94c1 100644 --- a/big.go +++ b/big.go @@ -759,7 +759,7 @@ func (x *Big) Int64() (int64, bool) { return 0, false } - // x might be too large to fit into an uint64 *now*, but rescaling x might + // x might be too large to fit into an int64 *now*, but rescaling x might // shrink it enough. See issue #20. if !x.isCompact() { xb := x.Int(nil) @@ -773,14 +773,11 @@ func (x *Big) Int64() (int64, bool) { return 0, false } } - if u > math.MaxInt64 { - return 0, false - } - b := int64(u) - if x.form&signbit != 0 { - b = -b + su := int64(u) + if su >= 0 || x.Signbit() && su == -su { + return su, true } - return b, true + return 0, false } // Uint64 returns x as an int64, truncating towards zero. 
The returned boolean diff --git a/big_ctx.go b/big_ctx.go index fd440a8..7ffddbe 100644 --- a/big_ctx.go +++ b/big_ctx.go @@ -909,11 +909,12 @@ func (c Context) Round(z *Big) *Big { return z } - if z.Precision() <= n { + zp := z.Precision() + if zp <= n { return c.fix(z) } - shift := z.Precision() - n + shift := zp - n if shift > c.maxScale() { return z.xflow(c.minScale(), false, true) } diff --git a/internal/c/const_386.go b/internal/c/const32.go similarity index 70% rename from internal/c/const_386.go rename to internal/c/const32.go index b6bb423..263bcaa 100644 --- a/internal/c/const_386.go +++ b/internal/c/const32.go @@ -1,4 +1,4 @@ -// +build 386,!amd64 +// +build 386 mips mipsle arm package c diff --git a/internal/c/const_amd64.go b/internal/c/const64.go similarity index 56% rename from internal/c/const_amd64.go rename to internal/c/const64.go index 7d192cf..60a9feb 100644 --- a/internal/c/const_amd64.go +++ b/internal/c/const64.go @@ -1,4 +1,4 @@ -// +build amd64,!386 +// +build ppc64le ppc64 amd64p32 s390x arm64 mips64 mips64le amd64 package c diff --git a/math/const.go b/math/const.go index 4e6a95e..13e8e91 100644 --- a/math/const.go +++ b/math/const.go @@ -103,15 +103,15 @@ func pi(z *decimal.Big, ctx decimal.Context) *decimal.Big { } // ln10 sets z to log(10) and returns z. -func ln10(z *decimal.Big, prec int) *decimal.Big { +func ln10(z *decimal.Big, prec int, t *Term) *decimal.Big { ctx := decimal.Context{Precision: prec} if ctx.Precision <= constPrec { return ctx.Set(z, _Ln10) } - // TODO(eric): we can (possibly?) speed this up by selecting a log10 constant - // that's some truncation of our continued fraction and setting the starting - // term to that position in our continued fraction. + // TODO(eric): we can speed this up by selecting a log10 constant that's + // some truncation of our continued fraction and setting the starting term + // to that position in our continued fraction. 
ctx.Precision += 3 g := lgen{ @@ -119,10 +119,14 @@ func ln10(z *decimal.Big, prec int) *decimal.Big { pow: eightyOne, // 9 * 9 z2: eleven, // 9 + 2 k: -1, - t: Term{A: new(decimal.Big), B: new(decimal.Big)}, } - ctx.Quo(z, eighteen /* 9 * 2 */, Lentz(z, &g)) - ctx.Precision -= 3 + if t != nil { + g.t = *t + } else { + g.t = makeTerm() + } + ctx.Quo(z, eighteen /* 9 * 2 */, Wallis(z, &g)) + ctx.Precision = prec return ctx.Round(z) } diff --git a/math/continued_frac.go b/math/continued_frac.go index 763aa3c..dcc6d91 100644 --- a/math/continued_frac.go +++ b/math/continued_frac.go @@ -18,6 +18,10 @@ func (t Term) String() string { return fmt.Sprintf("[%s / %s]", t.A, t.B) } +func makeTerm() Term { + return Term{A: new(decimal.Big), B: new(decimal.Big)} +} + // Generator represents a continued fraction. type Generator interface { // Next returns true if there are future terms. Every call to Term—even the diff --git a/math/log.go b/math/log.go index 9a0d89e..cb8fa47 100644 --- a/math/log.go +++ b/math/log.go @@ -1,8 +1,6 @@ package math import ( - stdMath "math" - "github.com/ericlagergren/decimal" "github.com/ericlagergren/decimal/internal/arith" "github.com/ericlagergren/decimal/internal/c" @@ -41,7 +39,7 @@ func Log(z, x *decimal.Big) *decimal.Big { return z.SetMantScale(0, 0) case 10: // Specialized function. - return ln10(z, precision(z)) + return ln10(z, precision(z), nil) } } } @@ -77,8 +75,6 @@ func logSpecials(z, x *decimal.Big) bool { // log set z to log(x), or log10(x) if ten. It does not check for special values, // nor implement any special casing. func log(z, x *decimal.Big, ten bool) *decimal.Big { - prec := precision(z) - t := int64(adjusted(x)) if t < 0 { t = -t - 1 @@ -102,13 +98,15 @@ func log(z, x *decimal.Big, ten bool) *decimal.Big { // Compute // log(y) + p*log(10) - // TODO(eric): adj should be large enough. It's passed multiple iterations - // of with a precision in [1, 5000) and a 128-bit decimal. 
- adj := 7 + int(4*stdMath.Log(float64(x.Precision()))) + // TODO(eric): the precision adjustment should be large enough. It's passed + // multiple iterations with a precision in [1, 5000) and a 128-bit decimal. + prec := precision(z) + ctx := decimal.Context{ + Precision: prec + arith.Length(uint64(prec+x.Precision())) + 5, + } if ten { - adj += 3 + ctx.Precision += 3 } - ctx := decimal.Context{Precision: prec + adj} var p int64 switch { @@ -118,15 +116,16 @@ func log(z, x *decimal.Big, ten bool) *decimal.Big { // 0.0001 case x.Scale() >= x.Precision(): p = -int64(x.Scale() - x.Precision() + 1) + ctx.Precision = int(float64(ctx.Precision) * 1.5) // 12.345 default: p = int64(-x.Scale() + x.Precision() - 1) } - // Rescale to 1 <= x <= 10 - y := decimal.WithContext(ctx).Copy(x).SetScale(x.Precision() - 1) + // Rescale to 1 <= x < 10 + y := new(decimal.Big).Copy(x).SetScale(x.Precision() - 1) // Continued fraction algorithm is for log(1+x) - y.Sub(y, one) + ctx.Sub(y, y, one) g := lgen{ ctx: ctx, @@ -140,10 +139,10 @@ func log(z, x *decimal.Big, ten bool) *decimal.Big { // better performance at ~750 digits of precision. Consider using Newton's // method or another algorithm for lower precision ranges. - ctx.Quo(z, y.Mul(y, two), Lentz(z, &g)) + ctx.Quo(z, ctx.Mul(y, y, two), Wallis(z, &g)) if p != 0 || ten { - t := ln10(y, ctx.Precision) // recycle y + t := ln10(y, ctx.Precision, &g.t) // recycle y, g.t // Avoid doing unnecessary work. 
switch p { @@ -164,7 +163,7 @@ func log(z, x *decimal.Big, ten bool) *decimal.Big { ctx.Quo(z, z, t) } } - ctx.Precision -= adj + ctx.Precision = prec return ctx.Round(z) } @@ -178,13 +177,14 @@ type lgen struct { func (l *lgen) Context() decimal.Context { return l.ctx } -func (l *lgen) Lentz() (f, Δ, C, D, eps *decimal.Big) { - f = decimal.WithContext(l.ctx) - Δ = decimal.WithContext(l.ctx) - C = decimal.WithContext(l.ctx) - D = decimal.WithContext(l.ctx) +func (l *lgen) Wallis() (a, a1, b, b1, p, eps *decimal.Big) { + a = decimal.WithContext(l.ctx) + a1 = decimal.WithContext(l.ctx) + b = decimal.WithContext(l.ctx) + b1 = decimal.WithContext(l.ctx) + p = decimal.WithContext(l.ctx) eps = decimal.New(1, l.ctx.Precision) - return + return a, a1, b, b1, p, eps } func (a *lgen) Next() bool { return true } diff --git a/math/sqrt.go b/math/sqrt.go index bae1bdb..15541d0 100644 --- a/math/sqrt.go +++ b/math/sqrt.go @@ -47,7 +47,6 @@ func Sqrt(z, x *decimal.Big) *decimal.Big { if xs == 0 { return z.SetMantScale(0, ideal).CopySign(z, x) } - // errors.New("math.Sqrt: cannot take square root of negative number"), z.Context.Conditions |= decimal.InvalidOperation return z.SetNaN(false) } @@ -66,20 +65,7 @@ func Sqrt(z, x *decimal.Big) *decimal.Big { // Fast path #1: use math.Sqrt if our decimal is small enough. if f, exact := x.Float64(); exact && prec <= 15 { - ctx.Round(z.SetFloat64(math.Sqrt(f))) - ctx.Precision = x.Precision() - - var tmp decimal.Big - if ctx.Mul(&tmp, z, z).Cmp(x) == 0 { - ctx.Reduce(z) - if !rnd { - z.Context.Conditions &= ^decimal.Rounded - } - if !ixt { - z.Context.Conditions &= ^decimal.Inexact - } - } - return z + return ctx.Reduce(z.SetFloat64(math.Sqrt(f))) } // Source for the following algorithm: @@ -128,20 +114,8 @@ func Sqrt(z, x *decimal.Big) *decimal.Big { // rounding mode half even (speleotrove.com/decimal/daops.html#refsqrt) // anyway. 
- z.SetScale(z.Scale() - e/2) - if z.Precision() > prec { - if !rnd { - z.Context.Conditions &= ^decimal.Rounded - } - if !ixt { - z.Context.Conditions &= ^decimal.Inexact - } - ctx.Precision = prec - return ctx.Round(z) - } - // Perfect square. - if ctx.Mul(&tmp, z, z).Cmp(x) == 0 { - ctx.Reduce(z) + ctx.Reduce(z.SetScale(z.Scale() - e/2)) + if z.Precision() <= prec { if !rnd { z.Context.Conditions &= ^decimal.Rounded } @@ -149,5 +123,6 @@ func Sqrt(z, x *decimal.Big) *decimal.Big { z.Context.Conditions &= ^decimal.Inexact } } - return z + ctx.Precision = prec + return ctx.Round(z) } diff --git a/scan.go b/scan.go index 45fd6ae..f2d8960 100644 --- a/scan.go +++ b/scan.go @@ -354,6 +354,8 @@ Loop: if err == nil { if scale != noScale { z.exp = -int(length - scale) + } else { + z.exp = 0 } z.precision = arith.Length(z.compact) return nil