diff --git a/src/basicfuns.jl b/src/basicfuns.jl
index 5c39104..0769860 100644
--- a/src/basicfuns.jl
+++ b/src/basicfuns.jl
@@ -63,8 +63,8 @@ Return `log(1+exp(x))` evaluated carefully for largish `x`.
 This is also called the ["softplus"](https://en.wikipedia.org/wiki/Rectifier_(neural_networks))
 transformation, being a smooth approximation to `max(0,x)`. Its inverse is [`logexpm1`](@ref).
 """
-log1pexp(x::Real) = x < 18.0 ? log1p(exp(x)) : x < 33.3 ? x + exp(-x) : oftype(exp(-x), x)
-log1pexp(x::Float32) = x < 9.0f0 ? log1p(exp(x)) : x < 16.0f0 ? x + exp(-x) : oftype(exp(-x), x)
+log1pexp(x::Real) = x ≤ -37. ? exp(x) : x ≤ 18. ? log1p(exp(x)) : x ≤ 33.3 ? x + exp(-x) : float(x)
+log1pexp(x::Float32) = x < 9.0f0 ? log1p(exp(x)) : x < 16.0f0 ? x + exp(-x) : x
 
 """
     log1mexp(x::Real)
diff --git a/test/basicfuns.jl b/test/basicfuns.jl
index 02e00f4..03d7984 100644
--- a/test/basicfuns.jl
+++ b/test/basicfuns.jl
@@ -27,11 +27,13 @@ end
     @test log1pexp(-2.0) ≈ log(1.0 + exp(-2.0))
     @test log1pexp(10000) ≈ 10000.0
     @test log1pexp(-10000) ≈ 0.0
-
     @test log1pexp(2f0) ≈ log(1f0 + exp(2f0))
     @test log1pexp(-2f0) ≈ log(1f0 + exp(-2f0))
     @test log1pexp(10000f0) ≈ 10000f0
     @test log1pexp(-10000f0) ≈ 0f0
+    @inferred log1pexp(0)
+    @inferred log1pexp(0.)
+    @inferred log1pexp(0.0f0)
 end
 
 @testset "log1mexp" begin
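
Why the new `Float64` cutoffs are safe (a reviewer note, not part of the patch): for `x ≤ -37`, `exp(x)` is already below `eps(Float64)`, so `log1p(exp(x))` agrees with `exp(x)` to machine precision and the direct form is cheaper; for `18 < x ≤ 33.3`, `log(1 + e^x) = x + log1p(e^-x) ≈ x + e^-x`; and for `x > 33.3`, `e^-x` falls below the resolution of `Float64` near `x`, so the correctly rounded result is `float(x)` itself. The unchanged `Float32` cutoffs 9 and 16 play the same roles at single precision. Below is a minimal sketch checking the branch points against `BigFloat` references; `patched_log1pexp` and `ref` are hypothetical local names for illustration, not the package API.

```julia
using Test

# Hypothetical local copy of the patched Real method, for checking only.
patched_log1pexp(x::Real) =
    x ≤ -37.0 ? exp(x) :         # exp(x) < eps(): log1p(exp(x)) ≈ exp(x)
    x ≤ 18.0  ? log1p(exp(x)) :  # direct evaluation is accurate here
    x ≤ 33.3  ? x + exp(-x) :    # log(1 + e^x) = x + log1p(e^-x) ≈ x + e^-x
                float(x)         # e^-x is below eps(x): result rounds to x

# High-precision reference values via BigFloat.
ref(x) = Float64(log1p(exp(big(x))))

# Probe both sides of every branch point.
for x in (-1000.0, -37.0, -36.9, 0.0, 17.9, 18.1, 33.2, 33.4, 100.0, 1e4)
    @test patched_log1pexp(x) ≈ ref(x)
end

# The new @inferred tests guard the other half of the change: `float(x)`,
# unlike the old `oftype(exp(-x), x)`, fixes the return type of every branch
# without evaluating a needless `exp`.
@inferred patched_log1pexp(0)    # Int in, Float64 out
@inferred patched_log1pexp(0.0)
```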