improve inferability of base #22019

Merged (12 commits) on May 24, 2017
60 changes: 30 additions & 30 deletions base/abstractarray.jl
@@ -202,13 +202,11 @@ julia> strides(A)
(1, 3, 12)
```
"""
strides(A::AbstractArray) = _strides((1,), A)
_strides(out::Tuple{Int}, A::AbstractArray{<:Any,0}) = ()
_strides(out::NTuple{N,Int}, A::AbstractArray{<:Any,N}) where {N} = out
function _strides(out::NTuple{M,Int}, A::AbstractArray) where M
@_inline_meta
_strides((out..., out[M]*size(A, M)), A)
end
strides(A::AbstractArray) = size_to_strides(1, size(A)...)
@inline size_to_strides(s, d, sz...) = (s, size_to_strides(s * d, sz...)...)
size_to_strides(s, d) = (s,)
size_to_strides(s) = ()
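Note: the new definitions unfold into a plain tuple expression the compiler can fully infer. A standalone sketch with a hypothetical name (so it does not collide with the Base internal above); each stride is the running product of the preceding dimension sizes:

```julia
# Illustrative re-implementation mirroring the definitions above.
sz2str(s, d, rest...) = (s, sz2str(s * d, rest...)...)  # accumulate the running product
sz2str(s, d) = (s,)                                     # last dimension: its size is not needed
sz2str(s) = ()                                          # 0-dimensional case

A = zeros(2, 3, 4)
sz2str(1, size(A)...)   # (1, 2, 6)
strides(A)              # (1, 2, 6), the Base result it mirrors
```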


function isassigned(a::AbstractArray, i::Int...)
try
@@ -1160,30 +1158,32 @@ cat_similar(A::AbstractArray, T, shape) = similar(A, T, shape)

cat_shape(dims, shape::Tuple) = shape
@inline cat_shape(dims, shape::Tuple, nshape::Tuple, shapes::Tuple...) =
cat_shape(dims, _cshp(dims, (), shape, nshape), shapes...)
cat_shape(dims, _cshp(1, dims, shape, nshape), shapes...)

_cshp(::Tuple{}, out, ::Tuple{}, ::Tuple{}) = out
_cshp(::Tuple{}, out, ::Tuple{}, nshape) = (out..., nshape...)
_cshp(dims, out, ::Tuple{}, ::Tuple{}) = (out..., map(b -> 1, dims)...)
@inline _cshp(dims, out, shape, ::Tuple{}) =
_cshp(tail(dims), (out..., shape[1] + dims[1]), tail(shape), ())
@inline _cshp(dims, out, ::Tuple{}, nshape) =
_cshp(tail(dims), (out..., nshape[1]), (), tail(nshape))
@inline function _cshp(::Tuple{}, out, shape, ::Tuple{})
_cs(length(out) + 1, false, shape[1], 1)
_cshp((), (out..., 1), tail(shape), ())
_cshp(ndim::Int, ::Tuple{}, ::Tuple{}, ::Tuple{}) = ()
_cshp(ndim::Int, ::Tuple{}, ::Tuple{}, nshape) = nshape
_cshp(ndim::Int, dims, ::Tuple{}, ::Tuple{}) = ntuple(b -> 1, Val{length(dims)})
@inline _cshp(ndim::Int, dims, shape, ::Tuple{}) =
(shape[1] + dims[1], _cshp(ndim + 1, tail(dims), tail(shape), ())...)
@inline _cshp(ndim::Int, dims, ::Tuple{}, nshape) =
(nshape[1], _cshp(ndim + 1, tail(dims), (), tail(nshape))...)
@inline function _cshp(ndim::Int, ::Tuple{}, shape, ::Tuple{})
_cs(ndim, shape[1], 1)
(1, _cshp(ndim + 1, (), tail(shape), ())...)
end
@inline function _cshp(::Tuple{}, out, shape, nshape)
next = _cs(length(out) + 1, false, shape[1], nshape[1])
_cshp((), (out..., next), tail(shape), tail(nshape))
@inline function _cshp(ndim::Int, ::Tuple{}, shape, nshape)
next = _cs(ndim, shape[1], nshape[1])
(next, _cshp(ndim + 1, (), tail(shape), tail(nshape))...)
end
@inline function _cshp(dims, out, shape, nshape)
next = _cs(length(out) + 1, dims[1], shape[1], nshape[1])
_cshp(tail(dims), (out..., next), tail(shape), tail(nshape))
@inline function _cshp(ndim::Int, dims, shape, nshape)
a = shape[1]
b = nshape[1]
next = dims[1] ? a + b : _cs(ndim, a, b)
(next, _cshp(ndim + 1, tail(dims), tail(shape), tail(nshape))...)
end

_cs(d, concat, a, b) = concat ? (a + b) : (a == b ? a : throw(DimensionMismatch(string(
"mismatch in dimension ", d, " (expected ", a, " got ", b, ")"))))
_cs(d, a, b) = (a == b ? a : throw(DimensionMismatch(
"mismatch in dimension $d (expected $a got $b)")))

dims2cat{n}(::Type{Val{n}}) = ntuple(i -> (i == n), Val{n})
dims2cat(dims) = ntuple(i -> (i in dims), maximum(dims))
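Note: the reworked `_cshp` builds the result tuple directly instead of threading an `out` accumulator, so the shape of a concatenation infers as a concrete tuple type. A hedged REPL sketch, assuming the internal helpers keep the signatures shown above:

```julia
# dims2cat(Val{2}) marks dimension 2 as the concatenation dimension.
catdims = Base.dims2cat(Val{2})          # (false, true)
Base.cat_shape(catdims, (2, 3), (2, 4))  # (2, 7): dim 1 must match, dim 2 is summed

# User-facing equivalent of the same computation:
size(hcat(rand(2, 3), rand(2, 4)))       # (2, 7)
```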
@@ -1668,15 +1668,15 @@ end
function _sub2ind!(Iout, inds, Iinds, I)
@_noinline_meta
for i in Iinds
# Iout[i] = sub2ind(inds, map(Ij->Ij[i], I)...)
# Iout[i] = sub2ind(inds, map(Ij -> Ij[i], I)...)
Iout[i] = sub2ind_vec(inds, i, I)
end
Iout
end

sub2ind_vec(inds, i, I) = (@_inline_meta; _sub2ind_vec(inds, (), i, I...))
_sub2ind_vec(inds, out, i, I1, I...) = (@_inline_meta; _sub2ind_vec(inds, (out..., I1[i]), i, I...))
_sub2ind_vec(inds, out, i) = (@_inline_meta; sub2ind(inds, out...))
sub2ind_vec(inds, i, I) = (@_inline_meta; sub2ind(inds, _sub2ind_vec(i, I...)...))
_sub2ind_vec(i, I1, I...) = (@_inline_meta; (I1[i], _sub2ind_vec(i, I...)...))
_sub2ind_vec(i) = ()
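Note: the rewritten helper gathers the `i`-th subscript from every index vector and then makes a single `sub2ind` call. The exported `sub2ind` shows the computation performed for each `i`; the sketch below is only illustrative:

```julia
inds = (1:3, 1:4)      # a 3×4 shape
I = ([1, 2], [1, 3])   # two points: (1, 1) and (2, 3)
[sub2ind(inds, I[1][k], I[2][k]) for k in 1:2]   # [1, 8], the corresponding linear indices
```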

function ind2sub(inds::Union{DimsInteger{N},Indices{N}}, ind::AbstractVector{<:Integer}) where N
M = length(ind)
7 changes: 1 addition & 6 deletions base/array.jl
@@ -73,12 +73,7 @@ end
size(a::Array, d) = arraysize(a, d)
size(a::Vector) = (arraysize(a,1),)
size(a::Matrix) = (arraysize(a,1), arraysize(a,2))
size(a::Array) = (@_inline_meta; _size((), a))
_size(out::NTuple{N}, A::Array{_,N}) where {_,N} = out
function _size(out::NTuple{M}, A::Array{_,N}) where _ where M where N
@_inline_meta
_size((out..., size(A,M+1)), A)
end
size(a::Array{<:Any,N}) where {N} = (@_inline_meta; ntuple(M -> size(a, M), Val{N}))
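Note: because `N` is a type parameter, `ntuple` with `Val{N}` builds a tuple whose length is known at compile time, so `size` on `Array` stays fully inferable. A small sketch of the equivalence:

```julia
a = zeros(2, 3, 4)
size(a)                          # (2, 3, 4)
ntuple(d -> size(a, d), Val{3})  # the same tuple, built with a statically known length
```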

asize_from(a::Array, n) = n > ndims(a) ? () : (arraysize(a,n), asize_from(a, n+1)...)

25 changes: 12 additions & 13 deletions base/broadcast.jl
@@ -45,23 +45,22 @@ promote_containertype(::Type{T}, ::Type{T}) where {T} = T
## Calculate the broadcast indices of the arguments, or error if incompatible
# array inputs
broadcast_indices() = ()
broadcast_indices(A) = broadcast_indices(containertype(A), A)
broadcast_indices(::ScalarType, A) = ()
broadcast_indices(::Type{Tuple}, A) = (OneTo(length(A)),)
broadcast_indices(::Type{Array}, A::Ref) = ()
broadcast_indices(::Type{Array}, A) = indices(A)
@inline broadcast_indices(A, B...) = broadcast_shape((), broadcast_indices(A), map(broadcast_indices, B)...)
broadcast_indices(A) = _broadcast_indices(containertype(A), A)
@inline broadcast_indices(A, B...) = broadcast_shape(broadcast_indices(A), broadcast_indices(B...))
_broadcast_indices(::Type, A) = ()
_broadcast_indices(::Type{Tuple}, A) = (OneTo(length(A)),)
_broadcast_indices(::Type{Array}, A::Ref) = ()
_broadcast_indices(::Type{Array}, A) = indices(A)

# shape (i.e., tuple-of-indices) inputs
broadcast_shape(shape::Tuple) = shape
@inline broadcast_shape(shape::Tuple, shape1::Tuple, shapes::Tuple...) = broadcast_shape(_bcs((), shape, shape1), shapes...)
@inline broadcast_shape(shape::Tuple, shape1::Tuple, shapes::Tuple...) = broadcast_shape(_bcs(shape, shape1), shapes...)
# _bcs consolidates two shapes into a single output shape
_bcs(out, ::Tuple{}, ::Tuple{}) = out
@inline _bcs(out, ::Tuple{}, newshape) = _bcs((out..., newshape[1]), (), tail(newshape))
@inline _bcs(out, shape, ::Tuple{}) = _bcs((out..., shape[1]), tail(shape), ())
@inline function _bcs(out, shape, newshape)
newout = _bcs1(shape[1], newshape[1])
_bcs((out..., newout), tail(shape), tail(newshape))
_bcs(::Tuple{}, ::Tuple{}) = ()
@inline _bcs(::Tuple{}, newshape::Tuple) = (newshape[1], _bcs((), tail(newshape))...)
@inline _bcs(shape::Tuple, ::Tuple{}) = (shape[1], _bcs(tail(shape), ())...)
@inline function _bcs(shape::Tuple, newshape::Tuple)
return (_bcs1(shape[1], newshape[1]), _bcs(tail(shape), tail(newshape))...)
end
# _bcs1 handles the logic for a single dimension
_bcs1(a::Integer, b::Integer) = a == 1 ? b : (b == 1 ? a : (a == b ? a : throw(DimensionMismatch("arrays could not be broadcast to a common size"))))
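Note: the accumulator-free `_bcs` means `broadcast_shape` now returns a tuple built in one pass. A hedged sketch using integer shapes, which hit the `_bcs1(a::Integer, b::Integer)` method shown above; the `Base.Broadcast` path is an assumption about where these internals live:

```julia
Base.Broadcast.broadcast_shape((3, 1), (1, 4))   # (3, 4)
Base.Broadcast.broadcast_shape((3, 1), (3,))     # (3, 1): missing trailing dims act like 1

# User-facing equivalent:
size(rand(3, 1) .+ rand(1, 4))                   # (3, 4)
```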
112 changes: 71 additions & 41 deletions base/inference.jl
@@ -15,6 +15,7 @@ struct InferenceParams
inlining::Bool

# parameters limiting potentially-infinite types (configurable)
MAX_METHODS::Int
MAX_TUPLETYPE_LEN::Int
MAX_TUPLE_DEPTH::Int
MAX_TUPLE_SPLAT::Int
@@ -24,12 +25,13 @@
# reasonable defaults
function InferenceParams(world::UInt;
inlining::Bool = inlining_enabled(),
max_methods::Int = 4,
tupletype_len::Int = 15,
tuple_depth::Int = 4,
tuple_splat::Int = 16,
union_splitting::Int = 4,
apply_union_enum::Int = 8)
return new(world, inlining, tupletype_len,
return new(world, inlining, max_methods, tupletype_len,
tuple_depth, tuple_splat, union_splitting, apply_union_enum)
end
end
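Note: the new `MAX_METHODS` field is threaded through like the existing limits. A hedged sketch of constructing the parameters by hand, assuming the constructor is reachable as `Core.Inference.InferenceParams` on this branch; the world-age argument is arbitrary here:

```julia
params = Core.Inference.InferenceParams(typemax(UInt); max_methods = 8)
params.MAX_METHODS   # 8; abstract_call_gf_by_type uses this instead of the hard-coded 4
```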
@@ -1280,7 +1282,7 @@ function abstract_call_gf_by_type(f::ANY, atype::ANY, sv::InferenceState)
end
min_valid = UInt[typemin(UInt)]
max_valid = UInt[typemax(UInt)]
applicable = _methods_by_ftype(argtype, 4, sv.params.world, min_valid, max_valid)
applicable = _methods_by_ftype(argtype, sv.params.MAX_METHODS, sv.params.world, min_valid, max_valid)
rettype = Bottom
if applicable === false
# this means too many methods matched
@@ -1431,7 +1433,7 @@ function precise_container_type(arg::ANY, typ::ANY, vtypes::VarTable, sv::Infere
if isa(typ, Const)
val = typ.val
if isa(val, SimpleVector) || isa(val, Tuple)
return Any[ abstract_eval_constant(x) for x in val ]
return Any[ Const(val[i]) for i in 1:length(val) ] # avoid making a tuple Generator here!
end
end

@@ -1499,44 +1501,64 @@ function abstract_iteration(itertype::ANY, vtypes::VarTable, sv::InferenceState)
return Vararg{valtype}
end

function tuple_tail_elem(init::ANY, ct)
return Vararg{widenconst(foldl((a, b) -> tmerge(a, unwrapva(b)), init, ct))}
end

# do apply(af, fargs...), where af is a function value
function abstract_apply(af::ANY, fargs::Vector{Any}, aargtypes::Vector{Any}, vtypes::VarTable, sv::InferenceState)
function abstract_apply(aft::ANY, fargs::Vector{Any}, aargtypes::Vector{Any}, vtypes::VarTable, sv::InferenceState)
if !isa(aft, Const) && !isconstType(aft)
if !(isleaftype(aft) || aft <: Type) || (aft <: Builtin) || (aft <: IntrinsicFunction)
return Any
end
# non-constant function, but type is known
end
res = Union{}
nargs = length(fargs)
assert(nargs == length(aargtypes))
splitunions = countunionsplit(aargtypes) <= sv.params.MAX_APPLY_UNION_ENUM
ctypes = Any[Any[]]
splitunions = 1 < countunionsplit(aargtypes) <= sv.params.MAX_APPLY_UNION_ENUM
ctypes = Any[Any[aft]]
for i = 1:nargs
if aargtypes[i] === Any
# bail out completely and infer as f(::Any...)
# instead could keep what we got so far and just append a Vararg{Any} (by just
# using the normal logic from below), but that makes the time of the subarray
# test explode
ctypes = Any[Any[Vararg{Any}]]
# instead could infer the precise types for the types up to this point and just append a Vararg{Any}
# (by just using the normal logic from below), but that makes the time of the subarray test explode
push!(ctypes[1], Vararg{Any})
break
end
ctypes´ = []
for ti in (splitunions ? uniontypes(aargtypes[i]) : Any[aargtypes[i]])
cti = precise_container_type(fargs[i], ti, vtypes, sv)
for ct in ctypes
if !isempty(ct) && isvarargtype(ct[end])
tail = foldl((a,b)->tmerge(a,unwrapva(b)), unwrapva(ct[end]), cti)
push!(ctypes´, push!(ct[1:end-1], Vararg{widenconst(tail)}))
else
push!(ctypes´, append_any(ct, cti))
end
if length(ctypes[1]) == 1
for i = 1:nargs
ctypes´ = []
for ti in (splitunions ? uniontypes(aargtypes[i]) : Any[aargtypes[i]])
cti = precise_container_type(fargs[i], ti, vtypes, sv)
for ct in ctypes
if !isempty(ct) && isvarargtype(ct[end])
tail = tuple_tail_elem(unwrapva(ct[end]), cti)
push!(ctypes´, push!(ct[1:(end - 1)], tail))
else
push!(ctypes´, append_any(ct, cti))
end
end
end
ctypes = ctypes´
end
ctypes = ctypes´
end
for ct in ctypes
if length(ct) > sv.params.MAX_TUPLETYPE_LEN
tail = foldl((a,b)->tmerge(a,unwrapva(b)), Bottom, ct[sv.params.MAX_TUPLETYPE_LEN:end])
tail = tuple_tail_elem(Bottom, ct[sv.params.MAX_TUPLETYPE_LEN:end])
resize!(ct, sv.params.MAX_TUPLETYPE_LEN)
ct[end] = Vararg{widenconst(tail)}
ct[end] = tail
end
if isa(aft, Const)
rt = abstract_call(aft.val, (), ct, vtypes, sv)
elseif isconstType(aft)
rt = abstract_call(aft.parameters[1], (), ct, vtypes, sv)
else
astype = argtypes_to_type(ct)
rt = abstract_call_gf_by_type(nothing, astype, sv)
end
at = append_any(Any[Const(af)], ct)
res = tmerge(res, abstract_call(af, (), at, vtypes, sv))
res = tmerge(res, rt)
if res === Any
break
end
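Note: the rewritten `abstract_apply` no longer needs a constant function value: a splatted call through an argument whose type is a known leaf type can now be resolved via `abstract_call_gf_by_type`. A hedged check, which should report a concrete return type on a build that includes this change (and `Any` before it):

```julia
struct Scale{T}
    c::T
end
(s::Scale)(x, y) = s.c * (x + y)

g(s, t) = s(t...)     # splat through a non-constant callable of known concrete type

@code_warntype g(Scale(2.0), (1.0, 2.0))   # return type should now be inferred as Float64
```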
@@ -1651,20 +1673,7 @@ typename_static(t::ANY) = isType(t) ? _typename(t.parameters[1]) : Any
function abstract_call(f::ANY, fargs::Union{Tuple{},Vector{Any}}, argtypes::Vector{Any}, vtypes::VarTable, sv::InferenceState)
if f === _apply
length(fargs) > 1 || return Any
aft = argtypes[2]
if isa(aft, Const)
af = aft.val
else
if isType(aft) && isleaftype(aft.parameters[1])
af = aft.parameters[1]
elseif isleaftype(aft) && isdefined(aft, :instance)
af = aft.instance
else
# TODO jb/functions: take advantage of case where non-constant `af`'s type is known
return Any
end
end
return abstract_apply(af, fargs[3:end], argtypes[3:end], vtypes, sv)
return abstract_apply(argtypes[2], fargs[3:end], argtypes[3:end], vtypes, sv)
end

la = length(argtypes)
@@ -2508,12 +2517,14 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, caller
frame = resolve_call_cycle!(code, caller)
if frame === nothing
code.inInference = true
frame = InferenceState(code, true, true, caller.params) # always optimize and cache edge targets
frame = InferenceState(code, #=optimize=#true, #=cached=#true, caller.params) # always optimize and cache edge targets
if frame === nothing
code.inInference = false
return Any, nothing
end
frame.parent = caller
if caller.cached # don't involve uncached functions in cycle resolution
frame.parent = caller
end
typeinf(frame)
return frame.bestguess, frame.inferred ? frame.linfo : nothing
end
@@ -2849,6 +2860,7 @@ end
#### finalize and record the result of running type inference ####

function isinlineable(m::Method, src::CodeInfo)
# compute the cost (size) of inlining this code
inlineable = false
cost = 1000
if m.module === _topmod(m.module)
@@ -2941,7 +2953,25 @@ function optimize(me::InferenceState)
end

# determine and cache inlineability
if !me.src.inlineable && !force_noinline && isdefined(me.linfo, :def)
if !force_noinline
# don't keep ASTs for functions specialized on a Union argument
# TODO: this helps avoid a type-system bug mis-computing sparams during intersection
sig = unwrap_unionall(me.linfo.specTypes)
if isa(sig, DataType) && sig.name === Tuple.name
for P in sig.parameters
P = unwrap_unionall(P)
if isa(P, Union)
force_noinline = true
break
end
end
else
force_noinline = true
end
end
if force_noinline
me.src.inlineable = false
elseif !me.src.inlineable && isdefined(me.linfo, :def)
me.src.inlineable = isinlineable(me.linfo.def, me.src)
end
me.src.inferred = true
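Note: the new block refuses to mark as inlineable any specialization whose `Tuple` signature carries a `Union` parameter (working around the sparam mis-computation mentioned in the TODO). A standalone sketch of just that check, under the assumption that `Base.unwrap_unionall` behaves as in the code above; the function name is hypothetical:

```julia
# Does this specialization signature force noinline under the rule above?
function force_noinline_sig(specTypes)
    sig = Base.unwrap_unionall(specTypes)
    isa(sig, DataType) && sig.name === Tuple.name || return true  # non-Tuple signatures: noinline
    return any(P -> isa(Base.unwrap_unionall(P), Union), sig.parameters)
end

force_noinline_sig(Tuple{typeof(sin), Float64})              # false
force_noinline_sig(Tuple{typeof(sin), Union{Int, Float64}})  # true
```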
34 changes: 18 additions & 16 deletions base/int.jl
@@ -362,22 +362,24 @@ end
# @doc isn't available when running in Core at this point.
# Tuple syntax for documenting two function signatures at the same time
# doesn't work either at this point.
isdefined(Main, :Base) && for fname in (:mod, :rem)
@eval @doc """
rem(x::Integer, T::Type{<:Integer}) -> T
mod(x::Integer, T::Type{<:Integer}) -> T
%(x::Integer, T::Type{<:Integer}) -> T

Find `y::T` such that `x` ≡ `y` (mod n), where n is the number of integers representable
in `T`, and `y` is an integer in `[typemin(T),typemax(T)]`.
If `T` can represent any integer (e.g. `T == BigInt`), then this operation corresponds to
a conversion to `T`.

```jldoctest
julia> 129 % Int8
-127
```
""" -> $fname(x::Integer, T::Type{<:Integer})
if module_name(current_module()) === :Base
for fname in (:mod, :rem)
@eval @doc ("""
rem(x::Integer, T::Type{<:Integer}) -> T
mod(x::Integer, T::Type{<:Integer}) -> T
%(x::Integer, T::Type{<:Integer}) -> T

Find `y::T` such that `x` ≡ `y` (mod n), where n is the number of integers representable
in `T`, and `y` is an integer in `[typemin(T),typemax(T)]`.
If `T` can represent any integer (e.g. `T == BigInt`), then this operation corresponds to
a conversion to `T`.

```jldoctest
julia> 129 % Int8
-127
```
""" -> $fname(x::Integer, T::Type{<:Integer}))
end
end

rem(x::T, ::Type{T}) where {T<:Integer} = x
7 changes: 4 additions & 3 deletions base/multidimensional.jl
@@ -138,9 +138,10 @@ module IteratorsMD
eachindex(::IndexCartesian, A::AbstractArray) = CartesianRange(indices(A))

@inline eachindex(::IndexCartesian, A::AbstractArray, B::AbstractArray...) =
CartesianRange(maxsize((), A, B...))
maxsize(sz) = sz
@inline maxsize(sz, A, B...) = maxsize(maxt(sz, size(A)), B...)
CartesianRange(maxsize(A, B...))
maxsize() = ()
@inline maxsize(A) = size(A)
@inline maxsize(A, B...) = maxt(size(A), maxsize(B...))
@inline maxt(a::Tuple{}, b::Tuple{}) = ()
@inline maxt(a::Tuple{}, b::Tuple) = b
@inline maxt(a::Tuple, b::Tuple{}) = a
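Note: the size-based recursion replaces the tuple-accumulator version. A standalone sketch with hypothetical names (so it does not clash with the `IteratorsMD` internals), filling in the general `maxt` method that sits below this hunk as an assumption about the hidden context:

```julia
maxsz(A) = size(A)
maxsz(A, Bs...) = maxt(size(A), maxsz(Bs...))        # elementwise max over all size tuples
maxt(a::Tuple{}, b::Tuple{}) = ()
maxt(a::Tuple{}, b::Tuple) = b
maxt(a::Tuple, b::Tuple{}) = a
maxt(a::Tuple, b::Tuple) = (max(a[1], b[1]), maxt(Base.tail(a), Base.tail(b))...)

maxsz(rand(3, 4), rand(3), rand(1, 1, 2))   # (3, 4, 2)
```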