diff --git a/base/inference.jl b/base/inference.jl
index faaa040274e896..10a8b996cc563c 100644
--- a/base/inference.jl
+++ b/base/inference.jl
@@ -47,7 +47,6 @@ type InferenceState

     # info on the state of inference and the linfo
     linfo::LambdaInfo
-    destination::LambdaInfo # results need to be copied here when we finish
     nargs::Int
     stmt_types::Vector{Any}
     # return type
@@ -73,11 +72,9 @@ type InferenceState
     inworkq::Bool
     optimize::Bool
     inferred::Bool
-    tfunc_bp::Union{TypeMapEntry, Void}

     function InferenceState(linfo::LambdaInfo, atypes::ANY, sparams::SimpleVector, optimize::Bool)
         @assert isa(linfo.code,Array{Any,1})
-        linfo.inInference = true
         nslots = length(linfo.slotnames)
         nl = label_counter(linfo.code)+1

@@ -157,12 +154,12 @@ type InferenceState
         inmodule = isdefined(linfo, :def) ? linfo.def.module : current_module() # toplevel thunks are inferred in the current module
         frame = new(
             sp, nl, Dict{SSAValue, Bool}(), inmodule, 0, false,
-            linfo, linfo, la, s, Union{}, W, n,
+            linfo, la, s, Union{}, W, n,
             cur_hand, handler_at, n_handlers,
             ssavalue_uses, ssavalue_init,
             ObjectIdDict(), #Dict{InferenceState, Vector{LineNum}}(),
             Vector{Tuple{InferenceState, Vector{LineNum}}}(),
-            false, false, false, optimize, false, nothing)
+            false, false, false, optimize, false)
         push!(active, frame)
         nactive[] += 1
         return frame
@@ -1385,25 +1382,25 @@ function newvar!(sv::InferenceState, typ)
 end

 # create a specialized LambdaInfo from a method
-function specialize_method(method::Method, types::ANY, sp::SimpleVector)
-    li = ccall(:jl_get_specialized, Ref{LambdaInfo}, (Any, Any, Any), method, types, sp)
-    return li
+function specialize_method(method::Method, types::ANY, sp::SimpleVector, cached)
+    if cached
+        return ccall(:jl_specializations_get_linfo, Ref{LambdaInfo}, (Any, Any, Any), method, types, sp)
+    else
+        return ccall(:jl_get_specialized, Ref{LambdaInfo}, (Any, Any, Any), method, types, sp)
+    end
 end

 # create copies of any field that type-inference might modify
 function unshare_linfo!(li::LambdaInfo)
-    if !isa(li.code, Array{Any,1})
+    orig = li.def.lambda_template
+    if isa(li.code, Array{UInt8,1})
         li.code = ccall(:jl_uncompress_ast, Any, (Any,Any), li, li.code)
-    else
-        li.code = copy_exprargs(li.code)
+    elseif li.code === orig.code
+        li.code = copy_exprargs(orig.code)
     end
-    li.slotnames = copy(li.slotnames)
-    li.slotflags = copy(li.slotflags)
-    if isa(li.slottypes, Array)
-        li.slottypes = copy(li.slottypes)
-    end
-    if isa(li.ssavaluetypes, Array)
-        li.ssavaluetypes = copy(li.ssavaluetypes)
+    if !li.def.isstaged
+        li.slotnames = copy(li.slotnames)
+        li.slotflags = copy(li.slotflags)
     end
     return li
 end
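Note on the two hunks above: `specialize_method` gains a `cached` flag and, when it is set, routes through the new C entry point `jl_specializations_get_linfo` (defined in the src/gf.c hunk below), which returns the existing specialization for `(method, types)` while its IR is still present and otherwise creates and inserts a fresh one. A self-contained Julia sketch of that get-or-create discipline, using hypothetical stand-ins (`MockLinfo`, `SPECIALIZATIONS`, `get_or_create`) rather than anything defined in this patch:

    # illustrative sketch only, not part of the patch
    type MockLinfo
        code    # holds the IR, or `nothing` once it has been deleted
    end

    const SPECIALIZATIONS = Dict{Any,MockLinfo}()

    function get_or_create(key, make)
        li = get(SPECIALIZATIONS, key, nothing)
        # reuse the cached entry only while it still carries its IR
        li !== nothing && li.code !== nothing && return li
        li = make()
        SPECIALIZATIONS[key] = li  # the C version leaves a TODO to fuse lookup and insert
        return li
    end

    # repeated queries for the same signature yield the same object
    a = get_or_create((:f, Tuple{Int}), () -> MockLinfo(:ir))
    b = get_or_create((:f, Tuple{Int}), () -> MockLinfo(:ir))
    @assert a === b

Because a hit whose IR has been deleted counts as a miss, callers never receive a specialization they cannot run; and since specializations now derive from the method's `lambda_template`, `unshare_linfo!` above only has to copy the fields that inference will actually mutate.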
@@ -1412,9 +1409,11 @@ end
 function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtree::Bool, optimize::Bool, cached::Bool, caller)
     local code = nothing
     local frame = nothing
-    # check cached specializations
-    # for an existing result stored there
-    if cached
+    if isa(caller, LambdaInfo)
+        code = caller
+    elseif cached
+        # check cached specializations
+        # for an existing result stored there
         if !is(method.specializations, nothing)
             code = ccall(:jl_specializations_lookup, Any, (Any, Any), method, atypes)
             if isa(code, Void)
@@ -1434,89 +1433,80 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtr
                 code = nothing
             end
         end
+    end

-    if isa(code, LambdaInfo) && code.inInference
-        # inference on this signature may be in progress,
-        # find the corresponding frame in the active list
-        for infstate in active
-            infstate === nothing && continue
-            infstate = infstate::InferenceState
-            if code === infstate.linfo
-                frame = infstate
-                break
+    if caller === nothing && in_typeinf_loop
+        # if the caller needed the ast, but we are already in the typeinf loop
+        # then just return early -- we can't fulfill this request
+        # if the client was inlining, then this means we decided not to try to infer this
+        # particular signature (due to signature coarsening in abstract_call_gf_by_type)
+        # and attempting to force it now would be a bad idea (non terminating)
+        skip = true
+        if method.module == _topmod(method.module) || (isdefined(Main, :Base) && method.module == Main.Base)
+            # however, some gf have special tfunc and meaning they wouldn't have been inferred yet
+            # check the same conditions from abstract_call to detect this case
+            if method.name == :promote_type || method.name == :typejoin
+                skip = false
+            elseif method.name == :getindex || method.name == :next || method.name == :indexed_next
+                argtypes = atypes.parameters
+                if length(argtypes)>2 && argtypes[3] ⊑ Int
+                    at2 = widenconst(argtypes[2])
+                    if (at2 <: Tuple ||
+                        (isa(at2, DataType) && isdefined(Main, :Base) && isdefined(Main.Base, :Pair) &&
+                         (at2::DataType).name === Main.Base.Pair.name))
+                        skip = false
+                    end
                 end
             end
         end
+        if skip
+            return (nothing, Union{}, false)
+        end
     end

-    if isa(caller, LambdaInfo)
-        code = caller
-    end
-
-    if frame === nothing
-        # inference not started yet, make a new frame for a new lambda
-        # add lam to be inferred and record the edge
-
-        if caller === nothing && in_typeinf_loop
-            # if the caller needed the ast, but we are already in the typeinf loop
-            # then just return early -- we can't fulfill this request
-            # if the client was inlining, then this means we decided not to try to infer this
-            # particular signature (due to signature coarsening in abstract_call_gf_by_type)
-            # and attempting to force it now would be a bad idea (non terminating)
-            skip = true
-            if method.module == _topmod(method.module) || (isdefined(Main, :Base) && method.module == Main.Base)
-                # however, some gf have special tfunc and meaning they wouldn't have been inferred yet
-                # check the same conditions from abstract_call to detect this case
-                if method.name == :promote_type || method.name == :typejoin
-                    skip = false
-                elseif method.name == :getindex || method.name == :next || method.name == :indexed_next
-                    argtypes = atypes.parameters
-                    if length(argtypes)>2 && argtypes[3] ⊑ Int
-                        at2 = widenconst(argtypes[2])
-                        if (at2 <: Tuple ||
-                            (isa(at2, DataType) && isdefined(Main, :Base) && isdefined(Main.Base, :Pair) &&
-                             (at2::DataType).name === Main.Base.Pair.name))
-                            skip = false
-                        end
-                    end
-                end
-            end
-            if skip
-                return (nothing, Union{}, false)
-            end
+    if isa(code, LambdaInfo) && code.code !== nothing
+        # reuse the existing code object
+        linfo = code
+        @assert typeseq(linfo.specTypes, atypes)
+    elseif method.isstaged
+        if !isleaftype(atypes)
+            # don't call staged functions on abstract types.
+            # (see issues #8504, #10230)
+            # we can't guarantee that their type behavior is monotonic.
+            return (nothing, Any, false)
+        end
+        try
+            # user code might throw errors – ignore them
+            linfo = specialize_method(method, atypes, sparams, cached)
+        catch
+            return (nothing, Any, false)
         end
+    else
+        linfo = specialize_method(method, atypes, sparams, cached)
+    end

-        if isa(code, LambdaInfo) && code.code !== nothing
-            # reuse the existing code object
-            linfo = code
-            @assert typeseq(linfo.specTypes, atypes)
-        elseif method.isstaged
-            if !isleaftype(atypes)
-                # don't call staged functions on abstract types.
-                # (see issues #8504, #10230)
-                # we can't guarantee that their type behavior is monotonic.
-                return (nothing, Any, false)
-            end
-            try
-                # user code might throw errors – ignore them
-                linfo = specialize_method(method, atypes, sparams)
-            catch
-                return (nothing, Any, false)
+    if linfo.inInference
+        # inference on this signature may be in progress,
+        # find the corresponding frame in the active list
+        for infstate in active
+            infstate === nothing && continue
+            infstate = infstate::InferenceState
+            if linfo === infstate.linfo
+                frame = infstate
+                break
             end
-        else
-            linfo = specialize_method(method, atypes, sparams)
         end
-        # our stack frame inference context
+        # TODO: this assertion seems iffy
+        assert(frame !== nothing)
+    else
+        # inference not started yet, make a new frame for a new lambda
+        linfo.inInference = true
         frame = InferenceState(unshare_linfo!(linfo::LambdaInfo), atypes, sparams, optimize)
-        if cached
-            frame.tfunc_bp = ccall(:jl_specializations_insert, Ref{TypeMapEntry}, (Any, Any, Any), method, atypes, linfo)
-        end
     end
     frame = frame::InferenceState

-    if !isa(caller, Void) && !isa(caller, LambdaInfo)
-        # if we were called from inside inference,
-        # the caller will be the InferenceState object
+    if isa(caller, InferenceState)
+        # if we were called from inside inference, the caller will be the InferenceState object
         # for which the edge was required
         caller = caller::InferenceState
         if haskey(caller.edges, frame)
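The restructuring above inverts the old control flow: `typeinf_edge` now always obtains a `linfo` first (reusing `code` when its IR survives, otherwise calling `specialize_method`), and only then branches on `linfo.inInference`, either joining an in-progress frame from `active` or setting the flag itself and building a new `InferenceState` (per the first hunk, the constructor no longer sets it). A sketch of the frame lookup, assuming only that `active` holds in-progress states with lazily-deleted slots left as `nothing`, which is what `finish` does; `find_active_frame` is a hypothetical name, not a function in this patch:

    function find_active_frame(active, linfo)
        for infstate in active
            infstate === nothing && continue  # skip a lazily-deleted slot
            infstate.linfo === linfo && return infstate
        end
        return nothing
    end

The patch asserts the result is never `nothing` once `linfo.inInference` is set, while flagging that assertion as iffy in a TODO.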
@@ -1552,26 +1542,30 @@

 function typeinf_ext(linfo::LambdaInfo)
     if isdefined(linfo, :def)
         # method lambda - infer this specialization via the method cache
         (code, _t, _) = typeinf_edge(linfo.def, linfo.specTypes, svec(), true, true, true, linfo)
-        if code.inferred
+        if code.inferred && linfo !== code
+            # This case occurs when the IR for a function has been deleted.
+            # `code` will be a newly-created LambdaInfo, and we need to copy its
+            # contents to the existing one to copy the info to the method cache.
             linfo.inferred = true
             linfo.inInference = false
-            if linfo !== code
-                linfo.code = code.code
-                linfo.slotnames = code.slotnames
-                linfo.slottypes = code.slottypes
-                linfo.slotflags = code.slotflags
-                linfo.ssavaluetypes = code.ssavaluetypes
-                linfo.rettype = code.rettype
-                linfo.pure = code.pure
-            end
-        end
+            linfo.code = code.code
+            linfo.slotnames = code.slotnames
+            linfo.slottypes = code.slottypes
+            linfo.slotflags = code.slotflags
+            linfo.ssavaluetypes = code.ssavaluetypes
+            linfo.rettype = code.rettype
+            linfo.pure = code.pure
+            linfo.inlineable = code.inlineable
+        end
+        return code
     else
         # toplevel lambda - infer directly
+        linfo.inInference = true
         frame = InferenceState(linfo, linfo.specTypes, svec(), true)
         typeinf_loop(frame)
         @assert frame.inferred # TODO: deal with this better
+        return linfo
     end
-    nothing
 end
@@ -1945,20 +1939,6 @@ function finish(me::InferenceState)
     end
     me.linfo.rettype = me.bestguess

-    if me.destination !== me.linfo
-        out = me.destination
-        out.inferred = true
-        out.inInference = false
-        out.code = me.linfo.code
-        out.slotnames = me.linfo.slotnames
-        out.slottypes = me.linfo.slottypes
-        out.slotflags = me.linfo.slotflags
-        out.ssavaluetypes = me.linfo.ssavaluetypes
-        out.rettype = me.linfo.rettype
-        out.pure = me.linfo.pure
-        out.inlineable = me.linfo.inlineable
-    end
-
     # lazy-delete the item from active for several reasons:
     # efficiency, correctness, and recursion-safety
     nactive[] -= 1
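With the `destination` field gone from `InferenceState`, `typeinf_ext` is now the single place where results are copied back into a method-cache entry, and it returns the `LambdaInfo` that actually carries the inferred code: `linfo` itself in the common case, or a freshly created object when the cached entry's IR had been deleted. Callers must therefore rebind the result rather than discard it, as the src/codegen.cpp hunk below does. A hypothetical Julia-side wrapper showing the same convention (`typeinf_ext` and `LambdaInfo` are real; the wrapper itself is not part of the patch):

    function infer_with_rebind(li::LambdaInfo)
        if li.code === nothing
            li = typeinf_ext(li)  # may return a different LambdaInfo than it was given
        end
        return li
    end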
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 4d18e27392fa3b..0dcf783927ae2b 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -1141,7 +1141,7 @@ void *jl_get_llvmf(jl_tupletype_t *tt, bool getwrapper, bool getdeclarations)

     if (linfo->code == jl_nothing) {
         // re-infer if we've deleted the code
-        jl_type_infer(linfo, 0);
+        linfo = jl_type_infer(linfo, 0);
         if (linfo->code == jl_nothing) {
             JL_GC_POP();
             return NULL;
diff --git a/src/gf.c b/src/gf.c
index 88b12c7bedc3b4..983d8ca987493d 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -111,10 +111,20 @@ static int8_t jl_cachearg_offset(jl_methtable_t *mt)

 /// ----- Insertion logic for special entries ----- ///

-JL_DLLEXPORT jl_typemap_entry_t *jl_specializations_insert(jl_method_t *m, jl_tupletype_t *type,
-                                                           jl_value_t *value)
+JL_DLLEXPORT jl_lambda_info_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types, jl_svec_t *sp);
+
+// get or create the LambdaInfo for a specialization
+JL_DLLEXPORT jl_lambda_info_t *jl_specializations_get_linfo(jl_method_t *m, jl_tupletype_t *type, jl_svec_t *sparams)
 {
-    return jl_typemap_insert(&m->specializations, (jl_value_t*)m, type, jl_emptysvec, NULL, jl_emptysvec, value, /*offs*/0, &tfunc_cache, NULL);
+    jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, type, NULL, 1, /*subtype*/0, /*offs*/0);
+    if (sf && jl_is_lambda_info(sf->func.value) && ((jl_lambda_info_t*)sf->func.value)->code != jl_nothing)
+        return (jl_lambda_info_t*)sf->func.value;
+    jl_lambda_info_t *li = jl_get_specialized(m, type, sparams);
+    JL_GC_PUSH1(&li);
+    // TODO: fuse lookup and insert steps
+    jl_typemap_insert(&m->specializations, (jl_value_t*)m, type, jl_emptysvec, NULL, jl_emptysvec, (jl_value_t*)li, 0, &tfunc_cache, NULL);
+    JL_GC_POP();
+    return li;
 }

 JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_tupletype_t *type)
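`jl_specializations_get_linfo` replaces the notify-style `jl_specializations_insert` with a single idempotent get-or-create call (the lookup and insert are still two typemap operations, hence the TODO about fusing them). From Julia it is reached with the same `ccall` that `specialize_method` uses above; here `m`, `types`, and `sparams` are assumed to be a `Method`, a signature tuple type, and a `SimpleVector` of static parameter values:

    li1 = ccall(:jl_specializations_get_linfo, Ref{LambdaInfo}, (Any, Any, Any), m, types, sparams)
    li2 = ccall(:jl_specializations_get_linfo, Ref{LambdaInfo}, (Any, Any, Any), m, types, sparams)
    # as long as li1.code has not been deleted, both calls return the same object
    @assert li1 === li2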
@@ -154,8 +164,6 @@ jl_value_t *jl_mk_builtin_func(const char *name, jl_fptr_t fptr)
     return f;
 }

-JL_DLLEXPORT jl_lambda_info_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types, jl_svec_t *sp);
-
 jl_lambda_info_t *jl_get_unspecialized(jl_lambda_info_t *method)
 {
     // one unspecialized version of a function can be shared among all cached specializations
@@ -189,11 +197,11 @@ jl_lambda_info_t *jl_get_unspecialized(jl_lambda_info_t *method)
 }

 /*
-  run type inference on lambda "li" in-place, for given argument types.
-  "def" is the original method definition of which this is an instance;
-  can be equal to "li->def" if not applicable.
+  run type inference on lambda "li" for given argument types.
+  if "li" has been inferred before but the IR was deleted, returns a
+  new LambdaInfo with the IR reconstituted.
 */
-void jl_type_infer(jl_lambda_info_t *li, int force)
+jl_lambda_info_t *jl_type_infer(jl_lambda_info_t *li, int force)
 {
 #ifdef ENABLE_INFERENCE
     jl_module_t *mod = NULL;
@@ -207,7 +215,6 @@ void jl_type_infer(jl_lambda_info_t *li, int force)
              (mod != jl_core_module || !lastIn)))) { // avoid any potential recursion in calling jl_typeinf_func on itself
         JL_LOCK(&codegen_lock); // Might GC
         assert(li->inInference == 0);
-        li->inInference = 1;
         jl_value_t *fargs[2];
         fargs[0] = (jl_value_t*)jl_typeinf_func;
         fargs[1] = (jl_value_t*)li;
@@ -216,12 +223,13 @@
         jl_static_show_func_sig(JL_STDERR, (jl_value_t*)li->specTypes);
         jl_printf(JL_STDERR, "\n");
 #endif
-        jl_value_t *info = jl_apply(fargs, 2); (void)info;
+        li = (jl_lambda_info_t*)jl_apply(fargs, 2);
         assert(li->def || li->inInference == 0); // if this is toplevel expr, make sure inference finished
         JL_UNLOCK(&codegen_lock); // Might GC
     }
     inInference = lastIn;
 #endif
+    return li;
 }

 static int get_spec_unspec_list(jl_typemap_entry_t *l, void *closure)
@@ -610,19 +618,7 @@ static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *ca
     }

     // here we infer types and specialize the method
-    int from_specializations = 1;
-    // get a specialized version of the method (or reuse an existing one)
-    if (definition->specializations.unknown != jl_nothing) {
-        jl_value_t *li = jl_specializations_lookup(definition, type);
-        if (jl_typeof(li) == (jl_value_t*)jl_lambda_info_type) {
-            newmeth = (jl_lambda_info_t*)li;
-        }
-    }
-
-    if (!newmeth) {
-        from_specializations = 0;
-        newmeth = jl_get_specialized(definition, type, sparams);
-    }
+    newmeth = jl_specializations_get_linfo(definition, type, sparams);

     if (cache_with_orig) {
         // if there is a need to cache with one of the original signatures,
@@ -662,15 +658,7 @@ static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *ca
     jl_typemap_insert(cache, parent, origtype, jl_emptysvec, type, guardsigs,
                       (jl_value_t*)newmeth, jl_cachearg_offset(mt), &lambda_cache, NULL);

-    if (from_specializations) {
-        JL_UNLOCK(&codegen_lock); // Might GC
-        JL_GC_POP();
-        return newmeth;
-    }
-
-    if (newmeth->code != NULL && !newmeth->inferred && !newmeth->inInference) {
-        // notify type inference of the new method it should infer
-        jl_specializations_insert(definition, newmeth->specTypes, (jl_value_t*)newmeth);
+    if (!newmeth->inferred && !newmeth->inInference) {
         if (jl_options.compile_enabled != JL_OPTIONS_COMPILE_OFF) // don't bother with typeinf if compile is off
             if (jl_symbol_name(definition->name)[0] != '@')  // don't bother with typeinf on macros
                 jl_type_infer(newmeth, 0);
@@ -705,9 +693,8 @@ static jl_lambda_info_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t *
     jl_lambda_info_t *nf;
     if (!cache)
         nf = jl_get_specialized(m, sig, env);
-    else {
+    else
        nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, sig, tt, entry, env);
-    }
     JL_GC_POP();
     return nf;
 }
diff --git a/src/julia_internal.h b/src/julia_internal.h
index c13925b82334f8..712256f1b03cf5 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -69,7 +69,7 @@ STATIC_INLINE jl_value_t *newstruct(jl_datatype_t *type)
     return jv;
 }

-void jl_type_infer(jl_lambda_info_t *li, int force);
+jl_lambda_info_t *jl_type_infer(jl_lambda_info_t *li, int force);
 void jl_generate_fptr(jl_lambda_info_t *li);
 void jl_compile_linfo(jl_lambda_info_t *li);
 jl_lambda_info_t *jl_compile_for_dispatch(jl_lambda_info_t *li);