diff --git a/base/inference.jl b/base/inference.jl
index 67fa82f51a73a..ec75ce52ebdd1 100644
--- a/base/inference.jl
+++ b/base/inference.jl
@@ -38,7 +38,6 @@ immutable Const
 end
 
 type InferenceState
-    atypes #::Type    # type sig
     sp::SimpleVector     # static parameters
     label_counter::Int   # index of the current highest label for this function
     fedbackvars::Dict{SSAValue, Bool}
@@ -78,6 +77,7 @@ type InferenceState
 
     function InferenceState(linfo::LambdaInfo, atypes::ANY, sparams::SimpleVector, optimize::Bool)
         @assert isa(linfo.code,Array{Any,1})
+        linfo.inInference = true
         nslots = length(linfo.slotnames)
         nl = label_counter(linfo.code)+1
 
@@ -156,7 +156,7 @@ type InferenceState
         inmodule = isdefined(linfo, :def) ? linfo.def.module : current_module() # toplevel thunks are inferred in the current module
         frame = new(
-            atypes, sp, nl, Dict{SSAValue, Bool}(), inmodule, 0, false,
+            sp, nl, Dict{SSAValue, Bool}(), inmodule, 0, false,
             linfo, linfo, la, s, Union{}, W, n, cur_hand, handler_at, n_handlers,
             ssavalue_uses, ssavalue_init,
@@ -768,7 +768,7 @@ function abstract_call_gf_by_type(f::ANY, argtype::ANY, sv)
         limitlength = false
         for (callee, _) in sv.edges
             callee = callee::InferenceState
-            if method === callee.linfo.def && ls > length(callee.atypes.parameters)
+            if method === callee.linfo.def && ls > length(callee.linfo.specTypes.parameters)
                 limitlength = true
                 break
             end
@@ -781,16 +781,16 @@ function abstract_call_gf_by_type(f::ANY, argtype::ANY, sv)
             infstate = infstate::InferenceState
             if isdefined(infstate.linfo, :def) && method === infstate.linfo.def
                 td = type_depth(sig)
-                if ls > length(infstate.atypes.parameters)
+                if ls > length(infstate.linfo.specTypes.parameters)
                     limitlength = true
                 end
-                if td > type_depth(infstate.atypes)
+                if td > type_depth(infstate.linfo.specTypes)
                     # impose limit if we recur and the argument types grow beyond MAX_TYPE_DEPTH
                     if td > MAX_TYPE_DEPTH
                         sig = limit_type_depth(sig, 0, true, [])
                         break
                     else
-                        p1, p2 = sig.parameters, infstate.atypes.parameters
+                        p1, p2 = sig.parameters, infstate.linfo.specTypes.parameters
                         if length(p2) == ls
                             limitdepth = false
                             newsig = Array{Any}(ls)
@@ -1117,7 +1117,7 @@ function abstract_eval(e::ANY, vtypes::VarTable, sv::InferenceState)
             end
         elseif isleaftype(t)
             t = Type{t}
-        elseif isleaftype(sv.atypes)
+        elseif isleaftype(sv.linfo.specTypes)
             if isa(t,TypeVar)
                 t = Type{t.ub}
             else
@@ -1386,7 +1386,8 @@ end
 
 # create a specialized LambdaInfo from a method
 function specialize_method(method::Method, types::ANY, sp::SimpleVector)
-    li = ccall(:jl_get_specialized, Any, (Any, Any, Any), method, types, sp)::LambdaInfo
+    li = ccall(:jl_get_specialized, Ref{LambdaInfo}, (Any, Any, Any), method, types, sp)
+    return li
 end
 
 # create copies of any field that type-inference might modify
@@ -1408,38 +1409,54 @@ function unshare_linfo!(li::LambdaInfo)
 end
 
 #### entry points for inferring a LambdaInfo given a type signature ####
-
 function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtree::Bool, optimize::Bool, cached::Bool, caller)
+    local code = nothing
     local frame = nothing
-    offs = 0
-    # check cached t-functions
-    # aggregate all saved type inference data there
-    if cached && !is(method.tfunc, nothing)
-        code = ccall(:jl_tfunc_cache_lookup, Any, (Any, Any, Int8), method, atypes, offs)
-        if isa(code, InferenceState)
-            # inference on this signature is in progress
-            frame = code
-            if isa(caller, LambdaInfo)
-                # record the LambdaInfo where this result should be cached when it is finished
-                @assert frame.destination === frame.linfo || frame.destination === caller
-                frame.destination = caller
-            end
-        elseif isa(code, Type)
-            # sometimes just a return type is stored here. if a full AST
-            # is not needed, we can return it.
-            if !needtree
-                return (nothing, code, true)
-            end
-        elseif isa(code,LambdaInfo)
-            @assert code.inferred
-            return (code, code.rettype, true)
-        else
-            # otherwise this is an InferenceState from a different bootstrap stage's
-            # copy of the inference code; ignore it.
+    # check cached specializations
+    # for an existing result stored there
+    if cached
+        if !is(method.specializations, nothing)
+            code = ccall(:jl_specializations_lookup, Any, (Any, Any), method, atypes)
+            if isa(code, Void)
+                # something completely new
+            elseif isa(code, LambdaInfo)
+                # something existing
+                if code.inferred
+                    return (code, code.rettype, true)
+                end
+            else
+                # sometimes just a return type is stored here. if a full AST
+                # is not needed, we can return it.
+                typeassert(code, Type)
+                if !needtree
+                    return (nothing, code, true)
+                end
+                code = nothing
+            end
+        end
+
+        if isa(code, LambdaInfo) && code.inInference
+            # inference on this signature may be in progress,
+            # find the corresponding frame in the active list
+            for infstate in active
+                infstate === nothing && continue
+                infstate = infstate::InferenceState
+                if code === infstate.linfo
+                    frame = infstate
+                    break
+                end
+            end
         end
     end
+    if isa(caller, LambdaInfo)
+        code = caller
+    end
 
     if frame === nothing
+        # inference not started yet, make a new frame for a new lambda
+        # add lam to be inferred and record the edge
         if caller === nothing && needtree && in_typeinf_loop
             # if the caller needed the ast, but we are already in the typeinf loop
             # then just return early -- we can't fulfill this request
@@ -1468,9 +1485,11 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtr
                 return (nothing, Union{}, false)
             end
         end
-        # add lam to be inferred and record the edge
-        if isa(caller, LambdaInfo)
-            linfo = caller
+
+        if isa(code, LambdaInfo)
+            # reuse the existing code object
+            linfo = code
+            @assert typeseq(linfo.specTypes, atypes)
         elseif method.isstaged
             if !isleaftype(atypes)
                 # don't call staged functions on abstract types.
@@ -1488,16 +1507,18 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtr
             linfo = specialize_method(method, atypes, sparams)
         end
         # our stack frame inference context
-        frame = InferenceState(unshare_linfo!(linfo), atypes, sparams, optimize)
-
+        frame = InferenceState(unshare_linfo!(linfo::LambdaInfo), atypes, sparams, optimize)
         if cached
-            tfunc_bp = ccall(:jl_tfunc_cache_insert, Ref{TypeMapEntry}, (Any, Any, Any, Int8), method, atypes, frame, offs)
-            frame.tfunc_bp = tfunc_bp
+            frame.tfunc_bp = ccall(:jl_specializations_insert, Ref{TypeMapEntry}, (Any, Any, Any), method, atypes, linfo)
         end
     end
+    frame = frame::InferenceState
 
     if !isa(caller, Void) && !isa(caller, LambdaInfo)
-        @assert isa(caller, InferenceState)
+        # if we were called from inside inference,
+        # the caller will be the InferenceState object
+        # for which the edge was required
+        caller = caller::InferenceState
         if haskey(caller.edges, frame)
             Ws = caller.edges[frame]::Vector{Int}
             if !(caller.currpc in Ws)
@@ -1869,10 +1890,6 @@ end
 # inference completed on `me`
 # update the LambdaInfo and notify the edges
 function finish(me::InferenceState)
-    # lazy-delete the item from active for several reasons:
-    # efficiency, correctness, and recursion-safety
-    nactive[] -= 1
-    active[findlast(active, me)] = nothing
     for (i,_) in me.edges
         @assert (i::InferenceState).fixedpoint
     end
@@ -1941,9 +1958,11 @@ function finish(me::InferenceState)
         out.pure = me.linfo.pure
         out.inlineable = me.linfo.inlineable
     end
-    if me.tfunc_bp !== nothing
-        me.tfunc_bp.func = me.linfo
-    end
+
+    # lazy-delete the item from active for several reasons:
+    # efficiency, correctness, and recursion-safety
+    nactive[] -= 1
+    active[findlast(active, me)] = nothing
 
     # update all of the callers by traversing the backedges
     for (i,_) in me.backedges
diff --git a/src/alloc.c b/src/alloc.c
index cc02c98ff6698..ad5a3e8f2d5e4 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -566,14 +566,13 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(void)
 {
     jl_method_t *m = (jl_method_t*)newobj((jl_value_t*)jl_method_type,
                                           NWORDS(sizeof(jl_method_t)));
-    m->tfunc.unknown = jl_nothing;
+    m->specializations.unknown = jl_nothing;
     m->sig = NULL;
     m->tvars = NULL;
     m->ambig = NULL;
     m->roots = NULL;
     m->module = jl_current_module;
     m->lambda_template = NULL;
-    m->specializations = NULL;
     m->name = NULL;
     m->file = null_sym;
     m->line = 0;
diff --git a/src/dump.c b/src/dump.c
index 3e929b8c7e007..2f4f900ef94cb 100644
--- a/src/dump.c
+++ b/src/dump.c
@@ -725,7 +725,7 @@ static void jl_serialize_value_(ios_t *s, jl_value_t *v)
             arraylist_push(&reinit_list, (void*)pos);
             arraylist_push(&reinit_list, (void*)3);
         }
-        if (jl_is_method(v) && jl_typeof(((jl_method_t*)v)->tfunc.unknown) == (jl_value_t*)jl_typemap_level_type) {
+        if (jl_is_method(v) && jl_typeof(((jl_method_t*)v)->specializations.unknown) == (jl_value_t*)jl_typemap_level_type) {
            arraylist_push(&reinit_list, (void*)pos);
            arraylist_push(&reinit_list, (void*)4);
         }
@@ -835,7 +835,7 @@ static void jl_serialize_value_(ios_t *s, jl_value_t *v)
     else if (jl_is_method(v)) {
         writetag(s, jl_method_type);
         jl_method_t *m = (jl_method_t*)v;
-        union jl_typemap_t *tf = &m->tfunc;
+        union jl_typemap_t *tf = &m->specializations;
         if (tf->unknown && tf->unknown != jl_nothing) {
             // go through the t-func cache, replacing ASTs with just return
             // types for abstract argument types. these ASTs are generally
@@ -844,7 +844,6 @@ static void jl_serialize_value_(ios_t *s, jl_value_t *v)
         }
         jl_serialize_value(s, tf->unknown);
         jl_serialize_value(s, (jl_value_t*)m->name);
-        jl_serialize_value(s, (jl_value_t*)m->specializations);
         write_int8(s, m->isstaged);
         jl_serialize_value(s, (jl_value_t*)m->file);
         write_int32(s, m->line);
@@ -1445,12 +1444,10 @@ static jl_value_t *jl_deserialize_value_(ios_t *s, jl_value_t *vtag, jl_value_t
                                 NWORDS(sizeof(jl_method_t)));
         if (usetable)
             arraylist_push(&backref_list, m);
-        m->tfunc.unknown = jl_deserialize_value(s, (jl_value_t**)&m->tfunc);
-        jl_gc_wb(m, m->tfunc.unknown);
+        m->specializations.unknown = jl_deserialize_value(s, (jl_value_t**)&m->specializations);
+        jl_gc_wb(m, m->specializations.unknown);
         m->name = (jl_sym_t*)jl_deserialize_value(s, NULL);
         jl_gc_wb(m, m->name);
-        m->specializations = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&m->specializations);
-        if (m->specializations) jl_gc_wb(m, m->specializations);
         m->isstaged = read_int8(s);
         m->file = (jl_sym_t*)jl_deserialize_value(s, NULL);
         m->line = read_int32(s);
@@ -1834,9 +1831,9 @@ static void jl_reinit_item(ios_t *f, jl_value_t *v, int how, arraylist_t *tracee
             arraylist_push(tracee_list, mt);
             break;
         }
-        case 4: { // rehash tfunc
+        case 4: { // rehash specializations
             jl_method_t *m = (jl_method_t*)v;
-            jl_typemap_rehash(m->tfunc, 0);
+            jl_typemap_rehash(m->specializations, 0);
             break;
         }
         default:
diff --git a/src/gf.c b/src/gf.c
index acae6a3b90890..119bd98517f68 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -111,15 +111,15 @@ static int8_t jl_cachearg_offset(jl_methtable_t *mt)
 
 /// ----- Insertion logic for special entries ----- ///
 
-JL_DLLEXPORT jl_typemap_entry_t *jl_tfunc_cache_insert(jl_method_t *m, jl_tupletype_t *type,
-                                                       jl_value_t *value, int8_t offs)
+JL_DLLEXPORT jl_typemap_entry_t *jl_specializations_insert(jl_method_t *m, jl_tupletype_t *type,
+                                                           jl_value_t *value)
 {
-    return jl_typemap_insert(&m->tfunc, (jl_value_t*)m, type, jl_emptysvec, NULL, jl_emptysvec, value, offs, &tfunc_cache, NULL);
+    return jl_typemap_insert(&m->specializations, (jl_value_t*)m, type, jl_emptysvec, NULL, jl_emptysvec, value, /*offs*/0, &tfunc_cache, NULL);
 }
 
-JL_DLLEXPORT jl_value_t *jl_tfunc_cache_lookup(jl_method_t *m, jl_tupletype_t *type, int8_t offs)
+JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_tupletype_t *type)
 {
-    jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->tfunc, type, NULL, 1, 0, offs);
+    jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, type, NULL, 1, /*subtype*/0, /*offs*/0);
     if (!sf)
         return jl_nothing;
     return sf->func.value;
@@ -127,7 +127,7 @@ JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_tupletype_t
 {
-    jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(mt->defs, type, NULL, 1, 0, 0);
+    jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(mt->defs, type, NULL, 1, /*subtype*/0, /*offs*/0);
     if (!sf)
         return jl_nothing;
     return sf->func.value;
 }
@@ -224,17 +224,16 @@ void jl_type_infer(jl_lambda_info_t *li, int force)
 #endif
 }
 
+static int get_spec_unspec_list(jl_typemap_entry_t *l, void *closure)
+{
+    if (jl_is_lambda_info(l->func.value) && !l->func.linfo->inferred)
+        jl_array_ptr_1d_push((jl_array_t*)closure, l->func.value);
+    return 1;
+}
+
 static int get_method_unspec_list(jl_typemap_entry_t *def, void *closure)
 {
-    jl_array_t *spec = def->func.method->specializations;
-    if (spec == NULL)
-        return 1;
-    size_t i, l;
-    for (i = 0, l = jl_array_len(spec); i < l; i++) {
-        jl_value_t *li = jl_array_ptr_ref(spec, i);
-        if (jl_is_lambda_info(li) && !((jl_lambda_info_t*)li)->inferred)
-            jl_array_ptr_1d_push((jl_array_t*)closure, li);
-    }
+    jl_typemap_visitor(def->func.method->specializations, get_spec_unspec_list, closure);
     return 1;
 }
 
@@ -612,22 +611,15 @@ static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *ca
 
     // here we infer types and specialize the method
     int from_specializations = 1;
-    if (definition->specializations != NULL) {
-        // reuse code already generated for this combination of lambda and
-        // arguments types. this happens for inner generic functions where
-        // a new closure is generated on each call to the enclosing function.
-        jl_array_t *lilist = definition->specializations;
-        int k;
-        for (k = 0; k < lilist->nrows; k++) {
-            jl_lambda_info_t *li = (jl_lambda_info_t*)jl_array_ptr_ref(lilist, k);
-            if (jl_types_equal((jl_value_t*)li->specTypes, (jl_value_t*)type)) {
-                newmeth = li;
-                break;
-            }
+    // get a specialized version of the method (or reuse an existing one)
+    if (definition->specializations.unknown != jl_nothing) {
+        jl_value_t *li = jl_specializations_lookup(definition, type);
+        if (jl_typeof(li) == (jl_value_t*)jl_lambda_info_type) {
+            newmeth = (jl_lambda_info_t*)li;
         }
     }
-    if (!newmeth || newmeth->inInference) {
+
+    if (!newmeth) {
         from_specializations = 0;
         newmeth = jl_get_specialized(definition, type, sparams);
     }
@@ -676,17 +668,9 @@ static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *ca
         return newmeth;
     }
 
-    if (newmeth->code != NULL) {
-        jl_array_t *spe = definition->specializations;
-        if (spe == NULL) {
-            spe = jl_alloc_vec_any(1);
-            jl_array_ptr_set(spe, 0, newmeth);
-        }
-        else {
-            jl_array_ptr_1d_push(spe, (jl_value_t*)newmeth);
-        }
-        definition->specializations = spe;
-        jl_gc_wb(definition, definition->specializations);
+    if (newmeth->code != NULL && !newmeth->inferred && !newmeth->inInference) {
+        // notify type inference of the new method it should infer
+        jl_specializations_insert(definition, newmeth->specTypes, (jl_value_t*)newmeth);
         if (jl_options.compile_enabled != JL_OPTIONS_COMPILE_OFF) // don't bother with typeinf if compile is off
             if (jl_symbol_name(definition->name)[0] != '@')  // don't bother with typeinf on macros
                 jl_type_infer(newmeth, 0);
@@ -1469,16 +1453,7 @@ static int _precompile_enq_tfunc(jl_typemap_entry_t *l, void *closure)
 
 static int _precompile_enq_spec(jl_typemap_entry_t *def, void *closure)
 {
-    jl_typemap_visitor(def->func.method->tfunc, _precompile_enq_tfunc, closure);
-    jl_array_t *spec = def->func.method->specializations;
-    if (spec == NULL)
-        return 1;
-    size_t i, l;
-    for (i = 0, l = jl_array_len(spec); i < l; i++) {
-        jl_value_t *li = jl_array_ptr_ref(spec, i);
-        if (jl_is_lambda_info(li) && !((jl_lambda_info_t*)li)->functionID)
-            jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)((jl_lambda_info_t*)li)->specTypes);
-    }
+    jl_typemap_visitor(def->func.method->specializations, _precompile_enq_tfunc, closure);
     return 1;
 }
diff --git a/src/jltypes.c b/src/jltypes.c
index 9680b4df17820..284b9526fb159 100644
--- a/src/jltypes.c
+++ b/src/jltypes.c
@@ -3592,7 +3592,7 @@ void jl_init_types(void)
     jl_method_type =
         jl_new_datatype(jl_symbol("Method"),
                         jl_any_type, jl_emptysvec,
-                        jl_svec(16,
+                        jl_svec(15,
                                 jl_symbol("name"),
                                 jl_symbol("module"),
                                 jl_symbol("file"),
@@ -3601,7 +3601,6 @@ void jl_init_types(void)
                                 jl_symbol("tvars"),
                                 jl_symbol("ambig"),
                                 jl_symbol("specializations"),
-                                jl_symbol("tfunc"),
jl_symbol("lambda_template"), jl_symbol("roots"), jl_symbol("invokes"), @@ -3609,7 +3608,7 @@ void jl_init_types(void) jl_symbol("isstaged"), jl_symbol("needs_sparam_vals_ducttape"), jl_symbol("")), - jl_svec(16, + jl_svec(15, jl_sym_type, jl_module_type, jl_sym_type, @@ -3617,7 +3616,6 @@ void jl_init_types(void) jl_type_type, jl_any_type, jl_any_type, // Union{Array, Void} - jl_array_any_type, jl_any_type, jl_any_type, jl_array_any_type, @@ -3626,7 +3624,7 @@ void jl_init_types(void) jl_bool_type, jl_bool_type, jl_bool_type), - 0, 1, 9); + 0, 1, 8); jl_lambda_info_type = jl_new_datatype(jl_symbol("LambdaInfo"), @@ -3681,7 +3679,7 @@ void jl_init_types(void) jl_int32_type, jl_int32_type), 0, 1, 10); jl_svecset(jl_lambda_info_type->types, 9, jl_lambda_info_type); - jl_svecset(jl_method_type->types, 9, jl_lambda_info_type); + jl_svecset(jl_method_type->types, 8, jl_lambda_info_type); jl_typector_type = jl_new_datatype(jl_symbol("TypeConstructor"), diff --git a/src/julia.h b/src/julia.h index da47db61bcae0..aef3da35565c6 100644 --- a/src/julia.h +++ b/src/julia.h @@ -202,10 +202,8 @@ typedef struct _jl_method_t { // list of potentially-ambiguous methods (nothing = none, Vector{Any} of Methods otherwise) jl_value_t *ambig; - // array of all lambda infos with code generated from this one - jl_array_t *specializations; - // table of all argument types for which we've inferred this code - union jl_typemap_t tfunc; + // table of all argument types for which we've inferred or compiled this code + union jl_typemap_t specializations; // the AST template (or, for isstaged, code for the generator) struct _jl_lambda_info_t *lambda_template; diff --git a/src/typemap.c b/src/typemap.c index 63b030a427cfb..19a004499fd3e 100644 --- a/src/typemap.c +++ b/src/typemap.c @@ -563,7 +563,7 @@ static jl_typemap_entry_t *jl_typemap_assoc_by_type_(jl_typemap_entry_t *ml, jl_ else { // TODO: this is missing the actual subtype test, // which works currently because types is typically a leaf tt, - // or inexact is set (which then does the subtype test) + // or inexact is set (which then does a sort of subtype test via jl_types_equal) // but this isn't entirely general jl_value_t *ti = jl_lookup_match((jl_value_t*)types, (jl_value_t*)ml->sig, penv, ml->tvars); resetenv = 1; @@ -642,24 +642,32 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_ // called object is the primary key for constructors, otherwise first argument jl_value_t *ty = NULL; size_t l = jl_datatype_nfields(types); + int isva = 0; // compute the type at offset `offs` into `types`, which may be a Vararg if (l <= offs + 1) { ty = jl_tparam(types, l - 1); - if (jl_is_vararg_type(ty)) + if (jl_is_vararg_type(ty)) { ty = jl_tparam0(ty); - else if (l <= offs) + isva = 1; + } + else if (l <= offs) { ty = NULL; + } } else if (l > offs) { ty = jl_tparam(types, offs); } // If there is a type at offs, look in the optimized caches - if (ty) { - if (!subtype && jl_is_any(ty)) + if (!subtype) { + if (ty && jl_is_any(ty)) return jl_typemap_assoc_by_type(cache->any, types, penv, subtype_inexact__sigseq_useenv, subtype, offs+1); + if (isva) // in lookup mode, want to match Vararg exactly, not as a subtype + ty = NULL; + } + if (ty) { if (cache->targ != (void*)jl_nothing && jl_is_type_type(ty)) { jl_value_t *a0 = jl_tparam0(ty); - if (jl_is_datatype(a0)) { + if (cache->targ != (void*)jl_nothing && jl_is_datatype(a0)) { union jl_typemap_t ml = mtcache_hash_lookup(cache->targ, a0, 1, offs); if (ml.unknown != jl_nothing) { jl_typemap_entry_t 
                        jl_typemap_entry_t *li = jl_typemap_assoc_by_type(ml, types, penv,
@@ -667,6 +675,7 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_
                         if (li) return li;
                     }
                 }
+                if (!subtype && is_cache_leaf(a0)) return NULL;
             }
             if (cache->arg1 != (void*)jl_nothing && jl_is_datatype(ty)) {
                 union jl_typemap_t ml = mtcache_hash_lookup(cache->arg1, ty, 0, offs);
@@ -676,6 +685,7 @@ jl_typemap_entry_t *jl_typemap_assoc_by_type(union jl_typemap_t ml_or_cache, jl_
                     if (li) return li;
                 }
             }
+            if (!subtype && is_cache_leaf(ty)) return NULL;
         }
         // Always check the list (since offs doesn't always start at 0)
         if (subtype) {
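Note: the net effect of this patch is that `Method.specializations` becomes the single typemap recording every argument-type signature for which a `LambdaInfo` has been inferred or compiled, replacing both the old `Method.tfunc` cache and the old `specializations` array. As a rough sketch of the lookup protocol that `typeinf_edge` now follows (the helper name `lookup_spec` is hypothetical and not part of this patch; the ccall signature is the one introduced above):

    # Sketch only: mirrors how typeinf_edge dispatches on the cached value.
    function lookup_spec(method::Method, atypes::ANY)
        code = ccall(:jl_specializations_lookup, Any, (Any, Any), method, atypes)
        isa(code, Void)       && return nothing  # nothing cached for this signature yet
        isa(code, LambdaInfo) && return code     # an existing specialization (possibly mid-inference)
        return code::Type                        # otherwise only an inferred return type was stored
    end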