diff --git a/base/boot.jl b/base/boot.jl
index 149b940d5d352..65dc5137d34cb 100644
--- a/base/boot.jl
+++ b/base/boot.jl
@@ -803,4 +803,7 @@ Integer(x::Union{Float16, Float32, Float64}) = Int(x)
 # The internal jl_parse which will call into Core._parse if not `nothing`.
 _parse = nothing
 
+# support for deprecated uses of internal _apply function
+_apply(x...) = Core._apply_iterate(Main.Base.iterate, x...)
+
 ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Core, true)
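Note: the shim above keeps deprecated callers of `Core._apply` working by
forwarding to the iterate-aware builtin. A minimal sketch of the equivalence it
provides (illustration only, not part of the patch):

    # Splatting a tuple through the legacy entry point now routes through
    # _apply_iterate, with Base.iterate supplied explicitly:
    @assert Core._apply(+, (1, 2)) === 3
    @assert Core._apply(+, (1, 2)) === Core._apply_iterate(Base.iterate, +, (1, 2))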
diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl
index 4ef63f3cbaae1..676ed332fb0e0 100644
--- a/base/compiler/abstractinterpretation.jl
+++ b/base/compiler/abstractinterpretation.jl
@@ -577,13 +577,7 @@ end
 
 # simulate iteration protocol on container type up to fixpoint
 function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @nospecialize(itertype), sv::InferenceState)
-    if !isdefined(Main, :Base) || !isdefined(Main.Base, :iterate) || !isconst(Main.Base, :iterate)
-        return Any[Vararg{Any}], nothing
-    end
-    if itft === nothing
-        iteratef = getfield(Main.Base, :iterate)
-        itft = Const(iteratef)
-    elseif isa(itft, Const)
+    if isa(itft, Const)
         iteratef = itft.val
     else
         return Any[Vararg{Any}], nothing
@@ -595,6 +589,7 @@ function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @n
     # Return Bottom if this is not an iterator.
     # WARNING: Changes to the iteration protocol must be reflected here,
     # this is not just an optimization.
+    # TODO: this doesn't realize that Array, SimpleVector, Tuple, and NamedTuple do not use the iterate protocol
     stateordonet === Bottom && return Any[Bottom], AbstractIterationInfo(CallMeta[CallMeta(Bottom, info)])
     valtype = statetype = Bottom
     ret = Any[]
@@ -658,7 +653,7 @@ function abstract_apply(interp::AbstractInterpreter, @nospecialize(itft), @nospe
     aftw = widenconst(aft)
     if !isa(aft, Const) && (!isType(aftw) || has_free_typevars(aftw))
         if !isconcretetype(aftw) || (aftw <: Builtin)
-            add_remark!(interp, sv, "Core._apply called on a function of a non-concrete type")
+            add_remark!(interp, sv, "Core._apply_iterate called on a function of a non-concrete type")
             # bail now, since it seems unlikely that abstract_call will be able to do any better after splitting
             # this also ensures we don't call abstract_call_gf_by_type below on an IntrinsicFunction or Builtin
             return CallMeta(Any, false)
@@ -805,7 +800,8 @@ function abstract_call_builtin(interp::AbstractInterpreter, f::Builtin, fargs::U
     end
     rt = builtin_tfunction(interp, f, argtypes[2:end], sv)
     if f === getfield && isa(fargs, Vector{Any}) && la == 3 && isa(argtypes[3], Const) && isa(argtypes[3].val, Int) && argtypes[2] ⊑ Tuple
-        cti, _ = precise_container_type(interp, nothing, argtypes[2], sv)
+        # TODO: why doesn't this use the getfield_tfunc?
+        cti, _ = precise_container_type(interp, iterate, argtypes[2], sv)
         idx = argtypes[3].val
         if 1 <= idx <= length(cti)
             rt = unwrapva(cti[idx])
@@ -923,11 +919,7 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f),
     la = length(argtypes)
 
     if isa(f, Builtin)
-        if f === _apply
-            ft = argtype_by_index(argtypes, 2)
-            ft === Bottom && return CallMeta(Bottom, false)
-            return abstract_apply(interp, nothing, ft, argtype_tail(argtypes, 3), sv, max_methods)
-        elseif f === _apply_iterate
+        if f === _apply_iterate
             itft = argtype_by_index(argtypes, 2)
             ft = argtype_by_index(argtypes, 3)
             (itft === Bottom || ft === Bottom) && return CallMeta(Bottom, false)
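Note: abstract_iteration mirrors, at the type level, the value-level protocol
shown below; the WARNING in the hunk above is about keeping the two in sync. A
runnable sketch of that protocol (the helper name is made up for illustration):

    # What "simulate iteration protocol up to fixpoint" models concretely:
    function collect_via_iterate(itr)
        ret = Any[]
        y = iterate(itr)
        while y !== nothing
            val, state = y
            push!(ret, val)
            y = iterate(itr, state)
        end
        return ret
    end
    @assert collect_via_iterate((1, 2, 3)) == [1, 2, 3]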
diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl
index 37e281f2f2724..0b1d03f61b704 100644
--- a/base/compiler/compiler.jl
+++ b/base/compiler/compiler.jl
@@ -1,11 +1,11 @@
 # This file is a part of Julia. License is MIT: https://julialang.org/license
 
-getfield(getfield(Main, :Core), :eval)(getfield(Main, :Core), :(baremodule Compiler
+getfield(Core, :eval)(Core, :(baremodule Compiler
 
 using Core.Intrinsics, Core.IR
 
 import Core: print, println, show, write, unsafe_write, stdout, stderr,
-             _apply, _apply_iterate, svec, apply_type, Builtin, IntrinsicFunction,
+             _apply_iterate, svec, apply_type, Builtin, IntrinsicFunction,
              MethodInstance, CodeInstance, MethodMatch
 
 const getproperty = Core.getfield
diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl
index 9d9bc45dc1e9f..f66d797b866db 100644
--- a/base/compiler/optimize.jl
+++ b/base/compiler/optimize.jl
@@ -326,19 +326,19 @@ function statement_cost(ex::Expr, line::Int, src::CodeInfo, sptypes::Vector{Any}
         # The efficiency of operations like a[i] and s.b
        # depend strongly on whether the result can be
         # inferred, so check the type of ex
-        if f === Main.Core.getfield || f === Main.Core.tuple
+        if f === Core.getfield || f === Core.tuple
            # we might like to penalize non-inferrability, but
            # tuple iteration/destructuring makes that impossible
            # return plus_saturate(argcost, isknowntype(extyp) ? 1 : params.inline_nonleaf_penalty)
            return 0
-        elseif f === Main.Core.isa
+        elseif f === Core.isa
            # If we're in a union context, we penalize type computations
            # on union types. In such cases, it is usually better to perform
            # union splitting on the outside.
            if union_penalties && isa(argextype(ex.args[2], src, sptypes, slottypes), Union)
                return params.inline_nonleaf_penalty
            end
-        elseif (f === Main.Core.arrayref || f === Main.Core.const_arrayref) && length(ex.args) >= 3
+        elseif (f === Core.arrayref || f === Core.const_arrayref) && length(ex.args) >= 3
            atyp = argextype(ex.args[3], src, sptypes, slottypes)
            return isknowntype(atyp) ? 4 : error_path ? params.inline_error_path_cost : params.inline_nonleaf_penalty
        end
diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl
index 22b905096d85e..043272fed7f1c 100644
--- a/base/compiler/ssair/inlining.jl
+++ b/base/compiler/ssair/inlining.jl
@@ -573,7 +573,7 @@ function batch_inline!(todo::Vector{Pair{Int, Any}}, ir::IRCode, linetable::Vect
     return ir
 end
 
-# This assumes the caller has verified that all arguments to the _apply call are Tuples.
+# This assumes the caller has verified that all arguments to the _apply_iterate call are Tuples.
 function rewrite_apply_exprargs!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int,
                                  argexprs::Vector{Any}, atypes::Vector{Any}, arginfos::Vector{Any}, arg_start::Int,
                                  et::Union{EdgeTracker, Nothing}, caches::Union{InferenceCaches, Nothing},
@@ -909,7 +909,7 @@ end
 function inline_apply!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int, sig::Signature,
                        et, caches, params::OptimizationParams)
     stmt = ir.stmts[idx][:inst]
-    while sig.f === Core._apply || sig.f === Core._apply_iterate
+    while sig.f === Core._apply_iterate
         info = ir.stmts[idx][:info]
         if isa(info, UnionSplitApplyCallInfo)
             if length(info.infos) != 1
@@ -923,7 +923,7 @@ function inline_apply!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int, sig::
         @assert info === nothing || info === false
         new_info = info = nothing
     end
-    arg_start = sig.f === Core._apply ? 2 : 3
+    arg_start = 3
     atypes = sig.atypes
     if arg_start > length(atypes)
         return nothing
@@ -1010,7 +1010,7 @@ function process_simple!(ir::IRCode, todo::Vector{Pair{Int, Any}}, idx::Int, sta
     sig = call_sig(ir, stmt)
     sig === nothing && return nothing
 
-    # Handle _apply
+    # Handle _apply_iterate
     sig = inline_apply!(ir, todo, idx, sig, state.et, state.caches, state.params)
     sig === nothing && return nothing
 
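Note: with `_apply` gone, the splatted containers always start at argument 3.
A self-contained check in the spirit of the updated tests (a sketch; the
helper names below are made up): when every splatted argument is a known
tuple, inlining should leave no `_apply_iterate` behind.

    h(a, b, c) = a + b + c
    splat_h() = h((1, 2)..., (3,)...)   # lowers roughly to Core._apply_iterate(iterate, h, (1, 2), (3,))
    no_apply(e::Expr) = all(no_apply, e.args)
    no_apply(ref::GlobalRef) = ref.mod !== Core || ref.name !== :_apply_iterate
    no_apply(::Any) = true
    @assert all(no_apply, code_typed(splat_h, Tuple{})[1].first.code)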
diff --git a/base/compiler/types.jl b/base/compiler/types.jl
index 1a1cbb0890e65..24e8c24379886 100644
--- a/base/compiler/types.jl
+++ b/base/compiler/types.jl
@@ -102,14 +102,14 @@ struct InferenceParams
     # before computing the set of matching methods
     MAX_UNION_SPLITTING::Int
     # the maximum number of union-tuples to swap / expand
-    # when inferring a call to _apply
+    # when inferring a call to _apply_iterate
     MAX_APPLY_UNION_ENUM::Int
 
     # parameters limiting large (tuple) types
     TUPLE_COMPLEXITY_LIMIT_DEPTH::Int
 
-    # when attempting to inlining _apply, abort the optimization if the tuple
-    # contains more than this many elements
+    # when attempting to inline _apply_iterate, abort the optimization if the
+    # tuple contains more than this many elements
     MAX_TUPLE_SPLAT::Int
 
     function InferenceParams(;
diff --git a/src/builtin_proto.h b/src/builtin_proto.h
index e66af64eb4118..c4d6166a5c194 100644
--- a/src/builtin_proto.h
+++ b/src/builtin_proto.h
@@ -22,7 +22,7 @@ extern "C" {
 DECLARE_BUILTIN(throw);        DECLARE_BUILTIN(is);
 DECLARE_BUILTIN(typeof);       DECLARE_BUILTIN(sizeof);
 DECLARE_BUILTIN(issubtype);    DECLARE_BUILTIN(isa);
-DECLARE_BUILTIN(_apply);       DECLARE_BUILTIN(_apply_pure);
+DECLARE_BUILTIN(_apply_pure);
 DECLARE_BUILTIN(_call_latest); DECLARE_BUILTIN(_apply_iterate);
 DECLARE_BUILTIN(_call_in_world);
 DECLARE_BUILTIN(isdefined);    DECLARE_BUILTIN(nfields);
diff --git a/src/builtins.c b/src/builtins.c
index 96637080af537..088203acda37e 100644
--- a/src/builtins.c
+++ b/src/builtins.c
@@ -525,7 +525,7 @@ STATIC_INLINE void _grow_to(jl_value_t **root, jl_value_t ***oldargs, jl_svec_t
 
 static jl_function_t *jl_iterate_func JL_GLOBALLY_ROOTED;
 
-static jl_value_t *do_apply(jl_value_t *F, jl_value_t **args, uint32_t nargs, jl_value_t *iterate)
+static jl_value_t *do_apply(jl_value_t **args, uint32_t nargs, jl_value_t *iterate)
 {
     jl_function_t *f = args[0];
     if (nargs == 2) {
@@ -567,12 +567,7 @@ static jl_value_t *do_apply(jl_value_t *F, jl_value_t **args, uint32_t nargs, jl
         }
     }
     if (extra && iterate == NULL) {
-        if (jl_iterate_func == NULL) {
-            jl_iterate_func = jl_get_function(jl_top_module, "iterate");
-            if (jl_iterate_func == NULL)
-                jl_undefined_var_error(jl_symbol("iterate"));
-        }
-        iterate = jl_iterate_func;
+        jl_undefined_var_error(jl_symbol("iterate"));
     }
     // allocate space for the argument array and gc roots for it
     // based on our previous estimates
@@ -696,13 +691,7 @@ static jl_value_t *do_apply(jl_value_t *F, jl_value_t **args, uint32_t nargs, jl
 JL_CALLABLE(jl_f__apply_iterate)
 {
     JL_NARGSV(_apply_iterate, 2);
-    return do_apply(F, args+1, nargs-1, args[0]);
-}
-
-JL_CALLABLE(jl_f__apply)
-{
-    JL_NARGSV(_apply, 1);
-    return do_apply(F, args, nargs, NULL);
+    return do_apply(args + 1, nargs - 1, args[0]);
 }
 
 // this is like `_apply`, but with quasi-exact checks to make sure it is pure
@@ -720,7 +709,7 @@ JL_CALLABLE(jl_f__apply_pure)
         // and `promote` works better this way
         size_t last_age = ptls->world_age;
         ptls->world_age = jl_world_counter;
-        ret = jl_f__apply(NULL, args, nargs);
+        ret = do_apply(args, nargs, NULL);
         ptls->world_age = last_age;
         ptls->in_pure_callback = last_in;
     }
@@ -1578,7 +1567,6 @@ void jl_init_primitives(void) JL_GC_DISABLED
 
     // internal functions
     jl_builtin_apply_type = add_builtin_func("apply_type", jl_f_apply_type);
-    jl_builtin__apply = add_builtin_func("_apply", jl_f__apply);
    jl_builtin__apply_iterate = add_builtin_func("_apply_iterate", jl_f__apply_iterate);
     jl_builtin__expr = add_builtin_func("_expr", jl_f__expr);
     jl_builtin_svec = add_builtin_func("svec", jl_f_svec);
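Note: do_apply no longer looks `iterate` up lazily; the only remaining path
that can reach it without an iteration function appears to be `_apply_pure` on
arguments that need the protocol, which now raises the UndefVarError directly.
Direct use of the builtin, normally emitted by lowering, looks like this
(illustration only):

    # The iteration function is an explicit first argument:
    @assert Core._apply_iterate(Base.iterate, max, (1, 2), (3,)) === 3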
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 83fcf690215f7..40f0999667b09 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -838,7 +838,6 @@ static const std::map<jl_fptr_args_t, JuliaFunction*> builtin_func_map = {
     { &jl_f_isa,            new JuliaFunction{"jl_f_isa", get_func_sig, get_func_attrs} },
     { &jl_f_typeassert,     new JuliaFunction{"jl_f_typeassert", get_func_sig, get_func_attrs} },
     { &jl_f_ifelse,         new JuliaFunction{"jl_f_ifelse", get_func_sig, get_func_attrs} },
-    { &jl_f__apply,         new JuliaFunction{"jl_f__apply", get_func_sig, get_func_attrs} },
     { &jl_f__apply_iterate, new JuliaFunction{"jl_f__apply_iterate", get_func_sig, get_func_attrs} },
     { &jl_f__apply_pure,    new JuliaFunction{"jl_f__apply_pure", get_func_sig, get_func_attrs} },
     { &jl_f__call_latest,   new JuliaFunction{"jl_f__call_latest", get_func_sig, get_func_attrs} },
@@ -2684,13 +2683,11 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
         }
     }
 
-    else if (((f == jl_builtin__apply && nargs == 2) ||
-              (f == jl_builtin__apply_iterate && nargs == 3)) && ctx.vaSlot > 0) {
-        int arg_start = f == jl_builtin__apply ? 2 : 3;
-        // turn Core._apply(f, Tuple) ==> f(Tuple...) using the jlcall calling convention if Tuple is the va allocation
-        if (LoadInst *load = dyn_cast_or_null<LoadInst>(argv[arg_start].V)) {
+    else if ((f == jl_builtin__apply_iterate && nargs == 3) && ctx.vaSlot > 0) {
+        // turn Core._apply_iterate(iter, f, Tuple) ==> f(Tuple...) using the jlcall calling convention if Tuple is the va allocation
+        if (LoadInst *load = dyn_cast_or_null<LoadInst>(argv[3].V)) {
             if (load->getPointerOperand() == ctx.slots[ctx.vaSlot].boxroot && ctx.argArray) {
-                Value *theF = boxed(ctx, argv[arg_start-1]);
+                Value *theF = boxed(ctx, argv[2]);
                 Value *nva = emit_n_varargs(ctx);
 #ifdef _P64
                 nva = ctx.builder.CreateTrunc(nva, T_int32);
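Note: the codegen special case exists for vararg forwarders. A sketch of the
Julia-level pattern it recognizes (hypothetical function names):

    # `args` is this function's own vararg tuple; the call lowers to
    # Core._apply_iterate(iterate, f, args), and codegen can hand the caller's
    # argument array straight to `f` instead of materializing the tuple:
    fwd(f, args...) = f(args...)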
diff --git a/src/module.c b/src/module.c
index 3bc1f22ea4cbc..20c119bedc27c 100644
--- a/src/module.c
+++ b/src/module.c
@@ -623,6 +623,8 @@ JL_DLLEXPORT jl_value_t *jl_get_global(jl_module_t *m, jl_sym_t *var)
 
 JL_DLLEXPORT void jl_set_global(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT)
 {
+    JL_TYPECHK(jl_set_global, module, (jl_value_t*)m);
+    JL_TYPECHK(jl_set_global, symbol, (jl_value_t*)var);
     jl_binding_t *bp = jl_get_binding_wr(m, var, 1);
     JL_GC_PROMISE_ROOTED(bp);
     jl_checked_assignment(bp, val);
diff --git a/src/staticdata.c b/src/staticdata.c
index 1b827bd12af17..9b599c0ff1a7f 100644
--- a/src/staticdata.c
+++ b/src/staticdata.c
@@ -30,7 +30,7 @@ extern "C" {
 // TODO: put WeakRefs on the weak_refs list during deserialization
 // TODO: handle finalizers
 
-#define NUM_TAGS 145
+#define NUM_TAGS 144
 
 // An array of references that need to be restored from the sysimg
 // This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C.
@@ -175,7 +175,6 @@ jl_value_t **const*const get_tags(void) {
         INSERT_TAG(jl_builtin_issubtype);
         INSERT_TAG(jl_builtin_isa);
         INSERT_TAG(jl_builtin_typeassert);
-        INSERT_TAG(jl_builtin__apply);
         INSERT_TAG(jl_builtin__apply_iterate);
         INSERT_TAG(jl_builtin_isdefined);
         INSERT_TAG(jl_builtin_nfields);
@@ -235,7 +234,7 @@ void *native_functions;
 // This is a manually constructed dual of the fvars array, which would be produced by codegen for Julia code, for C.
 static const jl_fptr_args_t id_to_fptrs[] = {
     &jl_f_throw, &jl_f_is, &jl_f_typeof, &jl_f_issubtype, &jl_f_isa,
-    &jl_f_typeassert, &jl_f__apply, &jl_f__apply_iterate, &jl_f__apply_pure,
+    &jl_f_typeassert, &jl_f__apply_iterate, &jl_f__apply_pure,
     &jl_f__call_latest, &jl_f__call_in_world, &jl_f_isdefined,
     &jl_f_tuple, &jl_f_svec, &jl_f_intrinsic_call, &jl_f_invoke_kwsorter,
     &jl_f_getfield, &jl_f_setfield, &jl_f_fieldtype, &jl_f_nfields,
diff --git a/src/toplevel.c b/src/toplevel.c
index 5ddf139e5f086..56523f2d09b54 100644
--- a/src/toplevel.c
+++ b/src/toplevel.c
@@ -45,15 +45,13 @@ JL_DLLEXPORT void jl_add_standard_imports(jl_module_t *m)
 // create a new top-level module
 void jl_init_main_module(void)
 {
-    if (jl_main_module != NULL)
-        jl_error("Main module already initialized.");
-
+    assert(jl_main_module == NULL);
     jl_main_module = jl_new_module(jl_symbol("Main"));
     jl_main_module->parent = jl_main_module;
     jl_set_const(jl_main_module, jl_symbol("Core"),
                  (jl_value_t*)jl_core_module);
-    jl_set_global(jl_core_module, jl_symbol("Main"),
-                  (jl_value_t*)jl_main_module);
+    jl_set_const(jl_core_module, jl_symbol("Main"),
+                 (jl_value_t*)jl_main_module);
 }
 
 static jl_function_t *jl_module_get_initializer(jl_module_t *m JL_PROPAGATES_ROOT)
diff --git a/stdlib/InteractiveUtils/src/macros.jl b/stdlib/InteractiveUtils/src/macros.jl
index dd637c8169b87..3805e2e97832b 100644
--- a/stdlib/InteractiveUtils/src/macros.jl
+++ b/stdlib/InteractiveUtils/src/macros.jl
@@ -153,12 +153,12 @@ function gen_call_with_extracted_types(__module__, fcn, ex0, kws=Expr[])
     exret = Expr(:none)
     if ex.head === :call
         if any(e->(isa(e, Expr) && e.head === :(...)), ex0.args) &&
-            (ex.args[1] === GlobalRef(Core,:_apply) ||
-             ex.args[1] === GlobalRef(Base,:_apply))
+            (ex.args[1] === GlobalRef(Core,:_apply_iterate) ||
+             ex.args[1] === GlobalRef(Base,:_apply_iterate))
             # check for splatting
-            exret = Expr(:call, ex.args[1], fcn,
-                         Expr(:tuple, esc(ex.args[2]),
-                              Expr(:call, typesof, map(esc, ex.args[3:end])...)))
+            exret = Expr(:call, ex.args[2], fcn,
+                         Expr(:tuple, esc(ex.args[3]),
+                              Expr(:call, typesof, map(esc, ex.args[4:end])...)))
         else
             exret = Expr(:call, fcn, esc(ex.args[1]),
                          Expr(:call, typesof, map(esc, ex.args[2:end])...), kws...)
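Note: in the lowered form matched above, the argument layout shifts by one
slot relative to the old `_apply` form. A sketch, using a hypothetical probe
of the lowered code:

    # Core._apply_iterate(iterate, f, xs):
    #   ex.args[1]     GlobalRef for the builtin
    #   ex.args[2]     the iteration function inserted by lowering
    #   ex.args[3]     the callee being splatted    (was ex.args[2])
    #   ex.args[4:end] the splatted containers      (was ex.args[3:end])
    ex = Meta.lower(Main, :(f(xs...))).args[1].code[1]
    @assert ex.args[1] == GlobalRef(Core, :_apply_iterate)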
diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl
index 304a704856ebb..df9d511c90b07 100644
--- a/test/compiler/inference.jl
+++ b/test/compiler/inference.jl
@@ -855,7 +855,7 @@ end
 aa20704(x) = x(nothing)
 @test code_typed(aa20704, (typeof(a20704),))[1][1].pure
 
-#issue #21065, elision of _apply when splatted expression is not effect_free
+#issue #21065, elision of _apply_iterate when splatted expression is not effect_free
 function f21065(x,y)
     println("x=$x, y=$y")
     return x, y
@@ -865,7 +865,7 @@ function test_no_apply(expr::Expr)
     return all(test_no_apply, expr.args)
 end
 function test_no_apply(ref::GlobalRef)
-    return ref.mod != Core || ref.name !== :_apply
+    return ref.mod != Core || ref.name !== :_apply_iterate
 end
 test_no_apply(::Any) = true
 @test all(test_no_apply, code_typed(g21065, Tuple{Int,Int})[1].first.code)
@@ -2041,6 +2041,7 @@ T27078 = Vector{Vector{T}} where T
 # issue #28070
 g28070(f, args...) = f(args...)
 @test @inferred g28070(Core._apply, Base.:/, (1.0, 1.0)) == 1.0
+@test @inferred g28070(Core._apply_iterate, Base.iterate, Base.:/, (1.0, 1.0)) == 1.0
 
 # issue #28079
 struct Foo28079 end
@@ -2298,9 +2299,9 @@ end
 
 @test @inferred(g28955((1,), 1.0)) === Bool
 
-# Test that inlining can look through repeated _applys
+# Test that inlining can look through repeated _apply_iterates
 foo_inlining_apply(args...) = ccall(:jl_, Nothing, (Any,), args[1])
-bar_inlining_apply() = Core._apply(Core._apply, (foo_inlining_apply,), ((1,),))
+bar_inlining_apply() = Core._apply_iterate(iterate, Core._apply_iterate, (iterate,), (foo_inlining_apply,), ((1,),))
 let ci = code_typed(bar_inlining_apply, Tuple{})[1].first
     @test length(ci.code) == 2
     @test ci.code[1].head == :foreigncall
diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl
index 2c1992ccd2cd8..8f7c6b831e185 100644
--- a/test/compiler/inline.jl
+++ b/test/compiler/inline.jl
@@ -175,7 +175,7 @@ end
 # 2 for now because the compiler leaves a GotoNode around
 @test_broken length(code_typed(f_ifelse, (String,))[1][1].code) <= 2
 
-# Test that inlining of _apply properly hits the inference cache
+# Test that inlining of _apply_iterate properly hits the inference cache
 @noinline cprop_inline_foo1() = (1, 1)
 @noinline cprop_inline_foo2() = (2, 2)
 function cprop_inline_bar(x...)