
Commit

Merge pull request #17638 from JuliaLang/jb/cleanup
formatting fixes and code cleanup
JeffBezanson authored Jul 27, 2016
2 parents d0a378d + 276c52e commit bcc2121
Showing 24 changed files with 138 additions and 425 deletions.
base/inference.jl (173 changes: 15 additions & 158 deletions)
@@ -2488,41 +2488,9 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
end

methsig = method.sig
incompletematch = false
if !(atype <: metharg)
incompletematch = true
if !inline_incompletematch_allowed || !isdefined(Main,:Base)
# provide global disable if this optimization is not desirable
# need Main.Base defined for MethodError
return invoke_NF()
end
end

## This code tries to limit the argument list length only when it is
## growing due to recursion.
## It might be helpful for some things, but turns out not to be
## necessary to get max performance from recursive varargs functions.
# if length(atypes) > MAX_TUPLETYPE_LEN
# # check call stack to see if this argument list is growing
# st = inference_stack
# while !isa(st, EmptyCallStack)
# if st.code === linfo.def.code && length(atypes) > length(st.types)
# atypes = limit_tuple_type(atypes)
# meth = _methods(f, atypes, 1)
# if meth === false || length(meth) != 1
# return NF
# end
# meth = meth[1]::Tuple
# linfo2 = meth[3].func.code
# if linfo2 !== linfo
# return NF
# end
# linfo = linfo2
# break
# end
# st = st.prev
# end
# end
return invoke_NF()
end

(linfo, ty, inferred) = typeinf(method, metharg, methsp, false)
if linfo === nothing || !inferred
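
For context on the `incompletematch` gate removed in the hunk above: inlining without a run-time check is only sound when the inferred call-site types are a subset of the method signature, which the compiler tests as a tuple-type subtype check. A minimal illustration in Julia, with example signatures chosen here rather than taken from the diff:

# Complete match: every inferred argument type is covered by the method signature.
Tuple{typeof(sum), Vector{Int}} <: Tuple{typeof(sum), AbstractArray}   # true

# Incomplete match: inference only knows `Any`, so a no-method error is still
# possible at run time and unconditional inlining would be unsound.
Tuple{typeof(sum), Any} <: Tuple{typeof(sum), AbstractArray}           # false
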
@@ -2532,31 +2500,6 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
# in this case function can be inlined to a constant
return inline_as_constant(linfo.constval, argexprs, sv)
elseif linfo !== nothing && !linfo.inlineable
# TODO
#=
if incompletematch
# inline a typeassert-based call-site, rather than a
# full generic lookup, using the inliner to handle
# all the fiddly details
numarg = length(argexprs)
newnames = unique_names(ast,numarg)
spnames = []
spvals = []
locals = []
newcall = Expr(:call, e.args[1])
newcall.typ = ty
for i = 1:numarg
name = newnames[i]
argtype = exprtype(argexprs[i],sv)
push!(locals, Any[name,argtype,0])
push!(newcall.args, argtype===Any ? name : SymbolNode(name, argtype))
end
body.args = Any[Expr(:return, newcall)]
ast = Expr(:lambda, newnames, Any[[], locals, [], 0], body)
else
return invoke_NF()
end
=#
return invoke_NF()
elseif linfo === nothing || linfo.code === nothing
(linfo, ty, inferred) = typeinf(method, metharg, methsp, true)
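
The `inline_as_constant` branch kept above covers a different case: when inference proves a call always returns the same value, the call site can be replaced by that constant outright. A rough illustration with a hypothetical function; the folded result shown is typical of recent Julia, not a claim about this exact revision:

# A pure call whose result inference can prove is constant.
area_of_unit_square() = 1.0 * 1.0
@code_typed area_of_unit_square()   # body typically reduces to `return 1.0`
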
@@ -2618,101 +2561,27 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
prelude_stmts = Any[]
stmts_free = true # true = all entries of stmts are effect_free

# when 1 method matches the inferred types, there is still a chance
# of a no-method error at run time, unless the inferred types are a
# subset of the method signature.
if incompletematch
t = Expr(:call) # tuple(args...)
t.typ = Tuple
argexprs2 = t.args
icall = LabelNode(label_counter(body.args)+1)
partmatch = Expr(:gotoifnot, false, icall.label)
thrw = Expr(:call, :throw, Expr(:call, GlobalRef(Main.Base,:MethodError), Expr(:call, top_tuple, e.args[1], QuoteNode(:inline)), t))
thrw.typ = Bottom
end

for i=na:-1:1 # stmts_free needs to be calculated in reverse-argument order
#args_i = args[i]
aei = argexprs[i]
aeitype = argtype = widenconst(exprtype(aei,sv))
needtypeassert = false
if incompletematch
if isva
if nm == 0
methitype = Tuple{}
elseif i > nm
methitype = methargs[end]
if isvarargtype(methitype)
methitype = Tuple{methitype}
else
methitype = Tuple{}
end
else
methitype = tupletype_tail(metharg,i)
end
isva = false
else
if i < nm
methitype = methargs[i]
else
methitype = methargs[end]
if isvarargtype(methitype)
methitype = methitype.parameters[1]
else
@assert i==nm
end
end
end
if isa(methitype, TypeVar)
methitype = methitype.ub
end
if !(aeitype <: methitype)
#TODO: make Undef a faster special-case?
needtypeassert = true
aeitype = methitype
end
end

# ok for argument to occur more than once if the actual argument
# is a symbol or constant, or is not affected by previous statements
# that will exist after the inlining pass finishes
if needtypeassert
vnew1 = unique_name(enclosing_ast, ast)
add_variable(enclosing_ast, vnew1, aeitype, true)
v1 = (aeitype===Any ? vnew1 : SymbolNode(vnew1,aeitype))
push!(spvals, v1)
vnew2 = unique_name(enclosing_ast, ast)
v2 = (argtype===Any ? vnew2 : SymbolNode(vnew2,argtype))
unshift!(body.args, Expr(:(=), args_i, v2))
args[i] = args_i = vnew2
aeitype = argtype
affect_free = stmts_free
occ = 3
# it's really late in codegen, so we expand the typeassert manually: cond = !isa(vnew2, methitype) | cond
cond = Expr(:call, Intrinsics.isa, v2, methitype)
cond.typ = Bool
cond = Expr(:call, Intrinsics.not_int, cond)
cond.typ = Bool
cond = Expr(:call, Intrinsics.or_int, cond, partmatch.args[1])
cond.typ = Bool
cond = Expr(:call, Intrinsics.box, Bool, cond)
cond.typ = Bool
partmatch.args[1] = cond
else
affect_free = stmts_free # false = previous statements might affect the result of evaluating argument
occ = 0
for j = length(body.args):-1:1
b = body.args[j]
if occ < 6
occ += occurs_more(b, x->(isa(x,Slot)&&x.id==i), 6)
end
# TODO: passing `sv` here is wrong since it refers to the enclosing function
if occ > 0 && affect_free && !effect_free(b, sv, true) #TODO: we could short-circuit this test better by memoizing effect_free(b) in the for loop over i
affect_free = false
end
if occ > 5 && !affect_free
break
end
affect_free = stmts_free # false = previous statements might affect the result of evaluating argument
occ = 0
for j = length(body.args):-1:1
b = body.args[j]
if occ < 6
occ += occurs_more(b, x->(isa(x,Slot)&&x.id==i), 6)
end
# TODO: passing `sv` here is wrong since it refers to the enclosing function
if occ > 0 && affect_free && !effect_free(b, sv, true) #TODO: we could short-circuit this test better by memoizing effect_free(b) in the for loop over i
affect_free = false
end
if occ > 5 && !affect_free
break
end
end
free = effect_free(aei,sv,true)
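
The loop above counts how often an argument occurs in the inlined body (`occurs_more`) to decide whether the argument expression can be substituted directly or must first be bound to a temporary. A simplified sketch using a hypothetical helper over plain `Expr`s; the real pass walks typed IR slots and also tracks effect-freeness:

# Count syntactic occurrences of `name` in expression `e` (illustrative only).
count_occurrences(e, name::Symbol) =
    e === name ? 1 :
    e isa Expr ? sum(count_occurrences(a, name) for a in e.args; init=0) : 0

body = :(x + x * y)
count_occurrences(body, :x)   # 2: substituting a side-effecting argument here would run it twice
count_occurrences(body, :y)   # 1: safe to substitute the argument expression directly
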
@@ -2728,15 +2597,6 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
stmts_free = false
end
end
if incompletematch
unshift!(argexprs2, (argtype===Any ? args_i : SymbolNode(a,argtype)))
end
end
if incompletematch && partmatch.args[1] != false
unshift!(body.args, icall)
unshift!(body.args, thrw)
unshift!(body.args, partmatch)
unshift!(argexprs2, top_tuple)
end

# re-number the SSAValues and copy their type-info to the new ast
@@ -2873,9 +2733,6 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
end
return (expr, stmts)
end
# The inlining incomplete matches optimization currently
# doesn't work on Tuples of TypeVars
const inline_incompletematch_allowed = false

inline_worthy(body::ANY, cost::Integer) = true

src/alloc.c (22 changes: 10 additions & 12 deletions)
@@ -72,9 +72,8 @@ jl_value_t *jl_readonlymemory_exception;
union jl_typemap_t jl_cfunction_list;

jl_sym_t *call_sym; jl_sym_t *invoke_sym;
jl_sym_t *dots_sym;
jl_sym_t *dots_sym; jl_sym_t *empty_sym;
jl_sym_t *module_sym; jl_sym_t *slot_sym;
jl_sym_t *empty_sym;
jl_sym_t *export_sym; jl_sym_t *import_sym;
jl_sym_t *importall_sym; jl_sym_t *toplevel_sym;
jl_sym_t *quote_sym; jl_sym_t *amp_sym;
@@ -84,11 +83,10 @@ jl_sym_t *line_sym; jl_sym_t *jl_incomplete_sym;
jl_sym_t *goto_sym; jl_sym_t *goto_ifnot_sym;
jl_sym_t *label_sym; jl_sym_t *return_sym;
jl_sym_t *lambda_sym; jl_sym_t *assign_sym;
jl_sym_t *body_sym;
jl_sym_t *body_sym; jl_sym_t *globalref_sym;
jl_sym_t *method_sym; jl_sym_t *core_sym;
jl_sym_t *enter_sym; jl_sym_t *leave_sym;
jl_sym_t *exc_sym; jl_sym_t *error_sym;
jl_sym_t *globalref_sym;
jl_sym_t *new_sym; jl_sym_t *using_sym;
jl_sym_t *const_sym; jl_sym_t *thunk_sym;
jl_sym_t *anonymous_sym; jl_sym_t *underscore_sym;
@@ -851,11 +849,10 @@ jl_datatype_t *jl_new_uninitialized_datatype(void)
return t;
}

static jl_datatype_layout_t *jl_get_layout(
uint32_t nfields,
uint32_t alignment,
int haspadding,
jl_fielddesc32_t desc[])
static jl_datatype_layout_t *jl_get_layout(uint32_t nfields,
uint32_t alignment,
int haspadding,
jl_fielddesc32_t desc[])
{
// compute the smallest fielddesc type that can hold the layout description
int fielddesc_type = 0;
@@ -882,8 +879,8 @@ static jl_datatype_layout_t *jl_get_layout(

// allocate a new descriptor
uint32_t fielddesc_size = jl_fielddesc_size(fielddesc_type);
jl_datatype_layout_t *flddesc = (jl_datatype_layout_t*)jl_gc_perm_alloc(
sizeof(jl_datatype_layout_t) + nfields * fielddesc_size);
jl_datatype_layout_t *flddesc =
(jl_datatype_layout_t*)jl_gc_perm_alloc(sizeof(jl_datatype_layout_t) + nfields * fielddesc_size);
flddesc->nfields = nfields;
flddesc->alignment = alignment;
flddesc->haspadding = haspadding;
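
`jl_get_layout` above selects the smallest field-descriptor encoding that can represent every field's offset and size. A Julia sketch of that selection with assumed thresholds; the real limits come from the 8-, 16-, and 32-bit fielddesc layouts in the C sources (the 32-bit form, `jl_fielddesc32_t`, appears in the signature above):

# Pick a descriptor width (in bits) large enough for the biggest offset/size.
function fielddesc_width(max_offset_or_size::Integer)
    max_offset_or_size <= typemax(UInt8)  && return 8    # smallest form
    max_offset_or_size <= typemax(UInt16) && return 16
    return 32                                            # widest form, as used in the signature above
end

fielddesc_width(200)      # 8-bit descriptors suffice
fielddesc_width(70_000)   # needs the 32-bit form
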
@@ -923,7 +920,8 @@ static jl_datatype_layout_t *jl_get_layout(
// A non-zero result *must* match the LLVM rules for a vector type <nfields x t>.
// For sake of Ahead-Of-Time (AOT) compilation, this routine has to work
// without LLVM being available.
unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t) {
unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t)
{
if (!jl_is_vecelement_type(t))
return 0;
// LLVM 3.7 and 3.8 either crash or generate wrong code for many
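
`jl_special_vector_alignment`, partially shown above, must reproduce LLVM's alignment for a vector `<nfields x t>` without calling into LLVM. A common rule of thumb is that such a vector is aligned to its byte size rounded up to a power of two; the sketch below encodes that assumed rule, not the exact C implementation:

# Assumed LLVM-style rule: align a small fixed vector to nextpow2(byte size).
vector_alignment(nfields, elsize) = nextpow(2, nfields * elsize)

vector_alignment(4, 4)   # <4 x i32>: 16-byte alignment
vector_alignment(3, 4)   # <3 x i32>: 12 bytes rounds up to 16
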
src/builtins.c (2 changes: 1 addition & 1 deletion)
@@ -768,7 +768,7 @@ JL_DLLEXPORT void *(jl_symbol_name)(jl_sym_t *s)
return jl_symbol_name(s);
}

//WARNING: THIS FUNCTION IS NEVER CALLED BUT INLINE BY CCALL
// WARNING: THIS FUNCTION IS NEVER CALLED BUT INLINE BY CCALL
JL_DLLEXPORT void *jl_array_ptr(jl_array_t *a)
{
return a->data;
src/ccall.cpp (12 changes: 6 additions & 6 deletions)
@@ -1270,21 +1270,21 @@ static jl_cgval_t emit_ccall(jl_value_t **args, size_t nargs, jl_codectx_t *ctx)
if (jl_is_expr(args[2])) {
jl_expr_t *rtexpr = (jl_expr_t*)args[2];
if (rtexpr->head == call_sym && jl_expr_nargs(rtexpr) == 4 &&
static_eval(jl_exprarg(rtexpr, 0), ctx, true, false) == jl_builtin_apply_type &&
static_eval(jl_exprarg(rtexpr, 1), ctx, true, false) == (jl_value_t*)jl_array_type) {
static_eval(jl_exprarg(rtexpr, 0), ctx, true, false) == jl_builtin_apply_type &&
static_eval(jl_exprarg(rtexpr, 1), ctx, true, false) == (jl_value_t*)jl_array_type) {
// `Array` used as return type just returns a julia object reference
rt = (jl_value_t*)jl_any_type;
static_rt = true;
}
else if (rtexpr->head == call_sym && jl_expr_nargs(rtexpr) == 3 &&
static_eval(jl_exprarg(rtexpr, 0), ctx, true, false) == jl_builtin_apply_type &&
static_eval(jl_exprarg(rtexpr, 1), ctx, true, false) == (jl_value_t*)jl_pointer_type) {
static_eval(jl_exprarg(rtexpr, 0), ctx, true, false) == jl_builtin_apply_type &&
static_eval(jl_exprarg(rtexpr, 1), ctx, true, false) == (jl_value_t*)jl_pointer_type) {
// substitute Ptr{Void} for statically-unknown pointer type
rt = (jl_value_t*)jl_voidpointer_type;
}
else if (rtexpr->head == call_sym && jl_expr_nargs(rtexpr) == 3 &&
static_eval(jl_exprarg(rtexpr, 0), ctx, true, false) == jl_builtin_apply_type &&
static_eval(jl_exprarg(rtexpr, 1), ctx, true, false) == (jl_value_t*)jl_ref_type) {
static_eval(jl_exprarg(rtexpr, 0), ctx, true, false) == jl_builtin_apply_type &&
static_eval(jl_exprarg(rtexpr, 1), ctx, true, false) == (jl_value_t*)jl_ref_type) {
// `Ref{T}` used as return type just returns T (from a jl_value_t*)
rt = (jl_value_t*)jl_any_type;
static_rt = true;
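
The `static_eval` cases above recognize `Array`, `Ptr{...}`, and `Ref{T}` expressions used as ccall return types. At the user level the effect looks like the following illustration (standard libc functions assumed to be available; written with current Julia spellings such as `Cvoid`):

len = ccall(:strlen, Csize_t, (Cstring,), "hello")   # plain bits return type
buf = ccall(:malloc, Ptr{Cvoid}, (Csize_t,), 16)     # pointer return type comes back as a raw Ptr
ccall(:free, Cvoid, (Ptr{Cvoid},), buf)              # pass the pointer back to C
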
src/cgmemmgr.cpp (2 changes: 1 addition & 1 deletion)
@@ -290,7 +290,7 @@ static void write_self_mem(void *dest, void *ptr, size_t size)
return;
if (ret == -1 && (errno == EAGAIN || errno == EINTR))
continue;
assert(ret < size);
assert((size_t)ret < size);
size -= ret;
ptr = (char*)ptr + ret;
dest = (char*)dest + ret;
src/cgutils.cpp (15 changes: 9 additions & 6 deletions)
@@ -86,9 +86,9 @@ static DIType julia_type_to_di(jl_value_t *jt, DIBuilder *dbuilder, bool isboxed
return jl_pvalue_dillvmt;
// always return the boxed representation for types with hidden content
if (jl_is_abstracttype(jt) || !jl_is_datatype(jt) || jl_is_array_type(jt) ||
jt == (jl_value_t*)jl_sym_type || jt == (jl_value_t*)jl_module_type ||
jt == (jl_value_t*)jl_simplevector_type || jt == (jl_value_t*)jl_datatype_type ||
jt == (jl_value_t*)jl_lambda_info_type)
jt == (jl_value_t*)jl_sym_type || jt == (jl_value_t*)jl_module_type ||
jt == (jl_value_t*)jl_simplevector_type || jt == (jl_value_t*)jl_datatype_type ||
jt == (jl_value_t*)jl_lambda_info_type)
return jl_pvalue_dillvmt;
if (jl_is_typector(jt) || jl_is_typevar(jt))
return jl_pvalue_dillvmt;
@@ -266,7 +266,7 @@ static Value *emit_bitcast(Value *v, Type *jl_value)
PointerType::get(cast<PointerType>(jl_value)->getElementType(),
v->getType()->getPointerAddressSpace());
return builder.CreateBitCast(v, jl_value_addr);
} else {
}
else {
return builder.CreateBitCast(v, jl_value);
}
}
@@ -804,7 +805,8 @@ static Value *emit_bounds_check(const jl_cgval_t &ainfo, jl_value_t *ty, Value *
//
// Parameter ptr should be the pointer argument for the LoadInst or StoreInst.
// It is currently unused, but might be used in the future for a more precise answer.
static unsigned julia_alignment(Value* /*ptr*/, jl_value_t *jltype, unsigned alignment) {
static unsigned julia_alignment(Value* /*ptr*/, jl_value_t *jltype, unsigned alignment)
{
if (!alignment && ((jl_datatype_t*)jltype)->layout->alignment > MAX_ALIGN) {
// Type's natural alignment exceeds strictest alignment promised in heap, so return the heap alignment.
return MAX_ALIGN;
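
`julia_alignment` above clamps a type's natural alignment to the strongest alignment the GC heap actually guarantees (`MAX_ALIGN`). A small Julia sketch of that clamping, with the heap guarantee treated as an assumed constant:

const ASSUMED_MAX_HEAP_ALIGN = 16   # stand-in for MAX_ALIGN, which is defined in the C sources
effective_alignment(natural_align) = min(natural_align, ASSUMED_MAX_HEAP_ALIGN)

effective_alignment(8)    # 8: the natural alignment can be honoured
effective_alignment(32)   # 16: clamped to what the heap promises
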
@@ -1089,7 +1091,8 @@ static jl_cgval_t emit_getfield_knownidx(const jl_cgval_t &strct, unsigned idx,
else {
if (strct.V->getType()->isVectorTy()) {
fldv = builder.CreateExtractElement(strct.V, ConstantInt::get(T_int32, idx));
} else {
}
else {
// VecElement types are unwrapped in LLVM.
assert( strct.V->getType()->isSingleValueType() );
fldv = strct.V;
