Commit e66e37f: Change formatting style
avik-pal committed May 24, 2022 (1 parent: 06d1105)
Showing 26 changed files with 332 additions and 329 deletions.
2 changes: 1 addition & 1 deletion .JuliaFormatter.toml
@@ -1,2 +1,2 @@
style = "sciml"
-whitespace_in_kwargs = true
+whitespace_in_kwargs = false
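
For context, `whitespace_in_kwargs` controls whether JuliaFormatter puts spaces around `=` in keyword arguments. A minimal before/after sketch of the effect (illustrative code, not from this commit):

    # whitespace_in_kwargs = true
    plot(x, y; color = "red", linewidth = 2)

    # whitespace_in_kwargs = false
    plot(x, y; color="red", linewidth=2)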
5 changes: 4 additions & 1 deletion .github/workflows/FormatCheck.yml
@@ -26,8 +26,11 @@ jobs:
# This will use the latest version by default but you can set the version like so:
#
# julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter", version="0.13.0"))'
+#
+# FIXME: Before merging change to default release
+# julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
run: |
-julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
+julia -e 'using Pkg; Pkg.add(PackageSpec(url="https://github.com/YingboMa/JuliaFormatter.jl.git", rev="myb/scimlstyle"))'
julia -e 'using JuliaFormatter; format(".", verbose=true)'
- name: Format check
run: |
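To reproduce this check locally before pushing, something like the following should work (a sketch; it mirrors the pinned branch above and assumes, as in recent JuliaFormatter releases, that `format` returns `true` when no files needed changes):

    julia -e 'using Pkg; Pkg.add(PackageSpec(url="https://github.com/YingboMa/JuliaFormatter.jl.git", rev="myb/scimlstyle"))'
    julia -e 'using JuliaFormatter; exit(format(".") ? 0 : 1)'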
2 changes: 1 addition & 1 deletion .github/workflows/FormatPR.yml
@@ -9,7 +9,7 @@ jobs:
- uses: actions/checkout@v2
- name: Install JuliaFormatter and format
run: |
-julia -e 'import Pkg; Pkg.add("JuliaFormatter")'
+julia -e 'using Pkg; Pkg.add(PackageSpec(url="https://github.com/YingboMa/JuliaFormatter.jl.git", rev="myb/scimlstyle"))'
julia -e 'using JuliaFormatter; format(".")'
# https://github.com/marketplace/actions/create-pull-request
# https://github.com/peter-evans/create-pull-request#reference-example
42 changes: 21 additions & 21 deletions docs/make.jl
@@ -2,7 +2,7 @@ using Documenter, Lux, Literate, Pkg

# Precompile example dependencies
Pkg.activate(joinpath(@__DIR__, "..", "examples"))
-Pkg.develop(PackageSpec(; path = joinpath(@__DIR__, "..")))
+Pkg.develop(PackageSpec(; path=joinpath(@__DIR__, "..")))
Pkg.instantiate()
Pkg.precompile()
Pkg.activate(@__DIR__)
@@ -13,8 +13,8 @@ if haskey(ENV, "GITHUB_ACTIONS")
end

deployconfig = Documenter.auto_detect_deploy_system()
-Documenter.post_status(deployconfig; type = "pending",
-                       repo = "github.com/avik-pal/Lux.jl.git")
+Documenter.post_status(deployconfig; type="pending",
+                       repo="github.com/avik-pal/Lux.jl.git")

# Tutorials
get_example_path(p) = joinpath(@__DIR__, "..", "examples", p)
@@ -28,13 +28,13 @@ ADVANCED_TUTORIALS = []
ADVANCED_TUTORIAL_NAMES = []
MAPPING = Dict("beginner" => [], "intermediate" => [], "advanced" => [])

-for (d, names, paths) in
-    (("beginner", BEGINNER_TUTORIAL_NAMES, BEGINNER_TUTORIALS),
-     ("intermediate", INTERMEDIATE_TUTORIAL_NAMES, INTERMEDIATE_TUTORIALS),
-     ("advanced", ADVANCED_TUTORIAL_NAMES, ADVANCED_TUTORIALS))
+for (d, names, paths) in (("beginner", BEGINNER_TUTORIAL_NAMES, BEGINNER_TUTORIALS),
+                          ("intermediate", INTERMEDIATE_TUTORIAL_NAMES,
+                           INTERMEDIATE_TUTORIALS),
+                          ("advanced", ADVANCED_TUTORIAL_NAMES, ADVANCED_TUTORIALS))
    for (n, p) in zip(names, paths)
        Literate.markdown(get_example_path(p), joinpath(OUTPUT, d, dirname(p));
-                          documenter = true)
+                          documenter=true)
push!(MAPPING[d],
n => joinpath("examples/generated", d, dirname(p),
splitext(basename(p))[1] * ".md"))
@@ -44,17 +44,17 @@ end
display(MAPPING)

makedocs(;
-    sitename = "Lux",
-    authors = "Avik Pal et al.",
-    clean = true,
-    doctest = false,
-    modules = [Lux],
-    format = Documenter.HTML(;
-        prettyurls = get(ENV, "CI", nothing) == "true",
-        assets = ["assets/custom.css"]
-        # analytics = "G-Q8GYTEVTZ2"
-    ),
-    pages = [
+    sitename="Lux",
+    authors="Avik Pal et al.",
+    clean=true,
+    doctest=false,
+    modules=[Lux],
+    format=Documenter.HTML(;
+        prettyurls=get(ENV, "CI", nothing) == "true",
+        assets=["assets/custom.css"]
+        # analytics = "G-Q8GYTEVTZ2"
+    ),
+    pages=[
"Lux: Explicitly Parameterized Neural Networks" => "index.md",
"Introduction" => [
"All about Lux" => "introduction/overview.md",
@@ -78,7 +78,7 @@ makedocs(;
],
])

-deploydocs(; repo = "github.com/avik-pal/Lux.jl.git", push_preview = true,
-           devbranch = "main")
+deploydocs(; repo="github.com/avik-pal/Lux.jl.git", push_preview=true,
+           devbranch="main")

Pkg.activate(@__DIR__)
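
For readers unfamiliar with Literate.jl: the loop above turns each tutorial script into a Documenter-ready markdown page. A standalone sketch of one such call (the paths here are illustrative, not taken from this commit):

    using Literate

    # would convert examples/BayesianNN/main.jl into
    # docs/src/examples/generated/beginner/BayesianNN/main.md
    Literate.markdown("examples/BayesianNN/main.jl",
                      "docs/src/examples/generated/beginner/BayesianNN";
                      documenter=true)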
14 changes: 7 additions & 7 deletions examples/BayesianNN/main.jl
@@ -52,8 +52,8 @@ function plot_data()
x2 = first.(xt0s)
y2 = last.(xt0s)

-    plt = Plots.scatter(x1, y1; color = "red", clim = (0, 1))
-    Plots.scatter!(plt, x2, y2; color = "blue", clim = (0, 1))
+    plt = Plots.scatter(x1, y1; color="red", clim=(0, 1))
+    Plots.scatter!(plt, x2, y2; color="blue", clim=(0, 1))

return plt
end
@@ -135,8 +135,8 @@ _, i = findmax(ch[:lp])
i = i.I[1]

## Plot the posterior distribution with a contour plot
-x1_range = collect(range(-6; stop = 6, length = 25))
-x2_range = collect(range(-6; stop = 6, length = 25))
+x1_range = collect(range(-6; stop=6, length=25))
+x2_range = collect(range(-6; stop=6, length=25))
Z = [nn_forward([x1, x2], theta[i, :])[1] for x1 in x1_range, x2 in x2_range]
contour!(x1_range, x2_range, Z)

@@ -157,8 +157,8 @@ end
plot_data()

n_end = 1500
-x1_range = collect(range(-6; stop = 6, length = 25))
-x2_range = collect(range(-6; stop = 6, length = 25))
+x1_range = collect(range(-6; stop=6, length=25))
+x2_range = collect(range(-6; stop=6, length=25))
Z = [nn_predict([x1, x2], theta, n_end)[1] for x1 in x1_range, x2 in x2_range]
contour!(x1_range, x2_range, Z)

@@ -170,5 +170,5 @@ n_end = 1000
anim = @gif for i in 1:n_end
plot_data()
Z = [nn_forward([x1, x2], theta[i, :])[1] for x1 in x1_range, x2 in x2_range]
-    contour!(x1_range, x2_range, Z; title = "Iteration $i", clim = (0, 1))
+    contour!(x1_range, x2_range, Z; title="Iteration $i", clim=(0, 1))
end every 5
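
The definition of `nn_predict` used above is collapsed in this view; it presumably averages `nn_forward` over a subset of the posterior samples. A minimal sketch under that assumption (the stride of 10 is a guess, not from this diff):

    using Statistics: mean

    # hypothetical reconstruction: average the network output over
    # every 10th of the first n posterior samples
    function nn_predict(x, theta, n)
        return mean([nn_forward(x, theta[i, :])[1] for i in 1:10:n])
    end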
44 changes: 22 additions & 22 deletions examples/ImageNet/main.jl
@@ -26,17 +26,17 @@ import DataLoaders: LearnBase # Extending Datasets
import MLUtils

# Distributed Training
-FluxMPI.Init(; verbose = true)
+FluxMPI.Init(; verbose=true)
CUDA.allowscalar(false)

# unsafe_free OneHotArrays
CUDA.unsafe_free!(x::OneHotArray) = CUDA.unsafe_free!(x.indices)

# Image Classification Models
-VGG11_BN(args...; kwargs...) = VGG11(args...; batchnorm = true, kwargs...)
-VGG13_BN(args...; kwargs...) = VGG13(args...; batchnorm = true, kwargs...)
-VGG16_BN(args...; kwargs...) = VGG16(args...; batchnorm = true, kwargs...)
-VGG19_BN(args...; kwargs...) = VGG19(args...; batchnorm = true, kwargs...)
+VGG11_BN(args...; kwargs...) = VGG11(args...; batchnorm=true, kwargs...)
+VGG13_BN(args...; kwargs...) = VGG13(args...; batchnorm=true, kwargs...)
+VGG16_BN(args...; kwargs...) = VGG16(args...; batchnorm=true, kwargs...)
+VGG19_BN(args...; kwargs...) = VGG19(args...; batchnorm=true, kwargs...)
MobileNetv3_small(args...; kwargs...) = MobileNetv3(:small, args...; kwargs...)
MobileNetv3_large(args...; kwargs...) = MobileNetv3(:large, args...; kwargs...)
ResNeXt50(args...; kwargs...) = ResNeXt(50, args...; kwargs...)
@@ -75,7 +75,7 @@ AVAILABLE_IMAGENET_MODELS = [

IMAGENET_MODELS_DICT = Dict(string(model) => model for model in AVAILABLE_IMAGENET_MODELS)

-function get_model(model_name::String, models_dict::Dict, rng, args...; warmup = true,
+function get_model(model_name::String, models_dict::Dict, rng, args...; warmup=true,
kwargs...)
model = Lux.transform(models_dict[model_name](args...; kwargs...).layers)
ps, st = Lux.setup(rng, model) .|> gpu
@@ -93,8 +93,8 @@ function get_model(model_name::String, models_dict::Dict, rng, args...; warmup =
end

if is_distributed()
-        ps = FluxMPI.synchronize!(ps; root_rank = 0)
-        st = FluxMPI.synchronize!(st; root_rank = 0)
+        ps = FluxMPI.synchronize!(ps; root_rank=0)
+        st = FluxMPI.synchronize!(st; root_rank=0)
should_log() && println("$(now()) ==> models synced across all ranks")
end

@@ -160,7 +160,7 @@ function parse_commandline_arguments()
end

# Loss Function
-logitcrossentropyloss(ŷ, y) = mean(-sum(y .* logsoftmax(ŷ; dims = 1); dims = 1))
+logitcrossentropyloss(ŷ, y) = mean(-sum(y .* logsoftmax(ŷ; dims=1); dims=1))

function logitcrossentropyloss(x, y, model, ps, st)
ŷ, st_ = model(x, ps, st)
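
A quick sanity check of the loss defined above (a sketch; it assumes OneHotArrays' `onehotbatch`, which fits the `onehot`/`onecold` usage elsewhere in this script):

    using NNlib, OneHotArrays, Statistics

    ŷ = randn(Float32, 10, 4)              # logits: 10 classes, batch of 4
    y = onehotbatch(rand(1:10, 4), 1:10)   # one-hot targets
    logitcrossentropyloss(ŷ, y)            # scalar; ≈ log(10) ≈ 2.3 for random logits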
@@ -181,10 +181,10 @@ end
update_lr(st_opt::NamedTuple, eta) = fmap(l -> update_lr(l, eta), st_opt)

# Accuracy
-function accuracy(ŷ, y, topk = (1,))
+function accuracy(ŷ, y, topk=(1,))
maxk = maximum(topk)

-    pred_labels = partialsortperm.(eachcol(ŷ), (1:maxk,), rev = true)
+    pred_labels = partialsortperm.(eachcol(ŷ), (1:maxk,), rev=true)
true_labels = onecold(y)

accuracies = Vector{Float32}(undef, length(topk))
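
The rest of `accuracy` is collapsed in this view; assuming it fills `accuracies` with the fraction of samples whose true label lands in the top k predictions, usage would look roughly like:

    ŷ = randn(Float32, 1000, 16)                # scores for 16 images, 1000 classes
    y = onehotbatch(rand(1:1000, 16), 1:1000)   # hypothetical targets
    top1, top5 = accuracy(ŷ, y, (1, 5))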
@@ -202,11 +202,11 @@ is_distributed() = FluxMPI.Initialized() && total_workers() > 1
should_log() = !FluxMPI.Initialized() || local_rank() == 0

# Checkpointing
-function save_checkpoint(state, is_best, filename = "checkpoint.pth.tar")
+function save_checkpoint(state, is_best, filename="checkpoint.pth.tar")
if should_log()
serialize(filename, state)
if is_best
-            cp(filename, "model_best.pth.tar"; force = true)
+            cp(filename, "model_best.pth.tar"; force=true)
end
end
end
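
Since `save_checkpoint` writes with `Serialization.serialize`, resuming (the `args["resume"]` branch later in `main`) is the mirror image; a sketch with assumed field names:

    using Serialization

    state = deserialize("checkpoint.pth.tar")
    # hypothetical fields; use whatever `state` was saved with above
    ps, st, start_epoch = state.ps, state.st, state.epoch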
@@ -260,12 +260,12 @@ function ImageDataset(folder::String, augmentation_pipeline, normalization_param
"n02105855_2933.JPEG",
]
remove_files = joinpath.((folder,),
-                          joinpath.(first.(rsplit.(remove_files, "_", limit = 2)),
+                          joinpath.(first.(rsplit.(remove_files, "_", limit=2)),
remove_files))

image_files = [setdiff(Set(image_files), Set(remove_files))...]

-        labels = [mapping[x] for x in map(x -> x[2], rsplit.(image_files, "/", limit = 3))]
+        labels = [mapping[x] for x in map(x -> x[2], rsplit.(image_files, "/", limit=3))]
else
vallist = hcat(split.(readlines(joinpath(@__DIR__, "val_list.txt")))...)
labels = parse.(Int, vallist[2, :]) .+ 1
@@ -315,7 +315,7 @@ end

function AverageMeter(name::String, fmt::String)
fmtstr = FormatExpr("$name {1:$fmt} ({2:$fmt})")
-    return AverageMeter(; fmtstr = fmtstr)
+    return AverageMeter(; fmtstr=fmtstr)
end

function update!(meter::AverageMeter, val, n::Int)
@@ -333,7 +333,7 @@ struct ProgressMeter{N}
meters::NTuple{N, AverageMeter}
end

-function ProgressMeter(num_batches::Int, meters::NTuple{N}, prefix::String = "") where {N}
+function ProgressMeter(num_batches::Int, meters::NTuple{N}, prefix::String="") where {N}
fmt = "%" * string(length(string(num_batches))) * "d"
prefix = prefix != "" ? endswith(prefix, " ") ? prefix : prefix * " " : ""
batch_fmtstr = generate_formatter("$prefix[$fmt/" * sprintf1(fmt, num_batches) * "]")
@@ -464,11 +464,11 @@ function main(args)
println("$(now()) => creating model `$(args["arch"])`")
end
end
-    model, ps, st = get_model(args["arch"], IMAGENET_MODELS_DICT, rng; warmup = true,
-                              pretrain = args["pretrained"])
+    model, ps, st = get_model(args["arch"], IMAGENET_MODELS_DICT, rng; warmup=true,
+                              pretrain=args["pretrained"])

-    normalization_parameters = (mean = reshape([0.485f0, 0.456f0, 0.406f0], 1, 1, 3),
-                                std = reshape([0.229f0, 0.224f0, 0.225f0], 1, 1, 3))
+    normalization_parameters = (mean=reshape([0.485f0, 0.456f0, 0.406f0], 1, 1, 3),
+                                std=reshape([0.229f0, 0.224f0, 0.225f0], 1, 1, 3))
train_data_augmentation = Resize(256, 256) |> FlipX(0.5) |> RCropSize(224, 224)
val_data_augmentation = Resize(256, 256) |> CropSize(224, 224)
train_dataset = ImageDataset(joinpath(args["data"], "train"),
@@ -496,7 +496,7 @@ function main(args)
optimiser_state = FluxMPI.synchronize!(optimiser_state)
should_log() && println("$(now()) ==> synced optimiser state across all ranks")
end
-    scheduler = Step(λ = args["learning-rate"], γ = 0.1f0, step_sizes = 30)
+    scheduler = Step(λ=args["learning-rate"], γ=0.1f0, step_sizes=30)

if args["resume"] != ""
if isfile(args["resume"])
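For reference, `Step` above (whose `(λ` was mangled in this page's rendering and is reconstructed in the diff) is ParameterSchedulers.jl's step schedule: the rate starts at `λ` and is multiplied by `γ` every `step_sizes` epochs. A sketch of its behavior, assuming the callable-schedule API that ParameterSchedulers documents:

    using ParameterSchedulers

    s = Step=0.5f0, γ=0.1f0, step_sizes=30)
    s(1)    # 0.5f0  (epochs 1-30)
    s(31)   # 0.05f0 (epochs 31-60)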
28 changes: 14 additions & 14 deletions examples/NeuralODE/main.jl
@@ -26,13 +26,13 @@ function loadmnist(batchsize, train_split)
## Process images into (H,W,C,BS) batches
x_data = Float32.(reshape(imgs, size(imgs, 1), size(imgs, 2), 1, size(imgs, 3)))
y_data = onehot(labels_raw)
-    (x_train, y_train), (x_test, y_test) = splitobs((x_data, y_data); at = train_split)
+    (x_train, y_train), (x_test, y_test) = splitobs((x_data, y_data); at=train_split)

return (
## Use DataLoader to automatically minibatch and shuffle the data
-        DataLoader(collect.((x_train, y_train)); batchsize = batchsize, shuffle = true),
+        DataLoader(collect.((x_train, y_train)); batchsize=batchsize, shuffle=true),
## Don't shuffle the test data
-        DataLoader(collect.((x_test, y_test)); batchsize = batchsize, shuffle = false))
+        DataLoader(collect.((x_test, y_test)); batchsize=batchsize, shuffle=false))
end

# ## Define the Neural ODE Layer
@@ -49,9 +49,9 @@ struct NeuralODE{M <: Lux.AbstractExplicitLayer, So, Se, T, K} <:
end

function NeuralODE(model::Lux.AbstractExplicitLayer;
-                   solver = Tsit5(),
-                   sensealg = InterpolatingAdjoint(; autojacvec = ZygoteVJP()),
-                   tspan = (0.0f0, 1.0f0),
+                   solver=Tsit5(),
+                   sensealg=InterpolatingAdjoint(; autojacvec=ZygoteVJP()),
+                   tspan=(0.0f0, 1.0f0),
kwargs...)
return NeuralODE(model, solver, sensealg, tspan, kwargs)
end
@@ -62,13 +62,13 @@ function (n::NeuralODE)(x, ps, st)
return u_
end
prob = ODEProblem{false}(ODEFunction{false}(dudt), x, n.tspan, ps)
-    return solve(prob, n.solver; sensealg = n.sensealg, n.kwargs...), st
+    return solve(prob, n.solver; sensealg=n.sensealg, n.kwargs...), st
end

function diffeqsol_to_array(x::ODESolution{T, N, <:AbstractVector{<:CuArray}}) where {T, N}
-    dropdims(gpu(x); dims = 3)
+    dropdims(gpu(x); dims=3)
end
-diffeqsol_to_array(x::ODESolution) = dropdims(Array(x); dims = 3)
+diffeqsol_to_array(x::ODESolution) = dropdims(Array(x); dims=3)

# ## Create and Initialize the Neural ODE Layer
function create_model()
@@ -77,10 +77,10 @@ function create_model()
Dense(784, 20, tanh),
NeuralODE(Chain(Dense(20, 10, tanh), Dense(10, 10, tanh),
Dense(10, 20, tanh));
-                      save_everystep = false,
-                      reltol = 1.0f-3,
-                      abstol = 1.0f-3,
-                      save_start = false),
+                      save_everystep=false,
+                      reltol=1.0f-3,
+                      abstol=1.0f-3,
+                      save_start=false),
diffeqsol_to_array,
Dense(20, 10))

@@ -97,7 +97,7 @@ end
# ## Define Utility Functions
get_class(x) = argmax.(eachcol(x))

-logitcrossentropy(ŷ, y) = mean(-sum(y .* logsoftmax(ŷ); dims = 1))
+logitcrossentropy(ŷ, y) = mean(-sum(y .* logsoftmax(ŷ); dims=1))

function loss(x, y, model, ps, st)
ŷ, st = model(x, ps, st)
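Putting the pieces of this file together, constructing and calling the layer standalone would look roughly like this (a sketch; it assumes the imports used elsewhere in the script and that `NeuralODE` subtypes a Lux container layer, as the truncated struct definition suggests):

    using Lux, OrdinaryDiffEq, Random

    rng = Random.default_rng()
    node = NeuralODE(Chain(Dense(20, 10, tanh), Dense(10, 20, tanh));
                     save_everystep=false, save_start=false)
    ps, st = Lux.setup(rng, node)
    sol, st = node(ones(Float32, 20, 1), ps, st)   # sol is an ODESolution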
14 changes: 7 additions & 7 deletions examples/SimpleRNN/main.jl
@@ -16,7 +16,7 @@ using MLUtils, Optimisers, Zygote, NNlib, Random, Statistics

# We will use MLUtils to generate 500 (noisy) clockwise and 500 (noisy) anticlockwise spirals, and from this data create a `MLUtils.DataLoader`. The dataloader will give us sequences of size 2 × seq_len × batch_size, and we need to predict a binary value indicating whether the sequence is clockwise or anticlockwise.

-function get_dataloaders(; dataset_size = 1000, sequence_length = 50)
+function get_dataloaders(; dataset_size=1000, sequence_length=50)
## Create the spirals
data = [MLUtils.Datasets.make_spiral(sequence_length) for _ in 1:dataset_size]
## Get the labels
@@ -25,16 +25,16 @@ function get_dataloaders(; dataset_size = 1000, sequence_length = 50)
for d in data[1:(dataset_size ÷ 2)]]
anticlockwise_spirals = [reshape(d[1][:, (sequence_length + 1):end], :, sequence_length,
1) for d in data[((dataset_size ÷ 2) + 1):end]]
-    x_data = Float32.(cat(clockwise_spirals..., anticlockwise_spirals...; dims = 3))
+    x_data = Float32.(cat(clockwise_spirals..., anticlockwise_spirals...; dims=3))
## Split the dataset
-    (x_train, y_train), (x_val, y_val) = splitobs((x_data, labels); at = 0.8,
-                                                  shuffle = true)
+    (x_train, y_train), (x_val, y_val) = splitobs((x_data, labels); at=0.8,
+                                                  shuffle=true)
## Create DataLoaders
return (
## Use DataLoader to automatically minibatch and shuffle the data
-        DataLoader(collect.((x_train, y_train)); batchsize = 128, shuffle = true),
+        DataLoader(collect.((x_train, y_train)); batchsize=128, shuffle=true),
## Don't shuffle the validation data
-        DataLoader(collect.((x_val, y_val)); batchsize = 128, shuffle = false))
+        DataLoader(collect.((x_val, y_val)); batchsize=128, shuffle=false))
end
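
As a quick check of the shapes this produces (a sketch; the sizes follow from the defaults above):

    train_loader, val_loader = get_dataloaders()
    (x, y) = first(train_loader)
    size(x)   # (2, 50, 128): features × sequence_length × batchsize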

# ## Creating a Classifier
@@ -72,7 +72,7 @@ function (s::SpiralClassifier)(x::AbstractArray{T, 3}, ps::NamedTuple,
## After running through the sequence we will pass the output through the classifier
y, st_classifier = s.classifier(h, ps.classifier, st.classifier)
## Finally remember to create the updated state
-    st = merge(st, (classifier = st_classifier, lstm_cell = st_lstm))
+    st = merge(st, (classifier=st_classifier, lstm_cell=st_lstm))
return vec(y), st
end

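Because Lux state is an immutable NamedTuple, the `merge` above rebuilds the state rather than mutating it; a minimal illustration of the Base behavior it relies on (field names here are invented):

    st = (classifier=(;), lstm_cell=(rng=nothing,), extra=1)
    merge(st, (classifier=(seen=1,), lstm_cell=(rng=0,)))
    # (classifier = (seen = 1,), lstm_cell = (rng = 0,), extra = 1)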
(Diff truncated: 18 more changed files not shown.)