### A Pluto.jl notebook ###
# v0.12.10

using Markdown
using InteractiveUtils

# ╔═╡ 62b029ce-2835-11eb-3c3f-fdaa43e64d27
begin
    using CUDA
    using Flux, Flux.Data.MNIST, Statistics
    using Flux: onehotbatch, onecold, crossentropy, throttle
    using Base.Iterators: repeated
end

# ╔═╡ 4d80f0ea-283f-11eb-08a6-5b0027377013
# Forbid scalar indexing of GPU arrays: it silently falls back to
# element-by-element host<->device transfers, which is extremely slow.
CUDA.allowscalar(false)

# ╔═╡ 79b21b0a-2835-11eb-361b-e3a500dac309
# Load training data: 28x28 grayscale images of handwritten digits.
imgs = MNIST.images()

# ╔═╡ d03dc2bc-2835-11eb-37ed-0502aad3fe9c
begin
    # Flatten one 28x28 image into a 784-element Float32 vector
    # (Float32 matches the default parameter eltype of Flux layers).
    imagestrip(image) = Float32.(reshape(image, :))

    # 784 x N design matrix: one column per training image.
    X = hcat(imagestrip.(imgs)...)
end

# ╔═╡ 6c48a8ac-2836-11eb-3086-d3a91c177ddc
# Target output: one-hot encode the digit (0-9) each image represents.
begin
    labels = MNIST.labels()
    Y = onehotbatch(labels, 0:9)
end

# ╔═╡ 0cebc402-2836-11eb-1a70-45c00bcfd7e4
# Defining the neural network
begin
    # Move the data to the GPU ONCE and reuse the device arrays below.
    # The original callback called gpu(X)/gpu(Y) on every invocation,
    # re-uploading the whole dataset each time the loss was reported.
    Xg = gpu(X)
    Yg = gpu(Y)

    # Two-layer MLP: 784 -> 32 (relu) -> 10, softmax over digit classes.
    m = gpu(Chain(
        Dense(28 * 28, 32, relu),
        Dense(32, 10),
        softmax))

    # Cross-entropy between the softmax output and the one-hot targets.
    loss(x, y) = crossentropy(m(x), y)

    # Full-batch training: the dataset is a single (X, Y) pair.
    dataset = [(Xg, Yg)]
    opt = ADAM()

    # Progress callback; throttled to at most once every 5 s below.
    evalcb = () -> @show(loss(Xg, Yg))
end

# ╔═╡ 4774d9f0-2837-11eb-1836-ad4fd43c2d75
# Perform training on the data: 200 full-batch epochs.
@Flux.epochs 200 Flux.train!(loss, params(m), dataset, opt, cb = throttle(evalcb, 5))

# ╔═╡ c9fc65fa-283c-11eb-21ba-2fbdacff0dd3
# Evaluate model prediction accuracy: fraction of samples whose argmax
# prediction matches the true label (onecold inverts onehotbatch).
# Results are moved to the CPU first, since scalar GPU indexing is disabled.
accuracy(x, y) = mean(onecold(cpu(m(x))) .== onecold(cpu(y)))

# ╔═╡ 19fbbe8e-283d-11eb-16e5-6fca41b0b176
# Training-set accuracy of the fitted model (reuses the GPU copies).
accuracy(Xg, Yg)

# ╔═╡ Cell order:
# ╠═62b029ce-2835-11eb-3c3f-fdaa43e64d27
# ╠═4d80f0ea-283f-11eb-08a6-5b0027377013
# ╠═79b21b0a-2835-11eb-361b-e3a500dac309
# ╠═d03dc2bc-2835-11eb-37ed-0502aad3fe9c
# ╠═6c48a8ac-2836-11eb-3086-d3a91c177ddc
# ╠═0cebc402-2836-11eb-1a70-45c00bcfd7e4
# ╠═4774d9f0-2837-11eb-1836-ad4fd43c2d75
# ╠═c9fc65fa-283c-11eb-21ba-2fbdacff0dd3
# ╠═19fbbe8e-283d-11eb-16e5-6fca41b0b176