From c85892795df0f9ee8110c06d3beabaea49f8676b Mon Sep 17 00:00:00 2001
From: David Widmann
Date: Thu, 15 Apr 2021 00:48:50 +0200
Subject: [PATCH 1/3] Update example to new syntax

---
 examples/regression_1d.jl | 23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)

diff --git a/examples/regression_1d.jl b/examples/regression_1d.jl
index 978f1699..605ddcb8 100644
--- a/examples/regression_1d.jl
+++ b/examples/regression_1d.jl
@@ -110,10 +110,7 @@ struct GPLoglikelihood{X,Y}
 end
 
 function (ℓ::GPLoglikelihood)(params)
-    kernel = ScaledKernel(
-        transform(Matern52Kernel(), ScaleTransform(softplus(params[1]))),
-        softplus(params[2]),
-    )
+    kernel = softplus(params[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(params[2])))
     f = GP(kernel)
     fx = f(ℓ.x, 0.1)
     return logpdf(fx, ℓ.y)
@@ -210,9 +207,7 @@ struct GPPosterior{X,Y}
 end
 
 function (g::GPPosterior)(p)
-    kernel = ScaledKernel(
-        transform(Matern52Kernel(), ScaleTransform(softplus(p[1]))), softplus(p[2])
-    )
+    kernel = softplus(p[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(p[2])))
     f = GP(kernel)
     return posterior(f(g.x, 0.1), g.y)
 end
@@ -404,10 +399,7 @@ struct NegativeELBO{X,Y}
 end
 
 function (g::NegativeELBO)(params)
-    kernel = ScaledKernel(
-        transform(Matern52Kernel(), ScaleTransform(softplus(params[1]))),
-        softplus(params[2]),
-    )
+    kernel = softplus(params[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(params[2])))
     f = GP(kernel)
     fx = f(g.x, 0.1)
     return -elbo(fx, g.y, f(logistic.(params[3:end])))
@@ -424,11 +416,11 @@ opt = optimize(NegativeELBO(x_train, y_train), x0, LBFGS())
 
 opt.minimizer
 
-# The optimized value of the inverse lengthscale is
+# The optimized value of the variance is
 
 softplus(opt.minimizer[1])
 
-# and of the variance is
+# and of the inverse lengthscale is
 
 softplus(opt.minimizer[2])
 
@@ -436,9 +428,8 @@ softplus(opt.minimizer[2])
 # posterior. We can observe that there is a significant improvement over the
 # log-likelihood with the default kernel parameters of value 1.
 
-opt_kernel = ScaledKernel(
-    transform(Matern52Kernel(), ScaleTransform(softplus(opt.minimizer[1]))),
-    softplus(opt.minimizer[2]),
+opt_kernel = softplus(opt.minimizer[1]) * (
+    Matern52Kernel() ∘ ScaleTransform(softplus(opt.minimizer[2]))
 )
 opt_f = GP(opt_kernel)
 opt_fx = opt_f(x_train, 0.1)

From 5f9a205aff9b320c213618e52bd8944cbf6ae9d2 Mon Sep 17 00:00:00 2001
From: David Widmann
Date: Thu, 15 Apr 2021 00:52:17 +0200
Subject: [PATCH 2/3] Fix format

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 examples/regression_1d.jl | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/regression_1d.jl b/examples/regression_1d.jl
index 605ddcb8..7c980057 100644
--- a/examples/regression_1d.jl
+++ b/examples/regression_1d.jl
@@ -428,9 +428,9 @@ softplus(opt.minimizer[2])
 # posterior. We can observe that there is a significant improvement over the
 # log-likelihood with the default kernel parameters of value 1.
 
-opt_kernel = softplus(opt.minimizer[1]) * (
-    Matern52Kernel() ∘ ScaleTransform(softplus(opt.minimizer[2]))
-)
+opt_kernel =
+    softplus(opt.minimizer[1]) *
+    (Matern52Kernel() ∘ ScaleTransform(softplus(opt.minimizer[2])))
 opt_f = GP(opt_kernel)
 opt_fx = opt_f(x_train, 0.1)
 ap = approx_posterior(VFE(), opt_fx, y_train, opt_f(logistic.(opt.minimizer[3:end])))

From eb9ea2a336c6e5fb83c8f181d7f9c72589b7ad81 Mon Sep 17 00:00:00 2001
From: David Widmann
Date: Thu, 15 Apr 2021 01:07:43 +0200
Subject: [PATCH 3/3] Fix titles of plots

---
 examples/regression_1d.jl | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/regression_1d.jl b/examples/regression_1d.jl
index 7c980057..d533dd18 100644
--- a/examples/regression_1d.jl
+++ b/examples/regression_1d.jl
@@ -191,7 +191,7 @@ histogram(
     xlabel="sample",
     ylabel="counts",
     layout=2,
-    title=["inverse length scale" "variance"],
+    title=["variance" "inverse length scale"],
     legend=false,
 )
 vline!(mean_samples'; linewidth=2)
@@ -282,7 +282,7 @@ histogram(
     xlabel="sample",
     ylabel="counts",
     layout=2,
-    title=["inverse length scale" "variance"],
+    title=["variance" "inverse length scale"],
     legend=false,
 )
 vline!(mean_samples'; linewidth=2)
@@ -342,7 +342,7 @@ histogram(
     xlabel="sample",
     ylabel="counts",
     layout=2,
-    title=["inverse length scale" "variance"],
+    title=["variance" "inverse length scale"],
 )
 vline!(mean_samples'; layout=2, labels="mean")
 
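For reference, the kernel construction these patches migrate to can be exercised on its own. The following is a minimal sketch, assuming KernelFunctions.jl's `*`/`∘` composition syntax and `softplus` from StatsFuns.jl (both already used by examples/regression_1d.jl); the parameter values are made up for illustration and are not taken from the patches:

using KernelFunctions
using StatsFuns: softplus

# Illustrative unconstrained parameters (hypothetical values).
params = [0.3, -0.2]

# `k ∘ ScaleTransform(α)` rescales the kernel inputs by α, so α acts as an
# inverse lengthscale; multiplying by a positive scalar wraps the result in a
# ScaledKernel. Hence params[1] is the variance and params[2] the inverse
# lengthscale (the ordering that patches 1 and 3 make consistent).
kernel = softplus(params[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(params[2])))

kernel(0.0, 1.0)                    # evaluate the kernel at two scalar inputs
kernelmatrix(kernel, 0.0:0.25:1.0)  # Gram matrix on a small input grid

Passing the raw parameters through `softplus` keeps variance and inverse lengthscale positive while letting Optim's LBFGS work on unconstrained inputs, which is why the example's closures apply it inside the model instead of constraining `params` directly.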