diff --git a/benchmarks/analytic-banana/server.py b/benchmarks/analytic-banana/server.py
index f462bd9..dc6526d 100644
--- a/benchmarks/analytic-banana/server.py
+++ b/benchmarks/analytic-banana/server.py
@@ -25,11 +25,11 @@ def __call__(self, parameters, config):
         y = [(parameters[0][0] / a),
              (parameters[0][1] * a + a * b * (parameters[0][0]**2 + a**2))]
 
-        return [[multivariate_normal.logpdf(y, [0, 4], [[1.0*scale, 0.5*scale], [0.5*scale, 1.0*scale]])]]
+        return [[float(multivariate_normal.logpdf(y, [0, 4], [[1.0*scale, 0.5*scale], [0.5*scale, 1.0*scale]]))]]
 
     def supports_evaluate(self):
         return True
 
 model = Banana()
 
-umbridge.serve_models([model], 4243)
\ No newline at end of file
+umbridge.serve_models([model], 4243)
diff --git a/benchmarks/analytic-donut/server.py b/benchmarks/analytic-donut/server.py
index 7419c03..59d3443 100644
--- a/benchmarks/analytic-donut/server.py
+++ b/benchmarks/analytic-donut/server.py
@@ -19,7 +19,7 @@ def get_output_sizes(self, config):
 
     def __call__(self, parameters, config):
         r = np.linalg.norm(parameters[0])
-        return [[ - (r - Donut.radius)**2 / Donut.sigma2 ]]
+        return [[float(- (r - Donut.radius)**2 / Donut.sigma2) ]]
 
     def supports_evaluate(self):
         return True
@@ -28,8 +28,8 @@ def gradient(self, out_wrt, in_wrt, parameters, sens, config):
         r = np.linalg.norm(parameters[0])
         if (r == 0):
             return [0,0]
-        return [sens[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2,
-                sens[0] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2]
+        return [float(sens[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2),
+                float(sens[0] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2)]
 
     def supports_gradient(self):
         return True
@@ -38,12 +38,12 @@ def apply_jacobian(self, out_wrt, in_wrt, parameters, vec, config):
         r = np.linalg.norm(parameters[0])
         if (r == 0):
             return [0]
-        return [vec[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2
-              + vec[1] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2]
+        return [float(vec[0] * parameters[0][0] * (Donut.radius / r - 1) * 2 / Donut.sigma2
+              + vec[1] * parameters[0][1] * (Donut.radius / r - 1) * 2 / Donut.sigma2)]
 
     def supports_apply_jacobian(self):
         return True
 
 model = Donut()
 
-umbridge.serve_models([model], 4243)
\ No newline at end of file
+umbridge.serve_models([model], 4243)
diff --git a/benchmarks/analytic-funnel/server.py b/benchmarks/analytic-funnel/server.py
index 5efd076..cd7b80b 100644
--- a/benchmarks/analytic-funnel/server.py
+++ b/benchmarks/analytic-funnel/server.py
@@ -22,14 +22,14 @@ def f(x, m, s):
         s0 = 3
         m1 = 0
         s1 = np.exp(parameters[0][0] / 2)
-        return [[ f(parameters[0][0], m0, s0) + f(parameters[0][1], m1, s1) ]]
+        return [[ float(f(parameters[0][0], m0, s0) + f(parameters[0][1], m1, s1)) ]]
 
     def supports_evaluate(self):
         return True
 
     def gradient(self, out_wrt, in_wrt, parameters, sens, config):
-        return [self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0],
-                self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0]]
+        return [float(self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0]),
+                float(self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0])]
 
     def supports_gradient(self):
         return True
@@ -46,12 +46,12 @@ def dfds(x, m, s):
         m1 = 0
         s1 = np.exp(parameters[0][0] / 2)
 
-        return [vec[1] * dfdx(parameters[0][1], m1, s1)
-              + vec[0] * (dfdx(parameters[0][0], m0, s0) + .5 * s1 * dfds(parameters[0][1], m1, s1))]
+        return [float(vec[1] * dfdx(parameters[0][1], m1, s1)
+              + vec[0] * (dfdx(parameters[0][0], m0, s0) + .5 * s1 * dfds(parameters[0][1], m1, s1)))]
 
     def supports_apply_jacobian(self):
         return True
 
 model = Funnel()
 
-umbridge.serve_models([model], 4243)
\ No newline at end of file
+umbridge.serve_models([model], 4243)
diff --git a/benchmarks/analytic-gaussian-mixture/server.py b/benchmarks/analytic-gaussian-mixture/server.py
index 08ae80d..4076640 100644
--- a/benchmarks/analytic-gaussian-mixture/server.py
+++ b/benchmarks/analytic-gaussian-mixture/server.py
@@ -23,14 +23,14 @@ def __call__(self, parameters, config):
         if dens1 + dens2 + dens3 == 0:
             # log(0) not defined, so return minimal float value
             return [[ sys.float_info.min ]]
-        return [[ np.log(dens1 + dens2 + dens3) ]]
+        return [[ float(np.log(dens1 + dens2 + dens3)) ]]
 
     def supports_evaluate(self):
         return True
 
     def gradient(self, out_wrt, in_wrt, parameters, sens, config):
-        return [self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0],
-                self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0]]
+        return [float(self.apply_jacobian(out_wrt, in_wrt, parameters, [sens[0], 0], config)[0]),
+                float(self.apply_jacobian(out_wrt, in_wrt, parameters, [0, sens[0]], config)[0])]
 
     def supports_gradient(self):
         return True
@@ -43,14 +43,14 @@ def apply_jacobian(self, out_wrt, in_wrt, parameters, vec, config):
         if dens1 + dens2 + dens3 == 0:
             # Return zero in log(0) case above
             return [0]
-        return [- vec[0] / (dens1 + dens2 + dens3)
+        return [float(- vec[0] / (dens1 + dens2 + dens3)
                 * (dens1 * (parameters[0][0] - -1.5) / 0.8
                  + dens2 * (parameters[0][0] - 1.5) / 0.8
                  + dens3 * (parameters[0][0] - -2) / 0.5)
                 - vec[1] / (dens1 + dens2 + dens3)
                 * (dens1 * (parameters[0][1] - -1.5) / 0.8
                  + dens2 * (parameters[0][1] - 1.5) / 0.8
-                 + dens3 * (parameters[0][1] - 2) / 0.5)
+                 + dens3 * (parameters[0][1] - 2) / 0.5))
                ]
 
     def supports_apply_jacobian(self):
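
Note (not part of the patch): every change above wraps a value returned from a model server in the built-in float. A plausible reading is that this guards JSON serialization of the UM-Bridge HTTP responses: np.float64 happens to subclass Python's float, but other NumPy scalar types do not, and the standard-library json module rejects anything that is not a plain bool/int/float/str/list/dict. A minimal sketch of the failure mode and the fix, under that assumption:

# Sketch only: why casting NumPy scalars to the built-in float keeps
# model outputs JSON-serializable regardless of the scalar type involved.
import json
import numpy as np

r = np.linalg.norm([1.0, 2.0])      # a numpy.float64 scalar, as in the donut model
print(json.dumps([[float(r)]]))     # always safe: a plain built-in float

try:
    json.dumps([[np.float32(r)]])   # a NumPy scalar that does not subclass float
except TypeError as err:
    print(err)                      # "Object of type float32 is not JSON serializable"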