From eb8d1a683522a592ef8f1a29c4b4e4ec6d680b8a Mon Sep 17 00:00:00 2001 From: Mridul Seth Date: Thu, 23 Jan 2020 15:43:19 +0530 Subject: [PATCH] time.clock is removed from py3.8 (#471) * change time.clock to time.process_time * use time instead of process_time * update core.py --- HARK/core.py | 6 +++--- HARK/cstwMPC/cstwMPC.py | 10 +++++----- HARK/interpolation.py | 18 +++++++++--------- HARK/parallel.py | 6 +++--- HARK/tests/OpenCLtest.py | 10 +++++----- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/HARK/core.py b/HARK/core.py index e1129aafd..aeee94fea 100644 --- a/HARK/core.py +++ b/HARK/core.py @@ -18,7 +18,7 @@ from .utilities import getArgNames, NullFunc from copy import copy, deepcopy import numpy as np -from time import clock +from time import time from .parallel import multiThreadCommands, multiThreadCommandsFake @@ -794,7 +794,7 @@ def solveAgent(agent, verbose): completed_cycles = 0 # NOQA max_cycles = 5000 # NOQA - escape clause if verbose: - t_last = clock() + t_last = time() while go: # Solve a cycle of the model, recording it if horizon is finite solution_cycle = solveOneCycle(agent, solution_last) @@ -822,7 +822,7 @@ def solveAgent(agent, verbose): # Display progress if requested if verbose: - t_now = clock() + t_now = time() if infinite_horizon: print('Finished cycle #' + str(completed_cycles) + ' in ' + str(t_now-t_last) + ' seconds, solution distance = ' + str(solution_distance)) diff --git a/HARK/cstwMPC/cstwMPC.py b/HARK/cstwMPC/cstwMPC.py index 89fb19731..67c3c4ff8 100644 --- a/HARK/cstwMPC/cstwMPC.py +++ b/HARK/cstwMPC/cstwMPC.py @@ -11,7 +11,7 @@ import numpy as np from copy import copy, deepcopy -from time import clock +from time import time from HARK.utilities import approxMeanOneLognormal, combineIndepDstns, approxUniform, \ getPercentiles, getLorenzShares, calcSubpopAvg, approxLognormal from HARK.simulation import drawDiscrete @@ -589,10 +589,10 @@ def main(): center_range = param_range, spread = spread, dist_type = Params.dist_type) - t_start = clock() + t_start = time() spread_estimate = golden(paramDistObjective,brack=spread_range,tol=1e-4) center_estimate = EstimationEconomy.center_save - t_end = clock() + t_end = time() else: # Run the param-point estimation only paramPointObjective = lambda center : getKYratioDifference(Economy = EstimationEconomy, @@ -601,10 +601,10 @@ def main(): center = center, spread = 0.0, dist_type = Params.dist_type) - t_start = clock() + t_start = time() center_estimate = brentq(paramPointObjective,param_range[0],param_range[1],xtol=1e-6) spread_estimate = 0.0 - t_end = clock() + t_end = time() # Display statistics about the estimated model #center_estimate = 0.986609223266 diff --git a/HARK/interpolation.py b/HARK/interpolation.py index 144101c2b..23268829b 100644 --- a/HARK/interpolation.py +++ b/HARK/interpolation.py @@ -3510,7 +3510,7 @@ def main(): print("of the model modules in /ConsumptionSavingModel. 
In the future, running") print("this module will show examples of each interpolation class.") - from time import clock + from time import time import matplotlib.pyplot as plt RNG = np.random.RandomState(123) @@ -3616,13 +3616,13 @@ def main(): rand_x = RNG.rand(N)*5.0 rand_y = RNG.rand(N)*5.0 rand_z = RNG.rand(N)*5.0 - t_start = clock() + t_start = time() z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z) q = (dfdw(rand_w,rand_x,rand_y,rand_z) - g.derivativeW(rand_w,rand_x,rand_y,rand_z))/dfdw(rand_w,rand_x,rand_y,rand_z) r = (dfdx(rand_w,rand_x,rand_y,rand_z) - g.derivativeX(rand_w,rand_x,rand_y,rand_z))/dfdx(rand_w,rand_x,rand_y,rand_z) p = (dfdy(rand_w,rand_x,rand_y,rand_z) - g.derivativeY(rand_w,rand_x,rand_y,rand_z))/dfdy(rand_w,rand_x,rand_y,rand_z) s = (dfdz(rand_w,rand_x,rand_y,rand_z) - g.derivativeZ(rand_w,rand_x,rand_y,rand_z))/dfdz(rand_w,rand_x,rand_y,rand_z) - t_end = clock() + t_end = time() z.sort() print(z) @@ -3689,9 +3689,9 @@ def main(): rand_x = RNG.rand(N)*5.0 rand_y = RNG.rand(N)*5.0 rand_z = RNG.rand(N)*5.0 - t_start = clock() + t_start = time() z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z) - t_end = clock() + t_end = time() #print(z) print(t_end-t_start) @@ -3711,11 +3711,11 @@ def main(): rand_x = RNG.rand(1000)*5.0 rand_y = RNG.rand(1000)*5.0 - t_start = clock() + t_start = time() z = (f(rand_x,rand_y) - g(rand_x,rand_y))/f(rand_x,rand_y) q = (dfdx(rand_x,rand_y) - g.derivativeX(rand_x,rand_y))/dfdx(rand_x,rand_y) r = (dfdy(rand_x,rand_y) - g.derivativeY(rand_x,rand_y))/dfdy(rand_x,rand_y) - t_end = clock() + t_end = time() z.sort() q.sort() r.sort() @@ -3785,9 +3785,9 @@ def main(): rand_y = RNG.rand(N)*5.0 rand_z = RNG.rand(N)*5.0 - t_start = clock() + t_start = time() z = (f(rand_w,rand_x,rand_y,rand_z) - g(rand_w,rand_x,rand_y,rand_z))/f(rand_w,rand_x,rand_y,rand_z) - t_end = clock() + t_end = time() z.sort() print(z) print(t_end-t_start) diff --git a/HARK/parallel.py b/HARK/parallel.py index 97c8b856f..2de073a75 100644 --- a/HARK/parallel.py +++ b/HARK/parallel.py @@ -10,7 +10,7 @@ from builtins import range import multiprocessing import numpy as np -from time import clock +from time import time import csv @@ -274,7 +274,7 @@ def parallelNelderMead(objFunc,guess,perturb=None,P=1,ftol=0.000001,xtol=0.00000 # Run the Nelder-Mead algorithm until a terminal condition is met go = True while go: - t_start = clock() + t_start = time() iters += 1 if verbose > 0: print('Beginning iteration #' + str(iters) + ' now.') @@ -320,7 +320,7 @@ def parallelNelderMead(objFunc,guess,perturb=None,P=1,ftol=0.000001,xtol=0.00000 fmin = fvals[0] f_dist = np.abs(fmin - fvals[-1]) x_dist = np.max(np.sqrt(np.sum((simplex - np.tile(simplex[0,:],(N,1)))**2.0,axis=1))) - t_end = clock() + t_end = time() if verbose > 0: t_iter = t_end - t_start print('Finished iteration #' + str(iters) +' with ' + str(new_evals) + ' evaluations (' + str(evals) + ' cumulative) in ' + str(t_iter) + ' seconds.') diff --git a/HARK/tests/OpenCLtest.py b/HARK/tests/OpenCLtest.py index 3e16a4902..aa2d5ca25 100644 --- a/HARK/tests/OpenCLtest.py +++ b/HARK/tests/OpenCLtest.py @@ -9,7 +9,7 @@ # EVERY machine will have a device 0:0, which by default is the CPU # Other devices will have various numbers # Substitute her the device you want to compare to the CPU -from time import clock +from time import time if __name__ == "__main__": @@ -82,17 +82,17 @@ c_buf = ctx.create_buffer(cl.CL_MEM_WRITE_ONLY | 
cl.CL_MEM_ALLOC_HOST_PTR, size=c.nbytes) # Write only, allocate memory, use byte size of array c # Run the kernel and time it - t_start = clock() + t_start = time() krn.set_args(a_buf, b_buf, c_buf, k[0:1]) # Set kernel arguments as the three buffers and a float queue.execute_kernel(krn, [N], None) # Execute the simple kernel, specifying the global workspace dimensionality and local workspace dimensionality (None uses some default) queue.read_buffer(c_buf, c) # Read the memory buffer for c into the numpy array for c - t_end = clock() + t_end = time() print('OpenCL took ' + str(t_end-t_start) + ' seconds.') # Now do the equivalent work as the kernel, but in Python (and time it) - t_start = clock() + t_start = time() truth = (a + b) * k[0] - t_end = clock() + t_end = time() print('Python took ' + str(t_end-t_start) + ' seconds.') # Make sure that OpenCL and Python actually agree on their results
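
Note on the substitution (not part of the patch): time.clock() was deprecated in
Python 3.3 and removed in Python 3.8, which is why every timing call above switches
to time(). Per the commit bullets, the PR first tried time.process_time() and then
settled on wall-clock time(), which also counts time spent waiting (e.g. on I/O or
other processes), not just CPU work. Both functions return a float number of
seconds, so the existing t_end - t_start pattern works unchanged; only the imported
name differs. A minimal sketch of the difference, assuming only the standard library:

    # Sketch only -- not part of the patch. Compares the wall-clock timer the
    # patch adopts (time.time) with the CPU timer mentioned in the commit
    # bullets (time.process_time).
    from time import time, process_time, sleep

    t_start = time()                  # wall-clock timestamp, as in the patched code
    cpu_start = process_time()        # CPU-time counter, for comparison
    sleep(0.5)                        # idle time: counted by time(), ignored by process_time()
    sum(i * i for i in range(10**6))  # CPU-bound work: counted by both timers
    t_end = time()
    cpu_end = process_time()

    print('Wall clock: ' + str(t_end - t_start) + ' seconds.')      # roughly 0.5s plus the loop
    print('CPU time:   ' + str(cpu_end - cpu_start) + ' seconds.')  # loop only; sleep excluded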