From 83790f31c8f1725040ee6e4458cb63403a32ee58 Mon Sep 17 00:00:00 2001
From: Vecko
Date: Fri, 19 May 2023 08:04:53 +0800
Subject: [PATCH 01/10] Pre-commit config

Added black and prettier
---
 .pre-commit-config.yaml | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 .pre-commit-config.yaml

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..86485c3
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,9 @@
+repos:
+  - repo: https://github.com/psf/black
+    rev: "23.3.0"
+    hooks:
+      - id: black
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: v2.7.1
+    hooks:
+      - id: prettier

From e1c3de5eb8d31df38696bd9c393f4423ec95c6a6 Mon Sep 17 00:00:00 2001
From: Vecko
Date: Tue, 23 May 2023 14:37:09 +0800
Subject: [PATCH 02/10] prettier ignore

---
 .prettierignore | 6 ++++++
 1 file changed, 6 insertions(+)
 create mode 100644 .prettierignore

diff --git a/.prettierignore b/.prettierignore
new file mode 100644
index 0000000..d5f751d
--- /dev/null
+++ b/.prettierignore
@@ -0,0 +1,6 @@
+*
+
+# Ignore all except...
+# !index.html # Broken for the timebeing
+!utrechtteam.html
+!faq.html
\ No newline at end of file

From 2dd75b3200a3bd57c932c7ade067c39bc504a965 Mon Sep 17 00:00:00 2001
From: Vecko
Date: Tue, 23 May 2023 14:38:50 +0800
Subject: [PATCH 03/10] HTML fix

---
 faq.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/faq.html b/faq.html
index 4bb44dd..0fedf20 100755
--- a/faq.html
+++ b/faq.html
@@ -80,7 +80,7 @@

Frequently Asked Questions and support for Parcels


- +

Parcels (Probably A Really Computationally Efficient Lagrangian Simulator) is a set of Python classes and methods to create customisable particle tracking simulations using output from Ocean Circulation models. Parcels focusses on tracking of passive water parcels, as well as active particulates such as plankton, plastic and fish.

The Parcels code is licensed under an open source MIT license and can be downloaded from https://github.com/OceanParcels/parcels. From 72033c5c10a22296d5fcb64c2d5b1d4a0a0809b9 Mon Sep 17 00:00:00 2001 From: Vecko Date: Tue, 23 May 2023 14:40:13 +0800 Subject: [PATCH 04/10] Run pre-commit --- .../create_movingeddies_files.py | 47 +- examples-data/make_index.py | 40 +- faq.html | 890 +++++++--- utrechtteam.html | 1467 +++++++++++------ 4 files changed, 1641 insertions(+), 803 deletions(-) diff --git a/examples-data/MovingEddies_data/create_movingeddies_files.py b/examples-data/MovingEddies_data/create_movingeddies_files.py index 0c08419..d43068f 100644 --- a/examples-data/MovingEddies_data/create_movingeddies_files.py +++ b/examples-data/MovingEddies_data/create_movingeddies_files.py @@ -3,23 +3,28 @@ import math -def moving_eddies_fieldset(xdim=200, ydim=350, mesh='flat'): +def moving_eddies_fieldset(xdim=200, ydim=350, mesh="flat"): # Set Parcels FieldSet variables - time = np.arange(0., 8. * 86400., 86400., dtype=np.float64) + time = np.arange(0.0, 8.0 * 86400.0, 86400.0, dtype=np.float64) # Coordinates of the test fieldset (on A-grid in m) - if mesh is 'spherical': + if mesh is "spherical": lon = np.linspace(0, 4, xdim, dtype=np.float32) lat = np.linspace(45, 52, ydim, dtype=np.float32) else: - lon = np.linspace(0, 4.e5, xdim, dtype=np.float32) - lat = np.linspace(0, 7.e5, ydim, dtype=np.float32) + lon = np.linspace(0, 4.0e5, xdim, dtype=np.float32) + lat = np.linspace(0, 7.0e5, ydim, dtype=np.float32) # Grid spacing in m def cosd(x): return math.cos(math.radians(float(x))) - dx = (lon[1] - lon[0]) * 1852 * 60 * cosd(lat.mean()) if mesh is 'spherical' else lon[1] - lon[0] - dy = (lat[1] - lat[0]) * 1852 * 60 if mesh is 'spherical' else lat[1] - lat[0] + + dx = ( + (lon[1] - lon[0]) * 1852 * 60 * cosd(lat.mean()) + if mesh is "spherical" + else lon[1] - lon[0] + ) + dy = (lat[1] - lat[0]) * 1852 * 60 if mesh is "spherical" else lat[1] - lat[0] # Define arrays U (zonal), V (meridional), and P (sea surface height) on A-grid U = np.zeros((lon.size, lat.size, time.size), dtype=np.float32) @@ -27,7 +32,7 @@ def cosd(x): P = np.zeros((lon.size, lat.size, time.size), dtype=np.float32) # Some constants - corio_0 = 1.e-4 # Coriolis parameter + corio_0 = 1.0e-4 # Coriolis parameter h0 = 1 # Max eddy height sig = 0.5 # Eddy e-folding decay scale (in degrees) g = 10 # Gravitational constant @@ -35,15 +40,21 @@ def cosd(x): dX = eddyspeed * 86400 / dx # Grid cell movement of eddy max each day dY = eddyspeed * 86400 / dy # Grid cell movement of eddy max each day - [x, y] = np.mgrid[:lon.size, :lat.size] + [x, y] = np.mgrid[: lon.size, : lat.size] for t in range(time.size): - hymax_1 = lat.size / 7. - hxmax_1 = .75 * lon.size - dX * t - hymax_2 = 3. * lat.size / 7. 
+ dY * t - hxmax_2 = .75 * lon.size - dX * t + hymax_1 = lat.size / 7.0 + hxmax_1 = 0.75 * lon.size - dX * t + hymax_2 = 3.0 * lat.size / 7.0 + dY * t + hxmax_2 = 0.75 * lon.size - dX * t - P[:, :, t] = h0 * np.exp(-(x-hxmax_1)**2/(sig*lon.size/4.)**2-(y-hymax_1)**2/(sig*lat.size/7.)**2) - P[:, :, t] += h0 * np.exp(-(x-hxmax_2)**2/(sig*lon.size/4.)**2-(y-hymax_2)**2/(sig*lat.size/7.)**2) + P[:, :, t] = h0 * np.exp( + -((x - hxmax_1) ** 2) / (sig * lon.size / 4.0) ** 2 + - (y - hymax_1) ** 2 / (sig * lat.size / 7.0) ** 2 + ) + P[:, :, t] += h0 * np.exp( + -((x - hxmax_2) ** 2) / (sig * lon.size / 4.0) ** 2 + - (y - hymax_2) ** 2 / (sig * lat.size / 7.0) ** 2 + ) V[:-1, :, t] = -np.diff(P[:, :, t], axis=0) / dx / corio_0 * g V[-1, :, t] = V[-2, :, t] # Fill in the last column @@ -51,12 +62,12 @@ def cosd(x): U[:, :-1, t] = np.diff(P[:, :, t], axis=1) / dy / corio_0 * g U[:, -1, t] = U[:, -2, t] # Fill in the last row - data = {'U': U, 'V': V, 'P': P} - dimensions = {'lon': lon, 'lat': lat, 'time': time} + data = {"U": U, "V": V, "P": P} + dimensions = {"lon": lon, "lat": lat, "time": time} return FieldSet.from_data(data, dimensions, transpose=True, mesh=mesh) if __name__ == "__main__": fieldset = moving_eddies_fieldset() - filename = 'moving_eddies' + filename = "moving_eddies" fieldset.write(filename) diff --git a/examples-data/make_index.py b/examples-data/make_index.py index b07b9fa..0bb389a 100644 --- a/examples-data/make_index.py +++ b/examples-data/make_index.py @@ -68,7 +68,7 @@ """ -EXCLUDED = ['index.html'] +EXCLUDED = ["index.html"] import os import argparse @@ -76,29 +76,43 @@ # May need to do "pip install mako" from mako.template import Template -def fun(dir,rootdir): - print('Processing: '+dir) - filenames = [fname for fname in sorted(os.listdir(dir)) - if fname not in EXCLUDED and os.path.isfile(dir+fname)] - dirnames = [fname for fname in sorted(os.listdir(dir)) - if fname not in EXCLUDED ] + +def fun(dir, rootdir): + print("Processing: " + dir) + filenames = [ + fname + for fname in sorted(os.listdir(dir)) + if fname not in EXCLUDED and os.path.isfile(dir + fname) + ] + dirnames = [fname for fname in sorted(os.listdir(dir)) if fname not in EXCLUDED] dirnames = [fname for fname in dirnames if fname not in filenames] -# header = os.path.basename(dir) - f = open(dir+'/index.html','w') - print(Template(INDEX_TEMPLATE).render(dirnames=dirnames,filenames=filenames, header=dir,ROOTDIR=rootdir,time=time.ctime(os.path.getctime(dir))),file=f) + # header = os.path.basename(dir) + f = open(dir + "/index.html", "w") + print( + Template(INDEX_TEMPLATE).render( + dirnames=dirnames, + filenames=filenames, + header=dir, + ROOTDIR=rootdir, + time=time.ctime(os.path.getctime(dir)), + ), + file=f, + ) f.close() for subdir in dirnames: try: - fun(dir+subdir+"/",rootdir+'../') + fun(dir + subdir + "/", rootdir + "../") except: pass + def main(): parser = argparse.ArgumentParser() parser.add_argument("directory") parser.add_argument("--header") args = parser.parse_args() - fun(args.directory+'/','../') + fun(args.directory + "/", "../") + -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/faq.html b/faq.html index 0fedf20..44407ce 100755 --- a/faq.html +++ b/faq.html @@ -1,115 +1,186 @@ - - - - - - - - - OceanParcels - a Lagrangian Ocean Analysis toolbox - - - - - - - - - - - - - - - - - - - - - - -

- -
-
- - -
-
- -

Frequently Asked Questions and support for Parcels

- -
-

- Parcels (Probably A Really Computationally Efficient Lagrangian Simulator) is a set of Python classes and methods to create customisable particle tracking simulations using output from Ocean Circulation models. Parcels focusses on tracking of passive water parcels, as well as active particulates such as plankton, plastic and fish. -

- The Parcels code is licensed under an open source MIT license and can be downloaded from https://github.com/OceanParcels/parcels. -

- There is also an extensive documentation of all methods and classes in Parcels. - -


-

Parcels design overview

- - -

- The figure above gives a brief overview of the most important classes and methods in Parcels and how they are related. Classes are in blue, methods are in green. Note that not all classes and methods are shown. - -
-

Tips on constructing FieldSet objects

- - Probably the trickiest bit to get right when starting with Parcels on your own model output is how to construct a FieldSet object. - - The general method to use is FieldSet.from_netcdf(), which requires filenames, variables and dimensions. Each of these is a dictionary, and variables requires at least a U and V, but any other variable can be added too (e.g. temperature, mixedlayerdepth, etc). Note also that filenames can contain wildcards. - - For example, the GlobCurrent data that is shipped with Parcels can be read with: -
fname = 'GlobCurrent_example_data/*.nc'
+  
+    
+    
+    
+    
+
+    OceanParcels - a Lagrangian Ocean Analysis toolbox
+
+    
+    
+
+    
+    
+    
+
+    
+    
+    
+
+    
+    
+
+    
+    
+  
+
+  
+    
+    
+
+    
+ + +
+
+ +

Frequently Asked Questions and support for Parcels

+ +
+

+ Parcels (Probably A Really + Computationally Efficient Lagrangian + Simulator) is a set of Python classes and methods to create + customisable particle tracking simulations using output from Ocean + Circulation models. Parcels focusses on tracking of passive water + parcels, as well as active particulates such as plankton, + plastic and + fish. +

+

+ The Parcels code is licensed under an open source + MIT license + and can be downloaded from + https://github.com/OceanParcels/parcels. +

+

+ There is also an + extensive documentation of + all methods and classes in Parcels. +

+ +
+

Parcels design overview

+ + +

+ The figure above gives a brief overview of the most important classes and + methods in Parcels and how they are related. Classes are in blue, methods + are in green. Note that not all classes and methods are shown. + +
+

Tips on constructing FieldSet objects

+ + Probably the trickiest bit to get right when starting with Parcels on your + own model output is how to construct a + FieldSet object. The general method + to use is FieldSet.from_netcdf(), + which requires filenames, + variables and + dimensions. Each of these is a + dictionary, and variables requires at + least a U and + V, but any other + variable can be added too (e.g. + temperature, mixedlayerdepth, etc). Note also that + filenames can contain wildcards. For + example, the GlobCurrent data that is shipped with Parcels can be read + with: +
fname = 'GlobCurrent_example_data/*.nc'
 filenames = {'U': fname, 'V': fname}
 variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity'}
 dimensions = {'lat': 'lat', 'lon': 'lon', 'depth': 'depth', 'time': 'time'}
 fset = FieldSet.from_netcdf(filenames, variables, dimensions)
- Note that dimensions can also be a dictionary-of-dictionaries. For example, if you have wind data on a completely different grid (and without depth dimension), you can do: + Note that dimensions can also be a + dictionary-of-dictionaries. For example, if you have wind data on a + completely different grid (and without + depth dimension), you can do: -
fname = 'GlobCurrent_example_data/*.nc'
+      
fname = 'GlobCurrent_example_data/*.nc'
 wname = 'path_to_your_windfiles'
 filenames = {'U': fname, 'V': fname, 'wind': wname}
 variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity', 'wind': 'wind'}
@@ -118,171 +189,484 @@ 

Tips on constructing FieldSet objects

dimensions['V'] = {'lat': 'lat', 'lon': 'lon', 'depth': 'depth', 'time': 'time'} dimensions['wind'] = {'lat': 'latw', 'lon': 'lonw', 'time': 'time'} fset = FieldSet.from_netcdf(filenames, variables, dimensions)
- In a similar way, you can add U and V fields that are on different grids (e.g. Arakawa C-grids). Parcels will take care under the hood that the different grids are dealt with properly. - -
-
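For curvilinear C-grid model output such as NEMO, Parcels also provides FieldSet.from_nemo(), which takes the same kind of dictionaries. The sketch below is only an illustration: the file names (coordinates.nc, nemo_U*.nc, nemo_V*.nc) and the NEMO-style variable and dimension names (uo, vo, glamf, gphif, time_counter) are assumptions to be replaced by whatever your own model output actually uses.

```py
# Illustrative C-grid sketch; file, variable and dimension names are assumed,
# not taken from the GlobCurrent example above. FieldSet.from_nemo() deals with
# the C-grid staggering and velocity rotation internally.
from parcels import FieldSet

mesh_mask = "coordinates.nc"  # assumed file holding the glamf/gphif coordinates
filenames = {
    "U": {"lon": mesh_mask, "lat": mesh_mask, "data": "nemo_U*.nc"},
    "V": {"lon": mesh_mask, "lat": mesh_mask, "data": "nemo_V*.nc"},
}
variables = {"U": "uo", "V": "vo"}
dimensions = {"lon": "glamf", "lat": "gphif", "time": "time_counter"}
fieldset = FieldSet.from_nemo(filenames, variables, dimensions)
```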

Writing Parcels Kernels

- - One of the most powerful features of Parcels is the ability to write custom Kernels (see e.g. the Adding-a-custom-behaviour-kernel part of the Tutorial). These Kernels are little snippets of code that get executed by Parcels, giving the ability to add ‘behaviour’ to particles. -

- However, there are some key limitations to the Kernels that everyone who wants to write their own should be aware of: -
    -
  • Every Kernel must be a function with the following (and only those) arguments: (particle, fieldset, time) (note that before Parcels v2.0, Kernels also required a dt argument)
  • - -
  • In order to run successfully in JIT mode, Kernel definitions can only contain the following types of commands:
  • -
      -
    • Basic arithmetical operators (+, -, *, /, **) and assignments (=).
    • - -
    • Basic logical operators (<, ==, !=, >, &, |). Note that you can use a statement like particle.lon != particle.lon to check if particle.lon is NaN (since math.nan != math.nan)
    • - -
    • if and while loops, as well as break statements. Note that for-loops are not supported in JIT mode
    • - -
    • Interpolation of a Field from the FieldSet at a (time, depth, lat, lon) point, using square brackets notation.
      Note that from version 2.0, the syntax has changed from the old (time, lon, lat, depth) to the new (time, depth, lat, lon) order.
      For example, to interpolate the zonal velocity (U) field at the particle location, use the following statement:
      - -
      value = fieldset.U[time, particle.depth, particle.lat, particle.lon]
      - Also note that it is possible to add the particle as an additional argument to the Field Sampling, so -
      value = fieldset.U[time, depth, lat, lon, particle]
      - or simply -
      value = fieldset.U[particle]
      - Adding the particle to the sampling can dramatically speed up simulations in Scipy-mode on Curvilinear Grids, see also the JIT-vs_Scipy tutorial.
      +

      Writing Parcels Kernels

      + + One of the most powerful features of Parcels is the ability to write + custom Kernels (see e.g. the + Adding-a-custom-behaviour-kernel + part of the Tutorial). These Kernels are little snippets of code that get + executed by Parcels, giving the ability to add ‘behaviour’ to particles. +

      + However, there are some key limitations to the Kernels that everyone who + wants to write their own should be aware of: +
        +
      • + Every Kernel must be a function with the following (and only those) + arguments: + (particle, fieldset, time) + (note that before Parcels v2.0, Kernels also required a + dt argument) +
      • - +
      • + In order to run successfully in JIT mode, Kernel definitions can only + contain the following types of commands: +
      • +
          +
        • + Basic arithmetical operators (+, -, + *, + /, + **) and assignments (=). +
        • + +
        • + Basic logical operators (<, + ==, + !=, + >, + &, + |). Note that you can use a + statement like + particle.lon != particle.lon to + check if particle.lon is NaN + (since math.nan != math.nan) +
        • + +
        • + if and + while loops, as well as + break statements. Note that + for-loops are not supported in + JIT mode +
        • + +
        • + Interpolation of a Field from + the FieldSet at a + (time, depth, lat, lon) point, + using using square brackets notation.
          Note that from version 2.0, the syntax has changed from the old + (time, lon, lat, depth) to the new (time, depth, lat, lon) + order.
          + For example, to interpolate the zonal velocity (U) field at the + particle location, use the following statement:
          + +
          value = fieldset.U[time, particle.depth, particle.lat, particle.lon]
          + Also note that it is possible to add the + particle as an additional + argument to the Field Sampling, so +
          value = fieldset.U[time, depth, lat, lon, particle]
          + or simply +
          value = fieldset.U[particle]
          + Adding the particle to the sampling can dramatically speed up + simulations in Scipy-mode on Curvilinear Grids, see als the + JIT-vs_Scipy tutorial. +
        • + +
        • + Functions from the + maths standard library. +
        • +
        • + Functions from the custom + ParcelsRandom library at + parcels.rng. Note that these + have to be used as + ParcelsRandom.random(), + ParcelsRandom.uniform() etc for + the code to compile. +
        • + +
        • + Simple print statements, such + as: +
        • +
            +
          • print("Some print")
          • +
          • print(particle.lon)
          • +
          • + print("particle id: %d" % particle.id) +
          • +
          • + print("lon: %f, lat: %f" % (particle.lon, particle.lat)) +
          • +
          +
        +
      • + Local variables can be used in Kernels, and these variables will be + accessible in all concatenated Kernels. Note that these local + variables are not shared between particles, and also not between time + steps. +
      • -
      • Functions from the maths standard library.
      • -
      • Functions from the custom ParcelsRandom library at parcels.rng. Note that these have to be used as ParcelsRandom.random(), ParcelsRandom.uniform() etc for the code to compile.
      • +
      • + Note that one has to be careful with writing kernels for vector fields + on Curvilinear grids. While Parcels automatically rotates the U and V + field when necessary, this is not the case for for example wind data. + In that case, a custom rotation function will have to be written. +
      • +
      -
    • Simple print statements, such as:
    • + All other functions and methods are not supported yet in Parcels Kernels. + If there is a functionality that can not be programmed with this limited + set of commands, please create an + Issue ticket. + +
      +

      The output format of ParticleFile

      + The information on particle trajectories is stored in + zarr format, when using the + ParticleFile class. The + file/directory contains arrays of at least the + time, + lon, + lat and + z (depth) of each particle + trajectory, plus any extra custom + Variables defined in the + pclass. +

      + Each row in these arrays corresponds to a particle, and each column is an + 'observation'. Note that, when particles are added during runtime, their + first position is still stored in the first column, so that not all + particles in the same column necessarily share the same + time. The time of each observation is + stored in the time array. See + the output tutorial notebook + for more information. + +
      +

      Memory Behaviour and Performance

      + + Parcels, with its Lagrangian simulation approach and the commonly large + amount of particles being simulated, is a computationally-demanding + application. Global scale, fine-resolution hydronamic input data, such as + from CMEMS and NEMO, add to this computational demand as they require + large blocks of memory to be available. Despite the continuous + improvements in performance and efficiency, it is thus possible to + encounter performance- and memory-related problems. The following + paragraphs are meant as a guideline to resolve those related issues. +

      + If you encounter random failures of your particle simulations, the first + step is to simplify the simulation in order to make the error reproducible + for external persons. Therefore, it is advisable to run the simulations + with the simplest type of particles (ideally: common Particle objects) and + possibly just a simple AdvectionRK4 kernel. This rules out any errors + coming from faulty calculations. In case this simplification resolves your + issue, then the failure of your simulation is due to the specific particle + or kernel you attempt to execute, and you may want to refine your + calculations accordingly. +

      + When calculation-related cancellations are ruled out as error source, the + first step in tracking memory-related issues is a reasonability analysis: + can issues that you are experiencing actual stem from memory exhaustion? + Get an overview of the models and fields that you are using, consider if + you're running the application with- or without depth information, and + keep in mind that at each time in the simulation you need at least space + for 3 timestamps of all numerical field values in addition to the actual + particles. Does this really exhaust the memory? As a safeguard check, you + can run a subset of your simulation (e.g the first day or week) locally + while tracking the memory consumption with tools such as the + Task Manager (Windows), the Activity Monitor (MacOS) or the + System Monitor (Linux). If you can, run the simplified simulation + over the whole timespan locally on your computer (even though this may be + slow). +

      + Suppose that you have determined that reason for your simulation not + finishing can be or is memory exhaustion. Then, the next + step is to determine if the cause of memory exhaustion is due to the + particles or due to the fields. Fortunately, this is easy to assess:
        -
      • print("Some print")
      • -
      • print(particle.lon)
      • -
      • print("particle id: %d" % particle.id)
      • -
      • print("lon: %f, lat: %f" % (particle.lon, particle.lat))
      • +
      • + Initialize your ParcileSet with only few particles, e.g. + pset = ParticleSet(fieldset=fieldset, pclass=JITParticle, + lon=np.random.rand(16,1), lat=np.random.rand(16,1)) +
      • +
      • + Do not re-add particles periodically via + repeatdt, e.g. change + pset = ParticleSet(..., lat=np.random.rand(16,1), + repeatdt=timedelta(hours=1)) + to + pset = ParticleSet(..., lat=np.random.rand(16,1)) +
      -
    -
  • Local variables can be used in Kernels, and these variables will be accessible in all concatenated Kernels. Note that these local variables are not shared between particles, and also not between time steps.
  • - -
  • Note that one has to be careful with writing kernels for vector fields on Curvilinear grids. While Parcels automatically rotates the U and V field when necessary, this is not the case for for example wind data. In that case, a custom rotation function will have to be written.
  • -
- - All other functions and methods are not supported yet in Parcels Kernels. If there is a functionality that can not be programmed with this limited set of commands, please create an Issue ticket. - -
-
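To make the rules above concrete, here is a minimal kernel sketch that samples an extra field along trajectories and is chained to the built-in advection kernel. It assumes a fieldset that already contains a temperature field named T; the particle class, variable name, release position, runtime and time step are illustrative choices for this sketch, not Parcels defaults.

```py
# Minimal custom-kernel sketch; 'T', 'temp' and the numbers below are assumed
# example values. `fieldset` is taken to be a FieldSet built as shown earlier.
from parcels import AdvectionRK4, JITParticle, ParticleSet, Variable

class SampleParticle(JITParticle):
    temp = Variable("temp", initial=0.0)  # extra variable filled by the kernel

def SampleTemperature(particle, fieldset, time):
    # square-bracket Field interpolation at the particle position, as described above
    particle.temp = fieldset.T[time, particle.depth, particle.lat, particle.lon]

pset = ParticleSet(fieldset=fieldset, pclass=SampleParticle, lon=[3.0], lat=[48.0])
kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(SampleTemperature)
pset.execute(kernels, runtime=86400, dt=300)
```

Concatenating kernels with + runs them one after the other within each time step, so here the temperature is sampled right after every advection update.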

The output format of ParticleFile

- The information on particle trajectories is stored in zarr format, when using the ParticleFile class. The file/directory contains arrays of at least the time, lon, lat and z (depth) of each particle trajectory, plus any extra custom Variables defined in the pclass. -

- Each row in these arrays corresponds to a particle, and each column is an 'observation'. Note that, when particles are added during runtime, their first position is still stored in the first column, so that not all particles in the same column necessarily share the same time. The time of each observation is stored in the time array. See the output tutorial notebook for more information. - -
-
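As a sketch of how that output can be opened for analysis (assuming xarray with a zarr backend is installed, and with output.zarr as a placeholder for your own file name):

```py
# Sketch of reading Parcels zarr output back in; "output.zarr" is a placeholder.
import xarray as xr

ds = xr.open_zarr("output.zarr")   # arrays are indexed by (trajectory, obs)
print(ds)                          # shows time, lon, lat, z and any custom Variables
start = ds.isel(obs=0)             # first stored observation of every trajectory
```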

Memory Behaviour and Performance

- Parcels, with its Lagrangian simulation approach and the commonly large number of particles being simulated, is a computationally demanding application. Global-scale, fine-resolution hydrodynamic input data, such as from CMEMS and NEMO, add to this computational demand as they require large blocks of memory to be available. Despite the continuous improvements in performance and efficiency, it is thus possible to encounter performance- and memory-related problems. The following paragraphs are meant as a guideline to resolve those related issues.

- If you encounter random failures of your particle simulations, the first step is to simplify the simulation in order to make the error reproducible for external persons. Therefore, it is advisable to run the simulations with the simplest type of particles (ideally: common Particle objects) and possibly just a simple AdvectionRK4 kernel. This rules out any errors coming from faulty calculations. In case this simplification resolves your issue, then the failure of your simulation is due to the specific particle or kernel you attempt to execute, and you may want to refine your calculations accordingly. -

- When calculation-related cancellations are ruled out as the error source, the first step in tracking memory-related issues is a reasonability analysis: can the issues that you are experiencing actually stem from memory exhaustion? Get an overview of the models and fields that you are using, consider if you're running the application with or without depth information, and keep in mind that at each time in the simulation you need at least space for 3 timestamps of all numerical field values in addition to the actual particles. Does this really exhaust the memory? As a safeguard check, you can run a subset of your simulation (e.g. the first day or week) locally while tracking the memory consumption with tools such as the Task Manager (Windows), the Activity Monitor (macOS) or the System Monitor (Linux). If you can, run the simplified simulation over the whole timespan locally on your computer (even though this may be slow).

- Suppose that you have determined that reason for your simulation not finishing can be or is memory exhaustion. Then, the next step is to determine if the cause of memory exhaustion is due to the particles or due to the fields. Fortunately, this is easy to assess: -
    -
  • Initialize your ParticleSet with only a few particles, e.g. pset = ParticleSet(fieldset=fieldset, pclass=JITParticle, lon=np.random.rand(16,1), lat=np.random.rand(16,1))
  • -
  • Do not re-add particles periodically via repeatdt, e.g. change pset = ParticleSet(..., lat=np.random.rand(16,1), repeatdt=timedelta(hours=1)) to pset = ParticleSet(..., lat=np.random.rand(16,1))
  • -
- Run your simulation again and observe the memory with the tools mentioned above. If your simulation runs through without problems, it is almost certain your real simulation requires too many particles. In that case, an applicable general advice is to reduce the overall particle density (i.e. creating less particles overall), or manually split your simulation in smaller pieces and reduce the particle count accordingly, if you want to preserve the particle density. Apart from this advice, there is no generic solution at this point for the problem of exhaustive particle sets, so: Get in touch with us at the OceanParcels GitHub page via the issue tracker to find an individual solution. -

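If you prefer to record the memory footprint from inside the script as well (rather than only watching the system monitor), a small helper along these lines can be called before and after pset.execute(); it assumes the third-party psutil package is installed and is not part of Parcels itself.

```py
# Optional memory-logging helper; assumes `psutil` is installed (pip/conda install psutil).
import os
import psutil

def log_memory(tag=""):
    rss_mib = psutil.Process(os.getpid()).memory_info().rss / 2**20
    print(f"[memory] {tag}: {rss_mib:.0f} MiB resident")
```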
- If the reduction of particles indeed does not solve your issue and your simulation still crashes, then this is likely due to the fields consuming more memory than available. Luckily, there are some solutions to this problem. -

- -

Introduce field chunking (i.e. dynamic loading of fields)

-

- So, your situation is as follows: you found out that your Parcels simulation consumes more field data than can fit in memory on any system that you tried. Then, it may be that you are not using field chunking. -

- Field chunking is introduced in Parcels > 2.1.0. It manages the access and data transfer between your hydrodynamic data (i.e. your NetCDF files), the computer memory, and the kernels, in a way so that only the parts of the data ends up in memory which are directly needed by some particles. This technique is commonly referred to as dynamic loading. In Python, with its numeric backend of Numpy and SciPy, the Dask module implements this data access technique, which is called chunking or lazy loading within Dask. Chunking for field data is activated by default in Parcels > 2.1.0. -

- As an example, let us assume we load a field set as such: -
- fieldset = FieldSet.from_netcdf(files, variables, dimensions) -
- then, this is equivalent to -
- fieldset = FieldSet.from_netcdf(files, variables, dimensions, deferred_load=True, field_chunksize='auto') -

- Hence, if you are using an older version of Parcels (<= 2.1.0), or you have accidentally switched of chunking via fieldset = FieldSet.from_netcdf(..., field_chunksize=False), then you may exceed your available memory. Try out the latest version of Parcels and activate field chunking. -

- -

Simulations crash on HPC clusters (via job submission systems such as SGE or SLURM), but locally finish without problems

-

- The behaviour of simulations crashing on cluster systems whereas running as-expected on local machines can occur because -
    -
  • Python itself does not return memory to the operation system
  • -
  • Your (field) data are too large for the memory, but fit comfortably in virtual memory (e.g. swap drive, swap files, page files) =-> job submission systems do not provide access to the swap memory
  • -
  • Your (field) data exceed the default memory capacity granted to users of your cluster -> consult tutorials on SGE qsub (e.g. from MIT and University of Austin / Texas) or SLURM (e.g. from Princeton University or University of Buffalo) to raise the granted memory to your simulation job. In particular, consider submission option parameters such as -l h_vmem=size (SGE qsub) and - --mem=size (SLURM).
  • -
-

- -

Advanced control over chunking and memory - configure Dask

-

- In the case that you have field chunking running, your job submission files are all set correctly, but your fields are still too big, then your fields must be either many (in terms of individual fields), or they are complex (i.e. many diverse, hydrological properties), or they are in dense (i.e. fine-resolution) 4D grids (i.e. using time, depth, latitude and longitude). - In this case, you may need to tune your environment a bit more directly to your application or scenario. -
    -

    -
  1. Tune your chunk size configuration manually: Instead of leaving the chunking up to the black-box that is Dask, you can define your chunks yourself. Say you have simulation data of 30 timesteps on a grid with depth=150, lat=2100, lon=1800 (just an example), then you may wanna define the chunks yourself. + Run your simulation again and observe the memory with the tools mentioned + above. If your simulation runs through without problems, it is almost + certain your real simulation requires too many particles. In that case, an + applicable general advice is to reduce the overall particle density (i.e. + creating less particles overall), or manually split your simulation in + smaller pieces and reduce the particle count accordingly, if you want to + preserve the particle density. Apart from this advice, there is no generic + solution at this point for the problem of exhaustive particle sets, so: + Get in touch with us at + the OceanParcels GitHub page + via the issue tracker to find an individual solution. +

    + If the reduction of particles indeed does not solve your issue and your + simulation still crashes, then this is likely due to the fields consuming + more memory than available. Luckily, there are some solutions to this + problem. +

    + +

    + Introduce field chunking (i.e. dynamic loading of fields) +

    +

    + So, your situation is as follows: you found out that your Parcels + simulation consumes more field data than can fit in memory on any system + that you tried. Then, it may be that you are not using field chunking. +

    + Field chunking is introduced in Parcels > 2.1.0. It manages the access and + data transfer between your hydrodynamic data (i.e. your NetCDF files), the + computer memory, and the kernels, in a way so that only the parts of the + data ends up in memory which are directly needed by some particles. This + technique is commonly referred to as dynamic loading. In Python, + with its numeric backend of Numpy and SciPy, the + Dask module implements this data access + technique, which is called chunking or lazy loading within + Dask. Chunking for field data is activated by default in Parcels > + 2.1.0. +

    + As an example, let us assume we load a field set as such: +
    + fieldset = FieldSet.from_netcdf(files, variables, dimensions) +
    + then, this is equivalent to +
    + fieldset = FieldSet.from_netcdf(files, variables, dimensions, + deferred_load=True, field_chunksize='auto') +

    + Hence, if you are using an older version of Parcels (<= 2.1.0), or you + have accidentally switched of chunking via + fieldset = FieldSet.from_netcdf(..., field_chunksize=False), then you may exceed your available memory. Try out the latest version + of Parcels and activate field chunking. +

    + +

    + Simulations crash on HPC clusters (via job submission systems such as + SGE or SLURM), but locally finish without problems +

    +

    + The behaviour of simulations crashing on cluster systems whereas running + as-expected on local machines can occur because
      -
    • use the parameter field_chunksize of the FieldSet class as a tuple: here, the order is (time steps, depth size, latitude size, longitude size), so e.g. fieldset = FieldSet.from_netcdf(files, variables, dimensions, field_chunksize=(1,8,128,128))
    • -
    • use the parameter field_chunksize of the FieldSet class as a dictionary: here, you define the chunks by name, such that e.g. fieldset = FieldSet.from_netcdf(files, variables, dimensions, field_chunksize={'time_counter': 1, 'depth': 8, 'nav_lat': 128, 'nav_lon': 128})
    • +
    • Python itself does not return memory to the operation system
    • +
    • + Your (field) data are too large for the memory, but fit comfortably in + virtual memory (e.g. swap drive, swap files, page files) =-> + job submission systems do not provide access to the swap memory +
    • +
    • + Your (field) data exceed the default memory capacity granted to users + of your cluster -> consult tutorials on SGE qsub (e.g. from + MIT + and + University of Austin / Texas) or SLURM (e.g. from + Princeton University + or + University of Buffalo) to raise the granted memory to your simulation job. In particular, + consider submission option parameters such as + -l h_vmem=size (SGE qsub) and + --mem=size (SLURM). +
    -
  2. -

    -
  3. Override Dask's chunking procedure: Dask's auto-chunking feature attempts to minimize read-access from the file, hence it will chunk the data so that an individual chunk fits into memory. If you are working with multiple fields (or: multiple cores via MPI), then this assumption doesn't apply to you and, thus, the parameter setting field_chunksize='auto' may not provided you with the expected result. If you want to adapt this behaviour without manually tuning every application and scenario script, then you can configure the auto-chunking function itself.

    - Information on how to adapt the auto-chunking can be found in the Dask documentation. Dask itself can use a term in the Dask configuration file, which defines a new upper limit for a chunked array. Detailed information and guidance on the configuration file can be found in the Dask guidelines. -

    - As a short summary: when having installed Dask, there is a (hidden) user-defined folder in your home directory ${HOME}/.config/dask that has two files: dask.yaml and distributed.yaml. The second one is of less interest for you unless you have distributed file management (if unsure, contact your system administrator). Under normal conditions, Dask will read configuration parameters from dask.yaml. That file could look like this (default after installation): + +

    + Advanced control over chunking and memory - configure Dask +

    -
    +      In the case that you have field chunking running, your job submission
    +      files are all set correctly, but your fields are still too big, then your
    +      fields must be either many (in terms of individual fields), or they
    +      are complex (i.e. many diverse, hydrological properties), or they are in
    +      dense (i.e. fine-resolution) 4D grids (i.e. using time, depth,
    +      latitude and longitude). In this case, you may need to tune your
    +      environment a bit more directly to your application or scenario.
    +      
      +

      +
    1. + Tune your chunk size configuration manually: Instead of leaving + the chunking up to the black-box that is Dask, you can define your + chunks yourself. Say you have simulation data of 30 timesteps on a + grid with depth=150, lat=2100, lon=1800 (just an example), then you + may wanna define the chunks yourself. +
        +
      • + use the parameter field_chunksize of the FieldSet class as + a tuple: here, the order is + (time steps, depth size, latitude size, longitude size), so e.g. + fieldset = FieldSet.from_netcdf(files, variables, dimensions, + field_chunksize=(1,8,128,128)) +
      • +
      • + use the parameter field_chunksize of the FieldSet class as + a dictionary: here, you define the chunks by name, such that e.g. + fieldset = FieldSet.from_netcdf(files, variables, dimensions, + field_chunksize={'time_counter': 1, 'depth': 8, 'nav_lat': 128, + 'nav_lon': 128}) +
      • +
      +
    2. +

      +
    3. + Override Dask's chunking procedure: Dask's auto-chunking + feature attempts to minimize read-access from the file, hence it will + chunk the data so that an individual chunk fits into memory. If you + are working with multiple fields (or: multiple cores via MPI), then + this assumption doesn't apply to you and, thus, the parameter setting + field_chunksize='auto' may not + provided you with the expected result. If you want to adapt this + behaviour without manually tuning every application and scenario + script, then you can configure the auto-chunking function itself. +

      + Information on how to adapt the auto-chunking can be found in the + Dask documentation. Dask itself can use a term in the Dask configuration file, which + defines a new upper limit for a chunked array. Detailed + information and guidance on the configuration file can be found in the + Dask guidelines. +

      + As a short summary: when having installed Dask, there is a (hidden) + user-defined folder in your home directory + ${HOME}/.config/dask that has two files: + dask.yaml and distributed.yaml. The second + one is of less interest for you unless you have distributed file + management (if unsure, contact your system administrator). Under + normal conditions, Dask will read configuration parameters from + dask.yaml. That file could look like this (default + after installation): +

      +
         # temporary-directory: null     # Directory for local disk like /tmp, /scratch, or /local
       
         # array:
         #   svg:
      -  #     size: 120  # pixels
      - You can adapt this so the chunks, for example, are smaller, which improves loading and allows multiple simultaneous fields being accessed without exceeding memory. -
      +  #     size: 120  # pixels
      + You can adapt this so the chunks, for example, are smaller, which + improves loading and allows multiple simultaneous fields being + accessed without exceeding memory. +
         temporary-directory: /tmp
       
         array:
           svg:
             size: 120  # pixels
      -    chunk-size: 128 MiB
      - Another very important change is that we removed the # from the start of the lines, which transforms them from being comments into actual information. Of course, if you have e.g. 4 fields, then you may want to change 128 MiB into 32 MiB. Power-of-two sizes here are also advantageous, although not strictly necessary. -

      -
    4. -
    -
    - - -
- - - -
+ Another very important change is that we removed the + # from the start of the lines, + which transforms them from being comments into actual + information. Of course, if you have e.g. 4 fields, then you may + want to change 128 MiB into 32 MiB. Power-of-two sizes + here are also advantageous, although not strictly necessary. +

+ + +
-

- Copyright © OceanParcels project, 2020
- Based on the Modern Business theme, distributed by Start Bootstrap under an MIT license -

- - - - - - - - - + + + + + + + + + diff --git a/utrechtteam.html b/utrechtteam.html index 026926f..99feed9 100644 --- a/utrechtteam.html +++ b/utrechtteam.html @@ -1,740 +1,1169 @@ - - - - - - - - - + + + + + + + The Lagrangian Ocean Analysis team at Utrecht University - + - - + + - + - - - - - - - -
- -
- -

Tracking how ocean currents transport plastic, plankton and other materials

- -
- -

The Lagrangian Ocean Analysis team at Utrecht University

- The Lagrangian Ocean Analysis team within the Institute for Marine and Atmospheric research at Utrecht University's Department of Physics, uses the Parcels framework to create and analyse simulations of the transport of material (plastics, plankton, fish) by ocean currents and its impact on climate and marine ecosystems. Within the group, we adhere to Open Science principles and aim to create a collaborative and inclusive atmosphere. -

- -
- -

The team (alphabetical)

-
- + + + + + +
+
+ + + +
+
+ +

+ Tracking how ocean currents transport plastic, plankton and other + materials +

+ +
+ +

The Lagrangian Ocean Analysis team at Utrecht University

+ The Lagrangian Ocean Analysis team within the + Institute for Marine and Atmospheric research + at Utrecht University's Department of + Physics, uses the + Parcels framework to create + and analyse simulations of the transport of material (plastics, plankton, + fish) by ocean currents and its impact on climate and marine ecosystems. + Within the group, we adhere to Open Science principles and aim to create a + collaborative and inclusive atmosphere. +

+ +
+ +

The team (alphabetical)

+
-
- -
-

Bas Altena

-
Postdoctoral researcher
-

Bas works on the valorization of marine litter backtracking. -

-
- -
+
+ +
+

Bas Altena

+
+ Postdoctoral researcher +
+

+ Bas works on the valorization of marine litter backtracking. +

+
+ +
-
- -
-

Vesna Bertoncelj

-
PhD researcher
-

Vesna investigates how pollutants spread around coral reefs on Curacao.

-
- -
+
+ +
+

Vesna Bertoncelj

+
PhD researcher
+

Vesna investigates how pollutants spread around coral reefs on Curacao.

+
+ +
-
- -
-

Mark Elbertsen

-
MSc student
-

Mark simulates how and where large icebergs melted in the Eocene Southern Ocean. -

-
- -
+
+ +
+

Mark Elbertsen

+
MSc student
+

+ Mark simulates how and where large icebergs melted in the Eocene + Southern Ocean. +

+
+ +
-
- -
-

Jente Heisterborg

-
BSc student
-

Jente simulates how plastics in the Wadden Sea are affected by tides. -

-
- -
+
+ +
+

Jente Heisterborg

+
BSc student
+

+ Jente simulates how plastics in the Wadden Sea are affected by + tides. +

+
+ +
-
- -
-

Laura Gomez Navarro

-
Margarita Salas Fellow
-

Laura investigates how oceanic fine-scales affect the transport of marine plastic. -

-
- -
+
+ +
+

Laura Gomez Navarro

+
+ Margarita Salas Fellow +
+

+ Laura investigates how oceanic fine-scales affect the transport + of marine plastic. +

+
+ +
-
- -
-

Cleo Jongedijk

-
PhD researcher
-

Cleo investigates how plastic litter ends up on beaches. -

-
- -
+
+ +
+

Cleo Jongedijk

+
PhD researcher
+

+ Cleo investigates how plastic litter ends up on beaches. +

+
+ +
-
- -
-

Marc op den Kamp

-
MSc student
-

Marc uses machine learning to predict macroplastic beaching events. -

-
- -
+
+ +
+

Marc op den Kamp

+
MSc student
+

+ Marc uses machine learning to predict macroplastic beaching + events. +

+
+ +
-
- -
-

Clara Koster

-
BSc student
-

Clara computes the probability to find nanoplastics in samples from the South Atlantic. -

-
- -
+
+ +
+

Clara Koster

+
BSc student
+

+ Clara computes the probability to find nanoplastics in samples + from the South Atlantic. +

+
+ +
-
- -
-

Darshika Manral

-
PhD researcher
-

Darshika investigates how plankton interact with nutrients and plastic in the Atlantic Ocean. -

-
- -
+
+ +
+

Darshika Manral

+
PhD researcher
+

+ Darshika investigates how plankton interact with nutrients and + plastic in the Atlantic Ocean. +

+
+ +
-
- -
-

Claudio Pierard

-
PhD researcher
-

Claudio investigates the origin and fate of nanoplastics in our ocean. -

-
- -
+
+ +
+

Claudio Pierard

+
PhD researcher
+

+ Claudio investigates the origin and fate of nanoplastics in our + ocean. +

+
+ +
-
- -
-

Daan Reijnders

-
PhD researcher
-

Daan investigates how to incorporate diffusion into particle simulations in biogeochemical oceanography. -

-
- -
+
+ +
+

Daan Reijnders

+
PhD researcher
+

+ Daan investigates how to incorporate diffusion into particle + simulations in biogeochemical oceanography. +

+
+ +
-
- -
-

Mattia Romero

-
MSc student
-

Mattia investigates how fine-scale ocean processes lead to heterogeneity of floating plastic. -

-
- -
+
+ +
+

Mattia Romero

+
MSc student
+

+ Mattia investigates how fine-scale ocean processes lead to + heterogeneity of floating plastic. +

+
+ +
-
- -
-

Siren Rühs

-
Postdoctoral researcher
-

Siren investigates how the latest generation of ocean models can be used to simulate the dispersion of plastic. -

-
- -
+
+ +
+

Siren Rühs

+
+ Postdoctoral researcher +
+

+ Siren investigates how the latest generation of ocean models can + be used to simulate the dispersion of plastic. +

+
+ +
-
- -
-

Erik van Sebille

-
Professor
-

Erik investigates how ocean currents move 'stuff' around, and leads the OceanParcels project. -

-
- -
+
+ +
+

Erik van Sebille

+
Professor
+

+ Erik investigates how ocean currents move 'stuff' around, and + leads the OceanParcels project. +

+
+ +
-
- -
-

Steffie Ypma

-
Postdoctoral researcher
-

Steffie creates a tool that supports plastic cleanup in the Galapagos. -

-
- -
-
- -
-
- -

Previous team members

-
- +
+ +
+

Steffie Ypma

+
+ Postdoctoral researcher +
+

+ Steffie creates a tool that supports plastic cleanup in the + Galapagos. +

+
+ +
+
+
+
+ +

Previous team members

+
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- +
- -
- -
-
-

Current Projects

- -
- + +
+ +
+

Current Projects

+ +
- +
- +
- +
- +
- +
- +
- +
- +
- -
- -
-
-

Past Projects

- -
+ +
+ +
+

Past Projects

+ +
- +
- +
- +
- -
- -
-
-

Social Media

-
-
+ +
+ +
+

Social Media

+
+

- + Youtube channel - +

-
+
-
- -
-
- -
-
- -
-
-
- -
+
+ +
+
+ +
+
+ +
+
+ + + + +
-
- - - - - + + + + From b2f300d07b8332d093ed2b5e9a709af18d55308f Mon Sep 17 00:00:00 2001 From: Vecko Date: Tue, 23 May 2023 14:41:06 +0800 Subject: [PATCH 05/10] Add .git-blame-ignore-revs --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000..2d85f03 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1 @@ +72033c5c10a22296d5fcb64c2d5b1d4a0a0809b9 From b8301be1271aab13b14f6b2f2fdf3bb6364ae7db Mon Sep 17 00:00:00 2001 From: Vecko Date: Thu, 25 May 2023 12:21:03 +0800 Subject: [PATCH 06/10] Beautiful soup read write Read in index.html using beautiful soup, and write out. This automatically fixes some errors like unclosed tags. ```py from bs4 import BeautifulSoup html_file = "index.html" with open(html_file, 'r') as file: html_content = file.read() soup = BeautifulSoup(html_content, 'html.parser') with open(html_file, 'w', encoding="utf-8") as file: file.write(str(soup)) ``` --- index.html | 5572 +++++++++++++++++++++++++--------------------------- 1 file changed, 2626 insertions(+), 2946 deletions(-) diff --git a/index.html b/index.html index d201e8a..ee0a502 100755 --- a/index.html +++ b/index.html @@ -1,34 +1,27 @@ - + - - - - - - - - - Parcels - a Lagrangian Ocean Analysis toolbox - - - - - - - - - - - - - - - - - - + + + + - - - - + - -
- +
-
- -
- -

What is OceanParcels?

-
-
-

+


+

What is OceanParcels?

+
+
+

The OceanParcels project develops Parcels (Probably A Really Computationally Efficient Lagrangian Simulator), a set of Python classes and methods to create customisable particle tracking simulations using output from Ocean Circulation models. Parcels can be used to track passive and active particulates such as water, plankton, plastic and fish.

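As a flavour of what such a simulation looks like in code, the sketch below advects a single particle through the GlobCurrent example data that ships with Parcels; the release position, run length and output settings are arbitrary values chosen purely for illustration.

```py
# Minimal Parcels sketch using the GlobCurrent example data; the start position,
# runtime, dt and output name are illustrative choices only.
from datetime import timedelta
from parcels import AdvectionRK4, FieldSet, JITParticle, ParticleSet

fname = "GlobCurrent_example_data/*.nc"
filenames = {"U": fname, "V": fname}
variables = {"U": "eastward_eulerian_current_velocity",
             "V": "northward_eulerian_current_velocity"}
dimensions = {"lat": "lat", "lon": "lon", "depth": "depth", "time": "time"}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions)

pset = ParticleSet(fieldset=fieldset, pclass=JITParticle, lon=[25.0], lat=[-35.0])
output = pset.ParticleFile(name="GlobCurrentParticles.zarr", outputdt=timedelta(hours=6))
pset.execute(AdvectionRK4, runtime=timedelta(days=10), dt=timedelta(minutes=5),
             output_file=output)
```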
The code from the OceanParcels project is licensed under an open source MIT license and can be downloaded from github.com/OceanParcels/parcels or installed via anaconda.org/conda-forge/parcels:

- - - - - - - -

+ + + + + + + +

The manuscript detailing this first release of Parcels, version 0.9, has been published in Geoscientific Model Development and can be cited as:

-

- +

The manuscript detailing version 2.0 of Parcels is available at Geoscientific Model Development and can be cited as:

- +
+

The manuscript detailing the performance of Parcels is available at Computers and Geosciences and can be cited as:

-
-
- -
- -
- -

Installing Parcels

+
+
+
+
+

+
+
+

Installing Parcels

The simplest way to install the Parcels code is to use Anaconda and the Parcels Conda-Forge package with the latest release of Parcels. This package will automatically install (almost) all the requirements for a fully functional installation of Parcels. This is the “batteries-included” solution probably suitable for most users.

The steps below are the installation instructions for Linux / macOS and for Windows. If the commands for Linux / macOS and Windows differ, this is indicated with a comment at the end of the line.

-
    -
  1. Install Anaconda's Miniconda following the steps at https://conda.io/docs/user-guide/install/, making sure to select the Python-3 version. If you're on Linux / macOS, it also assumes that you installed Miniconda-3 to your home directory.
  2. -

    -
  3. Start a terminal (Linux / macOS) or the Anaconda prompt (Windows). Activate the root (or base) environment of your Miniconda and create an environment containing Parcels, all its essential dependencies, and the nice-to-have Jupyter, cartopy, and ffmpeg packages: +
      +
    1. Install Anaconda's Miniconda following the steps at https://conda.io/docs/user-guide/install/, making sure to select the Python-3 version. If you're on Linux / macOS, it also assumes that you installed Miniconda-3 to your home directory.
    2. +

      +
    3. Start a terminal (Linux / macOS) or the Anaconda prompt (Windows). Activate the root (or base) environment of your Miniconda and create an environment containing Parcels, all its essential dependencies, and the nice-to-have Jupyter, cartopy, and ffmpeg packages:
      conda activate root  # Linux / macOS
       activate root        # Windows
       
       conda create -n py3_parcels -c conda-forge parcels jupyter cartopy ffmpeg
    4. - - *Note that for some of the examples, pytest needs to be installed with conda install -n py3_parcels pytest. -

      -
    5. Activate the newly created Parcels environment. + +*Note that for some of the examples, pytest needs to be installed with conda install -n py3_parcels pytest. +

      +
    6. Activate the newly created Parcels environment.
      conda activate py3_parcels  # Linux / macOS
       activate py3_parcels        # Windows
    7. -
    8. Get a copy of the Parcels tutorials and examples, as well as the data required to run these: +
    9. Get a copy of the Parcels tutorials and examples, as well as the data required to run these:
      parcels_get_examples parcels_examples
    10. - *Note that if you are on Windows and you get a Fatal error in launcher error, you can instead download the data with -
      curl https://raw.githubusercontent.com/OceanParcels/parcels/master/parcels/scripts/get_examples.py > parcels_get_examples.py
      -python parcels_get_examples.py parcels_examples
      -
    11. Run the simplest of the examples to validate that you have a working Parcels setup: +*Note that if you are on Windows and you get a Fatal error in launcher error, you can instead download the data with +
      curl https://raw.githubusercontent.com/OceanParcels/parcels/master/parcels/scripts/get_examples.py > parcels_get_examples.py
      +python parcels_get_examples.py parcels_examples
      +
    12. Run the simplest of the examples to validate that you have a working Parcels setup:
      cd parcels_examples
       python example_peninsula.py --fieldset 100 100
    13. - *Note that if you are on macOS and get a compilation error, you may need to accept the Apple xcode license (xcode-select --install). If this does not solve the compilation error, you may want to try running export CC=gcc . If the compilation error remains, you may want to check this solution. +*Note that if you are on macOS and get a compilation error, you may need to accept the Apple xcode license (xcode-select --install). If this does not solve the compilation error, you may want to try running export CC=gcc . If the compilation error remains, you may want to check this solution.

      -
    14. Optionally, if you want to run all the examples and tutorials, start Jupyter and open the tutorial notebooks: +
    15. Optionally, if you want to run all the examples and tutorials, start Jupyter and open the tutorial notebooks:
      jupyter notebook
    16. -

      -
    17. The next time you start a terminal and want to work with Parcels, activate the environment with: +

      +
    18. The next time you start a terminal and want to work with Parcels, activate the environment with:
      conda activate py3_parcels  # Linux / macOS
       activate py3_parcels        # Windows
    19. -
    - -

    -

    Installing a non-released version of Parcels

    +
+

+

Installing a non-released version of Parcels

There might be cases where you want to install a version of Parcels that has not been released yet. (Perhaps, if you want to use a bleeding-edge feature which already is included on Github, but not in the conda-forge package.) @@ -268,1500 +250,1382 @@

Installing a non-released version of Parcels

conda remove --force parcels pip install git+https://github.com/OceanParcels/parcels.git@master
- -
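With either route, a quick sanity check from Python confirms which Parcels version ended up in the active environment:

```py
# Quick check of the installed Parcels version
import parcels
print(parcels.__version__)
```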

-

Installation for developers

+

+

Installation for developers

Parcels depends on a working Python installation, a netCDF installation, a C compiler, and various Python packages. If you prefer to maintain your own Python installation providing all this, git clone the master branch of Parcels and manually install all packages listed under dependencies in the environment files (environment_py3_linux.yml for Linux, environment_py3_osx.yml for OSX and environment_py3_win.yml for Windows), before running python setup.py install --prefix=PREFIX where PREFIX is the path to the directory of your local branch of the parcels code.

-

Installation of Parallel Parcels with MPI

+

Installation of Parallel Parcels with MPI

Parcels uses MPI for parallel execution, but this only works on Linux and macOS. To install it, follow the steps below and see here for further documentation.

-
    -
  1. We strongly encourage to create a new environment for Parallel Parcels: +

    +
      +
    1. We strongly encourage to create a new environment for Parallel Parcels:
      conda create -n py3_parcels_mpi -c conda-forge parcels mpi4py mpich scikit-learn jupyter cartopy ffmpeg
       conda activate py3_parcels_mpi
    2. -

      -
    3. If you are on a Mac, you may need to issue +

      +
    4. If you are on a Mac, you may need to issue
      export CONDA_BUILD_SYSROOT=/
       export C_INCLUDE_PATH=$C_INCLUDE_PATH:/Applications/Xcode.app/Contents//Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/
       
      Where possibly you may need to change the MacOSX10.14.sdk into the macOS version you have.
    5. -

      -
    6. Now, you can run in Parallel with e.g. +

      +
    7. Now, you can run in Parallel with e.g.
      mpirun -np 2 python parcels/examples/example_stommel.py -p 10
       
      Let us know your feedback!
    8. - -
    - -

    -

    Contributing to Parcels development

    +
+

+

Contributing to Parcels development

OceanParcels is a fully open-source community and strongly encourages contributions from users, as also stated in our Contributor Code of Conduct. If you want to help make OceanParcels even better, then please consider contributing to its development. To get started, see this excellent tutorial on the Git workflow and this great general overview of contributing to open source software.
-

Parcels tutorials

+
+

Parcels tutorials

The best way to get started with Parcels is to explore the Jupyter notebooks below. By clicking on the tutorials you can read them on the Jupyter notebook viewer. Due to some issues with the GitHub API some notebooks are temporarily not found, resulting in a 404 Error. Please find the corresponding notebook file by clicking the GitHub tag in the card. If you want to practice by interacting with them, you can either visit mybinder.org or run them on your own device if you have installed them with parcels_get_examples parcels_examples (see 4. in the installation).

-
-
-
-

- +

+
+ +

The Parcels structure tutorial introduces the main building blocks for a Parcels simulation and is therefore a good starting point for new users

-
- -
-
- -
-
-

- +

+ +
+
+
+
+

+ Simple Parcels tutorial -

-
- -

+ +

+ +

This tutorial guides you through a simple example of what you can do with the building blocks described in the General structure tutorial

-
- -
-
- -
-
-

- +

+ +
+
+
+
+

+ Output tutorial -

-
- -

+ +

+ +

A tutorial on how to start analyzing the Parcels output

-
- -
-
-
- -

-

Tutorials on setting up FieldSets

-

-
- +
+
+

+

Tutorials on setting up FieldSets

+

+ - -

-

Tutorials on creating ParticleSets

-

-
- +
+

+

Tutorials on creating ParticleSets

+

+ - -

-

Tutorials on writing kernels to be executed on each particle

-

-
- +
+

+

Tutorials on writing kernels to be executed on each particle

+

+ - -

-

Other tutorials

-

-
- +
+

+

Other tutorials

+

+ - -
- -

Parcels development status

+
+ + +
+ + + +
+

Parcels development status

The current release of Parcels, version 2.4, is a fully-functional, feature-complete code for offline Lagrangian ocean analysis. See below for a list of features, or keep an eye on the Github Development Timeline page -

-

Major features

-
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
- -
-
- -

- -

Experimental features

-
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -

- -

Future development goals

-
-
- -
- -
- -
-
- -
- - -
-
-
-
- +

+

Major features

+
+
+ +
+
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+

+

Experimental features

+
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+

+

Future development goals

+
+
+ +
+
+ +
+
+
+ +
+
+
+
+ Plot bathymetric data -
-
- -

+ +

+ +

A notebook to plot bathymetry with continental contoured-geometries. The used colour map is 'cmo.ice'. Created by Claudio Pierard.

-
- -
-
-
-
-
- +
+ +
+
+
+
+
+ Animating particles with trails -
-
- -

+ +

+ +

A notebook to animate particles with their vanishing trails over past timesteps in an unsteady doublegyre fluid. The background shows the absolute velocity magnitude. Created by Christian Kehl.

-
- -
-
-
-
-
- +
+ +
+
+
+
+
+ Animating particles with tidal background flow -
-
- -

+ +

+ +

A notebook animating particles with a tidally-influenced flow field, animated with continental contoured-geometries using cartopy. Created by Laura Navarro Gomez.

-
- -
-
- -
- -
-
-
-
- +
+ +
+
+
+
+
+
+
+ Animating data on a rotating sphere -
-
- -

+ +

+ +

A notebook animating data on a rotating Earth using cartopy. Created by Peter Nooteboom.

-
- -
-
- -
- -
-

Frequently Asked Questions and further support

+
+ +
+
+
+
+

Frequently Asked Questions and further support

See the FAQ page for further information on using and developing Parcels. This includes information on the Parcels design overview, tips on construction of FieldSet objects, support for writing custom Kernels and an explanation of the zarr output format.

If you need help with Parcels, try the Discussions page on GitHub. There is also an extensive documentation of all methods and classes in Parcels. -
-

Peer-reviewed articles using Parcels

- -
-
-
-