diff --git a/externals/pio2/CMakeLists.txt b/externals/pio2/CMakeLists.txt index 7ddaf9a15b8..ecf7c2ad9e8 100644 --- a/externals/pio2/CMakeLists.txt +++ b/externals/pio2/CMakeLists.txt @@ -14,18 +14,34 @@ mark_as_advanced(VERSION_MAJOR VERSION_MINOR VERSION_PATCH) #===== Library Options ===== option (PIO_ENABLE_FORTRAN "Enable the Fortran library builds" ON) option (PIO_ENABLE_TIMING "Enable the use of the GPTL timing library" ON) +option (PIO_ENABLE_ASYNC "Enable the use of asynchronous IO operations" OFF) +option (PIO_ENABLE_LOGGING "Enable debug logging (large output possible)" OFF) option (PIO_TEST_BIG_ENDIAN "Enable test to see if machine is big endian" ON) option (PIO_USE_MPIIO "Enable support for MPI-IO auto detect" ON) option (PIO_USE_MPISERIAL "Enable mpi-serial support (instead of MPI)" OFF) option (PIO_USE_MALLOC "Use native malloc (instead of bget package)" OFF) option (WITH_PNETCDF "Require the use of PnetCDF" ON) +# Set a variable that appears in the config.h.in file. if(PIO_USE_MALLOC) set(USE_MALLOC 1) else() set(USE_MALLOC 0) endif() +# Set a variable that appears in the config.h.in file. +if(PIO_ENABLE_LOGGING) + set(ENABLE_LOGGING 1) +else() + set(ENABLE_LOGGING 0) +endif() + +if(PIO_USE_MPISERIAL) + set(USE_MPI_SERIAL 1) +else() + set(USE_MPI_SERIAL 0) +endif() + #===== Library Variables ===== set (PIO_FILESYSTEM_HINTS IGNORE CACHE STRING "Filesystem hints (lustre or gpfs)") diff --git a/externals/pio2/ctest/CTestEnvironment-nwsc.cmake b/externals/pio2/ctest/CTestEnvironment-nwsc.cmake index 9289e3d2cc6..bc0f5194e99 100644 --- a/externals/pio2/ctest/CTestEnvironment-nwsc.cmake +++ b/externals/pio2/ctest/CTestEnvironment-nwsc.cmake @@ -10,7 +10,7 @@ # set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. # Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") +set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE -DPIO_ENABLE_ASYNC=TRUE") # If MPISERIAL environment variable is set, then enable MPISERIAL if (DEFINED ENV{MPISERIAL}) diff --git a/externals/pio2/ctest/runcdash-cgd-nag.sh b/externals/pio2/ctest/runcdash-cgd-nag.sh index 44ced786200..f81102d8ff1 100755 --- a/externals/pio2/ctest/runcdash-cgd-nag.sh +++ b/externals/pio2/ctest/runcdash-cgd-nag.sh @@ -8,8 +8,8 @@ else fi module purge -module load compiler/nag/6.0 -module load tool/parallel-netcdf/1.6.1/nag/openmpi +module load compiler/nag/6.1 +module load tool/parallel-netcdf/1.7.0/nag/mvapich2 export CC=mpicc export FC=mpif90 @@ -17,7 +17,7 @@ export PIO_DASHBOARD_SITE="cgd" export PIO_DASHBOARD_ROOT=/scratch/cluster/katec/dashboard export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} -export PIO_COMPILER_ID=Nag-6.0-gcc-`gcc --version | head -n 1 | cut -d' ' -f3` +export PIO_COMPILER_ID=Nag-6.1-gcc-`gcc --version | head -n 1 | cut -d' ' -f3` if [ !
-d "$PIO_DASHBOARD_ROOT" ]; then mkdir "$PIO_DASHBOARD_ROOT" diff --git a/externals/pio2/ctest/runctest-cgd.sh b/externals/pio2/ctest/runctest-cgd.sh index 02c99d10b4a..34233e20cf0 100755 --- a/externals/pio2/ctest/runctest-cgd.sh +++ b/externals/pio2/ctest/runctest-cgd.sh @@ -29,7 +29,7 @@ echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh chmod +x runctest.sh # Submit the job to the queue -jobid=`/usr/local/bin/qsub -l nodes=1:ppn=4 runctest.sh` +jobid=`/usr/local/bin/qsub -l nodes=1:ppn=4 runctest.sh -q short` # Wait for the job to complete before exiting while true; do diff --git a/externals/pio2/doc/CMakeLists.txt b/externals/pio2/doc/CMakeLists.txt index 8e0ffa2e587..0294c950a40 100644 --- a/externals/pio2/doc/CMakeLists.txt +++ b/externals/pio2/doc/CMakeLists.txt @@ -5,17 +5,30 @@ #============================================================================== find_package(Doxygen) + +# This supports the build with/witout async code. Once async code is +# fully merged, remove the definition of C_SRC_FILES and its mention +# in Doxyfile.in for simplicity. +if (PIO_ENABLE_ASYNC) + SET(C_SRC_FILES "@CMAKE_CURRENT_SOURCE_DIR@/../src/clib/bget.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pioc.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pioc_sc.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_darray_async.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_get_nc_async.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_internal.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_nc4.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_put_nc_async.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_spmd.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/bget.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pioc_support.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_lists.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_nc_async.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_varm.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/dtypes.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_file.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_msg.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_rearrange.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/topology.c") +else () + SET(C_SRC_FILES "@CMAKE_CURRENT_SOURCE_DIR@/../src/clib/bget.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pioc.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pioc_sc.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_internal.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_nc4.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_spmd.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/bget.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pioc_support.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_darray.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_get_nc.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_lists.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_put_nc.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_varm.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/dtypes.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_file.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio.h @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_msg.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_nc.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/pio_rearrange.c @CMAKE_CURRENT_SOURCE_DIR@/../src/clib/topology.c") +endif () + if(DOXYGEN_FOUND) - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in - ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) - add_custom_target(doc - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/customdoxygen.css - ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml - 
${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/doxygen.sty - ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMENT "Generating API documentation with Doxygen" VERBATIM) + # Process the Doxyfile using options set during configure. + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in + ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) + + # Copy necessary files. + add_custom_target(doc + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/customdoxygen.css + ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml + ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/doxygen.sty + ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating API documentation with Doxygen" VERBATIM) endif(DOXYGEN_FOUND) diff --git a/externals/pio2/doc/Doxyfile.in b/externals/pio2/doc/Doxyfile.in index 0cafd81717d..c39e69b13d2 100644 --- a/externals/pio2/doc/Doxyfile.in +++ b/externals/pio2/doc/Doxyfile.in @@ -291,7 +291,7 @@ OPTIMIZE_OUTPUT_VHDL = NO # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. -EXTENSION_MAPPING = F90=FortranFree +EXTENSION_MAPPING = F90=FortranFree # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable @@ -769,11 +769,15 @@ WARN_LOGFILE = # Note: If this tag is empty the current directory is searched. INPUT = @CMAKE_CURRENT_SOURCE_DIR@/source \ - @CMAKE_CURRENT_SOURCE_DIR@/../src/clib \ @CMAKE_CURRENT_SOURCE_DIR@/../src/flib \ @CMAKE_CURRENT_SOURCE_DIR@/../examples/c \ @CMAKE_CURRENT_SOURCE_DIR@/../examples/f03 \ - @CMAKE_BINARY_DIR@/src/flib + @CMAKE_BINARY_DIR@/src/clib \ + @CMAKE_BINARY_DIR@/src/flib \ + @C_SRC_FILES@ + +# Uncomment this after the async code is fully merged into PIO. +# @CMAKE_CURRENT_SOURCE_DIR@/../src/clib # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses diff --git a/externals/pio2/doc/source/Error.txt b/externals/pio2/doc/source/Error.txt index 774e11039d7..72c0da23e20 100644 --- a/externals/pio2/doc/source/Error.txt +++ b/externals/pio2/doc/source/Error.txt @@ -1,19 +1,19 @@ /****************************************************************************** * + * * - * - * Copyright (C) 2009 + * Copyright (C) 2009 * * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software + * documentation under the terms of the GNU General Public License is hereby + * granted. No representations are made about the suitability of this software * for any purpose. It is provided "as is" without express or implied warranty. * See the GNU General Public License for more details. * * Documents produced by Doxygen are derivative works derived from the * input used in their production; they are not affected by this license. * - */ /*! + */ /*! \page error Error Handling By default, PIO handles errors internally by printing a string @@ -21,9 +21,6 @@ describing the error and then calling mpi_abort. 
Application developers can change this behavior with a call to \ref PIO_seterrorhandling -For example, if a developer wanted -to see if an input netcdf file contained the variable 'U' they might do the following: - \verbinclude errorhandle \copydoc PIO_error_method diff --git a/externals/pio2/doc/source/Examples.txt b/externals/pio2/doc/source/Examples.txt index 85da90cddfd..e18d2926c7c 100644 --- a/externals/pio2/doc/source/Examples.txt +++ b/externals/pio2/doc/source/Examples.txt @@ -1,12 +1,12 @@ /****************************************************************************** * + * * - * - * Copyright (C) 2009 + * Copyright (C) 2009 * * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software + * documentation under the terms of the GNU General Public License is hereby + * granted. No representations are made about the suitability of this software * for any purpose. It is provided "as is" without express or implied warranty. * See the GNU General Public License for more details. * @@ -41,11 +41,13 @@ The Fortran examples are in the examples/f03 subdirectory. - examplePio.f90 A simple example showing a write, then read, of a 1D variable. - PIO has been implemented in several geophysical component models, including the -Community Atmosphere Model (CAM), the Community Land Model (CLM), the Parallel Ocean Program -(POP), the Community Ice CodE (CICE), and coupler for used by CCSM4.0 (CPL7). We also provide -several simpler example code as well as a test code that is suitable for regression testing and -benchmarking. +### Other Examples + + PIO has been implemented in several geophysical component models, including the +Community Atmosphere Model (CAM), the Community Land Model (CLM), the Parallel Ocean Program +(POP), the Community Ice CodE (CICE), and the coupler used by CCSM4.0 (CPL7). We also provide +several simpler example codes as well as a test code that is suitable for regression testing and +benchmarking. - \subpage CAMexample - \subpage testpio_example diff --git a/externals/pio2/doc/source/Installing.txt b/externals/pio2/doc/source/Installing.txt index cf4cec0101e..6f281152248 100644 --- a/externals/pio2/doc/source/Installing.txt +++ b/externals/pio2/doc/source/Installing.txt @@ -1,12 +1,12 @@ /****************************************************************************** * - * + * * * Copyright (C) 2013 * * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software + * documentation under the terms of the GNU General Public License is hereby + * granted. No representations are made about the suitability of this software * for any purpose. It is provided "as is" without express or implied warranty. * See the GNU General Public License for more details. * @@ -20,9 +20,9 @@ The PIO code is currently stored on github at setenv DAV_CORES 4 + > execca ctest + +## PIO2 Performance Test + +To run the performance tests, you will need to add two files to the **tests/performance** subdirectory of the PIO build directory. First, you will need a decomp file. You can download one from our Subversion server here: +https://svn-ccsm-piodecomps.cgd.ucar.edu/trunk/ . +You can use any of these files, and save them to your home or base work directory.
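+For example, one way to fetch a decomp file is with an svn client (a sketch only; the file name shown is just the one used in the sample namelist below, and is assumed to exist under trunk): + + %> svn export https://svn-ccsm-piodecomps.cgd.ucar.edu/trunk/piodecomp30tasks01dims06.dat + +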
Second, you will need to add a namelist file, named "pioperf.nl". Save this file in the directory with your **pioperf** executable (this is found in the **tests/performance** subdirectory of the PIO build directory). + + +The contents of the namelist file should look like: + + &pioperf + + decompfile = "/u/home/user/piodecomp30tasks01dims06.dat" + + pio_typenames = 'pnetcdf' + + niotasks = 30 + + rearrangers = 1 + + nvars = 2 + + / + +Here, the second line ("decompfile") points to the path for your decomp file (wherever you saved it). For the rest of the lines, each item added to the list adds another test to be run. For instance, to test all of the types of supported IO, your pio_typenames would look like: + + pio_typenames = 'pnetcdf','netcdf','netcdf4p','netcdf4c' + +HDF5 is netcdf4p, and Parallel-NetCDF is pnetcdf. + +To test with different numbers of IO tasks, you could do: + + niotasks = 30,15,5 + +(These tasks are the subset of the run tasks that are designated IO tasks) + +To test with both of the rearranger algorithms: + + rearrangers = 1,2 + +(Each rearranger is a different algorithm for converting from data in memory to data in a file on disk. The first, BOX, is the older method from PIO1; the second, SUBSET, is a newer method that seems to be more efficient at large numbers of tasks) + +To test with different numbers of variables: + + nvars = 8,5,3,2 + +(Usually, the more variables you use, the higher the data throughput) + +To run, submit a job with 'pioperf' as the executable, and at least as many tasks as you have specified in the decomposition file. On yellowstone, a submit script could look like: + + #!/bin/tcsh + + #BSUB -P P00000000 # project code + #BSUB -W 00:10 # wall-clock time (hrs:mins) + #BSUB -n 30 # number of tasks in job + #BSUB -R "span[ptile=16]" # run 16 MPI tasks per node + #BSUB -J pio_perftest # job name + #BSUB -o pio_perftest.%J.out # output file name in which %J is replaced by the job ID + #BSUB -e pio_perftest.%J.err # error file name in which %J is replaced by the job ID + #BSUB -q small # queue + + #run the executable + mpirun.lsf /glade/p/work/katec/pio_work/pio_build/tests/performance/pioperf + +The result(s) will look like a line in the output file such as: +~~~~~~~~~~~~~~ +RESULT: write BOX 4 30 2 16.9905924688 +~~~~~~~~~~~~~~ + +You can decode this as: +1. Read/write describes the IO operation performed +2. BOX/SUBSET is the algorithm for the rearranger (as described above) +3. 4 [1-4] is the IO library used for the operation. The options here are [1] Parallel-NetCDF [2] NetCDF3 [3] NetCDF4-Compressed [4] NetCDF4-Parallel +4. 30 [any number] is the number of IO-specific tasks used in the operation. Must be less than the number of MPI tasks used in the test. +5. 2 [any number] is the number of variables read or written during the operation +6. 16.9905924688 [any number] is the data rate of the operation in MB/s. This is the important value for determining performance of the system. The higher this number is, the better the PIO2 library is performing for the given operation.
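+ +For example, to collect the RESULT lines from several job logs and rank them by data rate (a sketch, assuming the output file naming used in the submit script above): + + %> grep RESULT pio_perftest.*.out | sort -n -k 7 + +Field 7 is the data rate in MB/s, so the best-performing combinations of rearranger, IO library, task count, and variable count sort last.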
+ +_Last updated: 05-17-2016_ +*/ diff --git a/externals/pio2/doc/source/base.txt b/externals/pio2/doc/source/base.txt index 41c5b21089d..2108786f13c 100644 --- a/externals/pio2/doc/source/base.txt +++ b/externals/pio2/doc/source/base.txt @@ -1,20 +1,20 @@ /****************************************************************************** * + * * - * - * Copyright (C) 2009 + * Copyright (C) 2009 * * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software + * documentation under the terms of the GNU General Public License is hereby + * granted. No representations are made about the suitability of this software * for any purpose. It is provided "as is" without express or implied warranty. * See the GNU General Public License for more details. * * Documents produced by Doxygen are derivative works derived from the * input used in their production; they are not affected by this license. * - */ -/*! + */ +/*! \mainpage Parallel I/O library (PIO) @@ -35,13 +35,15 @@ PIO2 represents a significant rewrite of the PIO library and includes a C API as well as the original F90 API. A new decomposition strategy has been introduced which gives the user more ability to tune io communications. -This user's guide provides information about the PIO library and examples on how it can be used. +This user's guide provides information about the PIO library and examples on how it can be used. Please review the ChangeLog that is included with the distribution for up-to-date release information. - \ref intro - \ref install + - \ref mach_walkthrough - \ref decomp - \ref error + - \ref test - \ref examp - \ref faq - \ref api diff --git a/externals/pio2/doc/source/mach_walkthrough.txt b/externals/pio2/doc/source/mach_walkthrough.txt index 7741c44e8e4..a208e132956 100644 --- a/externals/pio2/doc/source/mach_walkthrough.txt +++ b/externals/pio2/doc/source/mach_walkthrough.txt @@ -44,7 +44,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load git/2.3.0
%> module load cmake/3.0.2
%> module load netcdf-mpi/4.3.3.1
- %> module load pnetcdf/1.6.0
+ %> module load pnetcdf/1.6.1
+ GNU @@ -54,7 +54,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load git/2.3.0
%> module load cmake/3.0.2
%> module load netcdf-mpi/4.3.3.1
- %> module load pnetcdf/1.6.0
+ %> module load pnetcdf/1.6.1
+ PGI @@ -64,7 +64,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load git/2.3.0
%> module load cmake/3.0.2
%> module load netcdf-mpi/4.3.3.1
- %> module load pnetcdf/1.6.0
+ %> module load pnetcdf/1.6.1
  • Environment Variables @@ -103,7 +103,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load cmake/3.0.0
    %> module load cray-hdf5-parallel/1.8.14
    %> module load cray-netcdf-hdf5parallel/4.3.3.1
    - %> module load cray-parallel-netcdf/1.6.0
    + %> module load cray-parallel-netcdf/1.6.1
    + GNU @@ -117,7 +117,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load cmake/3.0.0
    %> module load cray-hdf5-parallel/1.8.14
    %> module load cray-netcdf-hdf5parallel/4.3.3.1
    - %> module load cray-parallel-netcdf/1.6.0
    + %> module load cray-parallel-netcdf/1.6.1
    + Cray @@ -132,7 +132,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load cmake/3.0.0
    %> module load cray-hdf5-parallel/1.8.14
    %> module load cray-netcdf-hdf5parallel/4.3.3.1
    - %> module load cray-parallel-netcdf/1.6.0
    + %> module load cray-parallel-netcdf/1.6.1
  • Environment Variables @@ -178,7 +178,7 @@ And then set the following environment variables to add in the rest of the libra %> setenv LIBZ /soft/libraries/alcf/current/xl/ZLIB
    %> setenv HDF5 /soft/libraries/hdf5/1.8.14/cnk-xl/V1R2M2-20150213
    %> setenv NETCDF /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/V1R2M2-20150213
    - %> setenv PNETCDF /soft/libraries/pnetcdf/1.6.0/cnk-xl/V1R2M2-20150213
    + %> setenv PNETCDF /soft/libraries/pnetcdf/1.6.1/cnk-xl/V1R2M2-20150213
    %> setenv CC /soft/compilers/wrappers/xl/mpixlc_r
    %> setenv FC /soft/compilers/wrappers/xl/mpixlf90_r
    @@ -209,7 +209,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load cmake
    %> module load cray-hdf5-parallel/1.8.14
    %> module load cray-netcdf-hdf5parallel/4.3.3.1
    - %> module load cray-parallel-netcdf/1.6.0
    + %> module load cray-parallel-netcdf/1.6.1
    + PGI @@ -219,7 +219,7 @@ Modules required for installation depend on your prefered compiler. Issue the co %> module load cmake
    %> module load cray-hdf5-parallel/1.8.14
    %> module load cray-netcdf-hdf5parallel/4.3.3.1
    - %> module load cray-parallel-netcdf/1.6.0
    + %> module load cray-parallel-netcdf/1.6.1
  • Environment Variables @@ -477,5 +477,5 @@ variables to these tests will increase the time significantly. -_Last updated: 11-18-2015_ +_Last updated: 05-16-2016_ */ diff --git a/externals/pio2/examples/c/example2.c b/externals/pio2/examples/c/example2.c index f4b75d075b7..d86af67774c 100644 --- a/externals/pio2/examples/c/example2.c +++ b/externals/pio2/examples/c/example2.c @@ -48,8 +48,8 @@ * responsibilty for writing and reading them will be spread between * all the processors used to run this example. */ /**@{*/ -#define X_DIM_LEN 400 -#define Y_DIM_LEN 400 +#define X_DIM_LEN 20 +#define Y_DIM_LEN 30 /**@}*/ /** The number of timesteps of data to write. */ diff --git a/externals/pio2/examples/c/valsupp_example1.supp b/externals/pio2/examples/c/valsupp_example1.supp new file mode 100644 index 00000000000..63f3e073836 --- /dev/null +++ b/externals/pio2/examples/c/valsupp_example1.supp @@ -0,0 +1,15 @@ +{ + cond_jump_1 + Memcheck:Cond + fun:MPIC_Waitall + fun:MPIR_Alltoallw_intra + fun:MPIR_Alltoallw + fun:MPIR_Alltoallw_impl + fun:PMPI_Alltoallw + fun:pio_swapm + fun:rearrange_comp2io + fun:PIOc_write_darray_multi + fun:flush_buffer + fun:PIOc_sync + fun:main +} \ No newline at end of file diff --git a/externals/pio2/examples/f03/examplePio.f90 b/externals/pio2/examples/f03/examplePio.f90 index 555998aed43..d2baddf2096 100644 --- a/externals/pio2/examples/f03/examplePio.f90 +++ b/externals/pio2/examples/f03/examplePio.f90 @@ -10,7 +10,7 @@ module pioExample use pio, only : PIO_nowrite, PIO_openfile implicit none - save + private include 'mpif.h' @@ -20,7 +20,7 @@ module pioExample integer, parameter :: LEN = 16 !> @brief Value used for array that will be written to netcdf file. - integer, parameter :: VAL = 42 + integer, parameter :: VAL = 42 !> @brief Error code if anything goes wrong. integer, parameter :: ERR_CODE = 99 @@ -40,7 +40,7 @@ module pioExample integer :: niotasks !> @brief Stride in the mpi rank between io tasks. - integer :: stride + integer :: stride !> @brief Number of aggregator. integer :: numAggregator @@ -181,7 +181,7 @@ subroutine init(this) this%pioIoSystem, & ! iosystem base=this%optBase) ! base (optional argument) - ! + ! ! set up some data that we will write to a netcdf file ! @@ -353,7 +353,7 @@ end module pioExample !! - read the sample data with @ref PIO_read_darray. !! !! - close the netCDF file with @ref PIO_closefile. -!! +!! !! - clean up local memory, ParallelIO library resources with @ref !! PIO_freedecomp and @ref PIO_finalize, and MPI library resources. !! 
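As a quick sketch of how the new build options introduced in this change fit together (the source path below is a placeholder):

    %> cmake -DPIO_ENABLE_ASYNC=ON -DPIO_ENABLE_LOGGING=ON /path/to/pio2-source
    %> make

With PIO_ENABLE_LOGGING=ON, the configure step sets ENABLE_LOGGING, which becomes the PIO_ENABLE_LOGGING macro in config.h; PIO_ENABLE_ASYNC swaps in the *_async sources, as in doc/CMakeLists.txt above and src/clib/CMakeLists.txt below.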
diff --git a/externals/pio2/src/clib/CMakeLists.txt b/externals/pio2/src/clib/CMakeLists.txt index bb08376a3c3..8c01336969c 100644 --- a/externals/pio2/src/clib/CMakeLists.txt +++ b/externals/pio2/src/clib/CMakeLists.txt @@ -14,15 +14,21 @@ set (PIO_C_SRCS topology.c pioc_sc.c pio_spmd.c pio_rearrange.c - pio_darray.c + pio_nc4.c bget.c) -set (PIO_GENNC_SRCS ${CMAKE_CURRENT_BINARY_DIR}/pio_nc.c - ${CMAKE_CURRENT_BINARY_DIR}/pio_nc4.c - ${CMAKE_CURRENT_BINARY_DIR}/pio_put_nc.c - ${CMAKE_CURRENT_BINARY_DIR}/pio_get_nc.c) +set (PIO_GENNC_SRCS ${CMAKE_CURRENT_BINARY_DIR}/pio_put_nc.c + ${CMAKE_CURRENT_BINARY_DIR}/pio_get_nc.c + ${CMAKE_CURRENT_BINARY_DIR}/pio_nc.c) -add_library (pioc ${PIO_C_SRCS} ${PIO_GENNC_SRCS}) +if (PIO_ENABLE_ASYNC) + set (PIO_ADDL_SRCS pio_nc_async.c pio_put_nc_async.c pio_get_nc_async.c + pio_msg.c pio_varm.c pio_darray_async.c) +else () + set (PIO_ADDL_SRCS pio_darray.c ${PIO_GENNC_SRCS}) +endif () + +add_library (pioc ${PIO_C_SRCS} ${PIO_ADDL_SRCS}) # set up include-directories include_directories( @@ -67,7 +73,7 @@ install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/pio.h DESTINATION include) #============================================================================== # DEFINE THE DEPENDENCIES #============================================================================== - + #===== MPI ===== if (PIO_USE_MPISERIAL) find_package (MPISERIAL COMPONENTS C REQUIRED) @@ -90,7 +96,7 @@ if (PIO_ENABLE_TIMING) find_package (GPTL COMPONENTS C QUIET) if (GPTL_C_FOUND) message (STATUS "Found GPTL C: ${GPTL_C_LIBRARIES}") - target_include_directories (pioc + target_include_directories (pioc PUBLIC ${GPTL_C_INCLUDE_DIRS}) target_link_libraries (pioc PUBLIC ${GPTL_C_LIBRARIES}) @@ -106,18 +112,18 @@ endif () #===== NetCDF-C ===== find_package (NetCDF "4.3.3" COMPONENTS C) if (NetCDF_C_FOUND) - target_include_directories (pioc + target_include_directories (pioc PUBLIC ${NetCDF_C_INCLUDE_DIRS}) - target_compile_definitions (pioc + target_compile_definitions (pioc PUBLIC _NETCDF) target_link_libraries (pioc PUBLIC ${NetCDF_C_LIBRARIES}) if (${NetCDF_C_HAS_PARALLEL}) - target_compile_definitions (pioc + target_compile_definitions (pioc PUBLIC _NETCDF4) endif () else () - target_compile_definitions (pioc + target_compile_definitions (pioc PUBLIC _NONETCDF) endif () @@ -126,9 +132,9 @@ if (WITH_PNETCDF) find_package (PnetCDF "1.6" COMPONENTS C REQUIRED) endif () if (PnetCDF_C_FOUND) - target_include_directories (pioc + target_include_directories (pioc PUBLIC ${PnetCDF_C_INCLUDE_DIRS}) - target_compile_definitions (pioc + target_compile_definitions (pioc PUBLIC _PNETCDF) target_link_libraries (pioc PUBLIC ${PnetCDF_C_LIBRARIES}) @@ -140,9 +146,9 @@ if (PnetCDF_C_FOUND) target_compile_definitions(pioc PUBLIC USE_PNETCDF_VARN PUBLIC USE_PNETCDF_VARN_ON_READ) - endif() + endif() else () - target_compile_definitions (pioc + target_compile_definitions (pioc PUBLIC _NOPNETCDF) endif () @@ -156,7 +162,7 @@ target_compile_options (pioc target_compile_definitions (pioc PUBLIC ${PIO_C_EXTRA_COMPILE_DEFINITIONS}) if (PIO_C_EXTRA_LINK_FLAGS) - set_target_properties(pioc PROPERTIES + set_target_properties(pioc PROPERTIES LINK_FLAGS ${PIO_C_EXTRA_LINK_FLAGS}) endif () @@ -172,7 +178,7 @@ if (PnetCDF_C_FOUND AND NetCDF_C_FOUND AND PIO_GENERATE_SOURCES_FROM_TEMPLATES) pio_c_put_template.c COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/pio_c_get_template.c pio_c_get_template.c - COMMAND ${PERL_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/ncputgetparser.pl + COMMAND ${PERL_EXECUTABLE} 
${CMAKE_CURRENT_SOURCE_DIR}/ncputgetparser.pl ${NetCDF_C_INCLUDE_DIR} ${PnetCDF_C_INCLUDE_DIR} COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/pio_c_template.c pio_c_template.c @@ -186,7 +192,7 @@ if (PnetCDF_C_FOUND AND NetCDF_C_FOUND AND PIO_GENERATE_SOURCES_FROM_TEMPLATES) else () message (FATAL_ERROR "Need Perl to create PIO C source files from templates") endif () - + # If both NetCDF and PnetCDF are NOT found, then just copy existing source files else () add_custom_command (OUTPUT ${PIO_GENNC_SRCS} @@ -196,12 +202,9 @@ else () pio_get_nc.c COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/pio_nc.c pio_nc.c - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/pio_nc4.c - pio_nc4.c DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/pio_put_nc.c ${CMAKE_CURRENT_SOURCE_DIR}/pio_get_nc.c - ${CMAKE_CURRENT_SOURCE_DIR}/pio_nc.c - ${CMAKE_CURRENT_SOURCE_DIR}/pio_nc4.c) + ${CMAKE_CURRENT_SOURCE_DIR}/pio_nc.c) endif () diff --git a/externals/pio2/src/clib/bget.c b/externals/pio2/src/clib/bget.c index a7f4744a3e1..020fb724b76 100644 --- a/externals/pio2/src/clib/bget.c +++ b/externals/pio2/src/clib/bget.c @@ -397,7 +397,6 @@ BGET CONFIGURATION ================== */ -//#define PIO_USE_MALLOC 1 #ifdef PIO_USE_MALLOC #include #endif @@ -419,7 +418,7 @@ #define BufValid 1 /* Define this symbol to enable the bpoolv() function for validating - a buffer pool. */ + a buffer pool. */ #define DumpData 1 /* Define this symbol to enable the bufdump() function which allows @@ -439,7 +438,7 @@ pointers into released buffers. */ //#define BestFit 1 -#undef BestFit +#undef BestFit /* Use a best fit algorithm when searching for space for an allocation request. This uses @@ -577,7 +576,7 @@ void bpoolrelease() numpblk = 0; /* Number of pool blocks */ numpget = 0; numprel = 0; /* Number of block gets and rels */ - numdget = 0; + numdget = 0; numdrel = 0; /* Number of direct gets and rels */ #endif /* BECtl */ #endif /* BufStats */ @@ -587,7 +586,7 @@ void bpoolrelease() compfcn = NULL; acqfcn = NULL; relfcn = NULL; - exp_incr = 0; + exp_incr = 0; pool_len = 0; #endif @@ -618,7 +617,7 @@ void *bget(requested_size) buf = malloc(requested_size); // printf("bget allocate %ld %x\n",requested_size,buf); return(buf); -#endif +#endif if(size<=0) @@ -773,7 +772,7 @@ void *bget(requested_size) /*only let this happen once */ printf("%s %d memory request exceeds block size %d %d %x\n",__FILE__,__LINE__,size,exp_incr,buf); exp_incr = size+sizeof(struct bhead); - + return buf; } @@ -881,7 +880,7 @@ void brel(buf) // printf("bget free %d %x\n",__LINE__,buf); free(buf); return; -#endif +#endif if(buf==NULL) return; /* allow for null buffer */ diff --git a/externals/pio2/src/clib/config.h.in b/externals/pio2/src/clib/config.h.in index ded06e0aa42..1722872c305 100644 --- a/externals/pio2/src/clib/config.h.in +++ b/externals/pio2/src/clib/config.h.in @@ -1,4 +1,4 @@ -/** @file +/** @file * * This is the template for the config.h file, which is created at * build-time by cmake. @@ -19,4 +19,7 @@ * will use the included bget() package for memory management. */ #define PIO_USE_MALLOC @USE_MALLOC@ +/** Set to non-zero to turn on logging. Output may be large. */ +#define PIO_ENABLE_LOGGING @ENABLE_LOGGING@ + #endif /* _PIO_CONFIG_ */ diff --git a/externals/pio2/src/clib/pio.h b/externals/pio2/src/clib/pio.h index 5b8446bf21e..07f4b8b20c5 100644 --- a/externals/pio2/src/clib/pio.h +++ b/externals/pio2/src/clib/pio.h @@ -1,11 +1,8 @@ /** * @file + * Public headers for the PIO C interface. 
* @author Jim Edwards * @date 2014 - * @brief Public headers for the PIO C interface. - * - * - * * * @see http://code.google.com/p/parallelio/ */ @@ -43,209 +40,292 @@ /** The maximum number of variables allowed in a netCDF file. */ #define PIO_MAX_VARS NC_MAX_VARS - /** - * @brief Variable description structure - * - * The variable record is the index into the unlimited dimension in the netcdf file - * typically this is the time dimension. - * ndims is the number of dimensions on the file for this variable - * request is the id of each outstanding pnetcdf request for this variable - * nreqs is the number of outstanding pnetcdf requests for this variable - * fillbuf is a memory buffer to hold fill values for this variable (write only) - * iobuf is a memory buffer to hold (write only) -*/ + * Variable description structure. + */ typedef struct var_desc_t { - int record; - int ndims; + /** The unlimited dimension in the netCDF file (typically the time + * dimension). -1 if there is no unlimited dimension. */ + int record; + + /** Number of dimensions for this variable. */ + int ndims; + + /** ID of each outstanding pnetcdf request for this variable. */ + int *request; - int *request; // used for pnetcdf iput calls - int nreqs; - void *fillbuf; - void *iobuf; + /** Number of requests pending with pnetcdf. */ + int nreqs; + /** Buffer that contains the fill value for this variable. */ + void *fillbuf; + + /** ??? */ + void *iobuf; } var_desc_t; /** - * @brief io region structure + * IO region structure. * * Each IO region is a unit of data which can be described using start and count - * arrays. Each IO task may in general have multiple io regions per variable. The + * arrays. Each IO task may in general have multiple io regions per variable. The * box rearranger will have at most one io region per variable. * -*/ + */ typedef struct io_region { - int loffset; - PIO_Offset *start; - PIO_Offset *count; - struct io_region *next; + int loffset; + PIO_Offset *start; + PIO_Offset *count; + struct io_region *next; } io_region; /** - * @brief io descriptor structure + * IO descriptor structure. * * This structure defines the mapping for a given variable between * compute and IO decomposition. - * -*/ + */ typedef struct io_desc_t { - int ioid; - int async_id; - int nrecvs; - int ndof; - int ndims; - int num_aiotasks; - int rearranger; - int maxregions; - bool needsfill; // Does this decomp leave holes in the field (true) or write everywhere (false) - int maxbytes; // maximum number of bytes of this iodesc before flushing - MPI_Datatype basetype; - PIO_Offset llen; - int maxiobuflen; - PIO_Offset *gsize; - - int *rfrom; - int *rcount; - int *scount; - PIO_Offset *sindex; - PIO_Offset *rindex; - - MPI_Datatype *rtype; - MPI_Datatype *stype; - int num_stypes; - int holegridsize; - int maxfillregions; - io_region *firstregion; - io_region *fillregion; - - - bool handshake; - bool isend; - int max_requests; + /** The ID of this io_desc_t. */ + int ioid; + int async_id; + int nrecvs; + int ndof; + int ndims; + int num_aiotasks; + int rearranger; + int maxregions; + bool needsfill; // Does this decomp leave holes in the field (true) or write everywhere (false) + + /** The maximum number of bytes of this iodesc before flushing.
*/ + int maxbytes; + MPI_Datatype basetype; + PIO_Offset llen; + int maxiobuflen; + PIO_Offset *gsize; + + int *rfrom; + int *rcount; + int *scount; + PIO_Offset *sindex; + PIO_Offset *rindex; + + MPI_Datatype *rtype; + MPI_Datatype *stype; + int num_stypes; + int holegridsize; + int maxfillregions; + io_region *firstregion; + io_region *fillregion; + + bool handshake; + bool isend; + int max_requests; - MPI_Comm subset_comm; - struct io_desc_t *next; + MPI_Comm subset_comm; + + /** Pointer to the next io_desc_t in the list. */ + struct io_desc_t *next; } io_desc_t; /** - * @brief io system descriptor structure + * IO system descriptor structure. * - * This structure contains the general IO subsystem data - * and MPI structure - * -*/ + * This structure contains the general IO subsystem data and MPI + * structure + */ typedef struct iosystem_desc_t { - int iosysid; - MPI_Comm union_comm; - MPI_Comm io_comm; - MPI_Comm comp_comm; - MPI_Comm intercomm; - MPI_Comm my_comm; - - /** This MPI group contains the processors involved in - * computation. It is created in PIOc_Init_Intracomm(), and freed my - * PIO_finalize(). */ - MPI_Group compgroup; + /** The ID of this iosystem_desc_t. This will be obtained by + * calling PIOc_Init_Intercomm() or PIOc_Init_Intracomm(). */ + int iosysid; + + /** This is an MPI intra communicator that includes all the tasks in + * both the IO and the computation communicators. */ + MPI_Comm union_comm; + + /** This is an MPI intra communicator that includes all the tasks + * involved in IO. */ + MPI_Comm io_comm; + + /** This is an MPI intra communicator that includes all the tasks + * involved in computation. */ + MPI_Comm comp_comm; + + /** This is an MPI inter communicator between IO communicator and + * computation communicator. */ + MPI_Comm intercomm; + + /** This is a copy (but not an MPI copy) of either the comp (for + * non-async) or the union (for async) communicator. */ + MPI_Comm my_comm; + + /** This MPI group contains the processors involved in + * computation. */ + MPI_Group compgroup; - /** This MPI group contains the processors involved in I/O. It is - * created in PIOc_Init_Intracomm(), and freed my PIOc_finalize(). */ - MPI_Group iogroup; - - int num_iotasks; - int num_comptasks; + /** This MPI group contains the processors involved in I/O. */ + MPI_Group iogroup; - int union_rank; - int comp_rank; - int io_rank; + /** The number of tasks in the IO communicator. */ + int num_iotasks; - bool iomaster; - bool compmaster; + /** The number of tasks in the computation communicator. */ + int num_comptasks; - int ioroot; - int comproot; - int *ioranks; + /** Rank of this task in the union communicator. */ + int union_rank; - int error_handler; - int default_rearranger; + /** The rank of this process in the computation communicator, or -1 + * if this process is not part of the computation communicator. */ + int comp_rank; - bool async_interface; - bool ioproc; - - MPI_Info info; - struct iosystem_desc_t *next; + /** The rank of this process in the IO communicator, or -1 if this + * process is not part of the IO communicator. */ + int io_rank; + + /** Set to MPI_ROOT if this task is the master of IO communicator, 0 + * otherwise. */ + int iomaster; + + /** Set to MPI_ROOT if this task is the master of comp communicator, 0 + * otherwise. */ + int compmaster; + + /** Rank of IO root task (which is rank 0 in io_comm) in the union + * communicator. 
*/ + int ioroot; + + /** Rank of computation root task (which is rank 0 in + * comm_comms[cmp]) in the union communicator. */ + int comproot; + + /** An array of the ranks of all IO tasks within the union + * communicator. */ + int *ioranks; + + /** Controls how errors are handled. */ + int error_handler; + + /** The rearranger decides which parts of a distributed array are + * handled by which IO tasks. */ + int default_rearranger; + + /** True if asynchronous interface is in use. */ + bool async_interface; + + /** True if this task is a member of the IO communicator. */ + bool ioproc; + + /** MPI Info object. */ + MPI_Info info; + + /** Pointer to the next iosystem_desc_t in the list. */ + struct iosystem_desc_t *next; } iosystem_desc_t; /** - * @brief multi buffer - -*/ + * multi buffer. + */ typedef struct wmulti_buffer { - int ioid; - int validvars; - int arraylen; - int *vid; - int *frame; - void *fillvalue; - void *data; - struct wmulti_buffer *next; + int ioid; + int validvars; + int arraylen; + int *vid; + int *frame; + void *fillvalue; + void *data; + struct wmulti_buffer *next; } wmulti_buffer; - - /** - * @brief io system descriptor structure + * File descriptor structure. * * This structure holds information associated with each open file - -*/ + */ typedef struct file_desc_t { - iosystem_desc_t *iosystem; - PIO_Offset buffsize; - int fh; - int iotype; - struct var_desc_t varlist[PIO_MAX_VARS]; - int mode; - struct wmulti_buffer buffer; - struct file_desc_t *next; + /** The IO system ID used to open this file. */ + iosystem_desc_t *iosystem; + + /** The buffersize does not seem to be used anywhere. */ + /* PIO_Offset buffsize;*/ + + /** The ncid returned for this file by the underlying library + * (netcdf or pnetcdf). */ + int fh; + + /** The PIO_TYPE value that was used to open this file. */ + int iotype; + + /** List of variables in this file. */ + struct var_desc_t varlist[PIO_MAX_VARS]; + + /** ??? */ + int mode; + + /** ??? */ + struct wmulti_buffer buffer; + + /** Pointer to the next file_desc_t in the list of open files. */ + struct file_desc_t *next; + + /** True if this task should participate in IO (only true for one + * task with netcdf serial files). */ + int do_io; } file_desc_t; /** - * @brief These are the supported output formats - -*/ + * These are the supported methods of reading/writing netCDF + * files. (Not all methods can be used with all netCDF files.) + */ +enum PIO_IOTYPE +{ + /** Parallel Netcdf (parallel) */ + PIO_IOTYPE_PNETCDF = 1, -enum PIO_IOTYPE{ - PIO_IOTYPE_PNETCDF=1, //< Parallel Netcdf (parallel) - PIO_IOTYPE_NETCDF=2, //< Netcdf3 Classic format (serial) - PIO_IOTYPE_NETCDF4C=3, //< NetCDF4 (HDF5) compressed format (serial) - PIO_IOTYPE_NETCDF4P=4 //< NetCDF4 (HDF5) parallel + /** Netcdf3 Classic format (serial) */ + PIO_IOTYPE_NETCDF = 2, + + /** NetCDF4 (HDF5) compressed format (serial) */ + PIO_IOTYPE_NETCDF4C = 3, + + /** NetCDF4 (HDF5) parallel */ + PIO_IOTYPE_NETCDF4P = 4 }; /** - * @brief These are the supported output data rearrangement methods - -*/ -enum PIO_REARRANGERS{ - PIO_REARR_BOX = 1, - PIO_REARR_SUBSET = 2 + * These are the supported output data rearrangement methods. + */ +enum PIO_REARRANGERS +{ + /** Box rearranger. */ + PIO_REARR_BOX = 1, + + /** Subset rearranger.
*/ + PIO_REARR_SUBSET = 2 }; /** - * @brief These are the supported error handlers - * -*/ -enum PIO_ERROR_HANDLERS{ - PIO_INTERNAL_ERROR=(-51), //< Errors cause abort - PIO_BCAST_ERROR=(-52), //< Error codes are broadcast to all tasks - PIO_RETURN_ERROR=(-53) //< Errors are returned to caller with no internal action + * These are the supported error handlers. + */ +enum PIO_ERROR_HANDLERS +{ + /** Errors cause abort. */ + PIO_INTERNAL_ERROR = (-51), + + /** Error codes are broadcast to all tasks. */ + PIO_BCAST_ERROR = (-52), + + /** Errors are returned to caller with no internal action. */ + PIO_RETURN_ERROR = (-53) }; +/** Define the netCDF-based error codes. */ #if defined( _PNETCDF) || defined(_NETCDF) #define PIO_GLOBAL NC_GLOBAL #define PIO_UNLIMITED NC_UNLIMITED @@ -338,269 +418,283 @@ enum PIO_ERROR_HANDLERS{ #define PIO_EBADCHUNK NC_EBADCHUNK #define PIO_ENOTBUILT NC_ENOTBUILT #define PIO_EDISKLESS NC_EDISKLESS - #define PIO_FILL_DOUBLE NC_FILL_DOUBLE #define PIO_FILL_FLOAT NC_FILL_FLOAT #define PIO_FILL_INT NC_FILL_INT #define PIO_FILL_CHAR NC_FILL_CHAR +#endif /* defined( _PNETCDF) || defined(_NETCDF) */ -#endif +/** Define the extra error codes for the parallel-netcdf library. */ #ifdef _PNETCDF #define PIO_EINDEP NC_EINDEP -#else +#else /* _PNETCDF */ #define PIO_EINDEP (-203) -#endif -#if defined(__cplusplus) -extern "C" { -#endif +#endif /* _PNETCDF */ + +/** Define error codes for PIO. */ #define PIO_EBADIOTYPE -255 + +/** ??? */ #define PIO_REQ_NULL (NC_REQ_NULL-1) -int PIOc_freedecomp(int iosysid, int ioid); -int PIOc_inq_att (int ncid, int varid, const char *name, nc_type *xtypep, PIO_Offset *lenp); -int PIOc_inq_format (int ncid, int *formatp); -int PIOc_inq_varid (int ncid, const char *name, int *varidp); -int PIOc_inq_varnatts (int ncid, int varid, int *nattsp); -int PIOc_def_var (int ncid, const char *name, nc_type xtype, int ndims, const int *dimidsp, int *varidp); -int PIOc_def_var_deflate(int ncid, int varid, int shuffle, int deflate, - int deflate_level); -int PIOc_inq_var_deflate(int ncid, int varid, int *shufflep, - int *deflatep, int *deflate_levelp); -int PIOc_inq_var_szip(int ncid, int varid, int *options_maskp, int *pixels_per_blockp); -int PIOc_def_var_chunking(int ncid, int varid, int storage, const PIO_Offset *chunksizesp); -int PIOc_inq_var_chunking(int ncid, int varid, int *storagep, PIO_Offset *chunksizesp); -int PIOc_def_var_fill(int ncid, int varid, int no_fill, const void *fill_value); -int PIOc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_valuep); -int PIOc_def_var_endian(int ncid, int varid, int endian); -int PIOc_inq_var_endian(int ncid, int varid, int *endianp); -int PIOc_set_chunk_cache(int iosysid, int iotype, PIO_Offset size, PIO_Offset nelems, float preemption); -int PIOc_get_chunk_cache(int iosysid, int iotype, PIO_Offset *sizep, PIO_Offset *nelemsp, float *preemptionp); -int PIOc_set_var_chunk_cache(int ncid, int varid, PIO_Offset size, PIO_Offset nelems, - float preemption); -int PIOc_get_var_chunk_cache(int ncid, int varid, PIO_Offset *sizep, PIO_Offset *nelemsp, - float *preemptionp); -int PIOc_inq_var (int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, int *dimidsp, int *nattsp); -int PIOc_inq_varname (int ncid, int varid, char *name); -int PIOc_put_att_double (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const double *op); -int PIOc_put_att_int (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const int *op); -int PIOc_rename_att (int ncid, int varid, const char 
*name, const char *newname); -int PIOc_del_att (int ncid, int varid, const char *name); -int PIOc_inq_natts (int ncid, int *ngattsp); -int PIOc_inq (int ncid, int *ndimsp, int *nvarsp, int *ngattsp, int *unlimdimidp); -int PIOc_get_att_text (int ncid, int varid, const char *name, char *ip); -int PIOc_get_att_short (int ncid, int varid, const char *name, short *ip); -int PIOc_put_att_long (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const long *op); -int PIOc_redef (int ncid); -int PIOc_set_fill (int ncid, int fillmode, int *old_modep); -int PIOc_enddef (int ncid); -int PIOc_rename_var (int ncid, int varid, const char *name); -int PIOc_put_att_short (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const short *op); -int PIOc_put_att_text (int ncid, int varid, const char *name, PIO_Offset len, const char *op); -int PIOc_inq_attname (int ncid, int varid, int attnum, char *name); -int PIOc_get_att_ulonglong (int ncid, int varid, const char *name, unsigned long long *ip); -int PIOc_get_att_ushort (int ncid, int varid, const char *name, unsigned short *ip); -int PIOc_put_att_ulonglong (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned long long *op); -int PIOc_inq_dimlen (int ncid, int dimid, PIO_Offset *lenp); -int PIOc_get_att_uint (int ncid, int varid, const char *name, unsigned int *ip); -int PIOc_get_att_longlong (int ncid, int varid, const char *name, long long *ip); -int PIOc_put_att_schar (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const signed char *op); -int PIOc_put_att_float (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const float *op); -int PIOc_inq_nvars (int ncid, int *nvarsp); -int PIOc_rename_dim (int ncid, int dimid, const char *name); -int PIOc_inq_varndims (int ncid, int varid, int *ndimsp); -int PIOc_get_att_long (int ncid, int varid, const char *name, long *ip); -int PIOc_inq_dim (int ncid, int dimid, char *name, PIO_Offset *lenp); -int PIOc_inq_dimid (int ncid, const char *name, int *idp); -int PIOc_inq_unlimdim (int ncid, int *unlimdimidp); -int PIOc_inq_vardimid (int ncid, int varid, int *dimidsp); -int PIOc_inq_attlen (int ncid, int varid, const char *name, PIO_Offset *lenp); -int PIOc_inq_dimname (int ncid, int dimid, char *name); -int PIOc_put_att_ushort (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned short *op); -int PIOc_get_att_float (int ncid, int varid, const char *name, float *ip); -int PIOc_sync (int ncid); -int PIOc_put_att_longlong (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const long long *op); -int PIOc_put_att_uint (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned int *op); -int PIOc_get_att_schar (int ncid, int varid, const char *name, signed char *ip); -int PIOc_inq_attid (int ncid, int varid, const char *name, int *idp); -int PIOc_def_dim (int ncid, const char *name, PIO_Offset len, int *idp); -int PIOc_inq_ndims (int ncid, int *ndimsp); -int PIOc_inq_vartype (int ncid, int varid, nc_type *xtypep); -int PIOc_get_att_int (int ncid, int varid, const char *name, int *ip); -int PIOc_get_att_double (int ncid, int varid, const char *name, double *ip); -int PIOc_inq_atttype (int ncid, int varid, const char *name, nc_type *xtypep); -int PIOc_put_att_uchar (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned char *op); -int PIOc_get_att_uchar (int ncid, int varid, const char *name, unsigned char 
*ip); -int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const int dims[], - const int maplen, const PIO_Offset *compmap, int *ioidp, const int *rearr, - const PIO_Offset *iostart,const PIO_Offset *iocount); -int PIOc_Init_Intracomm(const MPI_Comm comp_comm, - const int num_iotasks, const int stride, - const int base, const int rearr, int *iosysidp); -int PIOc_closefile(int ncid); -int PIOc_createfile(const int iosysid, int *ncidp, int *iotype, - const char *fname, const int mode); -int PIOc_openfile(const int iosysid, int *ncidp, int *iotype, - const char *fname, const int mode); -int PIOc_write_darray(const int ncid, const int vid, const int ioid, const PIO_Offset arraylen, void *array, void *fillvalue); - int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, const int nvars, const PIO_Offset arraylen, void *array, const int frame[], void *fillvalue[], bool flushtodisk); - -int PIOc_get_att_ubyte (int ncid, int varid, const char *name, unsigned char *ip); -int PIOc_put_att_ubyte (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned char *op) ; -int PIOc_set_blocksize(const int newblocksize); - int PIOc_readmap(const char file[], int *ndims, int *gdims[], PIO_Offset *fmaplen, PIO_Offset *map[], const MPI_Comm comm); - int PIOc_readmap_from_f90(const char file[],int *ndims, int *gdims[], PIO_Offset *maplen, PIO_Offset *map[], const int f90_comm); - int PIOc_writemap(const char file[], const int ndims, const int gdims[], PIO_Offset maplen, PIO_Offset map[], const MPI_Comm comm); - int PIOc_writemap_from_f90(const char file[], const int ndims, const int gdims[], const PIO_Offset maplen, const PIO_Offset map[], const int f90_comm); - int PIOc_deletefile(const int iosysid, const char filename[]); - int PIOc_File_is_Open(int ncid); - int PIOc_Set_File_Error_Handling(int ncid, int method); - int PIOc_advanceframe(int ncid, int varid); - int PIOc_setframe(const int ncid, const int varid,const int frame); - int PIOc_get_numiotasks(int iosysid, int *numiotasks); - int PIOc_get_iorank(int iosysid, int *iorank); - int PIOc_get_local_array_size(int ioid); - int PIOc_Set_IOSystem_Error_Handling(int iosysid, int method); - int PIOc_set_hint(const int iosysid, char hint[], const char hintval[]); - int PIOc_Init_Intracomm(const MPI_Comm comp_comm, - const int num_iotasks, const int stride, - const int base,const int rearr, int *iosysidp); - int PIOc_finalize(const int iosysid); - int PIOc_iam_iotask(const int iosysid, bool *ioproc); - int PIOc_iotask_rank(const int iosysid, int *iorank); - int PIOc_iosystem_is_active(const int iosysid, bool *active); - int PIOc_put_vars_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned char *op) ; - int PIOc_get_var1_schar (int ncid, int varid, const PIO_Offset index[], signed char *buf) ; - int PIOc_put_vars_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned short *op) ; - int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void *IOBUF); - int PIOc_put_vars_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned long long *op) ; - int PIOc_get_vars_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned long long *buf) ; - int PIOc_put_varm (int ncid, int varid, const PIO_Offset start[], const 
PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype) ; - int PIOc_read_darray(const int ncid, const int vid, const int ioid, const PIO_Offset arraylen, void *array); - int PIOc_put_vars_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned int *op) ; - int PIOc_get_varm_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], signed char *buf) ; - int PIOc_put_varm_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned char *op) ; - int PIOc_put_var_ushort (int ncid, int varid, const unsigned short *op) ; - int PIOc_get_vars_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], short *buf) ; - int PIOc_put_var1_longlong (int ncid, int varid, const PIO_Offset index[], const long long *op) ; - int PIOc_get_var_double (int ncid, int varid, double *buf) ; - int PIOc_put_vara_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned char *op) ; - int PIOc_put_varm_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const short *op) ; - int PIOc_get_vara_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], double *buf) ; - int PIOc_put_var1_long (int ncid, int varid, const PIO_Offset index[], const long *ip) ; - int PIOc_get_var_int (int ncid, int varid, int *buf) ; - int PIOc_put_vars_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const long *op) ; - int PIOc_put_var_short (int ncid, int varid, const short *op) ; - int PIOc_get_vara_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], char *buf) ; - int PIOc_put_vara_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const int *op) ; - int PIOc_put_vara_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const int *op) ; - - int PIOc_put_var1_ushort (int ncid, int varid, const PIO_Offset index[], const unsigned short *op); - int PIOc_put_vara_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const char *op); - int PIOc_put_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const char *op); - int PIOc_put_varm_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned short *op); - int PIOc_put_var_ulonglong (int ncid, int varid, const unsigned long long *op); - int PIOc_put_var_int (int ncid, int varid, const int *op); - int PIOc_put_var_longlong (int ncid, int varid, const long long *op); - int PIOc_put_var_schar (int ncid, int varid, const signed char *op); - int PIOc_put_var_uint (int ncid, int varid, const unsigned int *op); - int PIOc_put_var (int ncid, int varid, const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_put_vara_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned short *op); - int PIOc_put_vars_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const short *op); - int 
PIOc_put_vara_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned int *op); - int PIOc_put_vara_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const signed char *op); - int PIOc_put_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned long long *op); - int PIOc_put_var1_uchar (int ncid, int varid, const PIO_Offset index[], const unsigned char *op); - int PIOc_put_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const int *op); - int PIOc_put_vars_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const signed char *op); - int PIOc_put_var1 (int ncid, int varid, const PIO_Offset index[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_put_vara_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const float *op); - int PIOc_put_var1_float (int ncid, int varid, const PIO_Offset index[], const float *op); - int PIOc_put_varm_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const float *op); - int PIOc_put_var1_text (int ncid, int varid, const PIO_Offset index[], const char *op); - int PIOc_put_vars_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const char *op); - int PIOc_put_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const long *op); - int PIOc_put_vars_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const double *op); - int PIOc_put_vara_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const long long *op); - int PIOc_put_var_double (int ncid, int varid, const double *op); - int PIOc_put_var_float (int ncid, int varid, const float *op); - int PIOc_put_var1_ulonglong (int ncid, int varid, const PIO_Offset index[], const unsigned long long *op); - int PIOc_put_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned int *op); - int PIOc_put_var1_uint (int ncid, int varid, const PIO_Offset index[], const unsigned int *op); - int PIOc_put_var1_int (int ncid, int varid, const PIO_Offset index[], const int *op); - int PIOc_put_vars_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const float *op); - int PIOc_put_vara_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const short *op); - int PIOc_put_var1_schar (int ncid, int varid, const PIO_Offset index[], const signed char *op); - int PIOc_put_vara_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned long long *op); - int PIOc_put_varm_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const double *op); - int PIOc_put_vara (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_put_vara_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], 
const long *op); - int PIOc_put_var1_double (int ncid, int varid, const PIO_Offset index[], const double *op); - int PIOc_put_varm_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const signed char *op); - int PIOc_put_var_text (int ncid, int varid, const char *op); - int PIOc_put_vars_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const int *op); - int PIOc_put_var1_short (int ncid, int varid, const PIO_Offset index[], const short *op); - int PIOc_put_vars_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const long long *op); - int PIOc_put_vara_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const double *op); - int PIOc_put_vars (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_put_var_uchar (int ncid, int varid, const unsigned char *op); - int PIOc_put_var_long (int ncid, int varid, const long *op); - int PIOc_put_varm_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const long long *op); - int PIOc_get_vara_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], int *buf); - int PIOc_get_var1_float (int ncid, int varid, const PIO_Offset index[], float *buf); - int PIOc_get_var1_short (int ncid, int varid, const PIO_Offset index[], short *buf); - int PIOc_get_vars_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], int *buf); - int PIOc_get_var_text (int ncid, int varid, char *buf); - int PIOc_get_varm_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], double *buf); - int PIOc_get_vars_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], signed char *buf); - int PIOc_get_vara_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned short *buf); - int PIOc_get_var1_ushort (int ncid, int varid, const PIO_Offset index[], unsigned short *buf); - int PIOc_get_var_float (int ncid, int varid, float *buf); - int PIOc_get_vars_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned char *buf); - int PIOc_get_var (int ncid, int varid, void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_get_var1_longlong (int ncid, int varid, const PIO_Offset index[], long long *buf); - int PIOc_get_vars_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned short *buf); - int PIOc_get_var_long (int ncid, int varid, long *buf); - int PIOc_get_var1_double (int ncid, int varid, const PIO_Offset index[], double *buf); - int PIOc_get_vara_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned int *buf); - int PIOc_get_vars_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], long long *buf); - int PIOc_get_var_longlong (int ncid, int varid, long long *buf); - int PIOc_get_vara_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], short *buf); - int PIOc_get_vara_long (int ncid, int varid, const 
PIO_Offset start[], const PIO_Offset count[], long *buf); - int PIOc_get_var1_int (int ncid, int varid, const PIO_Offset index[], int *buf); - int PIOc_get_var1_ulonglong (int ncid, int varid, const PIO_Offset index[], unsigned long long *buf); - int PIOc_get_var_uchar (int ncid, int varid, unsigned char *buf); - int PIOc_get_vara_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned char *buf); - int PIOc_get_vars_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], float *buf); - int PIOc_get_vars_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], long *buf); - int PIOc_get_var1 (int ncid, int varid, const PIO_Offset index[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_get_var_uint (int ncid, int varid, unsigned int *buf); - int PIOc_get_vara (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_get_vara_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], signed char *buf); - int PIOc_get_var1_uint (int ncid, int varid, const PIO_Offset index[], unsigned int *buf); - int PIOc_get_vars_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned int *buf); - int PIOc_get_vara_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], float *buf); - int PIOc_get_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], char *buf); - int PIOc_get_var1_text (int ncid, int varid, const PIO_Offset index[], char *buf); - int PIOc_get_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], int *buf); - int PIOc_get_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned int *buf); - int PIOc_get_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_get_vars_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], double *buf); - int PIOc_get_vara_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], long long *buf); - int PIOc_get_var_ulonglong (int ncid, int varid, unsigned long long *buf); - int PIOc_get_vara_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned long long *buf); - int PIOc_get_var_short (int ncid, int varid, short *buf); - int PIOc_get_varm_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], float *buf); - int PIOc_get_var1_long (int ncid, int varid, const PIO_Offset index[], long *buf); - int PIOc_get_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], long *buf); - int PIOc_get_varm_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned short *buf); - int PIOc_get_varm_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const 
PIO_Offset imap[], long long *buf); - int PIOc_get_vars_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], char *buf); - int PIOc_get_var1_uchar (int ncid, int varid, const PIO_Offset index[], unsigned char *buf); - int PIOc_get_vars (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype); - int PIOc_get_varm_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], short *buf); - int PIOc_get_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned long long *buf); - int PIOc_get_var_schar (int ncid, int varid, signed char *buf); - int PIOc_iotype_available(const int iotype); +#if defined(__cplusplus) +extern "C" { +#endif + int PIOc_strerror(int pioerr, char *errstr); + int PIOc_freedecomp(int iosysid, int ioid); + int PIOc_inq_att (int ncid, int varid, const char *name, nc_type *xtypep, PIO_Offset *lenp); + int PIOc_inq_format (int ncid, int *formatp); + int PIOc_inq_varid (int ncid, const char *name, int *varidp); + int PIOc_inq_varnatts (int ncid, int varid, int *nattsp); + int PIOc_def_var (int ncid, const char *name, nc_type xtype, int ndims, const int *dimidsp, int *varidp); + int PIOc_def_var_deflate(int ncid, int varid, int shuffle, int deflate, + int deflate_level); + int PIOc_inq_var_deflate(int ncid, int varid, int *shufflep, int *deflatep, + int *deflate_levelp); + int PIOc_inq_var_szip(int ncid, int varid, int *options_maskp, int *pixels_per_blockp); + int PIOc_def_var_chunking(int ncid, int varid, int storage, const PIO_Offset *chunksizesp); + int PIOc_inq_var_chunking(int ncid, int varid, int *storagep, PIO_Offset *chunksizesp); + int PIOc_def_var_fill(int ncid, int varid, int no_fill, const void *fill_value); + int PIOc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_valuep); + int PIOc_def_var_endian(int ncid, int varid, int endian); + int PIOc_inq_var_endian(int ncid, int varid, int *endianp); + int PIOc_set_chunk_cache(int iosysid, int iotype, PIO_Offset size, PIO_Offset nelems, float preemption); + int PIOc_get_chunk_cache(int iosysid, int iotype, PIO_Offset *sizep, PIO_Offset *nelemsp, float *preemptionp); + int PIOc_set_var_chunk_cache(int ncid, int varid, PIO_Offset size, PIO_Offset nelems, + float preemption); + int PIOc_get_var_chunk_cache(int ncid, int varid, PIO_Offset *sizep, PIO_Offset *nelemsp, + float *preemptionp); + int PIOc_inq_var (int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, int *dimidsp, int *nattsp); + int PIOc_inq_varname (int ncid, int varid, char *name); + int PIOc_put_att_double (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const double *op); + int PIOc_put_att_int (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const int *op); + int PIOc_rename_att (int ncid, int varid, const char *name, const char *newname); + int PIOc_del_att (int ncid, int varid, const char *name); + int PIOc_inq_natts (int ncid, int *ngattsp); + int PIOc_inq (int ncid, int *ndimsp, int *nvarsp, int *ngattsp, int *unlimdimidp); + int PIOc_get_att_text (int ncid, int varid, const char *name, char *ip); + int PIOc_get_att_short (int ncid, int varid, const char *name, short *ip); + int PIOc_put_att_long (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const long 
*op); + int PIOc_redef (int ncid); + int PIOc_set_fill (int ncid, int fillmode, int *old_modep); + int PIOc_enddef (int ncid); + int PIOc_rename_var (int ncid, int varid, const char *name); + int PIOc_put_att_short (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const short *op); + int PIOc_put_att_text (int ncid, int varid, const char *name, PIO_Offset len, const char *op); + int PIOc_inq_attname (int ncid, int varid, int attnum, char *name); + int PIOc_get_att_ulonglong (int ncid, int varid, const char *name, unsigned long long *ip); + int PIOc_get_att_ushort (int ncid, int varid, const char *name, unsigned short *ip); + int PIOc_put_att_ulonglong (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned long long *op); + int PIOc_inq_dimlen (int ncid, int dimid, PIO_Offset *lenp); + int PIOc_get_att_uint (int ncid, int varid, const char *name, unsigned int *ip); + int PIOc_get_att_longlong (int ncid, int varid, const char *name, long long *ip); + int PIOc_put_att_schar (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const signed char *op); + int PIOc_put_att_float (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const float *op); + int PIOc_inq_nvars (int ncid, int *nvarsp); + int PIOc_rename_dim (int ncid, int dimid, const char *name); + int PIOc_inq_varndims (int ncid, int varid, int *ndimsp); + int PIOc_get_att_long (int ncid, int varid, const char *name, long *ip); + int PIOc_inq_dim (int ncid, int dimid, char *name, PIO_Offset *lenp); + int PIOc_inq_dimid (int ncid, const char *name, int *idp); + int PIOc_inq_unlimdim (int ncid, int *unlimdimidp); + int PIOc_inq_vardimid (int ncid, int varid, int *dimidsp); + int PIOc_inq_attlen (int ncid, int varid, const char *name, PIO_Offset *lenp); + int PIOc_inq_dimname (int ncid, int dimid, char *name); + int PIOc_put_att_ushort (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned short *op); + int PIOc_get_att_float (int ncid, int varid, const char *name, float *ip); + int PIOc_sync (int ncid); + int PIOc_put_att_longlong (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const long long *op); + int PIOc_put_att_uint (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned int *op); + int PIOc_get_att_schar (int ncid, int varid, const char *name, signed char *ip); + int PIOc_inq_attid (int ncid, int varid, const char *name, int *idp); + int PIOc_def_dim (int ncid, const char *name, PIO_Offset len, int *idp); + int PIOc_inq_ndims (int ncid, int *ndimsp); + int PIOc_inq_vartype (int ncid, int varid, nc_type *xtypep); + int PIOc_get_att_int (int ncid, int varid, const char *name, int *ip); + int PIOc_get_att_double (int ncid, int varid, const char *name, double *ip); + int PIOc_inq_atttype (int ncid, int varid, const char *name, nc_type *xtypep); + int PIOc_put_att_uchar (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned char *op); + int PIOc_get_att_uchar (int ncid, int varid, const char *name, unsigned char *ip); + int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const int dims[], + const int maplen, const PIO_Offset *compmap, int *ioidp, const int *rearr, + const PIO_Offset *iostart,const PIO_Offset *iocount); + int PIOc_Init_Intracomm(const MPI_Comm comp_comm, + const int num_iotasks, const int stride, + const int base, const int rearr, int *iosysidp); + int PIOc_Init_Intercomm(int component_count, 
MPI_Comm peer_comm, MPI_Comm *comp_comms, + MPI_Comm io_comm, int *iosysidp); + int PIOc_closefile(int ncid); + int PIOc_createfile(const int iosysid, int *ncidp, int *iotype, + const char *fname, const int mode); + int PIOc_openfile(const int iosysid, int *ncidp, int *iotype, + const char *fname, const int mode); + int PIOc_write_darray(const int ncid, const int vid, const int ioid, const PIO_Offset arraylen, + void *array, void *fillvalue); + int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, const int nvars, const PIO_Offset arraylen, + void *array, const int frame[], void *fillvalue[], bool flushtodisk); + + int PIOc_get_att_ubyte (int ncid, int varid, const char *name, unsigned char *ip); + int PIOc_put_att_ubyte (int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned char *op) ; + int PIOc_set_blocksize(const int newblocksize); + int PIOc_readmap(const char file[], int *ndims, int *gdims[], PIO_Offset *fmaplen, PIO_Offset *map[], const MPI_Comm comm); + int PIOc_readmap_from_f90(const char file[],int *ndims, int *gdims[], PIO_Offset *maplen, PIO_Offset *map[], const int f90_comm); + int PIOc_writemap(const char file[], const int ndims, const int gdims[], PIO_Offset maplen, PIO_Offset map[], const MPI_Comm comm); + int PIOc_writemap_from_f90(const char file[], const int ndims, const int gdims[], const PIO_Offset maplen, const PIO_Offset map[], const int f90_comm); + int PIOc_deletefile(const int iosysid, const char filename[]); + int PIOc_File_is_Open(int ncid); + int PIOc_Set_File_Error_Handling(int ncid, int method); + int PIOc_advanceframe(int ncid, int varid); + int PIOc_setframe(const int ncid, const int varid,const int frame); + int PIOc_get_numiotasks(int iosysid, int *numiotasks); + int PIOc_get_iorank(int iosysid, int *iorank); + int PIOc_get_local_array_size(int ioid); + int PIOc_Set_IOSystem_Error_Handling(int iosysid, int method); + int PIOc_set_hint(const int iosysid, char hint[], const char hintval[]); + int PIOc_Init_Intracomm(const MPI_Comm comp_comm, + const int num_iotasks, const int stride, + const int base,const int rearr, int *iosysidp); + int PIOc_finalize(const int iosysid); + int PIOc_iam_iotask(const int iosysid, bool *ioproc); + int PIOc_iotask_rank(const int iosysid, int *iorank); + int PIOc_iosystem_is_active(const int iosysid, bool *active); + int PIOc_put_vars_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned char *op) ; + int PIOc_get_var1_schar (int ncid, int varid, const PIO_Offset index[], signed char *buf) ; + int PIOc_put_vars_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned short *op) ; + int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void *IOBUF); + int PIOc_put_vars_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned long long *op) ; + int PIOc_get_vars_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned long long *buf) ; + int PIOc_put_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype) ; + int PIOc_read_darray(const int ncid, const int vid, const int ioid, const PIO_Offset arraylen, void *array); + int PIOc_put_vars_uint (int ncid, int varid, 
const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const unsigned int *op) ; + int PIOc_get_varm_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], signed char *buf) ; + int PIOc_put_varm_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned char *op) ; + int PIOc_put_var_ushort (int ncid, int varid, const unsigned short *op) ; + int PIOc_get_vars_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], short *buf) ; + int PIOc_put_var1_longlong (int ncid, int varid, const PIO_Offset index[], const long long *op) ; + int PIOc_get_var_double (int ncid, int varid, double *buf) ; + int PIOc_put_vara_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned char *op) ; + int PIOc_put_varm_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const short *op) ; + int PIOc_get_vara_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], double *buf) ; + int PIOc_put_var1_long (int ncid, int varid, const PIO_Offset index[], const long *ip) ; + int PIOc_get_var_int (int ncid, int varid, int *buf) ; + int PIOc_put_vars_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const long *op) ; + int PIOc_put_var_short (int ncid, int varid, const short *op) ; + int PIOc_get_vara_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], char *buf) ; + int PIOc_put_vara_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const int *op) ; + + int PIOc_put_var1_ushort (int ncid, int varid, const PIO_Offset index[], const unsigned short *op); + int PIOc_put_vara_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const char *op); + int PIOc_put_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const char *op); + int PIOc_put_varm_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned short *op); + int PIOc_put_var_ulonglong (int ncid, int varid, const unsigned long long *op); + int PIOc_put_var_int (int ncid, int varid, const int *op); + int PIOc_put_var_longlong (int ncid, int varid, const long long *op); + int PIOc_put_var_schar (int ncid, int varid, const signed char *op); + int PIOc_put_var_uint (int ncid, int varid, const unsigned int *op); + int PIOc_put_var (int ncid, int varid, const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_put_vara_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned short *op); + int PIOc_put_vars_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const short *op); + int PIOc_put_vara_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned int *op); + int PIOc_put_vara_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const signed char *op); + int PIOc_put_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned 
long long *op); + int PIOc_put_var1_uchar (int ncid, int varid, const PIO_Offset index[], const unsigned char *op); + int PIOc_put_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const int *op); + int PIOc_put_vars_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const signed char *op); + int PIOc_put_var1 (int ncid, int varid, const PIO_Offset index[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_put_vara_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const float *op); + int PIOc_put_var1_float (int ncid, int varid, const PIO_Offset index[], const float *op); + int PIOc_put_varm_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const float *op); + int PIOc_put_var1_text (int ncid, int varid, const PIO_Offset index[], const char *op); + int PIOc_put_vars_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const char *op); + int PIOc_put_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const long *op); + int PIOc_put_vars_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const double *op); + int PIOc_put_vara_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const long long *op); + int PIOc_put_var_double (int ncid, int varid, const double *op); + int PIOc_put_var_float (int ncid, int varid, const float *op); + int PIOc_put_var1_ulonglong (int ncid, int varid, const PIO_Offset index[], const unsigned long long *op); + int PIOc_put_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned int *op); + int PIOc_put_var1_uint (int ncid, int varid, const PIO_Offset index[], const unsigned int *op); + int PIOc_put_var1_int (int ncid, int varid, const PIO_Offset index[], const int *op); + int PIOc_put_vars_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const float *op); + int PIOc_put_vara_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const short *op); + int PIOc_put_var1_schar (int ncid, int varid, const PIO_Offset index[], const signed char *op); + int PIOc_put_vara_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const unsigned long long *op); + int PIOc_put_varm_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const double *op); + int PIOc_put_vara (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_put_vara_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const long *op); + int PIOc_put_var1_double (int ncid, int varid, const PIO_Offset index[], const double *op); + int PIOc_put_varm_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const signed char *op); + int PIOc_put_var_text (int ncid, int varid, const char *op); + int PIOc_put_vars_int (int ncid, int varid, const PIO_Offset 
start[], const PIO_Offset count[], const PIO_Offset stride[], const int *op); + int PIOc_put_var1_short (int ncid, int varid, const PIO_Offset index[], const short *op); + int PIOc_put_vars_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const long long *op); + int PIOc_put_vara_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const double *op); + int PIOc_put_vars (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], nc_type xtype, const void *buf); + int PIOc_put_var_uchar (int ncid, int varid, const unsigned char *op); + int PIOc_put_var_long (int ncid, int varid, const long *op); + int PIOc_put_varm_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const long long *op); + int PIOc_get_vara_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], int *buf); + int PIOc_get_var1_float (int ncid, int varid, const PIO_Offset index[], float *buf); + int PIOc_get_var1_short (int ncid, int varid, const PIO_Offset index[], short *buf); + int PIOc_get_vars_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], int *buf); + int PIOc_get_var_text (int ncid, int varid, char *buf); + int PIOc_get_varm_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], double *buf); + int PIOc_get_vars_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], signed char *buf); + int PIOc_get_vara_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned short *buf); + int PIOc_get_var1_ushort (int ncid, int varid, const PIO_Offset index[], unsigned short *buf); + int PIOc_get_var_float (int ncid, int varid, float *buf); + int PIOc_get_vars_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned char *buf); + int PIOc_get_var (int ncid, int varid, void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_get_var1_longlong (int ncid, int varid, const PIO_Offset index[], long long *buf); + int PIOc_get_vars_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned short *buf); + int PIOc_get_var_long (int ncid, int varid, long *buf); + int PIOc_get_var1_double (int ncid, int varid, const PIO_Offset index[], double *buf); + int PIOc_get_vara_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned int *buf); + int PIOc_get_vars_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], long long *buf); + int PIOc_get_var_longlong (int ncid, int varid, long long *buf); + int PIOc_get_vara_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], short *buf); + int PIOc_get_vara_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], long *buf); + int PIOc_get_var1_int (int ncid, int varid, const PIO_Offset index[], int *buf); + int PIOc_get_var1_ulonglong (int ncid, int varid, const PIO_Offset index[], unsigned long long *buf); + int 
PIOc_get_var_uchar (int ncid, int varid, unsigned char *buf); + int PIOc_get_vara_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned char *buf); + int PIOc_get_vars_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], float *buf); + int PIOc_get_vars_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], long *buf); + int PIOc_get_var1 (int ncid, int varid, const PIO_Offset index[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_get_var_uint (int ncid, int varid, unsigned int *buf); + int PIOc_get_vara (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_get_vara_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], signed char *buf); + int PIOc_get_var1_uint (int ncid, int varid, const PIO_Offset index[], unsigned int *buf); + int PIOc_get_vars_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], unsigned int *buf); + int PIOc_get_vara_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], float *buf); + int PIOc_get_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], char *buf); + int PIOc_get_var1_text (int ncid, int varid, const PIO_Offset index[], char *buf); + int PIOc_get_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], int *buf); + int PIOc_get_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned int *buf); + int PIOc_get_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype); + int PIOc_get_vars_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], double *buf); + int PIOc_get_vara_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], long long *buf); + int PIOc_get_var_ulonglong (int ncid, int varid, unsigned long long *buf); + int PIOc_get_vara_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], unsigned long long *buf); + int PIOc_get_var_short (int ncid, int varid, short *buf); + int PIOc_get_varm_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], float *buf); + int PIOc_get_var1_long (int ncid, int varid, const PIO_Offset index[], long *buf); + int PIOc_get_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], long *buf); + int PIOc_get_varm_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned short *buf); + int PIOc_get_varm_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], long long *buf); + int PIOc_get_vars_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], char *buf); + int PIOc_get_var1_uchar (int ncid, int varid, const PIO_Offset index[], 
unsigned char *buf);
+    int PIOc_get_vars (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype);
+    int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], nc_type xtype, void *buf);
+    int PIOc_get_varm_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], short *buf);
+    int PIOc_get_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned long long *buf);
+    int PIOc_get_var_schar (int ncid, int varid, signed char *buf);
+    int PIOc_iotype_available(const int iotype);
+    int PIOc_set_log_level(int level);
+    int PIOc_put_att(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const void *op);
+    int PIOc_get_att(int ncid, int varid, const char *name, void *ip);
+    int PIOc_inq_type(int ncid, nc_type xtype, char *name, PIO_Offset *sizep);
 #if defined(__cplusplus)
 }
 #endif
diff --git a/externals/pio2/src/clib/pio_darray.c b/externals/pio2/src/clib/pio_darray.c
index 1a22ec6e11a..bf734291a95 100644
--- a/externals/pio2/src/clib/pio_darray.c
+++ b/externals/pio2/src/clib/pio_darray.c
@@ -13,16 +13,20 @@
 #include <pio.h>
 #include <pio_internal.h>
+
 #define PIO_WRITE_BUFFERING 1
 PIO_Offset PIO_BUFFER_SIZE_LIMIT=10485760; // 10MB default limit
 bufsize PIO_CNBUFFER_LIMIT=33554432;
 static void *CN_bpool=NULL;
 static PIO_Offset maxusage=0;
-/** @brief Set the pio buffer size limit, this is the size of the data buffer on the IO nodes.
+
+/** Set the pio buffer size limit. This is the size of the data buffer
+ * on the IO nodes.
  *
+ * The pio_buffer_size_limit will only apply to files opened after
+ * the setting is changed.
  *
- * The pio_buffer_size_limit will only apply to files opened after the setting is changed.
  * @param limit the size of the buffer on the IO nodes
  * @return The previous limit setting.
  */
@@ -35,87 +39,108 @@ static PIO_Offset maxusage=0;
     return(oldsize);
 }
-/** @brief Initialize the compute buffer to size PIO_CNBUFFER_LIMIT
+/** Initialize the compute buffer to size PIO_CNBUFFER_LIMIT.
+ *
+ * This routine initializes the compute buffer pool if the bget memory
+ * management is used.
+ *
- * This routine initializes the compute buffer pool if the bget memory management is used.
 * @param ios the iosystem descriptor which will use the new buffer
 */
-
 void compute_buffer_init(iosystem_desc_t ios)
 {
 #ifndef PIO_USE_MALLOC
-    if(CN_bpool == NULL){
-	CN_bpool = malloc( PIO_CNBUFFER_LIMIT );
-	if(CN_bpool==NULL){
+    if (!CN_bpool)
+    {
+	if (!(CN_bpool = malloc(PIO_CNBUFFER_LIMIT)))
+	{
	    char errmsg[180];
-	    sprintf(errmsg,"Unable to allocate a buffer pool of size %d on task %d: try reducing PIO_CNBUFFER_LIMIT\n",PIO_CNBUFFER_LIMIT,ios.comp_rank);
+	    sprintf(errmsg,"Unable to allocate a buffer pool of size %d on task %d:"
+		    " try reducing PIO_CNBUFFER_LIMIT\n", PIO_CNBUFFER_LIMIT, ios.comp_rank);
	    piodie(errmsg,__FILE__,__LINE__);
	}
+
	bpool( CN_bpool, PIO_CNBUFFER_LIMIT);
-	if(CN_bpool==NULL){
+	if (!CN_bpool)
+	{
	    char errmsg[180];
-	    sprintf(errmsg,"Unable to allocate a buffer pool of size %d on task %d: try reducing PIO_CNBUFFER_LIMIT\n",PIO_CNBUFFER_LIMIT,ios.comp_rank);
+	    sprintf(errmsg,"Unable to allocate a buffer pool of size %d on task %d:"
+		    " try reducing PIO_CNBUFFER_LIMIT\n", PIO_CNBUFFER_LIMIT, ios.comp_rank);
	    piodie(errmsg,__FILE__,__LINE__);
	}
+
	bectl(NULL, malloc, free, PIO_CNBUFFER_LIMIT);
     }
 #endif
 }
-/** @ingroup PIO_write_darray
- * @brief Write a single distributed field to output. This routine is only used if aggregation is off.
- * @param[in] file: a pointer to the open file descriptor for the file that will be written to
+/** Write a single distributed field to output. This routine is only
+ * used if aggregation is off.
+ *
+ * @param[in] file: a pointer to the open file descriptor for the file
+ * that will be written to
+ *
  * @param[in] iodesc: a pointer to the defined iodescriptor for the buffer
+ *
  * @param[in] vid: the variable id to be written
+ *
  * @param[in] IOBUF: the buffer to be written from this mpi task
- * @param[in] fillvalue: the optional fillvalue to be used for missing data in this buffer
+ *
+ * @param[in] fillvalue: the optional fillvalue to be used for missing
+ * data in this buffer
+ *
+ * @return 0 for success, error code otherwise.
+ *
+ * @ingroup PIO_write_darray
  */
- int pio_write_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void *IOBUF, void *fillvalue)
+int pio_write_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid,
+			void *IOBUF, void *fillvalue)
 {
-    iosystem_desc_t *ios;
+    iosystem_desc_t *ios;  /** Pointer to io system information. */
     var_desc_t *vdesc;
     int ndims;
-    int ierr;
+    int ierr = PIO_NOERR;  /** Return code from function calls. */
     int i;
-    int msg;
-    int mpierr;
+    int mpierr = MPI_SUCCESS;  /** Return code from MPI function codes. */
     int dsize;
     MPI_Status status;
     PIO_Offset usage;
     int fndims;
-    PIO_Offset tdsize;
+    PIO_Offset tdsize = 0;
+
 #ifdef TIMING
     GPTLstart("PIO:write_darray_nc");
 #endif
-    tdsize=0;
-    ierr = PIO_NOERR;
-
-    ios = file->iosystem;
-    if(ios == NULL){
-	fprintf(stderr,"Failed to find iosystem handle \n");
+    /* Get the IO system info. */
+    if (!(ios = file->iosystem))
	return PIO_EBADID;
-    }
-    vdesc = (file->varlist)+vid;
-    if(vdesc == NULL){
-	fprintf(stderr,"Failed to find variable handle %d\n",vid);
+
+    /* Get pointer to variable information. */
+    if (!(vdesc = file->varlist + vid))
	return PIO_EBADID;
-    }
+
     ndims = iodesc->ndims;
-    msg = 0;
-    if(ios->async_interface && ! ios->ioproc){
-	if(ios->comp_rank==0)
+    /* If async is in use, and this is not an IO task, bcast the parameters.
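The broadcast introduced here forwards the file handle from the compute tasks to a separate pool of IO tasks, which only exists when the IO system was built with PIOc_Init_Intercomm() (declared earlier in pio.h). A minimal caller-side sketch, assuming peer_comm, comp_comm, and io_comm are already-constructed, disjoint communicators; all names here are illustrative, not part of the patch:

```c
#include <mpi.h>
#include <pio.h>

/* Sketch: create an asynchronous IO system in which the tasks of
 * io_comm service requests from one compute communicator. All
 * communicator setup is assumed to have happened elsewhere. */
int init_async_io(MPI_Comm peer_comm, MPI_Comm comp_comm, MPI_Comm io_comm,
                  int *iosysidp)
{
    MPI_Comm comp_comms[1] = {comp_comm}; /* one computational unit */
    return PIOc_Init_Intercomm(1, peer_comm, comp_comms, io_comm, iosysidp);
}
```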
*/ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = 0; + + if (ios->compmaster) mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); - mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + } } ierr = PIOc_inq_varndims(file->fh, vid, &fndims); - - if(ios->ioproc){ + if (ios->ioproc) + { io_region *region; int ncid = file->fh; int regioncnt; @@ -135,126 +160,157 @@ void compute_buffer_init(iosystem_desc_t ios) if(vdesc->record >= 0 && ndimsiotype == PIO_IOTYPE_PNETCDF){ - // make sure we have room in the buffer ; + /* make sure we have room in the buffer. */ + if (file->iotype == PIO_IOTYPE_PNETCDF) flush_output_buffer(file, false, tsize*(iodesc->maxiobuflen)); - } #endif rrcnt=0; - for(regioncnt=0;regioncntmaxregions;regioncnt++){ - for(i=0;imaxregions; regioncnt++) + { + for (i = 0; i < ndims; i++) + { start[i] = 0; count[i] = 0; } - if(region != NULL){ + if (region) + { bufptr = (void *)((char *) IOBUF+tsize*region->loffset); // this is a record based multidimensional array - if(vdesc->record >= 0){ + if (vdesc->record >= 0) + { start[0] = vdesc->record; - for(i=1;istart[i-1]; count[i] = region->count[i-1]; } if(count[1]>0) count[0] = 1; // Non-time dependent array - }else{ - for( i=0;istart[i]; count[i] = region->count[i]; } } } - switch(file->iotype){ + switch(file->iotype) + { #ifdef _NETCDF #ifdef _NETCDF4 case PIO_IOTYPE_NETCDF4P: + + /* Use collective writes with this variable. */ ierr = nc_var_par_access(ncid, vid, NC_COLLECTIVE); - if(iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8){ - ierr = nc_put_vara_double (ncid, vid,(size_t *) start,(size_t *) count, (const double *) bufptr); - } else if(iodesc->basetype == MPI_INTEGER){ - ierr = nc_put_vara_int (ncid, vid, (size_t *) start, (size_t *) count, (const int *) bufptr); - }else if(iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4){ - ierr = nc_put_vara_float (ncid, vid, (size_t *) start, (size_t *) count, (const float *) bufptr); - }else{ - fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",(int) iodesc->basetype); - } + if (iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8) + ierr = nc_put_vara_double(ncid, vid, (size_t *)start, (size_t *)count, + (const double *)bufptr); + else if (iodesc->basetype == MPI_INTEGER) + ierr = nc_put_vara_int(ncid, vid, (size_t *)start, (size_t *)count, + (const int *)bufptr); + else if (iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4) + ierr = nc_put_vara_float(ncid, vid, (size_t *)start, (size_t *)count, + (const float *)bufptr); + else + fprintf(stderr,"Type not recognized %d in pioc_write_darray\n", + (int)iodesc->basetype); break; case PIO_IOTYPE_NETCDF4C: -#endif +#endif /* _NETCDF4 */ case PIO_IOTYPE_NETCDF: { mpierr = MPI_Type_size(iodesc->basetype, &dsize); size_t tstart[ndims], tcount[ndims]; - if(ios->io_rank==0){ - - for(i=0;inum_aiotasks;i++){ - if(i==0){ + if (ios->io_rank == 0) + { + for (i = 0; i < iodesc->num_aiotasks; i++) + { + if (i == 0) + { buflen=1; - for(j=0;jio_comm); // handshake - tell the sending task I'm ready mpierr = MPI_Recv( &buflen, 1, MPI_INT, i, 1, ios->io_comm, &status); - if(buflen>0){ - mpierr = MPI_Recv( tstart, ndims, MPI_OFFSET, i, ios->num_iotasks+i, ios->io_comm, &status); - mpierr = MPI_Recv( tcount, ndims, MPI_OFFSET, i,2*ios->num_iotasks+i, ios->io_comm, &status); + if (buflen > 0) + { + mpierr = MPI_Recv(tstart, ndims, MPI_OFFSET, 
i, ios->num_iotasks+i, + ios->io_comm, &status); + mpierr = MPI_Recv(tcount, ndims, MPI_OFFSET, i, 2 * ios->num_iotasks + i, + ios->io_comm, &status); tmp_buf = malloc(buflen * dsize); mpierr = MPI_Recv( tmp_buf, buflen, iodesc->basetype, i, i, ios->io_comm, &status); } } - if(buflen>0){ - if(iodesc->basetype == MPI_INTEGER){ + if (buflen>0) + { + if (iodesc->basetype == MPI_INTEGER) ierr = nc_put_vara_int (ncid, vid, tstart, tcount, (const int *) tmp_buf); - }else if(iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8){ + else if (iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8) ierr = nc_put_vara_double (ncid, vid, tstart, tcount, (const double *) tmp_buf); - }else if(iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4){ + else if (iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4) ierr = nc_put_vara_float (ncid,vid, tstart, tcount, (const float *) tmp_buf); - }else{ - fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",(int) iodesc->basetype); - } - if(ierr == PIO_EEDGE){ + else + fprintf(stderr,"Type not recognized %d in pioc_write_darray\n", + (int)iodesc->basetype); + + if (ierr == PIO_EEDGE) for(i=0;iio_rank < iodesc->num_aiotasks ){ + } + else if (ios->io_rank < iodesc->num_aiotasks) + { buflen=1; - for(i=0;iio_rank,tstart[0],tstart[1],tcount[0],tcount[1],buflen,ndims,fndims); + /* printf("%s %d %d %d %d %d %d %d %d %d\n",__FILE__,__LINE__,ios->io_rank,tstart[0], + tstart[1],tcount[0],tcount[1],buflen,ndims,fndims);*/ mpierr = MPI_Recv( &ierr, 1, MPI_INT, 0, 0, ios->io_comm, &status); // task0 is ready to recieve mpierr = MPI_Rsend( &buflen, 1, MPI_INT, 0, 1, ios->io_comm); - if(buflen>0) { - mpierr = MPI_Rsend( tstart, ndims, MPI_OFFSET, 0, ios->num_iotasks+ios->io_rank, ios->io_comm); - mpierr = MPI_Rsend( tcount, ndims, MPI_OFFSET, 0,2*ios->num_iotasks+ios->io_rank, ios->io_comm); + if (buflen > 0) + { + mpierr = MPI_Rsend(tstart, ndims, MPI_OFFSET, 0, ios->num_iotasks+ios->io_rank, + ios->io_comm); + mpierr = MPI_Rsend(tcount, ndims, MPI_OFFSET, 0,2*ios->num_iotasks+ios->io_rank, + ios->io_comm); mpierr = MPI_Rsend( bufptr, buflen, iodesc->basetype, 0, ios->io_rank, ios->io_comm); } } break; } break; - #endif +#endif /* _NETCDF */ #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - for( i=0,dsize=1;ibasetype); @@ -266,56 +322,57 @@ void compute_buffer_init(iosystem_desc_t ios) } } */ - if(dsize>0){ + if (dsize > 0) + { // printf("%s %d %d %d\n",__FILE__,__LINE__,ios->io_rank,dsize); startlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset)); countlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset)); - for( i=0; imaxregions-1){ + if (regioncnt == iodesc->maxregions - 1) + { // printf("%s %d %d %ld %ld\n",__FILE__,__LINE__,ios->io_rank,iodesc->llen, tdsize); // ierr = ncmpi_put_varn_all(ncid, vid, iodesc->maxregions, startlist, countlist, // IOBUF, iodesc->llen, iodesc->basetype); int reqn=0; - - if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0 ) + { vdesc->request = realloc(vdesc->request, sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); - for(int i=vdesc->nreqs;inreqs+PIO_REQUEST_ALLOC_CHUNK;i++){ + for (int i = vdesc->nreqs; i < vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK; i++) vdesc->request[i]=NC_REQ_NULL; - } reqn = vdesc->nreqs; - }else{ - while(vdesc->request[reqn] != NC_REQ_NULL ){ - reqn++; - } } + else + while(vdesc->request[reqn] != NC_REQ_NULL) + reqn++; ierr = ncmpi_bput_varn(ncid, vid, rrcnt, startlist, countlist, IOBUF, iodesc->llen, 
iodesc->basetype, vdesc->request+reqn); - if(vdesc->request[reqn] == NC_REQ_NULL){ + if (vdesc->request[reqn] == NC_REQ_NULL) vdesc->request[reqn] = PIO_REQ_NULL; //keeps wait calls in sync - } vdesc->nreqs = reqn; // printf("%s %d %X %d\n",__FILE__,__LINE__,IOBUF,request); - for(i=0;iiotype,__FILE__,__LINE__); } - if(region != NULL) + if (region) region = region->next; } // for(regioncnt=0;regioncntmaxregions;regioncnt++){ } // if(ios->ioproc) @@ -328,11 +385,12 @@ void compute_buffer_init(iosystem_desc_t ios) return ierr; } -/** @brief Write a set of one or more aggregated arrays to output file +/** Write a set of one or more aggregated arrays to output file * @ingroup PIO_write_darray * * This routine is used if aggregation is enabled, data is already on the * io-tasks + * * @param[in] file: a pointer to the open file descriptor for the file that will be written to * @param[in] nvars: the number of variables to be written with this decomposition * @param[in] vid: an array of the variable ids to be written @@ -353,12 +411,11 @@ int pio_write_darray_multi_nc(file_desc_t *file, const int nvars, const int vid[ const int maxiobuflen, const int num_aiotasks, void *IOBUF, const int frame[]) { - iosystem_desc_t *ios; + iosystem_desc_t *ios; /** Pointer to io system information. */ var_desc_t *vdesc; int ierr; int i; - int msg; - int mpierr; + int mpierr = MPI_SUCCESS; /** Return code from MPI function codes. */ int dsize; MPI_Status status; PIO_Offset usage; @@ -373,29 +430,39 @@ int pio_write_darray_multi_nc(file_desc_t *file, const int nvars, const int vid[ #endif ios = file->iosystem; - if(ios == NULL){ + if (ios == NULL) + { fprintf(stderr,"Failed to find iosystem handle \n"); return PIO_EBADID; } vdesc = (file->varlist)+vid[0]; ncid = file->fh; - if(vdesc == NULL){ + if (vdesc == NULL) + { fprintf(stderr,"Failed to find variable handle %d\n",vid[0]); return PIO_EBADID; } - msg = 0; - if(ios->async_interface && ! ios->ioproc){ - if(ios->comp_rank==0) + /* If async is in use, send message to IO master task. 
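The request bookkeeping above serves the aggregated write path exposed as PIOc_write_darray_multi(), declared earlier in pio.h. A hedged sketch of that entry point, assuming a single contiguous buffer holding nvars variables that share one decomposition (the helper name and buffer layout are illustrative):

```c
#include <stdbool.h>
#include <pio.h>

/* Sketch: write nvars record variables that share the decomposition
 * ioid in one aggregated call; frames[] selects the record (frame)
 * for each variable, and a NULL fillvalue array requests no hole
 * filling. */
int write_aggregated(int ncid, const int *varids, int ioid, int nvars,
                     PIO_Offset arraylen, void *data, const int *frames)
{
    return PIOc_write_darray_multi(ncid, varids, ioid, nvars, arraylen,
                                   data, frames, NULL, false);
}
```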
*/ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = 0; + if (ios->compmaster) mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm); } + } ierr = PIOc_inq_varndims(file->fh, vid[0], &fndims); MPI_Type_size(basetype, &tsize); - if(ios->ioproc){ + if (ios->ioproc) + { io_region *region; int regioncnt; int rrcnt; @@ -411,104 +478,141 @@ int pio_write_darray_multi_nc(file_desc_t *file, const int nvars, const int vid[ ncid = file->fh; region = firstregion; - rrcnt=0; - for(regioncnt=0;regioncntstart[0],region->count[0],ndims,fndims,vdesc->record); - for(i=0;irecord >= 0){ - for(i=fndims-ndims;irecord >= 0) + { + for (i = fndims - ndims; i < fndims; i++) + { start[i] = region->start[i-(fndims-ndims)]; count[i] = region->count[i-(fndims-ndims)]; } - if(fndims>1 && ndims0){ + if (fndims>1 && ndims0) + { count[0] = 1; start[0] = frame[0]; - }else if(fndims==ndims){ + } + else if (fndims==ndims) + { start[0]+=vdesc->record; } // Non-time dependent array - }else{ - for( i=0;istart[i]; count[i] = region->count[i]; } } } - switch(file->iotype){ + switch(file->iotype) + { #ifdef _NETCDF4 case PIO_IOTYPE_NETCDF4P: - for(int nv=0; nvrecord >= 0 && ndimsrecord >= 0 && ndims < fndims) + { start[0] = frame[nv]; } - if(region != NULL){ + if (region) + { bufptr = (void *)((char *) IOBUF + tsize*(nv*llen + region->loffset)); } ierr = nc_var_par_access(ncid, vid[nv], NC_COLLECTIVE); - if(basetype == MPI_DOUBLE ||basetype == MPI_REAL8){ - ierr = nc_put_vara_double (ncid, vid[nv],(size_t *) start,(size_t *) count, (const double *) bufptr); - } else if(basetype == MPI_INTEGER){ - ierr = nc_put_vara_int (ncid, vid[nv], (size_t *) start, (size_t *) count, (const int *) bufptr); - }else if(basetype == MPI_FLOAT || basetype == MPI_REAL4){ - ierr = nc_put_vara_float (ncid, vid[nv], (size_t *) start, (size_t *) count, (const float *) bufptr); - }else{ - fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",(int) basetype); + if (basetype == MPI_DOUBLE ||basetype == MPI_REAL8) + { + ierr = nc_put_vara_double (ncid, vid[nv],(size_t *) start,(size_t *) count, + (const double *)bufptr); + } + else if (basetype == MPI_INTEGER) + { + ierr = nc_put_vara_int (ncid, vid[nv], (size_t *) start, (size_t *) count, + (const int *)bufptr); + } + else if (basetype == MPI_FLOAT || basetype == MPI_REAL4) + { + ierr = nc_put_vara_float (ncid, vid[nv], (size_t *) start, (size_t *) count, + (const float *)bufptr); + } + else + { + fprintf(stderr,"Type not recognized %d in pioc_write_darray\n", + (int)basetype); } } break; #endif #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - for( i=0,dsize=1;i0){ + if (dsize>0) + { // printf("%s %d %d %d\n",__FILE__,__LINE__,ios->io_rank,dsize); startlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset)); countlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset)); - for( i=0; iio_rank,iodesc->llen, tdsize); // ierr = ncmpi_put_varn_all(ncid, vid, iodesc->maxregions, startlist, countlist, // IOBUF, iodesc->llen, iodesc->basetype); //printf("%s %d %ld \n",__FILE__,__LINE__,IOBUF); - for(int nv=0; nvvarlist)+vid[nv]; - if(vdesc->record >= 0 && ndimsrecord >= 0 && ndimsnreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ) + { vdesc->request = realloc(vdesc->request, sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); - for(int i=vdesc->nreqs;inreqs+PIO_REQUEST_ALLOC_CHUNK;i++){ + for (int 
i=vdesc->nreqs;inreqs+PIO_REQUEST_ALLOC_CHUNK;i++) + { vdesc->request[i]=NC_REQ_NULL; } reqn = vdesc->nreqs; - }else{ - while(vdesc->request[reqn] != NC_REQ_NULL){ + } + else + { + while(vdesc->request[reqn] != NC_REQ_NULL) + { reqn++; } } @@ -518,16 +622,20 @@ int pio_write_darray_multi_nc(file_desc_t *file, const int nvars, const int vid[ ierr = ncmpi_bput_varn(ncid, vid[nv], rrcnt, startlist, countlist, bufptr, llen, basetype, &(vdesc->request)); */ - if(vdesc->request[reqn] == NC_REQ_NULL){ + if (vdesc->request[reqn] == NC_REQ_NULL) + { vdesc->request[reqn] = PIO_REQ_NULL; //keeps wait calls in sync } vdesc->nreqs += reqn+1; // printf("%s %d %d %d\n",__FILE__,__LINE__,vdesc->nreqs,vdesc->request[reqn]); } - for(i=0;iiotype,__FILE__,__LINE__); } - if(region != NULL) + if (region) region = region->next; } // for(regioncnt=0;regioncntmaxregions;regioncnt++){ } // if(ios->ioproc) @@ -578,12 +686,11 @@ int pio_write_darray_multi_nc_serial(file_desc_t *file, const int nvars, const i const int maxiobuflen, const int num_aiotasks, void *IOBUF, const int frame[]) { - iosystem_desc_t *ios; + iosystem_desc_t *ios; /** Pointer to io system information. */ var_desc_t *vdesc; int ierr; int i; - int msg; - int mpierr; + int mpierr = MPI_SUCCESS; /** Return code from MPI function codes. */ int dsize; MPI_Status status; PIO_Offset usage; @@ -597,30 +704,40 @@ int pio_write_darray_multi_nc_serial(file_desc_t *file, const int nvars, const i GPTLstart("PIO:write_darray_multi_nc_serial"); #endif - ios = file->iosystem; - if(ios == NULL){ + if (!(ios = file->iosystem)) + { fprintf(stderr,"Failed to find iosystem handle \n"); return PIO_EBADID; } - vdesc = (file->varlist)+vid[0]; + ncid = file->fh; - if(vdesc == NULL){ + if (!(vdesc = (file->varlist) + vid[0])) + { fprintf(stderr,"Failed to find variable handle %d\n",vid[0]); return PIO_EBADID; } - msg = 0; - if(ios->async_interface && ! ios->ioproc){ + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (! 
ios->ioproc) + { + int msg = 0; + if(ios->comp_rank==0) mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm); } + } ierr = PIOc_inq_varndims(file->fh, vid[0], &fndims); MPI_Type_size(basetype, &tsize); - if(ios->ioproc){ + if (ios->ioproc) + { io_region *region; int regioncnt; int rrcnt; @@ -636,21 +753,29 @@ int pio_write_darray_multi_nc_serial(file_desc_t *file, const int nvars, const i rrcnt=0; - for(regioncnt=0;regioncntrecord >= 0){ - for(i=fndims-ndims;irecord >= 0) + { + for (i = fndims - ndims; i < fndims; i++) + { tmp_start[i+regioncnt*fndims] = region->start[i-(fndims-ndims)]; tmp_count[i+regioncnt*fndims] = region->count[i-(fndims-ndims)]; } // Non-time dependent array - }else{ - for( i=0;istart[i]; tmp_count[i+regioncnt*fndims] = region->count[i]; } @@ -658,25 +783,31 @@ int pio_write_darray_multi_nc_serial(file_desc_t *file, const int nvars, const i region = region->next; } } - if(ios->io_rank>0){ + if (ios->io_rank > 0) + { mpierr = MPI_Recv( &ierr, 1, MPI_INT, 0, 0, ios->io_comm, &status); // task0 is ready to recieve MPI_Send( &llen, 1, MPI_OFFSET, 0, ios->io_rank, ios->io_comm); - if(llen>0){ + if (llen>0) + { MPI_Send( &maxregions, 1, MPI_INT, 0, ios->io_rank+ios->num_iotasks, ios->io_comm); MPI_Send( tmp_start, maxregions*fndims, MPI_OFFSET, 0, ios->io_rank+2*ios->num_iotasks, ios->io_comm); MPI_Send( tmp_count, maxregions*fndims, MPI_OFFSET, 0, ios->io_rank+3*ios->num_iotasks, ios->io_comm); // printf("%s %d %ld\n",__FILE__,__LINE__,nvars*llen); MPI_Send( IOBUF, nvars*llen, basetype, 0, ios->io_rank+4*ios->num_iotasks, ios->io_comm); } - }else{ + } + else + { size_t rlen; int rregions; size_t start[fndims], count[fndims]; size_t loffset; mpierr = MPI_Type_size(basetype, &dsize); - for(int rtask=0; rtasknum_iotasks; rtask++){ - if(rtask>0){ + for (int rtask=0; rtasknum_iotasks; rtask++) + { + if (rtask>0) + { mpierr = MPI_Send( &ierr, 1, MPI_INT, rtask, 0, ios->io_comm); // handshake - tell the sending task I'm ready MPI_Recv( &rlen, 1, MPI_OFFSET, rtask, rtask, ios->io_comm, &status); if(rlen>0){ @@ -686,40 +817,54 @@ int pio_write_darray_multi_nc_serial(file_desc_t *file, const int nvars, const i // printf("%s %d %d %ld\n",__FILE__,__LINE__,rtask,nvars*rlen); MPI_Recv( IOBUF, nvars*rlen, basetype, rtask, rtask+4*ios->num_iotasks, ios->io_comm, &status); } - }else{ + } + else + { rlen = llen; rregions = maxregions; } - if(rlen>0){ + if (rlen>0) + { loffset = 0; - for(regioncnt=0;regioncntrecord>=0){ - if(fndims>1 && ndims0){ + if (vdesc->record>=0) + { + if (fndims>1 && ndims0) + { count[0] = 1; start[0] = frame[nv]; - }else if(fndims==ndims){ + } + else if (fndims==ndims) + { start[0]+=vdesc->record; } } - - - - if(basetype == MPI_INTEGER){ + if (basetype == MPI_INTEGER) + { ierr = nc_put_vara_int (ncid, vid[nv], start, count, (const int *) bufptr); - }else if(basetype == MPI_DOUBLE || basetype == MPI_REAL8){ + } + else if (basetype == MPI_DOUBLE || basetype == MPI_REAL8) + { ierr = nc_put_vara_double (ncid, vid[nv], start, count, (const double *) bufptr); - }else if(basetype == MPI_FLOAT || basetype == MPI_REAL4){ + } + else if (basetype == MPI_FLOAT || basetype == MPI_REAL4) + { ierr = nc_put_vara_float (ncid,vid[nv], start, count, (const float *) bufptr); - }else{ + } + else + { fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",(int) basetype); } @@ -730,7 +875,8 @@ int pio_write_darray_multi_nc_serial(file_desc_t *file, const int nvars, const i 
} size_t tsize; tsize = 1; - for(int i=0;imode & PIO_WRITE)){ + if (! (file->mode & PIO_WRITE)) + { fprintf(stderr,"ERROR: Attempt to write to read-only file\n"); return PIO_EPERM; } iodesc = pio_get_iodesc_from_id(ioid); - if(iodesc == NULL){ + if (iodesc == NULL) + { // print_trace(NULL); //fprintf(stderr,"iodesc handle not found %d %d\n",ioid,__LINE__); return PIO_EBADID; @@ -791,42 +950,57 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con ios = file->iosystem; // rlen = iodesc->llen*nvars; rlen=0; - if(iodesc->llen>0){ + if (iodesc->llen>0) + { rlen = iodesc->maxiobuflen*nvars; } - if(vdesc0->iobuf != NULL){ + if (vdesc0->iobuf) + { piodie("Attempt to overwrite existing io buffer",__FILE__,__LINE__); } - if(iodesc->rearranger>0){ - if(rlen>0){ + if (iodesc->rearranger>0) + { + if (rlen>0) + { MPI_Type_size(iodesc->basetype, &vsize); vdesc0->iobuf = bget((size_t) vsize* (size_t) rlen); - if(vdesc0->iobuf==NULL){ + if (vdesc0->iobuf==NULL) + { printf("%s %d %d %ld\n",__FILE__,__LINE__,nvars,vsize*rlen); piomemerror(*ios,(size_t) rlen*(size_t) vsize, __FILE__,__LINE__); } - if(iodesc->needsfill && iodesc->rearranger==PIO_REARR_BOX){ - if(vsize==4){ - for(int nv=0;nv < nvars; nv++){ - for(int i=0;imaxiobuflen;i++){ + if (iodesc->needsfill && iodesc->rearranger==PIO_REARR_BOX) + { + if (vsize==4) + { + for (int nv=0;nv < nvars; nv++) + { + for (int i=0;imaxiobuflen;i++) + { ((float *) vdesc0->iobuf)[i+nv*(iodesc->maxiobuflen)] = ((float *)fillvalue)[nv]; } } - }else if(vsize==8){ - for(int nv=0;nv < nvars; nv++){ - for(int i=0;imaxiobuflen;i++){ + } + else if (vsize==8) + { + for (int nv=0;nv < nvars; nv++) + { + for (int i=0;imaxiobuflen;i++) + { ((double *)vdesc0->iobuf)[i+nv*(iodesc->maxiobuflen)] = ((double *)fillvalue)[nv]; } } } } } + ierr = rearrange_comp2io(*ios, iodesc, array, vdesc0->iobuf, nvars); }/* this is wrong, need to think about it else{ vdesc0->iobuf = array; } */ - switch(file->iotype){ + switch(file->iotype) + { case PIO_IOTYPE_NETCDF4P: case PIO_IOTYPE_PNETCDF: ierr = pio_write_darray_multi_nc(file, nvars, vid, @@ -842,7 +1016,8 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con iodesc->maxregions, iodesc->firstregion, iodesc->llen, iodesc->maxiobuflen, iodesc->num_aiotasks, vdesc0->iobuf, frame); - if(vdesc0->iobuf != NULL){ + if (vdesc0->iobuf) + { brel(vdesc0->iobuf); vdesc0->iobuf = NULL; } @@ -850,30 +1025,38 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con } - - if(iodesc->rearranger == PIO_REARR_SUBSET && iodesc->needsfill && - iodesc->holegridsize>0){ - if(vdesc0->fillbuf != NULL){ + iodesc->holegridsize>0) + { + if (vdesc0->fillbuf) + { piodie("Attempt to overwrite existing buffer",__FILE__,__LINE__); } vdesc0->fillbuf = bget(iodesc->holegridsize*vsize*nvars); //printf("%s %d %x\n",__FILE__,__LINE__,vdesc0->fillbuf); - if(vsize==4){ - for(int nv=0;nvholegridsize;i++){ + if (vsize==4) + { + for (int nv=0;nvholegridsize;i++) + { ((float *) vdesc0->fillbuf)[i+nv*iodesc->holegridsize] = ((float *) fillvalue)[nv]; } } - }else if(vsize==8){ - for(int nv=0;nvholegridsize;i++){ + } + else if (vsize==8) + { + for (int nv=0;nvholegridsize;i++) + { ((double *) vdesc0->fillbuf)[i+nv*iodesc->holegridsize] = ((double *) fillvalue)[nv]; } } } - switch(file->iotype){ + switch(file->iotype) + { case PIO_IOTYPE_PNETCDF: ierr = pio_write_darray_multi_nc(file, nvars, vid, iodesc->ndims, iodesc->basetype, iodesc->gsize, @@ -902,13 +1085,10 @@ int PIOc_write_darray_multi(const int 
@@ -902,13 +1085,10 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
 
   flush_output_buffer(file, flushtodisk, 0);
 
-  return ierr;
-
+  return ierr;
 }
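For context, the multi-variable entry point just closed above is what flush_buffer() ultimately calls. A caller-side sketch, under the signature shown in this patch; ncid, varids, and decomp_id stand for handles returned by PIOc_createfile, PIOc_def_var, and PIOc_InitDecomp, and note that the patch indexes fillvalue as a flat per-variable array of values, which is why the array is passed cast to void **:

    /* Sketch: write two variables sharing one decomposition in a single
     * aggregated call.  All identifiers are illustrative placeholders. */
    #include <pio.h>
    #include <stdbool.h>

    void write_two_vars(int ncid, int varids[2], int decomp_id,
                        PIO_Offset local_len, double *packed_data)
    {
        int frames[2] = {0, 0};             /* record index per variable */
        double fills[2] = {-999.0, -999.0}; /* fill value per variable */

        /* packed_data holds var0's local slice followed by var1's. */
        PIOc_write_darray_multi(ncid, varids, decomp_id, 2, local_len,
                                packed_data, frames, (void **)fills, true);
    }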
 
-/** @brief Write a distributed array to the output file.
- *  @ingroup PIO_write_darray
+/** Write a distributed array to the output file.
  *
  * This routine aggregates output on the compute nodes and only sends
  * it to the IO nodes when the compute buffer is full or when a flush
@@ -918,24 +1098,23 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
  * @param[in] vid: the variable ID returned by PIOc_def_var().
  * @param[in] ioid: the I/O description ID as passed back by
  * PIOc_InitDecomp().
  * @param[in] arraylen: the length of the array to be written. This
  * is the length of the distributed array. That is, the length of
  * the portion of the data that is on the processor.
  * @param[in] array: pointer to the data to be written. This is a
  * pointer to the distributed portion of the array that is on this
  * processor.
  * @param[in] fillvalue: pointer to the fill value to be used for
  * missing data.
  *
  * @returns 0 for success, non-zero error code for failure.
+ * @ingroup PIO_write_darray
  */
 #ifdef PIO_WRITE_BUFFERING
-  int PIOc_write_darray(const int ncid, const int vid, const int ioid, const PIO_Offset arraylen, void *array, void *fillvalue)
+int PIOc_write_darray(const int ncid, const int vid, const int ioid,
+                      const PIO_Offset arraylen, void *array, void *fillvalue)
 {
-  iosystem_desc_t *ios;
+  iosystem_desc_t *ios;  /** Pointer to io system information. */
   file_desc_t *file;
   io_desc_t *iodesc;
   var_desc_t *vdesc;
@@ -955,50 +1134,53 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
   ierr = PIO_NOERR;
   needsflush = 0; // false
   file = pio_get_file_from_id(ncid);
-  if(file == NULL){
+  if (file == NULL)
+    {
     fprintf(stderr,"File handle not found %d %d\n",ncid,__LINE__);
     return PIO_EBADID;
   }
-  if(! (file->mode & PIO_WRITE)){
+  if (! (file->mode & PIO_WRITE))
+    {
     fprintf(stderr,"ERROR: Attempt to write to read-only file\n");
     return PIO_EPERM;
   }
   iodesc = pio_get_iodesc_from_id(ioid);
-  if(iodesc == NULL){
+  if (iodesc == NULL)
+    {
     fprintf(stderr,"iodesc handle not found %d %d\n",ioid,__LINE__);
     return PIO_EBADID;
   }
   ios = file->iosystem;
 
   vdesc = (file->varlist)+vid;
   if(vdesc == NULL)
     return PIO_EBADID;
-  if(vdesc->record<0){
-    recordvar=false;
-  }else{
-    recordvar=true;
-  }
-  if(iodesc->ndof != arraylen){
+  /* Is this a record variable? */
+  recordvar = vdesc->record < 0 ? false : true;
+
+  if (iodesc->ndof != arraylen)
+    {
     fprintf(stderr,"ndof=%ld, arraylen=%ld\n",iodesc->ndof,arraylen);
     piodie("ndof != arraylen",__FILE__,__LINE__);
   }
   wmb = &(file->buffer);
-  if(wmb->ioid == -1){
-    if(recordvar){
+  if (wmb->ioid == -1)
+    {
+      if (recordvar)
       wmb->ioid = ioid;
-    }else{
+      else
       wmb->ioid = -(ioid);
   }
-  }else{
+  else
+    {
     // separate record and non-record variables
-    if(recordvar){
-      while(wmb->next != NULL && wmb->ioid!=ioid){
+    if (recordvar)
+      {
+        while(wmb->next && wmb->ioid!=ioid)
           if(wmb->next!=NULL)
             wmb = wmb->next;
-      }
 #ifdef _PNETCDF
       /* flush the previous record before starting a new one. this is collective */
       //      if(vdesc->request != NULL && (vdesc->request[0] != NC_REQ_NULL) ||
       //         (wmb->frame != NULL && vdesc->record != wmb->frame[0])){
       //        needsflush = 2;  // flush to disk
       //      }
#endif
-    }else{
-      while(wmb->next != NULL && wmb->ioid!= -(ioid)){
+      }
    else
+      {
+        while(wmb->next && wmb->ioid!= -(ioid))
+          {
          if(wmb->next!=NULL)
            wmb = wmb->next;
       }
     }
   }
-  if((recordvar && wmb->ioid != ioid) || (!recordvar && wmb->ioid != -(ioid))){
+  if ((recordvar && wmb->ioid != ioid) || (!recordvar && wmb->ioid != -(ioid)))
+    {
     wmb->next = (wmulti_buffer *) bget((bufsize) sizeof(wmulti_buffer));
-    if(wmb->next == NULL){
+    if (wmb->next == NULL)
      piomemerror(*ios,sizeof(wmulti_buffer), __FILE__,__LINE__);
-    }
    wmb=wmb->next;
    wmb->next=NULL;
-    if(recordvar){
+    if (recordvar)
      wmb->ioid = ioid;
-    }else{
+    else
      wmb->ioid = -(ioid);
-    }
    wmb->validvars=0;
    wmb->arraylen=arraylen;
    wmb->vid=NULL;
@@ -1033,68 +1217,71 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
    wmb->fillvalue=NULL;
  }
 
-
   MPI_Type_size(iodesc->basetype, &tsize);
   // At this point wmb should be pointing to a new or existing buffer
   // so we can add the data
   //  printf("%s %d %X %d %d %d\n",__FILE__,__LINE__,wmb->data,wmb->validvars,arraylen,tsize);
   //  cn_buffer_report(*ios, true);
   bfreespace(&totfree, &maxfree);
-  if(needsflush==0){
+  if (needsflush == 0)
     needsflush = (maxfree <= 1.1*(1+wmb->validvars)*arraylen*tsize );
-  }
   MPI_Allreduce(MPI_IN_PLACE, &needsflush, 1,  MPI_INT,  MPI_MAX, ios->comp_comm);
-
-  if(needsflush > 0 ){
+  if (needsflush > 0 )
+    {
    // need to flush first
    //    printf("%s %d %ld %d %ld %ld\n",__FILE__,__LINE__,maxfree, wmb->validvars, (1+wmb->validvars)*arraylen*tsize,totfree);
    cn_buffer_report(*ios, true);
    flush_buffer(ncid,wmb, needsflush==2);  // if needsflush == 2 flush to disk otherwise just flush to io node
  }
-  if(arraylen > 0){
-    wmb->data = bgetr( wmb->data, (1+wmb->validvars)*arraylen*tsize);
-    if(wmb->data == NULL){
+
+  if (arraylen > 0)
+    if (!(wmb->data = bgetr(wmb->data, (1+wmb->validvars)*arraylen*tsize)))
      piomemerror(*ios, (1+wmb->validvars)*arraylen*tsize , __FILE__,__LINE__);
-    }
-  }
-  wmb->vid = (int *) bgetr( wmb->vid,sizeof(int)*( 1+wmb->validvars));
-  if(wmb->vid == NULL){
+
+  if (!(wmb->vid = (int *) bgetr(wmb->vid,sizeof(int)*(1+wmb->validvars))))
    piomemerror(*ios, (1+wmb->validvars)*sizeof(int) , __FILE__,__LINE__);
-  }
-  if(vdesc->record>=0){
-    wmb->frame = (int *) bgetr( wmb->frame,sizeof(int)*( 1+wmb->validvars));
-    if(wmb->frame == NULL){
+
+  if (vdesc->record >= 0)
+    if (!(wmb->frame = (int *)bgetr(wmb->frame, sizeof(int) * (1 + wmb->validvars))))
      piomemerror(*ios, (1+wmb->validvars)*sizeof(int) , __FILE__,__LINE__);
-    }
-  }
-  if(iodesc->needsfill){
-    wmb->fillvalue = bgetr( wmb->fillvalue,tsize*( 1+wmb->validvars));
-    if(wmb->fillvalue == NULL){
-      piomemerror(*ios, (1+wmb->validvars)*tsize , __FILE__,__LINE__);
-    }
-  }
 
-  if(iodesc->needsfill){
-    if(fillvalue != NULL){
+  if (iodesc->needsfill)
+    if (!(wmb->fillvalue = bgetr(wmb->fillvalue,tsize*(1+wmb->validvars))))
+      piomemerror(*ios, (1+wmb->validvars)*tsize , __FILE__,__LINE__);
+
+  if (iodesc->needsfill)
+    {
+      if (fillvalue)
+        {
      memcpy((char *) wmb->fillvalue+tsize*wmb->validvars,fillvalue, tsize);
-    }else{
+        }
+      else
+        {
      vtype = (MPI_Datatype) iodesc->basetype;
-      if(vtype == MPI_INTEGER){
+      if (vtype == MPI_INTEGER)
+        {
        int fill = PIO_FILL_INT;
        memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
-      }else if(vtype == MPI_FLOAT || vtype == MPI_REAL4){
+        }
      else if (vtype == MPI_FLOAT || vtype == MPI_REAL4)
+        {
        float fill = PIO_FILL_FLOAT;
        memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
-      }else if(vtype == MPI_DOUBLE || vtype == MPI_REAL8){
+        }
      else if (vtype == MPI_DOUBLE || vtype == MPI_REAL8)
+        {
        double fill = PIO_FILL_DOUBLE;
        memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
-      }else if(vtype == MPI_CHARACTER){
+        }
      else if (vtype == MPI_CHARACTER)
+        {
        char fill = PIO_FILL_CHAR;
        memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
-      }else{
+        }
      else
+        {
        fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",vtype);
      }
    }
  }
@@ -1104,9 +1291,8 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
   wmb->arraylen = arraylen;
   wmb->vid[wmb->validvars]=vid;
   bufptr = (void *)((char *) wmb->data + arraylen*tsize*wmb->validvars);
-  if(arraylen>0){
+  if (arraylen>0)
    memcpy(bufptr, array, arraylen*tsize);
-  }
  /*
    if(tsize==8){
    double asum=0.0;
@@ -1120,30 +1306,37 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
 
   //  printf("%s %d %d %d %d %X\n",__FILE__,__LINE__,wmb->validvars,wmb->ioid,vid,bufptr);
-  if(wmb->frame!=NULL){
+  if (wmb->frame!=NULL)
    wmb->frame[wmb->validvars]=vdesc->record;
-  }
  wmb->validvars++;
 
  //  printf("%s %d %d %d %d %d\n",__FILE__,__LINE__,wmb->validvars,iodesc->maxbytes/tsize, iodesc->ndof, iodesc->llen);
-  if(wmb->validvars >= iodesc->maxbytes/tsize){
+  if (wmb->validvars >= iodesc->maxbytes/tsize)
    PIOc_sync(ncid);
-  }
 
  return ierr;
-
 }
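The flush test in the buffered routine above gives each task 10% headroom over the bytes the incoming variable will add, then reduces with MPI_MAX so every task in the compute communicator reaches the same verdict. A standalone sketch of that collective decision, with the buffer-query result passed in as a plain argument:

    /* Sketch: collective flush decision.  maxfree is this task's largest
     * free block in the aggregation buffer; illustrative only. */
    #include <mpi.h>

    int decide_flush(long maxfree, int validvars, long arraylen, int tsize,
                     MPI_Comm comp_comm)
    {
        int needsflush = (maxfree <= 1.1 * (1 + validvars) * arraylen * tsize);
        MPI_Allreduce(MPI_IN_PLACE, &needsflush, 1, MPI_INT, MPI_MAX, comp_comm);
        return needsflush;  /* nonzero on every task, or zero on every task */
    }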
 #else
 
-/** @brief Write a distributed array to the output file
+/** Write a distributed array to the output file.
  *  @ingroup PIO_write_darray
  *
- * This version of the routine does not buffer, all data is communicated to the io tasks
- * before the routine returns
+ * This version of the routine does not buffer; all data is
+ * communicated to the io tasks before the routine returns.
+ *
+ * @param ncid identifies the netCDF file
+ * @param vid
+ * @param ioid
+ * @param arraylen
+ * @param array
+ * @param fillvalue
+ *
+ * @return
  */
-  int PIOc_write_darray(const int ncid, const int vid, const int ioid, const PIO_Offset arraylen, void *array, void *fillvalue)
+int PIOc_write_darray(const int ncid, const int vid, const int ioid,
+                      const PIO_Offset arraylen, void *array, void *fillvalue)
 {
-  iosystem_desc_t *ios;
+  iosystem_desc_t *ios;  /** Pointer to io system information. */
   file_desc_t *file;
   io_desc_t *iodesc;
   void *iobuf;
@@ -1156,12 +1349,14 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
 
   file = pio_get_file_from_id(ncid);
-  if(file == NULL){
+  if (file == NULL)
+    {
     fprintf(stderr,"File handle not found %d %d\n",ncid,__LINE__);
     return PIO_EBADID;
   }
   iodesc = pio_get_iodesc_from_id(ioid);
-  if(iodesc == NULL){
+  if (iodesc == NULL)
+    {
     fprintf(stderr,"iodesc handle not found %d %d\n",ioid,__LINE__);
     return PIO_EBADID;
   }
@@ -1170,20 +1365,20 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
   ios = file->iosystem;
 
   rlen = iodesc->llen;
-  if(iodesc->rearranger>0){
-    if(rlen>0){
+  if (iodesc->rearranger>0)
+    {
+      if (rlen>0)
+        {
      MPI_Type_size(iodesc->basetype, &tsize);
      //  iobuf = bget(tsize*rlen);
      iobuf = malloc((size_t) tsize*rlen);
-      if(iobuf==NULL){
+      if (!iobuf)
        piomemerror(*ios,rlen*(size_t) tsize, __FILE__,__LINE__);
      }
-    }
    //    printf(" rlen = %d %ld\n",rlen,iobuf);
 
    //  }
-
    ierr = rearrange_comp2io(*ios, iodesc, array, iobuf, 1);
 
    printf("%s %d ",__FILE__,__LINE__);
@@ -1191,10 +1386,13 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
      for(int n=0;n<rlen;n++)
        printf(" %d ",((int *) iobuf)[n]);
    printf("\n");
-  }else{
+  }
  else
+    {
    iobuf = array;
  }
-  switch(file->iotype){
+  switch(file->iotype)
+    {
  case PIO_IOTYPE_PNETCDF:
  case PIO_IOTYPE_NETCDF:
  case PIO_IOTYPE_NETCDF4P:
@@ -1206,17 +1404,22 @@ int PIOc_write_darray_multi(const int ncid, const int vid[], const int ioid, con
 
   free(iobuf);
 
   return ierr;
-
 }
 #endif
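Before the read path below, a caller-side sketch of the basic round trip these routines support. ncid, varid, and ioid are placeholders for handles created elsewhere, and error checking is omitted; both calls use the signatures shown in this patch:

    /* Sketch: write a local slice of a distributed variable, then read it
     * back through the same decomposition.  Placeholders throughout. */
    #include <pio.h>

    void round_trip(int ncid, int varid, int ioid, PIO_Offset local_len,
                    double *local_data, double *readback)
    {
        double fill = -999.0;

        PIOc_write_darray(ncid, varid, ioid, local_len, local_data, &fill);
        PIOc_sync(ncid);  /* make sure buffered data reaches the file */
        PIOc_read_darray(ncid, varid, ioid, local_len, readback);
    }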
 
-/** @brief Read an array of data from a file to the (parallel) IO library.
+/** Read an array of data from a file to the (parallel) IO library.
  *  @ingroup PIO_read_darray
+ *
+ * @param file
+ * @param iodesc
+ * @param vid
+ * @param IOBUF
  */
-int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void *IOBUF)
+int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid,
+                       void *IOBUF)
 {
   int ierr=PIO_NOERR;
-  iosystem_desc_t *ios;
+  iosystem_desc_t *ios;  /** Pointer to io system information. */
   var_desc_t *vdesc;
   int ndims, fndims;
   MPI_Status status;
@@ -1240,7 +1443,8 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void
   if(fndims==ndims)
     vdesc->record=-1;
 
-  if(ios->ioproc){
+  if (ios->ioproc)
+    {
     io_region *region;
     size_t start[fndims];
     size_t count[fndims];
@@ -1261,21 +1465,27 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void
     // calling program to change the basetype.
     region = iodesc->firstregion;
     MPI_Type_size(iodesc->basetype, &tsize);
-    if(fndims>ndims){
+    if (fndims>ndims)
+      {
      ndims++;
      if(vdesc->record<0)
        vdesc->record=0;
    }
-    for(regioncnt=0;regioncnt<iodesc->maxregions;regioncnt++){
+    for (regioncnt=0;regioncnt<iodesc->maxregions;regioncnt++)
+      {
      //    printf("%s %d %d %ld %d %d\n",__FILE__,__LINE__,regioncnt,region,fndims,ndims);
      tmp_bufsize=1;
-      if(region==NULL || iodesc->llen==0){
-       for(i=0;i<fndims;i++){
+      if (region==NULL || iodesc->llen==0)
+        {
+          for (i=0;i<fndims;i++)
+            {
          start[i] = 0;
          count[i] = 0;
        }
        bufptr=NULL;
-      }else{
+        }
      else
+        {
        bufptr = (void *)((char *) IOBUF + tsize*region->loffset);
        //      printf("%s %d %ld %ld %ld\n",__FILE__,__LINE__,iodesc->llen - region->loffset, iodesc->llen, region->loffset);
-       if(vdesc->record >= 0 && fndims>1){
+        if (vdesc->record >= 0 && fndims>1)
+          {
          start[0] = vdesc->record;
-         for(i=1;i<ndims;i++){
+          for (i=1;i<ndims;i++)
+            {
            start[i] = region->start[i-1];
            count[i] = region->count[i-1];
            //      printf("%s %d %d %ld %ld\n",__FILE__,__LINE__,i,start[i],count[i]);
          }
          if(count[1]>0)
            count[0] = 1;
-       }else{
+          }
        else
+          {
          // Non-time dependent array
-         for(i=0;i<ndims;i++){
+          for (i=0;i<ndims;i++)
+            {
            start[i] = region->start[i];
            count[i] = region->count[i];
            //      printf("%s %d %d %ld %ld\n",__FILE__,__LINE__,i,start[i],count[i]);
          }
        }
      }
@@ -1302,16 +1517,24 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void
 
-      switch(file->iotype){
+      switch(file->iotype)
+        {
#ifdef _NETCDF4
      case PIO_IOTYPE_NETCDF4P:
-       if(iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8){
+        if (iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8)
+          {
          ierr = nc_get_vara_double (file->fh, vid,start,count, bufptr);
-       } else if(iodesc->basetype == MPI_INTEGER){
+          }
        else if (iodesc->basetype == MPI_INTEGER)
+          {
          ierr = nc_get_vara_int (file->fh, vid, start, count,  bufptr);
-       }else if(iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4){
+          }
        else if (iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4)
+          {
          ierr = nc_get_vara_float (file->fh, vid, start, count,  bufptr);
-       }else{
+          }
        else
+          {
          fprintf(stderr,"Type not recognized %d in pioc_read_darray\n",(int) iodesc->basetype);
        }
        break;
@@ -1320,25 +1543,29 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void
      case PIO_IOTYPE_PNETCDF:
        {
          tmp_bufsize=1;
-         for(int j=0;j<fndims;j++){
+          for (int j=0;j<fndims;j++)
+            {
            tmp_bufsize *= count[j];
          }
-         if(tmp_bufsize>0){
+          if (tmp_bufsize > 0)
+            {
            startlist[rrlen] = (PIO_Offset *) bget(fndims * sizeof(PIO_Offset));
            countlist[rrlen] = (PIO_Offset *) bget(fndims * sizeof(PIO_Offset));
-           for(int j=0;j<fndims;j++){
+            for (int j=0;j<fndims;j++)
+              {
              startlist[rrlen][j] = start[j];
              countlist[rrlen][j] = count[j];
-             // printf("%s %d %d %d %d %ld %ld %ld\n",__FILE__,__LINE__,realregioncnt,iodesc->maxregions, j,start[j],count[j],tmp_bufsize);
+              /* printf("%s %d %d %d %d %ld %ld %ld\n",__FILE__,__LINE__,realregioncnt,
+                 iodesc->maxregions, j,start[j],count[j],tmp_bufsize);*/
            }
            rrlen++;
          }
-         if(regioncnt==iodesc->maxregions-1){
+          if (regioncnt==iodesc->maxregions-1)
+            {
            ierr = ncmpi_get_varn_all(file->fh, vid, rrlen, startlist,
                                      countlist, IOBUF, iodesc->llen, iodesc->basetype);
-           for(i=0;i<rrlen;i++){
+            for (i=0;i<rrlen;i++)
+              {
              brel(startlist[i]);
              brel(countlist[i]);
            }
          }
        }
        break;
#endif
      default:
        ierr = iotype_error(file->iotype,__FILE__,__LINE__);
      }
-      if(region != NULL)
+      if (region)
        region = region->next;
    } // for(regioncnt=0;...)
  }
 
@@ -1363,14 +1590,21 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid, void
 
   return ierr;
 }
-
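The start/count construction just shown is the heart of both read paths: for a record variable the file has one more dimension than the decomposition, so region dimension i maps to file dimension i+1 while start[0] carries the record number. That mapping in isolation, with the region fields passed as plain arrays (names illustrative):

    /* Sketch: build start/count for one region of a record variable.
     * ndims counts the record dimension; region_start/region_count
     * describe only the decomposed dimensions. */
    #include <stddef.h>

    static void record_start_count(size_t *start, size_t *count, int ndims,
                                   int record, const size_t *region_start,
                                   const size_t *region_count)
    {
        start[0] = (size_t)record;           /* which record (time step) */
        for (int i = 1; i < ndims; i++)
        {
            start[i] = region_start[i - 1];  /* shift past the record dim */
            count[i] = region_count[i - 1];
        }
        count[0] = count[1] > 0 ? 1 : 0;     /* one record, if any data */
    }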
-/** @brief Read an array of data from a file to the (serial) IO library.
+/** Read an array of data from a file to the (serial) IO library.
  *  @ingroup PIO_read_darray
+ *
+ * @param file
+ * @param iodesc
+ * @param vid
+ * @param IOBUF
+ *
+ * @returns
  */
-int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, const int vid, void *IOBUF)
+int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc,
+                              const int vid, void *IOBUF)
 {
   int ierr=PIO_NOERR;
-  iosystem_desc_t *ios;
+  iosystem_desc_t *ios;  /** Pointer to io system information. */
   var_desc_t *vdesc;
   int ndims, fndims;
   MPI_Status status;
@@ -1394,7 +1628,8 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, const int vi
   if(fndims==ndims)
     vdesc->record=-1;
 
-  if(ios->ioproc){
+  if (ios->ioproc)
+    {
     io_region *region;
     size_t start[fndims];
     size_t count[fndims];
@@ -1413,80 +1648,114 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, const int vi
     // calling program to change the basetype.
     region = iodesc->firstregion;
     MPI_Type_size(iodesc->basetype, &tsize);
-    if(fndims>ndims){
+    if (fndims>ndims)
+      {
      if(vdesc->record<0)
        vdesc->record=0;
    }
-    for(regioncnt=0;regioncnt<iodesc->maxregions;regioncnt++){
-      if(region==NULL || iodesc->llen==0){
-       for(i=0;i<fndims;i++){
+    for (regioncnt=0;regioncnt<iodesc->maxregions;regioncnt++)
+      {
+        if (region==NULL || iodesc->llen==0)
+          {
+            for (i = 0; i < fndims; i++)
+              {
          tmp_start[i+regioncnt*fndims] = 0;
          tmp_count[i+regioncnt*fndims] = 0;
        }
        bufptr=NULL;
-      }else{
-       if(vdesc->record >= 0 && fndims>1){
+          }
        else
+          {
+            if (vdesc->record >= 0 && fndims>1)
+              {
          tmp_start[regioncnt*fndims] = vdesc->record;
-         for(i=1;i<fndims;i++){
+              for (i=1;i<fndims;i++)
+                {
            tmp_start[i+regioncnt*fndims] = region->start[i-1];
            tmp_count[i+regioncnt*fndims] = region->count[i-1];
          }
          if(tmp_count[1+regioncnt*fndims]>0)
            tmp_count[regioncnt*fndims] = 1;
-       }else{
+              }
            else
+              {
          // Non-time dependent array
-         for(i=0;i<fndims;i++){
+              for (i=0;i<fndims;i++)
+                {
            tmp_start[i+regioncnt*fndims] = region->start[i];
            tmp_count[i+regioncnt*fndims] = region->count[i];
          }
        }
      }
      if(region)
        region = region->next;
    } // for(regioncnt=0;...)
-    if(ios->io_rank>0){
+    if (ios->io_rank>0)
+      {
      MPI_Send( &(iodesc->llen), 1, MPI_OFFSET, 0, ios->io_rank, ios->io_comm);
-      if(iodesc->llen > 0){
-       MPI_Send( &(iodesc->maxregions), 1, MPI_INT, 0, ios->num_iotasks+ios->io_rank, ios->io_comm);
-       MPI_Send( tmp_count, iodesc->maxregions*fndims, MPI_OFFSET, 0, 2*ios->num_iotasks+ios->io_rank, ios->io_comm);
-       MPI_Send( tmp_start, iodesc->maxregions*fndims, MPI_OFFSET, 0, 3*ios->num_iotasks+ios->io_rank, ios->io_comm);
-       MPI_Recv(IOBUF, iodesc->llen, iodesc->basetype, 0, 4*ios->num_iotasks+ios->io_rank, ios->io_comm, &status);
+        if (iodesc->llen > 0)
+          {
+            MPI_Send(&(iodesc->maxregions), 1, MPI_INT, 0,
+                     ios->num_iotasks + ios->io_rank, ios->io_comm);
+            MPI_Send(tmp_count, iodesc->maxregions*fndims, MPI_OFFSET, 0,
+                     2 * ios->num_iotasks + ios->io_rank, ios->io_comm);
+            MPI_Send(tmp_start, iodesc->maxregions*fndims, MPI_OFFSET, 0,
+                     3 * ios->num_iotasks + ios->io_rank, ios->io_comm);
+            MPI_Recv(IOBUF, iodesc->llen, iodesc->basetype, 0,
+                     4 * ios->num_iotasks+ios->io_rank, ios->io_comm, &status);
      }
-    }else if(ios->io_rank==0){
+      }
    else if (ios->io_rank == 0)
+      {
      int maxregions=0;
      size_t loffset, regionsize;
      size_t this_start[fndims*iodesc->maxregions];
      size_t this_count[fndims*iodesc->maxregions];
      //      for( i=ios->num_iotasks-1; i>=0; i--){
-      for(int rtask=1;rtask<=ios->num_iotasks;rtask++){
-       if(rtask<ios->num_iotasks){
+      for (int rtask = 1; rtask <= ios->num_iotasks; rtask++)
+        {
+          if (rtask<ios->num_iotasks)
+            {
          MPI_Recv(&tmp_bufsize, 1, MPI_OFFSET, rtask, rtask, ios->io_comm, &status);
-         if(tmp_bufsize>0){
-           MPI_Recv(&maxregions, 1, MPI_INT, rtask, ios->num_iotasks+rtask, ios->io_comm, &status);
-           MPI_Recv(this_count, maxregions*fndims, MPI_OFFSET, rtask, 2*ios->num_iotasks+rtask, ios->io_comm, &status);
-           MPI_Recv(this_start, maxregions*fndims, MPI_OFFSET, rtask, 3*ios->num_iotasks+rtask, ios->io_comm, &status);
+              if (tmp_bufsize>0)
+                {
+                  MPI_Recv(&maxregions, 1, MPI_INT, rtask, ios->num_iotasks+rtask,
+                           ios->io_comm, &status);
+                  MPI_Recv(this_count, maxregions*fndims, MPI_OFFSET, rtask,
+                           2 * ios->num_iotasks + rtask, ios->io_comm, &status);
+                  MPI_Recv(this_start, maxregions*fndims, MPI_OFFSET, rtask,
+                           3 * ios->num_iotasks + rtask, ios->io_comm, &status);
+                }
          }
-       }else{
+          else
+            {
          maxregions=iodesc->maxregions;
          tmp_bufsize=iodesc->llen;
        }
        loffset = 0;
-       for(regioncnt=0;regioncnt<maxregions;regioncnt++){
+        for (regioncnt=0;regioncnt<maxregions;regioncnt++)
+          {
          bufptr=(void *)((char *) IOBUF + tsize*loffset);
          regionsize=1;
-         if(rtask<ios->num_iotasks){
-           for(int m=0; m<fndims; m++){
+          if (rtask<ios->num_iotasks)
+            {
+              for (int m=0; m<fndims; m++)
+                {
              start[m] = this_start[m+regioncnt*fndims];
              count[m] = this_count[m+regioncnt*fndims];
              regionsize*=count[m];
            }
-         }else{
-           for(int m=0; m<fndims; m++){
+            }
          else
+            {
+              for (int m=0; m<fndims; m++)
+                {
              start[m] = tmp_start[m+regioncnt*fndims];
              count[m] = tmp_count[m+regioncnt*fndims];
              regionsize*=count[m];
            }
          }
          loffset+=regionsize;
 
-         if(iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8){
+          if (iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8)
+            {
            ierr = nc_get_vara_double (file->fh, vid,start, count, bufptr);
-         }else if(iodesc->basetype == MPI_INTEGER){
+            }
          else if (iodesc->basetype == MPI_INTEGER)
+            {
            ierr = nc_get_vara_int (file->fh, vid, start, count,  bufptr);
-         }else if(iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4){
+            }
          else if (iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4)
+            {
            ierr = nc_get_vara_float (file->fh, vid, start, count,  bufptr);
-         }else{
-           fprintf(stderr,"Type not recognized %d in pioc_write_darray_nc_serial\n",(int) iodesc->basetype);
+            }
          else
+            {
+              fprintf(stderr,"Type not recognized %d in pioc_read_darray_nc_serial\n",
+                      (int)iodesc->basetype);
          }
 
-         if(ierr != PIO_NOERR){
+          if (ierr != PIO_NOERR)
+            {
            for(int i=0;i<fndims;i++)
              fprintf(stderr,"vid %d dim %d start %ld count %ld\n",vid,i,start[i],count[i]);
          }
        }
-       if(rtask<ios->num_iotasks){
-         MPI_Send(IOBUF, tmp_bufsize, iodesc->basetype, rtask,4*ios->num_iotasks+rtask, ios->io_comm);
-       }
+        if (rtask < ios->num_iotasks)
+          MPI_Send(IOBUF, tmp_bufsize, iodesc->basetype, rtask,
+                   4 * ios->num_iotasks + rtask, ios->io_comm);
      }
    }
  }
@@ -1528,14 +1808,21 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, const int vi
 
   return ierr;
 }
-
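The serial paths above multiplex several logical channels over one io_comm by offsetting tags: message class k from IO rank r uses tag k*num_iotasks + r, so a length, a region count, start arrays, count arrays, and data can all be matched without colliding. A sketch of the scheme (names illustrative):

    /* Sketch: the tag scheme used by the serial read/write paths.  Class 0
     * carries lengths, class 1 region counts, classes 2-3 start/count
     * arrays, and class 4 data.  Illustrative only. */
    static int pio_tag(int msg_class, int io_rank, int num_iotasks)
    {
        return msg_class * num_iotasks + io_rank;
    }

    /* e.g. a worker sends:  MPI_Send(buf, n, type, 0,
     *                           pio_tag(4, io_rank, num_iotasks), io_comm);
     * and rank 0 matches:   MPI_Recv(buf, n, type, rtask,
     *                           pio_tag(4, rtask, num_iotasks), io_comm, &st); */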
 
-/** @brief Read a field from a file to the IO library.
+/** Read a field from a file to the IO library.
  *  @ingroup PIO_read_darray
  *
+ * @param ncid identifies the netCDF file
+ * @param vid
+ * @param ioid
+ * @param arraylen
+ * @param array
+ *
+ * @return
  */
-int PIOc_read_darray(const int ncid, const int vid, const int ioid, const PIO_Offset arraylen, void *array)
+int PIOc_read_darray(const int ncid, const int vid, const int ioid,
+                     const PIO_Offset arraylen, void *array)
 {
-  iosystem_desc_t *ios;
+  iosystem_desc_t *ios;  /** Pointer to io system information. */
   file_desc_t *file;
   io_desc_t *iodesc;
   void *iobuf=NULL;
@@ -1545,35 +1832,46 @@ int PIOc_read_darray(const int ncid, const int vid, const int ioid, const PIO_Of
 
   file = pio_get_file_from_id(ncid);
-  if(file == NULL){
+  if (file == NULL)
+    {
     fprintf(stderr,"File handle not found %d %d\n",ncid,__LINE__);
     return PIO_EBADID;
   }
   iodesc = pio_get_iodesc_from_id(ioid);
-  if(iodesc == NULL){
+  if (iodesc == NULL)
+    {
     fprintf(stderr,"iodesc handle not found %d %d\n",ioid,__LINE__);
     return PIO_EBADID;
   }
   ios = file->iosystem;
-  if(ios->iomaster){
+  if (ios->iomaster)
+    {
     rlen = iodesc->maxiobuflen;
-  }else{
+  }
  else
+    {
    rlen = iodesc->llen;
  }
-  if(iodesc->rearranger > 0){
-    if(ios->ioproc && rlen>0){
+  if (iodesc->rearranger > 0)
+    {
+      if (ios->ioproc && rlen>0)
+        {
      MPI_Type_size(iodesc->basetype, &tsize);
      iobuf = bget(((size_t) tsize)*rlen);
-      if(iobuf==NULL){
+      if (iobuf==NULL)
+        {
        piomemerror(*ios,rlen*((size_t) tsize), __FILE__,__LINE__);
      }
    }
-  }else{
+  }
  else
+    {
    iobuf = array;
  }
 
-  switch(file->iotype){
+  switch(file->iotype)
+    {
  case PIO_IOTYPE_NETCDF:
  case PIO_IOTYPE_NETCDF4C:
    ierr = pio_read_darray_nc_serial(file, iodesc, vid, iobuf);
@@ -1585,7 +1883,8 @@ int PIOc_read_darray(const int ncid, const int vid, const int ioid, const PIO_Of
  default:
    ierr = iotype_error(file->iotype,__FILE__,__LINE__);
  }
-  if(iodesc->rearranger > 0){
+  if (iodesc->rearranger > 0)
+    {
    ierr = rearrange_io2comp(*ios, iodesc, iobuf, array);
 
    if(rlen>0)
@@ -1596,6 +1895,14 @@ int PIOc_read_darray(const int ncid, const int vid, const int ioid, const PIO_Of
 }
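On the read side an intermediate buffer is only needed when a rearranger is active; otherwise the file data can land directly in the caller's array. A condensed sketch of that decision, with malloc standing in for PIO's bget pool allocator and all names illustrative:

    /* Sketch: choose between a scratch IO buffer and the caller's array. */
    #include <stdlib.h>

    void *pick_iobuf(int rearranger, int ioproc, size_t rlen, size_t tsize,
                     void *array, void **scratch_out)
    {
        *scratch_out = NULL;
        if (rearranger > 0)                 /* data must be rearranged */
        {
            if (ioproc && rlen > 0)
                *scratch_out = malloc(rlen * tsize);  /* read lands here */
            return *scratch_out;
        }
        return array;                       /* zero-copy path */
    }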
 
+/** Flush the output buffer.
+ *
+ * @param file
+ * @param force
+ * @param addsize
+ *
+ * @return
+ */
 int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
 {
   var_desc_t *vdesc;
@@ -1611,17 +1918,20 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
 
   ierr = ncmpi_inq_buffer_usage(file->fh, &usage);
 
-  if(!force && file->iosystem->io_comm != MPI_COMM_NULL){
+  if (!force && file->iosystem->io_comm != MPI_COMM_NULL)
+    {
     usage += addsize;
     MPI_Allreduce(MPI_IN_PLACE, &usage, 1,  MPI_OFFSET,  MPI_MAX,
                   file->iosystem->io_comm);
   }
 
-  if(usage > maxusage){
+  if (usage > maxusage)
+    {
     maxusage = usage;
   }
-  if(force || usage>=PIO_BUFFER_SIZE_LIMIT){
+  if (force || usage>=PIO_BUFFER_SIZE_LIMIT)
+    {
     int rcnt;
     bool prev_dist=false;
     int prev_record=-1;
@@ -1631,28 +1941,32 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
     maxreq = 0;
     reqcnt=0;
     rcnt=0;
-    for(int i=0; i<PIO_MAX_VARS; i++){
+    for (int i=0; i<PIO_MAX_VARS; i++)
+      {
      vdesc = file->varlist+i;
      reqcnt+=vdesc->nreqs;
-      if(vdesc->nreqs>0) maxreq = i;
+      if (vdesc->nreqs > 0)
+        maxreq = i;
    }
    int request[reqcnt];
    int status[reqcnt];
 
-    for(int i=0; i<=maxreq; i++){
+    for (int i = 0; i <= maxreq; i++)
+      {
      vdesc = file->varlist+i;
#ifdef MPIO_ONESIDED
      /*onesided optimization requires that all of the requests in a wait_all call represent
       a contiguous block of data in the file */
-      if(rcnt>0 && (prev_record != vdesc->record ||
-                   vdesc->nreqs==0)){
+      if (rcnt>0 && (prev_record != vdesc->record || vdesc->nreqs==0))
+        {
        ierr = ncmpi_wait_all(file->fh, rcnt,  request,status);
        rcnt=0;
      }
      prev_record = vdesc->record;
#endif
      //      printf("%s %d %d %d %d \n",__FILE__,__LINE__,i,vdesc->nreqs,vdesc->request);
-      for(reqcnt=0;reqcnt<vdesc->nreqs;reqcnt++){
+      for (reqcnt=0;reqcnt<vdesc->nreqs;reqcnt++)
+        {
        request[rcnt++] = max(vdesc->request[reqcnt],NC_REQ_NULL);
      }
      free(vdesc->request);
@@ -1667,7 +1981,8 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
      //    if(file->iosystem->io_rank==0){
      //      printf("%s %d %d\n",__FILE__,__LINE__,rcnt);
      //    }
-    if(rcnt>0){
+    if (rcnt > 0)
+      {
      /*
       if(file->iosystem->io_rank==0){
       printf("%s %d %d ",__FILE__,__LINE__,rcnt);
@@ -1678,13 +1993,16 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
      }*/
      ierr = ncmpi_wait_all(file->fh, rcnt, request,status);
    }
-    for(int i=0; i<PIO_MAX_VARS; i++){
+    for (int i=0; i<PIO_MAX_VARS; i++)
+      {
      vdesc = file->varlist+i;
-      if(vdesc->iobuf != NULL){
+      if (vdesc->iobuf)
+        {
        brel(vdesc->iobuf);
        vdesc->iobuf=NULL;
      }
-      if(vdesc->fillbuf != NULL){
+      if (vdesc->fillbuf)
+        {
        brel(vdesc->fillbuf);
        vdesc->fillbuf=NULL;
      }
@@ -1699,40 +2017,65 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
 
   return ierr;
 }
 
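flush_output_buffer() drains every queued PnetCDF nonblocking write in one collective wait. The essential pattern reduced to a sketch; ncmpi_wait_all is the real PnetCDF call, while the request array and function name here are placeholders:

    /* Sketch: complete a batch of queued nonblocking requests with a
     * single collective wait.  Illustrative only. */
    #include <pnetcdf.h>

    int drain_requests(int ncid, int *requests, int nreqs)
    {
        int statuses[nreqs > 0 ? nreqs : 1];

        /* Completes (and releases) every listed request; collective
         * across the communicator the file was opened on. */
        return nreqs > 0 ? ncmpi_wait_all(ncid, nreqs, requests, statuses)
                         : NC_NOERR;
    }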
+/** Print out info about the buffer for debug purposes.
+ *
+ * @param ios the IO system structure
+ * @param collective true if collective report is desired
+ */
 void cn_buffer_report(iosystem_desc_t ios, bool collective)
 {
-  if(CN_bpool != NULL){
+  if (CN_bpool)
+    {
     long bget_stats[5];
     long bget_mins[5];
     long bget_maxs[5];
 
     bstats(bget_stats, bget_stats+1,bget_stats+2,bget_stats+3,bget_stats+4);
-    if(collective){
+    if (collective)
+      {
      MPI_Reduce(bget_stats, bget_maxs, 5, MPI_LONG, MPI_MAX, 0, ios.comp_comm);
      MPI_Reduce(bget_stats, bget_mins, 5, MPI_LONG, MPI_MIN, 0, ios.comp_comm);
-      if(ios.compmaster){
-       printf("PIO: Currently allocated buffer space %ld %ld\n",bget_mins[0],bget_maxs[0]);
-       printf("PIO: Currently available buffer space %ld %ld\n",bget_mins[1],bget_maxs[1]);
-       printf("PIO: Current largest free block %ld %ld\n",bget_mins[2],bget_maxs[2]);
-       printf("PIO: Number of successful bget calls %ld %ld\n",bget_mins[3],bget_maxs[3]);
-       printf("PIO: Number of successful brel calls %ld %ld\n",bget_mins[4],bget_maxs[4]);
+      if (ios.compmaster)
+        {
+          printf("PIO: Currently allocated buffer space %ld %ld\n",
+                 bget_mins[0], bget_maxs[0]);
+          printf("PIO: Currently available buffer space %ld %ld\n",
+                 bget_mins[1], bget_maxs[1]);
+          printf("PIO: Current largest free block %ld %ld\n",
+                 bget_mins[2], bget_maxs[2]);
+          printf("PIO: Number of successful bget calls %ld %ld\n",
+                 bget_mins[3], bget_maxs[3]);
+          printf("PIO: Number of successful brel calls %ld %ld\n",
+                 bget_mins[4], bget_maxs[4]);
        // print_trace(stdout);
      }
-    }else{
-      printf("%d: PIO: Currently allocated buffer space %ld \n",ios.union_rank,bget_stats[0]) ;
-      printf("%d: PIO: Currently available buffer space %ld \n",ios.union_rank,bget_stats[1]);
-      printf("%d: PIO: Current largest free block %ld \n",ios.union_rank,bget_stats[2]);
-      printf("%d: PIO: Number of successful bget calls %ld \n",ios.union_rank,bget_stats[3]);
-      printf("%d: PIO: Number of successful brel calls %ld \n",ios.union_rank,bget_stats[4]);
+      }
    else
+      {
+        printf("%d: PIO: Currently allocated buffer space %ld \n",
+               ios.union_rank, bget_stats[0]) ;
+        printf("%d: PIO: Currently available buffer space %ld \n",
+               ios.union_rank, bget_stats[1]);
+        printf("%d: PIO: Current largest free block %ld \n",
+               ios.union_rank, bget_stats[2]);
+        printf("%d: PIO: Number of successful bget calls %ld \n",
+               ios.union_rank, bget_stats[3]);
+        printf("%d: PIO: Number of successful brel calls %ld \n",
+               ios.union_rank, bget_stats[4]);
    }
  }
 }
 
+/** Free the buffer pool.
+ *
+ * @param ios
+ */
 void free_cn_buffer_pool(iosystem_desc_t ios)
 {
 #ifndef PIO_USE_MALLOC
-  if(CN_bpool != NULL){
+  if (CN_bpool)
+    {
     cn_buffer_report(ios, true);
     bpoolrelease(CN_bpool);
     //    free(CN_bpool);
@@ -1741,24 +2084,38 @@ void free_cn_buffer_pool(iosystem_desc_t ios)
 #endif
 }
 
+/** Flush the buffer.
+ *
+ * @param ncid
+ * @param wmb
+ * @param flushtodisk
+ */
 void flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk)
 {
-  if(wmb->validvars>0){
-    PIOc_write_darray_multi(ncid, wmb->vid, wmb->ioid, wmb->validvars, wmb->arraylen, wmb->data, wmb->frame, wmb->fillvalue, flushtodisk);
+  if (wmb->validvars > 0)
+    {
+      PIOc_write_darray_multi(ncid, wmb->vid, wmb->ioid, wmb->validvars,
+                              wmb->arraylen, wmb->data, wmb->frame,
+                              wmb->fillvalue, flushtodisk);
     wmb->validvars=0;
     brel(wmb->vid);
     wmb->vid=NULL;
     brel(wmb->data);
     wmb->data=NULL;
-    if(wmb->fillvalue != NULL)
+    if (wmb->fillvalue)
       brel(wmb->fillvalue);
-    if(wmb->frame != NULL)
+    if (wmb->frame)
       brel(wmb->frame);
     wmb->fillvalue=NULL;
     wmb->frame=NULL;
   }
 }
 
+/** Compute the maximum aggregate number of bytes.
+ *
+ * @param ios
+ * @param iodesc
+ */
 void compute_maxaggregate_bytes(const iosystem_desc_t ios, io_desc_t *iodesc)
 {
   int maxbytesoniotask=INT_MAX;
@@ -1767,12 +2124,12 @@ void compute_maxaggregate_bytes(const iosystem_desc_t ios, io_desc_t *iodesc)
 
   //  printf("%s %d %d %d\n",__FILE__,__LINE__,iodesc->maxiobuflen, iodesc->ndof);
 
-  if(ios.ioproc && iodesc->maxiobuflen>0){
+  if (ios.ioproc && iodesc->maxiobuflen > 0)
     maxbytesoniotask = PIO_BUFFER_SIZE_LIMIT/ iodesc->maxiobuflen;
-  }
-  if(ios.comp_rank>=0 && iodesc->ndof>0){
+
+  if (ios.comp_rank >= 0 && iodesc->ndof > 0)
     maxbytesoncomputetask = PIO_CNBUFFER_LIMIT/iodesc->ndof;
-  }
+
   maxbytes = min(maxbytesoniotask,maxbytesoncomputetask);
   //  printf("%s %d %d %d\n",__FILE__,__LINE__,maxbytesoniotask, maxbytesoncomputetask);
diff --git a/externals/pio2/src/clib/pio_darray_async.c b/externals/pio2/src/clib/pio_darray_async.c
new file mode 100644
index 00000000000..d9e41a8340e
--- /dev/null
+++ b/externals/pio2/src/clib/pio_darray_async.c
@@ -0,0 +1,2164 @@
+/** @file
+ *
+ * This file contains the routines that read and write
+ * distributed arrays in PIO.
+ *
+ * When arrays are distributed, each processor holds some of the
+ * array. Only by combining the distributed arrays from all processors
+ * can the full array be obtained.
+ *
+ * @author Jim Edwards, Ed Hartnett
+ */
+
+#include <config.h>
+#include <pio.h>
+#include <pio_internal.h>
+
+/* 10MB default limit. */
+PIO_Offset PIO_BUFFER_SIZE_LIMIT = 10485760;
+
+/* Initial size of compute buffer. */
+bufsize PIO_CNBUFFER_LIMIT = 33554432;
+
+/* Global buffer pool pointer. */
+static void *CN_bpool = NULL;
+
+/* Maximum buffer usage. */
+static PIO_Offset maxusage = 0;
+
+/** Set the pio buffer size limit. This is the size of the data buffer
+ * on the IO nodes.
+ *
+ * The pio_buffer_size_limit will only apply to files opened after
+ * the setting is changed.
+ *
+ * @param limit the size of the buffer on the IO nodes
+ *
+ * @return The previous limit setting.
+ */
+PIO_Offset PIOc_set_buffer_size_limit(const PIO_Offset limit)
+{
+    PIO_Offset oldsize;
+    oldsize = PIO_BUFFER_SIZE_LIMIT;
+    if (limit > 0)
+        PIO_BUFFER_SIZE_LIMIT = limit;
+    return oldsize;
+}
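A caller-side sketch of the limit setter above; returning the old value enables a save-and-restore pattern. The callback and size here are illustrative:

    /* Sketch: temporarily raise the IO-node buffer limit for a heavy
     * write phase, then restore the previous setting.  Per the doc
     * comment, the limit applies to files opened after the change. */
    #include <pio.h>

    void with_big_buffer(void (*write_phase)(void))
    {
        PIO_Offset old = PIOc_set_buffer_size_limit(64 * 1024 * 1024);
        write_phase();                    /* files opened here get 64 MB */
        PIOc_set_buffer_size_limit(old);  /* restore */
    }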
+
+/** Initialize the compute buffer to size PIO_CNBUFFER_LIMIT.
+ *
+ * This routine initializes the compute buffer pool if the bget memory
+ * management is used. If malloc is used (that is, PIO_USE_MALLOC is
+ * non zero), this function does nothing.
+ *
+ * @param ios the iosystem descriptor which will use the new buffer
+ */
+void compute_buffer_init(iosystem_desc_t ios)
+{
+#if !PIO_USE_MALLOC
+
+    if (!CN_bpool)
+    {
+        if (!(CN_bpool = malloc(PIO_CNBUFFER_LIMIT)))
+        {
+            char errmsg[180];
+            sprintf(errmsg,"Unable to allocate a buffer pool of size %d on task %d:"
+                    " try reducing PIO_CNBUFFER_LIMIT\n", PIO_CNBUFFER_LIMIT, ios.comp_rank);
+            piodie(errmsg, __FILE__, __LINE__);
+        }
+
+        bpool(CN_bpool, PIO_CNBUFFER_LIMIT);
+        if (!CN_bpool)
+        {
+            char errmsg[180];
+            sprintf(errmsg,"Unable to allocate a buffer pool of size %d on task %d:"
+                    " try reducing PIO_CNBUFFER_LIMIT\n", PIO_CNBUFFER_LIMIT, ios.comp_rank);
+            piodie(errmsg, __FILE__, __LINE__);
+        }
+
+        bectl(NULL, malloc, free, PIO_CNBUFFER_LIMIT);
+    }
+#endif
+}
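compute_buffer_init() hands one large malloc'd block to bget's bpool(), after which bget()/brel() carve allocations out of it, and bectl() allows the pool to grow in fixed increments. A minimal standalone sketch of that lifecycle, assuming bget.h from src/clib is on the include path and using an illustrative 1 MB pool:

    /* Sketch: the bget pool lifecycle as PIO uses it. */
    #include <stdlib.h>
    #include "bget.h"

    void pool_demo(void)
    {
        long poolsize = 1 << 20;             /* 1 MB pool */
        void *pool = malloc(poolsize);

        bpool(pool, poolsize);               /* register the block */
        void *a = bget(1024);                /* allocate from the pool */
        brel(a);                             /* return it to the pool */
        bectl(NULL, malloc, free, poolsize); /* auto-expand in 1 MB steps */
        free(pool);                          /* teardown for the demo */
    }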
+
+/** Write a single distributed field to output. This routine is only
+ * used if aggregation is off.
+ *
+ * @param file a pointer to the open file descriptor for the file
+ * that will be written to
+ * @param iodesc a pointer to the defined iodescriptor for the buffer
+ * @param vid the variable id to be written
+ * @param IOBUF the buffer to be written from this mpi task
+ * @param fillvalue the optional fillvalue to be used for missing
+ * data in this buffer
+ *
+ * @return 0 for success, error code otherwise.
+ * @ingroup PIO_write_darray
+ */
+int pio_write_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid,
+                        void *IOBUF, void *fillvalue)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    var_desc_t *vdesc;
+    int ndims;             /* Number of dimensions according to iodesc. */
+    int ierr = PIO_NOERR;  /* Return code from function calls. */
+    int i;                 /* Loop counter. */
+    int mpierr = MPI_SUCCESS;  /* Return code from MPI function codes. */
+    int dsize;             /* Size of the type. */
+    MPI_Status status;     /* Status from MPI_Recv calls. */
+    PIO_Offset usage;      /* Size of current buffer. */
+    int fndims;            /* Number of dims for variable according to netCDF. */
+    PIO_Offset tdsize = 0; /* Total size. */
+
+    LOG((1, "pio_write_array_nc vid = %d", vid));
+
+#ifdef TIMING
+    /* Start timing this function. */
+    GPTLstart("PIO:write_darray_nc");
+#endif
+
+    /* Get the IO system info. */
+    if (!(ios = file->iosystem))
+        return PIO_EBADID;
+
+    /* Get pointer to variable information. */
+    if (!(vdesc = file->varlist + vid))
+        return PIO_EBADID;
+
+    ndims = iodesc->ndims;
+
+    /* Get the number of dims for this var from netcdf. */
+    ierr = PIOc_inq_varndims(file->fh, vid, &fndims);
+
+    /* If async is in use, and this is not an IO task, bcast the parameters. */
+    if (ios->async_interface)
+    {
+        if (!ios->ioproc)
+        {
+            int msg = 0;
+
+            if (ios->compmaster)
+                mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            if (!mpierr)
+                mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm);
+        }
+    }
+
+    /* If this is an IO task, write the data. */
+    if (ios->ioproc)
+    {
+        io_region *region;
+        int regioncnt;
+        int rrcnt;
+        void *bufptr;
+        void *tmp_buf = NULL;
+        int tsize;            /* Type size. */
+        size_t start[fndims]; /* Local start array for this task. */
+        size_t count[fndims]; /* Local count array for this task. */
+        int buflen;
+        int j;                /* Loop counter. */
+
+        PIO_Offset *startlist[iodesc->maxregions];
+        PIO_Offset *countlist[iodesc->maxregions];
+
+        /* Get the type size (again?) */
+        MPI_Type_size(iodesc->basetype, &tsize);
+
+        region = iodesc->firstregion;
+
+        /* If this is a var with an unlimited dimension, and the
+         * iodesc ndims doesn't contain it, then add it to ndims. */
+        if (vdesc->record >= 0 && ndims < fndims)
+            ndims++;
+
+#ifdef _PNETCDF
+        /* Make sure we have room in the buffer. */
+        if (file->iotype == PIO_IOTYPE_PNETCDF)
+            flush_output_buffer(file, false, tsize * (iodesc->maxiobuflen));
+#endif
+
+        rrcnt = 0;
+        /* For each region, figure out start/count arrays. */
+        for (regioncnt = 0; regioncnt < iodesc->maxregions; regioncnt++)
+        {
+            /* Init arrays to zeros. */
+            for (i = 0; i < ndims; i++)
+            {
+                start[i] = 0;
+                count[i] = 0;
+            }
+
+            if (region)
+            {
+                bufptr = (void *)((char *)IOBUF + tsize * region->loffset);
+                if (vdesc->record >= 0)
+                {
+                    /* This is a record based multidimensional array. */
+
+                    /* This does not look correct, but will work if
+                     * unlimited dim is dim 0. */
+                    start[0] = vdesc->record;
+
+                    /* Set the local start and count arrays. */
+                    for (i = 1; i < ndims; i++)
+                    {
+                        start[i] = region->start[i - 1];
+                        count[i] = region->count[i - 1];
+                    }
+
+                    /* If there is data to be written, write one timestep. */
+                    if (count[1] > 0)
+                        count[0] = 1;
+                }
+                else
+                {
+                    /* Array without unlimited dimension. */
+                    for (i = 0; i < ndims; i++)
+                    {
+                        start[i] = region->start[i];
+                        count[i] = region->count[i];
+                    }
+                }
+            }
+
+            switch(file->iotype)
+            {
+#ifdef _NETCDF
+#ifdef _NETCDF4
+            case PIO_IOTYPE_NETCDF4P:
+
+                /* Use collective writes with this variable. */
+                ierr = nc_var_par_access(file->fh, vid, NC_COLLECTIVE);
+
+                /* Write the data. */
+                if (iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8)
+                    ierr = nc_put_vara_double(file->fh, vid, (size_t *)start, (size_t *)count,
+                                              (const double *)bufptr);
+                else if (iodesc->basetype == MPI_INTEGER)
+                    ierr = nc_put_vara_int(file->fh, vid, (size_t *)start, (size_t *)count,
+                                           (const int *)bufptr);
+                else if (iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4)
+                    ierr = nc_put_vara_float(file->fh, vid, (size_t *)start, (size_t *)count,
+                                             (const float *)bufptr);
+                else
+                    fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",
+                            (int)iodesc->basetype);
+                break;
+            case PIO_IOTYPE_NETCDF4C:
+#endif /* _NETCDF4 */
+            case PIO_IOTYPE_NETCDF:
+            {
+                /* Find the type size (again?) */
+                mpierr = MPI_Type_size(iodesc->basetype, &dsize);
+
+                size_t tstart[ndims], tcount[ndims];
+
+                /* The IO master task does all the data writes, and
+                 * receives the data from the other IO tasks. */
+                if (ios->io_rank == 0)
+                {
+                    for (i = 0; i < iodesc->num_aiotasks; i++)
+                    {
+                        if (i == 0)
+                        {
+                            buflen = 1;
+                            for (j = 0; j < ndims; j++)
+                            {
+                                tstart[j] = start[j];
+                                tcount[j] = count[j];
+                                buflen *= tcount[j];
+                                tmp_buf = bufptr;
+                            }
+                        }
+                        else
+                        {
+                            /* Handshake - tell the sending task I'm ready. */
+                            mpierr = MPI_Send(&ierr, 1, MPI_INT, i, 0, ios->io_comm);
+                            mpierr = MPI_Recv(&buflen, 1, MPI_INT, i, 1, ios->io_comm, &status);
+                            if (buflen > 0)
+                            {
+                                mpierr = MPI_Recv(tstart, ndims, MPI_OFFSET, i, ios->num_iotasks+i,
+                                                  ios->io_comm, &status);
+                                mpierr = MPI_Recv(tcount, ndims, MPI_OFFSET, i, 2 * ios->num_iotasks + i,
+                                                  ios->io_comm, &status);
+                                tmp_buf = malloc(buflen * dsize);
+                                mpierr = MPI_Recv(tmp_buf, buflen, iodesc->basetype, i, i, ios->io_comm, &status);
+                            }
+                        }
+
+                        if (buflen > 0)
+                        {
+                            /* Write the data. */
+                            if (iodesc->basetype == MPI_INTEGER)
+                                ierr = nc_put_vara_int(file->fh, vid, tstart, tcount, (const int *)tmp_buf);
+                            else if (iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8)
+                                ierr = nc_put_vara_double(file->fh, vid, tstart, tcount, (const double *)tmp_buf);
+                            else if (iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4)
+                                ierr = nc_put_vara_float(file->fh, vid, tstart, tcount, (const float *)tmp_buf);
+                            else
+                                fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",
+                                        (int)iodesc->basetype);
+
+                            /* Was there an error from netCDF? */
+                            if (ierr == PIO_EEDGE)
+                                for (i = 0; i < ndims; i++)
+                                    fprintf(stderr,"dim %d start %ld count %ld\n", i, tstart[i], tcount[i]);
+
+                            /* Free the temporary buffer, if we don't need it any more. */
+                            if (tmp_buf != bufptr)
+                                free(tmp_buf);
+                        }
+                    }
+                }
+                else if (ios->io_rank < iodesc->num_aiotasks)
+                {
+                    buflen = 1;
+                    for (i = 0; i < ndims; i++)
+                    {
+                        tstart[i] = (size_t) start[i];
+                        tcount[i] = (size_t) count[i];
+                        buflen *= tcount[i];
+                        // printf("%s %d %d %d %d\n",__FILE__,__LINE__,i,tstart[i],tcount[i]);
+                    }
+                    /* printf("%s %d %d %d %d %d %d %d %d %d\n",__FILE__,__LINE__,ios->io_rank,tstart[0],
+                       tstart[1],tcount[0],tcount[1],buflen,ndims,fndims);*/
+                    mpierr = MPI_Recv(&ierr, 1, MPI_INT, 0, 0, ios->io_comm, &status); // task0 is ready to receive
+                    mpierr = MPI_Rsend(&buflen, 1, MPI_INT, 0, 1, ios->io_comm);
+                    if (buflen > 0)
+                    {
+                        mpierr = MPI_Rsend(tstart, ndims, MPI_OFFSET, 0, ios->num_iotasks+ios->io_rank,
+                                           ios->io_comm);
+                        mpierr = MPI_Rsend(tcount, ndims, MPI_OFFSET, 0,2*ios->num_iotasks+ios->io_rank,
+                                           ios->io_comm);
+                        mpierr = MPI_Rsend(bufptr, buflen, iodesc->basetype, 0, ios->io_rank, ios->io_comm);
+                    }
+                }
+                break;
+            }
+            break;
+#endif /* _NETCDF */
+#ifdef _PNETCDF
+            case PIO_IOTYPE_PNETCDF:
+                for (i = 0, dsize = 1; i < ndims; i++)
+                    dsize *= count[i];
+
+                tdsize += dsize;
+                // if (dsize==1 && ndims==2)
+                //     printf("%s %d %d\n",__FILE__,__LINE__,iodesc->basetype);
+
+                if (dsize > 0)
+                {
+                    // printf("%s %d %d %d\n",__FILE__,__LINE__,ios->io_rank,dsize);
+                    startlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset));
+                    countlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset));
+                    for (i = 0; i < fndims; i++)
+                    {
+                        startlist[rrcnt][i] = start[i];
+                        countlist[rrcnt][i] = count[i];
+                    }
+                    rrcnt++;
+                }
+                if (regioncnt == iodesc->maxregions - 1)
+                {
+                    // printf("%s %d %d %ld %ld\n",__FILE__,__LINE__,ios->io_rank,iodesc->llen, tdsize);
+                    // ierr = ncmpi_put_varn_all(file->fh, vid, iodesc->maxregions, startlist, countlist,
+                    //                           IOBUF, iodesc->llen, iodesc->basetype);
+                    int reqn = 0;
+
+                    if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0 )
+                    {
+                        vdesc->request = realloc(vdesc->request,
+                                                 sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
+
+                        for (int i = vdesc->nreqs; i < vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK; i++)
+                            vdesc->request[i] = NC_REQ_NULL;
+                        reqn = vdesc->nreqs;
+                    }
+                    else
+                        while(vdesc->request[reqn] != NC_REQ_NULL)
+                            reqn++;
+
+                    ierr = ncmpi_bput_varn(file->fh, vid, rrcnt, startlist, countlist,
+                                           IOBUF, iodesc->llen, iodesc->basetype, vdesc->request+reqn);
+                    if (vdesc->request[reqn] == NC_REQ_NULL)
+                        vdesc->request[reqn] = PIO_REQ_NULL; //keeps wait calls in sync
+                    vdesc->nreqs = reqn;
+
+                    // printf("%s %d %X %d\n",__FILE__,__LINE__,IOBUF,request);
+                    for (i = 0; i < rrcnt; i++)
+                    {
+                        free(startlist[i]);
+                        free(countlist[i]);
+                    }
+                }
+                break;
+#endif /* _PNETCDF */
+            default:
+                ierr = iotype_error(file->iotype,__FILE__,__LINE__);
+            }
+
+            /* Move to the next region. */
+            if (region)
+                region = region->next;
+        } // for (regioncnt=0;regioncnt<iodesc->maxregions;regioncnt++){
+    } // if (ios->ioproc)
+
+    /* Check the error code returned by netCDF. */
+    ierr = check_netcdf(file, ierr, __FILE__,__LINE__);
+
+#ifdef TIMING
+    /* Stop timing this function. */
+    GPTLstop("PIO:write_darray_nc");
+#endif
+
+    return ierr;
+}
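The rendezvous in pio_write_darray_nc() pairs an explicit handshake with ready-mode sends: rank 0 posts a "go" message, and only then does the worker MPI_Rsend its sizes and data. A reduced sketch of that exchange; as in the patch, it assumes the matching receives are posted promptly after the handshake, and sends only an int buffer for brevity:

    /* Sketch: the ready-send handshake between IO rank 0 and a worker.
     * The real code also exchanges start/count arrays. */
    #include <mpi.h>

    void master_side(int worker, MPI_Comm io_comm, int *buf, int *len)
    {
        int go = 0;
        MPI_Status st;
        MPI_Send(&go, 1, MPI_INT, worker, 0, io_comm);       /* I'm ready */
        MPI_Recv(len, 1, MPI_INT, worker, 1, io_comm, &st);  /* size */
        if (*len > 0)
            MPI_Recv(buf, *len, MPI_INT, worker, worker, io_comm, &st);
    }

    void worker_side(MPI_Comm io_comm, int rank, int *buf, int len)
    {
        int go;
        MPI_Status st;
        MPI_Recv(&go, 1, MPI_INT, 0, 0, io_comm, &st);  /* wait for ready */
        MPI_Rsend(&len, 1, MPI_INT, 0, 1, io_comm);
        if (len > 0)
            MPI_Rsend(buf, len, MPI_INT, 0, rank, io_comm);
    }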
+
+/** Write a set of one or more aggregated arrays to output file.
+ *
+ * This routine is used if aggregation is enabled; the data is already
+ * on the io-tasks.
+ *
+ * @param file a pointer to the open file descriptor for the file
+ * that will be written to
+ * @param nvars the number of variables to be written with this
+ * decomposition
+ * @param vid: an array of the variable ids to be written
+ * @param iodesc_ndims: the number of dimensions explicitly in the
+ * iodesc
+ * @param basetype the basic type of the minimal data unit
+ * @param gsize array of the global dimensions of the field to
+ * be written
+ * @param maxregions max number of blocks to be written from
+ * this iotask
+ * @param firstregion pointer to the first element of a linked
+ * list of region descriptions.
+ * @param llen length of the iobuffer on this task for a single
+ * field
+ * @param maxiobuflen maximum llen participating
+ * @param num_aiotasks actual number of iotasks participating
+ * @param IOBUF the buffer to be written from this mpi task
+ * @param frame the frame or record dimension for each of the nvars
+ * variables in IOBUF
+ *
+ * @return 0 for success, error code otherwise.
+ * @ingroup PIO_write_darray
+ */
+int pio_write_darray_multi_nc(file_desc_t *file, const int nvars, const int *vid,
+                              const int iodesc_ndims, MPI_Datatype basetype, const PIO_Offset *gsize,
+                              const int maxregions, io_region *firstregion, const PIO_Offset llen,
+                              const int maxiobuflen, const int num_aiotasks,
+                              void *IOBUF, const int *frame)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    var_desc_t *vdesc;
+    int ierr;
+    int i;
+    int mpierr = MPI_SUCCESS;  /* Return code from MPI function codes. */
+    int dsize;
+    MPI_Status status;
+    PIO_Offset usage;
+    int fndims;
+    PIO_Offset tdsize;
+    int tsize;
+    int ncid;
+    tdsize=0;
+    ierr = PIO_NOERR;
+
+#ifdef TIMING
+    /* Start timing this function. */
+    GPTLstart("PIO:write_darray_multi_nc");
+#endif
+
+    ios = file->iosystem;
+    if (ios == NULL)
+    {
+        fprintf(stderr,"Failed to find iosystem handle \n");
+        return PIO_EBADID;
+    }
+    vdesc = (file->varlist)+vid[0];
+    ncid = file->fh;
+
+    if (vdesc == NULL)
+    {
+        fprintf(stderr,"Failed to find variable handle %d\n",vid[0]);
+        return PIO_EBADID;
+    }
+
+    /* If async is in use, send message to IO master task. */
+    if (ios->async_interface)
+    {
+        if (!ios->ioproc)
+        {
+            int msg = 0;
+            if (ios->compmaster)
+                mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            if (!mpierr)
+                mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm);
+        }
+    }
+
+    ierr = PIOc_inq_varndims(file->fh, vid[0], &fndims);
+    MPI_Type_size(basetype, &tsize);
+
+    if (ios->ioproc)
+    {
+        io_region *region;
+        int regioncnt;
+        int rrcnt;
+        void *bufptr;
+        int buflen, j;
+        size_t start[fndims];
+        size_t count[fndims];
+        int ndims = iodesc_ndims;
+
+        PIO_Offset *startlist[maxregions];
+        PIO_Offset *countlist[maxregions];
+
+        ncid = file->fh;
+        region = firstregion;
+
+        rrcnt = 0;
+        for (regioncnt = 0; regioncnt < maxregions; regioncnt++)
+        {
+            // printf("%s %d %d %d %d %d %d\n",__FILE__,__LINE__,region->start[0],region->count[0],ndims,fndims,vdesc->record);
+            for (i = 0; i < fndims; i++)
+            {
+                start[i] = 0;
+                count[i] = 0;
+            }
+            if (region)
+            {
+                // this is a record based multidimensional array
+                if (vdesc->record >= 0)
+                {
+                    for (i = fndims - ndims; i < fndims; i++)
+                    {
+                        start[i] = region->start[i-(fndims-ndims)];
+                        count[i] = region->count[i-(fndims-ndims)];
+                    }
+
+                    if (fndims>1 && ndims<fndims && count[1]>0)
+                    {
+                        count[0] = 1;
+                        start[0] = frame[0];
+                    }
+                    else if (fndims==ndims)
+                    {
+                        start[0] += vdesc->record;
+                    }
+                    // Non-time dependent array
+                }
+                else
+                {
+                    for (i = 0; i < ndims; i++)
+                    {
+                        start[i] = region->start[i];
+                        count[i] = region->count[i];
+                    }
+                }
+            }
+
+            switch(file->iotype)
+            {
+#ifdef _NETCDF4
+            case PIO_IOTYPE_NETCDF4P:
+                for (int nv = 0; nv < nvars; nv++)
+                {
+                    if (vdesc->record >= 0 && ndims < fndims)
+                    {
+                        start[0] = frame[nv];
+                    }
+                    if (region)
+                    {
+                        bufptr = (void *)((char *) IOBUF + tsize*(nv*llen + region->loffset));
+                    }
+                    ierr = nc_var_par_access(ncid, vid[nv], NC_COLLECTIVE);
+
+                    if (basetype == MPI_DOUBLE ||basetype == MPI_REAL8)
+                    {
+                        ierr = nc_put_vara_double (ncid, vid[nv],(size_t *) start,(size_t *) count,
+                                                   (const double *)bufptr);
+                    }
+                    else if (basetype == MPI_INTEGER)
+                    {
+                        ierr = nc_put_vara_int (ncid, vid[nv], (size_t *) start, (size_t *) count,
+                                                (const int *)bufptr);
+                    }
+                    else if (basetype == MPI_FLOAT || basetype == MPI_REAL4)
+                    {
+                        ierr = nc_put_vara_float (ncid, vid[nv], (size_t *) start, (size_t *) count,
+                                                  (const float *)bufptr);
+                    }
+                    else
+                    {
+                        fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",
+                                (int)basetype);
+                    }
+                }
+                break;
+#endif
+#ifdef _PNETCDF
+            case PIO_IOTYPE_PNETCDF:
+                for (i = 0, dsize = 1; i < fndims; i++)
+                {
+                    dsize *= count[i];
+                }
+                tdsize += dsize;
+
+                if (dsize>0)
+                {
+                    // printf("%s %d %d %d\n",__FILE__,__LINE__,ios->io_rank,dsize);
+                    startlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset));
+                    countlist[rrcnt] = (PIO_Offset *) calloc(fndims, sizeof(PIO_Offset));
+                    for (i = 0; i < fndims; i++)
+                    {
+                        startlist[rrcnt][i]=start[i];
+                        countlist[rrcnt][i]=count[i];
+                    }
+                    rrcnt++;
+                }
+                if (regioncnt==maxregions-1)
+                {
+                    //printf("%s %d %d %ld %ld\n",__FILE__,__LINE__,ios->io_rank,iodesc->llen, tdsize);
+                    // ierr = ncmpi_put_varn_all(ncid, vid, iodesc->maxregions, startlist, countlist,
+                    //                           IOBUF, iodesc->llen, iodesc->basetype);
+
+                    //printf("%s %d %ld \n",__FILE__,__LINE__,IOBUF);
+                    for (int nv=0; nv<nvars; nv++)
+                    {
+                        vdesc = (file->varlist)+vid[nv];
+                        if (vdesc->record >= 0 && ndims<fndims)
+                            for (int rc = 0; rc < rrcnt; rc++)
+                                startlist[rc][0] = frame[nv];
+                        bufptr = (void *)((char *)IOBUF + nv * tsize * llen);
+
+                        int reqn = 0;
+                        if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 )
+                        {
+                            vdesc->request = realloc(vdesc->request,
+                                                     sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK));
+
+                            for (int i=vdesc->nreqs;i<vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK;i++)
+                            {
+                                vdesc->request[i]=NC_REQ_NULL;
+                            }
+                            reqn = vdesc->nreqs;
+                        }
+                        else
+                        {
+                            while(vdesc->request[reqn] != NC_REQ_NULL)
+                            {
+                                reqn++;
+                            }
+                        }
+                        ierr = ncmpi_iput_varn(ncid, vid[nv], rrcnt, startlist, countlist,
+                                               bufptr, llen, basetype, vdesc->request+reqn);
+                        /*
+                          ierr = ncmpi_bput_varn(ncid, vid[nv], rrcnt, startlist, countlist,
+                          bufptr, llen, basetype, &(vdesc->request));
+                        */
+                        if (vdesc->request[reqn] == NC_REQ_NULL)
+                        {
+                            vdesc->request[reqn] = PIO_REQ_NULL;  //keeps wait calls in sync
+                        }
+                        vdesc->nreqs += reqn+1;
+
+                        // printf("%s %d %d %d\n",__FILE__,__LINE__,vdesc->nreqs,vdesc->request[reqn]);
+                    }
+                    for (i = 0; i < rrcnt; i++)
+                    {
+                        free(startlist[i]);
+                        free(countlist[i]);
+                    }
+                }
+                break;
+#endif
+            default:
+                ierr = iotype_error(file->iotype,__FILE__,__LINE__);
+            }
+            if (region)
+                region = region->next;
+        } // for (regioncnt=0;regioncnt<maxregions;regioncnt++){
+    } // if (ios->ioproc)
+
+    ierr = check_netcdf(file, ierr, __FILE__,__LINE__);
+
+#ifdef TIMING
+    /* Stop timing this function. */
+    GPTLstop("PIO:write_darray_multi_nc");
+#endif
+
+    return ierr;
+}
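The pnetcdf branch above queues one nonblocking varn write per variable and grows the per-variable request array in PIO_REQUEST_ALLOC_CHUNK steps, parking NC_REQ_NULL in unused slots so later wait calls stay aligned. That bookkeeping in isolation, with stand-in constants since NC_REQ_NULL and the chunk size come from headers not shown here:

    /* Sketch: grow a request array in fixed chunks and find the next
     * free slot.  Illustrative only. */
    #include <stdlib.h>

    #define REQ_NULL  (-1)   /* stand-in for NC_REQ_NULL */
    #define REQ_CHUNK 16     /* stand-in for PIO_REQUEST_ALLOC_CHUNK */

    int next_request_slot(int **requests, int *nreqs)
    {
        int reqn = 0;
        if (*nreqs % REQ_CHUNK == 0)   /* array is full: add a chunk */
        {
            *requests = realloc(*requests, sizeof(int) * (*nreqs + REQ_CHUNK));
            for (int i = *nreqs; i < *nreqs + REQ_CHUNK; i++)
                (*requests)[i] = REQ_NULL;
            reqn = *nreqs;
        }
        else
            while ((*requests)[reqn] != REQ_NULL)
                reqn++;
        return reqn;
    }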
+
+/** Write a set of one or more aggregated arrays to output file in
+ * serial mode.
+ *
+ * This routine is used if aggregation is enabled; the data is already
+ * on the io-tasks.
+ *
+ * @param file: a pointer to the open file descriptor for the file
+ * that will be written to
+ * @param nvars: the number of variables to be written with this
+ * decomposition
+ * @param vid: an array of the variable ids to be written
+ * @param iodesc_ndims: the number of dimensions explicitly in the
+ * iodesc
+ * @param basetype : the basic type of the minimal data unit
+ * @param gsize : array of the global dimensions of the field to be
+ * written
+ * @param maxregions : max number of blocks to be written from this
+ * iotask
+ * @param firstregion : pointer to the first element of a linked
+ * list of region descriptions.
+ * @param llen : length of the iobuffer on this task for a single
+ * field
+ * @param maxiobuflen : maximum llen participating
+ * @param num_aiotasks : actual number of iotasks participating
+ * @param IOBUF: the buffer to be written from this mpi task
+ * @param frame : the frame or record dimension for each of the
+ * nvars variables in IOBUF
+ *
+ * @return 0 for success, error code otherwise.
+ * @ingroup PIO_write_darray
+ */
+int pio_write_darray_multi_nc_serial(file_desc_t *file, const int nvars, const int *vid,
+                                     const int iodesc_ndims, MPI_Datatype basetype, const PIO_Offset *gsize,
+                                     const int maxregions, io_region *firstregion, const PIO_Offset llen,
+                                     const int maxiobuflen, const int num_aiotasks,
+                                     void *IOBUF, const int *frame)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    var_desc_t *vdesc;
+    int ierr;
+    int i;
+    int mpierr = MPI_SUCCESS;  /* Return code from MPI function codes. */
+    int dsize;
+    MPI_Status status;
+    PIO_Offset usage;
+    int fndims;
+    PIO_Offset tdsize;
+    int tsize;
+    int ncid;
+    tdsize=0;
+    ierr = PIO_NOERR;
+#ifdef TIMING
+    /* Start timing this function. */
+    GPTLstart("PIO:write_darray_multi_nc_serial");
+#endif
+
+    if (!(ios = file->iosystem))
+    {
+        fprintf(stderr,"Failed to find iosystem handle \n");
+        return PIO_EBADID;
+    }
+
+    ncid = file->fh;
+
+    if (!(vdesc = (file->varlist) + vid[0]))
+    {
+        fprintf(stderr,"Failed to find variable handle %d\n",vid[0]);
+        return PIO_EBADID;
+    }
+
+    /* If async is in use, and this is not an IO task, bcast the parameters. */
+    if (ios->async_interface)
+    {
+        if (!ios->ioproc)
+        {
+            int msg = 0;
+
+            if (ios->comp_rank==0)
+                mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            if (!mpierr)
+                mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm);
+        }
+    }
+
+    ierr = PIOc_inq_varndims(file->fh, vid[0], &fndims);
+    MPI_Type_size(basetype, &tsize);
+
+    if (ios->ioproc)
+    {
+        io_region *region;
+        int regioncnt;
+        int rrcnt;
+        void *bufptr;
+        int buflen, j;
+        size_t tmp_start[fndims*maxregions];
+        size_t tmp_count[fndims*maxregions];
+
+        int ndims = iodesc_ndims;
+
+        ncid = file->fh;
+        region = firstregion;
+
+        rrcnt = 0;
+        for (regioncnt = 0; regioncnt < maxregions; regioncnt++)
+        {
+            for (i = 0; i < fndims; i++)
+            {
+                tmp_start[i + regioncnt * fndims] = 0;
+                tmp_count[i + regioncnt * fndims] = 0;
+            }
+            if (region)
+            {
+                // this is a record based multidimensional array
+                if (vdesc->record >= 0)
+                {
+                    for (i = fndims - ndims; i < fndims; i++)
+                    {
+                        tmp_start[i + regioncnt * fndims] = region->start[i - (fndims - ndims)];
+                        tmp_count[i + regioncnt * fndims] = region->count[i - (fndims - ndims)];
+                    }
+                    // Non-time dependent array
+                }
+                else
+                {
+                    for (i = 0; i < ndims; i++)
+                    {
+                        tmp_start[i + regioncnt * fndims] = region->start[i];
+                        tmp_count[i + regioncnt * fndims] = region->count[i];
+                    }
+                }
+                region = region->next;
+            }
+        }
+        if (ios->io_rank > 0)
+        {
+            mpierr = MPI_Recv(&ierr, 1, MPI_INT, 0, 0, ios->io_comm, &status);  // task0 is ready to receive
+            MPI_Send(&llen, 1, MPI_OFFSET, 0, ios->io_rank, ios->io_comm);
+            if (llen>0)
+            {
+                MPI_Send(&maxregions, 1, MPI_INT, 0, ios->io_rank+ios->num_iotasks, ios->io_comm);
+                MPI_Send(tmp_start, maxregions*fndims, MPI_OFFSET, 0, ios->io_rank+2*ios->num_iotasks, ios->io_comm);
+                MPI_Send(tmp_count, maxregions*fndims, MPI_OFFSET, 0, ios->io_rank+3*ios->num_iotasks, ios->io_comm);
+                // printf("%s %d %ld\n",__FILE__,__LINE__,nvars*llen);
+                MPI_Send(IOBUF, nvars*llen, basetype, 0, ios->io_rank+4*ios->num_iotasks, ios->io_comm);
+            }
+        }
+        else
+        {
+            size_t rlen;
+            int rregions;
+            size_t start[fndims], count[fndims];
+            size_t loffset;
+            mpierr = MPI_Type_size(basetype, &dsize);
+
+            for (int rtask=0; rtask<ios->num_iotasks; rtask++)
+            {
+                if (rtask>0)
+                {
+                    mpierr = MPI_Send(&ierr, 1, MPI_INT, rtask, 0, ios->io_comm);  // handshake - tell the sending task I'm ready
+                    MPI_Recv(&rlen, 1, MPI_OFFSET, rtask, rtask, ios->io_comm, &status);
+                    if (rlen>0){
+                        MPI_Recv(&rregions, 1, MPI_INT, rtask, rtask+ios->num_iotasks, ios->io_comm, &status);
+                        MPI_Recv(tmp_start, rregions*fndims, MPI_OFFSET, rtask, rtask+2*ios->num_iotasks, ios->io_comm, &status);
+                        MPI_Recv(tmp_count, rregions*fndims, MPI_OFFSET, rtask, rtask+3*ios->num_iotasks, ios->io_comm, &status);
+                        // printf("%s %d %d %ld\n",__FILE__,__LINE__,rtask,nvars*rlen);
+                        MPI_Recv(IOBUF, nvars*rlen, basetype, rtask, rtask+4*ios->num_iotasks, ios->io_comm, &status);
+                    }
+                }
+                else
+                {
+                    rlen = llen;
+                    rregions = maxregions;
+                }
+                if (rlen>0)
+                {
+                    loffset = 0;
+                    for (regioncnt=0;regioncnt<rregions;regioncnt++)
+                    {
+                        size_t regionsize = 1;
+                        for (int m=0; m<fndims; m++)
+                        {
+                            start[m] = tmp_start[m+regioncnt*fndims];
+                            count[m] = tmp_count[m+regioncnt*fndims];
+                            regionsize *= count[m];
+                        }
+                        for (int nv=0; nv<nvars; nv++)
+                        {
+                            bufptr = (void *)((char *)IOBUF + tsize*(nv*rlen + loffset));
+
+                            if (vdesc->record>=0)
+                            {
+                                if (fndims>1 && ndims<fndims && count[1]>0)
+                                {
+                                    count[0] = 1;
+                                    start[0] = frame[nv];
+                                }
+                                else if (fndims==ndims)
+                                {
+                                    start[0]+=vdesc->record;
+                                }
+                            }
+
+                            if (basetype == MPI_INTEGER)
+                            {
+                                ierr = nc_put_vara_int (ncid, vid[nv], start, count, (const int *) bufptr);
+                            }
+                            else if (basetype == MPI_DOUBLE || basetype == MPI_REAL8)
+                            {
+                                ierr = nc_put_vara_double (ncid, vid[nv], start, count, (const double *) bufptr);
+                            }
+                            else if (basetype == MPI_FLOAT || basetype == MPI_REAL4)
+                            {
+                                ierr = nc_put_vara_float (ncid,vid[nv], start, count, (const float *) bufptr);
+                            }
+                            else
+                            {
+                                fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",(int) basetype);
+                            }
+
+                            if (ierr != PIO_NOERR)
+                            {
+                                for (i=0;i<fndims;i++)
+                                    fprintf(stderr,"vid %d dim %d start %ld count %ld\n",
+                                            vid[nv],i,start[i],count[i]);
+                            }
+                        } // for (int nv=0; nv<nvars; nv++)
+                        loffset += regionsize;
+                    } // for (regioncnt=0;regioncnt<maxregions;regioncnt++){
+                } // if (rlen>0)
+            } // for (int rtask=0; rtask<ios->num_iotasks; rtask++){
+
+        }
+    } // if (ios->ioproc)
+
+    ierr = check_netcdf(file, ierr, __FILE__,__LINE__);
+
+#ifdef TIMING
+    /* Stop timing this function. */
+    GPTLstop("PIO:write_darray_multi_nc_serial");
+#endif
+
+    return ierr;
+}
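In both multi-variable writers, a record variable's outermost file dimension is driven by frame[nv], one record index per variable, while the decomposed dimensions are shifted right by fndims-ndims. The per-variable record placement in isolation (names illustrative):

    /* Sketch: place variable nv's block at its own record.  frame holds
     * one record index per variable. */
    #include <stddef.h>

    static void set_record_start(size_t *start, size_t *count, int fndims,
                                 int ndims, const int *frame, int nv)
    {
        if (fndims > 1 && ndims < fndims && count[1] > 0)
        {
            count[0] = 1;          /* write a single record... */
            start[0] = frame[nv];  /* ...at this variable's frame */
        }
    }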
+
+/** Write one or more arrays with the same IO decomposition to the file.
+ *
+ * @param ncid identifies the netCDF file
+ * @param vid: an array of the variable ids to be written
+ * @param ioid: the I/O description ID as passed back by
+ * PIOc_InitDecomp().
+ * @param nvars the number of variables to be written with this
+ * decomposition
+ * @param arraylen: the length of the array to be written. This
+ * is the length of the distributed array. That is, the length of
+ * the portion of the data that is on the processor.
+ * @param array: pointer to the data to be written. This is a
+ * pointer to the distributed portion of the array that is on this
+ * processor.
+ * @param frame the frame or record dimension for each of the nvars
+ * variables in IOBUF
+ * @param fillvalue: pointer to the fill value to be used for
+ * missing data.
+ * @param flushtodisk
+ *
+ * @return 0 for success, error code otherwise.
+ * @ingroup PIO_write_darray
+ */
+int PIOc_write_darray_multi(const int ncid, const int *vid, const int ioid,
+                            const int nvars, const PIO_Offset arraylen,
+                            void *array, const int *frame, void **fillvalue,
+                            bool flushtodisk)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    file_desc_t *file;
+    io_desc_t *iodesc;
+
+    int vsize, rlen;
+    int ierr;
+    var_desc_t *vdesc0;
+
+    ierr = PIO_NOERR;
+
+    file = pio_get_file_from_id(ncid);
+    if (file == NULL)
+    {
+        fprintf(stderr,"File handle not found %d %d\n",ncid,__LINE__);
+        return PIO_EBADID;
+    }
+    if (! (file->mode & PIO_WRITE))
+    {
+        fprintf(stderr,"ERROR: Attempt to write to read-only file\n");
+        return PIO_EPERM;
+    }
+
+    iodesc = pio_get_iodesc_from_id(ioid);
+    if (iodesc == NULL)
+    {
+        // print_trace(NULL);
+        //fprintf(stderr,"iodesc handle not found %d %d\n",ioid,__LINE__);
+        return PIO_EBADID;
+    }
+
+    vdesc0 = file->varlist+vid[0];
+
+    pioassert(nvars>0,"nvars <= 0",__FILE__,__LINE__);
+
+    ios = file->iosystem;
+    // rlen = iodesc->llen*nvars;
+    rlen=0;
+    if (iodesc->llen>0)
+    {
+        rlen = iodesc->maxiobuflen*nvars;
+    }
+    if (vdesc0->iobuf)
+    {
+        piodie("Attempt to overwrite existing io buffer",__FILE__,__LINE__);
+    }
+    if (iodesc->rearranger>0)
+    {
+        if (rlen>0)
+        {
+            MPI_Type_size(iodesc->basetype, &vsize);
+            //printf("rlen*vsize = %ld\n",rlen*vsize);
+
+            vdesc0->iobuf = bget((size_t) vsize* (size_t) rlen);
+            if (vdesc0->iobuf==NULL)
+            {
+                printf("%s %d %d %ld\n",__FILE__,__LINE__,nvars,vsize*rlen);
+                piomemerror(*ios,(size_t) rlen*(size_t) vsize, __FILE__,__LINE__);
+            }
+            if (iodesc->needsfill && iodesc->rearranger==PIO_REARR_BOX)
+            {
+                if (vsize==4)
+                {
+                    for (int nv=0;nv < nvars; nv++)
+                    {
+                        for (int i=0;i<iodesc->maxiobuflen;i++)
+                        {
+                            ((float *) vdesc0->iobuf)[i+nv*(iodesc->maxiobuflen)] = ((float *)fillvalue)[nv];
+                        }
+                    }
+                }
+                else if (vsize==8)
+                {
+                    for (int nv=0;nv < nvars; nv++)
+                    {
+                        for (int i=0;i<iodesc->maxiobuflen;i++)
+                        {
+                            ((double *)vdesc0->iobuf)[i+nv*(iodesc->maxiobuflen)] = ((double *)fillvalue)[nv];
+                        }
+                    }
+                }
+            }
+        }
+
+        ierr = rearrange_comp2io(*ios, iodesc, array, vdesc0->iobuf, nvars);
+    }/* this is wrong, need to think about it
+       else{
+       vdesc0->iobuf = array;
+       } */
+    switch(file->iotype)
+    {
+    case PIO_IOTYPE_NETCDF4P:
+    case PIO_IOTYPE_PNETCDF:
+        ierr = pio_write_darray_multi_nc(file, nvars, vid,
+                                         iodesc->ndims, iodesc->basetype, iodesc->gsize,
+                                         iodesc->maxregions, iodesc->firstregion, iodesc->llen,
+                                         iodesc->maxiobuflen, iodesc->num_aiotasks,
+                                         vdesc0->iobuf, frame);
+        break;
+    case PIO_IOTYPE_NETCDF4C:
+    case PIO_IOTYPE_NETCDF:
+        ierr = pio_write_darray_multi_nc_serial(file, nvars, vid,
+                                                iodesc->ndims, iodesc->basetype, iodesc->gsize,
+                                                iodesc->maxregions, iodesc->firstregion, iodesc->llen,
+                                                iodesc->maxiobuflen, iodesc->num_aiotasks,
+                                                vdesc0->iobuf, frame);
+        if (vdesc0->iobuf)
+        {
+            brel(vdesc0->iobuf);
+            vdesc0->iobuf = NULL;
+        }
+        break;
+
+    }
+
+    if (iodesc->rearranger == PIO_REARR_SUBSET && iodesc->needsfill &&
+        iodesc->holegridsize>0)
+    {
+        if (vdesc0->fillbuf)
+        {
+            piodie("Attempt to overwrite existing buffer",__FILE__,__LINE__);
+        }
+
+        vdesc0->fillbuf = bget(iodesc->holegridsize*vsize*nvars);
+        //printf("%s %d %x\n",__FILE__,__LINE__,vdesc0->fillbuf);
+        if (vsize==4)
+        {
+            for (int nv=0;nv < nvars; nv++)
+            {
+                for (int i=0;i<iodesc->holegridsize;i++)
+                {
+                    ((float *) vdesc0->fillbuf)[i+nv*iodesc->holegridsize] = ((float *) fillvalue)[nv];
+                }
+            }
+        }
+        else if (vsize==8)
+        {
+            for (int nv=0;nv < nvars; nv++)
+            {
+                for (int i=0;i<iodesc->holegridsize;i++)
+                {
+                    ((double *) vdesc0->fillbuf)[i+nv*iodesc->holegridsize] = ((double *) fillvalue)[nv];
+                }
+            }
+        }
+        switch(file->iotype)
+        {
+        case PIO_IOTYPE_PNETCDF:
+            ierr = pio_write_darray_multi_nc(file, nvars, vid,
+                                             iodesc->ndims, iodesc->basetype, iodesc->gsize,
+                                             iodesc->maxfillregions, iodesc->fillregion, iodesc->holegridsize,
+                                             iodesc->holegridsize, iodesc->num_aiotasks,
+                                             vdesc0->fillbuf, frame);
+            break;
+        case PIO_IOTYPE_NETCDF4P:
+        case PIO_IOTYPE_NETCDF4C:
+        case PIO_IOTYPE_NETCDF:
+            /* ierr = pio_write_darray_multi_nc_serial(file, nvars, vid,
+               iodesc->ndims, iodesc->basetype, iodesc->gsize,
+               iodesc->maxfillregions, iodesc->fillregion, iodesc->holegridsize,
+               iodesc->holegridsize,
+                    vdesc0->fillbuf, frame);
+         */
+         /* if (vdesc0->fillbuf != NULL){
+            printf("%s %d %x\n",__FILE__,__LINE__,vdesc0->fillbuf);
+            brel(vdesc0->fillbuf);
+            vdesc0->fillbuf = NULL;
+            }
+         */
+         break;
+     }
+ }
+
+ flush_output_buffer(file, flushtodisk, 0);
+
+ return ierr;
+}
+
+/** Write a distributed array to the output file.
+ *
+ * This routine aggregates output on the compute nodes and only sends
+ * it to the IO nodes when the compute buffer is full or when a flush
+ * is triggered.
+ *
+ * @param ncid: the ncid of the open netCDF file.
+ * @param vid: the variable ID returned by PIOc_def_var().
+ * @param ioid: the I/O description ID as passed back by
+ * PIOc_InitDecomp().
+ * @param arraylen: the length of the array to be written. This
+ * is the length of the distributed array. That is, the length of
+ * the portion of the data that is on the processor.
+ * @param array: pointer to the data to be written. This is a
+ * pointer to the distributed portion of the array that is on this
+ * processor.
+ * @param fillvalue: pointer to the fill value to be used for
+ * missing data.
+ *
+ * @returns 0 for success, non-zero error code for failure.
+ * @ingroup PIO_write_darray
+ */
+int PIOc_write_darray(const int ncid, const int vid, const int ioid,
+                      const PIO_Offset arraylen, void *array, void *fillvalue)
+{
+ iosystem_desc_t *ios;  /* Pointer to io system information. */
+ file_desc_t *file;
+ io_desc_t *iodesc;
+ var_desc_t *vdesc;
+ void *bufptr;
+ size_t rlen;
+ int ierr;
+ MPI_Datatype vtype;
+ wmulti_buffer *wmb;
+ int tsize;
+ int *tptr;
+ void *bptr;
+ void *fptr;
+ bool recordvar;
+ int needsflush;
+ bufsize totfree, maxfree;
+
+ ierr = PIO_NOERR;
+ needsflush = 0; // false
+ file = pio_get_file_from_id(ncid);
+ if (file == NULL)
+ {
+     fprintf(stderr,"File handle not found %d %d\n",ncid,__LINE__);
+     return PIO_EBADID;
+ }
+ if (! (file->mode & PIO_WRITE))
+ {
+     fprintf(stderr,"ERROR: Attempt to write to read-only file\n");
+     return PIO_EPERM;
+ }
+
+ iodesc = pio_get_iodesc_from_id(ioid);
+ if (iodesc == NULL)
+ {
+     fprintf(stderr,"iodesc handle not found %d %d\n",ioid,__LINE__);
+     return PIO_EBADID;
+ }
+ ios = file->iosystem;
+
+ vdesc = (file->varlist)+vid;
+ if (vdesc == NULL)
+     return PIO_EBADID;
+
+ /* Is this a record variable? */
+ recordvar = vdesc->record >= 0 ? true : false;
+
+ if (iodesc->ndof != arraylen)
+ {
+     fprintf(stderr,"ndof=%ld, arraylen=%ld\n",iodesc->ndof,arraylen);
+     piodie("ndof != arraylen",__FILE__,__LINE__);
+ }
+ wmb = &(file->buffer);
+ if (wmb->ioid == -1)
+ {
+     if (recordvar)
+         wmb->ioid = ioid;
+     else
+         wmb->ioid = -(ioid);
+ }
+ else
+ {
+     // separate record and non-record variables
+     if (recordvar)
+     {
+         while(wmb->next && wmb->ioid!=ioid)
+             if (wmb->next!=NULL)
+                 wmb = wmb->next;
+#ifdef _PNETCDF
+         /* flush the previous record before starting a new one. this is collective */
+         // if (vdesc->request != NULL && (vdesc->request[0] != NC_REQ_NULL) ||
+         //     (wmb->frame != NULL && vdesc->record != wmb->frame[0])){
+         //     needsflush = 2;  // flush to disk
+         // }
+#endif
+     }
+     else
+     {
+         while(wmb->next && wmb->ioid!= -(ioid))
+         {
+             if (wmb->next!=NULL)
+                 wmb = wmb->next;
+         }
+     }
+ }
+ if ((recordvar && wmb->ioid != ioid) || (!recordvar && wmb->ioid != -(ioid)))
+ {
+     wmb->next = (wmulti_buffer *) bget((bufsize) sizeof(wmulti_buffer));
+     if (wmb->next == NULL)
+         piomemerror(*ios,sizeof(wmulti_buffer), __FILE__,__LINE__);
+     wmb=wmb->next;
+     wmb->next=NULL;
+     if (recordvar)
+         wmb->ioid = ioid;
+     else
+         wmb->ioid = -(ioid);
+     wmb->validvars=0;
+     wmb->arraylen=arraylen;
+     wmb->vid=NULL;
+     wmb->data=NULL;
+     wmb->frame=NULL;
+     wmb->fillvalue=NULL;
+ }
+
+ MPI_Type_size(iodesc->basetype, &tsize);
+ // At this point wmb should be pointing to a new or existing buffer
+ // so we can add the data
+ // printf("%s %d %X %d %d %d\n",__FILE__,__LINE__,wmb->data,wmb->validvars,arraylen,tsize);
+ // cn_buffer_report(*ios, true);
+ bfreespace(&totfree, &maxfree);
+ if (needsflush == 0)
+     needsflush = (maxfree <= 1.1*(1+wmb->validvars)*arraylen*tsize );
+ MPI_Allreduce(MPI_IN_PLACE, &needsflush, 1, MPI_INT, MPI_MAX, ios->comp_comm);
+
+ if (needsflush > 0 )
+ {
+     // need to flush first
+     // printf("%s %d %ld %d %ld %ld\n",__FILE__,__LINE__,maxfree, wmb->validvars, (1+wmb->validvars)*arraylen*tsize,totfree);
+     cn_buffer_report(*ios, true);
+
+     flush_buffer(ncid, wmb, needsflush == 2);  // if needsflush == 2 flush to disk otherwise just flush to io node
+ }
+
+ if (arraylen > 0)
+     if (!(wmb->data = bgetr(wmb->data, (1+wmb->validvars)*arraylen*tsize)))
+         piomemerror(*ios, (1+wmb->validvars)*arraylen*tsize, __FILE__, __LINE__);
+
+ if (!(wmb->vid = (int *) bgetr(wmb->vid,sizeof(int)*(1+wmb->validvars))))
+     piomemerror(*ios, (1+wmb->validvars)*sizeof(int), __FILE__, __LINE__);
+
+ if (vdesc->record >= 0)
+     if (!(wmb->frame = (int *)bgetr(wmb->frame, sizeof(int) * (1 + wmb->validvars))))
+         piomemerror(*ios, (1+wmb->validvars)*sizeof(int), __FILE__, __LINE__);
+
+ if (iodesc->needsfill)
+     if (!(wmb->fillvalue = bgetr(wmb->fillvalue,tsize*(1+wmb->validvars))))
+         piomemerror(*ios, (1+wmb->validvars)*tsize , __FILE__,__LINE__);
+
+ if (iodesc->needsfill)
+ {
+     if (fillvalue)
+     {
+         memcpy((char *) wmb->fillvalue+tsize*wmb->validvars,fillvalue, tsize);
+     }
+     else
+     {
+         vtype = (MPI_Datatype) iodesc->basetype;
+         if (vtype == MPI_INTEGER)
+         {
+             int fill = PIO_FILL_INT;
+             memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
+         }
+         else if (vtype == MPI_FLOAT || vtype == MPI_REAL4)
+         {
+             float fill = PIO_FILL_FLOAT;
+             memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
+         }
+         else if (vtype == MPI_DOUBLE || vtype == MPI_REAL8)
+         {
+             double fill = PIO_FILL_DOUBLE;
+             memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
+         }
+         else if (vtype == MPI_CHARACTER)
+         {
+             char fill = PIO_FILL_CHAR;
+             memcpy((char *) wmb->fillvalue+tsize*wmb->validvars, &fill, tsize);
+         }
+         else
+         {
+             fprintf(stderr,"Type not recognized %d in pioc_write_darray\n",(int) vtype);
+         }
+     }
+
+ }
+
+ wmb->arraylen = arraylen;
+ wmb->vid[wmb->validvars]=vid;
+ bufptr = (void *)((char *) wmb->data + arraylen*tsize*wmb->validvars);
+ if (arraylen>0)
+     memcpy(bufptr, array, arraylen*tsize);
+ /*
+ if (tsize==8){
+     double asum=0.0;
+     printf("%s %d %d %d %d\n",__FILE__,__LINE__,vid,arraylen,iodesc->ndof);
+     for (int k=0;k<arraylen;k++)
+         asum += ((double *) array)[k];
+     printf("%s %d %d %g\n",__FILE__,__LINE__,vid,asum);
+ }
+ */
+ // printf("%s %d %d %d %d\n",__FILE__,__LINE__,wmb->validvars,wmb->ioid,vid,bufptr);
+
+ if (wmb->frame!=NULL)
+     wmb->frame[wmb->validvars]=vdesc->record;
+ wmb->validvars++;
+
+ // printf("%s %d %d %d %d %d\n",__FILE__,__LINE__,wmb->validvars,iodesc->maxbytes/tsize, iodesc->ndof, iodesc->llen);
+ if (wmb->validvars >= iodesc->maxbytes/tsize)
+     PIOc_sync(ncid);
+
+ return ierr;
+}
+
+/** Read an array of data from a file to the (parallel) IO library.
+ *
+ * @param file a pointer to the open file descriptor for the file
+ * that will be written to
+ * @param iodesc a pointer to the defined iodescriptor for the buffer
+ * @param vid the variable id to be read
+ * @param IOBUF the buffer to be read into from this mpi task
+ *
+ * @return 0 on success, error code otherwise.
+ * @ingroup PIO_read_darray
+ */
+int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, const int vid,
+                       void *IOBUF)
+{
+ int ierr=PIO_NOERR;
+ iosystem_desc_t *ios;  /* Pointer to io system information. */
+ var_desc_t *vdesc;
+ int ndims, fndims;
+ MPI_Status status;
+ int i;
+
+#ifdef TIMING
+ /* Start timing this function. */
+ GPTLstart("PIO:read_darray_nc");
+#endif
+ ios = file->iosystem;
+ if (ios == NULL)
+     return PIO_EBADID;
+
+ vdesc = (file->varlist)+vid;
+
+ if (vdesc == NULL)
+     return PIO_EBADID;
+
+ ndims = iodesc->ndims;
+ ierr = PIOc_inq_varndims(file->fh, vid, &fndims);
+
+ if (fndims==ndims)
+     vdesc->record=-1;
+
+ if (ios->ioproc)
+ {
+     io_region *region;
+     size_t start[fndims];
+     size_t count[fndims];
+     size_t tmp_start[fndims];
+     size_t tmp_count[fndims];
+     size_t tmp_bufsize=1;
+     int regioncnt;
+     void *bufptr;
+     int tsize;
+
+     int rrlen=0;
+     PIO_Offset *startlist[iodesc->maxregions];
+     PIO_Offset *countlist[iodesc->maxregions];
+
+     // buffer is incremented by byte and loffset is in terms of the iodesc->basetype
+     // so we need to multiply by the size of the basetype
+     // We can potentially allow for one iodesc to have multiple datatypes by allowing the
+     // calling program to change the basetype.
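+     // For example (illustrative values only): with basetype MPI_DOUBLE,
+     // MPI_Type_size() returns tsize == 8, so a region with loffset == 100
+     // starts at (char *) IOBUF + 100 * 8, i.e. 800 bytes into the buffer.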
+     region = iodesc->firstregion;
+     MPI_Type_size(iodesc->basetype, &tsize);
+     if (fndims>ndims)
+     {
+         ndims++;
+         if (vdesc->record<0)
+             vdesc->record=0;
+     }
+     for (regioncnt=0;regioncnt<iodesc->maxregions;regioncnt++)
+     {
+         // printf("%s %d %d %ld %d %d\n",__FILE__,__LINE__,regioncnt,region,fndims,ndims);
+         tmp_bufsize=1;
+         if (region==NULL || iodesc->llen==0)
+         {
+             for (i=0;i<fndims;i++)
+             {
+                 start[i] = 0;
+                 count[i] = 0;
+             }
+             bufptr = NULL;
+         }
+         else
+         {
+             if (regioncnt==0 || region==NULL)
+                 bufptr = IOBUF;
+             else
+                 bufptr=(void *)((char *) IOBUF + tsize*region->loffset);
+
+             // printf("%s %d %d %d %d\n",__FILE__,__LINE__,iodesc->llen - region->loffset, iodesc->llen, region->loffset);
+
+             if (vdesc->record >= 0 && fndims>1)
+             {
+                 start[0] = vdesc->record;
+                 for (i=1;i<fndims;i++)
+                 {
+                     start[i] = region->start[i-1];
+                     count[i] = region->count[i-1];
+                     // printf("%s %d %d %ld %ld\n",__FILE__,__LINE__,i,start[i],count[i]);
+                 }
+                 if (count[1] > 0)
+                     count[0] = 1;
+             }
+             else
+             {
+                 // Non-time dependent array
+                 for (i=0;i<fndims;i++)
+                 {
+                     start[i] = region->start[i];
+                     count[i] = region->count[i];
+                     // printf("%s %d %d %ld %ld\n",__FILE__,__LINE__,i,start[i],count[i]);
+                 }
+             }
+         }
+
+         switch(file->iotype)
+         {
+#ifdef _NETCDF4
+         case PIO_IOTYPE_NETCDF4P:
+             if (iodesc->basetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8)
+             {
+                 ierr = nc_get_vara_double (file->fh, vid,start,count, bufptr);
+             }
+             else if (iodesc->basetype == MPI_INTEGER)
+             {
+                 ierr = nc_get_vara_int (file->fh, vid, start, count, bufptr);
+             }
+             else if (iodesc->basetype == MPI_FLOAT || iodesc->basetype == MPI_REAL4)
+             {
+                 ierr = nc_get_vara_float (file->fh, vid, start, count, bufptr);
+             }
+             else
+             {
+                 fprintf(stderr,"Type not recognized %d in pioc_read_darray\n",(int) iodesc->basetype);
+             }
+             break;
+#endif
+#ifdef _PNETCDF
+         case PIO_IOTYPE_PNETCDF:
+         {
+             tmp_bufsize=1;
+             for (int j = 0; j < fndims; j++)
+                 tmp_bufsize *= count[j];
+
+             if (tmp_bufsize > 0)
+             {
+                 startlist[rrlen] = (PIO_Offset *) bget(fndims * sizeof(PIO_Offset));
+                 countlist[rrlen] = (PIO_Offset *) bget(fndims * sizeof(PIO_Offset));
+
+                 for (int j = 0; j < fndims; j++)
+                 {
+                     startlist[rrlen][j] = start[j];
+                     countlist[rrlen][j] = count[j];
+                     /* printf("%s %d %d %d %d %ld %ld %ld\n",__FILE__,__LINE__,realregioncnt,
+                        iodesc->maxregions, j,start[j],count[j],tmp_bufsize);*/
+                 }
+                 rrlen++;
+             }
+             if (regioncnt==iodesc->maxregions-1)
+             {
+                 ierr = ncmpi_get_varn_all(file->fh, vid, rrlen, startlist,
+                                           countlist, IOBUF, iodesc->llen, iodesc->basetype);
+                 for (i=0;i<rrlen;i++)
+                 {
+                     brel(startlist[i]);
+                     brel(countlist[i]);
+                 }
+                 rrlen = 0;
+             }
+         }
+         break;
+#endif
+         default:
+             ierr = iotype_error(file->iotype,__FILE__,__LINE__);
+
+         }
+         if (region)
+             region = region->next;
+     } // for (regioncnt=0;...)
+ }
+
+ ierr = check_netcdf(file, ierr, __FILE__,__LINE__);
+
+#ifdef TIMING
+ /* Stop timing this function. */
+ GPTLstop("PIO:read_darray_nc");
+#endif
+
+ return ierr;
+}
+
+/** Read an array of data from a file to the (serial) IO library.
+ *
+ * @param file a pointer to the open file descriptor for the file
+ * that will be written to
+ * @param iodesc a pointer to the defined iodescriptor for the buffer
+ * @param vid the variable id to be read.
+ * @param IOBUF the buffer to be read into from this mpi task
+ *
+ * @returns 0 for success, error code otherwise.
+ * @ingroup PIO_read_darray
+ */
+int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc,
+                              const int vid, void *IOBUF)
+{
+ int ierr=PIO_NOERR;
+ iosystem_desc_t *ios;  /* Pointer to io system information. */
+ var_desc_t *vdesc;
+ int ndims, fndims;
+ MPI_Status status;
+ int i;
+
+#ifdef TIMING
+ /* Start timing this function. */
*/ + GPTLstart("PIO:read_darray_nc_serial"); +#endif + ios = file->iosystem; + if (ios == NULL) + return PIO_EBADID; + + vdesc = (file->varlist)+vid; + + if (vdesc == NULL) + return PIO_EBADID; + + ndims = iodesc->ndims; + ierr = PIOc_inq_varndims(file->fh, vid, &fndims); + + if (fndims==ndims) + vdesc->record=-1; + + if (ios->ioproc) + { + io_region *region; + size_t start[fndims]; + size_t count[fndims]; + size_t tmp_start[fndims * iodesc->maxregions]; + size_t tmp_count[fndims * iodesc->maxregions]; + size_t tmp_bufsize; + int regioncnt; + void *bufptr; + int tsize; + + int rrlen = 0; + + // buffer is incremented by byte and loffset is in terms of the iodessc->basetype + // so we need to multiply by the size of the basetype + // We can potentially allow for one iodesc to have multiple datatypes by allowing the + // calling program to change the basetype. + region = iodesc->firstregion; + MPI_Type_size(iodesc->basetype, &tsize); + if (fndims>ndims) + { + if (vdesc->record < 0) + vdesc->record = 0; + } + for (regioncnt=0;regioncntmaxregions;regioncnt++) + { + if (region==NULL || iodesc->llen==0) + { + for (i = 0; i < fndims; i++) + { + tmp_start[i + regioncnt * fndims] = 0; + tmp_count[i + regioncnt * fndims] = 0; + } + bufptr=NULL; + } + else + { + if (vdesc->record >= 0 && fndims>1) + { + tmp_start[regioncnt*fndims] = vdesc->record; + for (i=1;istart[i-1]; + tmp_count[i+regioncnt*fndims] = region->count[i-1]; + } + if (tmp_count[1 + regioncnt * fndims] > 0) + tmp_count[regioncnt * fndims] = 1; + } + else + { + // Non-time dependent array + for (i = 0; i < fndims; i++) + { + tmp_start[i + regioncnt * fndims] = region->start[i]; + tmp_count[i + regioncnt * fndims] = region->count[i]; + } + } + /* for (i=0;inext; + } // for (regioncnt=0;...) + + if (ios->io_rank>0) + { + MPI_Send(&(iodesc->llen), 1, MPI_OFFSET, 0, ios->io_rank, ios->io_comm); + if (iodesc->llen > 0) + { + MPI_Send(&(iodesc->maxregions), 1, MPI_INT, 0, + ios->num_iotasks + ios->io_rank, ios->io_comm); + MPI_Send(tmp_count, iodesc->maxregions*fndims, MPI_OFFSET, 0, + 2 * ios->num_iotasks + ios->io_rank, ios->io_comm); + MPI_Send(tmp_start, iodesc->maxregions*fndims, MPI_OFFSET, 0, + 3 * ios->num_iotasks + ios->io_rank, ios->io_comm); + MPI_Recv(IOBUF, iodesc->llen, iodesc->basetype, 0, + 4 * ios->num_iotasks+ios->io_rank, ios->io_comm, &status); + } + } + else if (ios->io_rank == 0) + { + int maxregions=0; + size_t loffset, regionsize; + size_t this_start[fndims*iodesc->maxregions]; + size_t this_count[fndims*iodesc->maxregions]; + // for (i=ios->num_iotasks-1; i>=0; i--){ + for (int rtask = 1; rtask <= ios->num_iotasks; rtask++) + { + if (rtasknum_iotasks) + { + MPI_Recv(&tmp_bufsize, 1, MPI_OFFSET, rtask, rtask, ios->io_comm, &status); + if (tmp_bufsize>0) + { + MPI_Recv(&maxregions, 1, MPI_INT, rtask, ios->num_iotasks+rtask, + ios->io_comm, &status); + MPI_Recv(this_count, maxregions*fndims, MPI_OFFSET, rtask, + 2 * ios->num_iotasks + rtask, ios->io_comm, &status); + MPI_Recv(this_start, maxregions*fndims, MPI_OFFSET, rtask, + 3 * ios->num_iotasks + rtask, ios->io_comm, &status); + } + } + else + { + maxregions=iodesc->maxregions; + tmp_bufsize=iodesc->llen; + } + loffset = 0; + for (regioncnt=0;regioncntnum_iotasks) + { + for (int m=0; mbasetype == MPI_DOUBLE || iodesc->basetype == MPI_REAL8) + { + ierr = nc_get_vara_double (file->fh, vid,start, count, bufptr); + } + else if (iodesc->basetype == MPI_INTEGER) + { + ierr = nc_get_vara_int (file->fh, vid, start, count, bufptr); + } + else if (iodesc->basetype == MPI_FLOAT 
+                 {
+                     ierr = nc_get_vara_float (file->fh, vid, start, count, bufptr);
+                 }
+                 else
+                 {
+                     fprintf(stderr,"Type not recognized %d in pioc_read_darray_nc_serial\n",
+                             (int)iodesc->basetype);
+                 }
+
+                 if (ierr != PIO_NOERR)
+                 {
+                     for (int i = 0; i < fndims; i++)
+                         fprintf(stderr,"vid %d dim %d start %ld count %ld err %d\n",
+                                 vid, i, start[i], count[i], ierr);
+
+                 }
+
+#endif
+             }
+             if (rtask < ios->num_iotasks)
+                 MPI_Send(IOBUF, tmp_bufsize, iodesc->basetype, rtask,
+                          4 * ios->num_iotasks + rtask, ios->io_comm);
+         }
+     }
+ }
+
+ ierr = check_netcdf(file, ierr, __FILE__, __LINE__);
+
+#ifdef TIMING
+ /* Stop timing this function. */
+ GPTLstop("PIO:read_darray_nc_serial");
+#endif
+
+ return ierr;
+}
+
+/** Read a field from a file to the IO library.
+ *
+ * @param ncid identifies the netCDF file
+ * @param vid the variable ID to be read
+ * @param ioid: the I/O description ID as passed back by
+ * PIOc_InitDecomp().
+ * @param arraylen: the length of the array to be read. This
+ * is the length of the distributed array. That is, the length of
+ * the portion of the data that is on the processor.
+ * @param array: pointer to the data to be read. This is a
+ * pointer to the distributed portion of the array that is on this
+ * processor.
+ *
+ * @return 0 for success, error code otherwise.
+ * @ingroup PIO_read_darray
+ */
+int PIOc_read_darray(const int ncid, const int vid, const int ioid,
+                     const PIO_Offset arraylen, void *array)
+{
+ iosystem_desc_t *ios;  /* Pointer to io system information. */
+ file_desc_t *file;
+ io_desc_t *iodesc;
+ void *iobuf=NULL;
+ size_t rlen=0;
+ int ierr, tsize;
+ MPI_Datatype vtype;
+
+ file = pio_get_file_from_id(ncid);
+
+ if (file == NULL)
+ {
+     fprintf(stderr,"File handle not found %d %d\n",ncid,__LINE__);
+     return PIO_EBADID;
+ }
+ iodesc = pio_get_iodesc_from_id(ioid);
+ if (iodesc == NULL)
+ {
+     fprintf(stderr,"iodesc handle not found %d %d\n",ioid,__LINE__);
+     return PIO_EBADID;
+ }
+ ios = file->iosystem;
+ if (ios->iomaster)
+ {
+     rlen = iodesc->maxiobuflen;
+ }
+ else
+ {
+     rlen = iodesc->llen;
+ }
+
+ if (iodesc->rearranger > 0)
+ {
+     if (ios->ioproc && rlen>0)
+     {
+         MPI_Type_size(iodesc->basetype, &tsize);
+         iobuf = bget(((size_t) tsize)*rlen);
+         if (iobuf==NULL)
+         {
+             piomemerror(*ios,rlen*((size_t) tsize), __FILE__,__LINE__);
+         }
+     }
+ }
+ else
+ {
+     iobuf = array;
+ }
+
+ switch(file->iotype)
+ {
+ case PIO_IOTYPE_NETCDF:
+ case PIO_IOTYPE_NETCDF4C:
+     ierr = pio_read_darray_nc_serial(file, iodesc, vid, iobuf);
+     break;
+ case PIO_IOTYPE_PNETCDF:
+ case PIO_IOTYPE_NETCDF4P:
+     ierr = pio_read_darray_nc(file, iodesc, vid, iobuf);
+     break;
+ default:
+     ierr = iotype_error(file->iotype,__FILE__,__LINE__);
+ }
+ if (iodesc->rearranger > 0)
+ {
+     ierr = rearrange_io2comp(*ios, iodesc, iobuf, array);
+
+     if (rlen>0)
+         brel(iobuf);
+ }
+
+ return ierr;
+
+}
+
+/** Flush the output buffer. This is only relevant for files opened
+ * with pnetcdf.
+ *
+ * @param file a pointer to the open file descriptor for the file
+ * that will be written to
+ * @param force true to force the flushing of the buffer
+ * @param addsize additional size to add to buffer (in bytes)
+ *
+ * @return 0 for success, error code otherwise.
+ * @private
+ * @ingroup PIO_write_darray
+ */
+int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize)
+{
+ int ierr = PIO_NOERR;
+
+#ifdef _PNETCDF
+ var_desc_t *vdesc;
+ int *status;
+ PIO_Offset usage = 0;
+
+#ifdef TIMING
+ /* Start timing this function. */
*/ + GPTLstart("PIO:flush_output_buffer"); +#endif + + pioassert(file != NULL, "file pointer not defined", __FILE__, + __LINE__); + + /* Find out the buffer usage. */ + ierr = ncmpi_inq_buffer_usage(file->fh, &usage); + + /* If we are not forcing a flush, spread the usage to all IO + * tasks. */ + if (!force && file->iosystem->io_comm != MPI_COMM_NULL) + { + usage += addsize; + MPI_Allreduce(MPI_IN_PLACE, &usage, 1, MPI_OFFSET, MPI_MAX, + file->iosystem->io_comm); + } + + /* Keep track of the maximum usage. */ + if (usage > maxusage) + maxusage = usage; + + /* If the user forces it, or the buffer has exceeded the size + * limit, then flush to disk. */ + if (force || usage >= PIO_BUFFER_SIZE_LIMIT) + { + int rcnt; + bool prev_dist=false; + int prev_record=-1; + int prev_type=0; + int maxreq; + int reqcnt; + maxreq = 0; + reqcnt=0; + rcnt=0; + for (int i = 0; i < PIO_MAX_VARS; i++) + { + vdesc = file->varlist + i; + reqcnt += vdesc->nreqs; + if (vdesc->nreqs > 0) + maxreq = i; + } + int request[reqcnt]; + int status[reqcnt]; + + for (int i = 0; i <= maxreq; i++) + { + vdesc = file->varlist + i; +#ifdef MPIO_ONESIDED + /*onesided optimization requires that all of the requests in a wait_all call represent + a contiguous block of data in the file */ + if (rcnt>0 && (prev_record != vdesc->record || vdesc->nreqs==0)) + { + ierr = ncmpi_wait_all(file->fh, rcnt, request,status); + rcnt=0; + } + prev_record = vdesc->record; +#endif + // printf("%s %d %d %d %d \n",__FILE__,__LINE__,i,vdesc->nreqs,vdesc->request); + for (reqcnt=0;reqcntnreqs;reqcnt++) + { + request[rcnt++] = max(vdesc->request[reqcnt],NC_REQ_NULL); + } + free(vdesc->request); + vdesc->request = NULL; + vdesc->nreqs = 0; + // if (file->iosystem->io_rank < 2) printf("%s %d varid=%d\n",__FILE__,__LINE__,i); +#ifdef FLUSH_EVERY_VAR + ierr = ncmpi_wait_all(file->fh, rcnt, request, status); + rcnt = 0; +#endif + } + // if (file->iosystem->io_rank==0){ + // printf("%s %d %d\n",__FILE__,__LINE__,rcnt); + // } + if (rcnt > 0) + { + /* + if (file->iosystem->io_rank==0){ + printf("%s %d %d ",__FILE__,__LINE__,rcnt); + for (int i=0; ifh, rcnt, request, status); + } + for (int i = 0; i < PIO_MAX_VARS; i++) + { + vdesc = file->varlist + i; + if (vdesc->iobuf) + { + brel(vdesc->iobuf); + vdesc->iobuf=NULL; + } + if (vdesc->fillbuf) + { + brel(vdesc->fillbuf); + vdesc->fillbuf=NULL; + } + } + + } + +#ifdef TIMING + /* Stop timing this function. */ + GPTLstop("PIO:flush_output_buffer"); +#endif + +#endif /* _PNETCDF */ + return ierr; +} + +/** Print out info about the buffer for debug purposes. 
+ * + * @param ios the IO system structure + * @param collective true if collective report is desired + * + * @private + * @ingroup PIO_write_darray + */ +void cn_buffer_report(iosystem_desc_t ios, bool collective) +{ + + if (CN_bpool) + { + long bget_stats[5]; + long bget_mins[5]; + long bget_maxs[5]; + + bstats(bget_stats, bget_stats+1,bget_stats+2,bget_stats+3,bget_stats+4); + if (collective) + { + MPI_Reduce(bget_stats, bget_maxs, 5, MPI_LONG, MPI_MAX, 0, ios.comp_comm); + MPI_Reduce(bget_stats, bget_mins, 5, MPI_LONG, MPI_MIN, 0, ios.comp_comm); + if (ios.compmaster) + { + printf("PIO: Currently allocated buffer space %ld %ld\n", + bget_mins[0], bget_maxs[0]); + printf("PIO: Currently available buffer space %ld %ld\n", + bget_mins[1], bget_maxs[1]); + printf("PIO: Current largest free block %ld %ld\n", + bget_mins[2], bget_maxs[2]); + printf("PIO: Number of successful bget calls %ld %ld\n", + bget_mins[3], bget_maxs[3]); + printf("PIO: Number of successful brel calls %ld %ld\n", + bget_mins[4], bget_maxs[4]); + // print_trace(stdout); + } + } + else + { + printf("%d: PIO: Currently allocated buffer space %ld \n", + ios.union_rank, bget_stats[0]) ; + printf("%d: PIO: Currently available buffer space %ld \n", + ios.union_rank, bget_stats[1]); + printf("%d: PIO: Current largest free block %ld \n", + ios.union_rank, bget_stats[2]); + printf("%d: PIO: Number of successful bget calls %ld \n", + ios.union_rank, bget_stats[3]); + printf("%d: PIO: Number of successful brel calls %ld \n", + ios.union_rank, bget_stats[4]); + } + } +} + +/** Free the buffer pool. If malloc is used (that is, PIO_USE_MALLOC is + * non zero), this function does nothing. + * + * @param ios the IO system structure + * + * @private + * @ingroup PIO_write_darray + */ +void free_cn_buffer_pool(iosystem_desc_t ios) +{ +#if !PIO_USE_MALLOC + if (CN_bpool) + { + cn_buffer_report(ios, true); + bpoolrelease(CN_bpool); + // free(CN_bpool); + CN_bpool = NULL; + } +#endif /* !PIO_USE_MALLOC */ +} + +/** Flush the buffer. + * + * @param ncid identifies the netCDF file + * @param wmb + * @param flushtodisk + * + * @private + * @ingroup PIO_write_darray + */ +void flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk) +{ + if (wmb->validvars > 0) + { + PIOc_write_darray_multi(ncid, wmb->vid, wmb->ioid, wmb->validvars, + wmb->arraylen, wmb->data, wmb->frame, + wmb->fillvalue, flushtodisk); + wmb->validvars = 0; + brel(wmb->vid); + wmb->vid = NULL; + brel(wmb->data); + wmb->data = NULL; + if (wmb->fillvalue) + brel(wmb->fillvalue); + if (wmb->frame) + brel(wmb->frame); + wmb->fillvalue = NULL; + wmb->frame = NULL; + } +} + +/** Compute the maximum aggregate number of bytes. 
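+ * The result is the minimum, over all tasks in ios.union_comm, of
+ * PIO_BUFFER_SIZE_LIMIT divided by the maximum IO buffer length (on
+ * IO tasks) and PIO_CNBUFFER_LIMIT divided by the number of degrees
+ * of freedom (on compute tasks); it is stored in iodesc->maxbytes.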
+ *
+ * @param ios the IO system structure
+ * @param iodesc a pointer to the defined iodescriptor for the buffer
+ *
+ * @private
+ * @ingroup PIO_write_darray
+ */
+void compute_maxaggregate_bytes(const iosystem_desc_t ios, io_desc_t *iodesc)
+{
+ int maxbytesoniotask = INT_MAX;
+ int maxbytesoncomputetask = INT_MAX;
+ int maxbytes;
+
+ // printf("%s %d %d %d\n",__FILE__,__LINE__,iodesc->maxiobuflen, iodesc->ndof);
+
+ if (ios.ioproc && iodesc->maxiobuflen > 0)
+     maxbytesoniotask = PIO_BUFFER_SIZE_LIMIT / iodesc->maxiobuflen;
+
+ if (ios.comp_rank >= 0 && iodesc->ndof > 0)
+     maxbytesoncomputetask = PIO_CNBUFFER_LIMIT / iodesc->ndof;
+
+ maxbytes = min(maxbytesoniotask, maxbytesoncomputetask);
+
+ // printf("%s %d %d %d\n",__FILE__,__LINE__,maxbytesoniotask, maxbytesoncomputetask);
+
+ MPI_Allreduce(MPI_IN_PLACE, &maxbytes, 1, MPI_INT, MPI_MIN, ios.union_comm);
+ iodesc->maxbytes = maxbytes;
+ // printf("%s %d %d %d\n",__FILE__,__LINE__,iodesc->maxbytes,iodesc->maxiobuflen);
+
+}
diff --git a/externals/pio2/src/clib/pio_file.c b/externals/pio2/src/clib/pio_file.c
index 69d03336c4a..5003288caa0 100644
--- a/externals/pio2/src/clib/pio_file.c
+++ b/externals/pio2/src/clib/pio_file.c
@@ -1,461 +1,592 @@
+#include <config.h>
 #include <pio.h>
 #include <pio_internal.h>
-/**
- ** @public
- ** @ingroup PIO_openfile
- ** @brief open an existing file using pio
- ** @details Input parameters are read on comp task 0 and ignored elsewhere.
- ** @param iosysid : A defined pio system descriptor (input)
- ** @param ncidp : A pio file descriptor (output)
- ** @param iotype : A pio output format (input)
- ** @param filename : The filename to open
- ** @param mode : The netcdf mode for the open operation
- */
+/** Open an existing file using the PIO library.
+ *
+ * Input parameters are read on comp task 0 and ignored elsewhere.
+ *
+ * @param iosysid : A defined pio system descriptor (input)
+ * @param ncidp : A pio file descriptor (output)
+ * @param iotype : A pio output format (input)
+ * @param filename : The filename to open
+ * @param mode : The netcdf mode for the open operation
+ *
+ * @return 0 for success, error code otherwise.
+ * @ingroup PIO_openfile
+ */
 int PIOc_openfile(const int iosysid, int *ncidp, int *iotype,
-                  const char filename[], const int mode)
+                  const char *filename, const int mode)
 {
-  int ierr;
-  int msg;
-  int mpierr;
-  size_t len;
-  iosystem_desc_t *ios;
-  file_desc_t *file;
-
-  ierr = PIO_NOERR;
-
-  msg = PIO_MSG_OPEN_FILE;
-  ios = pio_get_iosystem_from_id(iosysid);
-  if(ios==NULL){
-    printf("bad iosysid %d\n",iosysid);
-    return PIO_EBADID;
-  }
-
-  file = (file_desc_t *) malloc(sizeof(*file));
-  if(file==NULL){
-    return PIO_ENOMEM;
-  }
-  file->iotype = *iotype;
-  file->next = NULL;
-  file->iosystem = ios;
-  file->mode = mode;
-  for(int i=0; i<PIO_MAX_VARS; i++){
-    file->varlist[i].record = -1;
-    file->varlist[i].ndims = -1;
+ iosystem_desc_t *ios;  /* Pointer to io system information. */
+ file_desc_t *file;     /* Pointer to file information. */
+ int ierr = PIO_NOERR;  /* Return code from function calls. */
+ int mpierr = MPI_SUCCESS, mpierr2;  /* Return code from MPI function codes. */
+
+ LOG((1, "PIOc_openfile iosysid = %d", iosysid));
+
+ /* User must provide valid input for these parameters. */
+ if (!ncidp || !iotype || !filename)
+     return PIO_EINVAL;
+ if (*iotype < PIO_IOTYPE_PNETCDF || *iotype > PIO_IOTYPE_NETCDF4P)
+     return PIO_EINVAL;
+
+ /* Get the IO system info from the iosysid.
*/ + if (!(ios = pio_get_iosystem_from_id(iosysid))) + { + LOG((0, "PIOc_openfile got bad iosysid %d",iosysid)); + return PIO_EBADID; + } + + /* Allocate space for the file info. */ + if (!(file = (file_desc_t *) malloc(sizeof(*file)))) + return PIO_ENOMEM; + + /* Fill in some file values. */ + file->iotype = *iotype; + file->next = NULL; + file->iosystem = ios; + file->mode = mode; + for (int i = 0; i < PIO_MAX_VARS; i++) + { + file->varlist[i].record = -1; + file->varlist[i].ndims = -1; #ifdef _PNETCDF - file->varlist[i].request = NULL; - file->varlist[i].nreqs=0; + file->varlist[i].request = NULL; + file->varlist[i].nreqs=0; #endif - file->varlist[i].fillbuf = NULL; - file->varlist[i].iobuf = NULL; - } - - file->buffer.validvars=0; - file->buffer.vid=NULL; - file->buffer.data=NULL; - file->buffer.next=NULL; - file->buffer.frame=NULL; - file->buffer.fillvalue=NULL; - - if(ios->async_interface && ! ios->ioproc){ - if(ios->comp_rank==0) - mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); - len = strlen(filename); - mpierr = MPI_Bcast((void *) filename,len, MPI_CHAR, ios->compmaster, ios->intercomm); - mpierr = MPI_Bcast(&(file->iotype), 1, MPI_INT, ios->compmaster, ios->intercomm); - mpierr = MPI_Bcast(&(file->mode), 1, MPI_INT, ios->compmaster, ios->intercomm); - } - - if(ios->ioproc){ - - switch(file->iotype){ + file->varlist[i].fillbuf = NULL; + file->varlist[i].iobuf = NULL; + } + + file->buffer.validvars = 0; + file->buffer.vid = NULL; + file->buffer.data = NULL; + file->buffer.next = NULL; + file->buffer.frame = NULL; + file->buffer.fillvalue = NULL; + + /* Set to true if this task should participate in IO (only true for + * one task with netcdf serial files. */ + if (file->iotype == PIO_IOTYPE_NETCDF4P || file->iotype == PIO_IOTYPE_PNETCDF || + ios->io_rank == 0) + file->do_io = 1; + else + file->do_io = 0; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + int msg = PIO_MSG_OPEN_FILE; + size_t len = strlen(filename); + + if (!ios->ioproc) + { + /* Send the message to the message handler. */ + if (ios->compmaster) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + /* Send the parameters of the function call. */ + if (!mpierr) + mpierr = MPI_Bcast(&len, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)filename, len + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&file->iotype, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. 
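+ * If the open fails with NC_ENOTNC or NC_EINVAL and an advanced
+ * iotype was requested, the code below retries the open once with
+ * plain netCDF classic on the IO root task.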
*/ + if (ios->ioproc) + { + switch (file->iotype) + { #ifdef _NETCDF #ifdef _NETCDF4 - case PIO_IOTYPE_NETCDF4P: -#ifdef _MPISERIAL - ierr = nc_open(filename, file->mode, &(file->fh)); + case PIO_IOTYPE_NETCDF4P: +#ifdef _MPISERIAL + ierr = nc_open(filename, file->mode, &(file->fh)); #else - file->mode = file->mode | NC_MPIIO; - ierr = nc_open_par(filename, file->mode, ios->io_comm,ios->info, &(file->fh)); + file->mode = file->mode | NC_MPIIO; + ierr = nc_open_par(filename, file->mode, ios->io_comm, ios->info, &file->fh); #endif - break; + break; - case PIO_IOTYPE_NETCDF4C: - file->mode = file->mode | NC_NETCDF4; - // *** Note the INTENTIONAL FALLTHROUGH *** + case PIO_IOTYPE_NETCDF4C: + file->mode = file->mode | NC_NETCDF4; + // *** Note the INTENTIONAL FALLTHROUGH *** #endif - case PIO_IOTYPE_NETCDF: - if(ios->io_rank==0){ - ierr = nc_open(filename, file->mode, &(file->fh)); - } - break; + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_open(filename, file->mode, &file->fh); + } + break; #endif #ifdef _PNETCDF - case PIO_IOTYPE_PNETCDF: - ierr = ncmpi_open(ios->io_comm, filename, file->mode, ios->info, &(file->fh)); - - // This should only be done with a file opened to append - if(ierr == PIO_NOERR && (file->mode & PIO_WRITE)){ - if(ios->iomaster) printf("%d Setting IO buffer %ld\n",__LINE__,PIO_BUFFER_SIZE_LIMIT); - ierr = ncmpi_buffer_attach(file->fh, PIO_BUFFER_SIZE_LIMIT ); - } - break; + case PIO_IOTYPE_PNETCDF: + ierr = ncmpi_open(ios->io_comm, filename, file->mode, ios->info, &file->fh); + + // This should only be done with a file opened to append + if (ierr == PIO_NOERR && (file->mode & PIO_WRITE)) + { + if(ios->iomaster) + LOG((1, "%d Setting IO buffer %ld", __LINE__, PIO_BUFFER_SIZE_LIMIT)); + ierr = ncmpi_buffer_attach(file->fh, PIO_BUFFER_SIZE_LIMIT); + } + break; #endif - default: - ierr = iotype_error(file->iotype,__FILE__,__LINE__); - break; - } + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + break; + } - // If we failed to open a file due to an incompatible type of NetCDF, try it - // once with just plain old basic NetCDF + // If we failed to open a file due to an incompatible type of + // NetCDF, try it once with just plain old basic NetCDF. #ifdef _NETCDF - if(ierr == NC_ENOTNC && (file->iotype != PIO_IOTYPE_NETCDF)) { - if(ios->iomaster) printf("PIO2 pio_file.c retry NETCDF\n"); - // reset ierr on all tasks - ierr = PIO_NOERR; - // reset file markers for NETCDF on all tasks - file->iotype = PIO_IOTYPE_NETCDF; + if((ierr == NC_ENOTNC || ierr == NC_EINVAL) && (file->iotype != PIO_IOTYPE_NETCDF)) { + if(ios->iomaster) printf("PIO2 pio_file.c retry NETCDF\n"); + // reset ierr on all tasks + ierr = PIO_NOERR; + // reset file markers for NETCDF on all tasks + file->iotype = PIO_IOTYPE_NETCDF; - // open netcdf file serially on main task - if(ios->io_rank==0){ - ierr = nc_open(filename, file->mode, &(file->fh)); } + // open netcdf file serially on main task + if(ios->io_rank==0){ + ierr = nc_open(filename, file->mode, &(file->fh)); } - } + } #endif - } - - ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - - if(ierr==PIO_NOERR){ - mpierr = MPI_Bcast(&(file->mode), 1, MPI_INT, ios->ioroot, ios->union_comm); - pio_add_to_file_list(file); - *ncidp = file->fh; - } - if(ios->io_rank==0){ - printf("Open file %s %d\n",filename,file->fh); //,file->fh,file->id,ios->io_rank,ierr); -// if(file->fh==5) print_trace(stdout); - } - return ierr; + } + + /* Broadcast and check the return code. 
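+ * Broadcasting ierr from the IO root means that every task, not just
+ * the IO tasks, sees the netCDF result and returns the same code.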
*/ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (!ierr) + { + if ((mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->ioroot, ios->union_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + if ((mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->ioroot, ios->union_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + *ncidp = file->fh; + pio_add_to_file_list(file); + } + + if (ios->io_rank == 0) + LOG((1, "Open file %s %d", filename, file->fh)); + + return ierr; } -/** - ** @public - ** @ingroup PIO_createfile - ** @brief open a new file using pio - ** @details Input parameters are read on comp task 0 and ignored elsewhere. - ** @param iosysid : A defined pio system descriptor (input) - ** @param ncidp : A pio file descriptor (output) - ** @param iotype : A pio output format (input) - ** @param filename : The filename to open - ** @param mode : The netcdf mode for the open operation +/** Open a new file using pio. Input parameters are read on comp task + * 0 and ignored elsewhere. + * + * @public + * @ingroup PIO_createfile + * + * @param iosysid : A defined pio system descriptor (input) + * @param ncidp : A pio file descriptor (output) + * @param iotype : A pio output format (input) + * @param filename : The filename to open + * @param mode : The netcdf mode for the open operation */ -int PIOc_createfile(const int iosysid, int *ncidp, int *iotype, - const char filename[], const int mode) +int PIOc_createfile(const int iosysid, int *ncidp, int *iotype, + const char filename[], const int mode) { - int ierr; - int msg; - int mpierr; - - size_t len; - iosystem_desc_t *ios; - file_desc_t *file; - - - ierr = PIO_NOERR; - - ios = pio_get_iosystem_from_id(iosysid); - file = (file_desc_t *) malloc(sizeof(file_desc_t)); - file->next = NULL; - file->iosystem = ios; - file->iotype = *iotype; - - file->buffer.validvars=0; - file->buffer.data=NULL; - file->buffer.next=NULL; - file->buffer.vid=NULL; - file->buffer.ioid=-1; - file->buffer.frame=NULL; - file->buffer.fillvalue=NULL; - - for(int i=0; ivarlist[i].record = -1; - file->varlist[i].ndims = -1; + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* User must provide valid input for these parameters. */ + if (!ncidp || !iotype || !filename || strlen(filename) > NC_MAX_NAME) + return PIO_EINVAL; + + /* Get the IO system info from the iosysid. */ + if (!(ios = pio_get_iosystem_from_id(iosysid))) + return PIO_EBADID; + + /* Allocate space for the file info. */ + if (!(file = (file_desc_t *)malloc(sizeof(file_desc_t)))) + return PIO_ENOMEM; + + /* Fill in some file values. 
*/ + file->next = NULL; + file->iosystem = ios; + file->iotype = *iotype; + + file->buffer.validvars = 0; + file->buffer.data = NULL; + file->buffer.next = NULL; + file->buffer.vid = NULL; + file->buffer.ioid = -1; + file->buffer.frame = NULL; + file->buffer.fillvalue = NULL; + + for(int i = 0; i < PIO_MAX_VARS; i++) + { + file->varlist[i].record = -1; + file->varlist[i].ndims = -1; #ifdef _PNETCDF - file->varlist[i].request = NULL; - file->varlist[i].nreqs=0; + file->varlist[i].request = NULL; + file->varlist[i].nreqs=0; #endif - file->varlist[i].fillbuf = NULL; - file->varlist[i].iobuf = NULL; - } - - msg = PIO_MSG_CREATE_FILE; - file->mode = mode; - - - if(ios->async_interface && ! ios->ioproc){ - if(ios->comp_rank==0) - mpierr = MPI_Send( &msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); - len = strlen(filename); - mpierr = MPI_Bcast((void *) filename,len, MPI_CHAR, ios->compmaster, ios->intercomm); - mpierr = MPI_Bcast(&(file->iotype), 1, MPI_INT, ios->compmaster, ios->intercomm); - mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->compmaster, ios->intercomm); - } + file->varlist[i].fillbuf = NULL; + file->varlist[i].iobuf = NULL; + } + file->mode = mode; + + /* Set to true if this task should participate in IO (only true for + * one task with netcdf serial files. */ + if (file->iotype == PIO_IOTYPE_NETCDF4P || file->iotype == PIO_IOTYPE_PNETCDF || + ios->io_rank == 0) + file->do_io = 1; + else + file->do_io = 0; + + /* If async is in use, and this is not an IO task, bcast the + * parameters. */ + if (ios->async_interface) + { + int msg = PIO_MSG_CREATE_FILE; + size_t len = strlen(filename); + + if (!ios->ioproc) + { + /* Send the message to the message handler. */ + if (ios->compmaster) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + /* Send the parameters of the function call. */ + if (!mpierr) + mpierr = MPI_Bcast(&len, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)filename, len + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&file->iotype, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->compmaster, ios->intercomm); + } - if(ios->ioproc){ - switch(file->iotype){ + /* Handle MPI errors. 
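+ * The compute tasks first agree on mpierr via a broadcast from
+ * comproot; mpierr2 only reports a failure of that broadcast itself.
+ * This way all tasks take the same error path.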
*/ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + if (ios->ioproc) + { + switch (file->iotype) + { #ifdef _NETCDF #ifdef _NETCDF4 - case PIO_IOTYPE_NETCDF4P: - // The 64 bit options are not compatable with hdf5 format files - // printf("%d %d %d %d %d \n",__LINE__,file->mode,PIO_64BIT_DATA, PIO_64BIT_OFFSET, NC_MPIIO); - file->mode = file->mode | NC_MPIIO | NC_NETCDF4; - //printf("%s %d %d %d\n",__FILE__,__LINE__,file->mode, NC_MPIIO| NC_NETCDF4); - ierr = nc_create_par(filename, file->mode, ios->io_comm,ios->info , &(file->fh)); - break; - case PIO_IOTYPE_NETCDF4C: - file->mode = file->mode | NC_NETCDF4; + case PIO_IOTYPE_NETCDF4P: + // The 64 bit options are not compatable with hdf5 format files + // printf("%d %d %d %d %d \n",__LINE__,file->mode,PIO_64BIT_DATA, PIO_64BIT_OFFSET, NC_MPIIO); + file->mode = file->mode | NC_MPIIO | NC_NETCDF4; + //printf("%s %d %d %d\n",__FILE__,__LINE__,file->mode, NC_MPIIO| NC_NETCDF4); + ierr = nc_create_par(filename, file->mode, ios->io_comm,ios->info , &(file->fh)); + break; + case PIO_IOTYPE_NETCDF4C: + file->mode = file->mode | NC_NETCDF4; #endif - case PIO_IOTYPE_NETCDF: - if(ios->io_rank==0){ - ierr = nc_create(filename, file->mode, &(file->fh)); - } - break; + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_create(filename, file->mode, &(file->fh)); + } + break; #endif #ifdef _PNETCDF - case PIO_IOTYPE_PNETCDF: - ierr = ncmpi_create(ios->io_comm, filename, file->mode, ios->info, &(file->fh)); - if(ierr == PIO_NOERR){ - if(ios->io_rank==0){ - printf("%d Setting IO buffer size on all iotasks to %ld\n",ios->io_rank,PIO_BUFFER_SIZE_LIMIT); - } - int oldfill; - ierr = ncmpi_buffer_attach(file->fh, PIO_BUFFER_SIZE_LIMIT ); - // ierr = ncmpi_set_fill(file->fh, NC_FILL, &oldfill); - } - break; + case PIO_IOTYPE_PNETCDF: + ierr = ncmpi_create(ios->io_comm, filename, file->mode, ios->info, &(file->fh)); + if(ierr == PIO_NOERR){ + if(ios->io_rank==0){ + printf("%d Setting IO buffer size on all iotasks to %ld\n",ios->io_rank,PIO_BUFFER_SIZE_LIMIT); + } + int oldfill; + ierr = ncmpi_buffer_attach(file->fh, PIO_BUFFER_SIZE_LIMIT ); + // ierr = ncmpi_set_fill(file->fh, NC_FILL, &oldfill); + } + break; #endif - default: - ierr = iotype_error(file->iotype,__FILE__,__LINE__); + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } } - } - - ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - - if(ierr == PIO_NOERR){ - mpierr = MPI_Bcast(&(file->mode), 1, MPI_INT, ios->ioroot, ios->union_comm); - file->mode = file->mode | PIO_WRITE; // This flag is implied by netcdf create functions but we need to know if its set - pio_add_to_file_list(file); - *ncidp = file->fh; - } - if(ios->io_rank==0){ - printf("Create file %s %d\n",filename,file->fh); //,file->fh,file->id,ios->io_rank,ierr); -// if(file->fh==5) print_trace(stdout); - } - return ierr; + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. 
*/ + if (!ierr) + { + if ((mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->ioroot, ios->union_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + file->mode = file->mode | PIO_WRITE; // This flag is implied by netcdf create functions but we need to know if its set + + if ((mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->ioroot, ios->union_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + *ncidp = file->fh; + pio_add_to_file_list(file); + } + + if (ios->io_rank == 0) + LOG((1, "Create file %s %d", filename, file->fh)); + + return ierr; } -/** - ** @ingroup PIO_closefile - ** @brief close a file previously opened with PIO - ** @param ncid: the file pointer +/** Close a file previously opened with PIO. + * @ingroup PIO_closefile + * + * @param ncid: the file pointer */ int PIOc_closefile(int ncid) { - int ierr; - int msg; - int mpierr; - iosystem_desc_t *ios; - file_desc_t *file; - - ierr = PIO_NOERR; - - file = pio_get_file_from_id(ncid); - if(file == NULL) - return PIO_EBADID; - ios = file->iosystem; - msg = 0; - if((file->mode & PIO_WRITE)){ - PIOc_sync(ncid); - } - if(ios->async_interface && ! ios->ioproc){ - if(ios->comp_rank==0) - mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); - mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm); - } - - if(ios->ioproc){ - switch(file->iotype){ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* Sync changes before closing. */ + if (file->mode & PIO_WRITE) + PIOc_sync(ncid); + + /* If async is in use and this is a comp tasks, then the compmaster + * sends a msg to the pio_msg_handler running on the IO master and + * waiting for a message. Then broadcast the ncid over the intercomm + * to the IO tasks. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_CLOSE_FILE; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. 
*/ + if (ios->ioproc) + { + switch (file->iotype) + { #ifdef _NETCDF #ifdef _NETCDF4 - case PIO_IOTYPE_NETCDF4P: - ierr = nc_close(file->fh); - break; - case PIO_IOTYPE_NETCDF4C: + case PIO_IOTYPE_NETCDF4P: + ierr = nc_close(file->fh); + break; + case PIO_IOTYPE_NETCDF4C: #endif - case PIO_IOTYPE_NETCDF: - if(ios->io_rank==0){ - ierr = nc_close(file->fh); - } - break; + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_close(file->fh); + } + break; #endif #ifdef _PNETCDF - case PIO_IOTYPE_PNETCDF: - if((file->mode & PIO_WRITE)){ - ierr = ncmpi_buffer_detach(file->fh); - } - ierr = ncmpi_close(file->fh); - break; + case PIO_IOTYPE_PNETCDF: + if((file->mode & PIO_WRITE)){ + ierr = ncmpi_buffer_detach(file->fh); + } + ierr = ncmpi_close(file->fh); + break; #endif - default: - ierr = iotype_error(file->iotype,__FILE__,__LINE__); + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } } - } - if(ios->io_rank==0){ - printf("Close file %d \n",file->fh); -// if(file->fh==5) print_trace(stdout); - } - - ierr = check_netcdf(file, ierr, __FILE__,__LINE__); - int iret = pio_delete_file_from_list(ncid); + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + /* Delete file from our list of open files. */ + pio_delete_file_from_list(ncid); - return ierr; + return ierr; } -/** - ** @ingroup PIO_deletefile - ** @brief Delete a file - ** @param iosysid : a pio system handle - ** @param filename : a filename +/** Delete a file. + * @ingroup PIO_deletefile + * + * @param iosysid : a pio system handle + * @param filename : a filename */ int PIOc_deletefile(const int iosysid, const char filename[]) { - int ierr; - int msg; - int mpierr; - int chkerr; - iosystem_desc_t *ios; - - ierr = PIO_NOERR; - ios = pio_get_iosystem_from_id(iosysid); - - if(ios == NULL) - return PIO_EBADID; - - msg = 0; - - if(ios->async_interface && ! ios->ioproc){ - if(ios->comp_rank==0) - mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); - // mpierr = MPI_Bcast(iosysid,1, MPI_INT, ios->compmaster, ios->intercomm); - } - // The barriers are needed to assure that no task is trying to operate on the file while it is being deleted. - if(ios->ioproc){ - MPI_Barrier(ios->io_comm); + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + int msg = PIO_MSG_DELETE_FILE; + size_t len; + + /* Get the IO system info from the id. */ + if (!(ios = pio_get_iosystem_from_id(iosysid))) + return PIO_EBADID; + + /* If async is in use, send message to IO master task. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + if(ios->comp_rank==0) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + len = strlen(filename); + if (!mpierr) + mpierr = MPI_Bcast(&len, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)filename, len + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + } + + /* If this is an IO task, then call the netCDF function. The + * barriers are needed to assure that no task is trying to operate + * on the file while it is being deleted. 
*/ + if(ios->ioproc){ + MPI_Barrier(ios->io_comm); #ifdef _NETCDF - if(ios->io_rank==0) - ierr = nc_delete(filename); + if(ios->io_rank==0) + ierr = nc_delete(filename); #else #ifdef _PNETCDF - ierr = ncmpi_delete(filename, ios->info); + ierr = ncmpi_delete(filename, ios->info); #endif #endif - MPI_Barrier(ios->io_comm); - } - // Special case - always broadcast the return from the - MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm); - - + MPI_Barrier(ios->io_comm); + } + + // Special case - always broadcast the return from the + MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm); - return ierr; + return ierr; } -/// -/// PIO interface to nc_sync -/// -/// This routine is called collectively by all tasks in the communicator ios.union_comm. -/// -/// Refer to the netcdf documentation. -/// -/** -* @name PIOc_sync -*/ -int PIOc_sync (int ncid) +/** + * PIO interface to nc_sync This routine is called collectively by all + * tasks in the communicator ios.union_comm. + * + * Refer to the netcdf documentation. + */ +int PIOc_sync(int ncid) { - int ierr; - int msg; - int mpierr; - iosystem_desc_t *ios; - file_desc_t *file; - wmulti_buffer *wmb, *twmb; - - ierr = PIO_NOERR; - - file = pio_get_file_from_id(ncid); - if(file == NULL) - return PIO_EBADID; - ios = file->iosystem; - msg = PIO_MSG_SYNC; - - if(ios->async_interface && ! ios->ioproc){ - if(ios->compmaster) - mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); - mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); - } - - if((file->mode & PIO_WRITE)){ - // cn_buffer_report( *ios, true); - wmb = &(file->buffer); - while(wmb != NULL){ - // printf("%s %d %d %d\n",__FILE__,__LINE__,wmb->ioid, wmb->validvars); - if(wmb->validvars>0){ - flush_buffer(ncid, wmb, true); - } - twmb = wmb; - wmb = wmb->next; - if(twmb == &(file->buffer)){ - twmb->ioid=-1; - twmb->next=NULL; - }else{ - brel(twmb); - } + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + wmulti_buffer *wmb, *twmb; + + /* Get the file info from the ncid. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, send message to IO master tasks. 
 */
+ if (ios->async_interface)
+ {
+     if (!ios->ioproc)
+     {
+         int msg = PIO_MSG_SYNC;
+
+         if(ios->comp_rank == 0)
+             mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+         mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm);
+     }
+ }
+
+ if (file->mode & PIO_WRITE)
+ {
+     //  cn_buffer_report( *ios, true);
+     wmb = &(file->buffer);
+     while(wmb != NULL){
+         //  printf("%s %d %d %d\n",__FILE__,__LINE__,wmb->ioid, wmb->validvars);
+         if(wmb->validvars>0){
+             flush_buffer(ncid, wmb, true);
+         }
+         twmb = wmb;
+         wmb = wmb->next;
+         if(twmb == &(file->buffer)){
+             twmb->ioid=-1;
+             twmb->next=NULL;
+         }else{
+             brel(twmb);
+         }
+     }
+     flush_output_buffer(file, true, 0);
+
+     if(ios->ioproc){
+         switch(file->iotype){
#ifdef _NETCDF
#ifdef _NETCDF4
+         case PIO_IOTYPE_NETCDF4P:
+             ierr = nc_sync(file->fh);
+             break;
+         case PIO_IOTYPE_NETCDF4C:
#endif
+         case PIO_IOTYPE_NETCDF:
+             if(ios->io_rank==0){
+                 ierr = nc_sync(file->fh);
+             }
+             break;
#endif
#ifdef _PNETCDF
+         case PIO_IOTYPE_PNETCDF:
+             ierr = ncmpi_sync(file->fh);
+             break;
#endif
+         default:
+             ierr = iotype_error(file->iotype,__FILE__,__LINE__);
+         }
+     }
+
+     ierr = check_netcdf(file, ierr, __FILE__,__LINE__);
+ }
+ return ierr;
}
diff --git a/externals/pio2/src/clib/pio_get_nc_async.c b/externals/pio2/src/clib/pio_get_nc_async.c
new file mode 100644
index 00000000000..7f6aeb79ce3
--- /dev/null
+++ b/externals/pio2/src/clib/pio_get_nc_async.c
@@ -0,0 +1,921 @@
+/**
+ * @file
+ * PIO functions to get data (excluding varm functions).
+ *
+ * @author Ed Hartnett
+ * @date 2016
+ *
+ * @see http://code.google.com/p/parallelio/
+ */
+
+#include <config.h>
+#include <pio.h>
+#include <pio_internal.h>
+
+/**
+ * Internal PIO function which provides a type-neutral interface to
+ * nc_get_vars.
+ *
+ * Users should not call this function directly. Instead, call one of
+ * the derived functions, depending on the type of data you are
+ * reading: PIOc_get_vars_text(), PIOc_get_vars_uchar(),
+ * PIOc_get_vars_schar(), PIOc_get_vars_ushort(),
+ * PIOc_get_vars_short(), PIOc_get_vars_uint(), PIOc_get_vars_int(),
+ * PIOc_get_vars_long(), PIOc_get_vars_float(),
+ * PIOc_get_vars_double(), PIOc_get_vars_ulonglong(),
+ * PIOc_get_vars_longlong()
+ *
+ * This routine is called collectively by all tasks in the
+ * communicator ios.union_comm.
+ *
+ * @param ncid identifies the netCDF file
+ * @param varid the variable ID number
+ * @param start an array of start indicies (must have same number of
+ * entries as variable has dimensions). If NULL, indices of 0 will be
+ * used.
+ *
+ * @param count an array of counts (must have same number of entries
+ * as variable has dimensions). If NULL, counts matching the size of
+ * the variable will be used.
+ *
+ * @param stride an array of strides (must have same number of
+ * entries as variable has dimensions). If NULL, strides of 1 will be
+ * used.
+ *
+ * @param xtype the netCDF type of the data being passed in buf. Data
+ * will be automatically converted from the type of the variable being
+ * read from to this type.
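+ *
+ * As an illustrative sketch (not part of this patch), reading a 2 x 3
+ * slab of an NC_INT variable through one of the derived functions
+ * might look like:
+ *
+ *     PIO_Offset start[2] = {0, 0}, count[2] = {2, 3};
+ *     int buf[6];
+ *     ierr = PIOc_get_vars_int(ncid, varid, start, count, NULL, buf);
+ *
+ * which forwards here with xtype NC_INT and a stride of 1 per
+ * dimension.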
+ * + * @param buf pointer to the data to be written. + * + * @return PIO_NOERR on success, error code otherwise. + */ +int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, nc_type xtype, void *buf) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + int ndims; /* The number of dimensions in the variable. */ + int *dimids; /* The IDs of the dimensions for this variable. */ + PIO_Offset typelen; /* Size (in bytes) of the data type of data in buf. */ + PIO_Offset num_elem = 1; /* Number of data elements in the buffer. */ + int bcast = false; + + LOG((1, "PIOc_get_vars_tc ncid = %d varid = %d start = %d count = %d " + "stride = %d xtype = %d", ncid, varid, start, count, stride, xtype)); + + /* User must provide a place to put some data. */ + if (!buf) + return PIO_EINVAL; + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* Run these on all tasks if async is not in use, but only on + * non-IO tasks if async is in use. */ + if (!ios->async_interface || !ios->ioproc) + { + /* Get the length of the data type. */ + if ((ierr = PIOc_inq_type(ncid, xtype, NULL, &typelen))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Get the number of dims for this var. */ + if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + PIO_Offset dimlen[ndims]; + + /* If no count array was passed, we need to know the dimlens + * so we can calculate how many data elements are in the + * buf. */ + if (!count) + { + int dimid[ndims]; + + /* Get the dimids for this var. */ + if ((ierr = PIOc_inq_vardimid(ncid, varid, dimid))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Get the length of each dimension. */ + for (int vd = 0; vd < ndims; vd++) + if ((ierr = PIOc_inq_dimlen(ncid, dimid[vd], &dimlen[vd]))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + } + + /* Figure out the real start, count, and stride arrays. (The + * user may have passed in NULLs.) */ + PIO_Offset rstart[ndims], rcount[ndims], rstride[ndims]; + for (int vd = 0; vd < ndims; vd++) + { + rstart[vd] = start ? start[vd] : 0; + rcount[vd] = count ? count[vd] : dimlen[vd]; + rstride[vd] = stride ? stride[vd] : 1; + } + + /* How many elements in buf? */ + for (int vd = 0; vd < ndims; vd++) + num_elem *= (rcount[vd] - rstart[vd])/rstride[vd]; + LOG((2, "PIOc_get_vars_tc num_elem = %d", num_elem)); + } + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_GET_VARS; + char start_present = start ? true : false; + char count_present = count ? true : false; + char stride_present = stride ? true : false; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + /* Send the function parameters and associated informaiton + * to the msg handler. 
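+ * Note that these broadcasts must match, in order and type, the
+ * receives posted by the message handler running on the IO tasks.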
+            if (!mpierr)
+                mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr && start_present)
+                mpierr = MPI_Bcast((PIO_Offset *)start, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr && count_present)
+                mpierr = MPI_Bcast((PIO_Offset *)count, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr && stride_present)
+                mpierr = MPI_Bcast((PIO_Offset *)stride, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&xtype, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&num_elem, 1, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            LOG((2, "PIOc_get_vars_tc ncid = %d varid = %d ndims = %d start_present = %d "
+                 "count_present = %d stride_present = %d xtype = %d num_elem = %d", ncid, varid,
+                 ndims, start_present, count_present, stride_present, xtype, num_elem));
+        }
+
+        /* Handle MPI errors. */
+        if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
+            return check_mpi(file, mpierr2, __FILE__, __LINE__);
+        if (mpierr)
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+
+        /* Broadcast values currently only known on computation tasks to IO tasks. */
+        if ((mpierr = MPI_Bcast(&num_elem, 1, MPI_OFFSET, ios->comproot, ios->my_comm)))
+            check_mpi(file, mpierr, __FILE__, __LINE__);
+        if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->comproot, ios->my_comm)))
+            check_mpi(file, mpierr, __FILE__, __LINE__);
+    }
+
+    /* If this is an IO task, then call the netCDF function. */
+    if (ios->ioproc)
+    {
+#ifdef _PNETCDF
+        if (file->iotype == PIO_IOTYPE_PNETCDF)
+        {
+#ifdef PNET_READ_AND_BCAST
+            LOG((1, "PNET_READ_AND_BCAST"));
+            ncmpi_begin_indep_data(file->fh);
+
+            /* Only the IO master does the IO, so we are not really
+             * getting parallel IO here. */
+            if (ios->iomaster)
+            {
+                switch(xtype)
+                {
+                case NC_BYTE:
+                    ierr = ncmpi_get_vars_schar(ncid, varid, start, count, stride, buf);
+                    break;
+                case NC_CHAR:
+                    ierr = ncmpi_get_vars_text(ncid, varid, start, count, stride, buf);
+                    break;
+                case NC_SHORT:
+                    ierr = ncmpi_get_vars_short(ncid, varid, start, count, stride, buf);
+                    break;
+                case NC_INT:
+                    ierr = ncmpi_get_vars_int(ncid, varid, start, count, stride, buf);
+                    break;
+                case NC_FLOAT:
+                    ierr = ncmpi_get_vars_float(ncid, varid, start, count, stride, buf);
+                    break;
+                case NC_DOUBLE:
+                    ierr = ncmpi_get_vars_double(ncid, varid, start, count, stride, buf);
+                    break;
+                case NC_INT64:
+                    ierr = ncmpi_get_vars_longlong(ncid, varid, start, count, stride, buf);
+                    break;
+                default:
+                    LOG((0, "Unknown type for pnetcdf file! xtype = %d", xtype));
xtype = %d", xtype)); + } + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else /* PNET_READ_AND_BCAST */ + LOG((1, "not PNET_READ_AND_BCAST")); + switch(xtype) + { + case NC_BYTE: + ierr = ncmpi_get_vars_schar_all(ncid, varid, start, count, stride, buf); + break; + case NC_CHAR: + ierr = ncmpi_get_vars_text_all(ncid, varid, start, count, stride, buf); + break; + case NC_SHORT: + ierr = ncmpi_get_vars_short_all(ncid, varid, start, count, stride, buf); + break; + case NC_INT: + ierr = ncmpi_get_vars_int_all(ncid, varid, start, count, stride, buf); + for (int i = 0; i < 4; i++) + LOG((2, "((int *)buf)[%d] = %d", i, ((int *)buf)[0])); + break; + case NC_FLOAT: + ierr = ncmpi_get_vars_float_all(ncid, varid, start, count, stride, buf); + break; + case NC_DOUBLE: + ierr = ncmpi_get_vars_double_all(ncid, varid, start, count, stride, buf); + break; + case NC_INT64: + ierr = ncmpi_get_vars_longlong_all(ncid, varid, start, count, stride, buf); + break; + default: + LOG((0, "Unknown type for pnetcdf file! xtype = %d", xtype)); + } +#endif /* PNET_READ_AND_BCAST */ + } +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + switch(xtype) + { + case NC_BYTE: + ierr = nc_get_vars_schar(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_CHAR: + ierr = nc_get_vars_schar(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_SHORT: + ierr = nc_get_vars_short(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_INT: + ierr = nc_get_vars_int(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_FLOAT: + ierr = nc_get_vars_float(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_DOUBLE: + ierr = nc_get_vars_double(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; +#ifdef _NETCDF4 + case NC_UBYTE: + ierr = nc_get_vars_uchar(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_USHORT: + ierr = nc_get_vars_ushort(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_UINT: + ierr = nc_get_vars_uint(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_INT64: + ierr = nc_get_vars_longlong(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + case NC_UINT64: + ierr = nc_get_vars_ulonglong(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); + break; + /* case NC_STRING: */ + /* ierr = nc_get_vars_string(ncid, varid, (size_t *)start, (size_t *)count, */ + /* (ptrdiff_t *)stride, (void *)buf); */ + /* break; */ + default: + ierr = nc_get_vars(ncid, varid, (size_t *)start, (size_t *)count, + (ptrdiff_t *)stride, buf); +#endif /* _NETCDF4 */ + } +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Send the data. 
+
+int PIOc_get_vars_text(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count,
+                       const PIO_Offset *stride, char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_CHAR, buf);
+}
+
+int PIOc_get_vars_uchar(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, const PIO_Offset *stride, unsigned char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_UBYTE, buf);
+}
+
+int PIOc_get_vars_schar(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, const PIO_Offset *stride, signed char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_BYTE, buf);
+}
+
+int PIOc_get_vars_ushort(int ncid, int varid, const PIO_Offset *start,
+                         const PIO_Offset *count, const PIO_Offset *stride, unsigned short *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_USHORT, buf);
+}
+
+int PIOc_get_vars_short(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, const PIO_Offset *stride, short *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_SHORT, buf);
+}
+
+int PIOc_get_vars_uint(int ncid, int varid, const PIO_Offset *start,
+                       const PIO_Offset *count, const PIO_Offset *stride, unsigned int *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_UINT, buf);
+}
+
+int PIOc_get_vars_int(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count,
+                      const PIO_Offset *stride, int *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_INT, buf);
+}
+
+int PIOc_get_vars_long(int ncid, int varid, const PIO_Offset *start,
+                       const PIO_Offset *count, const PIO_Offset *stride, long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_LONG, buf);
+}
+
+int PIOc_get_vars_float(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, const PIO_Offset *stride, float *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_FLOAT, buf);
+}
+
+int PIOc_get_vars_double(int ncid, int varid, const PIO_Offset *start,
+                         const PIO_Offset *count, const PIO_Offset *stride, double *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_DOUBLE, buf);
+}
+
+int PIOc_get_vars_ulonglong(int ncid, int varid, const PIO_Offset *start,
+                            const PIO_Offset *count, const PIO_Offset *stride,
+                            unsigned long long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_UINT64, buf);
+}
+
+int PIOc_get_vars_longlong(int ncid, int varid, const PIO_Offset *start,
+                           const PIO_Offset *count, const PIO_Offset *stride, long long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, stride, NC_INT64, buf);
+}
+
+int PIOc_get_vara_text(int ncid, int varid, const PIO_Offset *start,
+                       const PIO_Offset *count, char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_CHAR, buf);
+}
+
+int PIOc_get_vara_uchar(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, unsigned char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_UBYTE, buf);
+}
+
+int PIOc_get_vara_schar(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, signed char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_BYTE, buf);
+}
+
+int PIOc_get_vara_ushort(int ncid, int varid, const PIO_Offset *start,
+                         const PIO_Offset *count, unsigned short *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_USHORT, buf);
+}
+
+int PIOc_get_vara_short(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, short *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_SHORT, buf);
+}
+
+int PIOc_get_vara_long(int ncid, int varid, const PIO_Offset *start,
+                       const PIO_Offset *count, long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_LONG, buf);
+}
+
+int PIOc_get_vara_uint(int ncid, int varid, const PIO_Offset *start,
+                       const PIO_Offset *count, unsigned int *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_UINT, buf);
+}
+
+int PIOc_get_vara_int(int ncid, int varid, const PIO_Offset *start,
+                      const PIO_Offset *count, int *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_INT, buf);
+}
+
+int PIOc_get_vara_float(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, float *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_FLOAT, buf);
+}
+
+int PIOc_get_vara_double(int ncid, int varid, const PIO_Offset *start,
+                         const PIO_Offset *count, double *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_DOUBLE, buf);
+}
+
+int PIOc_get_vara_ulonglong(int ncid, int varid, const PIO_Offset *start,
+                            const PIO_Offset *count, unsigned long long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_UINT64, buf);
+}
+
+int PIOc_get_vara_longlong(int ncid, int varid, const PIO_Offset *start,
+                           const PIO_Offset *count, long long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_INT64, buf);
+}
+
+int PIOc_get_var_text(int ncid, int varid, char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_CHAR, buf);
+}
+
+int PIOc_get_var_uchar(int ncid, int varid, unsigned char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_UBYTE, buf);
+}
+
+int PIOc_get_var_schar(int ncid, int varid, signed char *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_BYTE, buf);
+}
+
+int PIOc_get_var_ushort(int ncid, int varid, unsigned short *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_USHORT, buf);
+}
+
+int PIOc_get_var_short(int ncid, int varid, short *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_SHORT, buf);
+}
+
+int PIOc_get_var_uint(int ncid, int varid, unsigned int *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_UINT, buf);
+}
+
+int PIOc_get_var_int(int ncid, int varid, int *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_INT, buf);
+}
+
+int PIOc_get_var_long(int ncid, int varid, long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_LONG, buf);
+}
+
+int PIOc_get_var_float(int ncid, int varid, float *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_FLOAT, buf);
+}
+
+int PIOc_get_var_double(int ncid, int varid, double *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_DOUBLE, buf);
+}
+
+int PIOc_get_var_ulonglong(int ncid, int varid, unsigned long long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_UINT64, buf);
+}
+
+int PIOc_get_var_longlong(int ncid, int varid, long long *buf)
+{
+    return PIOc_get_vars_tc(ncid, varid, NULL, NULL, NULL, NC_INT64, buf);
+}
+
+int PIOc_get_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype,
+                     void *buf)
+{
+    int ndims;
+    int ierr;
+
+    /* Find the number of dimensions. */
+    if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims)))
+        return ierr;
+
+    /* Set up count array. */
+    PIO_Offset count[ndims];
+    for (int c = 0; c < ndims; c++)
+        count[c] = 1;
+
+    return PIOc_get_vars_tc(ncid, varid, index, count, NULL, xtype, buf);
+}
+
+int PIOc_get_var1_text(int ncid, int varid, const PIO_Offset *index, char *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_CHAR, buf);
+}
+
+int PIOc_get_var1_uchar(int ncid, int varid, const PIO_Offset *index, unsigned char *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_UBYTE, buf);
+}
+
+int PIOc_get_var1_schar(int ncid, int varid, const PIO_Offset *index, signed char *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_BYTE, buf);
+}
+
+int PIOc_get_var1_ushort(int ncid, int varid, const PIO_Offset *index, unsigned short *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_USHORT, buf);
+}
+
+int PIOc_get_var1_short(int ncid, int varid, const PIO_Offset *index, short *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_SHORT, buf);
+}
+
+int PIOc_get_var1_uint(int ncid, int varid, const PIO_Offset *index, unsigned int *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_UINT, buf);
+}
+
+int PIOc_get_var1_long(int ncid, int varid, const PIO_Offset *index, long *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_LONG, buf);
+}
+
+int PIOc_get_var1_int(int ncid, int varid, const PIO_Offset *index, int *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_INT, buf);
+}
+
+int PIOc_get_var1_float(int ncid, int varid, const PIO_Offset *index, float *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_FLOAT, buf);
+}
+
+int PIOc_get_var1_double(int ncid, int varid, const PIO_Offset *index, double *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_DOUBLE, buf);
+}
+
+int PIOc_get_var1_ulonglong(int ncid, int varid, const PIO_Offset *index,
+                            unsigned long long *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_UINT64, buf);
+}
+
+int PIOc_get_var1_longlong(int ncid, int varid, const PIO_Offset *index,
+                           long long *buf)
+{
+    return PIOc_get_var1_tc(ncid, varid, index, NC_INT64, buf);
+}
+
+int PIOc_get_var(int ncid, int varid, void *buf, PIO_Offset bufcount, MPI_Datatype buftype)
+{
+    int ierr;
+    int msg;
+    int mpierr;
+    iosystem_desc_t *ios;
+    file_desc_t *file;
+    MPI_Datatype ibuftype;
+    int ndims;
+    int ibufcnt;
+    bool bcast = false;
+
+    file = pio_get_file_from_id(ncid);
+    if(file == NULL)
+        return PIO_EBADID;
+    ios = file->iosystem;
+    msg = PIO_MSG_GET_VAR;
+    ibufcnt = bufcount;
+    ibuftype = buftype;
+    ierr = PIO_NOERR;
+
+    if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_var(file->fh, varid, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_var(file->fh, varid, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_var(file->fh, varid, buf, bufcount, buftype);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_var_all(file->fh, varid, buf, bufcount, buftype);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + + + + + + +int PIOc_get_var1 (int ncid, int varid, const PIO_Offset *index, void *buf, PIO_Offset bufcount, MPI_Datatype buftype) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VAR1; + ibufcnt = bufcount; + ibuftype = buftype; + ierr = PIO_NOERR; + + if(ios->async_interface && ! ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_var1(file->fh, varid, (size_t *) index, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_var1(file->fh, varid, (size_t *) index, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_var1(file->fh, varid, index, buf, bufcount, buftype);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_var1_all(file->fh, varid, index, buf, bufcount, buftype);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_vara (int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, void *buf, PIO_Offset bufcount, MPI_Datatype buftype) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARA; + ibufcnt = bufcount; + ibuftype = buftype; + ierr = PIO_NOERR; + + if(ios->async_interface && ! 
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_vara(file->fh, varid, (size_t *) start, (size_t *) count, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_vara(file->fh, varid, (size_t *) start, (size_t *) count, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_vara(file->fh, varid, start, count, buf, bufcount, buftype);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_vara_all(file->fh, varid, start, count, buf, bufcount, buftype);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + + + + + + +int PIOc_get_vars (int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, void *buf, PIO_Offset bufcount, MPI_Datatype buftype) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARS; + ibufcnt = bufcount; + ibuftype = buftype; + ierr = PIO_NOERR; + + if(ios->async_interface && ! ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_vars(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_vars(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_vars(file->fh, varid, start, count, stride, buf, bufcount, buftype);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_vars_all(file->fh, varid, start, count, stride, buf, bufcount, buftype);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + diff --git a/externals/pio2/src/clib/pio_internal.h b/externals/pio2/src/clib/pio_internal.h index 8a42dfa2601..40d7e12f450 100644 --- a/externals/pio2/src/clib/pio_internal.h +++ b/externals/pio2/src/clib/pio_internal.h @@ -1,7 +1,19 @@ +/** + * @file + * Private headers and defines for the PIO C interface. 
+ * @author Jim Edwards + * @date 2014 + * + * @see http://code.google.com/p/parallelio/ + */ + #ifndef __PIO_INTERNAL__ #define __PIO_INTERNAL__ + #include -// It seems that some versions of openmpi fail to define MPI_OFFSET + +/* It seems that some versions of openmpi fail to define + * MPI_OFFSET. */ #ifdef OMPI_OFFSET_DATATYPE #ifndef MPI_OFFSET #define MPI_OFFSET OMPI_OFFSET_DATATYPE @@ -17,17 +29,22 @@ #include #endif +#if PIO_ENABLE_LOGGING +void pio_log(int severity, const char *fmt, ...); +#define LOG(e) pio_log e +#else +#define LOG(e) +#endif /* PIO_ENABLE_LOGGING */ -#define max(a,b) \ - ({ __typeof__ (a) _a = (a); \ - __typeof__ (b) _b = (b); \ - _a > _b ? _a : _b; }) - -#define min(a,b) \ - ({ __typeof__ (a) _a = (a); \ - __typeof__ (b) _b = (b); \ - _a < _b ? _a : _b; }) +#define max(a,b) \ + ({ __typeof__ (a) _a = (a); \ + __typeof__ (b) _b = (b); \ + _a > _b ? _a : _b; }) +#define min(a,b) \ + ({ __typeof__ (a) _a = (a); \ + __typeof__ (b) _b = (b); \ + _a < _b ? _a : _b; }) #define MAX_GATHER_BLOCK_SIZE 0 #define PIO_REQUEST_ALLOC_CHUNK 16 @@ -36,121 +53,116 @@ extern "C" { #endif -extern PIO_Offset PIO_BUFFER_SIZE_LIMIT; -extern bool PIO_Save_Decomps; - - -/** - ** @brief Used to sort map points in the subset rearranger -*/ -typedef struct mapsort -{ - int rfrom; - PIO_Offset soffset; - PIO_Offset iomap; -} mapsort; - -/** - * @brief swapm defaults. - * -*/ -typedef struct pio_swapm_defaults -{ - int nreqs; - bool handshake; - bool isend; -} pio_swapm_defaults; - - - void pio_get_env(void); - int pio_add_to_iodesc_list(io_desc_t *iodesc); - io_desc_t *pio_get_iodesc_from_id(int ioid); - int pio_delete_iodesc_from_list(int ioid); - - file_desc_t *pio_get_file_from_id(int ncid); - int pio_delete_file_from_list(int ncid); - void pio_add_to_file_list(file_desc_t *file); - void pio_push_request(file_desc_t *file, int request); - - iosystem_desc_t *pio_get_iosystem_from_id(int iosysid); - int pio_add_to_iosystem_list(iosystem_desc_t *ios); - - int check_netcdf(file_desc_t *file,const int status, const char *fname, const int line); - int iotype_error(const int iotype, const char *fname, const int line); - void piodie(const char *msg,const char *fname, const int line); - void pioassert(bool exp, const char *msg,const char *fname, const int line); - int CalcStartandCount(const int basetype, const int ndims, const int *gdims, const int num_io_procs, - const int myiorank, PIO_Offset *start, PIO_Offset *kount); - void CheckMPIReturn(const int ierr,const char file[],const int line); - int pio_fc_gather( void *sendbuf, const int sendcnt, const MPI_Datatype sendtype, - void *recvbuf, const int recvcnt, const MPI_Datatype recvtype, const int root, - MPI_Comm comm, const int flow_cntl); - int pio_fc_gatherv( void *sendbuf, const int sendcnt, const MPI_Datatype sendtype, - void *recvbuf, const int recvcnts[], const int recvdispl[], const MPI_Datatype recvtype, const int root, - MPI_Comm comm, const int flow_cntl); - - int pio_fc_gatherv( void *sendbuf, const int sendcnt, const MPI_Datatype sendtype, - void *recvbuf, const int recvcnts[], const int rdispls[], const MPI_Datatype recvtype, const int root, - MPI_Comm comm, const int flow_cntl); - - int pio_swapm(void *sndbuf, int sndlths[], int sdispls[], MPI_Datatype stypes[], - void *rcvbuf, int rcvlths[], int rdispls[], MPI_Datatype rtypes[], - MPI_Comm comm, const bool handshake, bool isend, const int max_requests); - - long long lgcd_array(int nain, long long*ain); - - void PIO_Offset_size(MPI_Datatype *dtype, int *tsize); - PIO_Offset 
GCDblocksize(const int arrlen, const PIO_Offset arr_in[]); - - int subset_rearrange_create(const iosystem_desc_t ios,const int maplen, PIO_Offset compmap[], const int gsize[], - const int ndim, io_desc_t *iodesc); - - - int box_rearrange_create(const iosystem_desc_t ios,const int maplen, const PIO_Offset compmap[], const int gsize[], - const int ndim, io_desc_t *iodesc); - - - int rearrange_io2comp(const iosystem_desc_t ios, io_desc_t *iodesc, void *sbuf, - void *rbuf); - int rearrange_comp2io(const iosystem_desc_t ios, io_desc_t *iodesc, void *sbuf, - void *rbuf, const int nvars); - int calcdisplace(const int bsize, const int numblocks,const PIO_Offset map[],int displace[]); - io_desc_t *malloc_iodesc(const int piotype, const int ndims); - void performance_tune_rearranger(iosystem_desc_t ios, io_desc_t *iodesc); - - int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize); - void compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc); - io_region *alloc_region(const int ndims); - int pio_delete_iosystem_from_list(int piosysid); - int gcd(int a, int b); - long long lgcd (long long a,long long b ); - int gcd_array(int nain, int *ain); - void free_region_list(io_region *top); - void gindex_to_coord(const int ndims, const PIO_Offset gindex, const PIO_Offset gstride[], PIO_Offset *gcoord); - PIO_Offset coord_to_lindex(const int ndims, const PIO_Offset lcoord[], const PIO_Offset count[]); - - int ceil2(const int i); - int pair(const int np, const int p, const int k); - int define_iodesc_datatypes(const iosystem_desc_t ios, io_desc_t *iodesc); - - int create_mpi_datatypes(const MPI_Datatype basetype,const int msgcnt,const PIO_Offset dlen, - const PIO_Offset mindex[],const int mcount[],int *mfrom, MPI_Datatype mtype[]); - int compare_offsets(const void *a,const void *b) ; - - int subset_rearrange_create(const iosystem_desc_t ios, int maplen, PIO_Offset compmap[], - const int gsize[], const int ndims, io_desc_t *iodesc); - void print_trace (FILE *fp); - void cn_buffer_report(iosystem_desc_t ios, bool collective); - void compute_buffer_init(iosystem_desc_t ios); - void free_cn_buffer_pool(iosystem_desc_t ios); -void flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk); - void piomemerror(iosystem_desc_t ios, size_t req, char *fname, const int line); - void compute_maxaggregate_bytes(const iosystem_desc_t ios, io_desc_t *iodesc); + extern PIO_Offset PIO_BUFFER_SIZE_LIMIT; + extern bool PIO_Save_Decomps; + + /** Used to sort map points in the subset rearranger. */ + typedef struct mapsort + { + int rfrom; + PIO_Offset soffset; + PIO_Offset iomap; + } mapsort; + + /** swapm defaults. 
*/ + typedef struct pio_swapm_defaults + { + int nreqs; + bool handshake; + bool isend; + } pio_swapm_defaults; + + void pio_get_env(void); + int pio_add_to_iodesc_list(io_desc_t *iodesc); + io_desc_t *pio_get_iodesc_from_id(int ioid); + int pio_delete_iodesc_from_list(int ioid); + + file_desc_t *pio_get_file_from_id(int ncid); + int pio_delete_file_from_list(int ncid); + void pio_add_to_file_list(file_desc_t *file); + void pio_push_request(file_desc_t *file, int request); + + iosystem_desc_t *pio_get_iosystem_from_id(int iosysid); + int pio_add_to_iosystem_list(iosystem_desc_t *ios); + + int check_netcdf(file_desc_t *file,const int status, const char *fname, const int line); + int iotype_error(const int iotype, const char *fname, const int line); + void piodie(const char *msg,const char *fname, const int line); + void pioassert(bool exp, const char *msg,const char *fname, const int line); + int CalcStartandCount(const int basetype, const int ndims, const int *gdims, const int num_io_procs, + const int myiorank, PIO_Offset *start, PIO_Offset *kount); + void CheckMPIReturn(const int ierr,const char file[],const int line); + int pio_fc_gather( void *sendbuf, const int sendcnt, const MPI_Datatype sendtype, + void *recvbuf, const int recvcnt, const MPI_Datatype recvtype, const int root, + MPI_Comm comm, const int flow_cntl); + int pio_fc_gatherv( void *sendbuf, const int sendcnt, const MPI_Datatype sendtype, + void *recvbuf, const int recvcnts[], const int recvdispl[], const MPI_Datatype recvtype, const int root, + MPI_Comm comm, const int flow_cntl); + + int pio_fc_gatherv( void *sendbuf, const int sendcnt, const MPI_Datatype sendtype, + void *recvbuf, const int recvcnts[], const int rdispls[], const MPI_Datatype recvtype, const int root, + MPI_Comm comm, const int flow_cntl); + + int pio_swapm(void *sndbuf, int sndlths[], int sdispls[], MPI_Datatype stypes[], + void *rcvbuf, int rcvlths[], int rdispls[], MPI_Datatype rtypes[], + MPI_Comm comm, const bool handshake, bool isend, const int max_requests); + + long long lgcd_array(int nain, long long*ain); + + void PIO_Offset_size(MPI_Datatype *dtype, int *tsize); + PIO_Offset GCDblocksize(const int arrlen, const PIO_Offset arr_in[]); + + int subset_rearrange_create(const iosystem_desc_t ios,const int maplen, PIO_Offset compmap[], const int gsize[], + const int ndim, io_desc_t *iodesc); + + + int box_rearrange_create(const iosystem_desc_t ios,const int maplen, const PIO_Offset compmap[], const int gsize[], + const int ndim, io_desc_t *iodesc); + + + int rearrange_io2comp(const iosystem_desc_t ios, io_desc_t *iodesc, void *sbuf, + void *rbuf); + int rearrange_comp2io(const iosystem_desc_t ios, io_desc_t *iodesc, void *sbuf, + void *rbuf, const int nvars); + int calcdisplace(const int bsize, const int numblocks,const PIO_Offset map[],int displace[]); + io_desc_t *malloc_iodesc(const int piotype, const int ndims); + void performance_tune_rearranger(iosystem_desc_t ios, io_desc_t *iodesc); + + int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize); + void compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc); + io_region *alloc_region(const int ndims); + int pio_delete_iosystem_from_list(int piosysid); + int gcd(int a, int b); + long long lgcd (long long a,long long b ); + int gcd_array(int nain, int *ain); + void free_region_list(io_region *top); + void gindex_to_coord(const int ndims, const PIO_Offset gindex, const PIO_Offset gstride[], PIO_Offset *gcoord); + PIO_Offset coord_to_lindex(const int ndims, const PIO_Offset 
lcoord[], const PIO_Offset count[]); + + int ceil2(const int i); + int pair(const int np, const int p, const int k); + int define_iodesc_datatypes(const iosystem_desc_t ios, io_desc_t *iodesc); + + int create_mpi_datatypes(const MPI_Datatype basetype,const int msgcnt,const PIO_Offset dlen, + const PIO_Offset mindex[],const int mcount[],int *mfrom, MPI_Datatype mtype[]); + int compare_offsets(const void *a,const void *b) ; + + int subset_rearrange_create(const iosystem_desc_t ios, int maplen, PIO_Offset compmap[], + const int gsize[], const int ndims, io_desc_t *iodesc); + void print_trace (FILE *fp); + void cn_buffer_report(iosystem_desc_t ios, bool collective); + void compute_buffer_init(iosystem_desc_t ios); + void free_cn_buffer_pool(iosystem_desc_t ios); + void flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk); + void piomemerror(iosystem_desc_t ios, size_t req, char *fname, const int line); + void compute_maxaggregate_bytes(const iosystem_desc_t ios, io_desc_t *iodesc); + int check_mpi(file_desc_t *file, const int mpierr, const char *filename, + const int line); #ifdef BGQ - void identity(MPI_Comm comm, int *iotask); - void determineiotasks(const MPI_Comm comm, int *numiotasks,int *base, int *stride, int *rearr, - bool *iamIOtask); + void identity(MPI_Comm comm, int *iotask); + void determineiotasks(const MPI_Comm comm, int *numiotasks,int *base, int *stride, int *rearr, + bool *iamIOtask); #endif @@ -158,214 +170,215 @@ void flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk); } #endif -enum PIO_MSG{ - PIO_MSG_OPEN_FILE, - PIO_MSG_CREATE_FILE, - PIO_MSG_INQ_ATT, - PIO_MSG_INQ_FORMAT, - PIO_MSG_INQ_VARID, - PIO_MSG_INQ_VARNATTS, - PIO_MSG_DEF_VAR, - PIO_MSG_INQ_VAR, - PIO_MSG_INQ_VARNAME, - PIO_MSG_PUT_ATT_DOUBLE, - PIO_MSG_PUT_ATT_INT, - PIO_MSG_RENAME_ATT, - PIO_MSG_DEL_ATT, - PIO_MSG_INQ_NATTS, - PIO_MSG_INQ, - PIO_MSG_GET_ATT_TEXT, - PIO_MSG_GET_ATT_SHORT, - PIO_MSG_PUT_ATT_LONG, - PIO_MSG_REDEF, - PIO_MSG_SET_FILL, - PIO_MSG_ENDDEF, - PIO_MSG_RENAME_VAR, - PIO_MSG_PUT_ATT_SHORT, - PIO_MSG_PUT_ATT_TEXT, - PIO_MSG_INQ_ATTNAME, - PIO_MSG_GET_ATT_ULONGLONG, - PIO_MSG_GET_ATT_USHORT, - PIO_MSG_PUT_ATT_ULONGLONG, - PIO_MSG_INQ_DIMLEN, - PIO_MSG_GET_ATT_UINT, - PIO_MSG_GET_ATT_LONGLONG, - PIO_MSG_PUT_ATT_SCHAR, - PIO_MSG_PUT_ATT_FLOAT, - PIO_MSG_INQ_NVARS, - PIO_MSG_RENAME_DIM, - PIO_MSG_INQ_VARNDIMS, - PIO_MSG_GET_ATT_LONG, - PIO_MSG_INQ_DIM, - PIO_MSG_INQ_DIMID, - PIO_MSG_INQ_UNLIMDIM, - PIO_MSG_INQ_VARDIMID, - PIO_MSG_INQ_ATTLEN, - PIO_MSG_INQ_DIMNAME, - PIO_MSG_PUT_ATT_USHORT, - PIO_MSG_GET_ATT_FLOAT, - PIO_MSG_SYNC, - PIO_MSG_PUT_ATT_LONGLONG, - PIO_MSG_PUT_ATT_UINT, - PIO_MSG_GET_ATT_SCHAR, - PIO_MSG_INQ_ATTID, - PIO_MSG_DEF_DIM, - PIO_MSG_INQ_NDIMS, - PIO_MSG_INQ_VARTYPE, - PIO_MSG_GET_ATT_INT, - PIO_MSG_GET_ATT_DOUBLE, - PIO_MSG_INQ_ATTTYPE, - PIO_MSG_PUT_ATT_UCHAR, - PIO_MSG_GET_ATT_UCHAR, - PIO_MSG_PUT_VARS_UCHAR, - PIO_MSG_GET_VAR1_SCHAR, - PIO_MSG_GET_VARS_ULONGLONG, - PIO_MSG_GET_VARM_UCHAR, - PIO_MSG_GET_VARM_SCHAR, - PIO_MSG_GET_VARS_SHORT, - PIO_MSG_GET_VAR_DOUBLE, - PIO_MSG_GET_VARA_DOUBLE, - PIO_MSG_GET_VAR_INT, - PIO_MSG_GET_VAR_USHORT, - PIO_MSG_PUT_VARS_USHORT, - PIO_MSG_GET_VARA_TEXT, - PIO_MSG_PUT_VARS_ULONGLONG, - PIO_MSG_GET_VARA_INT, - PIO_MSG_PUT_VARM, - PIO_MSG_GET_VAR1_FLOAT, - PIO_MSG_GET_VAR1_SHORT, - PIO_MSG_GET_VARS_INT, - PIO_MSG_PUT_VARS_UINT, - PIO_MSG_GET_VAR_TEXT, - PIO_MSG_GET_VARM_DOUBLE, - PIO_MSG_PUT_VARM_UCHAR, - PIO_MSG_PUT_VAR_USHORT, - PIO_MSG_GET_VARS_SCHAR, - PIO_MSG_GET_VARA_USHORT, - PIO_MSG_PUT_VAR1_LONGLONG, - 
PIO_MSG_PUT_VARA_UCHAR, - PIO_MSG_PUT_VARM_SHORT, - PIO_MSG_PUT_VAR1_LONG, - PIO_MSG_PUT_VARS_LONG, - PIO_MSG_GET_VAR1_USHORT, - PIO_MSG_PUT_VAR_SHORT, - PIO_MSG_PUT_VARA_INT, - PIO_MSG_GET_VAR_FLOAT, - PIO_MSG_PUT_VAR1_USHORT, - PIO_MSG_PUT_VARA_TEXT, - PIO_MSG_PUT_VARM_TEXT, - PIO_MSG_GET_VARS_UCHAR, - PIO_MSG_GET_VAR, - PIO_MSG_PUT_VARM_USHORT, - PIO_MSG_GET_VAR1_LONGLONG, - PIO_MSG_GET_VARS_USHORT, - PIO_MSG_GET_VAR_LONG, - PIO_MSG_GET_VAR1_DOUBLE, - PIO_MSG_PUT_VAR_ULONGLONG, - PIO_MSG_PUT_VAR_INT, - PIO_MSG_GET_VARA_UINT, - PIO_MSG_PUT_VAR_LONGLONG, - PIO_MSG_GET_VARS_LONGLONG, - PIO_MSG_PUT_VAR_SCHAR, - PIO_MSG_PUT_VAR_UINT, - PIO_MSG_PUT_VAR, - PIO_MSG_PUT_VARA_USHORT, - PIO_MSG_GET_VAR_LONGLONG, - PIO_MSG_GET_VARA_SHORT, - PIO_MSG_PUT_VARS_SHORT, - PIO_MSG_PUT_VARA_UINT, - PIO_MSG_PUT_VARA_SCHAR, - PIO_MSG_PUT_VARM_ULONGLONG, - PIO_MSG_PUT_VAR1_UCHAR, - PIO_MSG_PUT_VARM_INT, - PIO_MSG_PUT_VARS_SCHAR, - PIO_MSG_GET_VARA_LONG, - PIO_MSG_PUT_VAR1, - PIO_MSG_GET_VAR1_INT, - PIO_MSG_GET_VAR1_ULONGLONG, - PIO_MSG_GET_VAR_UCHAR, - PIO_MSG_PUT_VARA_FLOAT, - PIO_MSG_GET_VARA_UCHAR, - PIO_MSG_GET_VARS_FLOAT, - PIO_MSG_PUT_VAR1_FLOAT, - PIO_MSG_PUT_VARM_FLOAT, - PIO_MSG_PUT_VAR1_TEXT, - PIO_MSG_PUT_VARS_TEXT, - PIO_MSG_PUT_VARM_LONG, - PIO_MSG_GET_VARS_LONG, - PIO_MSG_PUT_VARS_DOUBLE, - PIO_MSG_GET_VAR1, - PIO_MSG_GET_VAR_UINT, - PIO_MSG_PUT_VARA_LONGLONG, - PIO_MSG_GET_VARA, - PIO_MSG_PUT_VAR_DOUBLE, - PIO_MSG_GET_VARA_SCHAR, - PIO_MSG_PUT_VAR_FLOAT, - PIO_MSG_GET_VAR1_UINT, - PIO_MSG_GET_VARS_UINT, - PIO_MSG_PUT_VAR1_ULONGLONG, - PIO_MSG_PUT_VARM_UINT, - PIO_MSG_PUT_VAR1_UINT, - PIO_MSG_PUT_VAR1_INT, - PIO_MSG_GET_VARA_FLOAT, - PIO_MSG_GET_VARM_TEXT, - PIO_MSG_PUT_VARS_FLOAT, - PIO_MSG_GET_VAR1_TEXT, - PIO_MSG_PUT_VARA_SHORT, - PIO_MSG_PUT_VAR1_SCHAR, - PIO_MSG_PUT_VARA_ULONGLONG, - PIO_MSG_PUT_VARM_DOUBLE, - PIO_MSG_GET_VARM_INT, - PIO_MSG_PUT_VARA, - PIO_MSG_PUT_VARA_LONG, - PIO_MSG_GET_VARM_UINT, - PIO_MSG_GET_VARM, - PIO_MSG_PUT_VAR1_DOUBLE, - PIO_MSG_GET_VARS_DOUBLE, - PIO_MSG_GET_VARA_LONGLONG, - PIO_MSG_GET_VAR_ULONGLONG, - PIO_MSG_PUT_VARM_SCHAR, - PIO_MSG_GET_VARA_ULONGLONG, - PIO_MSG_GET_VAR_SHORT, - PIO_MSG_GET_VARM_FLOAT, - PIO_MSG_PUT_VAR_TEXT, - PIO_MSG_PUT_VARS_INT, - PIO_MSG_GET_VAR1_LONG, - PIO_MSG_GET_VARM_LONG, - PIO_MSG_GET_VARM_USHORT, - PIO_MSG_PUT_VAR1_SHORT, - PIO_MSG_PUT_VARS_LONGLONG, - PIO_MSG_GET_VARM_LONGLONG, - PIO_MSG_GET_VARS_TEXT, - PIO_MSG_PUT_VARA_DOUBLE, - PIO_MSG_PUT_VARS, - PIO_MSG_PUT_VAR_UCHAR, - PIO_MSG_GET_VAR1_UCHAR, - PIO_MSG_PUT_VAR_LONG, - PIO_MSG_GET_VARS, - PIO_MSG_GET_VARM_SHORT, - PIO_MSG_GET_VARM_ULONGLONG, - PIO_MSG_PUT_VARM_LONGLONG, - PIO_MSG_GET_VAR_SCHAR, - PIO_MSG_GET_ATT_UBYTE, - PIO_MSG_PUT_ATT_STRING, - PIO_MSG_GET_ATT_STRING, - PIO_MSG_PUT_ATT_UBYTE, - PIO_MSG_INQ_VAR_FILL, - PIO_MSG_DEF_VAR_FILL, - PIO_MSG_DEF_VAR_DEFLATE, - PIO_MSG_INQ_VAR_DEFLATE, - PIO_MSG_INQ_VAR_SZIP, - PIO_MSG_DEF_VAR_FLETCHER32, - PIO_MSG_INQ_VAR_FLETCHER32, - PIO_MSG_DEF_VAR_CHUNKING, - PIO_MSG_INQ_VAR_CHUNKING, - PIO_MSG_DEF_VAR_ENDIAN, - PIO_MSG_INQ_VAR_ENDIAN, - PIO_MSG_SET_CHUNK_CACHE, - PIO_MSG_GET_CHUNK_CACHE, - PIO_MSG_SET_VAR_CHUNK_CACHE, - PIO_MSG_GET_VAR_CHUNK_CACHE +/** These are the messages that can be sent over the intercomm when + * async is being used. 
*/ +enum PIO_MSG +{ + PIO_MSG_OPEN_FILE, + PIO_MSG_CREATE_FILE, + PIO_MSG_INQ_ATT, + PIO_MSG_INQ_FORMAT, + PIO_MSG_INQ_VARID, + PIO_MSG_DEF_VAR, + PIO_MSG_INQ_VAR, + PIO_MSG_PUT_ATT_DOUBLE, + PIO_MSG_PUT_ATT_INT, + PIO_MSG_RENAME_ATT, + PIO_MSG_DEL_ATT, + PIO_MSG_INQ, + PIO_MSG_GET_ATT_TEXT, + PIO_MSG_GET_ATT_SHORT, + PIO_MSG_PUT_ATT_LONG, + PIO_MSG_REDEF, + PIO_MSG_SET_FILL, + PIO_MSG_ENDDEF, + PIO_MSG_RENAME_VAR, + PIO_MSG_PUT_ATT_SHORT, + PIO_MSG_PUT_ATT_TEXT, + PIO_MSG_INQ_ATTNAME, + PIO_MSG_GET_ATT_ULONGLONG, + PIO_MSG_GET_ATT_USHORT, + PIO_MSG_PUT_ATT_ULONGLONG, + PIO_MSG_GET_ATT_UINT, + PIO_MSG_GET_ATT_LONGLONG, + PIO_MSG_PUT_ATT_SCHAR, + PIO_MSG_PUT_ATT_FLOAT, + PIO_MSG_RENAME_DIM, + PIO_MSG_GET_ATT_LONG, + PIO_MSG_INQ_DIM, + PIO_MSG_INQ_DIMID, + PIO_MSG_PUT_ATT_USHORT, + PIO_MSG_GET_ATT_FLOAT, + PIO_MSG_SYNC, + PIO_MSG_PUT_ATT_LONGLONG, + PIO_MSG_PUT_ATT_UINT, + PIO_MSG_GET_ATT_SCHAR, + PIO_MSG_INQ_ATTID, + PIO_MSG_DEF_DIM, + PIO_MSG_GET_ATT_INT, + PIO_MSG_GET_ATT_DOUBLE, + PIO_MSG_PUT_ATT_UCHAR, + PIO_MSG_GET_ATT_UCHAR, + PIO_MSG_PUT_VARS_UCHAR, + PIO_MSG_GET_VAR1_SCHAR, + PIO_MSG_GET_VARS_ULONGLONG, + PIO_MSG_GET_VARM_UCHAR, + PIO_MSG_GET_VARM_SCHAR, + PIO_MSG_GET_VARS_SHORT, + PIO_MSG_GET_VAR_DOUBLE, + PIO_MSG_GET_VARA_DOUBLE, + PIO_MSG_GET_VAR_INT, + PIO_MSG_GET_VAR_USHORT, + PIO_MSG_PUT_VARS_USHORT, + PIO_MSG_GET_VARA_TEXT, + PIO_MSG_PUT_VARS_ULONGLONG, + PIO_MSG_GET_VARA_INT, + PIO_MSG_PUT_VARM, + PIO_MSG_GET_VAR1_FLOAT, + PIO_MSG_GET_VAR1_SHORT, + PIO_MSG_GET_VARS_INT, + PIO_MSG_PUT_VARS_UINT, + PIO_MSG_GET_VAR_TEXT, + PIO_MSG_GET_VARM_DOUBLE, + PIO_MSG_PUT_VARM_UCHAR, + PIO_MSG_PUT_VAR_USHORT, + PIO_MSG_GET_VARS_SCHAR, + PIO_MSG_GET_VARA_USHORT, + PIO_MSG_PUT_VAR1_LONGLONG, + PIO_MSG_PUT_VARA_UCHAR, + PIO_MSG_PUT_VARM_SHORT, + PIO_MSG_PUT_VAR1_LONG, + PIO_MSG_PUT_VARS_LONG, + PIO_MSG_GET_VAR1_USHORT, + PIO_MSG_PUT_VAR_SHORT, + PIO_MSG_PUT_VARA_INT, + PIO_MSG_GET_VAR_FLOAT, + PIO_MSG_PUT_VAR1_USHORT, + PIO_MSG_PUT_VARA_TEXT, + PIO_MSG_PUT_VARM_TEXT, + PIO_MSG_GET_VARS_UCHAR, + PIO_MSG_GET_VAR, + PIO_MSG_PUT_VARM_USHORT, + PIO_MSG_GET_VAR1_LONGLONG, + PIO_MSG_GET_VARS_USHORT, + PIO_MSG_GET_VAR_LONG, + PIO_MSG_GET_VAR1_DOUBLE, + PIO_MSG_PUT_VAR_ULONGLONG, + PIO_MSG_PUT_VAR_INT, + PIO_MSG_GET_VARA_UINT, + PIO_MSG_PUT_VAR_LONGLONG, + PIO_MSG_GET_VARS_LONGLONG, + PIO_MSG_PUT_VAR_SCHAR, + PIO_MSG_PUT_VAR_UINT, + PIO_MSG_PUT_VAR, + PIO_MSG_PUT_VARA_USHORT, + PIO_MSG_GET_VAR_LONGLONG, + PIO_MSG_GET_VARA_SHORT, + PIO_MSG_PUT_VARS_SHORT, + PIO_MSG_PUT_VARA_UINT, + PIO_MSG_PUT_VARA_SCHAR, + PIO_MSG_PUT_VARM_ULONGLONG, + PIO_MSG_PUT_VAR1_UCHAR, + PIO_MSG_PUT_VARM_INT, + PIO_MSG_PUT_VARS_SCHAR, + PIO_MSG_GET_VARA_LONG, + PIO_MSG_PUT_VAR1, + PIO_MSG_GET_VAR1_INT, + PIO_MSG_GET_VAR1_ULONGLONG, + PIO_MSG_GET_VAR_UCHAR, + PIO_MSG_PUT_VARA_FLOAT, + PIO_MSG_GET_VARA_UCHAR, + PIO_MSG_GET_VARS_FLOAT, + PIO_MSG_PUT_VAR1_FLOAT, + PIO_MSG_PUT_VARM_FLOAT, + PIO_MSG_PUT_VAR1_TEXT, + PIO_MSG_PUT_VARS_TEXT, + PIO_MSG_PUT_VARM_LONG, + PIO_MSG_GET_VARS_LONG, + PIO_MSG_PUT_VARS_DOUBLE, + PIO_MSG_GET_VAR1, + PIO_MSG_GET_VAR_UINT, + PIO_MSG_PUT_VARA_LONGLONG, + PIO_MSG_GET_VARA, + PIO_MSG_PUT_VAR_DOUBLE, + PIO_MSG_GET_VARA_SCHAR, + PIO_MSG_PUT_VAR_FLOAT, + PIO_MSG_GET_VAR1_UINT, + PIO_MSG_GET_VARS_UINT, + PIO_MSG_PUT_VAR1_ULONGLONG, + PIO_MSG_PUT_VARM_UINT, + PIO_MSG_PUT_VAR1_UINT, + PIO_MSG_PUT_VAR1_INT, + PIO_MSG_GET_VARA_FLOAT, + PIO_MSG_GET_VARM_TEXT, + PIO_MSG_PUT_VARS_FLOAT, + PIO_MSG_GET_VAR1_TEXT, + PIO_MSG_PUT_VARA_SHORT, + PIO_MSG_PUT_VAR1_SCHAR, + PIO_MSG_PUT_VARA_ULONGLONG, + 
PIO_MSG_PUT_VARM_DOUBLE, + PIO_MSG_GET_VARM_INT, + PIO_MSG_PUT_VARA, + PIO_MSG_PUT_VARA_LONG, + PIO_MSG_GET_VARM_UINT, + PIO_MSG_GET_VARM, + PIO_MSG_PUT_VAR1_DOUBLE, + PIO_MSG_GET_VARS_DOUBLE, + PIO_MSG_GET_VARA_LONGLONG, + PIO_MSG_GET_VAR_ULONGLONG, + PIO_MSG_PUT_VARM_SCHAR, + PIO_MSG_GET_VARA_ULONGLONG, + PIO_MSG_GET_VAR_SHORT, + PIO_MSG_GET_VARM_FLOAT, + PIO_MSG_PUT_VAR_TEXT, + PIO_MSG_PUT_VARS_INT, + PIO_MSG_GET_VAR1_LONG, + PIO_MSG_GET_VARM_LONG, + PIO_MSG_GET_VARM_USHORT, + PIO_MSG_PUT_VAR1_SHORT, + PIO_MSG_PUT_VARS_LONGLONG, + PIO_MSG_GET_VARM_LONGLONG, + PIO_MSG_GET_VARS_TEXT, + PIO_MSG_PUT_VARA_DOUBLE, + PIO_MSG_PUT_VARS, + PIO_MSG_PUT_VAR_UCHAR, + PIO_MSG_GET_VAR1_UCHAR, + PIO_MSG_PUT_VAR_LONG, + PIO_MSG_GET_VARS, + PIO_MSG_GET_VARM_SHORT, + PIO_MSG_GET_VARM_ULONGLONG, + PIO_MSG_PUT_VARM_LONGLONG, + PIO_MSG_GET_VAR_SCHAR, + PIO_MSG_GET_ATT_UBYTE, + PIO_MSG_PUT_ATT_STRING, + PIO_MSG_GET_ATT_STRING, + PIO_MSG_PUT_ATT_UBYTE, + PIO_MSG_INQ_VAR_FILL, + PIO_MSG_DEF_VAR_FILL, + PIO_MSG_DEF_VAR_DEFLATE, + PIO_MSG_INQ_VAR_DEFLATE, + PIO_MSG_INQ_VAR_SZIP, + PIO_MSG_DEF_VAR_FLETCHER32, + PIO_MSG_INQ_VAR_FLETCHER32, + PIO_MSG_DEF_VAR_CHUNKING, + PIO_MSG_INQ_VAR_CHUNKING, + PIO_MSG_DEF_VAR_ENDIAN, + PIO_MSG_INQ_VAR_ENDIAN, + PIO_MSG_SET_CHUNK_CACHE, + PIO_MSG_GET_CHUNK_CACHE, + PIO_MSG_SET_VAR_CHUNK_CACHE, + PIO_MSG_GET_VAR_CHUNK_CACHE, + PIO_MSG_INITDECOMP_DOF, + PIO_MSG_WRITEDARRAY, + PIO_MSG_READDARRAY, + PIO_MSG_SETERRORHANDLING, + PIO_MSG_FREEDECOMP, + PIO_MSG_CLOSE_FILE, + PIO_MSG_DELETE_FILE, + PIO_MSG_EXIT, + PIO_MSG_GET_ATT, + PIO_MSG_PUT_ATT, + PIO_MSG_INQ_TYPE }; -#endif +#endif /* __PIO_INTERNAL__ */ diff --git a/externals/pio2/src/clib/pio_lists.c b/externals/pio2/src/clib/pio_lists.c index e69a944bb20..efa0868c444 100644 --- a/externals/pio2/src/clib/pio_lists.c +++ b/externals/pio2/src/clib/pio_lists.c @@ -9,38 +9,35 @@ static iosystem_desc_t *pio_iosystem_list=NULL; static file_desc_t *pio_file_list = NULL; static file_desc_t *current_file=NULL; +/** Add a new entry to the global list of open files. + * + * @param file pointer to the file_desc_t struct for the new file. +*/ void pio_add_to_file_list(file_desc_t *file) { file_desc_t *cfile; - int cnt=-1; - // on iotasks the fh returned from netcdf should be unique, on non-iotasks we - // need to generate a unique fh, we do this with cnt, its a negative index + /* This file will be at the end of the list, and have no next. */ file->next = NULL; - cfile = pio_file_list; - current_file = file; - if(cfile==NULL){ - pio_file_list = file; - }else{ - cnt = min(cnt,cfile->fh-1); - while(cfile->next != NULL) - { - cfile=cfile->next; - cnt = min(cnt,cfile->fh-1); - } - cfile->next = file; - } - if(! file->iosystem->ioproc || ((file->iotype != PIO_IOTYPE_PNETCDF && - file->iotype != PIO_IOTYPE_NETCDF4P) && - file->iosystem->io_rank>0)) - file->fh = cnt; + /* Get a pointer to the global list of files. */ cfile = pio_file_list; -} - - + /* Keep a global pointer to the current file. */ + current_file = file; + /* If there is nothing in the list, then file will be the first + * entry. Otherwise, move to end of the list. 
*/
+    if (!cfile)
+        pio_file_list = file;
+    else
+    {
+        while (cfile->next)
+            cfile = cfile->next;
+        cfile->next = file;
+    }
+}
 
 file_desc_t *pio_get_file_from_id(int ncid)
 {
   file_desc_t *cfile;
@@ -61,7 +58,7 @@ file_desc_t *pio_get_file_from_id(int ncid)
   }
   return cfile;
 }
- 
+
 int pio_delete_file_from_list(int ncid)
 {
@@ -78,7 +75,7 @@ int pio_delete_file_from_list(int ncid)
       }
       if(current_file==cfile)
         current_file=pfile;
-       free(cfile);
+      free(cfile);
       return PIO_NOERR;
     }
     pfile = cfile;
@@ -148,7 +145,7 @@ iosystem_desc_t *pio_get_iosystem_from_id(int iosysid)
     ciosystem = ciosystem->next;
   }
   return NULL;
- 
+
 }
 
 int pio_add_to_iodesc_list(io_desc_t *iodesc)
@@ -170,7 +167,7 @@ int pio_add_to_iodesc_list(io_desc_t *iodesc)
 
   return iodesc->ioid;
 }
- 
+
 io_desc_t *pio_get_iodesc_from_id(int ioid)
 {
   io_desc_t *ciodesc;
@@ -195,7 +192,7 @@ io_desc_t *pio_get_iodesc_from_id(int ioid)
 
   return ciodesc;
 }
- 
+
 int pio_delete_iodesc_from_list(int ioid)
 {
diff --git a/externals/pio2/src/clib/pio_msg.c b/externals/pio2/src/clib/pio_msg.c
new file mode 100644
index 00000000000..e6220d850cd
--- /dev/null
+++ b/externals/pio2/src/clib/pio_msg.c
@@ -0,0 +1,2119 @@
+/**
+ * @file
+ * @author Ed Hartnett
+ * @date 2016
+ * @brief PIO async message handling
+ *
+ * @see http://code.google.com/p/parallelio/
+ */
+
+#include <config.h>
+#include <pio.h>
+#include <pio_internal.h>
+
+/* MPI serial builds stub out MPI functions so that the MPI code can
+ * work on one processor. This function is missing from our serial MPI
+ * implementation, so it is included here. This can be removed after
+ * it is added to the MPI serial library. */
+/* #ifdef USE_MPI_SERIAL */
+/* int MPI_Intercomm_merge(MPI_Comm intercomm, int high, MPI_Comm *newintracomm) */
+/* { */
+/*     return MPI_SUCCESS; */
+/* } */
+/* #endif /\* USE_MPI_SERIAL *\/ */
+
+#ifdef PIO_ENABLE_LOGGING
+extern int my_rank;
+extern int pio_log_level;
+#endif /* PIO_ENABLE_LOGGING */
+
+/** This function is run on the IO tasks to find netCDF type
+ * length. */
+int inq_type_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int xtype;
+    char name_present, size_present;
+    char *namep = NULL, name[NC_MAX_NAME + 1];
+    PIO_Offset *sizep = NULL, size;
+    int mpierr;
+    int ret;
+
+    LOG((1, "inq_type_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&xtype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&name_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&size_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+
+    /* Handle null pointer issues. */
+    if (name_present)
+        namep = name;
+    if (size_present)
+        sizep = &size;
+
+    /* Call the function. */
+    if ((ret = PIOc_inq_type(ncid, xtype, namep, sizep)))
+        return ret;
+
+    LOG((1, "inq_type_handler succeeded!"));
+    return PIO_NOERR;
+}
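Handlers like inq_type_handler() above are invoked from a message loop that the IO tasks enter at initialization, presumably defined later in pio_msg.c. A simplified, hypothetical sketch of that loop's shape, using the PIO_MSG_ values added to pio_internal.h in this diff:

    /* Sketch (hypothetical, much simplified): the IO-task loop that
     * dispatches incoming message IDs to the handlers in this file. */
    int msg = 0;
    while (msg != PIO_MSG_EXIT)
    {
        MPI_Bcast(&msg, 1, MPI_INT, 0, ios->intercomm);
        switch (msg)
        {
        case PIO_MSG_INQ_TYPE:
            inq_type_handler(ios);
            break;
        case PIO_MSG_INQ_FORMAT:
            inq_format_handler(ios);
            break;
        /* ... one case per PIO_MSG_ value ... */
        }
    }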
+
+/** This function is run on the IO tasks to find netCDF file
+ * format. */
+int inq_format_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int *formatp = NULL, format;
+    char format_present;
+    int mpierr;
+    int ret;
+
+    LOG((1, "inq_format_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&format_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "inq_format_handler got parameters ncid = %d format_present = %d",
+         ncid, format_present));
+
+    /* Manage NULL pointers. */
+    if (format_present)
+        formatp = &format;
+
+    /* Call the function. */
+    if ((ret = PIOc_inq_format(ncid, formatp)))
+        return ret;
+
+    if (formatp)
+        LOG((2, "inq_format_handler format = %d", *formatp));
+    LOG((1, "inq_format_handler succeeded!"));
+
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to create a netCDF file. */
+int create_file_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int len;
+    int iotype;
+    char *filename;
+    int mode;
+    int mpierr;
+    int ret;
+
+    LOG((1, "create_file_handler comproot = %d\n", ios->comproot));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&len, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "create_file_handler got parameter len = %d\n", len));
+    if (!(filename = malloc((len + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)filename, len + 1, MPI_CHAR, 0,
+                            ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&iotype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&mode, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "create_file_handler got parameters len = %d "
+         "filename = %s iotype = %d mode = %d\n",
+         len, filename, iotype, mode));
+
+    /* Call the create file function. */
+    if ((ret = PIOc_createfile(ios->iosysid, &ncid, &iotype, filename, mode)))
+        return ret;
+
+    /* Free resources. */
+    free(filename);
+
+    LOG((1, "create_file_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to close a netCDF file. It is
+ * only ever run on the IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int close_file_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int mpierr;
+    int ret;
+
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "%d close_file_handler\n", my_rank));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "close_file_handler got parameter ncid = %d\n", ncid));
+
+    /* Call the close file function. */
+    if ((ret = PIOc_closefile(ncid)))
+        return ret;
+
+    LOG((1, "close_file_handler succeeded!\n"));
+    return PIO_NOERR;
+}
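The inq_handler() below is the clearest example of the NULL-matching convention: every pointer argument the compute side passed as NULL arrives here as a false presence flag, and the handler passes NULL through in the same position. A hedged compute-side counterpart, assuming ncid names an open file:

    /* Sketch: a compute task that only wants ndims and nvars. The two
     * trailing NULLs reach inq_handler() as false presence flags. */
    int ndims, nvars;
    int ret = PIOc_inq(ncid, &ndims, &nvars, NULL, NULL);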
+
+/** This function is run on the IO tasks to inq a netCDF file. It is
+ * only ever run on the IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int inq_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int ndims, nvars, ngatts, unlimdimid;
+    int *ndimsp = NULL, *nvarsp = NULL, *ngattsp = NULL, *unlimdimidp = NULL;
+    char ndims_present, nvars_present, ngatts_present, unlimdimid_present;
+    int mpierr;
+    int ret;
+
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "%d inq_handler\n", my_rank));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&ndims_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&nvars_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&ngatts_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&unlimdimid_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "%d inq_handler ndims_present = %d nvars_present = %d ngatts_present = %d unlimdimid_present = %d\n",
+         my_rank, ndims_present, nvars_present, ngatts_present, unlimdimid_present));
+
+    /* NULLs passed in to any of the pointers in the original call
+     * need to be matched with NULLs here. Assign pointers where
+     * non-NULL pointers were passed in. */
+    if (ndims_present)
+        ndimsp = &ndims;
+    if (nvars_present)
+        nvarsp = &nvars;
+    if (ngatts_present)
+        ngattsp = &ngatts;
+    if (unlimdimid_present)
+        unlimdimidp = &unlimdimid;
+
+    /* Call the inq function to get the values. */
+    if ((ret = PIOc_inq(ncid, ndimsp, nvarsp, ngattsp, unlimdimidp)))
+        return ret;
+
+    return PIO_NOERR;
+}
+
+/** Do an inq_dim on a netCDF dimension. This function is only run on
+ * IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @param msg the message sent by the comp root task.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int inq_dim_handler(iosystem_desc_t *ios, int msg)
+{
+    int ncid;
+    int dimid;
+    char name_present, len_present;
+    char *dimnamep = NULL;
+    PIO_Offset *dimlenp = NULL;
+    char dimname[NC_MAX_NAME + 1];
+    PIO_Offset dimlen;
+
+    int mpierr;
+    int ret;
+
+    LOG((1, "inq_dim_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&dimid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&name_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&len_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "inq_dim_handler name_present = %d len_present = %d", name_present,
+         len_present));
+
+    /* Set the non-null pointers. */
+    if (name_present)
+        dimnamep = dimname;
+    if (len_present)
+        dimlenp = &dimlen;
+
+    /* Call the inq function to get the values. */
+    if ((ret = PIOc_inq_dim(ncid, dimid, dimnamep, dimlenp)))
+        return ret;
+
+    return PIO_NOERR;
+}
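inq_dimid_handler() below is the first handler to receive a string. Names cross the intercomm as an explicit length followed by the characters, with the receiver allocating; a condensed sketch of that receive pattern, using the same fields the handlers use:

    /* Sketch: receiving a name broadcast by the comp master. */
    int namelen;
    char *name;
    if (MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm))
        return PIO_EIO;
    if (!(name = malloc(namelen + 1)))
        return PIO_ENOMEM;
    if (MPI_Bcast(name, namelen + 1, MPI_CHAR, 0, ios->intercomm))
        return PIO_EIO;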
+
+/** Do an inq_dimid on a netCDF dimension name. This function is only
+ * run on IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int inq_dimid_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int *dimidp = NULL, dimid;
+    int mpierr;
+    char id_present;
+    int ret;
+    int namelen;
+    char *name;
+
+    LOG((1, "inq_dimid_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&id_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "inq_dimid_handler ncid = %d namelen = %d name = %s id_present = %d",
+         ncid, namelen, name, id_present));
+
+    /* Set non-null pointer. */
+    if (id_present)
+        dimidp = &dimid;
+
+    /* Call the inq_dimid function. */
+    if ((ret = PIOc_inq_dimid(ncid, name, dimidp)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+
+    return PIO_NOERR;
+}
+
+/** Handle attribute inquiry operations. This code only runs on IO
+ * tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @param msg the message sent by the comp root task.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int inq_att_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int mpierr;
+    int ret;
+    char *name5;
+    int namelen;
+    int *op, *ip;
+    nc_type xtype, *xtypep = NULL;
+    PIO_Offset len, *lenp = NULL;
+    char xtype_present, len_present;
+
+    LOG((1, "inq_att_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name5 = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name5, namelen + 1, MPI_CHAR, ios->compmaster,
+                            ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&xtype_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&len_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+
+    /* Match NULLs in collective function call. */
+    if (xtype_present)
+        xtypep = &xtype;
+    if (len_present)
+        lenp = &len;
+
+    /* Call the function to learn about the attribute. */
+    if ((ret = PIOc_inq_att(ncid, varid, name5, xtypep, lenp)))
+        return ret;
+
+    return PIO_NOERR;
+}
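As with inq_handler(), the presence flags mirror NULL arguments on the compute side. A hedged compute-side counterpart for the attribute inquiry above ("units" is a hypothetical attribute name):

    /* Sketch: ask only for an attribute's length; the NULL xtype
     * pointer arrives at inq_att_handler() as a false flag. */
    PIO_Offset len;
    int ret = PIOc_inq_att(ncid, varid, "units", NULL, &len);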
+    if ((ret = PIOc_inq_attname(ncid, varid, attnum, namep)))
+        return ret;
+
+    return PIO_NOERR;
+}
+
+/** Handle attribute ID inquiries. This code only runs on IO
+ * tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int inq_attid_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int attnum;
+    char *name;
+    int namelen;
+    int id, *idp = NULL;
+    char id_present;
+    int mpierr;
+    int ret;
+
+    LOG((1, "inq_attid_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast(name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&id_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "inq_attid_handler got ncid = %d varid = %d attnum = %d id_present = %d",
+         ncid, varid, attnum, id_present));
+
+    /* Match NULLs in collective function call. */
+    if (id_present)
+        idp = &id;
+
+    /* Call the function to learn about the attribute. */
+    if ((ret = PIOc_inq_attid(ncid, varid, name, idp)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+
+    return PIO_NOERR;
+}
+
+/** Handle attribute put operations. This code only runs on IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int att_put_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int mpierr;
+    int ierr;
+    char *name;
+    int namelen;
+    PIO_Offset attlen, typelen;
+    nc_type atttype;
+    int *op;
+
+    LOG((1, "att_put_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster,
+                            ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&atttype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&attlen, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(op = malloc(attlen * typelen)))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)op, attlen * typelen, MPI_BYTE, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "att_put_handler ncid = %d varid = %d namelen = %d name = %s "
+         "atttype = %d attlen = %d typelen = %d",
+         ncid, varid, namelen, name, atttype, attlen, typelen));
+
+    /* Call the function to write the attribute. */
+    if ((ierr = PIOc_put_att(ncid, varid, name, atttype, attlen, op)))
+        return ierr;
+    LOG((2, "att_put_handler called PIOc_put_att, ierr = %d", ierr));
+
+    /* Free resources. */
+    free(name);
+    free(op);
+
+    LOG((2, "att_put_handler complete!"));
+    return PIO_NOERR;
+}
+
+/** Handle attribute get operations. This code only runs on IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int att_get_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int mpierr;
+    int ierr;
+    char *name;
+    int namelen;
+    PIO_Offset attlen, typelen;
+    nc_type atttype;
+    int *ip;
+    int iotype;
+
+    LOG((1, "att_get_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster,
+                            ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&iotype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&atttype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&attlen, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "att_get_handler ncid = %d varid = %d namelen = %d name = %s iotype = %d "
+         "atttype = %d attlen = %d typelen = %d",
+         ncid, varid, namelen, name, iotype, atttype, attlen, typelen));
+
+    /* Allocate space for the attribute data. */
+    if (!(ip = malloc(attlen * typelen)))
+        return PIO_ENOMEM;
+
+    /* Call the function to read the attribute. */
+    if ((ierr = PIOc_get_att(ncid, varid, name, ip)))
+        return ierr;
+
+    /* Free resources. */
+    free(name);
+    free(ip);
+
+    return PIO_NOERR;
+}
+
+/** Handle var put operations. This code only runs on IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int put_vars_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int mpierr;
+    int ierr;
+    PIO_Offset typelen;    /** Length (in bytes) of this type. */
+    nc_type xtype;         /** Type of the data being written. */
+    char start_present, count_present, stride_present;
+    PIO_Offset *startp = NULL, *countp = NULL, *stridep = NULL;
+    int ndims;             /** Number of dimensions. */
+    void *buf;             /** Buffer for data storage. */
+    PIO_Offset num_elem;   /** Number of data elements in the buffer. */
+
+    LOG((1, "put_vars_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+
+    /* Now we know how big to make these arrays. */
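+    /* These are C99 variable-length arrays, sized with the ndims
+     * value that was just broadcast from the comp root task. */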
+    PIO_Offset start[ndims], count[ndims], stride[ndims];
+
+    if ((mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!mpierr && start_present)
+    {
+        if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm)))
+            return PIO_EIO;
+        LOG((1, "put_vars_handler getting start[0] = %d ndims = %d", start[0], ndims));
+    }
+    if ((mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!mpierr && count_present)
+        if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm)))
+            return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!mpierr && stride_present)
+        if ((mpierr = MPI_Bcast(stride, ndims, MPI_OFFSET, 0, ios->intercomm)))
+            return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&xtype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&num_elem, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "put_vars_handler ncid = %d varid = %d ndims = %d start_present = %d "
+         "count_present = %d stride_present = %d xtype = %d num_elem = %d typelen = %d",
+         ncid, varid, ndims, start_present, count_present, stride_present, xtype,
+         num_elem, typelen));
+
+    for (int d = 0; d < ndims; d++)
+    {
+        if (start_present)
+            LOG((2, "start[%d] = %d\n", d, start[d]));
+        if (count_present)
+            LOG((2, "count[%d] = %d\n", d, count[d]));
+        if (stride_present)
+            LOG((2, "stride[%d] = %d\n", d, stride[d]));
+    }
+
+    /* Allocate room for our data. */
+    if (!(buf = malloc(num_elem * typelen)))
+        return PIO_ENOMEM;
+
+    /* Get the data. */
+    if ((mpierr = MPI_Bcast(buf, num_elem * typelen, MPI_BYTE, 0, ios->intercomm)))
+        return PIO_EIO;
+
+    /* for (int e = 0; e < num_elem; e++) */
+    /*     LOG((2, "element %d = %d", e, ((int *)buf)[e])); */
+
+    /* Set the non-NULL pointers. */
+    if (start_present)
+        startp = start;
+    if (count_present)
+        countp = count;
+    if (stride_present)
+        stridep = stride;
+
+    /* Call the function to write the data. */
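+    /* The data arrived over the intercomm as raw bytes; xtype tells
+     * us which type-specific put function matches the caller's
+     * buffer, leaving any type conversion to the netCDF layer. */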
+    switch(xtype)
+    {
+    case NC_BYTE:
+        ierr = PIOc_put_vars_schar(ncid, varid, startp, countp, stridep, buf);
+        break;
+    case NC_CHAR:
+        ierr = PIOc_put_vars_text(ncid, varid, startp, countp, stridep, buf);
+        break;
+    case NC_SHORT:
+        ierr = PIOc_put_vars_short(ncid, varid, startp, countp, stridep, buf);
+        break;
+    case NC_INT:
+        ierr = PIOc_put_vars_int(ncid, varid, startp, countp,
+                                 stridep, buf);
+        break;
+    case NC_FLOAT:
+        ierr = PIOc_put_vars_float(ncid, varid, startp, countp,
+                                   stridep, buf);
+        break;
+    case NC_DOUBLE:
+        ierr = PIOc_put_vars_double(ncid, varid, startp, countp,
+                                    stridep, buf);
+        break;
+#ifdef _NETCDF4
+    case NC_UBYTE:
+        ierr = PIOc_put_vars_uchar(ncid, varid, startp, countp,
+                                   stridep, buf);
+        break;
+    case NC_USHORT:
+        ierr = PIOc_put_vars_ushort(ncid, varid, startp, countp,
+                                    stridep, buf);
+        break;
+    case NC_UINT:
+        ierr = PIOc_put_vars_uint(ncid, varid, startp, countp,
+                                  stridep, buf);
+        break;
+    case NC_INT64:
+        ierr = PIOc_put_vars_longlong(ncid, varid, startp, countp,
+                                      stridep, buf);
+        break;
+    case NC_UINT64:
+        ierr = PIOc_put_vars_ulonglong(ncid, varid, startp, countp,
+                                       stridep, buf);
+        break;
+    /* case NC_STRING: */
+    /*     ierr = PIOc_put_vars_string(ncid, varid, startp, countp, */
+    /*                                 stridep, (void *)buf); */
+    /*     break; */
+#endif /* _NETCDF4 */
+    default:
+        ierr = PIO_EBADTYPE;
+    }
+
+    /* Free the data buffer and check for errors. */
+    free(buf);
+    if (ierr)
+        return ierr;
+
+    return PIO_NOERR;
+}
+
+/** Handle var get operations. This code only runs on IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int get_vars_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int mpierr;
+    int ierr;
+    PIO_Offset typelen;    /** Length (in bytes) of this type. */
+    nc_type xtype;         /** Type of the data being read. */
+    char start_present, count_present, stride_present;
+    PIO_Offset *startp = NULL, *countp = NULL, *stridep = NULL;
+    int ndims;             /** Number of dimensions. */
+    void *buf;             /** Buffer for data storage. */
+    PIO_Offset num_elem;   /** Number of data elements in the buffer. */
+
+    LOG((1, "get_vars_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+
+    /* Now we know how big to make these arrays. */
+    PIO_Offset start[ndims], count[ndims], stride[ndims];
+
+    if ((mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!mpierr && start_present)
+    {
+        if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm)))
+            return PIO_EIO;
+        LOG((1, "get_vars_handler getting start[0] = %d ndims = %d", start[0], ndims));
+    }
+    if ((mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!mpierr && count_present)
+        if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm)))
+            return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!mpierr && stride_present)
+        if ((mpierr = MPI_Bcast(stride, ndims, MPI_OFFSET, 0, ios->intercomm)))
+            return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&xtype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&num_elem, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "get_vars_handler ncid = %d varid = %d ndims = %d start_present = %d "
+         "count_present = %d stride_present = %d xtype = %d num_elem = %d typelen = %d",
+         ncid, varid, ndims, start_present, count_present, stride_present, xtype,
+         num_elem, typelen));
+
+    for (int d = 0; d < ndims; d++)
+    {
+        if (start_present)
+            LOG((2, "start[%d] = %d\n", d, start[d]));
+        if (count_present)
+            LOG((2, "count[%d] = %d\n", d, count[d]));
+        if (stride_present)
+            LOG((2, "stride[%d] = %d\n", d, stride[d]));
+    }
+
+    /* Allocate room for our data. */
+    if (!(buf = malloc(num_elem * typelen)))
+        return PIO_ENOMEM;
+
+    /* Set the non-NULL pointers. */
+    if (start_present)
+        startp = start;
+    if (count_present)
+        countp = count;
+    if (stride_present)
+        stridep = stride;
+
+    /* Call the function to read the data. */
+    switch(xtype)
+    {
+    case NC_BYTE:
+        ierr = PIOc_get_vars_schar(ncid, varid, startp, countp,
+                                   stridep, buf);
+        break;
+    case NC_CHAR:
+        ierr = PIOc_get_vars_text(ncid, varid, startp, countp,
+                                  stridep, buf);
+        break;
+    case NC_SHORT:
+        ierr = PIOc_get_vars_short(ncid, varid, startp, countp,
+                                   stridep, buf);
+        break;
+    case NC_INT:
+        ierr = PIOc_get_vars_int(ncid, varid, startp, countp,
+                                 stridep, buf);
+        break;
+    case NC_FLOAT:
+        ierr = PIOc_get_vars_float(ncid, varid, startp, countp,
+                                   stridep, buf);
+        break;
+    case NC_DOUBLE:
+        ierr = PIOc_get_vars_double(ncid, varid, startp, countp,
+                                    stridep, buf);
+        break;
+#ifdef _NETCDF4
+    case NC_UBYTE:
+        ierr = PIOc_get_vars_uchar(ncid, varid, startp, countp,
+                                   stridep, buf);
+        break;
+    case NC_USHORT:
+        ierr = PIOc_get_vars_ushort(ncid, varid, startp, countp,
+                                    stridep, buf);
+        break;
+    case NC_UINT:
+        ierr = PIOc_get_vars_uint(ncid, varid, startp, countp,
+                                  stridep, buf);
+        break;
+    case NC_INT64:
+        ierr = PIOc_get_vars_longlong(ncid, varid, startp, countp,
+                                      stridep, buf);
+        break;
+    case NC_UINT64:
+        ierr = PIOc_get_vars_ulonglong(ncid, varid, startp, countp,
+                                       stridep, buf);
+        break;
+    /* case NC_STRING: */
+    /*     ierr = PIOc_get_vars_string(ncid, varid, startp, countp, */
+    /*                                 stridep, (void *)buf); */
+    /*     break; */
+#endif /* _NETCDF4 */
+    default:
+        ierr = PIO_EBADTYPE;
+    }
+
+    /* Free the data buffer and check for errors. */
+    free(buf);
+    if (ierr)
+        return ierr;
+
+    LOG((1, "get_vars_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** Do an inq_var on a netCDF variable. This function is only run on
+ * IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int inq_var_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int mpierr;
+    char name_present, xtype_present, ndims_present, dimids_present, natts_present;
+    char name[NC_MAX_NAME + 1], *namep = NULL;
+    nc_type xtype, *xtypep = NULL;
+    int *ndimsp = NULL, *dimidsp = NULL, *nattsp = NULL;
+    int ndims, dimids[NC_MAX_DIMS], natts;
+    int ret;
+
+    LOG((1, "inq_var_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&name_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&xtype_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&ndims_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&dimids_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&natts_present, 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "inq_var_handler ncid = %d varid = %d name_present = %d xtype_present = %d ndims_present = %d "
+         "dimids_present = %d natts_present = %d\n",
+         ncid, varid, name_present, xtype_present, ndims_present, dimids_present, natts_present));
+
+    /* Set the non-NULL pointers. */
+    if (name_present)
+        namep = name;
+    if (xtype_present)
+        xtypep = &xtype;
+    if (ndims_present)
+        ndimsp = &ndims;
+    if (dimids_present)
+        dimidsp = dimids;
+    if (natts_present)
+        nattsp = &natts;
+
+    /* Call the inq function to get the values. */
+    if ((ret = PIOc_inq_var(ncid, varid, namep, xtypep, ndimsp, dimidsp, nattsp)))
+        return ret;
+
+    if (ndims_present)
+        LOG((2, "inq_var_handler ndims = %d", ndims));
+
+    return PIO_NOERR;
+}
+
+/** Do an inq_varid on a netCDF variable name. This function is only
+ * run on IO tasks.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int inq_varid_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int mpierr;
+    int ret;
+    int namelen;
+    char *name;
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+
+    /* Call the inq_varid function. */
+    if ((ret = PIOc_inq_varid(ncid, name, &varid)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to sync a netCDF file.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int sync_file_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int mpierr;
+    int ret;
+
+    LOG((1, "sync_file_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "sync_file_handler got parameter ncid = %d", ncid));
+
+    /* Call the sync file function. */
+    if ((ret = PIOc_sync(ncid)))
+        return ret;
+
+    LOG((2, "sync_file_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to enddef or redef a netCDF
+ * file.
+ *
+ * @param ios pointer to the iosystem_desc_t.
+ * @return PIO_NOERR for success, error code otherwise.
+*/
+int change_def_file_handler(iosystem_desc_t *ios, int msg)
+{
+    int ncid;
+    int mpierr;
+    int ret;
+
+    LOG((1, "change_def_file_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+
+    /* Call the function. */
+    ret = (msg == PIO_MSG_ENDDEF) ? PIOc_enddef(ncid) : PIOc_redef(ncid);
+    if (ret)
+        return ret;
+
+    LOG((1, "change_def_file_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to define a netCDF
+ * variable. */
+int def_var_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int namelen;
+    char *name;
+    int mpierr;
+    int ret;
+    int varid;
+    nc_type xtype;
+    int ndims;
+    int *dimids;
+
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "%d def_var_handler comproot = %d\n", my_rank, ios->comproot));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, 0,
+                            ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&xtype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(dimids = malloc(ndims * sizeof(int))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast(dimids, ndims, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "%d def_var_handler got parameters namelen = %d "
+         "name = %s ncid = %d\n",
+         my_rank, namelen, name, ncid));
+
+    /* Call the def var function. */
+    if ((ret = PIOc_def_var(ncid, name, xtype, ndims, dimids, &varid)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+    free(dimids);
+
+    LOG((1, "%d def_var_handler succeeded!\n", my_rank));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to define a netCDF
+ * dimension. */
+int def_dim_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int len, namelen;
+    char *name;
+    int mpierr;
+    int ret;
+    int dimid;
+
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "def_dim_handler comproot = %d", ios->comproot));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, 0,
+                            ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&len, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "def_dim_handler got parameters namelen = %d "
+         "name = %s len = %d ncid = %d", namelen, name, len, ncid));
+
+    /* Call the def dim function. */
+    if ((ret = PIOc_def_dim(ncid, name, len, &dimid)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+
+    LOG((1, "%d def_dim_handler succeeded!\n", my_rank));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to rename a netCDF
+ * dimension. */
+int rename_dim_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int namelen;
+    char *name;
+    int mpierr;
+    int ret;
+    int dimid;
+
+    LOG((1, "rename_dim_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&dimid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "rename_dim_handler got parameters namelen = %d "
+         "name = %s ncid = %d dimid = %d", namelen, name, ncid, dimid));
+
+    /* Call the rename dim function. */
+    if ((ret = PIOc_rename_dim(ncid, dimid, name)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+
+    LOG((1, "rename_dim_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to rename a netCDF
+ * variable. */
+int rename_var_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int namelen;
+    char *name;
+    int mpierr;
+    int ret;
+    int varid;
+
+    LOG((1, "rename_var_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "rename_var_handler got parameters namelen = %d "
+         "name = %s ncid = %d varid = %d", namelen, name, ncid, varid));
+
+    /* Call the rename var function. */
+    if ((ret = PIOc_rename_var(ncid, varid, name)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+
+    LOG((1, "rename_var_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to rename a netCDF
+ * attribute. */
+int rename_att_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int namelen, newnamelen;
+    char *name, *newname;
+    int mpierr;
+    int ret;
+
+    LOG((1, "rename_att_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast(name, namelen + 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&newnamelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(newname = malloc((newnamelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast(newname, newnamelen + 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "rename_att_handler got parameters namelen = %d name = %s ncid = %d varid = %d "
+         "newnamelen = %d newname = %s", namelen, name, ncid, varid, newnamelen, newname));
+
+    /* Call the rename att function. */
+    if ((ret = PIOc_rename_att(ncid, varid, name, newname)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+    free(newname);
+
+    LOG((1, "rename_att_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to delete a netCDF
+ * attribute. */
+int delete_att_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int varid;
+    int namelen;
+    char *name;
+    int mpierr;
+    int ret;
+
+    LOG((1, "delete_att_handler"));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(name = malloc((namelen + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast(name, namelen + 1, MPI_CHAR, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "delete_att_handler namelen = %d name = %s ncid = %d varid = %d ",
+         namelen, name, ncid, varid));
+
+    /* Call the del att function. */
+    if ((ret = PIOc_del_att(ncid, varid, name)))
+        return ret;
+
+    /* Free resources. */
+    free(name);
+
+    LOG((1, "delete_att_handler succeeded!"));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to open a netCDF file.
+ *
+ * @param ios pointer to the iosystem_desc_t data.
+ *
+ * @return PIO_NOERR for success, error code otherwise. */
+int open_file_handler(iosystem_desc_t *ios)
+{
+    int ncid;
+    int len;
+    int iotype;
+    char *filename;
+    int mode;
+    int mpierr;
+    int ret;
+
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "%d open_file_handler comproot = %d\n", my_rank, ios->comproot));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&len, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "open_file_handler got parameter len = %d", len));
+    if (!(filename = malloc((len + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)filename, len + 1, MPI_CHAR, 0,
+                            ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&iotype, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if ((mpierr = MPI_Bcast(&mode, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    LOG((2, "open_file_handler got parameters len = %d filename = %s iotype = %d mode = %d\n",
+         len, filename, iotype, mode));
+
+    /* Call the open file function. */
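+    /* PIOc_openfile() is collective; the compute tasks obtain the
+     * resulting ncid from their own call, so the ncid found here does
+     * not need to be sent back. */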
+    if ((ret = PIOc_openfile(ios->iosysid, &ncid, &iotype, filename, mode)))
+        return ret;
+
+    /* Free resources. */
+    free(filename);
+
+    LOG((1, "%d open_file_handler succeeded!\n", my_rank));
+    return PIO_NOERR;
+}
+
+/** This function is run on the IO tasks to delete a netCDF file.
+ *
+ * @param ios pointer to the iosystem_desc_t data.
+ *
+ * @return PIO_NOERR for success, error code otherwise. */
+int delete_file_handler(iosystem_desc_t *ios)
+{
+    int len;
+    char *filename;
+    int mpierr;
+    int ret;
+
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "%d delete_file_handler comproot = %d\n", my_rank, ios->comproot));
+
+    /* Get the parameters for this function that the comp master
+     * task is broadcasting. */
+    if ((mpierr = MPI_Bcast(&len, 1, MPI_INT, 0, ios->intercomm)))
+        return PIO_EIO;
+    if (!(filename = malloc((len + 1) * sizeof(char))))
+        return PIO_ENOMEM;
+    if ((mpierr = MPI_Bcast((void *)filename, len + 1, MPI_CHAR, 0,
+                            ios->intercomm)))
+        return PIO_EIO;
+    LOG((1, "%d delete_file_handler got parameters len = %d filename = %s\n",
+         my_rank, len, filename));
+
+    /* Call the delete file function. */
+    if ((ret = PIOc_deletefile(ios->iosysid, filename)))
+        return ret;
+
+    /* Free resources. */
+    free(filename);
+
+    LOG((1, "%d delete_file_handler succeeded!\n", my_rank));
+    return PIO_NOERR;
+}
+
+int initdecomp_dof_handler(iosystem_desc_t *ios)
+{
+    return PIO_NOERR;
+}
+
+int writedarray_handler(iosystem_desc_t *ios)
+{
+    return PIO_NOERR;
+}
+
+int readdarray_handler(iosystem_desc_t *ios)
+{
+    return PIO_NOERR;
+}
+
+int seterrorhandling_handler(iosystem_desc_t *ios)
+{
+    return PIO_NOERR;
+}
+
+int var_handler(iosystem_desc_t *ios, int msg)
+{
+    return PIO_NOERR;
+}
+
+int freedecomp_handler(iosystem_desc_t *ios)
+{
+    return PIO_NOERR;
+}
+
+int finalize_handler(iosystem_desc_t *ios)
+{
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "%d finalize_handler called\n", my_rank));
+    return PIO_NOERR;
+}
+
+int pio_callback_handler(iosystem_desc_t *ios, int msg)
+{
+    return PIO_NOERR;
+}
+
+/** This function is called by the IO tasks. It does not return until
+ * the PIO_MSG_EXIT message is received, or an error occurs. */
+int pio_msg_handler(int io_rank, int component_count, iosystem_desc_t *iosys)
+{
+    iosystem_desc_t *my_iosys;
+    int msg = 0;
+    MPI_Request req[component_count];
+    MPI_Status status;
+    int index;
+    int mpierr;
+    int ret = PIO_NOERR;
+
+    int my_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+    LOG((1, "%d pio_msg_handler called\n", my_rank));
+
+    /* Have IO comm rank 0 (the ioroot) post a non-blocking receive
+     * for a message from each of the comproots. */
+    if (!io_rank)
+    {
+        for (int cmp = 0; cmp < component_count; cmp++)
+        {
+            my_iosys = &iosys[cmp];
+            LOG((1, "%d about to call MPI_Irecv\n", my_rank));
+            mpierr = MPI_Irecv(&msg, 1, MPI_INT, my_iosys->comproot, MPI_ANY_TAG,
+                               my_iosys->union_comm, &req[cmp]);
+            CheckMPIReturn(mpierr, __FILE__, __LINE__);
+        }
+    }
+
+    /* If the message is not -1, keep processing messages. */
+    while (msg != -1)
+    {
+        /* Wait until any one of the requests is complete. */
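+        /* Only io_rank 0 posted the MPI_Irecv requests above, so only
+         * it can wait on them. The other IO tasks learn which
+         * component and message arrived from the two broadcasts that
+         * follow. */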
+        if (!io_rank)
+        {
+            LOG((1, "%d about to call MPI_Waitany req[0] = %d MPI_REQUEST_NULL = %d\n",
+                 my_rank, req[0], MPI_REQUEST_NULL));
+            mpierr = MPI_Waitany(component_count, req, &index, &status);
+            CheckMPIReturn(mpierr, __FILE__, __LINE__);
+            LOG((3, "Waitany returned index = %d req[%d] = %d",
+                 index, index, req[index]));
+        }
+
+        /* Broadcast the index of the computational component that
+         * originated the request to the rest of the IO tasks. */
+        mpierr = MPI_Bcast(&index, 1, MPI_INT, 0, iosys->io_comm);
+        CheckMPIReturn(mpierr, __FILE__, __LINE__);
+        my_iosys = &iosys[index];
+        LOG((3, "index MPI_Bcast complete index = %d", index));
+
+        /* Broadcast the msg value to the rest of the IO tasks. */
+        LOG((3, "about to call msg MPI_Bcast"));
+        mpierr = MPI_Bcast(&msg, 1, MPI_INT, 0, my_iosys->io_comm);
+        CheckMPIReturn(mpierr, __FILE__, __LINE__);
+        LOG((1, "pio_msg_handler msg MPI_Bcast complete msg = %d", msg));
+
+        /* Handle the message. This code is run on all IO tasks. */
+        switch (msg)
+        {
+        case PIO_MSG_INQ_TYPE:
+            inq_type_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ_FORMAT:
+            inq_format_handler(my_iosys);
+            break;
+        case PIO_MSG_CREATE_FILE:
+            create_file_handler(my_iosys);
+            LOG((2, "returned from create_file_handler"));
+            break;
+        case PIO_MSG_SYNC:
+            sync_file_handler(my_iosys);
+            break;
+        case PIO_MSG_ENDDEF:
+        case PIO_MSG_REDEF:
+            LOG((2, "calling change_def_file_handler"));
+            change_def_file_handler(my_iosys, msg);
+            LOG((2, "returned from change_def_file_handler"));
+            break;
+        case PIO_MSG_OPEN_FILE:
+            open_file_handler(my_iosys);
+            break;
+        case PIO_MSG_CLOSE_FILE:
+            close_file_handler(my_iosys);
+            break;
+        case PIO_MSG_DELETE_FILE:
+            delete_file_handler(my_iosys);
+            break;
+        case PIO_MSG_RENAME_DIM:
+            rename_dim_handler(my_iosys);
+            break;
+        case PIO_MSG_RENAME_VAR:
+            rename_var_handler(my_iosys);
+            break;
+        case PIO_MSG_RENAME_ATT:
+            rename_att_handler(my_iosys);
+            break;
+        case PIO_MSG_DEL_ATT:
+            delete_att_handler(my_iosys);
+            break;
+        case PIO_MSG_DEF_DIM:
+            def_dim_handler(my_iosys);
+            break;
+        case PIO_MSG_DEF_VAR:
+            def_var_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ:
+            inq_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ_DIM:
+            inq_dim_handler(my_iosys, msg);
+            break;
+        case PIO_MSG_INQ_DIMID:
+            inq_dimid_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ_VAR:
+            inq_var_handler(my_iosys);
+            break;
+        case PIO_MSG_GET_ATT:
+            ret = att_get_handler(my_iosys);
+            break;
+        case PIO_MSG_PUT_ATT:
+            ret = att_put_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ_VARID:
+            inq_varid_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ_ATT:
+            inq_att_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ_ATTNAME:
+            inq_attname_handler(my_iosys);
+            break;
+        case PIO_MSG_INQ_ATTID:
+            inq_attid_handler(my_iosys);
+            break;
+        case PIO_MSG_GET_VARS:
+            get_vars_handler(my_iosys);
+            break;
+        case PIO_MSG_PUT_VARS:
+            put_vars_handler(my_iosys);
+            break;
+        case PIO_MSG_INITDECOMP_DOF:
+            initdecomp_dof_handler(my_iosys);
+            break;
+        case PIO_MSG_WRITEDARRAY:
+            writedarray_handler(my_iosys);
+            break;
+        case PIO_MSG_READDARRAY:
+            readdarray_handler(my_iosys);
+            break;
+        case PIO_MSG_SETERRORHANDLING:
+            seterrorhandling_handler(my_iosys);
+            break;
+        case PIO_MSG_FREEDECOMP:
+            freedecomp_handler(my_iosys);
+            break;
+        case PIO_MSG_EXIT:
+            finalize_handler(my_iosys);
+            msg = -1;
+            break;
+        default:
+            pio_callback_handler(my_iosys, msg);
+        }
+
+        /* If an error was returned by the handler, do something! */
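+        /* (For now an error is just logged and MPI is shut down; a
+         * real recovery path still needs to be written.) */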
+        LOG((3, "pio_msg_handler checking error ret = %d", ret));
+        if (ret)
+        {
+            LOG((0, "handler returned error code %d", ret));
+            MPI_Finalize();
+        }
+
+        LOG((3, "pio_msg_handler getting ready to listen"));
+        /* Unless finalize was called, listen for another msg from the
+         * component whose message we just handled. */
+        if (!io_rank && msg != -1)
+        {
+            LOG((3, "pio_msg_handler about to Irecv"));
+            my_iosys = &iosys[index];
+            mpierr = MPI_Irecv(&msg, 1, MPI_INT, my_iosys->comproot, MPI_ANY_TAG, my_iosys->union_comm,
+                               &req[index]);
+            LOG((3, "pio_msg_handler called MPI_Irecv req[%d] = %d\n", index, req[index]));
+            CheckMPIReturn(mpierr, __FILE__, __LINE__);
+        }
+
+    }
+
+    return PIO_NOERR;
+}
+
+int
+pio_iosys_print(int my_rank, iosystem_desc_t *iosys)
+{
+    printf("%d iosysid: %d\n", my_rank, iosys->iosysid);
+    if (iosys->union_comm == MPI_COMM_NULL)
+        printf("%d union_comm: MPI_COMM_NULL ", my_rank);
+    else
+        printf("%d union_comm: %d ", my_rank, iosys->union_comm);
+
+    if (iosys->comp_comm == MPI_COMM_NULL)
+        printf("comp_comm: MPI_COMM_NULL ");
+    else
+        printf("comp_comm: %d ", iosys->comp_comm);
+
+    if (iosys->io_comm == MPI_COMM_NULL)
+        printf("io_comm: MPI_COMM_NULL ");
+    else
+        printf("io_comm: %d ", iosys->io_comm);
+
+    if (iosys->intercomm == MPI_COMM_NULL)
+        printf("intercomm: MPI_COMM_NULL\n");
+    else
+        printf("intercomm: %d\n", iosys->intercomm);
+
+    printf("%d num_iotasks=%d num_comptasks=%d union_rank=%d, comp_rank=%d, "
+           "io_rank=%d async_interface=%d\n",
+           my_rank, iosys->num_iotasks, iosys->num_comptasks, iosys->union_rank,
+           iosys->comp_rank, iosys->io_rank, iosys->async_interface);
+
+    printf("%d ioroot=%d comproot=%d iomaster=%d, compmaster=%d\n",
+           my_rank, iosys->ioroot, iosys->comproot, iosys->iomaster,
+           iosys->compmaster);
+
+    printf("%d iotasks:", my_rank);
+    for (int i = 0; i < iosys->num_iotasks; i++)
+        printf("%d ", iosys->ioranks[i]);
+    printf("\n");
+    return PIO_NOERR;
+}
+
+/** @ingroup PIO_init
+ * Library initialization used when IO tasks are distinct from compute
+ * tasks.
+ *
+ * This is a collective call. Input parameters are read on
+ * comp_rank=0; values on other tasks are ignored. This variation of
+ * PIO_init sets up a distinct set of tasks to handle IO; these tasks
+ * do not return from this call. Instead they go to an internal loop
+ * and wait to receive further instructions from the computational
+ * tasks.
+ *
+ * For 4 tasks, to have 2 of them be computational, and 2 of them
+ * be IO, I would provide the following:
+ *
+ * component_count = 1
+ *
+ * peer_comm = MPI_COMM_WORLD
+ *
+ * comp_comms = an array with one element, an MPI (intra) communicator
+ * that contains the two tasks designated to do computation
+ * (processors 0, 1).
+ *
+ * io_comm = an MPI (intra) communicator with the other two tasks (2,
+ * 3).
+ *
+ * iosysidp = pointer that gets the IO system ID.
+ *
+ * The Fortran function (from PIO1, in piolib_mod.F90) is:
+ *
+ * subroutine init_intercom(component_count, peer_comm, comp_comms,
+ * io_comm, iosystem, rearr_opts)
+ *
+ * Some notes from Jim:
+ *
+ * Components and Component Count
+ * ------------------------------
+ *
+ * It's a cesm thing - the cesm model is composed of several component
+ * models (atm, ocn, ice, lnd, etc) that may or may not be collocated
+ * on mpi tasks. Since for intercomm the IOCOMM tasks are a subset of
+ * the compute tasks for a given component, we have a separate iocomm
+ * for each model component, and we call init_intracomm independently
+ * for each component.
+ *
+ * When the IO tasks are independent of any model component then we
+ * can have all of the components share one set of iotasks, and we call
+ * init_intercomm once with the information for all components.
+ *
+ * Inter vs Intra Communicators
+ * ----------------------------
+ *
+ * For an intra you just need to provide the compute comm; pio creates
+ * an io comm as a subset of that compute comm.
+ *
+ * For an inter you need to provide multiple comms - peer comm is the
+ * communicator that is going to encompass all of the tasks - usually
+ * this will be mpi_comm_world. Then you need to provide a comm for
+ * each component model that will share the io server, then an
+ * io_comm.
+ *
+ * Example of Communicators
+ * ------------------------
+ *
+ * Starting from MPI_COMM_WORLD the calling program will create an
+ * IO_COMM and one or more COMP_COMMs. I think an example might be best:
+ *
+ * Suppose we have 10 tasks and 2 of them will be IO tasks. Then 0:7
+ * are in COMP_COMM and 8:9 are in IO_COMM. In this case on tasks 0:7
+ * COMP_COMM is defined and IO_COMM is MPI_COMM_NULL, and on tasks 8:9
+ * IO_COMM is defined and COMP_COMM is MPI_COMM_NULL. The communicators
+ * to handle communications between COMP_COMM and IO_COMM are defined
+ * in init_intercomm and held in a pio internal data structure.
+ *
+ * Return or Not
+ * -------------
+ *
+ * The io_comm tasks do not return from the init_intercomm routine.
+ *
+ * Sequence of Events to do Async I/O
+ * ----------------------------------
+ *
+ * Here is the sequence of events that needs to occur when an IO
+ * operation is called from the collection of compute tasks. I'm
+ * going to use pio_put_var because write_darray has some special
+ * characteristics that make it a bit more complicated...
+ *
+ * Compute tasks call pio_put_var with an integer argument.
+ *
+ * The MPI_Send sends a message from comp_rank=0 to io_rank=0 on
+ * union_comm (a comm defined as the union of io and compute tasks).
+ * The msg is an integer which indicates the function being called;
+ * in this case the msg is PIO_MSG_PUT_VAR_INT.
+ *
+ * The iotasks now know what additional arguments they should expect
+ * to receive from the compute tasks, in this case a file handle, a
+ * variable id, the length of the array, and the array itself.
+ *
+ * The iotasks now have the information they need to complete the
+ * operation and they call the pio_put_var routine. (In pio1 this bit
+ * of code is in pio_get_put_callbacks.F90.in.)
+ *
+ * After the netcdf operation is completed (in the case of an inq or
+ * get operation) the result is communicated back to the compute
+ * tasks.
+ *
+ * @param component_count The number of computational (ex. model)
+ * components to associate with this IO component.
+ *
+ * @param peer_comm The communicator from which all other communicator
+ * arguments are derived.
+ *
+ * @param comp_comms An array containing the computational
+ * communicator for each of the computational components. The I/O
+ * tasks pass MPI_COMM_NULL for this parameter.
+ *
+ * @param io_comm The io communicator. Computation tasks pass
+ * MPI_COMM_NULL for this parameter.
+ *
+ * @param iosysidp An array of length component_count. It will get the
+ * iosysid for each component.
+ *
+ * @return PIO_NOERR on success, error code otherwise.
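+ *
+ * As a minimal sketch of a caller (using the 10-task example above;
+ * the MPI_Comm_split() coloring and all variable names here are
+ * illustrative only):
+ *
+ * @code
+ * MPI_Comm newcomm, comp_comms[1], io_comm;
+ * int iosysid[1], rank, ierr;
+ * MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ * int color = (rank < 8) ? 0 : 1;           // ranks 0:7 compute, 8:9 IO
+ * MPI_Comm_split(MPI_COMM_WORLD, color, rank, &newcomm);
+ * comp_comms[0] = color ? MPI_COMM_NULL : newcomm;
+ * io_comm = color ? newcomm : MPI_COMM_NULL;
+ * // The IO tasks do not return from this call until PIO_MSG_EXIT.
+ * ierr = PIOc_Init_Intercomm(1, MPI_COMM_WORLD, comp_comms, io_comm, iosysid);
+ * @endcode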
+ */
+int PIOc_Init_Intercomm(int component_count, MPI_Comm peer_comm,
+                        MPI_Comm *comp_comms, MPI_Comm io_comm, int *iosysidp)
+{
+    iosystem_desc_t *iosys;
+    iosystem_desc_t *my_iosys;
+    int ierr = PIO_NOERR;
+    int mpierr;
+    int iam;
+    int io_leader, comp_leader;
+    int root;
+    MPI_Group io_grp, comm_grp, union_grp;
+
+    /* Allocate struct to hold io system info for each component. */
+    if (!(iosys = (iosystem_desc_t *) calloc(component_count, sizeof(iosystem_desc_t))))
+        ierr = PIO_ENOMEM;
+
+    if (!ierr)
+        for (int cmp = 0; cmp < component_count; cmp++)
+        {
+            /* These are used when using the intercomm. */
+            int comp_master = MPI_PROC_NULL, io_master = MPI_PROC_NULL;
+
+            /* Get a pointer to the iosys struct. */
+            my_iosys = &iosys[cmp];
+
+            /* Create an MPI info object. */
+            CheckMPIReturn(MPI_Info_create(&(my_iosys->info)),__FILE__,__LINE__);
+
+            /* This task is part of the computation communicator. */
+            if (comp_comms[cmp] != MPI_COMM_NULL)
+            {
+                /* Copy the computation communicator. */
+                mpierr = MPI_Comm_dup(comp_comms[cmp], &my_iosys->comp_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Create an MPI group with the computation tasks. */
+                mpierr = MPI_Comm_group(my_iosys->comp_comm, &my_iosys->compgroup);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Find out how many tasks are in this communicator. */
+                mpierr = MPI_Comm_size(my_iosys->comp_comm, &my_iosys->num_comptasks);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Set the rank within the comp_comm. */
+                mpierr = MPI_Comm_rank(my_iosys->comp_comm, &my_iosys->comp_rank);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Find the rank of the io leader in peer_comm. */
+                iam = -1;
+                mpierr = MPI_Allreduce(&iam, &io_leader, 1, MPI_INT, MPI_MAX, peer_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Find the rank of the comp leader in peer_comm. */
+                if (!my_iosys->comp_rank)
+                {
+                    mpierr = MPI_Comm_rank(peer_comm, &iam);
+                    CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                    if (mpierr)
+                        ierr = PIO_EIO;
+                }
+                else
+                    iam = -1;
+
+                /* Find the lucky comp_leader task. */
+                mpierr = MPI_Allreduce(&iam, &comp_leader, 1, MPI_INT, MPI_MAX, peer_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Is this the compmaster? Only if the comp_rank is zero. */
+                if (!my_iosys->comp_rank)
+                {
+                    my_iosys->compmaster = MPI_ROOT;
+                    comp_master = MPI_ROOT;
+                }
+                else
+                    my_iosys->compmaster = MPI_PROC_NULL;
+
+                /* Set up the intercomm from the computation side. */
+                mpierr = MPI_Intercomm_create(my_iosys->comp_comm, 0, peer_comm,
+                                              io_leader, cmp, &my_iosys->intercomm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Create the union communicator. */
+                mpierr = MPI_Intercomm_merge(my_iosys->intercomm, 0, &my_iosys->union_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+            }
+            else
+            {
+                my_iosys->comp_comm = MPI_COMM_NULL;
+                my_iosys->compgroup = MPI_GROUP_NULL;
+                my_iosys->comp_rank = -1;
+            }
+
+            /* This task is part of the IO communicator, so set up the
+             * IO stuff. */
+            if (io_comm != MPI_COMM_NULL)
+            {
+                /* Copy the IO communicator. */
+                mpierr = MPI_Comm_dup(io_comm, &my_iosys->io_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Get an MPI group that includes the io tasks. */
+                mpierr = MPI_Comm_group(my_iosys->io_comm, &my_iosys->iogroup);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Find out how many tasks are in this communicator. */
+                mpierr = MPI_Comm_size(my_iosys->io_comm, &my_iosys->num_iotasks);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Set the rank within the io_comm. */
+                mpierr = MPI_Comm_rank(my_iosys->io_comm, &my_iosys->io_rank);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Find the rank of the io leader in peer_comm. */
+                if (!my_iosys->io_rank)
+                {
+                    mpierr = MPI_Comm_rank(peer_comm, &iam);
+                    CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                    if (mpierr)
+                        ierr = PIO_EIO;
+                }
+                else
+                    iam = -1;
+
+                /* Find the lucky io_leader task. */
+                mpierr = MPI_Allreduce(&iam, &io_leader, 1, MPI_INT, MPI_MAX, peer_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Find the rank of the comp leader in peer_comm. */
+                iam = -1;
+                mpierr = MPI_Allreduce(&iam, &comp_leader, 1, MPI_INT, MPI_MAX, peer_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* This is an io task. */
+                my_iosys->ioproc = true;
+
+                /* Is this the iomaster? Only if the io_rank is zero. */
+                if (!my_iosys->io_rank)
+                {
+                    my_iosys->iomaster = MPI_ROOT;
+                    io_master = MPI_ROOT;
+                }
+                else
+                    my_iosys->iomaster = 0;
+
+                /* Set up the intercomm from the I/O side. */
+                mpierr = MPI_Intercomm_create(my_iosys->io_comm, 0, peer_comm,
+                                              comp_leader, cmp, &my_iosys->intercomm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+                /* Create the union communicator. */
+                mpierr = MPI_Intercomm_merge(my_iosys->intercomm, 0, &my_iosys->union_comm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                if (mpierr)
+                    ierr = PIO_EIO;
+
+            }
+            else
+            {
+                my_iosys->io_comm = MPI_COMM_NULL;
+                my_iosys->iogroup = MPI_GROUP_NULL;
+                my_iosys->io_rank = -1;
+                my_iosys->ioproc = false;
+                my_iosys->iomaster = false;
+            }
+
+            /* my_comm points to the union communicator for async, and
+             * the comp_comm for non-async. It should not be freed
+             * since it is not a proper copy of the communicator, just
+             * a copy of the reference to it. */
+            my_iosys->my_comm = my_iosys->union_comm;
+
+            /* Find rank in union communicator. */
+            mpierr = MPI_Comm_rank(my_iosys->union_comm, &my_iosys->union_rank);
+            CheckMPIReturn(mpierr, __FILE__, __LINE__);
+            if (mpierr)
+                ierr = PIO_EIO;
+
+            /* Find the rank of the io leader in the union communicator. */
+            if (!my_iosys->io_rank)
+                my_iosys->ioroot = my_iosys->union_rank;
+            else
+                my_iosys->ioroot = -1;
+
+            /* Distribute the answer to all tasks. */
+            mpierr = MPI_Allreduce(&my_iosys->ioroot, &root, 1, MPI_INT, MPI_MAX,
+                                   my_iosys->union_comm);
+            CheckMPIReturn(mpierr, __FILE__, __LINE__);
+            if (mpierr)
+                ierr = PIO_EIO;
+            my_iosys->ioroot = root;
+
+            /* Find the rank of the computation leader in the union
+             * communicator. */
+            if (!my_iosys->comp_rank)
+                my_iosys->comproot = my_iosys->union_rank;
+            else
+                my_iosys->comproot = -1;
+
+            /* Distribute the answer to all tasks. */
+            mpierr = MPI_Allreduce(&my_iosys->comproot, &root, 1, MPI_INT, MPI_MAX,
+                                   my_iosys->union_comm);
+            CheckMPIReturn(mpierr, __FILE__, __LINE__);
+            if (mpierr)
+                ierr = PIO_EIO;
+            my_iosys->comproot = root;
+
+            /* Send the number of tasks in the IO and computation
+               communicators to each other over the intercomm.
+               This is a one-to-all bcast from the local task that passes
+               MPI_ROOT as the root (all other local tasks should pass
+               MPI_PROC_NULL as the root). The bcast is received by
+               all the members of the leaf group, which each pass the
+               rank of the root relative to the root group. */
+            if (io_comm != MPI_COMM_NULL)
+            {
+                comp_master = 0;
+                mpierr = MPI_Bcast(&my_iosys->num_comptasks, 1, MPI_INT, comp_master,
+                                   my_iosys->intercomm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                mpierr = MPI_Bcast(&my_iosys->num_iotasks, 1, MPI_INT, io_master,
+                                   my_iosys->intercomm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+            }
+            else
+            {
+                io_master = 0;
+                mpierr = MPI_Bcast(&my_iosys->num_comptasks, 1, MPI_INT, comp_master,
+                                   my_iosys->intercomm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+                mpierr = MPI_Bcast(&my_iosys->num_iotasks, 1, MPI_INT, io_master,
+                                   my_iosys->intercomm);
+                CheckMPIReturn(mpierr, __FILE__, __LINE__);
+            }
+
+            /* Allocate an array to hold the ranks of the IO tasks
+             * within the union communicator. */
+            if (!(my_iosys->ioranks = malloc(my_iosys->num_iotasks * sizeof(int))))
+                return PIO_ENOMEM;
+
+            /* Allocate a temp array to help get the IO ranks. */
+            int *tmp_ioranks;
+            if (!(tmp_ioranks = malloc(my_iosys->num_iotasks * sizeof(int))))
+                return PIO_ENOMEM;
+
+            /* Init array, then have IO tasks set their values, then
+             * use allreduce to distribute results to all tasks. */
+            for (int cnt = 0 ; cnt < my_iosys->num_iotasks; cnt++)
+                tmp_ioranks[cnt] = -1;
+            if (io_comm != MPI_COMM_NULL)
+                tmp_ioranks[my_iosys->io_rank] = my_iosys->union_rank;
+            mpierr = MPI_Allreduce(tmp_ioranks, my_iosys->ioranks, my_iosys->num_iotasks, MPI_INT, MPI_MAX,
+                                   my_iosys->union_comm);
+            CheckMPIReturn(mpierr, __FILE__, __LINE__);
+
+            /* Free temp array. */
+            free(tmp_ioranks);
+
+            /* Set the default error handling. */
+            my_iosys->error_handler = PIO_INTERNAL_ERROR;
+
+            /* We do support the async interface. */
+            my_iosys->async_interface = true;
+
+            /* For debug purposes, print the contents of the struct. */
+            /*int my_rank;*/
+            /* MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);*/
+
+            /* for (int t = 0; t < my_iosys->num_iotasks + my_iosys->num_comptasks; t++) */
+            /* { */
+            /*     MPI_Barrier(my_iosys->union_comm); */
+            /*     if (my_rank == t) */
+            /*         pio_iosys_print(my_rank, my_iosys); */
+            /* } */
+
+            /* Add this id to the list of PIO iosystem ids. */
+            iosysidp[cmp] = pio_add_to_iosystem_list(my_iosys);
+            LOG((2, "added to iosystem_list iosysid = %d", iosysidp[cmp]));
+
+            /* Now call the function from which the IO tasks will not
+             * return until the PIO_MSG_EXIT message is sent. */
+            if (io_comm != MPI_COMM_NULL)
+                if ((ierr = pio_msg_handler(my_iosys->io_rank, component_count, iosys)))
+                    return ierr;
+        }
+
+    /* If there was an error, make sure all tasks see it. */
+    if (ierr)
+    {
+        mpierr = MPI_Bcast(&ierr, 1, MPI_INT, 0, iosys->intercomm);
+        CheckMPIReturn(mpierr, __FILE__, __LINE__);
+        if (mpierr)
+            ierr = PIO_EIO;
+    }
+
+    return ierr;
+}
diff --git a/externals/pio2/src/clib/pio_nc.c b/externals/pio2/src/clib/pio_nc.c
index f511475a4f6..52a376f0c26 100644
--- a/externals/pio2/src/clib/pio_nc.c
+++ b/externals/pio2/src/clib/pio_nc.c
@@ -122,7 +122,7 @@ int PIOc_inq_dimname (int ncid, int dimid, char *name)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_DIMNAME;
+    msg = PIO_MSG_INQ_DIM;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -961,7 +961,7 @@ int PIOc_inq_vartype (int ncid, int varid, nc_type *xtypep)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_VARTYPE;
+    msg = PIO_MSG_INQ_VAR;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -1108,7 +1108,7 @@ int PIOc_inq_vardimid (int ncid, int varid, int *dimidsp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_VARDIMID;
+    msg = PIO_MSG_INQ_VAR;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -1340,7 +1340,7 @@ int PIOc_inq_attlen (int ncid, int varid, const char *name, PIO_Offset *lenp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_ATTLEN;
+    msg = PIO_MSG_INQ_ATT;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -1415,7 +1415,7 @@ int PIOc_inq_atttype (int ncid, int varid, const char *name, nc_type *xtypep)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_ATTTYPE;
+    msg = PIO_MSG_INQ_ATT;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -1561,7 +1561,7 @@ int PIOc_inq_natts (int ncid, int *ngattsp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_NATTS;
+    msg = PIO_MSG_INQ;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -2322,7 +2322,7 @@ int PIOc_inq_attname (int ncid, int varid, int attnum, char *name)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_ATTNAME;
+    msg = PIO_MSG_INQ_ATT;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -2551,7 +2551,7 @@ int PIOc_inq_unlimdim (int ncid, int *unlimdimidp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_UNLIMDIM;
+    msg = PIO_MSG_INQ;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -2702,7 +2702,7 @@ int PIOc_inq_ndims (int ncid, int *ndimsp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_NDIMS;
+    msg = PIO_MSG_INQ;
    if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -2848,7 +2848,7 @@ int PIOc_inq_nvars (int ncid, int *nvarsp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_NVARS;
+    msg = PIO_MSG_INQ;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -3141,7 +3141,7 @@ int PIOc_inq_varnatts (int ncid, int varid, int *nattsp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_VARNATTS;
+    msg = PIO_MSG_INQ_VAR;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -3444,7 +3444,7 @@ int PIOc_inq_dimlen (int ncid, int dimid, PIO_Offset *lenp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_DIMLEN;
+    msg = PIO_MSG_INQ_DIM;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
@@ -3674,7 +3674,7 @@ int PIOc_inq_varndims (int ncid, int varid, int *ndimsp)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_VARNDIMS;
+    msg = PIO_MSG_INQ_VAR;
     if(file->varlist[varid].ndims > 0){
         (*ndimsp) = file->varlist[varid].ndims;
         return PIO_NOERR;
@@ -3753,7 +3753,7 @@ int PIOc_inq_varname (int ncid, int varid, char *name)
     if(file == NULL)
         return PIO_EBADID;
     ios = file->iosystem;
-    msg = PIO_MSG_INQ_VARNAME;
+    msg = PIO_MSG_INQ_VAR;
     if(ios->async_interface && ! ios->ioproc){
         if(ios->compmaster)
diff --git a/externals/pio2/src/clib/pio_nc4.c b/externals/pio2/src/clib/pio_nc4.c
index 735e3755a99..a311ed003f4 100644
--- a/externals/pio2/src/clib/pio_nc4.c
+++ b/externals/pio2/src/clib/pio_nc4.c
@@ -235,28 +235,40 @@ int PIOc_inq_var_deflate(int ncid, int varid, int *shufflep,
 int PIOc_def_var_chunking(int ncid, int varid, int storage,
                           const PIO_Offset *chunksizesp)
 {
-    int ierr;
-    int msg;
-    int mpierr;
-    iosystem_desc_t *ios;
-    file_desc_t *file;
+    iosystem_desc_t *ios;  /** Pointer to io system information. */
+    file_desc_t *file;     /** Pointer to file information. */
+    int ierr = PIO_NOERR;  /** Return code from function calls. */
+    int mpierr = MPI_SUCCESS, mpierr2;  /** Return code from MPI function codes. */
     char *errstr;

-    errstr = NULL;
-    ierr = PIO_NOERR;
+    /* Find the info about this file. */
     if (!(file = pio_get_file_from_id(ncid)))
         return PIO_EBADID;
     ios = file->iosystem;

-    msg = PIO_MSG_DEF_VAR_CHUNKING;
-    if (ios->async_interface && ! ios->ioproc)
+    /* If async is in use, and this is not an IO task, bcast the parameters. */
+    if (ios->async_interface)
     {
-        if (ios->compmaster)
-            mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm);
-        mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm);
+        if (!ios->ioproc)
+        {
+            int msg = PIO_MSG_DEF_VAR_CHUNKING;
+
+            if (ios->compmaster)
+                mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            if (!mpierr)
+                mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm);
+        }
+
+        /* Handle MPI errors. */
+        if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
+            return check_mpi(file, mpierr2, __FILE__, __LINE__);
+        if (mpierr)
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
     }

+    /* If this is an IO task, then call the netCDF function. */
     if (ios->ioproc)
     {
         switch (file->iotype)
@@ -287,20 +299,12 @@ int PIOc_def_var_chunking(int ncid, int varid, int storage,
         }
     }

-    /* Allocate an error string if needed. */
-    if (ierr != PIO_NOERR)
-    {
-        errstr = (char *) malloc((strlen(__FILE__) + 20)* sizeof(char));
-        sprintf(errstr,"in file %s",__FILE__);
-    }
-
-    /* Check for netCDF error. */
-    ierr = check_netcdf(file, ierr, errstr,__LINE__);
-
-    /* Free the error string if it was allocated. */
-    if (errstr != NULL)
-        free(errstr);
-
+    /* Broadcast and check the return code. */
+    if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+        return check_mpi(file, mpierr, __FILE__, __LINE__);
+    if (ierr)
+        return check_netcdf(file, ierr, __FILE__, __LINE__);
+
     return ierr;
 }
diff --git a/externals/pio2/src/clib/pio_nc_async.c b/externals/pio2/src/clib/pio_nc_async.c
new file mode 100644
index 00000000000..147fc548d7a
--- /dev/null
+++ b/externals/pio2/src/clib/pio_nc_async.c
@@ -0,0 +1,2513 @@
+/**
+ * @file
+ * PIO interfaces to
+ * [NetCDF](http://www.unidata.ucar.edu/software/netcdf/docs/modules.html)
+ * support functions
+
+ * This file provides an interface to the
+ * [NetCDF](http://www.unidata.ucar.edu/software/netcdf/docs/modules.html)
+ * support functions. Each subroutine calls the underlying netcdf or
+ * pnetcdf or netcdf4 functions from the appropriate subset of mpi
+ * tasks (io_comm). Each routine must be called collectively from
+ * union_comm.
+ *
+ * @author Jim Edwards (jedwards@ucar.edu), Ed Hartnett
+ * @date February 2014, April 2016
+ */
+
+#include <config.h>
+#include <pio.h>
+#include <pio_internal.h>
+
+/**
+ * @ingroup PIOc_inq
+ * The PIO-C interface for the NetCDF function nc_inq.
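+ *
+ * A minimal usage sketch (assuming ncid came from PIOc_openfile() or
+ * PIOc_createfile(); any output pointer the caller does not need may
+ * be passed as NULL):
+ *
+ * @code
+ * int ndims, unlimdimid, ierr;
+ * if ((ierr = PIOc_inq(ncid, &ndims, NULL, NULL, &unlimdimid)))
+ *     return ierr;
+ * @endcode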
+ * + * This routine is called collectively by all tasks in the + * communicator ios.union_comm. For more information on the underlying + * NetCDF command please read about this function in the NetCDF + * documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__datasets.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param ndimsp a pointer that will get the number of dimensions, or NULL. + * @param nvarsp a pointer that will get the number of variables, or NULL. + * @param ngattsp a pointer that will get the number of global attributes, or NULL. + * @param unlimdimidp a pointer that will get the ID of the unlimited dimension, or NULL. + * + * @return PIO_NOERR for success, error code otherwise. See + * PIOc_Set_File_Error_Handling + */ +int PIOc_inq(int ncid, int *ndimsp, int *nvarsp, int *ngattsp, int *unlimdimidp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + LOG((1, "PIOc_inq ncid = %d", ncid)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ; /* Message for async notification. */ + char ndims_present = ndimsp ? true : false; + char nvars_present = nvarsp ? true : false; + char ngatts_present = ngattsp ? true : false; + char unlimdimid_present = unlimdimidp ? true : false; + + if (ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&ndims_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&nvars_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&ngatts_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&unlimdimid_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_inq ncid = %d ndims_present = %d nvars_present = %d ngatts_present = %d unlimdimid_present = %d", + ncid, ndims_present, nvars_present, ngatts_present, unlimdimid_present)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + { + LOG((2, "PIOc_inq calling ncmpi_inq unlimdimidp = %d", unlimdimidp)); + ierr = ncmpi_inq(ncid, ndimsp, nvarsp, ngattsp, unlimdimidp); + LOG((2, "PIOc_inq called ncmpi_inq")); + if (unlimdimidp) + LOG((2, "PIOc_inq returned from ncmpi_inq unlimdimid = %d", *unlimdimidp)); + } +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype == PIO_IOTYPE_NETCDF && file->do_io) + { + LOG((2, "PIOc_inq calling classic nc_inq")); + /* Should not be necessary to do this - nc_inq should + * handle null pointers. This has been reported as a bug + * to netCDF developers.
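The workaround that follows can be distilled into a small pattern: always hand the library valid addresses, then copy back only what the caller requested. A minimal restatement, assuming a classic netCDF build; safe_inq_ndims is an illustrative name, not part of this PR.

#include <netcdf.h>

/* Pass real temporaries to nc_inq() so it never sees a null pointer,
 * then honor the caller's optional output. */
int safe_inq_ndims(int ncid, int *ndimsp)
{
    int nd, nv, na, ud;
    int ret = nc_inq(ncid, &nd, &nv, &na, &ud);
    if (!ret && ndimsp)
        *ndimsp = nd;
    return ret;
}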
*/ + int tmp_ndims, tmp_nvars, tmp_ngatts, tmp_unlimdimid; + ierr = nc_inq(ncid, &tmp_ndims, &tmp_nvars, &tmp_ngatts, &tmp_unlimdimid); + LOG((2, "PIOc_inq called classic nc_inq")); + if (unlimdimidp) + LOG((2, "classic tmp_unlimdimid = %d", tmp_unlimdimid)); + if (ndimsp) + *ndimsp = tmp_ndims; + if (nvarsp) + *nvarsp = tmp_nvars; + if (ngattsp) + *ngattsp = tmp_ngatts; + if (unlimdimidp) + *unlimdimidp = tmp_unlimdimid; + if (unlimdimidp) + LOG((2, "classic unlimdimid = %d", *unlimdimidp)); + } + else if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + { + LOG((2, "PIOc_inq calling netcdf-4 nc_inq")); + ierr = nc_inq(ncid, ndimsp, nvarsp, ngattsp, unlimdimidp); + } +#endif /* _NETCDF */ + LOG((2, "PIOc_inq netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (!ierr) + { + if (ndimsp) + if ((mpierr = MPI_Bcast(ndimsp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + if (nvarsp) + if ((mpierr = MPI_Bcast(nvarsp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + if (ngattsp) + if ((mpierr = MPI_Bcast(ngattsp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + if (unlimdimidp) + if ((mpierr = MPI_Bcast(unlimdimidp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_inq_ndims + * The PIO-C interface for the NetCDF function nc_inq_ndims. + */ +int PIOc_inq_ndims (int ncid, int *ndimsp) +{ + LOG((1, "PIOc_inq_ndims")); + return PIOc_inq(ncid, ndimsp, NULL, NULL, NULL); +} + +/** + * @ingroup PIOc_inq_nvars + * The PIO-C interface for the NetCDF function nc_inq_nvars. + */ +int PIOc_inq_nvars(int ncid, int *nvarsp) +{ + return PIOc_inq(ncid, NULL, nvarsp, NULL, NULL); +} + +/** + * @ingroup PIOc_inq_natts + * The PIO-C interface for the NetCDF function nc_inq_natts. + */ +int PIOc_inq_natts(int ncid, int *ngattsp) +{ + return PIOc_inq(ncid, NULL, NULL, ngattsp, NULL); +} + +/** + * @ingroup PIOc_inq_unlimdim + * The PIO-C interface for the NetCDF function nc_inq_unlimdim. + */ +int PIOc_inq_unlimdim(int ncid, int *unlimdimidp) +{ + LOG((1, "PIOc_inq_unlimdim ncid = %d unlimdimidp = %d", ncid, unlimdimidp)); + return PIOc_inq(ncid, NULL, NULL, NULL, unlimdimidp); +} + +/** Internal function to provide inq_type function for pnetcdf. */ +int pioc_pnetcdf_inq_type(int ncid, nc_type xtype, char *name, + PIO_Offset *sizep) +{ + int typelen; + + switch (xtype) + { + case NC_UBYTE: + case NC_BYTE: + case NC_CHAR: + typelen = 1; + break; + case NC_SHORT: + case NC_USHORT: + typelen = 2; + break; + case NC_UINT: + case NC_INT: + case NC_FLOAT: + typelen = 4; + break; + case NC_UINT64: + case NC_INT64: + case NC_DOUBLE: + typelen = 8; + break; + default: + return PIO_EBADTYPE; + } + + /* If pointers were supplied, copy results. */ + if (sizep) + *sizep = typelen; + if (name) + strcpy(name, "some type"); + + return PIO_NOERR; +} + +/** + * @ingroup PIOc_typelen + * The PIO-C interface for the NetCDF function nctypelen.
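A short usage sketch for PIOc_inq_type(), defined below: with pnetcdf the size comes from the lookup table above, with netCDF it comes from nc_inq_type(). The function name here is illustrative, and ncid is assumed to refer to an open file.

#include <pio.h>

/* Query the size of the external type PIO_INT; on return, *size is 4
 * on every task in union_comm. */
int int_type_size(int ncid, PIO_Offset *size)
{
    char tname[NC_MAX_NAME + 1];
    return PIOc_inq_type(ncid, PIO_INT, tname, size);
}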
+ */ +int PIOc_inq_type(int ncid, nc_type xtype, char *name, PIO_Offset *sizep) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + int typelen; + + LOG((1, "PIOc_inq_type ncid = %d xtype = %d", ncid, xtype)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_TYPE; /* Message for async notification. */ + char name_present = name ? true : false; + char size_present = sizep ? true : false; + + if (ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&xtype, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&name_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&size_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = pioc_pnetcdf_inq_type(ncid, xtype, name, sizep); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_type(ncid, xtype, name, (size_t *)sizep); +#endif /* _NETCDF */ + LOG((2, "PIOc_inq_type netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (!ierr) + { + if (name) + { + int slen; + if (ios->iomaster) + slen = strlen(name); + if ((mpierr = MPI_Bcast(&slen, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (!mpierr) + if ((mpierr = MPI_Bcast((void *)name, slen + 1, MPI_CHAR, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + if (sizep) + if ((mpierr = MPI_Bcast(sizep , 1, MPI_OFFSET, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_inq_format + * The PIO-C interface for the NetCDF function nc_inq_format. + */ +int PIOc_inq_format (int ncid, int *formatp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + LOG((1, "PIOc_inq ncid = %d", ncid)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. 
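The name broadcast in PIOc_inq_type() above uses a two-step pattern, length first and then the bytes, so receiving tasks know how much data to expect. A generic restatement of the idea; bcast_string is a hypothetical helper, not part of this PR.

#include <string.h>
#include <mpi.h>

/* Root broadcasts the string length, then the characters including the
 * terminating NUL. buf must be large enough on every rank. */
int bcast_string(char *buf, int root, MPI_Comm comm)
{
    int rank, len, ret;

    if ((ret = MPI_Comm_rank(comm, &rank)))
        return ret;
    if (rank == root)
        len = (int)strlen(buf);
    if ((ret = MPI_Bcast(&len, 1, MPI_INT, root, comm)))
        return ret;
    return MPI_Bcast(buf, len + 1, MPI_CHAR, root, comm);
}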
*/ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_FORMAT; + char format_present = formatp ? true : false; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&format_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_inq_format(file->fh, formatp); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_format(file->fh, formatp); +#endif /* _NETCDF */ + LOG((2, "PIOc_inq netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (!ierr) + { + if (formatp) + if ((mpierr = MPI_Bcast(formatp , 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_inq_dim + * The PIO-C interface for the NetCDF function nc_inq_dim. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__dimensions.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param lenp a pointer that will get the number of values + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_inq_dim(int ncid, int dimid, char *name, PIO_Offset *lenp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + LOG((1, "PIOc_inq_dim")); + + /* Get the file info, based on the ncid. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_DIM; + char name_present = name ? true : false; + char len_present = lenp ? 
true : false; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&dimid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&name_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_inq netcdf Bcast name_present = %d", name_present)); + if (!mpierr) + mpierr = MPI_Bcast(&len_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_inq netcdf Bcast len_present = %d", len_present)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_inq_dim(file->fh, dimid, name, lenp);; +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_dim(file->fh, dimid, name, (size_t *)lenp);; +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (!ierr) + { + if (name) + { + int slen; + if (ios->iomaster) + slen = strlen(name); + if ((mpierr = MPI_Bcast(&slen, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast((void *)name, slen + 1, MPI_CHAR, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + if (lenp) + if ((mpierr = MPI_Bcast(lenp , 1, MPI_OFFSET, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_inq_dimname + * The PIO-C interface for the NetCDF function nc_inq_dimname. + */ +int PIOc_inq_dimname(int ncid, int dimid, char *name) +{ + return PIOc_inq_dim(ncid, dimid, name, NULL); +} + +/** + * @ingroup PIOc_inq_dimlen + * The PIO-C interface for the NetCDF function nc_inq_dimlen. + */ +int PIOc_inq_dimlen(int ncid, int dimid, PIO_Offset *lenp) +{ + return PIOc_inq_dim(ncid, dimid, NULL, lenp); +} + +/** + * @ingroup PIOc_inq_dimid + * The PIO-C interface for the NetCDF function nc_inq_dimid. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__dimensions.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param idp a pointer that will get the id of the variable or attribute. + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_inq_dimid(int ncid, const char *name, int *idp) +{ + iosystem_desc_t *ios; + file_desc_t *file; + int ierr = PIO_NOERR; + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* Name must be provided. */ + if (!name) + return PIO_EINVAL; + + LOG((1, "PIOc_inq_dimid name = %s", name)); + + /* Get the file info, based on the ncid. 
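A usage sketch combining the dimension routines above: resolve a dimension by name with PIOc_inq_dimid(), then fetch its length with PIOc_inq_dimlen(). The dimension name "time" and the helper name are illustrative.

#include <pio.h>

/* Look up the "time" dimension and report how many steps it holds. */
int time_steps(int ncid, PIO_Offset *nsteps)
{
    int dimid, ret;

    if ((ret = PIOc_inq_dimid(ncid, "time", &dimid)))
        return ret;
    return PIOc_inq_dimlen(ncid, dimid, nsteps);
}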
*/ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If using async, and not an IO task, then send parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_DIMID; + char id_present = idp ? true : false; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + int namelen = strlen(name); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&id_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* IO tasks call the netCDF functions. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_inq_dimid(file->fh, name, idp);; +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_dimid(file->fh, name, idp);; +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results. */ + if (!ierr) + if (idp) + if ((mpierr = MPI_Bcast(idp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + return ierr; +} + +/** + * @ingroup PIOc_inq_var + * The PIO-C interface for the NetCDF function nc_inq_var. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__variables.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @param xtypep a pointer that will get the type of the attribute. + * @param nattsp a pointer that will get the number of attributes + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, + int *dimidsp, int *nattsp) +{ + iosystem_desc_t *ios; + file_desc_t *file; + int ndims; /* The number of dimensions for this variable. */ + int ierr = PIO_NOERR; + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + LOG((1, "PIOc_inq_var ncid = %d varid = %d", ncid, varid)); + + /* Get the file info, based on the ncid. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_VAR; + char name_present = name ? true : false; + char xtype_present = xtypep ? true : false; + char ndims_present = ndimsp ? true : false; + char dimids_present = dimidsp ? true : false; + char natts_present = nattsp ? 
true : false; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&name_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&xtype_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&ndims_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&dimids_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&natts_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_inq_var name_present = %d xtype_present = %d ndims_present = %d " + "dimids_present = %d, natts_present = %d nattsp = %d", + name_present, xtype_present, ndims_present, dimids_present, natts_present, nattsp)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* Call the netCDF layer. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + { + ierr = ncmpi_inq_varndims(file->fh, varid, &ndims); + if (!ierr) + ierr = ncmpi_inq_var(file->fh, varid, name, xtypep, ndimsp, dimidsp, nattsp);; + } +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + { + ierr = nc_inq_varndims(file->fh, varid, &ndims); + if (!ierr) + ierr = nc_inq_var(file->fh, varid, name, xtypep, ndimsp, dimidsp, nattsp); + } +#endif /* _NETCDF */ + } + + if (ndimsp) + LOG((2, "PIOc_inq_var ndims = %d ierr = %d", *ndimsp, ierr)); + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast the results for non-null pointers. 
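PIOc_inq_var() above deliberately queries the dimension count before broadcasting dimids, since non-IO tasks must know how many IDs to receive. Callers face the same sizing problem; here is a sketch of the caller-side idiom, with get_var_dimids as an illustrative name and PIO_ENOMEM assumed available from pio.h.

#include <stdlib.h>
#include <pio.h>

/* Ask for the dimension count first, then size the dimids array.
 * On success the caller owns and must free *dimids. */
int get_var_dimids(int ncid, int varid, int **dimids, int *ndims)
{
    int ret;

    if ((ret = PIOc_inq_varndims(ncid, varid, ndims)))
        return ret;
    if (!(*dimids = malloc(*ndims * sizeof(int))))
        return PIO_ENOMEM;
    if ((ret = PIOc_inq_vardimid(ncid, varid, *dimids)))
    {
        free(*dimids);
        *dimids = NULL;
    }
    return ret;
}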
*/ + if (!ierr) + { + if (name) + { + int slen; + if(ios->iomaster) + slen = strlen(name); + if ((mpierr = MPI_Bcast(&slen, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast((void *)name, slen + 1, MPI_CHAR, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + if (xtypep) + if ((mpierr = MPI_Bcast(xtypep, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + if (ndimsp) + { + if (ios->ioroot) + LOG((2, "PIOc_inq_var about to Bcast ndims = %d ios->ioroot = %d", *ndimsp, ios->ioroot)); + if ((mpierr = MPI_Bcast(ndimsp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + file->varlist[varid].ndims = *ndimsp; + LOG((2, "PIOc_inq_var Bcast ndims = %d", *ndimsp)); + } + if (dimidsp) + { + if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(dimidsp, ndims, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + if (nattsp) + if ((mpierr = MPI_Bcast(nattsp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_inq_varname + * The PIO-C interface for the NetCDF function nc_inq_varname. + */ +int PIOc_inq_varname (int ncid, int varid, char *name) +{ + return PIOc_inq_var(ncid, varid, name, NULL, NULL, NULL, NULL); +} + +/** + * @ingroup PIOc_inq_vartype + * The PIO-C interface for the NetCDF function nc_inq_vartype. + */ +int PIOc_inq_vartype (int ncid, int varid, nc_type *xtypep) +{ + return PIOc_inq_var(ncid, varid, NULL, xtypep, NULL, NULL, NULL); +} + +/** + * @ingroup PIOc_inq_varndims + * The PIO-C interface for the NetCDF function nc_inq_varndims. + */ +int PIOc_inq_varndims (int ncid, int varid, int *ndimsp) +{ + return PIOc_inq_var(ncid, varid, NULL, NULL, ndimsp, NULL, NULL); +} + +/** + * @ingroup PIOc_inq_vardimid + * The PIO-C interface for the NetCDF function nc_inq_vardimid. + */ +int PIOc_inq_vardimid(int ncid, int varid, int *dimidsp) +{ + return PIOc_inq_var(ncid, varid, NULL, NULL, NULL, dimidsp, NULL); +} + +/** + * @ingroup PIOc_inq_varnatts + * The PIO-C interface for the NetCDF function nc_inq_varnatts. + */ +int PIOc_inq_varnatts (int ncid, int varid, int *nattsp) +{ + return PIOc_inq_var(ncid, varid, NULL, NULL, NULL, NULL, nattsp); +} + +/** + * @ingroup PIOc_inq_varid + * The PIO-C interface for the NetCDF function nc_inq_varid. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__variables.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @param varidp a pointer that will get the variable id + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_inq_varid (int ncid, const char *name, int *varidp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. 
*/ + + /* Caller must provide name. */ + if (!name || strlen(name) > NC_MAX_NAME) + return PIO_EINVAL; + + /* Get file info based on ncid. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + LOG((1, "PIOc_inq_varid ncid = %d name = %s", ncid, name)); + + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_VARID; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + int namelen; + namelen = strlen(name); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_inq_varid(file->fh, name, varidp);; +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_varid(file->fh, name, varidp); +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (varidp) + if ((mpierr = MPI_Bcast(varidp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + + return ierr; +} + +/** + * @ingroup PIOc_inq_att + * The PIO-C interface for the NetCDF function nc_inq_att. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__attributes.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @param xtypep a pointer that will get the type of the attribute. + * @param lenp a pointer that will get the number of values + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_inq_att(int ncid, int varid, const char *name, nc_type *xtypep, + PIO_Offset *lenp) +{ + int msg = PIO_MSG_INQ_ATT; + iosystem_desc_t *ios; + file_desc_t *file; + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + int ierr = PIO_NOERR; + + /* Caller must provide a name. */ + if (!name) + return PIO_EINVAL; + + LOG((1, "PIOc_inq_att ncid = %d varid = %d xtpyep = %d lenp = %d", + ncid, varid, xtypep, lenp)); + + /* Find file based on ncid. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + char xtype_present = xtypep ? true : false; + char len_present = lenp ? 
true : false; + int namelen = strlen(name); + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&xtype_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&len_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_inq_att(file->fh, varid, name, xtypep, lenp); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_att(file->fh, varid, name, xtypep, (size_t *)lenp); +#endif /* _NETCDF */ + LOG((2, "PIOc_inq netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results. */ + if (!ierr) + { + if(xtypep) + if ((mpierr = MPI_Bcast(xtypep, 1, MPI_INT, ios->ioroot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + if(lenp) + if ((mpierr = MPI_Bcast(lenp, 1, MPI_OFFSET, ios->ioroot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_inq_attlen + * The PIO-C interface for the NetCDF function nc_inq_attlen. + */ +int PIOc_inq_attlen (int ncid, int varid, const char *name, PIO_Offset *lenp) +{ + return PIOc_inq_att(ncid, varid, name, NULL, lenp); +} + +/** + * @ingroup PIOc_inq_atttype + * The PIO-C interface for the NetCDF function nc_inq_atttype. + */ +int PIOc_inq_atttype(int ncid, int varid, const char *name, nc_type *xtypep) +{ + return PIOc_inq_att(ncid, varid, name, xtypep, NULL); +} + +/** + * @ingroup PIOc_inq_attname + * The PIO-C interface for the NetCDF function nc_inq_attname. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__attributes.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @param attnum the attribute ID. + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_inq_attname(int ncid, int varid, int attnum, char *name) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. 
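A usage sketch for the attribute inquiries defined above; the attribute name "units" is illustrative, and the helper treats any inquiry error as "no attribute".

#include <pio.h>

/* Return 1 if varid has a non-empty text "units" attribute, 0 if it
 * is missing or has another type. */
int has_text_units(int ncid, int varid)
{
    nc_type xtype;
    PIO_Offset len;

    if (PIOc_inq_att(ncid, varid, "units", &xtype, &len))
        return 0;
    return xtype == PIO_CHAR && len > 0;
}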
*/ + + LOG((1, "PIOc_inq_attname ncid = %d varid = %d attnum = %d", ncid, varid, + attnum)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_ATTNAME; + char name_present = name ? true : false; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&attnum, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&name_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_inq_attname(file->fh, varid, attnum, name);; +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_attname(file->fh, varid, attnum, name);; +#endif /* _NETCDF */ + LOG((2, "PIOc_inq_attname netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. */ + if (!ierr) + if (name) + { + int namelen = strlen(name); + if ((mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->ioroot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->ioroot, + ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_inq_attid + * The PIO-C interface for the NetCDF function nc_inq_attid. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__attributes.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @param idp a pointer that will get the id of the variable or attribute. + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_inq_attid(int ncid, int varid, const char *name, int *idp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* User must provide name shorter than NC_MAX_NAME +1. */ + if (!name || strlen(name) > NC_MAX_NAME) + return PIO_EINVAL; + + LOG((1, "PIOc_inq_attid ncid = %d varid = %d name = %s", ncid, varid, name)); + + /* Find the info about this file. 
*/ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_ATTID; + int namelen = strlen(name); + char id_present = idp ? true : false; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((char *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&id_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_inq_attid(file->fh, varid, name, idp);; +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_inq_attid(file->fh, varid, name, idp);; +#endif /* _NETCDF */ + LOG((2, "PIOc_inq_attname netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results. */ + if (!ierr) + { + if (idp) + if ((mpierr = MPI_Bcast(idp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + } + + return ierr; +} + +/** + * @ingroup PIOc_rename_dim + * The PIO-C interface for the NetCDF function nc_rename_dim. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__dimensions.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_rename_dim(int ncid, int dimid, const char *name) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* User must provide name of correct length. */ + if (!name || strlen(name) > NC_MAX_NAME) + return PIO_EINVAL; + + LOG((1, "PIOc_rename_dim ncid = %d dimid = %d name = %s", ncid, dimid, name)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_RENAME_DIM; /* Message for async notification. 
*/ + int namelen = strlen(name); + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&dimid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_rename_dim Bcast file->fh = %d dimid = %d namelen = %d name = %s", + file->fh, dimid, namelen, name)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_rename_dim(file->fh, dimid, name); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_rename_dim(file->fh, dimid, name);; +#endif /* _NETCDF */ + LOG((2, "PIOc_inq netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + return ierr; +} + +/** + * @ingroup PIOc_rename_var + * The PIO-C interface for the NetCDF function nc_rename_var. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__variables.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_rename_var(int ncid, int varid, const char *name) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* User must provide name of correct length. */ + if (!name || strlen(name) > NC_MAX_NAME) + return PIO_EINVAL; + + LOG((1, "PIOc_rename_var ncid = %d varid = %d name = %s", ncid, varid, name)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_RENAME_VAR; /* Message for async notification. 
*/ + int namelen = strlen(name); + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_rename_var Bcast file->fh = %d varid = %d namelen = %d name = %s", + file->fh, varid, namelen, name)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_rename_var(file->fh, varid, name); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_rename_var(file->fh, varid, name);; +#endif /* _NETCDF */ + LOG((2, "PIOc_inq netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + return ierr; +} + +/** + * @ingroup PIOc_rename_att + * The PIO-C interface for the NetCDF function nc_rename_att. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__attributes.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @return PIO_NOERR for success, error code otherwise. See + * PIOc_Set_File_Error_Handling + */ +int PIOc_rename_att (int ncid, int varid, const char *name, + const char *newname) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI functions. */ + + /* User must provide names of correct length. */ + if (!name || strlen(name) > NC_MAX_NAME || + !newname || strlen(newname) > NC_MAX_NAME) + return PIO_EINVAL; + + LOG((1, "PIOc_rename_att ncid = %d varid = %d name = %s newname = %s", + ncid, varid, name, newname)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_RENAME_ATT; /* Message for async notification. 
*/ + int namelen = strlen(name); + int newnamelen = strlen(newname); + + if (ios->compmaster) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((char *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&newnamelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((char *)newname, newnamelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_rename_att(file->fh, varid, name, newname); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_rename_att(file->fh, varid, name, newname); +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + LOG((2, "PIOc_rename_att succeeded")); + return ierr; +} + +/** + * @ingroup PIOc_del_att + * The PIO-C interface for the NetCDF function nc_del_att. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__attributes.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param varid the variable ID. + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_del_att(int ncid, int varid, const char *name) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI functions. */ + + /* User must provide name of correct length. */ + if (!name || strlen(name) > NC_MAX_NAME) + return PIO_EINVAL; + + LOG((1, "PIOc_del_att ncid = %d varid = %d name = %s", ncid, varid, name)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_DEL_ATT; + int namelen = strlen(name); /* Length of name string. 
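A usage sketch for the attribute maintenance routines here, PIOc_rename_att() above and PIOc_del_att() below. The attribute names are illustrative, and as with the underlying netCDF calls the file generally needs to be in define mode for these operations.

#include <pio.h>

/* Rename one global attribute, then delete another. Both calls are
 * collective across union_comm. */
int retag_globals(int ncid)
{
    int ret;

    if ((ret = PIOc_rename_att(ncid, PIO_GLOBAL, "history", "history_old")))
        return ret;
    return PIOc_del_att(ncid, PIO_GLOBAL, "stale_note");
}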
*/ + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((char *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_del_att(file->fh, varid, name); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_del_att(file->fh, varid, name); +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + LOG((2, "PIOc_del_att succeeded")); + return ierr; +} + +/** + * @ingroup PIOc_set_fill + * The PIO-C interface for the NetCDF function nc_set_fill. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__datasets.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_set_fill (int ncid, int fillmode, int *old_modep) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI functions. */ + + LOG((1, "PIOc_set_fill ncid = %d fillmode = %d old_modep = %d", ncid, fillmode, + old_modep)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_SET_FILL; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_set_fill(file->fh, fillmode, old_modep); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_set_fill(file->fh, fillmode, old_modep); +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. 
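A usage sketch for PIOc_set_fill(): switch prefill off for speed, remembering the previous mode so it can be restored. NC_NOFILL comes from netcdf.h, which pio.h pulls in for netCDF builds; the helper name is illustrative.

#include <pio.h>

/* Disable prefill, do the definitions and writes, then restore the
 * mode that was in effect before. */
int write_without_fill(int ncid)
{
    int old_mode, dummy, ret;

    if ((ret = PIOc_set_fill(ncid, NC_NOFILL, &old_mode)))
        return ret;
    /* ... define variables and write data here ... */
    return PIOc_set_fill(ncid, old_mode, &dummy);
}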
*/ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + LOG((2, "PIOc_set_fill succeeded")); + return ierr; +} + +/** This is an internal function that handles both PIOc_enddef and + * PIOc_redef. + * @param ncid the ncid of the file to enddef or redef + * @param is_enddef set to non-zero for enddef, 0 for redef. + * @returns PIO_NOERR on success, error code on failure. */ +int pioc_change_def(int ncid, int is_enddef) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI functions. */ + + LOG((1, "pioc_change_def ncid = %d is_enddef = %d", ncid, is_enddef)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + LOG((2, "pioc_change_def found file")); + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = is_enddef ? PIO_MSG_ENDDEF : PIO_MSG_REDEF; + LOG((2, "sending message msg = %d", msg)); + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + LOG((2, "pioc_change_def ncid = %d mpierr = %d", file->fh, mpierr)); + } + + /* Handle MPI errors. */ + LOG((2, "pioc_change_def handling MPI errors")); + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + LOG((2, "pioc_change_def ios->ioproc = %d", ios->ioproc)); + if (ios->ioproc) + { + LOG((2, "pioc_change_def calling netcdf function")); +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + if (is_enddef) + ierr = ncmpi_enddef(file->fh); + else + ierr = ncmpi_redef(file->fh); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + if (is_enddef) + ierr = nc_enddef(file->fh); + else + ierr = nc_redef(file->fh); +#endif /* _NETCDF */ + LOG((2, "pioc_change_def ierr = %d", ierr)); + } + + /* Broadcast and check the return code. */ + LOG((2, "pioc_change_def bcasting return code ierr = %d", ierr)); + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + LOG((2, "pioc_change_def succeeded")); + + return ierr; +} + +/** + * @ingroup PIOc_enddef + * The PIO-C interface for the NetCDF function nc_enddef. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF commmand + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__datasets.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @return PIO_NOERR for success, error code otherwise. 
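pioc_change_def() above gives PIOc_enddef() and PIOc_redef() the same collective shape, differing only in the is_enddef flag. A usage sketch of the resulting define-mode round trip; the helper name is illustrative.

#include <pio.h>

/* Leave define mode to write data, then re-enter it to add metadata. */
int add_metadata_later(int ncid)
{
    int ret;

    if ((ret = PIOc_enddef(ncid)))   /* pioc_change_def(ncid, 1) */
        return ret;
    /* ... write data here ... */
    if ((ret = PIOc_redef(ncid)))    /* pioc_change_def(ncid, 0) */
        return ret;
    /* ... define more attributes or variables ... */
    return PIOc_enddef(ncid);
}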
See PIOc_Set_File_Error_Handling + */ +int PIOc_enddef(int ncid) +{ + return pioc_change_def(ncid, 1); +} + +/** + * @ingroup PIOc_redef + * The PIO-C interface for the NetCDF function nc_redef. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF command + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__datasets.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_redef(int ncid) +{ + return pioc_change_def(ncid, 0); +} + +/** + * @ingroup PIOc_def_dim + * The PIO-C interface for the NetCDF function nc_def_dim. + * + * This routine is called collectively by all tasks in the communicator + * ios.union_comm. For more information on the underlying NetCDF command + * please read about this function in the NetCDF documentation at: + * http://www.unidata.ucar.edu/software/netcdf/docs/group__dimensions.html + * + * @param ncid the ncid of the open file, obtained from + * PIOc_openfile() or PIOc_createfile(). + * @param idp a pointer that will get the id of the variable or attribute. + * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + */ +int PIOc_def_dim (int ncid, const char *name, PIO_Offset len, int *idp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + + /* User must provide name. */ + if (!name || strlen(name) > NC_MAX_NAME) + return PIO_EINVAL; + + LOG((1, "PIOc_def_dim ncid = %d name = %s len = %d", ncid, name, len)); + + /* Find the info about this file. */ + if (!(file = pio_get_file_from_id(ncid))) + return PIO_EBADID; + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_DEF_DIM; + int namelen = strlen(name); + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, ios->compmaster, ios->intercomm); + + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&len, 1, MPI_OFFSET, ios->compmaster, ios->intercomm); + } + + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_def_dim(file->fh, name, len, idp); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_def_dim(file->fh, name, (size_t)len, idp); +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code.
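A usage sketch for the definition routines, chaining PIOc_def_dim() above with PIOc_def_var() below. Dimension names, sizes, and the variable name are illustrative; the file must be in define mode and the call is collective.

#include <pio.h>

/* Define a 2D float variable on lat/lon dimensions. */
int define_temp_var(int ncid, int *varidp)
{
    int dimids[2];
    int ret;

    if ((ret = PIOc_def_dim(ncid, "lat", 180, &dimids[0])))
        return ret;
    if ((ret = PIOc_def_dim(ncid, "lon", 360, &dimids[1])))
        return ret;
    return PIOc_def_var(ncid, "T", PIO_FLOAT, 2, dimids, varidp);
}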
+    if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+    {
+        check_mpi(file, mpierr, __FILE__, __LINE__);
+        return PIO_EIO;
+    }
+    check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    /* Broadcast results to all tasks. Ignore NULL parameters. */
+    if (!ierr)
+        if (idp)
+            if ((mpierr = MPI_Bcast(idp, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+                check_mpi(file, mpierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+/**
+ * @ingroup PIOc_def_var
+ * The PIO-C interface for the NetCDF function nc_def_var.
+ *
+ * This routine is called collectively by all tasks in the communicator
+ * ios.union_comm. For more information on the underlying NetCDF command
+ * please read about this function in the NetCDF documentation at:
+ * http://www.unidata.ucar.edu/software/netcdf/docs/group__variables.html
+ *
+ * @param ncid the ncid of the open file, obtained from
+ * PIOc_openfile() or PIOc_createfile().
+ * @param name the name of the new variable.
+ * @param xtype the netCDF type of the new variable.
+ * @param ndims the number of dimensions of the new variable.
+ * @param dimidsp an array of ndims dimension IDs.
+ * @param varidp a pointer that will get the variable ID.
+ * @return PIO_NOERR for success, error code otherwise. See
+ * PIOc_Set_File_Error_Handling.
+ */
+int PIOc_def_var(int ncid, const char *name, nc_type xtype, int ndims,
+                 const int *dimidsp, int *varidp)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    file_desc_t *file;     /* Pointer to file information. */
+    int ierr = PIO_NOERR;  /* Return code from function calls. */
+    int mpierr = MPI_SUCCESS, mpierr2;  /* Return code from MPI function codes. */
+
+    /* User must provide name and storage for varid. (file is not yet
+     * known here, so check_netcdf cannot be called.) */
+    if (!name || !varidp || strlen(name) > NC_MAX_NAME)
+        return PIO_EINVAL;
+
+    /* Get the file information. */
+    if (!(file = pio_get_file_from_id(ncid)))
+        return PIO_EBADID;
+    ios = file->iosystem;
+
+    /* If using async, and not an IO task, then send parameters. */
+    if (ios->async_interface)
+    {
+        if (!ios->ioproc)
+        {
+            int msg = PIO_MSG_DEF_VAR;
+            int namelen = strlen(name);
+
+            if (ios->compmaster)
+                mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            if (!mpierr)
+                mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&xtype, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast((void *)dimidsp, ndims, MPI_INT, ios->compmaster, ios->intercomm);
+        }
+
+        /* Handle MPI errors. */
+        if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
+            check_mpi(file, mpierr2, __FILE__, __LINE__);
+        if (mpierr)
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+    }
+
+    /* If this is an IO task, then call the netCDF function. */
+    if (ios->ioproc)
+    {
+#ifdef _PNETCDF
+        if (file->iotype == PIO_IOTYPE_PNETCDF)
+            ierr = ncmpi_def_var(file->fh, name, xtype, ndims, dimidsp, varidp);
+#endif /* _PNETCDF */
+#ifdef _NETCDF
+        if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io)
+            ierr = nc_def_var(file->fh, name, xtype, ndims, dimidsp, varidp);
+#ifdef _NETCDF4
+        /* For netCDF-4 serial files, turn on compression for this variable. */
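+        /* (The deflate call below passes shuffle = 0, deflate = 1,
+         * deflate_level = 1: compression on, no shuffle filter,
+         * fastest compression level.) */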
+        if (!ierr && file->iotype == PIO_IOTYPE_NETCDF4C)
+            ierr = nc_def_var_deflate(file->fh, *varidp, 0, 1, 1);
+
+        /* For netCDF-4 parallel files, set parallel access to collective. */
+        if (!ierr && file->iotype == PIO_IOTYPE_NETCDF4P)
+            ierr = nc_var_par_access(file->fh, *varidp, NC_COLLECTIVE);
+#endif /* _NETCDF4 */
+#endif /* _NETCDF */
+    }
+
+    /* Broadcast and check the return code. */
+    if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+    {
+        check_mpi(file, mpierr, __FILE__, __LINE__);
+        return PIO_EIO;
+    }
+    check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    /* Broadcast results. */
+    if (!ierr)
+        if (varidp)
+            if ((mpierr = MPI_Bcast(varidp, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+                check_mpi(file, mpierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+/**
+ * @ingroup PIOc_inq_var_fill
+ * The PIO-C interface for the NetCDF function nc_inq_var_fill.
+ *
+ * This routine is called collectively by all tasks in the communicator
+ * ios.union_comm. For more information on the underlying NetCDF command
+ * please read about this function in the NetCDF documentation at:
+ * http://www.unidata.ucar.edu/software/netcdf/docs/group__variables.html
+ *
+ * @param ncid the ncid of the open file, obtained from
+ * PIOc_openfile() or PIOc_createfile().
+ * @param varid the variable ID.
+ * @param no_fill a pointer that will get the no-fill mode flag.
+ * @param fill_valuep a pointer that will get the fill value.
+ * @return PIO_NOERR for success, error code otherwise. See
+ * PIOc_Set_File_Error_Handling.
+ */
+int PIOc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_valuep)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    file_desc_t *file;     /* Pointer to file information. */
+    int ierr = PIO_NOERR;  /* Return code from function calls. */
+    int mpierr = MPI_SUCCESS, mpierr2;  /* Return code from MPI function codes. */
+
+    LOG((1, "PIOc_inq_var_fill ncid = %d varid = %d", ncid, varid));
+
+    /* Find the info about this file. */
+    if (!(file = pio_get_file_from_id(ncid)))
+        return PIO_EBADID;
+    ios = file->iosystem;
+
+    /* If async is in use, and this is not an IO task, bcast the parameters. */
+    if (ios->async_interface)
+    {
+        if (!ios->ioproc)
+        {
+            int msg = PIO_MSG_INQ_VAR_FILL;
+
+            if (ios->compmaster)
+                mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            if (!mpierr)
+                mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+        }
+
+        /* Handle MPI errors. */
+        if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
+            check_mpi(file, mpierr2, __FILE__, __LINE__);
+        if (mpierr)
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+    }
+
+    /* If this is an IO task, then call the netCDF function. */
+    if (ios->ioproc)
+    {
+#ifdef _PNETCDF
+        if (file->iotype == PIO_IOTYPE_PNETCDF)
+            ierr = ncmpi_inq_var_fill(file->fh, varid, no_fill, fill_valuep);
+#endif /* _PNETCDF */
+#ifdef _NETCDF
+        if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io)
+            ierr = nc_inq_var_fill(file->fh, varid, no_fill, fill_valuep);
+#endif /* _NETCDF */
+    }
+
+    /* Broadcast and check the return code. */
+    if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+    {
+        check_mpi(file, mpierr, __FILE__, __LINE__);
+        return PIO_EIO;
+    }
+    check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    /* Broadcast results to all tasks. Ignore NULL parameters. (Note
+     * that this broadcast assumes an int-sized fill value.) */
+    if (!ierr)
+        if (fill_valuep)
+            if ((mpierr = MPI_Bcast(fill_valuep, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+                check_mpi(file, mpierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+/**
+ * @ingroup PIOc_get_att
+ * The PIO-C interface for the NetCDF function nc_get_att.
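+ *
+ * A usage sketch (illustrative only; the attribute name and type here
+ * are hypothetical, not taken from this file):
+ * @code
+ * float fv;
+ * PIOc_get_att(ncid, varid, "scale_factor", &fv);
+ * @endcode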
+ *
+ * This routine is called collectively by all tasks in the communicator
+ * ios.union_comm. For more information on the underlying NetCDF command
+ * please read about this function in the NetCDF documentation at:
+ * http://www.unidata.ucar.edu/software/netcdf/docs/group__attributes.html
+ *
+ * @param ncid the ncid of the open file, obtained from
+ * PIOc_openfile() or PIOc_createfile().
+ * @param varid the variable ID.
+ * @param name the name of the attribute.
+ * @param ip a pointer that will get the attribute value.
+ * @return PIO_NOERR for success, error code otherwise. See
+ * PIOc_Set_File_Error_Handling.
+ */
+int PIOc_get_att(int ncid, int varid, const char *name, void *ip)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    file_desc_t *file;     /* Pointer to file information. */
+    int ierr = PIO_NOERR;  /* Return code from function calls. */
+    int mpierr = MPI_SUCCESS, mpierr2;  /* Return code from MPI function codes. */
+    PIO_Offset attlen, typelen;
+    nc_type atttype;
+
+    /* User must provide a name and destination pointer. */
+    if (!name || !ip || strlen(name) > NC_MAX_NAME)
+        return PIO_EINVAL;
+
+    LOG((1, "PIOc_get_att ncid %d varid %d name %s", ncid, varid, name));
+
+    /* Find the info about this file. */
+    if (!(file = pio_get_file_from_id(ncid)))
+        return PIO_EBADID;
+    ios = file->iosystem;
+
+    /* Run these on all tasks if async is not in use, but only on
+     * non-IO tasks if async is in use. */
+    if (!ios->async_interface || !ios->ioproc)
+    {
+        /* Get the type and length of the attribute. */
+        if ((ierr = PIOc_inq_att(file->fh, varid, name, &atttype, &attlen)))
+        {
+            check_netcdf(file, ierr, __FILE__, __LINE__);
+            return ierr;
+        }
+
+        /* Get the length (in bytes) of the type. */
+        if ((ierr = PIOc_inq_type(file->fh, atttype, NULL, &typelen)))
+        {
+            check_netcdf(file, ierr, __FILE__, __LINE__);
+            return ierr;
+        }
+    }
+
+    /* If async is in use, and this is not an IO task, bcast the
+     * parameters and the attribute and type information we fetched. */
+    if (ios->async_interface)
+    {
+        if (!ios->ioproc)
+        {
+            int msg = PIO_MSG_GET_ATT;
+
+            /* Send the message to IO master. */
+            if (ios->compmaster)
+                mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            /* Send the function parameters. */
+            if (!mpierr)
+                mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            int namelen = strlen(name);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&file->iotype, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&atttype, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&attlen, 1, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->compmaster, ios->intercomm);
+        }
+
+        /* Handle MPI errors. */
+        if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
+            check_mpi(file, mpierr2, __FILE__, __LINE__);
+        if (mpierr)
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+
+        /* Broadcast values currently only known on computation tasks to IO tasks.
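+         * (attlen and typelen were computed above on the computation
+         * tasks; the IO tasks need them here to size the final data
+         * broadcast at the end of this function.)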
+         */
+        LOG((2, "PIOc_get_att bcast from comproot = %d attlen = %d typelen = %d", ios->comproot, attlen, typelen));
+        if ((mpierr = MPI_Bcast(&attlen, 1, MPI_OFFSET, ios->comproot, ios->my_comm)))
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+        if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->comproot, ios->my_comm)))
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+        LOG((2, "PIOc_get_att bcast complete attlen = %d typelen = %d", attlen, typelen));
+    }
+
+    /* If this is an IO task, then call the netCDF function. */
+    if (ios->ioproc)
+    {
+#ifdef _PNETCDF
+        if (file->iotype == PIO_IOTYPE_PNETCDF)
+            ierr = ncmpi_get_att(file->fh, varid, name, ip);
+#endif /* _PNETCDF */
+#ifdef _NETCDF
+        if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io)
+            ierr = nc_get_att(file->fh, varid, name, ip);
+#endif /* _NETCDF */
+    }
+
+    /* Broadcast and check the return code. */
+    if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+    {
+        check_mpi(file, mpierr, __FILE__, __LINE__);
+        return PIO_EIO;
+    }
+    check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    /* Broadcast results to all tasks. */
+    if (!ierr)
+    {
+        if ((mpierr = MPI_Bcast(ip, (int)attlen * typelen, MPI_BYTE, ios->ioroot,
+                                ios->my_comm)))
+        {
+            check_mpi(file, mpierr, __FILE__, __LINE__);
+            return PIO_EIO;
+        }
+    }
+    return ierr;
+}
+
+/**
+ * @ingroup PIOc_put_att
+ * The PIO-C interface for the NetCDF function nc_put_att.
+ *
+ * This routine is called collectively by all tasks in the communicator
+ * ios.union_comm. For more information on the underlying NetCDF command
+ * please read about this function in the NetCDF documentation at:
+ * http://www.unidata.ucar.edu/software/netcdf/docs/group__attributes.html
+ *
+ * @param ncid the ncid of the open file, obtained from
+ * PIOc_openfile() or PIOc_createfile().
+ * @param varid the variable ID.
+ * @param name the name of the attribute.
+ * @param xtype the netCDF type of the attribute.
+ * @param len the length of the attribute.
+ * @param op a pointer to the attribute data to write.
+ * @return PIO_NOERR for success, error code otherwise. See
+ * PIOc_Set_File_Error_Handling.
+ */
+int PIOc_put_att(int ncid, int varid, const char *name, nc_type xtype,
+                 PIO_Offset len, const void *op)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    file_desc_t *file;     /* Pointer to file information. */
+    PIO_Offset typelen;    /* Length (in bytes) of the type. */
+    int ierr = PIO_NOERR;  /* Return code from function calls. */
+    int mpierr = MPI_SUCCESS, mpierr2;  /* Return code from MPI function codes. */
+
+    LOG((1, "PIOc_put_att ncid = %d varid = %d name = %s", ncid, varid, name));
+
+    /* Find the info about this file. */
+    if (!(file = pio_get_file_from_id(ncid)))
+        return PIO_EBADID;
+    ios = file->iosystem;
+
+    /* Run these on all tasks if async is not in use, but only on
+     * non-IO tasks if async is in use. */
+    if (!ios->async_interface || !ios->ioproc)
+    {
+        /* Get the length (in bytes) of the type. */
+        if ((ierr = PIOc_inq_type(ncid, xtype, NULL, &typelen)))
+        {
+            check_netcdf(file, ierr, __FILE__, __LINE__);
+            return ierr;
+        }
+        LOG((2, "PIOc_put_att ncid = %d typelen = %d", ncid, typelen));
+    }
+
+    /* If async is in use, and this is not an IO task, bcast the parameters.
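+     * (The compmaster first sends the PIO_MSG_PUT_ATT tag to the IO
+     * root on union_comm; every argument, including the attribute data
+     * itself, is then broadcast to the IO tasks on the intercomm.)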
*/ + if (ios->async_interface) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_PUT_ATT; + + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + int namelen = strlen(name); + if (!mpierr) + mpierr = MPI_Bcast(&namelen, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)name, namelen + 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&xtype, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&len, 1, MPI_OFFSET, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)op, len * typelen, MPI_BYTE, ios->compmaster, + ios->intercomm); + LOG((2, "PIOc_put_att finished bcast ncid = %d varid = %d namelen = %d name = %s " + "len = %d typelen = %d", file->fh, varid, namelen, name, len, typelen)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + /* Broadcast values currently only known on computation tasks to IO tasks. */ + LOG((2, "PIOc_put_att bcast from comproot = %d typelen = %d", ios->comproot, typelen)); + if ((mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + } + + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + ierr = ncmpi_put_att(file->fh, varid, name, xtype, len, op); +#endif /* _PNETCDF */ +#ifdef _NETCDF + if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) + ierr = nc_put_att(file->fh, varid, name, xtype, (size_t)len, op); +#endif /* _NETCDF */ + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + { + check_mpi(file, mpierr, __FILE__, __LINE__); + return PIO_EIO; + } + check_netcdf(file, ierr, __FILE__, __LINE__); + + return ierr; +} + +/** + * @ingroup PIOc_get_att_double + * The PIO-C interface for the NetCDF function nc_get_att_double. + */ +int PIOc_get_att_double(int ncid, int varid, const char *name, double *ip) +{ + return PIOc_get_att(ncid, varid, name, (void *)ip); +} + +/** + * @ingroup PIOc_get_att_uchar + * The PIO-C interface for the NetCDF function nc_get_att_uchar. + */ +int PIOc_get_att_uchar (int ncid, int varid, const char *name, unsigned char *ip) +{ + return PIOc_get_att(ncid, varid, name, (void *)ip); +} + +/** + * @ingroup PIOc_get_att_ushort + * The PIO-C interface for the NetCDF function nc_get_att_ushort. + */ +int PIOc_get_att_ushort (int ncid, int varid, const char *name, unsigned short *ip) +{ + return PIOc_get_att(ncid, varid, name, (void *)ip); +} + +/** + * @ingroup PIOc_get_att_uint + * The PIO-C interface for the NetCDF function nc_get_att_uint. + */ +int PIOc_get_att_uint (int ncid, int varid, const char *name, unsigned int *ip) +{ + return PIOc_get_att(ncid, varid, name, (void *)ip); +} + +/** + * @ingroup PIOc_get_att_long + * The PIO-C interface for the NetCDF function nc_get_att_long. 
+ */
+int PIOc_get_att_long(int ncid, int varid, const char *name, long *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_ubyte
+ * The PIO-C interface for the NetCDF function nc_get_att_ubyte.
+ */
+int PIOc_get_att_ubyte(int ncid, int varid, const char *name, unsigned char *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_text
+ * The PIO-C interface for the NetCDF function nc_get_att_text.
+ */
+int PIOc_get_att_text(int ncid, int varid, const char *name, char *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_schar
+ * The PIO-C interface for the NetCDF function nc_get_att_schar.
+ */
+int PIOc_get_att_schar(int ncid, int varid, const char *name, signed char *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_ulonglong
+ * The PIO-C interface for the NetCDF function nc_get_att_ulonglong.
+ */
+int PIOc_get_att_ulonglong(int ncid, int varid, const char *name, unsigned long long *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_short
+ * The PIO-C interface for the NetCDF function nc_get_att_short.
+ */
+int PIOc_get_att_short(int ncid, int varid, const char *name, short *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_int
+ * The PIO-C interface for the NetCDF function nc_get_att_int.
+ */
+int PIOc_get_att_int(int ncid, int varid, const char *name, int *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_longlong
+ * The PIO-C interface for the NetCDF function nc_get_att_longlong.
+ */
+int PIOc_get_att_longlong(int ncid, int varid, const char *name, long long *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_get_att_float
+ * The PIO-C interface for the NetCDF function nc_get_att_float.
+ */
+int PIOc_get_att_float(int ncid, int varid, const char *name, float *ip)
+{
+    return PIOc_get_att(ncid, varid, name, (void *)ip);
+}
+
+/**
+ * @ingroup PIOc_put_att_schar
+ * The PIO-C interface for the NetCDF function nc_put_att_schar.
+ */
+int PIOc_put_att_schar(int ncid, int varid, const char *name, nc_type xtype,
+                       PIO_Offset len, const signed char *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_long
+ * The PIO-C interface for the NetCDF function nc_put_att_long.
+ */
+int PIOc_put_att_long(int ncid, int varid, const char *name, nc_type xtype,
+                      PIO_Offset len, const long *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_int
+ * The PIO-C interface for the NetCDF function nc_put_att_int.
+ */
+int PIOc_put_att_int(int ncid, int varid, const char *name, nc_type xtype,
+                     PIO_Offset len, const int *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_uchar
+ * The PIO-C interface for the NetCDF function nc_put_att_uchar.
+ */
+int PIOc_put_att_uchar(int ncid, int varid, const char *name, nc_type xtype,
+                       PIO_Offset len, const unsigned char *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_longlong
+ * The PIO-C interface for the NetCDF function nc_put_att_longlong.
+ */
+int PIOc_put_att_longlong(int ncid, int varid, const char *name, nc_type xtype,
+                          PIO_Offset len, const long long *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_uint
+ * The PIO-C interface for the NetCDF function nc_put_att_uint.
+ */
+int PIOc_put_att_uint(int ncid, int varid, const char *name, nc_type xtype,
+                      PIO_Offset len, const unsigned int *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_ubyte
+ * The PIO-C interface for the NetCDF function nc_put_att_ubyte.
+ */
+int PIOc_put_att_ubyte(int ncid, int varid, const char *name, nc_type xtype,
+                       PIO_Offset len, const unsigned char *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_float
+ * The PIO-C interface for the NetCDF function nc_put_att_float.
+ */
+int PIOc_put_att_float(int ncid, int varid, const char *name, nc_type xtype,
+                       PIO_Offset len, const float *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_ulonglong
+ * The PIO-C interface for the NetCDF function nc_put_att_ulonglong.
+ */
+int PIOc_put_att_ulonglong(int ncid, int varid, const char *name, nc_type xtype,
+                           PIO_Offset len, const unsigned long long *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_ushort
+ * The PIO-C interface for the NetCDF function nc_put_att_ushort.
+ */
+int PIOc_put_att_ushort(int ncid, int varid, const char *name, nc_type xtype,
+                        PIO_Offset len, const unsigned short *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_text
+ * The PIO-C interface for the NetCDF function nc_put_att_text.
+ */
+int PIOc_put_att_text(int ncid, int varid, const char *name,
+                      PIO_Offset len, const char *op)
+{
+    return PIOc_put_att(ncid, varid, name, NC_CHAR, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_short
+ * The PIO-C interface for the NetCDF function nc_put_att_short.
+ */
+int PIOc_put_att_short(int ncid, int varid, const char *name, nc_type xtype,
+                       PIO_Offset len, const short *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+/**
+ * @ingroup PIOc_put_att_double
+ * The PIO-C interface for the NetCDF function nc_put_att_double.
+ */
+int PIOc_put_att_double(int ncid, int varid, const char *name, nc_type xtype,
+                        PIO_Offset len, const double *op)
+{
+    return PIOc_put_att(ncid, varid, name, xtype, len, op);
+}
+
+
diff --git a/externals/pio2/src/clib/pio_put_nc_async.c b/externals/pio2/src/clib/pio_put_nc_async.c
new file mode 100644
index 00000000000..9ef0d63da12
--- /dev/null
+++ b/externals/pio2/src/clib/pio_put_nc_async.c
@@ -0,0 +1,988 @@
+/**
+ * @file
+ * PIO functions to write data.
+ *
+ * @author Ed Hartnett
+ * @date 2016
+ * @see http://code.google.com/p/parallelio/
+ */
+
+#include <config.h>
+#include <pio.h>
+#include <pio_internal.h>
+
+/**
+ * Internal PIO function which provides a type-neutral interface to
+ * nc_put_vars.
+ *
+ * Users should not call this function directly. Instead, call one of
+ * the derived functions, depending on the type of data you are
+ * writing: PIOc_put_vars_text(), PIOc_put_vars_uchar(),
+ * PIOc_put_vars_schar(), PIOc_put_vars_ushort(),
+ * PIOc_put_vars_short(), PIOc_put_vars_uint(), PIOc_put_vars_int(),
+ * PIOc_put_vars_long(), PIOc_put_vars_float(),
+ * PIOc_put_vars_longlong(), PIOc_put_vars_double(),
+ * PIOc_put_vars_ulonglong().
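+ *
+ * For example (illustrative only; a 1D variable with at least 8
+ * elements is assumed):
+ * @code
+ * PIO_Offset start[1] = {0}, count[1] = {4}, stride[1] = {2};
+ * int data[4] = {1, 2, 3, 4};
+ * PIOc_put_vars_int(ncid, varid, start, count, stride, data);
+ * @endcode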
+ *
+ * This routine is called collectively by all tasks in the
+ * communicator ios.union_comm.
+ *
+ * @param ncid identifies the netCDF file
+ * @param varid the variable ID number
+ * @param start an array of start indices (must have same number of
+ * entries as variable has dimensions). If NULL, indices of 0 will be
+ * used.
+ * @param count an array of counts (must have same number of entries
+ * as variable has dimensions). If NULL, counts matching the size of
+ * the variable will be used.
+ * @param stride an array of strides (must have same number of
+ * entries as variable has dimensions). If NULL, strides of 1 will be
+ * used.
+ * @param xtype the netCDF type of the data being passed in buf. Data
+ * will be automatically converted from this type to the type of the
+ * variable being written to.
+ * @param buf pointer to the data to be written.
+ *
+ * @return PIO_NOERR on success, error code otherwise.
+ * @private
+ */
+int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count,
+                     const PIO_Offset *stride, nc_type xtype, const void *buf)
+{
+    iosystem_desc_t *ios;  /* Pointer to io system information. */
+    file_desc_t *file;     /* Pointer to file information. */
+    int ierr = PIO_NOERR;  /* Return code from function calls. */
+    int mpierr = MPI_SUCCESS, mpierr2;  /* Return code from MPI function codes. */
+    int ndims;             /* The number of dimensions in the variable. */
+    PIO_Offset typelen;    /* Size (in bytes) of the data type of data in buf. */
+    PIO_Offset num_elem = 1;  /* Number of data elements in the buffer. */
+    char start_present = start ? true : false;    /* Is start non-NULL? */
+    char count_present = count ? true : false;    /* Is count non-NULL? */
+    char stride_present = stride ? true : false;  /* Is stride non-NULL? */
+    PIO_Offset *rstart, *rcount, *rstride;
+    var_desc_t *vdesc;
+    int *request;
+
+    LOG((1, "PIOc_put_vars_tc ncid = %d varid = %d start = %d count = %d "
+         "stride = %d xtype = %d", ncid, varid, start, count, stride, xtype));
+
+    /* User must provide some data. */
+    if (!buf)
+        return PIO_EINVAL;
+
+    /* Find the info about this file. */
+    if (!(file = pio_get_file_from_id(ncid)))
+        return PIO_EBADID;
+    ios = file->iosystem;
+
+    /* Run these on all tasks if async is not in use, but only on
+     * non-IO tasks if async is in use. */
+    if (!ios->async_interface || !ios->ioproc)
+    {
+        /* Get the number of dims for this var. */
+        if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims)))
+            return check_netcdf(file, ierr, __FILE__, __LINE__);
+
+        /* Get the length of the data type. */
+        if ((ierr = PIOc_inq_type(ncid, xtype, NULL, &typelen)))
+            return check_netcdf(file, ierr, __FILE__, __LINE__);
+
+        PIO_Offset dimlen[ndims];
+
+        /* If no count array was passed, we need to know the dimlens
+         * so we can calculate how many data elements are in the
+         * buf. */
+        if (!count)
+        {
+            int dimid[ndims];
+
+            /* Get the dimids for this var. */
+            if ((ierr = PIOc_inq_vardimid(ncid, varid, dimid)))
+                return check_netcdf(file, ierr, __FILE__, __LINE__);
+
+            /* Get the length of each dimension. */
+            for (int vd = 0; vd < ndims; vd++)
+                if ((ierr = PIOc_inq_dimlen(ncid, dimid[vd], &dimlen[vd])))
+                    return check_netcdf(file, ierr, __FILE__, __LINE__);
+        }
+
+        /* Allocate memory for these arrays, now that we know ndims. */
+        if (!(rstart = malloc(ndims * sizeof(PIO_Offset))))
+            return check_netcdf(file, PIO_ENOMEM, __FILE__, __LINE__);
+        if (!(rcount = malloc(ndims * sizeof(PIO_Offset))))
+            return check_netcdf(file, PIO_ENOMEM, __FILE__, __LINE__);
+        if (!(rstride = malloc(ndims * sizeof(PIO_Offset))))
+            return check_netcdf(file, PIO_ENOMEM, __FILE__, __LINE__);
+
+        /* Figure out the real start, count, and stride arrays. (The
+         * user may have passed in NULLs.) */
+        for (int vd = 0; vd < ndims; vd++)
+        {
+            rstart[vd] = start ? start[vd] : 0;
+            rcount[vd] = count ? count[vd] : dimlen[vd];
+            rstride[vd] = stride ? stride[vd] : 1;
+        }
+
+        /* How many elements in buf? (count gives the number of
+         * elements selected along each dimension, so the stride does
+         * not enter into this product.) */
+        for (int vd = 0; vd < ndims; vd++)
+            num_elem *= rcount[vd];
+        LOG((2, "PIOc_put_vars_tc num_elem = %d", num_elem));
+
+        /* Free these arrays; they were only needed to compute num_elem. */
+        free(rstart);
+        free(rcount);
+        free(rstride);
+    }
+
+    /* If async is in use, and this is not an IO task, bcast the parameters. */
+    if (ios->async_interface)
+    {
+        if (!ios->ioproc)
+        {
+            int msg = PIO_MSG_PUT_VARS;
+
+            if (ios->compmaster)
+                mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+
+            /* Send the function parameters and associated information
+             * to the msg handler. */
+            if (!mpierr)
+                mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr && start_present)
+                mpierr = MPI_Bcast((PIO_Offset *)start, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr && count_present)
+                mpierr = MPI_Bcast((PIO_Offset *)count, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm);
+            if (!mpierr && stride_present)
+                mpierr = MPI_Bcast((PIO_Offset *)stride, ndims, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&xtype, 1, MPI_INT, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&num_elem, 1, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            if (!mpierr)
+                mpierr = MPI_Bcast(&typelen, 1, MPI_OFFSET, ios->compmaster, ios->intercomm);
+            LOG((2, "PIOc_put_vars_tc ncid = %d varid = %d ndims = %d start_present = %d "
+                 "count_present = %d stride_present = %d xtype = %d num_elem = %d", ncid, varid,
+                 ndims, start_present, count_present, stride_present, xtype, num_elem));
+
+            /* Send the data. */
+            if (!mpierr)
+                mpierr = MPI_Bcast((void *)buf, num_elem * typelen, MPI_BYTE, ios->compmaster,
+                                   ios->intercomm);
+        }
+
+        /* Handle MPI errors. */
+        if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm)))
+            return check_mpi(file, mpierr2, __FILE__, __LINE__);
+        if (mpierr)
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+        LOG((2, "PIOc_put_vars_tc checked mpierr = %d", mpierr));
+
+        /* Broadcast values currently only known on computation tasks to IO tasks. */
+        LOG((2, "PIOc_put_vars_tc bcast from comproot"));
+        if ((mpierr = MPI_Bcast(&ndims, 1, MPI_INT, ios->comproot, ios->my_comm)))
+            return check_mpi(file, mpierr, __FILE__, __LINE__);
+        LOG((2, "PIOc_put_vars_tc complete bcast from comproot ndims = %d", ndims));
+    }
+
+    /* If this is an IO task, then call the netCDF function.
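+     * (For pnetcdf this is a buffered, non-blocking bput: a request is
+     * queued on the variable and completed later by
+     * flush_output_buffer().)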
*/ + if (ios->ioproc) + { +#ifdef _PNETCDF + if (file->iotype == PIO_IOTYPE_PNETCDF) + { + PIO_Offset *fake_stride; + + if (!stride_present) + { + LOG((2, "stride not present")); + if (!(fake_stride = malloc(ndims * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + for (int d = 0; d < ndims; d++) + fake_stride[d] = 1; + } + else + fake_stride = (PIO_Offset *)stride; + + LOG((2, "PIOc_put_vars_tc calling pnetcdf function")); + vdesc = file->varlist + varid; + if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0) + vdesc->request = realloc(vdesc->request, + sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK)); + request = vdesc->request + vdesc->nreqs; + LOG((2, "PIOc_put_vars_tc request = %d", vdesc->request)); + + /* Only the IO master actually does the call. */ + if (ios->iomaster) + { +/* LOG((2, "PIOc_put_vars_tc ncid = %d varid = %d start[0] = %d count[0] = %d fake_stride[0] = %d", + ncid, varid, start[0], count[0], fake_stride[0]));*/ + /* for (int d = 0; d < ndims; d++) */ + /* LOG((2, "start[%d] = %d count[%d] = %d stride[%d] = %d", d, start[d], d, count[d], d, stride[d])); */ + switch(xtype) + { + case NC_BYTE: + ierr = ncmpi_bput_vars_schar(ncid, varid, start, count, fake_stride, buf, request); + break; + case NC_CHAR: + ierr = ncmpi_bput_vars_text(ncid, varid, start, count, fake_stride, buf, request); + break; + case NC_SHORT: + ierr = ncmpi_bput_vars_short(ncid, varid, start, count, fake_stride, buf, request); + break; + case NC_INT: + LOG((2, "PIOc_put_vars_tc io_rank 0 doing pnetcdf for int")); + ierr = ncmpi_bput_vars_int(ncid, varid, start, count, fake_stride, buf, request); + LOG((2, "PIOc_put_vars_tc io_rank 0 done with pnetcdf call for int ierr = %d", ierr)); + break; + case NC_FLOAT: + ierr = ncmpi_bput_vars_float(ncid, varid, start, count, fake_stride, buf, request); + break; + case NC_DOUBLE: + ierr = ncmpi_bput_vars_double(ncid, varid, start, count, fake_stride, buf, request); + break; + case NC_INT64: + ierr = ncmpi_bput_vars_longlong(ncid, varid, start, count, fake_stride, buf, request); + break; + default: + LOG((0, "Unknown type for pnetcdf file! xtype = %d", xtype)); + } + LOG((2, "PIOc_put_vars_tc io_rank 0 done with pnetcdf call")); + } + else + *request = PIO_REQ_NULL; + + vdesc->nreqs++; + LOG((2, "PIOc_put_vars_tc flushing output buffer")); + flush_output_buffer(file, false, 0); + LOG((2, "PIOc_put_vars_tc flushed output buffer")); + + /* Free malloced resources. 
+             */
+            if (!stride_present)
+                free(fake_stride);
+        }
+#endif /* _PNETCDF */
+#ifdef _NETCDF
+        if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io)
+        {
+            LOG((2, "PIOc_put_vars_tc calling netcdf function file->iotype = %d",
+                 file->iotype));
+            switch (xtype)
+            {
+            case NC_BYTE:
+                ierr = nc_put_vars_schar(ncid, varid, (size_t *)start, (size_t *)count,
+                                         (ptrdiff_t *)stride, buf);
+                break;
+            case NC_CHAR:
+                ierr = nc_put_vars_text(ncid, varid, (size_t *)start, (size_t *)count,
+                                        (ptrdiff_t *)stride, buf);
+                break;
+            case NC_SHORT:
+                ierr = nc_put_vars_short(ncid, varid, (size_t *)start, (size_t *)count,
+                                         (ptrdiff_t *)stride, buf);
+                break;
+            case NC_INT:
+                ierr = nc_put_vars_int(ncid, varid, (size_t *)start, (size_t *)count,
+                                       (ptrdiff_t *)stride, buf);
+                break;
+            case NC_FLOAT:
+                ierr = nc_put_vars_float(ncid, varid, (size_t *)start, (size_t *)count,
+                                         (ptrdiff_t *)stride, buf);
+                break;
+            case NC_DOUBLE:
+                ierr = nc_put_vars_double(ncid, varid, (size_t *)start, (size_t *)count,
+                                          (ptrdiff_t *)stride, buf);
+                break;
+#ifdef _NETCDF4
+            case NC_UBYTE:
+                ierr = nc_put_vars_uchar(ncid, varid, (size_t *)start, (size_t *)count,
+                                         (ptrdiff_t *)stride, buf);
+                break;
+            case NC_USHORT:
+                ierr = nc_put_vars_ushort(ncid, varid, (size_t *)start, (size_t *)count,
+                                          (ptrdiff_t *)stride, buf);
+                break;
+            case NC_UINT:
+                ierr = nc_put_vars_uint(ncid, varid, (size_t *)start, (size_t *)count,
+                                        (ptrdiff_t *)stride, buf);
+                break;
+            case NC_INT64:
+                ierr = nc_put_vars_longlong(ncid, varid, (size_t *)start, (size_t *)count,
+                                            (ptrdiff_t *)stride, buf);
+                break;
+            case NC_UINT64:
+                ierr = nc_put_vars_ulonglong(ncid, varid, (size_t *)start, (size_t *)count,
+                                             (ptrdiff_t *)stride, buf);
+                break;
+            /* case NC_STRING: */
+            /*     ierr = nc_put_vars_string(ncid, varid, (size_t *)start, (size_t *)count, */
+            /*                               (ptrdiff_t *)stride, (void *)buf); */
+            /*     break; */
+            default:
+                ierr = nc_put_vars(ncid, varid, (size_t *)start, (size_t *)count,
+                                   (ptrdiff_t *)stride, buf);
+                break;
+#endif /* _NETCDF4 */
+            }
+        }
+#endif /* _NETCDF */
+    }
+
+    /* Broadcast and check the return code. */
+    LOG((2, "PIOc_put_vars_tc bcasting netcdf return code %d", ierr));
+    if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm)))
+        return check_mpi(file, mpierr, __FILE__, __LINE__);
+    if (ierr)
+        return check_netcdf(file, ierr, __FILE__, __LINE__);
+    LOG((2, "PIOc_put_vars_tc bcast netcdf return code %d complete", ierr));
+
+    return ierr;
+}
+
+/** Interface to netCDF data write function. */
+int PIOc_put_vars_text(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count,
+                       const PIO_Offset *stride, const char *op)
+{
+    return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_CHAR, op);
+}
+
+/** Interface to netCDF data write function. */
+int PIOc_put_vars_uchar(int ncid, int varid, const PIO_Offset *start,
+                        const PIO_Offset *count, const PIO_Offset *stride,
+                        const unsigned char *op)
+{
+    return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_UBYTE, op);
+}
+
+/** Interface to netCDF data write function. */
+int PIOc_put_vars_schar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count,
+                        const PIO_Offset *stride, const signed char *op)
+{
+    return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_BYTE, op);
+}
+
+/** Interface to netCDF data write function.
*/ +int PIOc_put_vars_ushort(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const unsigned short *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_USHORT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vars_short(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const PIO_Offset *stride, const short *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_SHORT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vars_uint(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const unsigned int *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_UINT, op); +} + +/** PIO interface to nc_put_vars_int */ +int PIOc_put_vars_int(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const int *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_INT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vars_long(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const long *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_INT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vars_float(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const float *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_FLOAT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vars_longlong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const long long *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_INT64, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vars_double(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const double *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_DOUBLE, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vars_ulonglong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, + const PIO_Offset *stride, const unsigned long long *op) +{ + return PIOc_put_vars_tc(ncid, varid, start, count, stride, NC_UINT64, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype, + const void *op) +{ + int ndims; + int ierr; + + /* Find the number of dimensions. */ + if ((ierr = PIOc_inq_varndims(ncid, varid, &ndims))) + return ierr; + + /* Set up count array. */ + PIO_Offset count[ndims]; + for (int c = 0; c < ndims; c++) + count[c] = 1; + + return PIOc_put_vars_tc(ncid, varid, index, count, NULL, xtype, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_text(int ncid, int varid, const PIO_Offset *index, const char *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_CHAR, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_uchar(int ncid, int varid, const PIO_Offset *index, + const unsigned char *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_UBYTE, op); +} + +/** Interface to netCDF data write function. 
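+ *
+ * Writes a single element; for instance (illustrative only):
+ * @code
+ * PIO_Offset index[2] = {0, 3};
+ * signed char val = 42;
+ * PIOc_put_var1_schar(ncid, varid, index, &val);
+ * @endcode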
*/ +int PIOc_put_var1_schar(int ncid, int varid, const PIO_Offset *index, + const signed char *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_BYTE, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_ushort(int ncid, int varid, const PIO_Offset *index, + const unsigned short *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_USHORT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_short(int ncid, int varid, const PIO_Offset *index, + const short *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_SHORT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_uint(int ncid, int varid, const PIO_Offset *index, + const unsigned int *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_UINT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_int(int ncid, int varid, const PIO_Offset *index, const int *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_INT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_float(int ncid, int varid, const PIO_Offset *index, const float *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_FLOAT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_long(int ncid, int varid, const PIO_Offset *index, const long *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_LONG, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_double(int ncid, int varid, const PIO_Offset *index, + const double *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_DOUBLE, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_ulonglong(int ncid, int varid, const PIO_Offset *index, + const unsigned long long *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_UINT64, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var1_longlong(int ncid, int varid, const PIO_Offset *index, + const long long *op) +{ + return PIOc_put_var1_tc(ncid, varid, index, NC_INT64, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_text(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const char *op) +{ + return PIOc_put_vars_text(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_uchar(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const unsigned char *op) +{ + return PIOc_put_vars_uchar(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_schar(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const signed char *op) +{ + return PIOc_put_vars_schar(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_ushort(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const unsigned short *op) +{ + return PIOc_put_vars_ushort(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_short(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const short *op) +{ + return PIOc_put_vars_short(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. 
*/ +int PIOc_put_vara_uint(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const unsigned int *op) +{ + return PIOc_put_vars_uint(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_int(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const int *op) +{ + return PIOc_put_vars_int(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_long(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const long *op) +{ + return PIOc_put_vars_long(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_float(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const float *op) +{ + return PIOc_put_vars_float(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_ulonglong(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const unsigned long long *op) +{ + return PIOc_put_vars_ulonglong(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_longlong(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const long long *op) +{ + return PIOc_put_vars_longlong(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_vara_double(int ncid, int varid, const PIO_Offset *start, + const PIO_Offset *count, const double *op) +{ + return PIOc_put_vars_double(ncid, varid, start, count, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_text(int ncid, int varid, const char *op) +{ + return PIOc_put_vars_text(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_uchar(int ncid, int varid, const unsigned char *op) +{ + return PIOc_put_vars_uchar(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_schar(int ncid, int varid, const signed char *op) +{ + return PIOc_put_vars_schar(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_ushort(int ncid, int varid, const unsigned short *op) +{ + return PIOc_put_vars_tc(ncid, varid, NULL, NULL, NULL, NC_USHORT, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_short(int ncid, int varid, const short *op) +{ + return PIOc_put_vars_short(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_uint(int ncid, int varid, const unsigned int *op) +{ + return PIOc_put_vars_uint(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_int(int ncid, int varid, const int *op) +{ + return PIOc_put_vars_int(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_long(int ncid, int varid, const long *op) +{ + return PIOc_put_vars_long(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. */ +int PIOc_put_var_float(int ncid, int varid, const float *op) +{ + return PIOc_put_vars_float(ncid, varid, NULL, NULL, NULL, op); +} + +/** Interface to netCDF data write function. 
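+ *
+ * Writes the whole variable in one call, e.g. (illustrative only;
+ * vals is assumed to match the variable's shape):
+ * @code
+ * unsigned long long vals[10] = {0};
+ * PIOc_put_var_ulonglong(ncid, varid, vals);
+ * @endcode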
+ */
+int PIOc_put_var_ulonglong(int ncid, int varid, const unsigned long long *op)
+{
+    return PIOc_put_vars_ulonglong(ncid, varid, NULL, NULL, NULL, op);
+}
+
+/** Interface to netCDF data write function. */
+int PIOc_put_var_longlong(int ncid, int varid, const long long *op)
+{
+    return PIOc_put_vars_longlong(ncid, varid, NULL, NULL, NULL, op);
+}
+
+/** Interface to netCDF data write function. */
+int PIOc_put_var_double(int ncid, int varid, const double *op)
+{
+    return PIOc_put_vars_double(ncid, varid, NULL, NULL, NULL, op);
+}
+
+/** Interface to netCDF data write function. */
+int PIOc_put_var(int ncid, int varid, const void *buf, PIO_Offset bufcount,
+                 MPI_Datatype buftype)
+{
+    int ierr;
+    int msg;
+    int mpierr;
+    iosystem_desc_t *ios;
+    file_desc_t *file;
+    var_desc_t *vdesc;
+    int *request;
+
+    ierr = PIO_NOERR;
+
+    file = pio_get_file_from_id(ncid);
+    if (file == NULL)
+        return PIO_EBADID;
+    ios = file->iosystem;
+    msg = PIO_MSG_PUT_VAR;
+
+    if (ios->async_interface && !ios->ioproc)
+    {
+        if (ios->compmaster)
+            mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+        mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm);
+    }
+
+    if (ios->ioproc)
+    {
+        switch (file->iotype)
+        {
+#ifdef _NETCDF
+#ifdef _NETCDF4
+        case PIO_IOTYPE_NETCDF4P:
+            ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE);
+            ierr = nc_put_var(file->fh, varid, buf);
+            break;
+        case PIO_IOTYPE_NETCDF4C:
+#endif
+        case PIO_IOTYPE_NETCDF:
+            if (ios->io_rank == 0)
+                ierr = nc_put_var(file->fh, varid, buf);
+            break;
+#endif
+#ifdef _PNETCDF
+        case PIO_IOTYPE_PNETCDF:
+            vdesc = file->varlist + varid;
+
+            if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0)
+                vdesc->request = realloc(vdesc->request,
+                                         sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
+            request = vdesc->request + vdesc->nreqs;
+
+            if (ios->io_rank == 0)
+                ierr = ncmpi_bput_var(file->fh, varid, buf, bufcount, buftype, request);
+            else
+                *request = PIO_REQ_NULL;
+            vdesc->nreqs++;
+            flush_output_buffer(file, false, 0);
+            break;
+#endif
+        default:
+            ierr = iotype_error(file->iotype, __FILE__, __LINE__);
+        }
+    }
+
+    ierr = check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+/**
+ * PIO interface to nc_put_vars.
+ *
+ * This routine is called collectively by all tasks in the
+ * communicator ios.union_comm.
+ *
+ * Refer to the netcdf documentation. */
+int PIOc_put_vars(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count,
+                  const PIO_Offset *stride, const void *buf, PIO_Offset bufcount,
+                  MPI_Datatype buftype)
+{
+    int ierr;
+    int msg;
+    int mpierr;
+    iosystem_desc_t *ios;
+    file_desc_t *file;
+    var_desc_t *vdesc;
+    int *request;
+
+    ierr = PIO_NOERR;
+
+    file = pio_get_file_from_id(ncid);
+    if (file == NULL)
+        return PIO_EBADID;
+    ios = file->iosystem;
+    msg = PIO_MSG_PUT_VARS;
+
+    if (ios->async_interface && !ios->ioproc)
+    {
+        if (ios->compmaster)
+            mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+        mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm);
+    }
+
+    if (ios->ioproc)
+    {
+        switch (file->iotype)
+        {
+#ifdef _NETCDF
+#ifdef _NETCDF4
+        case PIO_IOTYPE_NETCDF4P:
+            ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE);
+            ierr = nc_put_vars(file->fh, varid, (size_t *)start, (size_t *)count,
+                               (ptrdiff_t *)stride, buf);
+            break;
+        case PIO_IOTYPE_NETCDF4C:
+#endif
+        case PIO_IOTYPE_NETCDF:
+            if (ios->io_rank == 0)
+                ierr = nc_put_vars(file->fh, varid, (size_t *)start, (size_t *)count,
+                                   (ptrdiff_t *)stride, buf);
+            break;
+#endif
+#ifdef _PNETCDF
+        case PIO_IOTYPE_PNETCDF:
+            vdesc = file->varlist + varid;
+
+            if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0)
+                vdesc->request = realloc(vdesc->request,
+                                         sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
+            request = vdesc->request + vdesc->nreqs;
+
+            if (ios->io_rank == 0)
+                ierr = ncmpi_bput_vars(file->fh, varid, start, count, stride, buf,
+                                       bufcount, buftype, request);
+            else
+                *request = PIO_REQ_NULL;
+            vdesc->nreqs++;
+            flush_output_buffer(file, false, 0);
+            break;
+#endif
+        default:
+            ierr = iotype_error(file->iotype, __FILE__, __LINE__);
+        }
+    }
+
+    ierr = check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+/** Interface to netCDF data write function. */
+int PIOc_put_var1(int ncid, int varid, const PIO_Offset *index, const void *buf,
+                  PIO_Offset bufcount, MPI_Datatype buftype)
+{
+    int ierr;
+    int msg;
+    int mpierr;
+    iosystem_desc_t *ios;
+    file_desc_t *file;
+    var_desc_t *vdesc;
+    int *request;
+
+    ierr = PIO_NOERR;
+
+    file = pio_get_file_from_id(ncid);
+    if (file == NULL)
+        return PIO_EBADID;
+    ios = file->iosystem;
+    msg = PIO_MSG_PUT_VAR1;
+
+    if (ios->async_interface && !ios->ioproc)
+    {
+        if (ios->compmaster)
+            mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+        mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm);
+    }
+
+    if (ios->ioproc)
+    {
+        switch (file->iotype)
+        {
+#ifdef _NETCDF
+#ifdef _NETCDF4
+        case PIO_IOTYPE_NETCDF4P:
+            ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE);
+            ierr = nc_put_var1(file->fh, varid, (size_t *)index, buf);
+            break;
+        case PIO_IOTYPE_NETCDF4C:
+#endif
+        case PIO_IOTYPE_NETCDF:
+            if (ios->io_rank == 0)
+                ierr = nc_put_var1(file->fh, varid, (size_t *)index, buf);
+            break;
+#endif
+#ifdef _PNETCDF
+        case PIO_IOTYPE_PNETCDF:
+            vdesc = file->varlist + varid;
+
+            if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0)
+                vdesc->request = realloc(vdesc->request,
+                                         sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
+            request = vdesc->request + vdesc->nreqs;
+
+            if (ios->io_rank == 0)
+                ierr = ncmpi_bput_var1(file->fh, varid, index, buf, bufcount, buftype, request);
+            else
+                *request = PIO_REQ_NULL;
+            vdesc->nreqs++;
+            flush_output_buffer(file, false, 0);
+            break;
+#endif
+        default:
+            ierr = iotype_error(file->iotype, __FILE__, __LINE__);
+        }
+    }
+
+    ierr = check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+/** Interface to netCDF data write function.
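+ *
+ * For example (illustrative only; a 2D variable with at least a 2x3
+ * region is assumed):
+ * @code
+ * PIO_Offset start[2] = {0, 0}, count[2] = {2, 3};
+ * int data[6] = {0};
+ * PIOc_put_vara(ncid, varid, start, count, data, 6, MPI_INT);
+ * @endcode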
+ */
+int PIOc_put_vara(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count,
+                  const void *buf, PIO_Offset bufcount, MPI_Datatype buftype)
+{
+    int ierr;
+    int msg;
+    int mpierr;
+    iosystem_desc_t *ios;
+    file_desc_t *file;
+    var_desc_t *vdesc;
+    int *request;
+
+    ierr = PIO_NOERR;
+
+    file = pio_get_file_from_id(ncid);
+    if (file == NULL)
+        return PIO_EBADID;
+    ios = file->iosystem;
+    msg = PIO_MSG_PUT_VARA;
+
+    if (ios->async_interface && !ios->ioproc)
+    {
+        if (ios->compmaster)
+            mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+        mpierr = MPI_Bcast(&file->fh, 1, MPI_INT, ios->compmaster, ios->intercomm);
+    }
+
+    if (ios->ioproc)
+    {
+        switch (file->iotype)
+        {
+#ifdef _NETCDF
+#ifdef _NETCDF4
+        case PIO_IOTYPE_NETCDF4P:
+            ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE);
+            ierr = nc_put_vara(file->fh, varid, (size_t *)start, (size_t *)count, buf);
+            break;
+        case PIO_IOTYPE_NETCDF4C:
+#endif
+        case PIO_IOTYPE_NETCDF:
+            if (ios->io_rank == 0)
+                ierr = nc_put_vara(file->fh, varid, (size_t *)start, (size_t *)count, buf);
+            break;
+#endif
+#ifdef _PNETCDF
+        case PIO_IOTYPE_PNETCDF:
+            vdesc = file->varlist + varid;
+
+            if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0)
+                vdesc->request = realloc(vdesc->request,
+                                         sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
+            request = vdesc->request + vdesc->nreqs;
+
+            if (ios->io_rank == 0)
+                ierr = ncmpi_bput_vara(file->fh, varid, start, count, buf, bufcount, buftype, request);
+            else
+                *request = PIO_REQ_NULL;
+            vdesc->nreqs++;
+            flush_output_buffer(file, false, 0);
+            break;
+#endif
+        default:
+            ierr = iotype_error(file->iotype, __FILE__, __LINE__);
+        }
+    }
+
+    ierr = check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    return ierr;
+}
diff --git a/externals/pio2/src/clib/pio_varm.c b/externals/pio2/src/clib/pio_varm.c
new file mode 100644
index 00000000000..3110a4333f3
--- /dev/null
+++ b/externals/pio2/src/clib/pio_varm.c
@@ -0,0 +1,1990 @@
+#include <config.h>
+#include <pio.h>
+#include <pio_internal.h>
+
+///
+/// PIO interface to nc_put_varm
+///
+/// This routine is called collectively by all tasks in the communicator ios.union_comm.
+///
+/// Refer to the netcdf documentation.
+///
+int PIOc_put_varm(int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[],
+                  const PIO_Offset stride[], const PIO_Offset imap[], const void *buf,
+                  PIO_Offset bufcount, MPI_Datatype buftype)
+{
+    int ierr;
+    int msg;
+    iosystem_desc_t *ios;
+    file_desc_t *file;
+    var_desc_t *vdesc;
+    int *request;
+
+    ierr = PIO_NOERR;
+
+    file = pio_get_file_from_id(ncid);
+    if (file == NULL)
+        return PIO_EBADID;
+    ios = file->iosystem;
+    msg = PIO_MSG_PUT_VARM;
+
+    /* Sorry, but varm functions are not supported by the async interface.
+     */
+    if (ios->async_interface)
+        return PIO_EINVAL;
+
+    if (ios->ioproc)
+    {
+        switch (file->iotype)
+        {
+#ifdef _NETCDF
+#ifdef _NETCDF4
+        case PIO_IOTYPE_NETCDF4P:
+            ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE);
+            ierr = nc_put_varm(file->fh, varid, (size_t *)start, (size_t *)count,
+                               (ptrdiff_t *)stride, (ptrdiff_t *)imap, buf);
+            break;
+        case PIO_IOTYPE_NETCDF4C:
+#endif
+        case PIO_IOTYPE_NETCDF:
+            if (ios->io_rank == 0)
+                ierr = nc_put_varm(file->fh, varid, (size_t *)start, (size_t *)count,
+                                   (ptrdiff_t *)stride, (ptrdiff_t *)imap, buf);
+            break;
+#endif
+#ifdef _PNETCDF
+        case PIO_IOTYPE_PNETCDF:
+            vdesc = file->varlist + varid;
+
+            if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0)
+                vdesc->request = realloc(vdesc->request,
+                                         sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
+            request = vdesc->request + vdesc->nreqs;
+
+            if (ios->io_rank == 0)
+                ierr = ncmpi_bput_varm(file->fh, varid, start, count, stride, imap, buf,
+                                       bufcount, buftype, request);
+            else
+                *request = PIO_REQ_NULL;
+            vdesc->nreqs++;
+            flush_output_buffer(file, false, 0);
+            break;
+#endif
+        default:
+            ierr = iotype_error(file->iotype, __FILE__, __LINE__);
+        }
+    }
+
+    ierr = check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+///
+/// PIO interface to nc_put_varm_uchar
+///
+/// This routine is called collectively by all tasks in the communicator ios.union_comm.
+///
+/// Refer to the netcdf documentation.
+///
+int PIOc_put_varm_uchar(int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[],
+                        const PIO_Offset stride[], const PIO_Offset imap[],
+                        const unsigned char *op)
+{
+    int ierr;
+    int msg;
+    iosystem_desc_t *ios;
+    file_desc_t *file;
+    var_desc_t *vdesc;
+    int *request;
+
+    ierr = PIO_NOERR;
+
+    file = pio_get_file_from_id(ncid);
+    if (file == NULL)
+        return PIO_EBADID;
+    ios = file->iosystem;
+    msg = PIO_MSG_PUT_VARM_UCHAR;
+
+    /* Sorry, but varm functions are not supported by the async interface. */
+    if (ios->async_interface)
+        return PIO_EINVAL;
+
+    if (ios->ioproc)
+    {
+        switch (file->iotype)
+        {
+#ifdef _NETCDF
+#ifdef _NETCDF4
+        case PIO_IOTYPE_NETCDF4P:
+            ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE);
+            ierr = nc_put_varm_uchar(file->fh, varid, (size_t *)start, (size_t *)count,
+                                     (ptrdiff_t *)stride, (ptrdiff_t *)imap, op);
+            break;
+        case PIO_IOTYPE_NETCDF4C:
+#endif
+        case PIO_IOTYPE_NETCDF:
+            if (ios->io_rank == 0)
+                ierr = nc_put_varm_uchar(file->fh, varid, (size_t *)start, (size_t *)count,
+                                         (ptrdiff_t *)stride, (ptrdiff_t *)imap, op);
+            break;
+#endif
+#ifdef _PNETCDF
+        case PIO_IOTYPE_PNETCDF:
+            vdesc = file->varlist + varid;
+
+            if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0)
+                vdesc->request = realloc(vdesc->request,
+                                         sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
+            request = vdesc->request + vdesc->nreqs;
+
+            if (ios->io_rank == 0)
+                ierr = ncmpi_bput_varm_uchar(file->fh, varid, start, count, stride, imap, op, request);
+            else
+                *request = PIO_REQ_NULL;
+            vdesc->nreqs++;
+            flush_output_buffer(file, false, 0);
+            break;
+#endif
+        default:
+            ierr = iotype_error(file->iotype, __FILE__, __LINE__);
+        }
+    }
+
+    ierr = check_netcdf(file, ierr, __FILE__, __LINE__);
+
+    return ierr;
+}
+
+///
+/// PIO interface to nc_put_varm_short
+///
+/// This routine is called collectively by all tasks in the communicator ios.union_comm.
+///
+/// Refer to the netcdf documentation.
+/// +int PIOc_put_varm_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const short *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_SHORT; + + /* Sorry, but varm functions are not supported by the async interface. */ + if(ios->async_interface) + return PIO_EINVAL; + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_short(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_short(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_short(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} +/// +/// PIO interface to nc_put_varm_text +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. +/// +int PIOc_put_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const char *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_TEXT; + + /* Sorry, but varm functions are not supported by the async interface. 
*/ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_text(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_text(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_text(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} + +/// +/// PIO interface to nc_put_varm_ushort +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. +/// +int PIOc_put_varm_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned short *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_USHORT; + + /* Sorry, but varm functions are not supported by the async interface. */ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_ushort(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_ushort(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_ushort(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} + +/// +/// PIO interface to nc_put_varm_ulonglong +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. 
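/// For every varm variant, imap gives the spacing, in elements, between
/// successive values along each file dimension; a contiguous row-major
/// buffer for a [NROWS][NCOLS] variable corresponds to imap = {NCOLS, 1}.
/// A sketch of the implied source index for the 2-D case (editorial
/// restatement of the netcdf convention, not library code):
/// @code
/// static size_t varm_source_index(const PIO_Offset *imap, size_t i0, size_t i1)
/// {
///     /* element of the in-memory buffer mapped to file position (i0, i1) */
///     return (size_t)(i0 * imap[0] + i1 * imap[1]);
/// }
/// @endcode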
+/// +int PIOc_put_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned long long *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_ULONGLONG; + + /* Sorry, but varm functions are not supported by the async interface. */ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_ulonglong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_ulonglong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_ulonglong(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} +/// +/// PIO interface to nc_put_varm_int +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. +/// +int PIOc_put_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const int *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_INT; + + /* Sorry, but varm functions are not supported by the async interface. 
*/ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_int(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_int(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_int(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} + +/// +/// PIO interface to nc_put_varm_float +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. +/// +int PIOc_put_varm_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const float *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_FLOAT; + + /* Sorry, but varm functions are not supported by the async interface. */ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_float(file->fh, varid,(size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_float(file->fh, varid,(size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_float(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} +/// +/// PIO interface to nc_put_varm_long +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. 
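/// The PNETCDF branch in each of these functions queues a nonblocking
/// buffered write, growing the per-variable request array in
/// PIO_REQUEST_ALLOC_CHUNK-sized steps. A condensed restatement of that
/// bookkeeping (names as in the function bodies; the unchecked realloc
/// mirrors the surrounding code):
/// @code
/// if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0)
///     vdesc->request = realloc(vdesc->request,
///                              sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK));
/// request = vdesc->request + vdesc->nreqs;  /* slot for this write */
/// /* the IO root queues ncmpi_bput_varm_long(); other ranks store PIO_REQ_NULL */
/// vdesc->nreqs++;
/// flush_output_buffer(file, false, 0);      /* may complete queued requests */
/// @endcode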
+/// +int PIOc_put_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const long *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_LONG; + + /* Sorry, but varm functions are not supported by the async interface. */ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_long(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_long(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_long(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} + +/// +/// PIO interface to nc_put_varm_uint +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. +/// +int PIOc_put_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const unsigned int *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_UINT; + + /* Sorry, but varm functions are not supported by the async interface. 
*/ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_uint(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_uint(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_uint(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} + +/// +/// PIO interface to nc_put_varm_double +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. +/// +int PIOc_put_varm_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const double *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_DOUBLE; + + /* Sorry, but varm functions are not supported by the async interface. */ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_double(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_double(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_double(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} +/// +/// PIO interface to nc_put_varm_schar +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. 
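/// Because every varm entry point returns PIO_EINVAL when the async
/// interface is active, callers that may run in async mode need a guard.
/// A hypothetical fallback (it assumes the caller can supply the data
/// already arranged in file order, making the vara call equivalent):
/// @code
/// int ret = PIOc_put_varm_schar(ncid, varid, start, count, stride, imap, op);
/// if (ret == PIO_EINVAL)  /* async mode: mapped writes unsupported */
///     ret = PIOc_put_vara_schar(ncid, varid, start, count, op_in_file_order);
/// @endcode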
+/// +int PIOc_put_varm_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const signed char *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_SCHAR; + + /* Sorry, but varm functions are not supported by the async interface. */ + if(ios->async_interface) + return PIO_EINVAL; + + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_schar(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_schar(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_schar(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} + +/// +/// PIO interface to nc_put_varm_longlong +/// +/// This routine is called collectively by all tasks in the communicator ios.union_comm. +/// +/// Refer to the netcdf documentation. +/// +int PIOc_put_varm_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], const long long *op) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + var_desc_t *vdesc; + PIO_Offset usage; + int *request; + + ierr = PIO_NOERR; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_PUT_VARM_LONGLONG; + + /* Sorry, but varm functions are not supported by the async interface. 
*/ + if(ios->async_interface) + return PIO_EINVAL; + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); + ierr = nc_put_varm_longlong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + if(ios->io_rank==0){ + ierr = nc_put_varm_longlong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, op);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: + vdesc = file->varlist + varid; + + if(vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ + vdesc->request = realloc(vdesc->request, + sizeof(int)*(vdesc->nreqs+PIO_REQUEST_ALLOC_CHUNK)); + } + request = vdesc->request+vdesc->nreqs; + + if(ios->io_rank==0){ + ierr = ncmpi_bput_varm_longlong(file->fh, varid, start, count, stride, imap, op, request);; + }else{ + *request = PIO_REQ_NULL; + } + vdesc->nreqs++; + flush_output_buffer(file, false, 0); + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + return ierr; +} + +int PIOc_get_varm_uchar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned char *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_UCHAR; + ibuftype = MPI_UNSIGNED_CHAR; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_uchar(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_uchar(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_uchar(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_uchar_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_schar (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], signed char *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_SCHAR; + ibuftype = MPI_CHAR; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_schar(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_schar(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_schar(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_schar_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_double (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], double *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_DOUBLE; + ibuftype = MPI_DOUBLE; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_double(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_double(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_double(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_double_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], char *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_TEXT; + ibuftype = MPI_CHAR; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_text(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_text(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_text(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_text_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], int *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_INT; + ibuftype = MPI_INT; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_int(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_int(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_int(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_int_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned int *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_UINT; + ibuftype = MPI_UNSIGNED; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_uint(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_uint(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_uint(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_uint_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], void *buf, PIO_Offset bufcount, MPI_Datatype buftype) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM; + ibufcnt = bufcount; + ibuftype = buftype; + ierr = PIO_NOERR; + + if(ios->async_interface && ! 
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm(file->fh, varid, start, count, stride, imap, buf, bufcount, buftype);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_all(file->fh, varid, start, count, stride, imap, buf, bufcount, buftype);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_float (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], float *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_FLOAT; + ibuftype = MPI_FLOAT; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_float(file->fh, varid,(size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_float(file->fh, varid,(size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_float(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_float_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], long *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_LONG; + ibuftype = MPI_LONG; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_long(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_long(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_long(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_long_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_ushort (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned short *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_USHORT; + ibuftype = MPI_UNSIGNED_SHORT; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_ushort(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_ushort(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_ushort(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_ushort_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_longlong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], long long *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_LONGLONG; + ibuftype = MPI_LONG_LONG; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_longlong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_longlong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_longlong(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_longlong_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_short (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], short *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_SHORT; + ibuftype = MPI_SHORT; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_short(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_short(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_short(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_short_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + +int PIOc_get_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], const PIO_Offset count[], const PIO_Offset stride[], const PIO_Offset imap[], unsigned long long *buf) +{ + int ierr; + int msg; + int mpierr; + iosystem_desc_t *ios; + file_desc_t *file; + MPI_Datatype ibuftype; + int ndims; + int ibufcnt; + bool bcast = false; + + file = pio_get_file_from_id(ncid); + if(file == NULL) + return PIO_EBADID; + ios = file->iosystem; + msg = PIO_MSG_GET_VARM_ULONGLONG; + ibuftype = MPI_UNSIGNED_LONG_LONG; + ierr = PIOc_inq_varndims(file->fh, varid, &ndims); + ibufcnt = 1; + for(int i=0;i<ndims;i++){ + ibufcnt *= count[i]; + } + ierr = PIO_NOERR; + + if(ios->async_interface && !
ios->ioproc){ + if(ios->compmaster) + mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); + mpierr = MPI_Bcast(&(file->fh),1, MPI_INT, 0, ios->intercomm); + } + + + if(ios->ioproc){ + switch(file->iotype){ +#ifdef _NETCDF +#ifdef _NETCDF4 + case PIO_IOTYPE_NETCDF4P: + ierr = nc_get_varm_ulonglong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + break; + case PIO_IOTYPE_NETCDF4C: +#endif + case PIO_IOTYPE_NETCDF: + bcast = true; + if(ios->iomaster){ + ierr = nc_get_varm_ulonglong(file->fh, varid, (size_t *) start, (size_t *) count, (ptrdiff_t *) stride, (ptrdiff_t *) imap, buf);; + } + break; +#endif +#ifdef _PNETCDF + case PIO_IOTYPE_PNETCDF: +#ifdef PNET_READ_AND_BCAST + ncmpi_begin_indep_data(file->fh); + if(ios->iomaster){ + ierr = ncmpi_get_varm_ulonglong(file->fh, varid, start, count, stride, imap, buf);; + }; + ncmpi_end_indep_data(file->fh); + bcast=true; +#else + ierr = ncmpi_get_varm_ulonglong_all(file->fh, varid, start, count, stride, imap, buf);; +#endif + break; +#endif + default: + ierr = iotype_error(file->iotype,__FILE__,__LINE__); + } + } + + ierr = check_netcdf(file, ierr, __FILE__,__LINE__); + + if(ios->async_interface || bcast || + (ios->num_iotasks < ios->num_comptasks)){ + MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm); + } + + return ierr; +} + diff --git a/externals/pio2/src/clib/pioc.c b/externals/pio2/src/clib/pioc.c index 17983d298c7..97afd948977 100644 --- a/externals/pio2/src/clib/pioc.c +++ b/externals/pio2/src/clib/pioc.c @@ -1,17 +1,16 @@ /** - * @file + * @file * @author Jim Edwards * @date 2014 - * @brief PIO C interface + * @brief PIO C interface * * @see http://code.google.com/p/parallelio/ */ - +#include <config.h> #include <pio.h> #include <pio_internal.h> - static int counter=0; /** @@ -23,7 +22,7 @@ int PIOc_iosystem_is_active(const int iosysid, bool *active) ios = pio_get_iosystem_from_id(iosysid); if(ios == NULL) return PIO_EBADID; - + if(ios->comp_comm == MPI_COMM_NULL && ios->io_comm == MPI_COMM_NULL){ *active = false; }else{ @@ -46,7 +45,7 @@ int PIOc_File_is_Open(int ncid) } /** - ** @brief Set the error handling method to be used for subsequent + ** @brief Set the error handling method to be used for subsequent ** pio library calls, returns the previous method setting */ int PIOc_Set_File_Error_Handling(int ncid, int method) @@ -57,7 +56,7 @@ oldmethod = file->iosystem->error_handler; file->iosystem->error_handler = method; return(oldmethod); -} +} /** ** @brief Increment the unlimited dimension of the given variable @@ -72,12 +71,12 @@ file->varlist[varid].record++; return(PIO_NOERR); -} +} /** - * @ingroup PIO_setframe + * @ingroup PIO_setframe * @brief Set the unlimited dimension of the given variable - * + * * @param ncid the ncid of the file. * @param varid the varid of the variable * @param frame the value of the unlimited dimension. In c 0 for the @@ -96,7 +95,7 @@ file->varlist[varid].record = frame; return(PIO_NOERR); -} +} /** ** @brief Get the number of IO tasks set.
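The PIOc_get_varm_* family above shares one read-side pattern: size the transfer as the product of count[], let one rank or the IO subset perform the read, then broadcast whenever some compute task did not read. A minimal sketch restating it (all names are from the functions above; the count values in the comment are hypothetical):

```c
/* e.g. count = {2, 3, 4} gives ibufcnt = 24 elements of ibuftype */
ibufcnt = 1;
for (int i = 0; i < ndims; i++)
    ibufcnt *= count[i];
/* ... read via nc_get_varm_* or ncmpi_get_varm_*_all ... */
if (ios->async_interface || bcast || (ios->num_iotasks < ios->num_comptasks))
    MPI_Bcast(buf, ibufcnt, ibuftype, ios->ioroot, ios->my_comm);
```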
@@ -156,11 +155,11 @@ int PIOc_get_local_array_size(int ioid) fprintf(stderr,"%s %d Error setting eh method\n",__FILE__,__LINE__); print_trace(stderr); return PIO_EBADID; - } + } oldmethod = ios->error_handler; ios->error_handler = method; return(oldmethod); -} +} /** ** @ingroup PIO_initdecomp @@ -176,10 +175,8 @@ int PIOc_get_local_array_size(int ioid) ** @param iostart An optional array of start values for block cyclic decompositions (optional input) ** @param iocount An optional array of count values for block cyclic decompositions (optional input) */ - - -int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const int dims[], - const int maplen, const PIO_Offset *compmap, int *ioidp,const int *rearranger, +int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const int dims[], + const int maplen, const PIO_Offset *compmap, int *ioidp,const int *rearranger, const PIO_Offset *iostart,const PIO_Offset *iocount) { iosystem_desc_t *ios; @@ -189,8 +186,6 @@ int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const int iosize; int ndisp; - - for(int i=0;inum_comptasks,ndims,counter); } - PIOc_writemap(filename,ndims,dims,maplen,compmap,ios->comp_comm); + PIOc_writemap(filename,ndims,dims,maplen, (PIO_Offset *)compmap,ios->comp_comm); counter++; } @@ -220,29 +215,29 @@ int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const iodesc->rearranger = ios->default_rearranger; else iodesc->rearranger = *rearranger; - + if(iodesc->rearranger==PIO_REARR_SUBSET){ - if((iostart != NULL) && (iocount != NULL)){ + if((iostart != NULL) && (iocount != NULL)){ fprintf(stderr,"%s %s\n","Iostart and iocount arguments to PIOc_InitDecomp", "are incompatable with subset rearrange method and will be ignored"); } iodesc->num_aiotasks = ios->num_iotasks; ierr = subset_rearrange_create( *ios, maplen, compmap, dims, ndims, iodesc); - }else{ + }else{ if(ios->ioproc){ - // Unless the user specifies the start and count for each IO task compute it. - if((iostart != NULL) && (iocount != NULL)){ + // Unless the user specifies the start and count for each IO task compute it. 
+ if((iostart != NULL) && (iocount != NULL)){ // printf("iocount[0] = %ld %ld\n",iocount[0], iocount); iodesc->maxiobuflen=1; for(int i=0;ifirstregion->start[i] = iostart[i]; iodesc->firstregion->count[i] = iocount[i]; compute_maxIObuffersize(ios->io_comm, iodesc); - + } iodesc->num_aiotasks = ios->num_iotasks; }else{ - iodesc->num_aiotasks = CalcStartandCount(basetype, ndims, dims, + iodesc->num_aiotasks = CalcStartandCount(basetype, ndims, dims, ios->num_iotasks, ios->io_rank, iodesc->firstregion->start, iodesc->firstregion->count); } @@ -253,7 +248,7 @@ int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const CheckMPIReturn(MPI_Bcast(&(iodesc->num_aiotasks), 1, MPI_INT, ios->ioroot, ios->my_comm),__FILE__,__LINE__); // Compute the communications pattern for this decomposition - if(iodesc->rearranger==PIO_REARR_BOX){ + if(iodesc->rearranger==PIO_REARR_BOX){ ierr = box_rearrange_create( *ios, maplen, compmap, dims, ndims, iodesc); } /* @@ -271,21 +266,19 @@ int PIOc_InitDecomp(const int iosysid, const int basetype,const int ndims, const *ioidp = pio_add_to_iodesc_list(iodesc); performance_tune_rearranger(*ios, iodesc); - + return PIO_NOERR; } /** ** @ingroup PIO_initdecomp - ** This is a simplified initdecomp which can be used if the memory order of the data can be + ** This is a simplified initdecomp which can be used if the memory order of the data can be ** expressed in terms of start and count on the file. ** in this case we compute the compdof and use the subset rearranger */ - - -int PIOc_InitDecomp_bc(const int iosysid, const int basetype,const int ndims, const int dims[], +int PIOc_InitDecomp_bc(const int iosysid, const int basetype,const int ndims, const int dims[], const long int start[], const long int count[], int *ioidp) - + { iosystem_desc_t *ios; io_desc_t *iodesc; @@ -294,7 +287,7 @@ int PIOc_InitDecomp_bc(const int iosysid, const int basetype,const int ndims, co int iosize; int ndisp; - + for(int i=0;i=0;n--){ - prod[n]=prod[n+1]*dims[n+1]; + prod[n]=prod[n+1]*dims[n+1]; loc[n]=0; } for(i=0;iunion_comm = comp_comm; - iosys->comp_comm = comp_comm; - iosys->my_comm = comp_comm; - iosys->io_comm = MPI_COMM_NULL; - iosys->intercomm = MPI_COMM_NULL; - iosys->error_handler = PIO_INTERNAL_ERROR; - iosys->async_interface= false; - iosys->compmaster = false; - iosys->iomaster = false; - iosys->ioproc = false; - iosys->default_rearranger = rearr; - iosys->num_iotasks = num_iotasks; - - ustride = stride; - - CheckMPIReturn(MPI_Comm_rank(comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__); - CheckMPIReturn(MPI_Comm_size(comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__); - if(iosys->comp_rank==0) - iosys->compmaster = true; - -#ifdef BGQxxx - lbase = base; - determineiotasks(comp_comm, &(iosys->num_iotasks), &lbase, &stride, &rearr, &(iosys->ioproc)); - if(iosys->comp_rank==0) - printf("%s %d %d\n",__FILE__,__LINE__,iosys->num_iotasks); - if(iosys->ioproc) - printf("%s %d %d\n",__FILE__,__LINE__,iosys->comp_rank); - -#else - if((iosys->num_comptasks == 1) && (num_iotasks*ustride > 1)) { - // This is a serial run with a bad configuration. Set up a single task. 
- fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n"); - iosys->num_iotasks = 1; - ustride = 1; - } - if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks)){ - fprintf(stderr, "PIO_TP PIOc_Init_Intracomm error\n"); - fprintf(stderr, "num_iotasks=%d, ustride=%d, num_comptasks=%d\n", num_iotasks, ustride, iosys->num_comptasks); - return PIO_EBADID; - } - iosys->ioranks = (int *) calloc(sizeof(int), iosys->num_iotasks); - for(int i=0;i< iosys->num_iotasks; i++){ - iosys->ioranks[i] = (base + i*ustride) % iosys->num_comptasks; - if(iosys->ioranks[i] == iosys->comp_rank) - iosys->ioproc = true; + /* Copy the computation communicator into union_comm. */ + mpierr = MPI_Comm_dup(comp_comm, &iosys->union_comm); + CheckMPIReturn(mpierr, __FILE__, __LINE__); + if (mpierr) + ierr = PIO_EIO; + + /* Copy the computation communicator into comp_comm. */ + if (!ierr) + { + mpierr = MPI_Comm_dup(comp_comm, &iosys->comp_comm); + CheckMPIReturn(mpierr, __FILE__, __LINE__); + if (mpierr) + ierr = PIO_EIO; } - iosys->ioroot = iosys->ioranks[0]; -#endif - CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__); - iosys->info = MPI_INFO_NULL; + if (!ierr) + { + iosys->my_comm = iosys->comp_comm; + iosys->io_comm = MPI_COMM_NULL; + iosys->intercomm = MPI_COMM_NULL; + iosys->error_handler = PIO_INTERNAL_ERROR; + iosys->async_interface= false; + iosys->compmaster = 0; + iosys->iomaster = 0; + iosys->ioproc = false; + iosys->default_rearranger = rearr; + iosys->num_iotasks = num_iotasks; + + ustride = stride; + + /* Find MPI rank and number of tasks in comp_comm communicator. */ + CheckMPIReturn(MPI_Comm_rank(iosys->comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__); + CheckMPIReturn(MPI_Comm_size(iosys->comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__); + if(iosys->comp_rank==0) + iosys->compmaster = MPI_ROOT; + + /* Ensure that settings for number of computation tasks, number + * of IO tasks, and the stride are reasonable. */ + if((iosys->num_comptasks == 1) && (num_iotasks*ustride > 1)) { + // This is a serial run with a bad configuration. Set up a single task. + fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n"); + iosys->num_iotasks = 1; + ustride = 1; + } + if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks)){ + fprintf(stderr, "PIO_TP PIOc_Init_Intracomm error\n"); + fprintf(stderr, "num_iotasks=%d, ustride=%d, num_comptasks=%d\n", num_iotasks, ustride, iosys->num_comptasks); + return PIO_EBADID; + } - if(iosys->comp_rank == iosys->ioranks[0]) - iosys->iomaster = true; + /* Create an array that holds the ranks of the tasks to be used for IO. */ + iosys->ioranks = (int *) calloc(sizeof(int), iosys->num_iotasks); + for(int i=0;i< iosys->num_iotasks; i++){ + iosys->ioranks[i] = (base + i*ustride) % iosys->num_comptasks; + if(iosys->ioranks[i] == iosys->comp_rank) + iosys->ioproc = true; + } + iosys->ioroot = iosys->ioranks[0]; - CheckMPIReturn(MPI_Comm_group(comp_comm, &(iosys->compgroup)),__FILE__,__LINE__); + /* Create an MPI info object. 
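The info object is the channel through which PIOc_set_hint(), further below, passes MPI-IO hints; as an illustration only, a caller might set a ROMIO hint with PIOc_set_hint(iosysid, "romio_ds_write", "disable") -- hint names are MPI-implementation specific and are not defined by PIO.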
*/ + CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__); + iosys->info = MPI_INFO_NULL; - CheckMPIReturn(MPI_Group_incl(iosys->compgroup, iosys->num_iotasks, iosys->ioranks, - &(iosys->iogroup)),__FILE__,__LINE__); + if(iosys->comp_rank == iosys->ioranks[0]) + iosys->iomaster = MPI_ROOT; - CheckMPIReturn(MPI_Comm_create(comp_comm, iosys->iogroup, &(iosys->io_comm)),__FILE__,__LINE__); - if(iosys->ioproc) - CheckMPIReturn(MPI_Comm_rank(iosys->io_comm, &(iosys->io_rank)),__FILE__,__LINE__); - else - iosys->io_rank = -1; + /* Create a group for the computation tasks. */ + CheckMPIReturn(MPI_Comm_group(iosys->comp_comm, &(iosys->compgroup)),__FILE__,__LINE__); + + /* Create a group for the IO tasks. */ + CheckMPIReturn(MPI_Group_incl(iosys->compgroup, iosys->num_iotasks, iosys->ioranks, + &(iosys->iogroup)),__FILE__,__LINE__); - iosys->union_rank = iosys->comp_rank; + /* Create an MPI communicator for the IO tasks. */ + CheckMPIReturn(MPI_Comm_create(iosys->comp_comm, iosys->iogroup, &(iosys->io_comm)) + ,__FILE__,__LINE__); - *iosysidp = pio_add_to_iosystem_list(iosys); + /* For the tasks that are doing IO, get their rank. */ + if (iosys->ioproc) + CheckMPIReturn(MPI_Comm_rank(iosys->io_comm, &(iosys->io_rank)),__FILE__,__LINE__); + else + iosys->io_rank = -1; - pio_get_env(); + iosys->union_rank = iosys->comp_rank; - /* allocate buffer space for compute nodes */ - compute_buffer_init(*iosys); + /* Add this iosys struct to the list in the PIO library. */ + *iosysidp = pio_add_to_iosystem_list(iosys); - return PIO_NOERR; + pio_get_env(); + + /* allocate buffer space for compute nodes */ + compute_buffer_init(*iosys); + } + + return ierr; } /** - ** @internal + ** @internal ** interface to call from pio_init from fortran ** @endinternal */ -int PIOc_Init_Intracomm_from_F90(int f90_comp_comm, - const int num_iotasks, const int stride, +int PIOc_Init_Intracomm_from_F90(int f90_comp_comm, + const int num_iotasks, const int stride, const int base, const int rearr, int *iosysidp){ return PIOc_Init_Intracomm(MPI_Comm_f2c(f90_comp_comm), num_iotasks, stride,base,rearr, iosysidp); } - + /** - ** @brief Send a hint to the MPI-IO library + ** @brief Send a hint to the MPI-IO library ** */ int PIOc_set_hint(const int iosysid, char hint[], const char hintval[]) @@ -469,8 +497,9 @@ int PIOc_set_hint(const int iosysid, char hint[], const char hintval[]) } -/** @ingroup PIO_finalize - * @brief Clean up data structures and exit the pio library. +/** @ingroup PIO_finalize + * Clean up internal data structures, free MPI resources, and exit the + * pio library. * * @param iosysid: the io system ID provided by PIOc_Init_Intracomm(). * @@ -480,44 +509,57 @@ int PIOc_set_hint(const int iosysid, char hint[], const char hintval[]) int PIOc_finalize(const int iosysid) { iosystem_desc_t *ios, *nios; + int msg; + int mpierr; ios = pio_get_iosystem_from_id(iosysid); if(ios == NULL) return PIO_EBADID; - /* FIXME: The memory for ioranks is allocated in C only for intracomms - * Remove this check once mem allocs for ioranks completely moves to the - * C code - */ - if(ios->intercomm == MPI_COMM_NULL){ - if(ios->ioranks != NULL){ - free(ios->ioranks); - } + + /* If asynch IO is in use, send the PIO_MSG_EXIT message from the + * comp master to the IO processes. */ + if (ios->async_interface && !ios->comp_rank) + { + msg = PIO_MSG_EXIT; + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + CheckMPIReturn(mpierr, __FILE__, __LINE__); } + /* Free this memory that was allocated in init_intracomm. 
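[The rewritten PIOc_Init_Intracomm() above follows the standard MPI recipe for carving an IO communicator out of the computation communicator: build the full group, include every ustride-th rank, and create the subset communicator, after checking that the requested layout fits. A self-contained sketch of the same sequence; make_io_comm and iotask_layout_ok are hypothetical names, not PIO API.]

    #include <mpi.h>
    #include <stdbool.h>
    #include <stdlib.h>

    /* The validity rule enforced above: at least one IO task, and tasks
     * placed at intervals of 'stride' must fit inside the computation
     * communicator. */
    static bool iotask_layout_ok(int num_comptasks, int num_iotasks, int stride)
    {
        return num_iotasks >= 1 && num_iotasks * stride <= num_comptasks;
    }

    /* Carve an IO communicator out of comp_comm with the same
     * group-subset sequence used above. */
    int make_io_comm(MPI_Comm comp_comm, int num_iotasks, int base, int ustride,
                     MPI_Comm *io_comm)
    {
        int ntasks;
        MPI_Comm_size(comp_comm, &ntasks);
        if (!iotask_layout_ok(ntasks, num_iotasks, ustride))
            return -1;   /* the real code reports PIO_EBADID here */

        /* Every ustride-th rank starting at base, wrapping around. */
        int *ioranks = malloc(num_iotasks * sizeof(int));
        for (int i = 0; i < num_iotasks; i++)
            ioranks[i] = (base + i * ustride) % ntasks;

        /* Group of all computation tasks, then the IO subset of it. */
        MPI_Group compgroup, iogroup;
        MPI_Comm_group(comp_comm, &compgroup);
        MPI_Group_incl(compgroup, num_iotasks, ioranks, &iogroup);

        /* Collective over comp_comm; ranks outside the group receive
         * MPI_COMM_NULL. */
        MPI_Comm_create(comp_comm, iogroup, io_comm);

        MPI_Group_free(&compgroup);
        MPI_Group_free(&iogroup);
        free(ioranks);
        return 0;
    }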
*/ + if (ios->ioranks) + free(ios->ioranks); + + /* Free the buffer pool. */ free_cn_buffer_pool(*ios); /* Free the MPI groups. */ - MPI_Group_free(&(ios->compgroup)); - MPI_Group_free(&(ios->iogroup)); + if (ios->compgroup != MPI_GROUP_NULL) + MPI_Group_free(&ios->compgroup); - /* Free the MPI communicators. */ + if (ios->iogroup != MPI_GROUP_NULL) + MPI_Group_free(&(ios->iogroup)); + + /* Free the MPI communicators. my_comm is just a copy (but not an + * MPI copy), so does not have to have an MPI_Comm_free() call. */ if(ios->intercomm != MPI_COMM_NULL){ MPI_Comm_free(&(ios->intercomm)); } if(ios->io_comm != MPI_COMM_NULL){ MPI_Comm_free(&(ios->io_comm)); } - /* if(ios->comp_comm != MPI_COMM_NULL){ */ - /* MPI_Comm_free(&(ios->comp_comm)); */ - /* } */ + if(ios->comp_comm != MPI_COMM_NULL){ + MPI_Comm_free(&(ios->comp_comm)); + } + if(ios->union_comm != MPI_COMM_NULL){ + MPI_Comm_free(&(ios->union_comm)); + } + /* Delete the iosystem_desc_t data associated with this id. */ return pio_delete_iosystem_from_list(iosysid); - - } /** - ** @brief return a logical indicating whether this task is an iotask + ** @brief return a logical indicating whether this task is an iotask */ int PIOc_iam_iotask(const int iosysid, bool *ioproc) { @@ -525,7 +567,7 @@ int PIOc_iam_iotask(const int iosysid, bool *ioproc) ios = pio_get_iosystem_from_id(iosysid); if(ios == NULL) return PIO_EBADID; - + *ioproc = ios->ioproc; return PIO_NOERR; } @@ -544,7 +586,7 @@ int PIOc_iotask_rank(const int iosysid, int *iorank) *iorank = ios->io_rank; return PIO_NOERR; - + } /** diff --git a/externals/pio2/src/clib/pioc_support.c b/externals/pio2/src/clib/pioc_support.c index c06c48d62e4..c60f4047a49 100644 --- a/externals/pio2/src/clib/pioc_support.c +++ b/externals/pio2/src/clib/pioc_support.c @@ -1,12 +1,133 @@ -/** @file +/** @file * Support functions. */ +#include +#if PIO_ENABLE_LOGGING +#include +#include +#endif /* PIO_ENABLE_LOGGING */ #include #include #include #define versno 2001 +#if PIO_ENABLE_LOGGING +int pio_log_level = 0; +int my_rank; +#endif /* PIO_ENABLE_LOGGING */ + +/** Return a string description of an error code. If zero is passed, a + * null is returned. + * + * @param pioerr the error code returned by a PIO function call. + * @param errmsg Pointer that will get the error message. It will be + * PIO_MAX_NAME chars or less. + * + * @return 0 on success + */ +int +PIOc_strerror(int pioerr, char *errmsg) +{ + + /* System error? */ + if(pioerr > 0) + { + const char *cp = (const char *)strerror(pioerr); + if (cp) + strncpy(errmsg, cp, PIO_MAX_NAME); + else + strcpy(errmsg, "Unknown Error"); + } + else if (pioerr == PIO_NOERR) + { + strcpy(errmsg, "No error"); + } + else if (pioerr <= NC2_ERR && pioerr >= NC4_LAST_ERROR) /* NetCDF error? */ + { +#if defined( _PNETCDF) || defined(_NETCDF) + strncpy(errmsg, nc_strerror(pioerr), NC_MAX_NAME); +#else /* defined( _PNETCDF) || defined(_NETCDF) */ + strcpy(errmsg, "NetCDF error code, PIO not built with netCDF."); +#endif /* defined( _PNETCDF) || defined(_NETCDF) */ + } + else + { + /* Handle PIO errors. */ + switch(pioerr) { + case PIO_EBADIOTYPE: + strcpy(errmsg, "Bad IO type"); + break; + default: + strcpy(errmsg, "unknown PIO error"); + } + } + + return PIO_NOERR; +} + +/** Set the logging level. Set to -1 for nothing, 0 for errors only, 1 + * for important logging, and so on. Log levels below 1 are only + * printed on the io/component root. If the library is not built with + * logging, this function does nothing. 
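[A hedged usage sketch for the two new calls documented above. report() and troubleshoot() are hypothetical wrappers; the buffer size follows the PIO_MAX_NAME guarantee stated in the PIOc_strerror() comment.]

    #include <stdio.h>
    #include <pio.h>

    /* Turn a nonzero PIO return code into a message on stderr. */
    static void report(int ret)
    {
        if (ret != PIO_NOERR)
        {
            char errmsg[PIO_MAX_NAME + 1];   /* PIOc_strerror() writes at most PIO_MAX_NAME chars */
            PIOc_strerror(ret, errmsg);
            fprintf(stderr, "PIO error %d: %s\n", ret, errmsg);
        }
    }

    /* Bracket a problem area with verbose logging, then drop back to
     * errors only.  Both calls are no-ops unless PIO was built with
     * PIO_ENABLE_LOGGING. */
    static void troubleshoot(void)
    {
        PIOc_set_log_level(3);
        /* ... the calls under investigation ... */
        PIOc_set_log_level(0);
    }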
*/ +int PIOc_set_log_level(int level) +{ +#if PIO_ENABLE_LOGGING + printf("setting log level to %d\n", level); + pio_log_level = level; + MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); +#endif /* PIO_ENABLE_LOGGING */ + return PIO_NOERR; +} + +#if PIO_ENABLE_LOGGING +/** This function prints out a message, if the severity of the message + is lower than the global pio_log_level. To use it, do something + like this: + + pio_log(0, "this computer will explode in %d seconds", i); + + After the first arg (the severity), use the rest like a normal + printf statement. Output will appear on stdout. + This function is heavily based on the function in section 15.5 of + the C FAQ. +*/ +void +pio_log(int severity, const char *fmt, ...) +{ + va_list argp; + int t; + + /* If the severity is greater than the log level, we don't print + this message. */ + if (severity > pio_log_level) + return; + + /* If the severity is 0, only print on rank 0. */ + if (severity < 1 && my_rank != 0) + return; + + /* If the severity is zero, this is an error. Otherwise insert that + many tabs before the message. */ + if (!severity) + fprintf(stdout, "ERROR: "); + for (t = 0; t < severity; t++) + fprintf(stdout, "\t"); + + /* Show the rank. */ + fprintf(stdout, "%d ", my_rank); + + /* Print out the variable list of args with vprintf. */ + va_start(argp, fmt); + vfprintf(stdout, fmt, argp); + va_end(argp); + + /* Put on a final linefeed. */ + fprintf(stdout, "\n"); + fflush(stdout); +} +#endif /* PIO_ENABLE_LOGGING */ + static pio_swapm_defaults swapm_defaults; bool PIO_Save_Decomps=false; /** @@ -18,7 +139,7 @@ void pio_get_env(void) char *envptr; extern bufsize PIO_CNBUFFER_LIMIT; envptr = getenv("PIO_Save_Decomps"); - + if(envptr != NULL && (strcmp(envptr,"true")==0)){ PIO_Save_Decomps=true; } @@ -29,7 +150,7 @@ void pio_get_env(void) envptr = getenv("PIO_SWAPM"); if(envptr != NULL){ char *token = strtok(envptr, ":"); - + swapm_defaults.nreqs = atoi(token); token = strtok(NULL, ":"); @@ -38,7 +159,7 @@ void pio_get_env(void) swapm_defaults.handshake = true; } token = strtok(NULL, ":"); - + if((token!=NULL) && strcmp(token,"t")==0){ swapm_defaults.isend = true; } @@ -53,15 +174,15 @@ void pio_get_env(void) mult = 1000; } PIO_CNBUFFER_LIMIT=(bufsize) atoll(envptr)*mult; - + } - + } - + /* Obtain a backtrace and print it to stderr. */ void print_trace (FILE *fp) { @@ -69,18 +190,18 @@ void print_trace (FILE *fp) size_t size; char **strings; size_t i; - + if(fp==NULL) fp = stderr; size = backtrace (array, 10); strings = backtrace_symbols (array, size); - + fprintf (fp,"Obtained %zd stack frames.\n", size); - + for (i = 0; i < size; i++) fprintf (fp,"%s\n", strings[i]); - + free (strings); } @@ -94,7 +215,7 @@ void piomemerror(iosystem_desc_t ios, size_t req, char *fname, const int line){ void piodie(const char *msg,const char *fname, const int line){ fprintf(stderr,"Abort with message %s in file %s at line %d\n",msg,fname,line); - + print_trace(stderr); #ifdef MPI_SERIAL abort(); @@ -110,17 +231,46 @@ void pioassert(_Bool expression, const char *msg, const char *fname, const int l if(! expression){ piodie(msg,fname,line); } -#endif +#endif } -/** Check the result of a netCDF API call. - * +/** Handle MPI errors. An error message is sent to stderr, then the + check_netcdf() function is called with PIO_EIO. + + @param file pointer to the file_desc_t info + @param mpierr the MPI return code to handle + @param filename the name of the code file where error occured. + @param line the line of code where error occured. 
+ @return PIO_NOERR for no error, otherwise PIO_EIO. + */ +int check_mpi(file_desc_t *file, const int mpierr, const char *filename, + const int line) +{ + if (mpierr) + { + char errstring[MPI_MAX_ERROR_STRING]; + int errstrlen; + + /* If we can get an error string from MPI, print it to stderr. */ + if (!MPI_Error_string(mpierr, errstring, &errstrlen)) + fprintf(stderr, "MPI ERROR: %s in file %s at line %d\n", + errstring, filename, line); + + /* Handle all MPI errors as PIO_EIO. */ + check_netcdf(file, PIO_EIO, filename, line); + return PIO_EIO; + } + return PIO_NOERR; +} + +/** Check the result of a netCDF API call. + * * @param file pointer to the PIO structure describing this file. * @param status the return value from the netCDF call. - * @param fname the name of the code file. - * @param line the line number of the netCDF call in the code. - * + * @param fname the name of the code file. + * @param line the line number of the netCDF call in the code. + * * @return the error code */ int check_netcdf(file_desc_t *file, int status, const char *fname, const int line){ @@ -139,11 +289,11 @@ int check_netcdf(file_desc_t *file, int status, const char *fname, const int lin case PIO_IOTYPE_NETCDF: if(ios->iomaster){ if(status != NC_NOERR && (ios->error_handler == PIO_INTERNAL_ERROR)) - piodie(nc_strerror(status),fname,line); + piodie(nc_strerror(status),fname,line); // fprintf(stderr,"NETCDF ERROR: %s %s %d\n",nc_strerror(status),fname,line); } if(ios->error_handler == PIO_INTERNAL_ERROR){ - if(status != NC_NOERR) + if(status != NC_NOERR) MPI_Abort(MPI_COMM_WORLD,status); // abort }else if(ios->error_handler==PIO_BCAST_ERROR){ @@ -164,7 +314,7 @@ int check_netcdf(file_desc_t *file, int status, const char *fname, const int lin #endif default: ierr = iotype_error(file->iotype,__FILE__,__LINE__); - } + } return status; } @@ -199,7 +349,7 @@ io_desc_t *malloc_iodesc(const int piotype, const int ndims) fprintf(stderr,"ERROR: allocation error \n"); switch(piotype){ - case PIO_REAL: + case PIO_REAL: iodesc->basetype=MPI_FLOAT; break; case PIO_DOUBLE: @@ -208,11 +358,11 @@ io_desc_t *malloc_iodesc(const int piotype, const int ndims) case PIO_CHAR: iodesc->basetype=MPI_CHAR; break; - case PIO_INT: + case PIO_INT: default: iodesc->basetype = MPI_INTEGER; break; - } + } iodesc->rearranger = 0; iodesc->maxregions=1; iodesc->rfrom = NULL; @@ -251,7 +401,7 @@ void free_region_list(io_region *top) brel(ptr->count); tptr=ptr; ptr=ptr->next; - brel(tptr); + brel(tptr); } } @@ -314,7 +464,7 @@ int PIOc_freedecomp(int iosysid, int ioid) int PIOc_readmap(const char file[], int *ndims, int *gdims[], PIO_Offset *fmaplen, PIO_Offset *map[], const MPI_Comm comm) { - int npes, myrank; + int npes, myrank; int rnpes, rversno; int j; int *tdims; @@ -324,12 +474,12 @@ int PIOc_readmap(const char file[], int *ndims, int *gdims[], PIO_Offset *fmaple MPI_Comm_size(comm, &npes); MPI_Comm_rank(comm, &myrank); - + if(myrank == 0) { FILE *fp = fopen(file, "r"); if(fp==NULL) piodie("Failed to open dof file",__FILE__,__LINE__); - + fscanf(fp,"version %d npes %d ndims %d\n",&rversno, &rnpes,ndims); if(rversno != versno) @@ -354,7 +504,7 @@ int PIOc_readmap(const char file[], int *ndims, int *gdims[], PIO_Offset *fmaple tmap = (PIO_Offset *) malloc(maplen*sizeof(PIO_Offset)); for(j=0;j0){ MPI_Send(&maplen, 1, PIO_OFFSET, i, i+npes, comm); MPI_Send(tmap, maplen, PIO_OFFSET, i, i, comm); @@ -381,7 +531,7 @@ int PIOc_readmap(const char file[], int *ndims, int *gdims[], PIO_Offset *fmaple maplen=0; } *fmaplen = maplen; - } + } *gdims = 
tdims; return PIO_NOERR; } @@ -429,11 +579,11 @@ int PIOc_writemap(const char file[], const int ndims, const int gdims[], PIO_Off for(i=0;i put_att, & PIO_get_att => get_att diff --git a/externals/pio2/src/flib/pio_nf.F90 b/externals/pio2/src/flib/pio_nf.F90 index 5c798f834ec..4db50a2a51d 100644 --- a/externals/pio2/src/flib/pio_nf.F90 +++ b/externals/pio2/src/flib/pio_nf.F90 @@ -36,7 +36,9 @@ module pio_nf pio_get_chunk_cache , & pio_set_var_chunk_cache , & pio_get_var_chunk_cache , & - pio_redef + pio_redef , & + pio_set_log_level , & + pio_strerror ! pio_copy_att to be done interface pio_def_var @@ -180,13 +182,24 @@ module pio_nf module procedure & enddef_desc , & enddef_id - end interface + end interface pio_enddef + interface pio_redef module procedure & redef_desc , & redef_id end interface + interface pio_set_log_level + module procedure & + set_log_level + end interface pio_set_log_level + + interface pio_strerror + module procedure & + strerror + end interface pio_strerror + interface pio_inquire module procedure & inquire_desc , & @@ -648,18 +661,69 @@ integer function redef_desc(File) result(ierr) type (File_desc_t) , intent(inout) :: File ierr = redef_id(file%fh) end function redef_desc + +!> +!! @defgroup PIO_set_log_level +!< +!> +!! @ingroup PIO_set_log_level +!! Sets the logging level. Only takes effect if PIO was built with +!! PIO_ENABLE_LOGGING=On +!! +!! @param log_level the logging level. +!! @retval ierr @copydoc error_return +!< + integer function set_log_level(log_level) result(ierr) + integer, intent(in) :: log_level + interface + integer(C_INT) function PIOc_set_log_level(log_level) & + bind(C, name="PIOc_set_log_level") + use iso_c_binding + integer(C_INT), value :: log_level + end function PIOc_set_log_level + end interface + ierr = PIOc_set_log_level(log_level) + end function set_log_level + + !> + !! @defgroup PIO_strerror + !< + !> + !! @ingroup PIO_strerror + !! Returns a descriptive string for an error code. + !! + !! @param errcode the error code + !! @retval a description of the error + !< + integer function strerror(errcode, errmsg) result(ierr) + integer, intent(in) :: errcode + character(len=*), intent(out) :: errmsg + interface + integer(C_INT) function PIOc_strerror(errcode, errmsg) & + bind(C, name="PIOc_strerror") + use iso_c_binding + integer(C_INT), value :: errcode + character(C_CHAR) :: errmsg(*) + end function PIOc_strerror + end interface + errmsg = C_NULL_CHAR + ierr = PIOc_strerror(errcode, errmsg) + call replace_c_null(errmsg) + + end function strerror + !> !! @public !! @ingroup PIO_redef !! @brief Wrapper for the C function \ref PIOc_redef . !< integer function redef_id(ncid) result(ierr) - integer ,intent(in) :: ncid + integer, intent(in) :: ncid interface integer(C_INT) function PIOc_redef(ncid) & - bind(C ,name="PIOc_redef") + bind(C, name="PIOc_redef") use iso_c_binding - integer(C_INT) , value :: ncid + integer(C_INT), value :: ncid end function PIOc_redef end interface ierr = PIOc_redef(ncid) @@ -1633,7 +1697,7 @@ end function set_chunk_cache !> !! @public -!! @ingroup PIO_set_chunk_cache +!! @ingroup PIO_get_chunk_cache !! @brief Gets current settings for chunk cache (only relevant for netCDF4/HDF5 files.) !< integer function get_chunk_cache(iosysid, iotype, chunk_cache_size, chunk_cache_nelems, & @@ -1663,7 +1727,7 @@ end function get_chunk_cache !> !! @public -!! @ingroup PIO_set_chunk_cache +!! @ingroup PIO_set_var_chunk_cache !! @brief Changes chunk cache settings for a variable in a netCDF-4/HDF5 file. 
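[PIOc_readmap()/PIOc_writemap(), shown above, persist a decomposition map through a plain text file. A single-rank round-trip sketch; PIOc_writemap()'s trailing arguments are cut off in the hunk header, so they are assumed here to mirror PIOc_readmap(), and the map values are hypothetical.]

    #include <stdio.h>
    #include <mpi.h>
    #include <pio.h>

    /* Write this rank's map to a text file and read it back. */
    int dof_roundtrip(void)
    {
        int gdims[1] = {12};
        PIO_Offset map[3] = {1, 2, 3};   /* hypothetical: this rank owns offsets 1..3 */
        int ret;

        if ((ret = PIOc_writemap("dof.txt", 1, gdims, 3, map, MPI_COMM_WORLD)))
            return ret;

        int rndims, *rgdims;
        PIO_Offset rmaplen, *rmap;
        if ((ret = PIOc_readmap("dof.txt", &rndims, &rgdims, &rmaplen, &rmap, MPI_COMM_WORLD)))
            return ret;

        printf("ndims=%d maplen=%lld\n", rndims, (long long)rmaplen);
        return PIO_NOERR;
    }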
!< integer function set_var_chunk_cache_id(file, varid, chunk_cache_size, & diff --git a/externals/pio2/src/gptl/perf_mod.F90 b/externals/pio2/src/gptl/perf_mod.F90 index a4e25cc1f4d..e62059de98e 100644 --- a/externals/pio2/src/gptl/perf_mod.F90 +++ b/externals/pio2/src/gptl/perf_mod.F90 @@ -1,14 +1,14 @@ module perf_mod -!----------------------------------------------------------------------- -! +!----------------------------------------------------------------------- +! ! Purpose: This module is responsible for controlling the performance ! timer logic. -! +! ! Author: P. Worley, January 2007 ! ! $Id$ -! +! !----------------------------------------------------------------------- !----------------------------------------------------------------------- @@ -30,7 +30,7 @@ module perf_mod !----------------------------------------------------------------------- implicit none private ! Make the default access private - save + !----------------------------------------------------------------------- ! Public interfaces ---------------------------------------------------- @@ -62,7 +62,7 @@ module perf_mod !----------------------------------------------------------------------- !- include statements -------------------------------------------------- !----------------------------------------------------------------------- -#include +#include #include "gptl.inc" !----------------------------------------------------------------------- @@ -93,7 +93,7 @@ module perf_mod integer, parameter :: def_timer_depth_limit = 99999 ! default integer, private :: timer_depth_limit = def_timer_depth_limit ! integer indicating maximum number of levels of - ! timer nesting + ! timer nesting integer, parameter :: def_timing_detail_limit = 1 ! default integer, private :: timing_detail_limit = def_timing_detail_limit @@ -111,19 +111,19 @@ module perf_mod logical, parameter :: def_perf_single_file = .false. ! default logical, private :: perf_single_file = def_perf_single_file ! flag indicating whether the performance timer - ! output should be written to a single file - ! (per component communicator) or to a + ! output should be written to a single file + ! (per component communicator) or to a ! separate file for each process integer, parameter :: def_perf_outpe_num = 0 ! default integer, private :: perf_outpe_num = def_perf_outpe_num - ! maximum number of processes writing out + ! maximum number of processes writing out ! timing data (for this component communicator) integer, parameter :: def_perf_outpe_stride = 1 ! default integer, private :: perf_outpe_stride = def_perf_outpe_stride ! separation between process ids for processes - ! that are writing out timing data + ! that are writing out timing data ! (for this component communicator) logical, parameter :: def_perf_global_stats = .true. ! default @@ -176,9 +176,9 @@ module perf_mod !======================================================================== ! subroutine t_getLogUnit(LogUnit) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Get log unit number. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! @@ -193,9 +193,9 @@ end subroutine t_getLogUnit !======================================================================== ! 
subroutine t_setLogUnit(LogUnit) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Set log unit number. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! @@ -222,9 +222,9 @@ subroutine perf_defaultopts(timing_disable_out, & perf_single_file_out, & perf_global_stats_out, & perf_papi_enable_out ) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Return default runtime options -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! timers disable/enable option @@ -296,16 +296,16 @@ subroutine perf_setopts(mastertask, & perf_single_file_in, & perf_global_stats_in, & perf_papi_enable_in ) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Set runtime options -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments---------------------------- ! ! master process? logical, intent(in) :: mastertask ! Print out to log file? - logical, intent(IN) :: LogPrint + logical, intent(IN) :: LogPrint ! timers disable/enable option logical, intent(in), optional :: timing_disable_in ! performance timer option @@ -337,7 +337,7 @@ subroutine perf_setopts(mastertask, & timing_disable = timing_disable_in if (timing_disable) then ierr = GPTLdisable() - else + else ierr = GPTLenable() endif endif @@ -392,17 +392,17 @@ subroutine perf_setopts(mastertask, & endif ! if (mastertask .and. LogPrint) then - write(p_logunit,*) '(t_initf) Using profile_disable=', timing_disable, & + write(p_logunit,*) '(t_initf) Using profile_disable=', timing_disable, & ' profile_timer=', perf_timer - write(p_logunit,*) '(t_initf) profile_depth_limit=', timer_depth_limit, & + write(p_logunit,*) '(t_initf) profile_depth_limit=', timer_depth_limit, & ' profile_detail_limit=', timing_detail_limit write(p_logunit,*) '(t_initf) profile_barrier=', timing_barrier, & ' profile_outpe_num=', perf_outpe_num write(p_logunit,*) '(t_initf) profile_outpe_stride=', perf_outpe_stride , & ' profile_single_file=', perf_single_file write(p_logunit,*) '(t_initf) profile_global_stats=', perf_global_stats , & - ' profile_papi_enable=', perf_papi_enable - endif + ' profile_papi_enable=', perf_papi_enable + endif ! #ifdef DEBUG else @@ -420,9 +420,9 @@ subroutine papi_defaultopts(papi_ctr1_out, & papi_ctr2_out, & papi_ctr3_out, & papi_ctr4_out ) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Return default runtime PAPI counter options -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! 
PAPI counter option #1 @@ -456,9 +456,9 @@ subroutine papi_setopts(papi_ctr1_in, & papi_ctr2_in, & papi_ctr3_in, & papi_ctr4_in ) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Set runtime PAPI counter options -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments---------------------------- ! @@ -518,12 +518,12 @@ end subroutine papi_setopts !======================================================================== ! logical function t_profile_onf() -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Return flag indicating whether profiling is currently active. ! Part of workaround to implement FVbarrierclock before ! communicators exposed in Pilgrim. Does not check level of ! event nesting. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- if ((.not. timing_initialized) .or. & @@ -539,10 +539,10 @@ end function t_profile_onf !======================================================================== ! logical function t_barrier_onf() -!----------------------------------------------------------------------- -! Purpose: Return timing_barrier. Part of workaround to implement -! FVbarrierclock before communicators exposed in Pilgrim. -! Author: P. Worley +!----------------------------------------------------------------------- +! Purpose: Return timing_barrier. Part of workaround to implement +! FVbarrierclock before communicators exposed in Pilgrim. +! Author: P. Worley !----------------------------------------------------------------------- t_barrier_onf = timing_barrier @@ -552,10 +552,10 @@ end function t_barrier_onf !======================================================================== ! logical function t_single_filef() -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Return perf_single_file. Used to control output of other ! performance data, only spmdstats currently. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- t_single_filef = perf_single_file @@ -565,9 +565,9 @@ end function t_single_filef !======================================================================== ! subroutine t_stampf(wall, usr, sys) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Record wallclock, user, and system times (seconds). -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Output arguments----------------------------- ! @@ -596,14 +596,14 @@ end subroutine t_stampf !======================================================================== ! subroutine t_startf(event, handle) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Start an event timer -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! ! 
performance timer event name - character(len=*), intent(in) :: event + character(len=*), intent(in) :: event ! !---------------------------Input/Output arguments---------------------- ! @@ -634,14 +634,14 @@ end subroutine t_startf !======================================================================== ! subroutine t_stopf(event, handle) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Stop an event timer -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! ! performance timer event name - character(len=*), intent(in) :: event + character(len=*), intent(in) :: event ! !---------------------------Input/Output arguments---------------------- ! @@ -672,10 +672,10 @@ end subroutine t_stopf !======================================================================== ! subroutine t_enablef() -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Enable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored ! in threaded regions. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Local workspace----------------------------- ! @@ -709,10 +709,10 @@ end subroutine t_enablef !======================================================================== ! subroutine t_disablef() -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Disable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored ! in threaded regions. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Local workspace----------------------------- ! @@ -744,9 +744,9 @@ end subroutine t_disablef !======================================================================== ! subroutine t_adj_detailf(detail_adjustment) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Modify current detail level. Ignored in threaded regions. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! @@ -776,11 +776,11 @@ end subroutine t_adj_detailf !======================================================================== ! subroutine t_barrierf(event, mpicom) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Call (and time) mpi_barrier. Ignored inside OpenMP ! threaded regions. Note that barrier executed even if ! event not recorded because of level of timer event nesting. -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! mpi communicator id @@ -835,9 +835,9 @@ end subroutine t_barrierf ! 
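[t_startf()/t_stopf() above bracket a named event and defer to the GPTL library, subject to the nesting and detail limits this module manages. A minimal sketch of the same bracket using GPTL's C API directly; the event name is arbitrary.]

    #include <gptl.h>

    /* Time a region with the underlying C API; t_startf/t_stopf add
     * nesting-depth and detail-level checks on top of this. */
    int time_region(void)
    {
        GPTLinitialize();

        GPTLstart("my_event");   /* analogous to: call t_startf('my_event') */
        /* ... work to be timed ... */
        GPTLstop("my_event");

        GPTLpr(0);               /* write the timing report for id 0 */
        GPTLfinalize();
        return 0;
    }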
subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & single_file, global_stats, output_thispe) -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: Write out performance timer data -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Input arguments----------------------------- ! @@ -847,7 +847,7 @@ subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & integer, intent(in), optional :: mpicom ! maximum number of processes writing out timing data integer, intent(in), optional :: num_outpe - ! separation between process ids for processes writing out data + ! separation between process ids for processes writing out data integer, intent(in), optional :: stride_outpe ! enable/disable the writing of data to a single file logical, intent(in), optional :: single_file @@ -862,7 +862,7 @@ subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & ! all data to a single file logical glb_stats ! flag indicting whether to compute ! global statistics - logical pr_write ! flag indicating whether the current + logical pr_write ! flag indicating whether the current ! GPTL output mode is write logical write_data ! flag indicating whether this process ! should output its timing data @@ -916,7 +916,7 @@ subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & if (GPTLpr_query_write() == 1) then pr_write = .true. ierr = GPTLpr_set_append() - else + else pr_write=.false. endif @@ -1113,8 +1113,8 @@ end subroutine t_prf !======================================================================== ! subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask) -!----------------------------------------------------------------------- -! Purpose: Set default values of runtime timing options +!----------------------------------------------------------------------- +! Purpose: Set default values of runtime timing options ! before namelists prof_inparm and papi_inparm are read, ! read namelists (and broadcast, if SPMD), ! then initialize timing library. @@ -1224,12 +1224,12 @@ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask) open( unitn, file=trim(NLFilename), status='old', iostat=ierr ) if (ierr .eq. 0) then - ! Look for prof_inparm group name in the input file. + ! Look for prof_inparm group name in the input file. ! If found, leave the file positioned at that namelist group. call find_group_name(unitn, 'prof_inparm', status=ierr) if (ierr == 0) then ! found prof_inparm - read(unitn, nml=prof_inparm, iostat=ierr) + read(unitn, nml=prof_inparm, iostat=ierr) if (ierr /= 0) then call shr_sys_abort( subname//':: namelist read returns an'// & ' error condition for prof_inparm' ) @@ -1291,12 +1291,12 @@ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask) ierr = 1 open( unitn, file=trim(NLFilename), status='old', iostat=ierr ) if (ierr .eq. 0) then - ! Look for papi_inparm group name in the input file. + ! Look for papi_inparm group name in the input file. ! If found, leave the file positioned at that namelist group. call find_group_name(unitn, 'papi_inparm', status=ierr) if (ierr == 0) then ! 
found papi_inparm - read(unitn, nml=papi_inparm, iostat=ierr) + read(unitn, nml=papi_inparm, iostat=ierr) if (ierr /= 0) then call shr_sys_abort( subname//':: namelist read returns an'// & ' error condition for papi_inparm' ) @@ -1355,12 +1355,12 @@ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask) !$OMP MASTER ! - ! Set options and initialize timing library. - ! + ! Set options and initialize timing library. + ! ! Set timer if (gptlsetutr (perf_timer) < 0) call shr_sys_abort (subname//':: gptlsetutr') ! - ! For logical settings, 2nd arg 0 + ! For logical settings, 2nd arg 0 ! to gptlsetoption means disable, non-zero means enable ! ! Turn off CPU timing (expensive) @@ -1404,9 +1404,9 @@ end subroutine t_initf !======================================================================== ! subroutine t_finalizef() -!----------------------------------------------------------------------- +!----------------------------------------------------------------------- ! Purpose: shut down timing library -! Author: P. Worley +! Author: P. Worley !----------------------------------------------------------------------- !---------------------------Local workspace----------------------------- ! diff --git a/externals/pio2/tests/performance/kt.PIO1.perfmakefile b/externals/pio2/tests/performance/kt.PIO1.perfmakefile new file mode 100644 index 00000000000..67f0d3be11f --- /dev/null +++ b/externals/pio2/tests/performance/kt.PIO1.perfmakefile @@ -0,0 +1,19 @@ +all: pioperf pioperfp1 + +pioperf: pioperformance.o + mpif90 pioperformance.o -o pioperf ../pio_build_int/src/flib/libpiof.a ../pio_build_int/src/clib/libpioc.a ../pio_build_int/src/gptl/libgptl.a /glade/apps/opt/netcdf-mpi/4.3.3.1/intel/default/lib/libnetcdff.a /glade/apps/opt/netcdf-mpi/4.3.3.1/intel/default/lib/libnetcdf.so /glade/apps/opt/pnetcdf/1.6.1/intel/15.0.3/lib/libpnetcdf.a -lirng -ldecimal -lcilkrts -lstdc++ + +pioperformance.o: pioperformance.F90 + mpif90 -DCPRINTEL -DHAVE_MPI -DINCLUDE_CMAKE_FCI -DLINUX -DTIMING -DUSEMPIIO -DUSE_PNETCDF_VARN -DUSE_PNETCDF_VARN_ON_READ -D_NETCDF -D_NETCDF4 -D_PNETCDF -I/glade/p/work/katec/pio_work/ncar_pio2/src/flib -I/glade/p/work/katec/pio_work/pio_build_int/src/flib -I/glade/apps/opt/netcdf-mpi/4.3.2/intel/default/include -I/glade/apps/opt/pnetcdf/1.6.1/intel/15.0.3/include -I/glade/p/work/katec/pio_work/ncar_pio2/src/clib -I/glade/p/work/katec/pio_work/ncar_pio2/src/gptl -I/glade/p/work/katec/pio_work/pio_build_int/src/gptl -c pioperformance.F90 -o pioperformance.o + +pioperfp1: pioperformancep1.o + mpif90 pioperformancep1.o -o pioperfp1 ../PIO1_bld/pio/libpio.a ../pio_build_int/src/gptl/libgptl.a /glade/apps/opt/netcdf-mpi/4.3.3.1/intel/default/lib/libnetcdff.a /glade/apps/opt/netcdf-mpi/4.3.3.1/intel/default/lib/libnetcdf.so /glade/apps/opt/pnetcdf/1.6.1/intel/15.0.3/lib/libpnetcdf.a -lirng -ldecimal -lcilkrts -lstdc++ -openmp + +pioperformancep1.o: pioperformance.F90 + mpif90 -DPIO_GPFS_HINTS -DUSEMPIIO -D_NETCDF -D_NETCDF4 -D_NOUSEMCT -D_PNETCDF -D_USEBOX -D_PIO1 -no-opt-dynamic-align -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source -g -xHost -debug minimal -openmp -DLINUX -DMCT_INTERFACE -DHAVE_MPI -DTHREADED_OMP -DFORTRANUNDERSCORE -DNO_R16 -DINTEL_MKL -DHAVE_SSE2 -DLINUX -DCPRINTEL -DHAVE_SLASHPROC -I. 
-I/glade/p/work/katec/pio_work/PIO1_bld/pio -I/glade/apps/opt/netcdf-mpi/4.3.3.1/intel/default/include -I/glade/apps/opt/pnetcdf/1.6.1/intel/15.0.3/include -I/glade/p/work/katec/pio_work/PIO1/pio -I/glade/p/work/katec/pio_work/PIO1_bld/pio/timing -I/glade/apps/opt/netcdf-mpi/4.3.3.1/intel/default/include -I/glade/apps/opt/pnetcdf/1.6.1/intel/15.0.3/include -c pioperformance.F90 -o pioperformancep1.o + +cleanp1: + rm pioperformancep1.o pioperfp1 + +clean: + rm pioperformance.o pioperf \ No newline at end of file diff --git a/externals/pio2/tests/performance/pioperformance.F90 b/externals/pio2/tests/performance/pioperformance.F90 index 3b295a3d779..16fe145a193 100644 --- a/externals/pio2/tests/performance/pioperformance.F90 +++ b/externals/pio2/tests/performance/pioperformance.F90 @@ -1,4 +1,4 @@ -#define VARINT 1 +#define VARINT 1 !#define VARREAL 1 !#define VARDOUBLE 1 @@ -12,7 +12,7 @@ program pioperformance implicit none #ifdef NO_MPIMOD #include -#endif +#endif integer, parameter :: max_io_task_array_size=64, max_decomp_files=64 @@ -31,6 +31,11 @@ program pioperformance nvars, varsize, unlimdimindof #ifdef BGQTRY external :: print_memusage +#endif +#ifdef _PIO1 + integer, parameter :: PIO_FILL_INT = 02147483647 + real, parameter :: PIO_FILL_FLOAT = 9.969209968E+36 + double precision, parameter :: PIO_FILL_DOUBLE = 9.969209968E+36 #endif ! ! Initialize MPI @@ -97,7 +102,7 @@ program pioperformance if(rearrangers(1)==0) then rearrangers(1)=1 rearrangers(2)=2 - endif + endif do i=1,max_decomp_files if(len_trim(decompfile(i))==0) exit @@ -107,7 +112,7 @@ program pioperformance do nv=1,max_nvars if(nvars(nv)>0) then call pioperformancetest(decompfile(i), piotypes(1:niotypes), mype, npe, & - rearrangers, niotasks, nframes, nvars(nv), varsize(vs),unlimdimindof) + rearrangers, niotasks, nframes, nvars(nv), varsize(vs),unlimdimindof) endif enddo endif @@ -128,7 +133,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & integer, intent(in) :: piotypes(:) integer, intent(in) :: rearrangers(:) integer, intent(inout) :: niotasks(:) - integer, intent(in) :: nframes + integer, intent(in) :: nframes integer, intent(in) :: nvars integer, intent(in) :: varsize logical, intent(in) :: unlimdimindof @@ -163,11 +168,18 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & character(len=*), parameter :: rearr_name(2) = (/' BOX','SUBSET'/) nullify(compmap) - if(mype.eq.0) print *,trim(filename) + if(trim(filename) .eq. 'ROUNDROBIN' .or. trim(filename).eq.'BLOCK') then call init_ideal_dof(filename, mype, npe_base, ndims, gdims, compmap, varsize) else + ! Changed to support PIO1 as well +#ifdef _PIO1 + call pio_readdof(filename, compmap, MPI_COMM_WORLD, 81, ndims, gdims) +#else call pio_readdof(filename, ndims, gdims, compmap, MPI_COMM_WORLD) +#endif + +! print *,__FILE__,__LINE__,' gdims=',ndims endif maplen = size(compmap) ! color = 0 @@ -194,7 +206,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & ! if(gmaplen /= product(gdims)) then ! print *,__FILE__,__LINE__,gmaplen,gdims ! 
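[init_ideal_dof(), partially shown above, builds the two synthetic decompositions by formula. A sketch of both layouts in C with 0-based loop indices; the ROUNDROBIN formula comes from the code, while the BLOCK branch is elided by the hunk, so its formula is inferred as the conventional contiguous slab.]

    #include <stdio.h>

    #define NPE 4       /* hypothetical number of MPI tasks */
    #define VARSIZE 6   /* elements owned by each task */

    int main(void)
    {
        int mype = 1;   /* pretend to be rank 1 */
        long long roundrobin[VARSIZE], block[VARSIZE];

        for (int i = 0; i < VARSIZE; i++)
        {
            /* ROUNDROBIN, from the Fortran above with i shifted to
             * 0-based: consecutive global offsets are dealt out across
             * the ranks. */
            roundrobin[i] = (long long)i * NPE + mype + 1;

            /* BLOCK: one contiguous slab per rank (inferred). */
            block[i] = (long long)mype * VARSIZE + i + 1;
        }

        for (int i = 0; i < VARSIZE; i++)
            printf("%lld %lld\n", roundrobin[i], block[i]);
        return 0;
    }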
endif - + allocate(ifld(maplen,nvars)) allocate(ifld_in(maplen,nvars,nframes)) @@ -243,14 +255,16 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & stride = max(1,npe/ntasks) call pio_init(mype, comm, ntasks, 0, stride, PIO_REARR_SUBSET, iosystem) - + write(fname, '(a,i1,a,i4.4,a,i1,a)') 'pioperf.',rearr,'-',ntasks,'-',iotype,'.nc' - + ierr = PIO_CreateFile(iosystem, File, iotype, trim(fname), mode) call WriteMetadata(File, gdims, vari, varr, vard, unlimdimindof) + call MPI_Barrier(comm,ierr) call t_stampf(wall(1), usr(1), sys(1)) + if(.not. unlimdimindof) then #ifdef VARINT call PIO_InitDecomp(iosystem, PIO_INT, gdims, compmap, iodesc_i4, rearr=rearr) @@ -262,7 +276,8 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & call PIO_InitDecomp(iosystem, PIO_DOUBLE, gdims, compmap, iodesc_r8, rearr=rearr) #endif endif -! print *,__FILE__,__LINE__,minval(dfld),maxval(dfld),minloc(dfld),maxloc(dfld) + + ! print *,__FILE__,__LINE__,minval(dfld),maxval(dfld),minloc(dfld),maxloc(dfld) do frame=1,nframes recnum = frame @@ -282,7 +297,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & endif if(mype==0) print *,__FILE__,__LINE__,'Frame: ',recnum - do nv=1,nvars + do nv=1,nvars if(mype==0) print *,__FILE__,__LINE__,'var: ',nv #ifdef VARINT call PIO_setframe(File, vari(nv), recnum) @@ -298,7 +313,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & #endif enddo if(unlimdimindof) then -#ifdef VARREAL +#ifdef VARREAL call PIO_freedecomp(File, iodesc_r4) #endif #ifdef VARDOUBLE @@ -306,7 +321,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & #endif #ifdef VARINT call PIO_freedecomp(File, iodesc_i4) -#endif +#endif endif enddo call pio_closefile(File) @@ -329,7 +344,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & #ifdef VARDOUBLE nvarmult = nvarmult+2 #endif - write(*,'(a15,a9,i10,i10,i10,f20.10)') & + write(*,'(a15,a9,i10,i10,i10,f20.10)') & 'RESULT: write ',rearr_name(rearr), piotypes(k), ntasks, nvars, & nvarmult*nvars*nframes*gmaplen*4.0/(1048576.0*wall(2)) #ifdef BGQTRY @@ -368,8 +383,8 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & call MPI_Barrier(comm,ierr) call t_stampf(wall(1), usr(1), sys(1)) - - do frame=1,nframes + + do frame=1,nframes do nv=1,nvars #ifdef VARINT call PIO_setframe(File, vari(nv), frame) @@ -385,7 +400,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & #endif enddo enddo - + call pio_closefile(File) call MPI_Barrier(comm,ierr) call t_stampf(wall(2), usr(2), sys(2)) @@ -398,7 +413,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & if(compmap(j)>0) then #ifdef VARINT #ifdef DEBUG - write(*,'(a11,i2,a9,i11,a9,i11,a9,i2)') & + write(*,'(a11,i2,a9,i11,a9,i11,a9,i2)') & ' Int PE=',mype,'ifld=',ifld(j,nv),' ifld_in=',ifld_in(j,nv,frame),' compmap=',compmap(j) #endif if(ifld(j,nv) /= ifld_in(j,nv,frame)) then @@ -406,7 +421,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & ! 
print *,__LINE__,'Int: ',mype,j,nv,ifld(j,nv),ifld_in(j,nv,frame),compmap(j) !endif write(*,*) '***ERROR:Mismatch!***' - write(*,'(a11,i2,a9,i11,a9,i11,a9,i2)') & + write(*,'(a11,i2,a9,i11,a9,i11,a9,i2)') & ' Int PE=',mype,'ifld=',ifld(j,nv),' ifld_in=',ifld_in(j,nv,frame),' compmap=',compmap(j) errorcnt = errorcnt+1 @@ -417,7 +432,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & write(*,'(a11,i2,a9,f11.2,a9,f11.2,a9,i2)') & ' Real PE=',mype,'rfld=',rfld(j,nv),' rfld_in=',rfld_in(j,nv,frame),' compmap=',compmap(j) #endif - + if(rfld(j,nv) /= rfld_in(j,nv,frame) ) then !if(errorcnt < 10) then ! print *,__LINE__,'Real:', mype,j,nv,rfld(j,nv),rfld_in(j,nv,frame),compmap(j) @@ -426,7 +441,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & write(*,'(a11,i2,a9,f11.2,a9,f11.2,a9,i2)') & ' Real PE=',mype,'rfld=',rfld(j,nv),' rfld_in=',rfld_in(j,nv,frame),' compmap=',compmap(j) - errorcnt = errorcnt+1 + errorcnt = errorcnt+1 endif #endif #ifdef VARDOUBLE @@ -451,7 +466,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & enddo j = errorcnt call MPI_Reduce(j, errorcnt, 1, MPI_INTEGER, MPI_SUM, 0, comm, ierr) - + if(mype==0) then if(errorcnt > 0) then print *,'ERROR: INPUT/OUTPUT data mismatch ',errorcnt @@ -469,11 +484,11 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & write(*,'(a15,a9,i10,i10,i10,f20.10)') & 'RESULT: read ',rearr_name(rearr), piotypes(k), ntasks, nvars, & nvarmult*nvars*nframes*gmaplen*4.0/(1048576.0*wall(2)) -#ifdef BGQTRY +#ifdef BGQTRY call print_memusage() #endif end if -#ifdef VARREAL +#ifdef VARREAL call PIO_freedecomp(iosystem, iodesc_r4) #endif #ifdef VARDOUBLE @@ -481,7 +496,7 @@ subroutine pioperformancetest(filename, piotypes, mype, npe_base, & #endif #ifdef VARINT call PIO_freedecomp(iosystem, iodesc_i4) -#endif +#endif call pio_finalize(iosystem, ierr) enddo enddo @@ -516,7 +531,7 @@ subroutine init_ideal_dof(doftype, mype, npe, ndims, gdims, compmap, varsize) allocate(compmap(varsize)) if(doftype .eq. 'ROUNDROBIN') then do i=1,varsize - compmap(i) = (i-1)*npe+mype+1 + compmap(i) = (i-1)*npe+mype+1 enddo else if(doftype .eq. 'BLOCK') then do i=1,varsize @@ -550,8 +565,10 @@ subroutine WriteMetadata(File, gdims, vari, varr, vard,unlimdimindof) ndims=ndims-1 endif allocate(dimid(ndims+1)) + do i=1,ndims - write(dimname,'(a,i6.6)') 'dim',i + + write(dimname,'(a,i6.6)') 'dim',i iostat = PIO_def_dim(File, trim(dimname), int(gdims(i),pio_offset_kind), dimid(i)) enddo iostat = PIO_def_dim(File, 'time', PIO_UNLIMITED, dimid(ndims+1)) @@ -592,15 +609,15 @@ subroutine CheckMPIreturn(line,errcode) implicit none #ifdef NO_MPIMOD #include -#endif +#endif integer, intent(in) :: errcode integer, intent(in) :: line character(len=MPI_MAX_ERROR_STRING) :: errorstring - + integer :: errorlen - + integer :: ierr - + if (errcode .ne. 
MPI_SUCCESS) then call MPI_Error_String(errcode,errorstring,errorlen,ierr) write(*,*) errorstring(1:errorlen) diff --git a/externals/pio2/tests/unit/CMakeLists.txt b/externals/pio2/tests/unit/CMakeLists.txt index 66d8995f7af..12999f2518e 100644 --- a/externals/pio2/tests/unit/CMakeLists.txt +++ b/externals/pio2/tests/unit/CMakeLists.txt @@ -43,10 +43,20 @@ if ("${CMAKE_Fortran_COMPILER_ID}" STREQUAL "GNU") PRIVATE -ffree-line-length-none) endif() +if (PIO_ENABLE_ASYNC AND NOT PIO_USE_MPI_SERIAL) + add_executable (test_intercomm EXCLUDE_FROM_ALL test_intercomm.c) + target_link_libraries (test_intercomm pioc) + add_dependencies (tests test_intercomm) + add_executable (test_darray_async EXCLUDE_FROM_ALL test_darray_async.c) + target_link_libraries (test_darray_async pioc) + add_dependencies (tests test_darray_async) +endif () add_executable (test_names EXCLUDE_FROM_ALL test_names.c) target_link_libraries (test_names pioc) add_executable (test_nc4 EXCLUDE_FROM_ALL test_nc4.c) target_link_libraries (test_nc4 pioc) +add_executable (test_darray EXCLUDE_FROM_ALL test_darray.c) +target_link_libraries (test_darray pioc) if (CMAKE_Fortran_COMPILER_ID STREQUAL "NAG") set ( CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -mismatch_all" ) @@ -55,6 +65,7 @@ if (CMAKE_Fortran_COMPILER_ID STREQUAL "NAG") endif () add_dependencies (tests test_names) +add_dependencies (tests test_darray) add_dependencies (tests test_nc4) add_dependencies (tests pio_unit_test) @@ -71,6 +82,16 @@ if (PIO_USE_MPISERIAL) set_tests_properties(pio_unit_test PROPERTIES TIMEOUT ${DEFAULT_TEST_TIMEOUT}) else () + add_mpi_test(test_darray + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray + NUMPROCS 4 + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + if (PIO_ENABLE_ASYNC) + add_mpi_test(test_intercomm + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_intercomm + NUMPROCS 4 + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + endif () add_mpi_test(test_names EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_names NUMPROCS 4 diff --git a/externals/pio2/tests/unit/basic_tests.F90 b/externals/pio2/tests/unit/basic_tests.F90 index de41cef69d4..daad01babca 100644 --- a/externals/pio2/tests/unit/basic_tests.F90 +++ b/externals/pio2/tests/unit/basic_tests.F90 @@ -33,7 +33,7 @@ Subroutine test_create(test_id, err_msg) ! Local Vars character(len=str_len) :: filename - integer :: iotype, ret_val, pio_dim + integer :: iotype, ret_val, ret_val2, pio_dim err_msg = "no_error" @@ -51,7 +51,7 @@ Subroutine test_create(test_id, err_msg) ! Error in PIO_createfile print *,' ret_val = ', ret_val err_msg = "Could not create " // trim(filename) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if call mpi_barrier(mpi_comm_world,ret_val) @@ -62,7 +62,7 @@ Subroutine test_create(test_id, err_msg) ! Error in PIO_enddef err_msg = "Could not end define mode" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if end if call PIO_closefile(pio_file) @@ -73,7 +73,7 @@ Subroutine test_create(test_id, err_msg) if (ret_val .ne. PIO_NOERR) then ! Error in PIO_openfile err_msg = "Could not open " // trim(filename) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Close file @@ -85,7 +85,7 @@ Subroutine test_create(test_id, err_msg) if (ret_val .ne. PIO_NOERR) then ! Error in PIO_createfile err_msg = "Could not clobber " // trim(filename) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! 
Leave define mode @@ -94,7 +94,7 @@ Subroutine test_create(test_id, err_msg) ! Error in PIO_enddef err_msg = "Could not end define mode in clobbered file" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Close file @@ -113,7 +113,7 @@ Subroutine test_create(test_id, err_msg) err_msg = "Was able to clobber file despite PIO_NOCLOBBER" ret_val = PIO_enddef(pio_file) call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if end if @@ -137,7 +137,7 @@ Subroutine test_open(test_id, err_msg) ! Local Vars character(len=str_len) :: filename - integer :: iotype, ret_val + integer :: iotype, ret_val, ret_val2 ! Data used to test writing integer, dimension(3) :: data_buffer, compdof @@ -147,6 +147,12 @@ Subroutine test_open(test_id, err_msg) integer :: unlimdimid type(var_desc_t) :: pio_var + ! These will be used to set chunk cache sizes in netCDF-4/HDF5 + ! files. + integer(kind=PIO_OFFSET_KIND) :: chunk_cache_size + integer(kind=PIO_OFFSET_KIND) :: chunk_cache_nelems + real :: chunk_cache_preemption + err_msg = "no_error" dims(1) = 3*ntasks compdof = 3*my_rank+(/1,2,3/) ! Where in the global array each task writes @@ -166,7 +172,7 @@ Subroutine test_open(test_id, err_msg) ! Error in PIO_openfile err_msg = "Successfully opened file that doesn't exist" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Open existing file, write data to it (for binary file, need to create new file) @@ -178,17 +184,16 @@ Subroutine test_open(test_id, err_msg) if (ret_val .ne. PIO_NOERR) then ! Error in PIO_openfile (or PIO_createfile) err_msg = "Could not open " // trim(filename) // " in write mode" - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Enter define mode for netcdf files if (is_netcdf(iotype)) then ret_val = PIO_redef(pio_file) if (ret_val .ne. PIO_NOERR) then - ! Error in PIO_redef err_msg = "Could not enter redef mode" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Define a new dimension N @@ -197,7 +202,7 @@ Subroutine test_open(test_id, err_msg) ! Error in PIO_def_dim err_msg = "Could not define dimension N" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Define a new variable foo @@ -207,7 +212,7 @@ Subroutine test_open(test_id, err_msg) ! Error in PIO_def_var err_msg = "Could not define variable foo" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Leave define mode @@ -217,7 +222,7 @@ Subroutine test_open(test_id, err_msg) print *,__FILE__,__LINE__,ret_val err_msg = "Could not end define mode" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if end if @@ -229,7 +234,7 @@ Subroutine test_open(test_id, err_msg) ! Error in PIO_write_darray err_msg = "Could not write data" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Close file @@ -242,7 +247,7 @@ Subroutine test_open(test_id, err_msg) if (ret_val .ne. PIO_NOERR) then ! 
Error opening file err_msg = "Could not open file in NoWrite mode" - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if ! Try to write (should fail) @@ -254,7 +259,7 @@ Subroutine test_open(test_id, err_msg) ! Error in PIO_write_darray err_msg = "Wrote to file opened in NoWrite mode" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if call mpi_barrier(MPI_COMM_WORLD,ret_val) @@ -267,25 +272,25 @@ Subroutine test_open(test_id, err_msg) err_msg = "Error in read_darray" call PIO_closefile(pio_file) print *,__FILE__,__LINE__,err_msg - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if if(any(data_buffer /= my_rank)) then err_msg = "Error reading data" call PIO_closefile(pio_file) print *,__FILE__,__LINE__,iotype, trim(err_msg), data_buffer - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if + ret_val = PIO_set_log_level(3) ret_val = PIO_inq_unlimdim(pio_file, unlimdimid) if(unlimdimid /= -1) then err_msg = "Error in inq_unlimdim" call PIO_closefile(pio_file) print *,__FILE__,__LINE__,iotype, trim(err_msg) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if - + ret_val = PIO_set_log_level(0) - ! Close file call PIO_closefile(pio_file) end if @@ -303,7 +308,7 @@ Subroutine test_open(test_id, err_msg) ! Error in PIO_openfile err_msg = "Opened a non-netcdf file as netcdf" call PIO_closefile(pio_file) - call mpi_abort(MPI_COMM_WORLD,0,ret_val) + call mpi_abort(MPI_COMM_WORLD, 0, ret_val2) end if end if diff --git a/externals/pio2/tests/unit/driver.F90 b/externals/pio2/tests/unit/driver.F90 index 7dbcfada7ed..92af6cec3eb 100644 --- a/externals/pio2/tests/unit/driver.F90 +++ b/externals/pio2/tests/unit/driver.F90 @@ -26,6 +26,10 @@ Program pio_unit_test_driver #if defined( _NETCDF4) && defined(LOGGING) integer, external :: nc_set_log_level2 #endif + integer ret_val + character(len=80) :: errmsg + character(len=80) :: expected + ! Set up MPI call MPI_Init(ierr) call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr) @@ -62,7 +66,7 @@ Program pio_unit_test_driver ! Ignore namelist values if PIO not built with correct options ! (i.e. don't test pnetcdf if not built with pnetcdf) - + ret_val = PIO_set_log_level(2) #ifndef _NETCDF if (ltest_netcdf) then write(*,"(A,1x,A)") "WARNING: can not test netcdf files because PIO", & @@ -121,6 +125,16 @@ Program pio_unit_test_driver fail_cnt = 0 test_cnt = 0 + ! Test pio_strerror. + ret_val = PIO_strerror(-33, errmsg); + print *, 'errcode =', -33, ' strerror = ', errmsg + expected = 'NetCDF: Not a valid ID' + if (trim(errmsg) .ne. expected) then + err_msg = 'expected ' // trim(expected) // ' and got ' // trim(errmsg) + print *, err_msg + call parse(err_msg, fail_cnt) + end if + do test_id=1,ntest if (ltest(test_id)) then ! Make sure i is a valid test number @@ -145,6 +159,7 @@ Program pio_unit_test_driver #if defined( _NETCDF4) && defined(LOGGING) if(master_task) ierr = nc_set_log_level2(3) #endif + ! test_create() if (master_task) write(*,"(3x,A,1x)") "testing PIO_createfile..." call test_create(test_id, err_msg) diff --git a/externals/pio2/tests/unit/test_darray.c b/externals/pio2/tests/unit/test_darray.c new file mode 100644 index 00000000000..abc1949633b --- /dev/null +++ b/externals/pio2/tests/unit/test_darray.c @@ -0,0 +1,260 @@ +/** + * @file + * Tests for darray functions. 
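[The driver.F90 check above expects PIO_strerror(-33) to produce 'NetCDF: Not a valid ID'. A C-side equivalent, assuming a netCDF-enabled build so the netCDF branch of PIOc_strerror() shown earlier handles the code; -33 is netCDF's NC_EBADID.]

    #include <assert.h>
    #include <string.h>
    #include <pio.h>

    /* Error -33 is NC_EBADID; PIOc_strerror() hands netCDF codes to
     * nc_strerror(), per the implementation earlier in this patch. */
    void check_strerror_ebadid(void)
    {
        char errmsg[PIO_MAX_NAME + 1];
        PIOc_strerror(-33, errmsg);
        assert(strcmp(errmsg, "NetCDF: Not a valid ID") == 0);
    }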
+ *
+ */
+#include <pio.h>
+#ifdef TIMING
+#include <gptl.h>
+#endif
+
+#define NUM_NETCDF_FLAVORS 4
+#define NDIM 3
+#define X_DIM_LEN 400
+#define Y_DIM_LEN 400
+#define NUM_TIMESTEPS 6
+#define VAR_NAME "foo"
+#define ATT_NAME "bar"
+#define START_DATA_VAL 42
+#define ERR_AWFUL 1111
+#define VAR_CACHE_SIZE (1024 * 1024)
+#define VAR_CACHE_NELEMS 10
+#define VAR_CACHE_PREEMPTION 0.5
+
+/** Handle MPI errors. This should only be used with MPI library
+ * function calls. */
+#define MPIERR(e) do {                                                  \
+        MPI_Error_string(e, err_buffer, &resultlen);                    \
+        fprintf(stderr, "MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \
+        MPI_Finalize();                                                 \
+        return ERR_AWFUL;                                               \
+    } while (0)
+
+/** Handle non-MPI errors by finalizing the MPI library and exiting
+ * with an exit code. */
+#define ERR(e) do {                                                     \
+        fprintf(stderr, "Error %d in %s, line %d\n", e, __FILE__, __LINE__); \
+        MPI_Finalize();                                                 \
+        return e;                                                       \
+    } while (0)
+
+/** Global err buffer for MPI. */
+char err_buffer[MPI_MAX_ERROR_STRING];
+int resultlen;
+
+/** The dimension names. */
+char dim_name[NDIM][NC_MAX_NAME + 1] = {"timestep", "x", "y"};
+
+/** Length of the dimensions in the sample data. */
+int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN};
+
+/** Run Tests for darray Functions.
+ *
+ * @param argc argument count
+ * @param argv array of arguments
+ */
+int
+main(int argc, char **argv)
+{
+    int verbose = 1;
+
+    /** Zero-based rank of processor. */
+    int my_rank;
+
+    /** Number of processors involved in current execution. */
+    int ntasks;
+
+    /** Specifies the flavor of netCDF output format. */
+    int iotype;
+
+    /** Different output flavors. */
+    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF,
+                                      PIO_IOTYPE_NETCDF,
+                                      PIO_IOTYPE_NETCDF4C,
+                                      PIO_IOTYPE_NETCDF4P};
+
+    /** Names for the output files. */
+    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_darray_pnetcdf.nc",
+                                                          "test_darray_classic.nc",
+                                                          "test_darray_serial4.nc",
+                                                          "test_darray_parallel4.nc"};
+
+    /** Number of processors that will do IO. In this test we
+     * will do IO from all processors. */
+    int niotasks;
+
+    /** Stride in the mpi rank between io tasks. Always 1 in this
+     * test. */
+    int ioproc_stride = 1;
+
+    /** Number of aggregators. Always 0 in this test. */
+    int numAggregator = 0;
+
+    /** Zero based rank of first processor to be used for I/O. */
+    int ioproc_start = 0;
+
+    /** The dimension IDs. */
+    int dimids[NDIM];
+
+    /** Number of array elements per processing unit. */
+    PIO_Offset elements_per_pe;
+
+    /** The ID for the parallel I/O system. */
+    int iosysid;
+
+    /** The ncid of the netCDF file. */
+    int ncid;
+
+    /** The ID of the netCDF variable. */
+    int varid;
+
+    /** The I/O description ID. */
+    int ioid;
+
+    /** A buffer for sample data. */
+    float *buffer;
+
+    /** A buffer for reading data back from the file. */
+    int *read_buffer;
+
+    /** The decomposition mapping. */
+    PIO_Offset *compdof;
+
+    /** Return code. */
+    int ret;
+
+    /** Index for loops. */
+    int fmt, d, d1, i;
+
+#ifdef TIMING
+    /* Initialize the GPTL timing library. */
+    if ((ret = GPTLinitialize ()))
+        return ret;
+#endif
+
+    /* Initialize MPI. */
+    if ((ret = MPI_Init(&argc, &argv)))
+        MPIERR(ret);
+
+    /* Learn my rank and the total number of processors. */
+    if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank)))
+        MPIERR(ret);
+    if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks)))
+        MPIERR(ret);
+
+    /* Check that a valid number of processors was specified.
*/ + if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 || + ntasks == 8 || ntasks == 16)) + fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n"); + if (verbose) + printf("%d: ParallelIO Library example1 running on %d processors.\n", + my_rank, ntasks); + + /* keep things simple - 1 iotask per MPI process */ + niotasks = ntasks; + + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride, + ioproc_start, PIO_REARR_SUBSET, &iosysid))) + ERR(ret); + + /* Describe the decomposition. This is a 1-based array, so add 1! */ + elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks; + if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + for (i = 0; i < elements_per_pe; i++) { + compdof[i] = my_rank * elements_per_pe + i + 1; + } + + /* Create the PIO decomposition for this test. */ + if (verbose) + printf("rank: %d Creating decomposition...\n", my_rank); + if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe, + compdof, &ioid, NULL, NULL, NULL))) + ERR(ret); + free(compdof); + + /* How many flavors will we be running for? */ + int num_flavors = 0; + int fmtidx = 0; +#ifdef _PNETCDF + num_flavors++; + format[fmtidx++] = PIO_IOTYPE_PNETCDF; +#endif +#ifdef _NETCDF + num_flavors++; + format[fmtidx++] = PIO_IOTYPE_NETCDF; +#endif +#ifdef _NETCDF4 + num_flavors += 2; + format[fmtidx++] = PIO_IOTYPE_NETCDF4C; + format[fmtidx] = PIO_IOTYPE_NETCDF4P; +#endif + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (fmt = 0; fmt < num_flavors; fmt++) + { + /* Create the netCDF output file. */ + if (verbose) + printf("rank: %d Creating sample file %s with format %d...\n", + my_rank, filename[fmt], format[fmt]); + if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt], + PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimensions and variable. */ + if (verbose) + printf("rank: %d Defining netCDF metadata...\n", my_rank); + for (d = 0; d < NDIM; d++) { + if (verbose) + printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank, + dim_name[d], dim_len[d]); + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); + } + + /* Define a variable. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid))) + ERR(ret); + + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Close the netCDF file. */ + if (verbose) + printf("rank: %d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Put a barrier here to make verbose output look better. */ + if ((ret = MPI_Barrier(MPI_COMM_WORLD))) + MPIERR(ret); + + } + + /* Free the PIO decomposition. */ + if (verbose) + printf("rank: %d Freeing PIO decomposition...\n", my_rank); + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Finalize the IO system. */ + if (verbose) + printf("rank: %d Freeing PIO resources...\n", my_rank); + if ((ret = PIOc_finalize(iosysid))) + ERR(ret); + + /* Finalize the MPI library. */ + MPI_Finalize(); + +#ifdef TIMING + /* Finalize the GPTL timing library. 
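A note on the MPIERR()/ERR() macros this test (and the other new C tests) defines: the do { ... } while (0) wrapper turns the multi-statement expansion into a single statement, so the macros stay safe under an unbraced if/else. A standalone illustration with a hypothetical CHECK() macro:

    #include <stdio.h>

    /* Without the do/while(0), the expansion would be two statements and
     * the else below would no longer pair with its if. */
    #define CHECK(e) do {                         \
            fprintf(stderr, "Error %d\n", (e));   \
            return (e);                           \
        } while (0)

    int demo(int ret)
    {
        if (ret)
            CHECK(ret);        /* expands to exactly one statement */
        else
            printf("no error\n");
        return 0;
    }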
*/
+    if ((ret = GPTLfinalize ()))
+        return ret;
+#endif
+
+
+    return 0;
+}
diff --git a/externals/pio2/tests/unit/test_darray_async.c b/externals/pio2/tests/unit/test_darray_async.c
new file mode 100644
index 00000000000..f589f01e3a5
--- /dev/null
+++ b/externals/pio2/tests/unit/test_darray_async.c
@@ -0,0 +1,261 @@
+/**
+ * @file
+ * Tests for darray functions.
+ * @author Ed Hartnett
+ *
+ */
+#include <pio.h>
+#ifdef TIMING
+#include <gptl.h>
+#endif
+
+#define NUM_NETCDF_FLAVORS 4
+#define NDIM 3
+#define X_DIM_LEN 400
+#define Y_DIM_LEN 400
+#define NUM_TIMESTEPS 6
+#define VAR_NAME "foo"
+#define ATT_NAME "bar"
+#define START_DATA_VAL 42
+#define ERR_AWFUL 1111
+#define VAR_CACHE_SIZE (1024 * 1024)
+#define VAR_CACHE_NELEMS 10
+#define VAR_CACHE_PREEMPTION 0.5
+
+/** Handle MPI errors. This should only be used with MPI library
+ * function calls. */
+#define MPIERR(e) do {                                                  \
+        MPI_Error_string(e, err_buffer, &resultlen);                    \
+        fprintf(stderr, "MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \
+        MPI_Finalize();                                                 \
+        return ERR_AWFUL;                                               \
+    } while (0)
+
+/** Handle non-MPI errors by finalizing the MPI library and exiting
+ * with an exit code. */
+#define ERR(e) do {                                                     \
+        fprintf(stderr, "Error %d in %s, line %d\n", e, __FILE__, __LINE__); \
+        MPI_Finalize();                                                 \
+        return e;                                                       \
+    } while (0)
+
+/** Global err buffer for MPI. */
+char err_buffer[MPI_MAX_ERROR_STRING];
+int resultlen;
+
+/** The dimension names. */
+char dim_name[NDIM][NC_MAX_NAME + 1] = {"timestep", "x", "y"};
+
+/** Length of the dimensions in the sample data. */
+int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN};
+
+/** Run Tests for darray Functions.
+ *
+ * @param argc argument count
+ * @param argv array of arguments
+ */
+int
+main(int argc, char **argv)
+{
+    int verbose = 1;
+
+    /** Zero-based rank of processor. */
+    int my_rank;
+
+    /** Number of processors involved in current execution. */
+    int ntasks;
+
+    /** Specifies the flavor of netCDF output format. */
+    int iotype;
+
+    /** Different output flavors. */
+    int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF,
+                                      PIO_IOTYPE_NETCDF,
+                                      PIO_IOTYPE_NETCDF4C,
+                                      PIO_IOTYPE_NETCDF4P};
+
+    /** Names for the output files. */
+    char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_darray_pnetcdf.nc",
+                                                          "test_darray_classic.nc",
+                                                          "test_darray_serial4.nc",
+                                                          "test_darray_parallel4.nc"};
+
+    /** Number of processors that will do IO. In this test we
+     * will do IO from all processors. */
+    int niotasks;
+
+    /** Stride in the mpi rank between io tasks. Always 1 in this
+     * test. */
+    int ioproc_stride = 1;
+
+    /** Number of aggregators. Always 0 in this test. */
+    int numAggregator = 0;
+
+    /** Zero based rank of first processor to be used for I/O. */
+    int ioproc_start = 0;
+
+    /** The dimension IDs. */
+    int dimids[NDIM];
+
+    /** Number of array elements per processing unit. */
+    PIO_Offset elements_per_pe;
+
+    /** The ID for the parallel I/O system. */
+    int iosysid;
+
+    /** The ncid of the netCDF file. */
+    int ncid;
+
+    /** The ID of the netCDF variable. */
+    int varid;
+
+    /** The I/O description ID. */
+    int ioid;
+
+    /** A buffer for sample data. */
+    float *buffer;
+
+    /** A buffer for reading data back from the file. */
+    int *read_buffer;
+
+    /** The decomposition mapping. */
+    PIO_Offset *compdof;
+
+    /** Return code. */
+    int ret;
+
+    /** Index for loops. */
+    int fmt, d, d1, i;
+
+#ifdef TIMING
+    /* Initialize the GPTL timing library. */
+    if ((ret = GPTLinitialize ()))
+        return ret;
+#endif
+
+    /* Initialize MPI.
*/ + if ((ret = MPI_Init(&argc, &argv))) + MPIERR(ret); + + /* Learn my rank and the total number of processors. */ + if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks))) + MPIERR(ret); + + /* Check that a valid number of processors was specified. */ + if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 || + ntasks == 8 || ntasks == 16)) + fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n"); + if (verbose) + printf("%d: ParallelIO Library example1 running on %d processors.\n", + my_rank, ntasks); + + /* keep things simple - 1 iotask per MPI process */ + niotasks = ntasks; + + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride, + ioproc_start, PIO_REARR_SUBSET, &iosysid))) + ERR(ret); + + /* Describe the decomposition. This is a 1-based array, so add 1! */ + elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks; + if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + for (i = 0; i < elements_per_pe; i++) { + compdof[i] = my_rank * elements_per_pe + i + 1; + } + + /* Create the PIO decomposition for this test. */ + if (verbose) + printf("rank: %d Creating decomposition...\n", my_rank); + if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe, + compdof, &ioid, NULL, NULL, NULL))) + ERR(ret); + free(compdof); + + /* How many flavors will we be running for? */ + int num_flavors = 0; + int fmtidx = 0; +#ifdef _PNETCDF + num_flavors++; + format[fmtidx++] = PIO_IOTYPE_PNETCDF; +#endif +#ifdef _NETCDF + num_flavors++; + format[fmtidx++] = PIO_IOTYPE_NETCDF; +#endif +#ifdef _NETCDF4 + num_flavors += 2; + format[fmtidx++] = PIO_IOTYPE_NETCDF4C; + format[fmtidx] = PIO_IOTYPE_NETCDF4P; +#endif + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (fmt = 0; fmt < num_flavors; fmt++) + { + /* Create the netCDF output file. */ + if (verbose) + printf("rank: %d Creating sample file %s with format %d...\n", + my_rank, filename[fmt], format[fmt]); + if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt], + PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimensions and variable. */ + if (verbose) + printf("rank: %d Defining netCDF metadata...\n", my_rank); + for (d = 0; d < NDIM; d++) { + if (verbose) + printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank, + dim_name[d], dim_len[d]); + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); + } + + /* Define a variable. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid))) + ERR(ret); + + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Close the netCDF file. */ + if (verbose) + printf("rank: %d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Put a barrier here to make verbose output look better. */ + if ((ret = MPI_Barrier(MPI_COMM_WORLD))) + MPIERR(ret); + + } + + /* Free the PIO decomposition. */ + if (verbose) + printf("rank: %d Freeing PIO decomposition...\n", my_rank); + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Finalize the IO system. */ + if (verbose) + printf("rank: %d Freeing PIO resources...\n", my_rank); + if ((ret = PIOc_finalize(iosysid))) + ERR(ret); + + /* Finalize the MPI library. */ + MPI_Finalize(); + +#ifdef TIMING + /* Finalize the GPTL timing library. 
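To make the 1-based decomposition used by both darray tests concrete: with 4 tasks and the 400 x 400 grid, each task owns 40,000 consecutive elements of the flattened array, and compdof lists their global 1-based indices. The mapping arithmetic in isolation (plain C, no PIO calls):

    #include <stdio.h>

    int main(void)
    {
        const long long x_len = 400, y_len = 400, ntasks = 4;
        long long elements_per_pe = x_len * y_len / ntasks;

        for (int rank = 0; rank < ntasks; rank++)
        {
            /* First and last global (1-based) indices owned by this rank. */
            long long first = rank * elements_per_pe + 1;
            long long last = first + elements_per_pe - 1;
            printf("rank %d owns global elements %lld..%lld\n",
                   rank, first, last);
        }
        return 0;
    }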
*/
+    if ((ret = GPTLfinalize ()))
+        return ret;
+#endif
+
+
+    return 0;
+}
diff --git a/externals/pio2/tests/unit/test_intercomm.c b/externals/pio2/tests/unit/test_intercomm.c
new file mode 100644
index 00000000000..36fb2a72ad2
--- /dev/null
+++ b/externals/pio2/tests/unit/test_intercomm.c
@@ -0,0 +1,575 @@
+/**
+ * @file Tests for PIOc_Intercomm. This tests the Init_Intercomm()
+ * function, and basic async I/O capability.
+ *
+ */
+#include <pio.h>
+#include <pio_internal.h>
+#ifdef TIMING
+#include <gptl.h>
+#endif
+
+/** The number of possible netCDF output flavors available to
+ * the ParallelIO library. */
+#define NUM_NETCDF_FLAVORS 4
+
+/** The number of dimensions in the test data. */
+#define NDIM 1
+
+/** The length of our test data. */
+#define DIM_LEN 4
+
+/** The name of the dimension in the netCDF output file. */
+#define FIRST_DIM_NAME "jojo"
+#define DIM_NAME "dim_test_intercomm"
+
+/** The name of the variable in the netCDF output file. */
+#define FIRST_VAR_NAME "bill"
+#define VAR_NAME "var_test_intercomm"
+
+/** The name of the global attribute in the netCDF output file. */
+#define FIRST_ATT_NAME "willy_gatt_test_intercomm"
+#define ATT_NAME "gatt_test_intercomm"
+#define SHORT_ATT_NAME "short_gatt_test_intercomm"
+#define FLOAT_ATT_NAME "float_gatt_test_intercomm"
+#define DOUBLE_ATT_NAME "double_gatt_test_intercomm"
+
+/** The value of the global attribute in the netCDF output file. */
+#define ATT_VALUE 42
+
+/** Error code for when things go wrong. */
+#define ERR_AWFUL 1111
+#define ERR_WRONG 2222
+
+/** Handle MPI errors. This should only be used with MPI library
+ * function calls. */
+#define MPIERR(e) do {                                                  \
+        MPI_Error_string(e, err_buffer, &resultlen);                    \
+        fprintf(stderr, "MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \
+        MPI_Finalize();                                                 \
+        return ERR_AWFUL;                                               \
+    } while (0)
+
+/** Handle non-MPI errors by finalizing the MPI library and exiting
+ * with an exit code. */
+#define ERR(e) do {                                                     \
+        fprintf(stderr, "Error %d in %s, line %d\n", e, __FILE__, __LINE__); \
+        MPI_Finalize();                                                 \
+        return e;                                                       \
+    } while (0)
+
+/** Global err buffer for MPI. When there is an MPI error, this buffer
+ * is used to store the error message that is associated with the MPI
+ * error. */
+char err_buffer[MPI_MAX_ERROR_STRING];
+
+/** This is the length of the most recent MPI error message, stored
+ * in the global error string. */
+int resultlen;
+
+/* Check the file for correctness. */
+int
+check_file(int iosysid, int format, char *filename, int my_rank, int verbose)
+{
+    int ncid;
+    int ret;
+    int ndims, nvars, ngatts, unlimdimid;
+    int ndims2, nvars2, ngatts2, unlimdimid2;
+    int dimid2;
+    char dimname[NC_MAX_NAME + 1];
+    PIO_Offset dimlen;
+    char dimname2[NC_MAX_NAME + 1];
+    PIO_Offset dimlen2;
+    char varname[NC_MAX_NAME + 1];
+    nc_type vartype;
+    int varndims, vardimids, varnatts;
+    char varname2[NC_MAX_NAME + 1];
+    nc_type vartype2;
+    int varndims2, vardimids2, varnatts2;
+    int varid2;
+    int att_data;
+    short short_att_data;
+    float float_att_data;
+    double double_att_data;
+
+    /* Re-open the file to check it. */
+    if (verbose)
+        printf("%d test_intercomm opening file %s format %d\n", my_rank, filename, format);
+    if ((ret = PIOc_openfile(iosysid, &ncid, &format, filename,
+                             NC_NOWRITE)))
+        ERR(ret);
+
+    /* Try to read the data.
*/ + PIO_Offset start[NDIM] = {0}, count[NDIM] = {DIM_LEN}; + int data_in[DIM_LEN]; + if ((ret = PIOc_get_vars_tc(ncid, 0, start, count, NULL, NC_INT, data_in))) + ERR(ret); + for (int i = 0; i < DIM_LEN; i++) + { + if (verbose) + printf("%d test_intercomm read data_in[%d] = %d\n", my_rank, i, data_in[i]); + if (data_in[i] != i) + ERR(ERR_AWFUL); + } + + /* Find the number of dimensions, variables, and global attributes.*/ + if ((ret = PIOc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid))) + ERR(ret); + if (ndims != 1 || nvars != 1 || ngatts != 4 || unlimdimid != -1) + ERR(ERR_WRONG); + + /* This should return PIO_NOERR. */ + if ((ret = PIOc_inq(ncid, NULL, NULL, NULL, NULL))) + ERR(ret); + + /* Check the other functions that get these values. */ + if ((ret = PIOc_inq_ndims(ncid, &ndims2))) + ERR(ret); + if (ndims2 != 1) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_nvars(ncid, &nvars2))) + ERR(ret); + if (nvars2 != 1) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_natts(ncid, &ngatts2))) + ERR(ret); + if (ngatts2 != 4) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_unlimdim(ncid, &unlimdimid2))) + ERR(ret); + if (unlimdimid != -1) + ERR(ERR_WRONG); + /* Should succeed, do nothing. */ + if ((ret = PIOc_inq_unlimdim(ncid, NULL))) + ERR(ret); + + /* Check out the dimension. */ + if ((ret = PIOc_inq_dim(ncid, 0, dimname, &dimlen))) + ERR(ret); + if (strcmp(dimname, DIM_NAME) || dimlen != DIM_LEN) + ERR(ERR_WRONG); + + /* Check the other functions that get these values. */ + if ((ret = PIOc_inq_dimname(ncid, 0, dimname2))) + ERR(ret); + if (strcmp(dimname2, DIM_NAME)) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_dimlen(ncid, 0, &dimlen2))) + ERR(ret); + if (dimlen2 != DIM_LEN) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_dimid(ncid, DIM_NAME, &dimid2))) + ERR(ret); + if (dimid2 != 0) + ERR(ERR_WRONG); + + /* Check out the variable. */ + if ((ret = PIOc_inq_var(ncid, 0, varname, &vartype, &varndims, &vardimids, &varnatts))) + ERR(ret); + if (strcmp(varname, VAR_NAME) || vartype != NC_INT || varndims != NDIM || + vardimids != 0 || varnatts != 0) + ERR(ERR_WRONG); + + /* Check the other functions that get these values. */ + if ((ret = PIOc_inq_varname(ncid, 0, varname2))) + ERR(ret); + if (strcmp(varname2, VAR_NAME)) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_vartype(ncid, 0, &vartype2))) + ERR(ret); + if (vartype2 != NC_INT) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_varndims(ncid, 0, &varndims2))) + ERR(ret); + if (varndims2 != NDIM) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_vardimid(ncid, 0, &vardimids2))) + ERR(ret); + if (vardimids2 != 0) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_varnatts(ncid, 0, &varnatts2))) + ERR(ret); + if (varnatts2 != 0) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_varid(ncid, VAR_NAME, &varid2))) + ERR(ret); + if (varid2 != 0) + ERR(ERR_WRONG); + + /* Check out the global attributes. 
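The read-back at the top of check_file() uses PIOc_get_vars_tc(), the type-converting read that underlies the async message layer: start/count select the hyperslab, a NULL stride means unit stride, and the xtype argument names the in-memory type to convert to. Isolated as a helper (helper name ours):

    #include <pio.h>

    /* Read the first len ints of a variable from an open file. */
    int read_int_var(int ncid, int varid, int len, int *data)
    {
        PIO_Offset start[1] = {0};
        PIO_Offset count[1] = {len};

        /* NULL stride requests contiguous (stride 1) access. */
        return PIOc_get_vars_tc(ncid, varid, start, count, NULL, NC_INT, data);
    }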
*/ + nc_type atttype; + PIO_Offset attlen; + char myattname[NC_MAX_NAME + 1]; + int myid; + if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, ATT_NAME, &atttype, &attlen))) + ERR(ret); + if (atttype != NC_INT || attlen != 1) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_attlen(ncid, NC_GLOBAL, ATT_NAME, &attlen))) + ERR(ret); + if (attlen != 1) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_attname(ncid, NC_GLOBAL, 0, myattname))) + ERR(ret); + if (strcmp(ATT_NAME, myattname)) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_attid(ncid, NC_GLOBAL, ATT_NAME, &myid))) + ERR(ret); + if (myid != 0) + ERR(ERR_WRONG); + if ((ret = PIOc_get_att_int(ncid, NC_GLOBAL, ATT_NAME, &att_data))) + ERR(ret); + if (verbose) + printf("%d test_intercomm att_data = %d\n", my_rank, att_data); + if (att_data != ATT_VALUE) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, SHORT_ATT_NAME, &atttype, &attlen))) + ERR(ret); + if (atttype != NC_SHORT || attlen != 1) + ERR(ERR_WRONG); + if ((ret = PIOc_get_att_short(ncid, NC_GLOBAL, SHORT_ATT_NAME, &short_att_data))) + ERR(ret); + if (short_att_data != ATT_VALUE) + ERR(ERR_WRONG); + if ((ret = PIOc_get_att_float(ncid, NC_GLOBAL, FLOAT_ATT_NAME, &float_att_data))) + ERR(ret); + if (float_att_data != ATT_VALUE) + ERR(ERR_WRONG); + if ((ret = PIOc_get_att_double(ncid, NC_GLOBAL, DOUBLE_ATT_NAME, &double_att_data))) + ERR(ret); + if (double_att_data != ATT_VALUE) + ERR(ERR_WRONG); + + + /* Close the file. */ + if (verbose) + printf("%d test_intercomm closing file (again) ncid = %d\n", my_rank, ncid); + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + return 0; +} + +/** Run Tests for Init_Intercomm + * + * @param argc argument count + * @param argv array of arguments + */ +int +main(int argc, char **argv) +{ + int verbose = 1; + + /** Zero-based rank of processor. */ + int my_rank; + + /** Number of processors involved in current execution. */ + int ntasks; + + /** Different output flavors. */ + int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, + PIO_IOTYPE_NETCDF, + PIO_IOTYPE_NETCDF4C, + PIO_IOTYPE_NETCDF4P}; + + /** Names for the output files. */ + char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_intercomm_pnetcdf.nc", + "test_intercomm_classic.nc", + "test_intercomm_serial4.nc", + "test_intercomm_parallel4.nc"}; + + /** The ID for the parallel I/O system. */ + int iosysid; + + /** The ncid of the netCDF file. */ + int ncid; + + /** The ID of the netCDF varable. */ + int varid; + + /** Return code. */ + int ret; + + /** Index for loops. */ + int fmt, d, d1, i; + +#ifdef TIMING + /* Initialize the GPTL timing library. */ + if ((ret = GPTLinitialize ())) + return ret; +#endif + + /* Initialize MPI. */ + if ((ret = MPI_Init(&argc, &argv))) + MPIERR(ret); + + /* Learn my rank and the total number of processors. */ + if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks))) + MPIERR(ret); + + /* Check that a valid number of processors was specified. 
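The attribute checks near the end of check_file() rely on netCDF's automatic type conversion: each PIOc_get_att_* call converts the stored value to the requested in-memory type, which is why comparing the short, float, and double attributes against the integer ATT_VALUE is safe for a small value like 42. The round trip for a single attribute (attribute name hypothetical; assumes the file is still in define mode):

    #include <pio.h>

    /* Write an int attribute, read it back as double; the library converts. */
    int att_round_trip(int ncid)
    {
        int val = 42;
        double val_in;
        int ret;

        if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, "my_att", NC_INT, 1, &val)))
            return ret;
        if ((ret = PIOc_get_att_double(ncid, NC_GLOBAL, "my_att", &val_in)))
            return ret;
        return val_in == 42.0 ? PIO_NOERR : 1;
    }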
*/ + if (ntasks != 4) + { + fprintf(stderr, "test_intercomm Number of processors must be exactly 4!\n"); + ERR(ERR_AWFUL); + } + if (verbose) + printf("%d: test_intercomm ParallelIO Library test_intercomm running on %d processors.\n", + my_rank, ntasks); + + /* For example, if I have 4 processors, and I want to have 2 of them be computational, */ + /* and 2 of them be IO: component count is 1 */ + /* peer_comm = MPI_COMM_WORLD */ + /* comp_comms is an array of comms of size 1 with a comm defined just over tasks (0,1) */ + /* io_comm is a comm over tasks (2,3) */ + + /* Initialize the PIO IO system. This specifies how many and which + * processors are involved in I/O. */ +#define COMPONENT_COUNT 1 + MPI_Comm comp_comms; + MPI_Comm io_comm; + + /* Tasks 0 and 1 will be computational. Tasks 2 and 3 will be I/O + * tasks. */ + int comp_task; + + int color = my_rank < 2 ? 0 : 1; // Determine color based on row + + // Split the communicator based on the color and use the + // original rank for ordering + MPI_Comm row_comm; + MPI_Comm_split(MPI_COMM_WORLD, color, my_rank, &row_comm); + + int row_rank, row_size; + MPI_Comm_rank(row_comm, &row_rank); + MPI_Comm_size(row_comm, &row_size); + + printf("WORLD RANK: %d \t ROW RANK/SIZE: %d/%d\n", + my_rank, row_rank, row_size); + if (my_rank == 0 || my_rank == 1) + { + /* We will define comp_comm. The io_comm will get null. */ + io_comm = MPI_COMM_NULL; + comp_comms = row_comm; + comp_task = 1; + if (verbose) + printf("%d added to the comp_comm\n", my_rank); + } + else + { + /* We will define io_comm. The comp_comms array will get nulls. */ + comp_comms = MPI_COMM_NULL; + io_comm = row_comm; + comp_task = 0; + if (verbose) + printf("%d added to the io_comm\n", my_rank); + } + + /* Turn on logging. */ + if ((ret = PIOc_set_log_level(3))) + ERR(ret); + + /* Initialize the async setup. */ + if ((ret = PIOc_Init_Intercomm(COMPONENT_COUNT, MPI_COMM_WORLD, &comp_comms, + io_comm, &iosysid))) + ERR(ret); + if (verbose) + printf("%d test_intercomm init intercomm returned %d iosysid = %d\n", my_rank, ret, + iosysid); + + /* All the netCDF calls are only executed on the computation + * tasks. The IO tasks have not returned from PIOc_Init_Intercomm, + * and when the do, they should go straight to finalize. */ + if (comp_task) + { + for (int fmt = 0; fmt < NUM_NETCDF_FLAVORS; fmt++) + { + int ncid, varid, dimid; + PIO_Offset start[NDIM], count[NDIM] = {0}; + int data[DIM_LEN]; + + /* Create a netCDF file with one dimension and one variable. */ + if (verbose) + printf("%d test_intercomm creating file %s\n", my_rank, filename[fmt]); + if ((ret = PIOc_createfile(iosysid, &ncid, &format[fmt], filename[fmt], + NC_CLOBBER))) + ERR(ret); + if (verbose) + printf("%d test_intercomm file created ncid = %d\n", my_rank, ncid); + + /* /\* End define mode, then re-enter it. *\/ */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + if (verbose) + printf("%d test_intercomm calling redef\n", my_rank); + if ((ret = PIOc_redef(ncid))) + ERR(ret); + + /* Test the inq_format function. */ + int myformat; + if ((ret = PIOc_inq_format(ncid, &myformat))) + ERR(ret); + if ((format[fmt] == PIO_IOTYPE_PNETCDF || format[fmt] == PIO_IOTYPE_NETCDF) && + myformat != 1) + ERR(ERR_AWFUL); + else if ((format[fmt] == PIO_IOTYPE_NETCDF4C || format[fmt] == PIO_IOTYPE_NETCDF4P) && + myformat != 3) + ERR(ERR_AWFUL); + + /* Test the inq_type function for atomic types. 
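The color/key split above is the heart of the intercomm setup: color decides which sub-communicator a rank joins, and the key (here the world rank) fixes the rank ordering within it. The split in isolation, for the 4-task layout the comments describe:

    #include <stdio.h>
    #include <mpi.h>

    /* Split MPI_COMM_WORLD into a "computation" group (world ranks 0,1)
     * and an "I/O" group (world ranks 2,3). */
    int main(int argc, char **argv)
    {
        int world_rank, sub_rank;
        MPI_Comm sub_comm;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

        int color = world_rank < 2 ? 0 : 1;  /* 0 = compute, 1 = I/O */
        MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &sub_comm);
        MPI_Comm_rank(sub_comm, &sub_rank);
        printf("world rank %d -> group %d, group rank %d\n",
               world_rank, color, sub_rank);

        MPI_Comm_free(&sub_comm);
        MPI_Finalize();
        return 0;
    }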
*/ + char type_name[NC_MAX_NAME + 1]; + PIO_Offset type_size; + #define NUM_TYPES 11 + nc_type xtype[NUM_TYPES] = {NC_CHAR, NC_BYTE, NC_SHORT, NC_INT, NC_FLOAT, NC_DOUBLE, + NC_UBYTE, NC_USHORT, NC_UINT, NC_INT64, NC_UINT64}; + int type_len[NUM_TYPES] = {1, 1, 2, 4, 4, 8, 1, 2, 4, 8, 8}; + int max_type = format[fmt] == PIO_IOTYPE_NETCDF ? NC_DOUBLE : NC_UINT64; + for (int i = 0; i < max_type; i++) + { + if ((ret = PIOc_inq_type(ncid, xtype[i], type_name, &type_size))) + ERR(ret); + if (type_size != type_len[i]) + ERR(ERR_AWFUL); + } + + /* Define a dimension. */ + char dimname2[NC_MAX_NAME + 1]; + if (verbose) + printf("%d test_intercomm defining dimension %s\n", my_rank, DIM_NAME); + if ((ret = PIOc_def_dim(ncid, FIRST_DIM_NAME, DIM_LEN, &dimid))) + ERR(ret); + if ((ret = PIOc_inq_dimname(ncid, 0, dimname2))) + ERR(ret); + if (strcmp(dimname2, FIRST_DIM_NAME)) + ERR(ERR_WRONG); + if ((ret = PIOc_rename_dim(ncid, 0, DIM_NAME))) + ERR(ret); + + /* Define a 1-D variable. */ + char varname2[NC_MAX_NAME + 1]; + if (verbose) + printf("%d test_intercomm defining variable %s\n", my_rank, VAR_NAME); + if ((ret = PIOc_def_var(ncid, FIRST_VAR_NAME, NC_INT, NDIM, &dimid, &varid))) + ERR(ret); + if ((ret = PIOc_inq_varname(ncid, 0, varname2))) + ERR(ret); + if (strcmp(varname2, FIRST_VAR_NAME)) + ERR(ERR_WRONG); + if ((ret = PIOc_rename_var(ncid, 0, VAR_NAME))) + ERR(ret); + + /* Add a global attribute. */ + if (verbose) + printf("%d test_intercomm writing attributes %s\n", my_rank, ATT_NAME); + int att_data = ATT_VALUE; + short short_att_data = ATT_VALUE; + float float_att_data = ATT_VALUE; + double double_att_data = ATT_VALUE; + char attname2[NC_MAX_NAME + 1]; + /* Write an att and rename it. */ + if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, FIRST_ATT_NAME, NC_INT, 1, &att_data))) + ERR(ret); + if ((ret = PIOc_inq_attname(ncid, NC_GLOBAL, 0, attname2))) + ERR(ret); + if (strcmp(attname2, FIRST_ATT_NAME)) + ERR(ERR_WRONG); + if ((ret = PIOc_rename_att(ncid, NC_GLOBAL, FIRST_ATT_NAME, ATT_NAME))) + ERR(ret); + + /* Write an att and delete it. */ + nc_type myatttype; + if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, FIRST_ATT_NAME, NC_INT, 1, &att_data))) + ERR(ret); + if ((ret = PIOc_del_att(ncid, NC_GLOBAL, FIRST_ATT_NAME))) + ERR(ret); + /* if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, FIRST_ATT_NAME, NULL, NULL)) != PIO_ENOTATT) */ + /* { */ + /* printf("ret = %d\n", ret); */ + /* ERR(ERR_AWFUL); */ + /* } */ + + /* Write some atts of different types. */ + if ((ret = PIOc_put_att_short(ncid, NC_GLOBAL, SHORT_ATT_NAME, NC_SHORT, 1, &short_att_data))) + ERR(ret); + if ((ret = PIOc_put_att_float(ncid, NC_GLOBAL, FLOAT_ATT_NAME, NC_FLOAT, 1, &float_att_data))) + ERR(ret); + if ((ret = PIOc_put_att_double(ncid, NC_GLOBAL, DOUBLE_ATT_NAME, NC_DOUBLE, 1, &double_att_data))) + ERR(ret); + + /* End define mode. */ + if (verbose) + printf("%d test_intercomm ending define mode ncid = %d\n", my_rank, ncid); + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Write some data. For the PIOc_put/get functions, all + * data must be on compmaster before the function is + * called. Only compmaster's arguments are passed to the + * async msg handler. All other computation tasks are + * ignored. 
*/
+            for (int i = 0; i < DIM_LEN; i++)
+                data[i] = i;
+            if (verbose)
+                printf("%d test_intercomm writing data\n", my_rank);
+            start[0] = 0;
+            count[0] = DIM_LEN;
+            if ((ret = PIOc_put_vars_tc(ncid, varid, start, count, NULL, NC_INT, data)))
+                ERR(ret);
+
+            /* Close the file. */
+            if (verbose)
+                printf("%d test_intercomm closing file ncid = %d\n", my_rank, ncid);
+            if ((ret = PIOc_closefile(ncid)))
+                ERR(ret);
+
+            /* Check the file for correctness. */
+            if ((ret = check_file(iosysid, format[fmt], filename[fmt], my_rank, verbose)))
+                ERR(ret);
+
+            /* Now delete the file. */
+            /* if ((ret = PIOc_deletefile(iosysid, filename[fmt]))) */
+            /*     ERR(ret); */
+            /* if ((ret = PIOc_openfile(iosysid, &ncid, &format[fmt], filename[fmt], */
+            /*                          NC_NOWRITE)) != PIO_ENFILE) */
+            /*     ERR(ERR_AWFUL); */
+
+        } /* next netcdf format flavor */
+    }
+
+    /* Free local MPI resources. */
+    if (verbose)
+        printf("%d test_intercomm Freeing local MPI resources...\n", my_rank);
+    if (comp_task)
+    {
+        MPI_Comm_free(&comp_comms);
+    }
+    else
+    {
+        MPI_Comm_free(&io_comm);
+    }
+
+    /* Finalize the IO system. */
+    if (verbose)
+        printf("%d test_intercomm Freeing PIO resources...\n", my_rank);
+    if ((ret = PIOc_finalize(iosysid)))
+        ERR(ret);
+
+    /* Finalize the MPI library. */
+    MPI_Finalize();
+
+#ifdef TIMING
+    /* Finalize the GPTL timing library. */
+    if ((ret = GPTLfinalize()))
+        return ret;
+#endif
+
+    if (verbose)
+        printf("%d test_intercomm SUCCESS!!\n", my_rank);
+
+
+    return 0;
+}
diff --git a/externals/pio2/tests/unit/test_names.c b/externals/pio2/tests/unit/test_names.c
new file mode 100644
index 00000000000..b9684d03628
--- /dev/null
+++ b/externals/pio2/tests/unit/test_names.c
@@ -0,0 +1,466 @@
+/**
+ * @file Tests for names of vars, atts, and dims. Also test the
+ * PIOc_strerror() function.
+ *
+ */
+#include <pio.h>
+#ifdef TIMING
+#include <gptl.h>
+#endif
+
+#define NUM_NETCDF_FLAVORS 4
+#define NDIM 3
+#define X_DIM_LEN 400
+#define Y_DIM_LEN 400
+#define NUM_TIMESTEPS 6
+#define VAR_NAME "foo"
+#define ATT_NAME "bar"
+#define START_DATA_VAL 42
+#define ERR_AWFUL 1111
+#define VAR_CACHE_SIZE (1024 * 1024)
+#define VAR_CACHE_NELEMS 10
+#define VAR_CACHE_PREEMPTION 0.5
+
+/** Handle MPI errors. This should only be used with MPI library
+ * function calls. */
+#define MPIERR(e) do {                                                  \
+        MPI_Error_string(e, err_buffer, &resultlen);                    \
+        fprintf(stderr, "MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \
+        MPI_Finalize();                                                 \
+        return ERR_AWFUL;                                               \
+    } while (0)
+
+/** Handle non-MPI errors by finalizing the MPI library and exiting
+ * with an exit code. */
+#define ERR(e) do {                                                     \
+        fprintf(stderr, "Error %d in %s, line %d\n", e, __FILE__, __LINE__); \
+        MPI_Finalize();                                                 \
+        return e;                                                       \
+    } while (0)
+
+/** Global err buffer for MPI. */
+char err_buffer[MPI_MAX_ERROR_STRING];
+int resultlen;
+
+/** The dimension names. */
+char dim_name[NDIM][NC_MAX_NAME + 1] = {"timestep", "x", "y"};
+
+/** Length of the dimensions in the sample data. */
+int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN};
+
+/** Length of chunksizes to use in netCDF-4 files. */
+size_t chunksize[NDIM] = {2, X_DIM_LEN/2, Y_DIM_LEN/2};
+
+/** Check the dimension names.
+ *
+ * @param my_rank rank of process
+ * @param ncid ncid of open netCDF file
+ *
+ * @returns 0 for success, error code otherwise.
*/ +int +check_dim_names(int my_rank, int ncid, int verbose) +{ + char dim_name[NC_MAX_NAME + 1]; + char zero_dim_name[NC_MAX_NAME + 1]; + int ret; + + for (int d = 0; d < NDIM; d++) + { + strcpy(dim_name, "11111111111111111111111111111111"); + if ((ret = PIOc_inq_dimname(ncid, d, dim_name))) + return ret; + if (verbose) + printf("my_rank %d dim %d name %s\n", my_rank, d, dim_name); + + /* Did other ranks get the same name? */ + if (!my_rank) + strcpy(zero_dim_name, dim_name); + /* if (verbose) */ + /* printf("rank %d dim_name %s zero_dim_name %s\n", my_rank, dim_name, zero_dim_name); */ + if ((ret = MPI_Bcast(&zero_dim_name, strlen(dim_name) + 1, MPI_CHAR, 0, + MPI_COMM_WORLD))) + MPIERR(ret); + if (strcmp(dim_name, zero_dim_name)) + return ERR_AWFUL; + } + return 0; +} + +/** Check the variable name. + * + * @param my_rank rank of process + * @param ncid ncid of open netCDF file + * + * @returns 0 for success, error code otherwise. */ +int +check_var_name(int my_rank, int ncid, int verbose) +{ + char var_name[NC_MAX_NAME + 1]; + char zero_var_name[NC_MAX_NAME + 1]; + int ret; + + strcpy(var_name, "11111111111111111111111111111111"); + if ((ret = PIOc_inq_varname(ncid, 0, var_name))) + return ret; + if (verbose) + printf("my_rank %d var name %s\n", my_rank, var_name); + + /* Did other ranks get the same name? */ + if (!my_rank) + strcpy(zero_var_name, var_name); + if ((ret = MPI_Bcast(&zero_var_name, strlen(var_name) + 1, MPI_CHAR, 0, + MPI_COMM_WORLD))) + MPIERR(ret); + if (strcmp(var_name, zero_var_name)) + return ERR_AWFUL; + return 0; +} + +/** Check the attribute name. + * + * @param my_rank rank of process + * @param ncid ncid of open netCDF file + * + * @returns 0 for success, error code otherwise. */ +int +check_att_name(int my_rank, int ncid, int verbose) +{ + char att_name[NC_MAX_NAME + 1]; + char zero_att_name[NC_MAX_NAME + 1]; + int ret; + + strcpy(att_name, "11111111111111111111111111111111"); + if ((ret = PIOc_inq_attname(ncid, NC_GLOBAL, 0, att_name))) + return ret; + if (verbose) + printf("my_rank %d att name %s\n", my_rank, att_name); + + /* Did everyone ranks get the same length name? */ +/* if (strlen(att_name) != strlen(ATT_NAME)) + return ERR_AWFUL;*/ + if (!my_rank) + strcpy(zero_att_name, att_name); + if ((ret = MPI_Bcast(&zero_att_name, strlen(att_name) + 1, MPI_CHAR, 0, + MPI_COMM_WORLD))) + MPIERR(ret); + if (strcmp(att_name, zero_att_name)) + return ERR_AWFUL; + return 0; +} + +/** Check the PIOc_strerror() function. + * + * @param my_rank the rank of this process. + * @param verbose true to get printfs on stdout. + * + * @return 0 for success, error code otherwise. + */ +int +check_strerror(int my_rank, int verbose) { + +#define NUM_TRIES 6 + char errstr[PIO_MAX_NAME + 1]; + int errcode[NUM_TRIES] = {PIO_EBADID, + NC_ENOTNC3, NC4_LAST_ERROR - 1, 0, 1, + PIO_EBADIOTYPE}; + const char *expected[NUM_TRIES] = {"NetCDF: Not a valid ID", + "NetCDF: Attempting netcdf-3 operation on netcdf-4 file", + "unknown PIO error", "No error", + nc_strerror(1), "Bad IO type"}; + int ret = PIO_NOERR; + + for (int try = 0; try < NUM_TRIES; try++) + { + char result[PIO_MAX_NAME]; + + /* Get the error string for this errcode. */ + PIOc_strerror(errcode[try], errstr); + + /* Check that it was as expected. */ + if (strcmp(errstr, expected[try])) + ret = ERR_AWFUL; + + /* Print some output to stdout if required. 
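One subtlety in the three name checks above: the MPI_Bcast count comes from each rank's own strlen(), so they implicitly assume every rank already holds a same-length string (the commented-out length check hints at the issue). A more defensive pattern, sketched here but not part of the patch, broadcasts the length from rank 0 first:

    #include <string.h>
    #include <mpi.h>

    /* Broadcast rank 0's string so every rank can compare against it. */
    static void bcast_string(char *str, int max_len, MPI_Comm comm)
    {
        int rank, len = 0;

        MPI_Comm_rank(comm, &rank);
        if (rank == 0)
            len = (int)strlen(str) + 1;   /* include the terminating NUL */
        MPI_Bcast(&len, 1, MPI_INT, 0, comm);
        if (len <= max_len)
            MPI_Bcast(str, len, MPI_CHAR, 0, comm);
    }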
*/ + if (verbose) + { + printf("%d: PIO strerror(%d) = %s\n", my_rank, errcode[try], + errstr); + strcpy(errstr, nc_strerror(errcode[try])); + printf("%d: netCDF strerror(%d) = %s\n", my_rank, errcode[try], + errstr); + } + } + + return ret; +} + +/** Run Tests for NetCDF-4 Functions. + * + * @param argc argument count + * @param argv array of arguments + */ +int +main(int argc, char **argv) +{ + int verbose = 1; + + /** Zero-based rank of processor. */ + int my_rank; + + /** Number of processors involved in current execution. */ + int ntasks; + + /** Specifies the flavor of netCDF output format. */ + int iotype; + + /** Different output flavors. */ + int format[NUM_NETCDF_FLAVORS] = {PIO_IOTYPE_PNETCDF, + PIO_IOTYPE_NETCDF, + PIO_IOTYPE_NETCDF4C, + PIO_IOTYPE_NETCDF4P}; + + /** Names for the output files. */ + char filename[NUM_NETCDF_FLAVORS][NC_MAX_NAME + 1] = {"test_names_pnetcdf.nc", + "test_names_classic.nc", + "test_names_serial4.nc", + "test_names_parallel4.nc"}; + + /** Number of processors that will do IO. In this test we + * will do IO from all processors. */ + int niotasks; + + /** Stride in the mpi rank between io tasks. Always 1 in this + * test. */ + int ioproc_stride = 1; + + /** Number of the aggregator? Always 0 in this test. */ + int numAggregator = 0; + + /** Zero based rank of first processor to be used for I/O. */ + int ioproc_start = 0; + + /** The dimension IDs. */ + int dimids[NDIM]; + + /** Array index per processing unit. */ + PIO_Offset elements_per_pe; + + /** The ID for the parallel I/O system. */ + int iosysid; + + /** The ncid of the netCDF file. */ + int ncid = 0; + + /** The ID of the netCDF varable. */ + int varid; + + /** Storage of netCDF-4 files (contiguous vs. chunked). */ + int storage; + + /** Chunksizes set in the file. */ + size_t my_chunksize[NDIM]; + + /** The shuffle filter setting in the netCDF-4 test file. */ + int shuffle; + + /** Non-zero if deflate set for the variable in the netCDF-4 test file. */ + int deflate; + + /** The deflate level set for the variable in the netCDF-4 test file. */ + int deflate_level; + + /** Non-zero if fletcher32 filter is used for variable. */ + int fletcher32; + + /** Endianness of variable. */ + int endianness; + + /* Size of the file chunk cache. */ + size_t chunk_cache_size; + + /* Number of elements in file cache. */ + size_t nelems; + + /* File cache preemption. */ + float preemption; + + /* Size of the var chunk cache. */ + size_t var_cache_size; + + /* Number of elements in var cache. */ + size_t var_cache_nelems; + + /* Var cache preemption. */ + float var_cache_preemption; + + /** The I/O description ID. */ + int ioid; + + /** A buffer for sample data. */ + float *buffer; + + /** A buffer for reading data back from the file. */ + int *read_buffer; + + /** The decomposition mapping. */ + PIO_Offset *compdof; + + /** Return code. */ + int ret; + + /** Index for loops. */ + int fmt, d, d1, i; + +#ifdef TIMING + /* Initialize the GPTL timing library. */ + if ((ret = GPTLinitialize ())) + return ret; +#endif + + /* Initialize MPI. */ + if ((ret = MPI_Init(&argc, &argv))) + MPIERR(ret); + + /* Learn my rank and the total number of processors. */ + if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks))) + MPIERR(ret); + + /* Check that a valid number of processors was specified. 
*/ + if (!(ntasks == 1 || ntasks == 2 || ntasks == 4 || + ntasks == 8 || ntasks == 16)) + fprintf(stderr, "Number of processors must be 1, 2, 4, 8, or 16!\n"); + if (verbose) + printf("%d: ParallelIO Library example1 running on %d processors.\n", + my_rank, ntasks); + + /* Check the error string function. */ + if ((ret = check_strerror(my_rank, verbose))) + ERR(ret); + + /* keep things simple - 1 iotask per MPI process */ + niotasks = ntasks; + + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(MPI_COMM_WORLD, niotasks, ioproc_stride, + ioproc_start, PIO_REARR_SUBSET, &iosysid))) + ERR(ret); + + /* Describe the decomposition. This is a 1-based array, so add 1! */ + elements_per_pe = X_DIM_LEN * Y_DIM_LEN / ntasks; + if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + for (i = 0; i < elements_per_pe; i++) { + compdof[i] = my_rank * elements_per_pe + i + 1; + } + + /* Create the PIO decomposition for this test. */ + if (verbose) + printf("rank: %d Creating decomposition...\n", my_rank); + if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, &dim_len[1], (PIO_Offset)elements_per_pe, + compdof, &ioid, NULL, NULL, NULL))) + ERR(ret); + free(compdof); + + /* How many flavors will we be running for? */ + int num_flavors = 0; + int fmtidx = 0; +#ifdef _PNETCDF + num_flavors++; + format[fmtidx++] = PIO_IOTYPE_PNETCDF; +#endif +#ifdef _NETCDF + num_flavors++; + format[fmtidx++] = PIO_IOTYPE_NETCDF; +#endif +#ifdef _NETCDF4 + num_flavors += 2; + format[fmtidx++] = PIO_IOTYPE_NETCDF4C; + format[fmtidx] = PIO_IOTYPE_NETCDF4P; +#endif + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (fmt = 0; fmt < num_flavors; fmt++) + { + /* Create the netCDF output file. */ + if (verbose) + printf("rank: %d Creating sample file %s with format %d...\n", + my_rank, filename[fmt], format[fmt]); + if ((ret = PIOc_createfile(iosysid, &ncid, &(format[fmt]), filename[fmt], + PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimensions and variable. */ + if (verbose) + printf("rank: %d Defining netCDF metadata...\n", my_rank); + for (d = 0; d < NDIM; d++) { + if (verbose) + printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank, + dim_name[d], dim_len[d]); + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); + } + + /* Check the dimension names. */ + if ((ret = check_dim_names(my_rank, ncid, verbose))) + ERR(ret); + + /* Define a global attribute. */ + int att_val = 42; + if ((ret = PIOc_put_att_int(ncid, NC_GLOBAL, ATT_NAME, NC_INT, 1, &att_val))) + ERR(ret); + + /* Check the attribute name. */ + if ((ret = check_att_name(my_rank, ncid, verbose))) + ERR(ret); + + /* Define a variable. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid))) + ERR(ret); + + /* Check the variable name. */ + if ((ret = check_var_name(my_rank, ncid, verbose))) + ERR(ret); + + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Close the netCDF file. */ + if (verbose) + printf("rank: %d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Put a barrier here to make verbose output look better. */ + if ((ret = MPI_Barrier(MPI_COMM_WORLD))) + MPIERR(ret); + + } + + /* Free the PIO decomposition. */ + if (verbose) + printf("rank: %d Freeing PIO decomposition...\n", my_rank); + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Finalize the IO system. 
*/ + if (verbose) + printf("rank: %d Freeing PIO resources...\n", my_rank); + if ((ret = PIOc_finalize(iosysid))) + ERR(ret); + + /* Finalize the MPI library. */ + MPI_Finalize(); + +#ifdef TIMING + /* Finalize the GPTL timing library. */ + if ((ret = GPTLfinalize ())) + return ret; +#endif + + + return 0; +} diff --git a/externals/pio2/tests/unit/test_nc4.c b/externals/pio2/tests/unit/test_nc4.c index b748f7a420f..3d464bb660d 100644 --- a/externals/pio2/tests/unit/test_nc4.c +++ b/externals/pio2/tests/unit/test_nc4.c @@ -81,7 +81,7 @@ char dim_name[NDIM][NC_MAX_NAME + 1] = {"timestep", "x", "y"}; int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; /** Length of chunksizes to use in netCDF-4 files. */ -size_t chunksize[NDIM] = {2, X_DIM_LEN/2, Y_DIM_LEN/2}; +PIO_Offset chunksize[NDIM] = {2, X_DIM_LEN/2, Y_DIM_LEN/2}; /** Run Tests for NetCDF-4 Functions. * @@ -147,7 +147,7 @@ main(int argc, char **argv) int storage; /** Chunksizes set in the file. */ - size_t my_chunksize[NDIM]; + PIO_Offset my_chunksize[NDIM]; /** The shuffle filter setting in the netCDF-4 test file. */ int shuffle; @@ -189,13 +189,13 @@ main(int argc, char **argv) int fmt, d, d1, i; /** For setting the chunk cache. */ - size_t chunk_cache_size = 1024*1024; - size_t chunk_cache_nelems = 1024; + PIO_Offset chunk_cache_size = 1024*1024; + PIO_Offset chunk_cache_nelems = 1024; float chunk_cache_preemption = 0.5; /* For reading the chunk cache. */ - size_t chunk_cache_size_in; - size_t chunk_cache_nelems_in; + PIO_Offset chunk_cache_size_in; + PIO_Offset chunk_cache_nelems_in; float chunk_cache_preemption_in; char varname[15];
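The test_nc4.c hunk above moves the chunk-cache variables from size_t to PIO_Offset, PIO's fixed-width offset type, so the values can be forwarded to the I/O tasks through MPI messages without platform-dependent truncation. For context, a usage sketch against the chunk-cache entry points these variables feed; the signatures here are our reading of pio.h at this point in the PR, so treat them as an assumption:

    #include <pio.h>

    /* Set, then read back, the netCDF-4 chunk cache for one iotype. */
    int tune_chunk_cache(int iosysid)
    {
        PIO_Offset size = 1024 * 1024;  /* cache size in bytes */
        PIO_Offset nelems = 1024;       /* number of cache slots */
        float preemption = 0.5;         /* eviction eagerness, 0.0-1.0 */
        PIO_Offset size_in, nelems_in;
        float preemption_in;
        int ret;

        if ((ret = PIOc_set_chunk_cache(iosysid, PIO_IOTYPE_NETCDF4C, size,
                                        nelems, preemption)))
            return ret;
        return PIOc_get_chunk_cache(iosysid, PIO_IOTYPE_NETCDF4C, &size_in,
                                    &nelems_in, &preemption_in);
    }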