diff --git a/.github/workflows/intel-cmake.yml b/.github/workflows/intel-cmake.yml index 780021461f0..4bdda03c79d 100644 --- a/.github/workflows/intel-cmake.yml +++ b/.github/workflows/intel-cmake.yml @@ -58,9 +58,7 @@ jobs: -DCMAKE_TOOLCHAIN_FILE=config/toolchain/intel.cmake \ -DMKL_ROOT="/opt/intel/oneapi/mkl/latest" \ -DTBB_ROOT="/opt/intel/oneapi/tbb/latest" \ - -DBUILD_SZIP_WITH_FETCHCONTENT=ON \ -DLIBAEC_USE_LOCALCONTENT=OFF \ - -DBUILD_ZLIB_WITH_FETCHCONTENT=ON \ -DZLIB_USE_LOCALCONTENT=OFF \ -DHDF5_BUILD_FORTRAN=OFF \ $GITHUB_WORKSPACE diff --git a/.github/workflows/main-cmake.yml b/.github/workflows/main-cmake.yml index 7327d2a8518..0bf383b0dd9 100644 --- a/.github/workflows/main-cmake.yml +++ b/.github/workflows/main-cmake.yml @@ -184,9 +184,7 @@ jobs: -DHDF5_BUILD_FORTRAN=${{ matrix.fortran }} \ -DHDF5_BUILD_JAVA=${{ matrix.java }} \ -DHDF5_BUILD_DOC=${{ matrix.docs }} \ - -DBUILD_SZIP_WITH_FETCHCONTENT=${{ matrix.libaecfc }} \ -DLIBAEC_USE_LOCALCONTENT=${{ matrix.localaec }} \ - -DBUILD_ZLIB_WITH_FETCHCONTENT=${{ matrix.zlibfc }} \ -DZLIB_USE_LOCALCONTENT=${{ matrix.localzlib }} \ -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} \ -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} \ @@ -213,9 +211,7 @@ jobs: -DHDF5_BUILD_JAVA:BOOL=OFF \ -DHDF5_BUILD_HL_LIB:BOOL=OFF \ -DHDF5_BUILD_DOC=OFF \ - -DBUILD_SZIP_WITH_FETCHCONTENT=${{ matrix.libaecfc }} \ -DLIBAEC_USE_LOCALCONTENT=${{ matrix.localaec }} \ - -DBUILD_ZLIB_WITH_FETCHCONTENT=${{ matrix.zlibfc }} \ -DZLIB_USE_LOCALCONTENT=${{ matrix.localzlib }} \ -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} \ -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} \ diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake index 78f98db9384..042bfdc356b 100644 --- a/CMakeFilters.cmake +++ b/CMakeFilters.cmake @@ -13,30 +13,24 @@ option (USE_LIBAEC_STATIC "Use static AEC library " OFF) option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0) option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0) -option (BUILD_ZLIB_WITH_FETCHCONTENT "Use FetchContent to use original source files" OFF) -if (BUILD_ZLIB_WITH_FETCHCONTENT) - set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) - if (NOT ZLIB_USE_LOCALCONTENT) - set (ZLIB_URL ${ZLIB_TGZ_ORIGPATH}/${ZLIB_TGZ_ORIGNAME}) - else () - set (ZLIB_URL ${TGZPATH}/${ZLIB_TGZ_ORIGNAME}) - endif () - if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0") - message (VERBOSE "Filter ZLIB file is ${ZLIB_URL}") - endif () +set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) +if (NOT ZLIB_USE_LOCALCONTENT) + set (ZLIB_URL ${ZLIB_TGZ_ORIGPATH}/${ZLIB_TGZ_NAME}) +else () + set (ZLIB_URL ${TGZPATH}/${ZLIB_TGZ_NAME}) +endif () +if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0") + message (VERBOSE "Filter ZLIB file is ${ZLIB_URL}") endif () -option (BUILD_SZIP_WITH_FETCHCONTENT "Use FetchContent to use original source files" OFF) -if (BUILD_SZIP_WITH_FETCHCONTENT) - set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) - if (NOT LIBAEC_USE_LOCALCONTENT) - set (SZIP_URL ${LIBAEC_TGZ_ORIGPATH}/${LIBAEC_TGZ_ORIGNAME}) - else () - set (SZIP_URL ${TGZPATH}/${LIBAEC_TGZ_ORIGNAME}) - endif () - if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0") - message (VERBOSE "Filter SZIP file is ${SZIP_URL}") - endif () +set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) +if (NOT LIBAEC_USE_LOCALCONTENT) + set (SZIP_URL ${LIBAEC_TGZ_ORIGPATH}/${LIBAEC_TGZ_NAME}) +else () + set (SZIP_URL ${TGZPATH}/${LIBAEC_TGZ_NAME}) +endif () +if (CMAKE_VERSION 
VERSION_GREATER_EQUAL "3.15.0") + message (VERBOSE "Filter SZIP file is ${SZIP_URL}") endif () include (ExternalProject) @@ -55,18 +49,12 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT if (NOT TGZPATH) set (TGZPATH ${HDF5_SOURCE_DIR}) endif () - if (NOT BUILD_ZLIB_WITH_FETCHCONTENT) - set (ZLIB_URL ${TGZPATH}/${ZLIB_TGZ_NAME}) - endif () if (ZLIB_USE_LOCALCONTENT) if (NOT EXISTS "${ZLIB_URL}") set (HDF5_ENABLE_Z_LIB_SUPPORT OFF CACHE BOOL "" FORCE) message (VERBOSE "Filter ZLIB file ${ZLIB_URL} not found") endif () endif () - if (NOT BUILD_SZIP_WITH_FETCHCONTENT) - set (SZIP_URL ${TGZPATH}/${SZAEC_TGZ_NAME}) - endif () if (LIBAEC_USE_LOCALCONTENT) if (NOT EXISTS "${SZIP_URL}") set (HDF5_ENABLE_SZIP_SUPPORT OFF CACHE BOOL "" FORCE) @@ -97,12 +85,7 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT) set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_LIBRARIES}) endif () else () - if (BUILD_ZLIB_WITH_FETCHCONTENT) - # Only tgz files available - ORIGINAL_ZLIB_LIBRARY ("TGZ") - message (VERBOSE "HDF5_ZLIB is built from fetch content") - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_STATIC_LIBRARY}) - elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") + if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") EXTERNAL_ZLIB_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT}) message (VERBOSE "Filter HDF5_ZLIB is built") set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_STATIC_LIBRARY}) @@ -153,13 +136,7 @@ if (HDF5_ENABLE_SZIP_SUPPORT) set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_LIBRARIES}) endif () else () - if (BUILD_SZIP_WITH_FETCHCONTENT) - # Only tgz files available - ORIGINAL_SZIP_LIBRARY ("TGZ" ${HDF5_ENABLE_SZIP_ENCODING}) - message (VERBOSE "SZIP is built from fetch content") - message (VERBOSE "... with library AEC") - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${SZIP_STATIC_LIBRARY}) - elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") + if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") EXTERNAL_SZIP_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT} ${HDF5_ENABLE_SZIP_ENCODING}) message (VERBOSE "Filter SZIP is built") message (VERBOSE "... with library AEC") diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index 51c9c83958d..d42142a6da0 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -335,7 +335,7 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES) set (CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES") set (CPACK_PACKAGE_INSTALL_REGISTRY_KEY "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}") endif () - # set the install/unistall icon used for the installer itself + # set the install/uninstall icon used for the installer itself # There is a bug in NSI that does not handle full unix paths properly. set (CPACK_NSIS_MUI_ICON "${HDF_RESOURCES_DIR}\\\\hdf.ico") set (CPACK_NSIS_MUI_UNIICON "${HDF_RESOURCES_DIR}\\\\hdf.ico") diff --git a/CMakeLists.txt b/CMakeLists.txt index da6a4d1fa8d..6aa467d110b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -450,6 +450,12 @@ include (${HDF_RESOURCES_DIR}/ConfigureChecks.cmake) set (CMAKE_INCLUDE_CURRENT_DIR_IN_INTERFACE ON) +#----------------------------------------------------------------------------- +# Include directories in the source or build tree should come before other +# directories to prioritize headers in the sources over installed ones. 
+#----------------------------------------------------------------------------- +set(CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE ON) + #----------------------------------------------------------------------------- # Mac OS X Options #----------------------------------------------------------------------------- @@ -671,6 +677,12 @@ if (HDF5_ENABLE_PARALLEL) find_package(MPI REQUIRED) if (MPI_C_FOUND) set (H5_HAVE_PARALLEL 1) + + # Require MPI standard 3.0 and greater + if (MPI_VERSION LESS 3) + message (FATAL_ERROR "HDF5 requires MPI standard 3.0 or greater") + endif () + # MPI checks, only do these if MPI_C_FOUND is true, otherwise they always fail # and once set, they are cached as false and not regenerated set (CMAKE_REQUIRED_LIBRARIES "${MPI_C_LIBRARIES}") @@ -723,6 +735,9 @@ set (HDF5_SRC_INCLUDE_DIRS ) option (HDF5_ENABLE_SUBFILING_VFD "Build Parallel HDF5 Subfiling VFD" OFF) if (HDF5_ENABLE_SUBFILING_VFD) + if (WIN32) + message (FATAL_ERROR " **** Subfiling is not supported on Windows **** ") + endif () if (NOT HDF5_ENABLE_PARALLEL) message (FATAL_ERROR "Subfiling VFD requires a parallel HDF5 build") else () @@ -731,7 +746,7 @@ if (HDF5_ENABLE_SUBFILING_VFD) if (NOT H5_HAVE_MPI_Comm_split_type) message (FATAL_ERROR "Subfiling VFD requires MPI-3 support for MPI_Comm_split_type") endif () - endif() + endif () if (NOT DEFINED Threads_FOUND) set (THREADS_PREFER_PTHREAD_FLAG ON) @@ -855,13 +870,6 @@ option (HDF5_PACKAGE_EXTLIBS "CPACK - include external libraries" OFF) if (NOT HDF5_EXTERNALLY_CONFIGURED) if (HDF5_PACKAGE_EXTLIBS) set (HDF5_NO_PACKAGES OFF CACHE BOOL "CPACK - Disable packaging" FORCE) - if (HDF5_ENABLE_Z_LIB_SUPPORT AND ZLIB_FOUND AND NOT BUILD_ZLIB_WITH_FETCHCONTENT) - PACKAGE_ZLIB_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT}) - endif () - - if (HDF5_ENABLE_SZIP_SUPPORT AND SZIP_FOUND AND NOT BUILD_SZIP_WITH_FETCHCONTENT) - PACKAGE_SZIP_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT}) - endif () endif () endif () @@ -872,8 +880,9 @@ option (HDF5_ENABLE_THREADSAFE "Enable thread-safety" OFF) if (HDF5_ENABLE_THREADSAFE) # check for unsupported options if (WIN32) - message (VERBOSE " **** thread-safety option not supported with static library **** ") - message (VERBOSE " **** thread-safety option will not be used building static library **** ") + if (BUILD_STATIC_LIBS) + message (FATAL_ERROR " **** thread-safety option not supported with static library **** ") + endif () endif () if (HDF5_BUILD_FORTRAN) if (NOT ALLOW_UNSUPPORTED) @@ -955,100 +964,7 @@ endif () #----------------------------------------------------------------------------- option (BUILD_TESTING "Build HDF5 Unit Testing" ON) if (BUILD_TESTING) - set (DART_TESTING_TIMEOUT 1200 - CACHE STRING - "Timeout in seconds for each test (default 1200=20minutes)" - ) - - # Generate a list of timeouts based on DART_TESTING_TIMEOUT - math (EXPR CTEST_SHORT_TIMEOUT "${DART_TESTING_TIMEOUT} / 2") - math (EXPR CTEST_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 2") - math (EXPR CTEST_VERY_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 3") - - option (HDF5_TEST_API "Execute HDF5 API tests" OFF) - mark_as_advanced (HDF5_TEST_API) - if (HDF5_TEST_API) - option (HDF5_TEST_API_INSTALL "Install HDF5 API tests" OFF) - mark_as_advanced (HDF5_TEST_API_INSTALL) - - # Enable HDF5 Async API tests - option (HDF5_TEST_API_ENABLE_ASYNC "Enable HDF5 Async API tests" OFF) - mark_as_advanced (HDF5_TEST_API_ENABLE_ASYNC) - - # Build and use HDF5 test driver program for API tests - option (HDF5_TEST_API_ENABLE_DRIVER "Enable HDF5 API test driver program" OFF) - 
mark_as_advanced (HDF5_TEST_API_ENABLE_DRIVER) - if (HDF5_TEST_API_ENABLE_DRIVER) - set (HDF5_TEST_API_SERVER "" CACHE STRING "Server executable for running API tests") - mark_as_advanced (HDF5_TEST_API_SERVER) - endif () - endif () - - option (HDF5_TEST_VFD "Execute tests with different VFDs" OFF) - mark_as_advanced (HDF5_TEST_VFD) - if (HDF5_TEST_VFD) - option (HDF5_TEST_FHEAP_VFD "Execute tests with different VFDs" ON) - mark_as_advanced (HDF5_TEST_FHEAP_VFD) - - # Initialize the list of VFDs to be used for testing and create a test folder for each VFD - H5_SET_VFD_LIST() - endif () - - option (HDF5_TEST_PASSTHROUGH_VOL "Execute tests with different passthrough VOL connectors" OFF) - mark_as_advanced (HDF5_TEST_PASSTHROUGH_VOL) - if (HDF5_TEST_PASSTHROUGH_VOL) - option (HDF5_TEST_FHEAP_PASSTHROUGH_VOL "Execute fheap test with different passthrough VOL connectors" ON) - mark_as_advanced (HDF5_TEST_FHEAP_PASSTHROUGH VOL) - endif () - - set (H5_TEST_EXPRESS_LEVEL_DEFAULT "3") - set (HDF_TEST_EXPRESS "${H5_TEST_EXPRESS_LEVEL_DEFAULT}" - CACHE STRING "Control testing framework (0-3) (0 = exhaustive testing; 3 = quicker testing)") - mark_as_advanced (HDF_TEST_EXPRESS) - if (NOT "${HDF_TEST_EXPRESS}" STREQUAL "") - set (H5_TEST_EXPRESS_LEVEL_DEFAULT "${HDF_TEST_EXPRESS}") - endif () - - enable_testing () - include (CTest) - - include (${HDF5_SOURCE_DIR}/CTestConfig.cmake) - configure_file (${HDF_RESOURCES_DIR}/CTestCustom.cmake ${HDF5_BINARY_DIR}/CTestCustom.ctest @ONLY) - - option (HDF5_TEST_SERIAL "Execute non-parallel tests" ON) - mark_as_advanced (HDF5_TEST_SERIAL) - - option (HDF5_TEST_TOOLS "Execute tools tests" ON) - mark_as_advanced (HDF5_TEST_TOOLS) - - option (HDF5_TEST_EXAMPLES "Execute tests on examples" ON) - mark_as_advanced (HDF5_TEST_EXAMPLES) - - option (HDF5_TEST_SWMR "Execute SWMR tests" ON) - mark_as_advanced (HDF5_TEST_SWMR) - - option (HDF5_TEST_PARALLEL "Execute parallel tests" ON) - mark_as_advanced (HDF5_TEST_PARALLEL) - - option (HDF5_TEST_FORTRAN "Execute fortran tests" ON) - mark_as_advanced (HDF5_TEST_FORTRAN) - - option (HDF5_TEST_CPP "Execute cpp tests" ON) - mark_as_advanced (HDF5_TEST_CPP) - - option (HDF5_TEST_JAVA "Execute java tests" ON) - mark_as_advanced (HDF5_TEST_JAVA) - - if (NOT HDF5_EXTERNALLY_CONFIGURED) - if (EXISTS "${HDF5_TEST_SRC_DIR}" AND IS_DIRECTORY "${HDF5_TEST_SRC_DIR}") - add_subdirectory (test) - endif () - if (H5_HAVE_PARALLEL) - if (EXISTS "${HDF5_TEST_PAR_DIR}" AND IS_DIRECTORY "${HDF5_TEST_PAR_DIR}") - add_subdirectory (testpar) - endif () - endif () - endif () + include (CMakeTests.cmake) endif () #----------------------------------------------------------------------------- diff --git a/CMakePlugins.cmake b/CMakePlugins.cmake index 16fb8740fd9..b96d1ee0466 100644 --- a/CMakePlugins.cmake +++ b/CMakePlugins.cmake @@ -9,27 +9,41 @@ # If you do not have access to either file, you may request a copy from # help@hdfgroup.org. 
# +option (PLUGIN_USE_EXTERNAL "Use External Library Building for filter PLUGIN" 0) + +set (PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGIN" 1) +if (NOT PLUGIN_USE_LOCALCONTENT) + set (PLUGIN_URL ${PLUGIN_TGZ_ORIGPATH}/${PLUGIN_TGZ_NAME}) +else () + if (NOT H5PL_TGZPATH) + set (H5PL_TGZPATH ${TGZPATH}) + endif () + set (PLUGIN_URL ${H5PL_TGZPATH}/${PLUGIN_TGZ_NAME}) +endif () +message (STATUS "Filter PLUGIN file is ${PLUGIN_URL}") include (ExternalProject) #option (HDF5_ALLOW_EXTERNAL_SUPPORT "Allow External Library Building (NO GIT TGZ)" "NO") set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - option (PLUGIN_USE_EXTERNAL "Use External Library Building for filter PLUGIN" 1) + set (PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGIN" 1) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (PLUGIN_URL ${PLUGIN_GIT_URL} CACHE STRING "Path to PLUGIN git repository") set (PLUGIN_BRANCH ${PLUGIN_GIT_BRANCH}) elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - if (NOT TGZPATH) - set (TGZPATH ${HDF5_SOURCE_DIR}) + if (NOT H5PL_TGZPATH) + set (H5PL_TGZPATH ${TGZPATH}) endif () - set (PLUGIN_URL ${TGZPATH}/${PLUGIN_TGZ_NAME}) - if (NOT EXISTS "${PLUGIN_URL}") - set (HDF5_ENABLE_PLUGIN_SUPPORT OFF CACHE BOOL "" FORCE) - message (STATUS "Filter PLUGIN file ${PLUGIN_URL} not found") + if (PLUGIN_USE_LOCALCONTENT) + if (NOT EXISTS "${PLUGIN_URL}") + set (HDF5_ENABLE_PLUGIN_SUPPORT OFF CACHE BOOL "" FORCE) + message (VERBOSE "Filter PLUGIN file ${PLUGIN_URL} not found") + endif () endif () else () set (PLUGIN_USE_EXTERNAL 0) + message (VERBOSE "Filter PLUGIN not built") endif () endif () diff --git a/CMakePresets.json b/CMakePresets.json index 6fe2fd09571..6d1a12fdff6 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -9,22 +9,20 @@ "hidden": true, "inherits": "ci-base", "cacheVariables": { - "HDF5_ALLOW_EXTERNAL_SUPPORT": "NO", - "TGZPATH": {"type": "STRING", "value": "${sourceParentDir}/temp"} + "HDF5_ALLOW_EXTERNAL_SUPPORT": {"type": "STRING", "value": "TGZ"}, + "TGZPATH": {"type": "PATH", "value": "${sourceParentDir}/temp"} } }, { "name": "ci-CompressionVars", "hidden": true, "cacheVariables": { - "BUILD_ZLIB_WITH_FETCHCONTENT": "ON", "ZLIB_PACKAGE_NAME": {"type": "STRING", "value": "zlib"}, - "ZLIB_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/madler/zlib/releases/download/v1.2.13"}, - "ZLIB_TGZ_ORIGNAME": {"type": "STRING", "value": "zlib-1.2.13.tar.gz"}, - "BUILD_SZIP_WITH_FETCHCONTENT": "ON", + "ZLIB_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/madler/zlib/releases/download/v1.3"}, + "ZLIB_TGZ_NAME": {"type": "STRING", "value": "zlib-1.3.tar.gz"}, "LIBAEC_PACKAGE_NAME": {"type": "STRING", "value": "libaec"}, "LIBAEC_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6"}, - "LIBAEC_TGZ_ORIGNAME": {"type": "STRING", "value": "libaec-1.0.6.tar.gz"} + "LIBAEC_TGZ_NAME": {"type": "STRING", "value": "libaec-1.0.6.tar.gz"} } }, { @@ -41,30 +39,29 @@ "name": "ci-base-plugins", "hidden": true, "cacheVariables": { - "PLUGIN_TGZ_NAME": {"type": "STRING", "value": "hdf5_plugins.tar.gz"}, - "PLUGIN_PACKAGE_NAME": {"type": "STRING", "value": "pl"}, - "BSHUF_TGZ_NAME": {"type": "STRING", "value": "bitshuffle.tar.gz"}, + "PLUGIN_TGZ_NAME": {"type": "STRING", "value": 
"hdf5_plugins-master.tar.gz"}, + "BSHUF_TGZ_NAME": {"type": "STRING", "value": "bitshuffle-0.5.1.tar.gz"}, "BSHUF_PACKAGE_NAME": {"type": "STRING", "value": "bshuf"}, - "BLOSC_TGZ_NAME": {"type": "STRING", "value": "c-blosc.tar.gz"}, + "BLOSC_TGZ_NAME": {"type": "STRING", "value": "c-blosc-1.21.5.tar.gz"}, "BLOSC_PACKAGE_NAME": {"type": "STRING", "value": "blosc"}, - "BLOSC_ZLIB_TGZ_NAME": {"type": "STRING", "value": "ZLib.tar.gz"}, + "BLOSC_ZLIB_TGZ_NAME": {"type": "STRING", "value": "zlib-1.3.tar.gz"}, "BLOSC_ZLIB_PACKAGE_NAME": {"type": "STRING", "value": "zlib"}, - "BZ2_TGZ_NAME": {"type": "STRING", "value": "BZ2.tar.gz"}, + "BZ2_TGZ_NAME": {"type": "STRING", "value": "bzip2-bzip2-1.0.8.tar.gz"}, "BZ2_PACKAGE_NAME": {"type": "STRING", "value": "bz2"}, - "FPZIP_TGZ_NAME": {"type": "STRING", "value": "fpzip.tar.gz"}, + "FPZIP_TGZ_NAME": {"type": "STRING", "value": "fpzip-1.3.0.tar.gz"}, "FPZIP_PACKAGE_NAME": {"type": "STRING", "value": "fpzip"}, - "JPEG_TGZ_NAME": {"type": "STRING", "value": "JPEG.tar.gz"}, + "JPEG_TGZ_NAME": {"type": "STRING", "value": "jpegsrc.v9e.tar.gz"}, "JPEG_PACKAGE_NAME": {"type": "STRING", "value": "jpeg"}, "BUILD_LZ4_LIBRARY_SOURCE": "ON", - "LZ4_TGZ_NAME": {"type": "STRING", "value": "lz4.tar.gz"}, + "LZ4_TGZ_NAME": {"type": "STRING", "value": "lz4-1.9.4.tar.gz"}, "LZ4_PACKAGE_NAME": {"type": "STRING", "value": "lz4"}, - "LZF_TGZ_NAME": {"type": "STRING", "value": "lzf.tar.gz"}, + "LZF_TGZ_NAME": {"type": "STRING", "value": "liblzf-3.6.tar.gz"}, "LZF_PACKAGE_NAME": {"type": "STRING", "value": "lzf"}, - "SZ_TGZ_NAME": {"type": "STRING", "value": "szf.tar.gz"}, + "SZ_TGZ_NAME": {"type": "STRING", "value": "SZ-2.1.12.5.tar.gz"}, "SZ_PACKAGE_NAME": {"type": "STRING", "value": "SZ"}, - "ZFP_TGZ_NAME": {"type": "STRING", "value": "zfp.tar.gz"}, + "ZFP_TGZ_NAME": {"type": "STRING", "value": "zfp-1.0.0.tar.gz"}, "ZFP_PACKAGE_NAME": {"type": "STRING", "value": "zfp"}, - "ZSTD_TGZ_NAME": {"type": "STRING", "value": "zstd.tar.gz"}, + "ZSTD_TGZ_NAME": {"type": "STRING", "value": "zstd-1.5.5.tar.gz"}, "ZSTD_PACKAGE_NAME": {"type": "STRING", "value": "zstd"} } }, @@ -73,23 +70,28 @@ "hidden": true, "cacheVariables": { "HDF5_ENABLE_PLUGIN_SUPPORT": "ON", - "PLUGIN_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5_plugins/archive/refs/tags"}, - "PLUGIN_TGZ_ORIGNAME": {"type": "STRING", "value": "hdf5_plugins-master.tar.gz"} + "H5PL_ALLOW_EXTERNAL_SUPPORT": {"type": "STRING", "value": "TGZ"}, + "PLUGIN_PACKAGE_NAME": {"type": "STRING", "value": "pl"}, + "PLUGIN_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5_plugins/releases/download/snapshot"}, + "PLUGIN_TGZ_NAME": {"type": "STRING", "value": "hdf5_plugins-master.tar.gz"} } }, { "name": "ci-StdPlugins", "hidden": true, - "inherits": ["ci-base-plugins", "ci-PluginsVars", "ci-base-tgz"] + "inherits": ["ci-base-plugins", "ci-PluginsVars", "ci-base-tgz"], + "cacheVariables": { + "PLUGIN_USE_LOCALCONTENT": "OFF" + } }, { "name": "ci-ExamplesVars", "hidden": true, "cacheVariables": { - "HDF5_EXAMPLES_COMPRESSED": {"type": "STRING", "value": "hdf5-examples-2.0.4.tar.gz"}, - "HDF5_EXAMPLES_COMPRESSED_DIR": {"type": "STRING", "value": "${sourceParentDir}/temp"}, - "EXAMPLES_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5-examples/archive/refs/tags/"}, - "EXAMPLES_TGZ_ORIGNAME": {"type": "STRING", "value": "2.0.4.tar.gz"} + "HDF5_EXAMPLES_COMPRESSED": {"type": "STRING", "value": "hdf5-examples-master.tar.gz"}, + "HDF5_EXAMPLES_COMPRESSED_DIR": {"type": 
"PATH", "value": "${sourceParentDir}/temp"}, + "EXAMPLES_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5-examples/releases/download/snapshot"}, + "EXAMPLES_TGZ_ORIGNAME": {"type": "STRING", "value": "snapshot.tar.gz"} } }, { @@ -106,7 +108,7 @@ { "name": "ci-StdShar", "hidden": true, - "inherits": ["ci-StdCompression", "ci-StdExamples"], + "inherits": ["ci-StdCompression", "ci-StdExamples", "ci-StdPlugins"], "cacheVariables": { "HDF_PACKAGE_NAMESPACE": {"type": "STRING", "value": "hdf5::"}, "HDF5_INSTALL_MOD_FORTRAN": "NO", @@ -181,7 +183,7 @@ "name": "ci-StdShar-GNUC", "description": "GNUC Standard Build for x64 (Release)", "configurePreset": "ci-StdShar-GNUC", - "verbose": false, + "verbose": true, "inherits": [ "ci-x64-Release-GNUC" ] diff --git a/CMakeTests.cmake b/CMakeTests.cmake new file mode 100644 index 00000000000..1dfa7007642 --- /dev/null +++ b/CMakeTests.cmake @@ -0,0 +1,109 @@ +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. +# + +#----------------------------------------------------------------------------- +# Dashboard and Testing Settings +#----------------------------------------------------------------------------- + set (DART_TESTING_TIMEOUT 1200 + CACHE STRING + "Timeout in seconds for each test (default 1200=20minutes)" + ) + + # Generate a list of timeouts based on DART_TESTING_TIMEOUT + math (EXPR CTEST_SHORT_TIMEOUT "${DART_TESTING_TIMEOUT} / 2") + math (EXPR CTEST_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 2") + math (EXPR CTEST_VERY_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 3") + + option (HDF5_TEST_API "Execute HDF5 API tests" OFF) + mark_as_advanced (HDF5_TEST_API) + if (HDF5_TEST_API) + option (HDF5_TEST_API_INSTALL "Install HDF5 API tests" OFF) + mark_as_advanced (HDF5_TEST_API_INSTALL) + + # Enable HDF5 Async API tests + option (HDF5_TEST_API_ENABLE_ASYNC "Enable HDF5 Async API tests" OFF) + mark_as_advanced (HDF5_TEST_API_ENABLE_ASYNC) + + # Build and use HDF5 test driver program for API tests + option (HDF5_TEST_API_ENABLE_DRIVER "Enable HDF5 API test driver program" OFF) + mark_as_advanced (HDF5_TEST_API_ENABLE_DRIVER) + if (HDF5_TEST_API_ENABLE_DRIVER) + set (HDF5_TEST_API_SERVER "" CACHE STRING "Server executable for running API tests") + mark_as_advanced (HDF5_TEST_API_SERVER) + endif () + endif () + + option (HDF5_TEST_VFD "Execute tests with different VFDs" OFF) + mark_as_advanced (HDF5_TEST_VFD) + if (HDF5_TEST_VFD) + option (HDF5_TEST_FHEAP_VFD "Execute tests with different VFDs" ON) + mark_as_advanced (HDF5_TEST_FHEAP_VFD) + + # Initialize the list of VFDs to be used for testing and create a test folder for each VFD + H5_SET_VFD_LIST() + endif () + + option (HDF5_TEST_PASSTHROUGH_VOL "Execute tests with different passthrough VOL connectors" OFF) + mark_as_advanced (HDF5_TEST_PASSTHROUGH_VOL) + if (HDF5_TEST_PASSTHROUGH_VOL) + option (HDF5_TEST_FHEAP_PASSTHROUGH_VOL "Execute fheap test with different passthrough VOL connectors" ON) + mark_as_advanced (HDF5_TEST_FHEAP_PASSTHROUGH VOL) + endif () + + set (H5_TEST_EXPRESS_LEVEL_DEFAULT "3") + set (HDF_TEST_EXPRESS "${H5_TEST_EXPRESS_LEVEL_DEFAULT}" + CACHE STRING "Control testing framework (0-3) (0 
= exhaustive testing; 3 = quicker testing)") + mark_as_advanced (HDF_TEST_EXPRESS) + if (NOT "${HDF_TEST_EXPRESS}" STREQUAL "") + set (H5_TEST_EXPRESS_LEVEL_DEFAULT "${HDF_TEST_EXPRESS}") + endif () + + enable_testing () + include (CTest) + + include (${HDF5_SOURCE_DIR}/CTestConfig.cmake) + configure_file (${HDF_RESOURCES_DIR}/CTestCustom.cmake ${HDF5_BINARY_DIR}/CTestCustom.ctest @ONLY) + + option (HDF5_TEST_SERIAL "Execute non-parallel tests" ON) + mark_as_advanced (HDF5_TEST_SERIAL) + + option (HDF5_TEST_TOOLS "Execute tools tests" ON) + mark_as_advanced (HDF5_TEST_TOOLS) + + option (HDF5_TEST_EXAMPLES "Execute tests on examples" ON) + mark_as_advanced (HDF5_TEST_EXAMPLES) + + option (HDF5_TEST_SWMR "Execute SWMR tests" ON) + mark_as_advanced (HDF5_TEST_SWMR) + + option (HDF5_TEST_PARALLEL "Execute parallel tests" ON) + mark_as_advanced (HDF5_TEST_PARALLEL) + + option (HDF5_TEST_FORTRAN "Execute fortran tests" ON) + mark_as_advanced (HDF5_TEST_FORTRAN) + + option (HDF5_TEST_CPP "Execute cpp tests" ON) + mark_as_advanced (HDF5_TEST_CPP) + + option (HDF5_TEST_JAVA "Execute java tests" ON) + mark_as_advanced (HDF5_TEST_JAVA) + + if (NOT HDF5_EXTERNALLY_CONFIGURED) + if (EXISTS "${HDF5_TEST_SRC_DIR}" AND IS_DIRECTORY "${HDF5_TEST_SRC_DIR}") + add_subdirectory (test) + endif () + if (H5_HAVE_PARALLEL) + if (EXISTS "${HDF5_TEST_PAR_DIR}" AND IS_DIRECTORY "${HDF5_TEST_PAR_DIR}") + add_subdirectory (testpar) + endif () + endif () + endif () diff --git a/README.md b/README.md index fc448c97208..26fac451997 100644 --- a/README.md +++ b/README.md @@ -11,9 +11,10 @@ HDF5 version 1.15.0 currently under development [![CVE regression](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cve.yml?branch=develop&label=CVE)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) [![1.14 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_14&label=1.14)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_14) [![1.12 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_12&label=1.12)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_12) -[![1.10 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_10&label=1.10)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_10) [![BSD](https://img.shields.io/badge/License-BSD-blue.svg)](https://github.com/HDFGroup/hdf5/blob/develop/COPYING) +[HPC configure/build/test results](https://my.cdash.org/index.php?project=HDF5) + *Please refer to the release_docs/INSTALL file for installation instructions.* This repository contains a high-performance library's source code and a file format @@ -88,7 +89,6 @@ are tentative. 
| Release | New Features | | ------- | ------------ | -| 1.10.11 | CVE fixes, last HDF5 1.10 release | | 1.12.3 | CVE fixes, performance improvements, H5Dchunk\_iter(), last HDF5 1.12 release | | 1.14.3 | CVE-free!, better cross-compile support | | 1.14.4 | S3 VFD improvements | diff --git a/config/cmake-presets/hidden-presets.json b/config/cmake-presets/hidden-presets.json index 18ffdd17f5f..bd36153c278 100644 --- a/config/cmake-presets/hidden-presets.json +++ b/config/cmake-presets/hidden-presets.json @@ -100,7 +100,7 @@ "name": "ci-Fortran-Clang", "hidden": true, "cacheVariables": { - "CMAKE_Fortran_COMPILER": "gfortran" + "CMAKE_Fortran_COMPILER": {"type": "FILEPATH", "value": "gfortran"} }, "condition": { "type": "matches", diff --git a/config/cmake/HDF5PluginCache.cmake b/config/cmake/HDF5PluginCache.cmake index 0d1795de82b..34a97d5902a 100644 --- a/config/cmake/HDF5PluginCache.cmake +++ b/config/cmake/HDF5PluginCache.cmake @@ -18,7 +18,13 @@ set (H5PL_HDF5_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE STRING "HDF5 build folder" set (H5PL_HDF5_DUMP_EXECUTABLE $ CACHE STRING "HDF5 h5dump target" FORCE) set (H5PL_HDF5_REPACK_EXECUTABLE $ CACHE STRING "HDF5 h5repack target" FORCE) -set (H5PL_ALLOW_EXTERNAL_SUPPORT "${HDF5_ALLOW_EXTERNAL_SUPPORT}" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) +if (NOT DEFINED H5PL_ALLOW_EXTERNAL_SUPPORT) + set (H5PL_ALLOW_EXTERNAL_SUPPORT "${HDF5_ALLOW_EXTERNAL_SUPPORT}" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) +endif () + +if (NOT DEFINED H5PL_TGZPATH) + set (H5PL_TGZPATH "${TGZPATH}" CACHE PATH "PATH for finding plugin tgz file" FORCE) +endif () set (H5PL_GIT_URL "https://github.com/HDFGroup/hdf5_plugins.git" CACHE STRING "Use plugins from HDF Group repository" FORCE) set (H5PL_GIT_BRANCH "master" CACHE STRING "" FORCE) diff --git a/config/cmake/HDF5PluginMacros.cmake b/config/cmake/HDF5PluginMacros.cmake index da0eab5f903..aa409f710a2 100644 --- a/config/cmake/HDF5PluginMacros.cmake +++ b/config/cmake/HDF5PluginMacros.cmake @@ -1,3 +1,14 @@ +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. 
+# #------------------------------------------------------------------------------- # Plugins must be built SHARED #------------------------------------------------------------------------------- @@ -85,16 +96,16 @@ macro (FILTER_OPTION plname) if (ENABLE_${plname}) option (HDF_${plname}_USE_EXTERNAL "Use External Library Building for ${PLUGIN_NAME} plugin" 0) mark_as_advanced (HDF_${plname}_USE_EXTERNAL) - if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") + if (H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") set (HDF_${plname}_USE_EXTERNAL 1 CACHE BOOL "Use External Library Building for ${PLUGIN_NAME} plugin" FORCE) - if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") + if (H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (HDF_${plname}_URL ${HDF_${plname}_GIT_URL}) set (HDF_${plname}_BRANCH ${HDF_${plname}_GIT_BRANCH}) - elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - if (NOT TGZPATH) - set (TGZPATH ${H5PL_SOURCE_DIR}) + elseif (H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") + if (NOT H5PL_COMP_TGZPATH) + set (H5PL_COMP_TGZPATH ${H5PL_SOURCE_DIR}/libs) endif () - set (HDF_${plname}_URL ${TGZPATH}/${HDF_${plname}_TGZ_NAME}) + set (HDF_${plname}_URL ${H5PL_COMP_TGZPATH}/${HDF_${plname}_TGZ_NAME}) endif () endif () add_subdirectory (${plname}) diff --git a/config/cmake/HDFCXXCompilerFlags.cmake b/config/cmake/HDFCXXCompilerFlags.cmake index 96763e086c1..951eded0ec8 100644 --- a/config/cmake/HDFCXXCompilerFlags.cmake +++ b/config/cmake/HDFCXXCompilerFlags.cmake @@ -50,11 +50,11 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL SunPro AND CMAKE_CXX_COMPILER_LOADED) endif () if (CMAKE_CXX_COMPILER_ID STREQUAL "NVHPC") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Minform=warn" + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Minform=warn") if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -s -O4" + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O4") else () - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Mbounds -g -gopt -O2" + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Mbounds -gopt -O2") endif () endif () @@ -106,7 +106,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- if (${CMAKE_SYSTEM_NAME} MATCHES "SunOS") diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index b4e68b8ba86..971fec560ac 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -50,9 +50,9 @@ endif() if(CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Minform=warn") if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fast -s -O4") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fast -O4") else () - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -g -O2") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -O2") endif () endif() @@ -115,7 +115,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- 
#----------------------------------------------------------------------------- diff --git a/config/cmake/HDFFortranCompilerFlags.cmake b/config/cmake/HDFFortranCompilerFlags.cmake index 0f1c45d54b7..06184a5c468 100644 --- a/config/cmake/HDFFortranCompilerFlags.cmake +++ b/config/cmake/HDFFortranCompilerFlags.cmake @@ -41,7 +41,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 10.0) if (HDF5_ENABLE_BUILD_DIAGS) @@ -52,12 +52,17 @@ if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_Fortran_COMPILER_VERS endif () endif () +if (CMAKE_Fortran_COMPILER_ID STREQUAL "NAG") + message (STATUS "... Select IEEE floating-point mode full") + list (APPEND HDF5_CMAKE_Fortran_FLAGS "-ieee=full") +endif () + if (CMAKE_Fortran_COMPILER_ID STREQUAL "NVHPC") set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fPIC") if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") - set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fast -Mnoframe -s") + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fast -Mnoframe") else () - set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -Mbounds -Mchkptr -Mdclchk -g") + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -Mbounds -Mchkptr -Mdclchk") endif () endif () diff --git a/config/cmake/HDFLibMacros.cmake b/config/cmake/HDFLibMacros.cmake index 8737b770468..4039a503825 100644 --- a/config/cmake/HDFLibMacros.cmake +++ b/config/cmake/HDFLibMacros.cmake @@ -10,7 +10,7 @@ # help@hdfgroup.org. 
# #------------------------------------------------------------------------------- -macro (ORIGINAL_ZLIB_LIBRARY compress_type) +macro (EXTERNAL_ZLIB_LIBRARY compress_type) if (${compress_type} MATCHES "GIT") FetchContent_Declare (HDF5_ZLIB GIT_REPOSITORY ${ZLIB_URL} @@ -44,7 +44,7 @@ macro (ORIGINAL_ZLIB_LIBRARY compress_type) endmacro () #------------------------------------------------------------------------------- -macro (ORIGINAL_SZIP_LIBRARY compress_type encoding) +macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) # Only libaec library is usable if (${compress_type} MATCHES "GIT") FetchContent_Declare (SZIP @@ -78,157 +78,3 @@ macro (ORIGINAL_SZIP_LIBRARY compress_type encoding) set (SZIP_FOUND 1) set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIR_GEN} ${SZIP_INCLUDE_DIR}) endmacro () - -#------------------------------------------------------------------------------- -macro (EXTERNAL_SZIP_LIBRARY compress_type encoding) - if (${compress_type} MATCHES "GIT") - EXTERNALPROJECT_ADD (SZIP - GIT_REPOSITORY ${SZIP_URL} - GIT_TAG ${SZIP_BRANCH} - INSTALL_COMMAND "" - CMAKE_ARGS - -DBUILD_SHARED_LIBS:BOOL=OFF - -DSZIP_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} - -DSZIP_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} - -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} - -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} - -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DSZIP_ENABLE_ENCODING:BOOL=${encoding} - -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} - -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} - -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} - -DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE} - ) - elseif (${compress_type} MATCHES "TGZ") - EXTERNALPROJECT_ADD (SZIP - URL ${SZIP_URL} - URL_MD5 "" - INSTALL_COMMAND "" - CMAKE_ARGS - -DBUILD_SHARED_LIBS:BOOL=OFF - -DSZIP_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} - -DSZIP_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} - -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} - -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} - -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DSZIP_ENABLE_ENCODING:BOOL=${encoding} - -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} - -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} - -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} - -DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE} - ) - endif () - externalproject_get_property (SZIP BINARY_DIR SOURCE_DIR) -# -##include (${BINARY_DIR}/${LIBAEC_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake) -# Create imported target szip-static - add_library(${HDF_PACKAGE_NAMESPACE}szaec-static STATIC IMPORTED) - HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}szaec-static "szaec" STATIC "") - add_dependencies (${HDF_PACKAGE_NAMESPACE}szaec-static SZIP) - add_library(${HDF_PACKAGE_NAMESPACE}aec-static STATIC IMPORTED) - HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}aec-static "aec" STATIC "") - add_dependencies (${HDF_PACKAGE_NAMESPACE}aec-static SZIP) - set (SZIP_STATIC_LIBRARY 
"${HDF_PACKAGE_NAMESPACE}szaec-static;${HDF_PACKAGE_NAMESPACE}aec-static") - set (SZIP_LIBRARIES ${SZIP_STATIC_LIBRARY}) - - set (SZIP_INCLUDE_DIR_GEN "${BINARY_DIR}") - set (SZIP_INCLUDE_DIR "${SOURCE_DIR}/include") - set (SZIP_FOUND 1) - set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIR_GEN} ${SZIP_INCLUDE_DIR}) -endmacro () - -#------------------------------------------------------------------------------- -macro (PACKAGE_SZIP_LIBRARY compress_type) - add_custom_target (SZIP-GenHeader-Copy ALL - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${SZIP_INCLUDE_DIR_GEN}/aec_config.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ - COMMENT "Copying ${SZIP_INCLUDE_DIR_GEN}/aec_config.h to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/" - ) - set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/aec_config.h) - if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "TGZ") - add_dependencies (SZIP-GenHeader-Copy SZIP) - endif () -endmacro () - -#------------------------------------------------------------------------------- -macro (EXTERNAL_ZLIB_LIBRARY compress_type) - if (${compress_type} MATCHES "GIT") - EXTERNALPROJECT_ADD (HDF5_ZLIB - GIT_REPOSITORY ${ZLIB_URL} - GIT_TAG ${ZLIB_BRANCH} - INSTALL_COMMAND "" - CMAKE_ARGS - -DBUILD_SHARED_LIBS:BOOL=OFF - -DZLIB_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} - -DZLIB_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} - -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} - -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} - -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} - -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} - -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} - -DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE} - ) - elseif (${compress_type} MATCHES "TGZ") - EXTERNALPROJECT_ADD (HDF5_ZLIB - URL ${ZLIB_URL} - URL_MD5 "" - INSTALL_COMMAND "" - CMAKE_ARGS - -DBUILD_SHARED_LIBS:BOOL=OFF - -DZLIB_PACKAGE_EXT:STRING=${HDF_PACKAGE_EXT} - -DZLIB_EXTERNALLY_CONFIGURED:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=${HDF_CFG_NAME} - -DCMAKE_DEBUG_POSTFIX:STRING=${CMAKE_DEBUG_POSTFIX} - -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_INSTALL_PREFIX} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} - -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY} - -DHDF_USE_GNU_DIRS:STRING=${HDF5_USE_GNU_DIRS} - -DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES} - -DCMAKE_TOOLCHAIN_FILE:STRING=${CMAKE_TOOLCHAIN_FILE} - -DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE} - ) - endif () - externalproject_get_property (HDF5_ZLIB BINARY_DIR SOURCE_DIR) - - if (NOT ZLIB_LIB_NAME) - set (ZLIB_LIB_NAME "z") - endif () -##include (${BINARY_DIR}/${ZLIB_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake) -# Create imported target zlib-static - add_library(${HDF_PACKAGE_NAMESPACE}zlib-static STATIC IMPORTED) - HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}zlib-static ${ZLIB_LIB_NAME} STATIC "") - add_dependencies (${HDF_PACKAGE_NAMESPACE}zlib-static HDF5_ZLIB) - set (ZLIB_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}zlib-static") - set (ZLIB_LIBRARIES ${ZLIB_STATIC_LIBRARY}) - - set 
(ZLIB_INCLUDE_DIR_GEN "${BINARY_DIR}") - set (ZLIB_INCLUDE_DIR "${SOURCE_DIR}") - set (ZLIB_FOUND 1) - set (ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIR_GEN} ${ZLIB_INCLUDE_DIR}) -endmacro () - -#------------------------------------------------------------------------------- -macro (PACKAGE_ZLIB_LIBRARY compress_type) - add_custom_target (ZLIB-GenHeader-Copy ALL - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ZLIB_INCLUDE_DIR_GEN}/zconf.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ - COMMENT "Copying ${ZLIB_INCLUDE_DIR_GEN}/zconf.h to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/" - ) - set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/zconf.h) - if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "TGZ") - add_dependencies (ZLIB-GenHeader-Copy HDF5_ZLIB) - endif () -endmacro () diff --git a/config/cmake/UseJava.cmake b/config/cmake/UseJava.cmake index 1de08db8d40..2783cb638e4 100644 --- a/config/cmake/UseJava.cmake +++ b/config/cmake/UseJava.cmake @@ -1474,7 +1474,7 @@ function (create_javah) "CLASSES;CLASSPATH;DEPENDS" ${ARGN}) - # ckeck parameters + # check parameters if (NOT _create_javah_TARGET AND NOT _create_javah_GENERATED_FILES) message (FATAL_ERROR "create_javah: TARGET or GENERATED_FILES must be specified.") endif() diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index ad61584c902..c7878e944f5 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -46,42 +46,41 @@ set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) ######################## # compression options ######################## - set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) -set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE) -set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.2.13" CACHE STRING "Use ZLIB from original location" FORCE) -set (ZLIB_TGZ_ORIGNAME "zlib-1.2.13.tar.gz" CACHE STRING "Use ZLIB from original compressed file" FORCE) +set (ZLIB_TGZ_NAME "zlib-1.3.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE) +set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.3" CACHE STRING "Use ZLIB from original location" FORCE) set (ZLIB_USE_LOCALCONTENT ON CACHE BOOL "Use local file for ZLIB FetchContent" FORCE) set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) -set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) +set (LIBAEC_TGZ_NAME "libaec-1.0.6.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6" CACHE STRING "Use LIBAEC from original location" FORCE) -set (LIBAEC_TGZ_ORIGNAME "libaec-1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) set (LIBAEC_USE_LOCALCONTENT ON CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) ######################## # API test options ######################## set (KWSYS_TGZ_ORIGPATH "https://gitlab.kitware.com/utils/kwsys/-/archive/master" CACHE STRING "Use KWSYS from original location" FORCE) -set (KWSYS_TGZ_ORIGNAME "kwsys-master.tar.gz" CACHE STRING "Use KWSYS from original compressed file" FORCE) +set (KWSYS_TGZ_NAME "kwsys-master.tar.gz" CACHE STRING "Use KWSYS from original compressed file" FORCE) set (KWSYS_USE_LOCALCONTENT OFF CACHE BOOL "Use local file for KWSYS FetchContent" FORCE) ######################## # filter plugin options ######################## 
-set (PLUGIN_TGZ_NAME "hdf5_plugins.tar.gz" CACHE STRING "Use PLUGINS from compressed file" FORCE) - +set (PLUGIN_TGZ_ORIGPATH "https://github.com/HDFGroup/hdf5_plugins/releases/download/snapshots" CACHE STRING "Use PLUGINS from original location" FORCE) +set (PLUGIN_TGZ_NAME "hdf5_plugins-master.tar.gz" CACHE STRING "Use PLUGINS from compressed file" FORCE) +set (PLUGIN_USE_LOCALCONTENT ON CACHE BOOL "Use local file for PLUGIN FetchContent" FORCE) set (PLUGIN_PACKAGE_NAME "pl" CACHE STRING "Name of PLUGIN package" FORCE) ############# # bitshuffle ############# -set (BSHUF_GIT_URL "https://someurl/bitshuffle.git" CACHE STRING "Use BSHUF from HDF Group repository" FORCE) +set (BSHUF_GIT_URL "https://github.com/kiyo-masui/bitshuffle.git" CACHE STRING "Use BSHUF from HDF repository" FORCE) set (BSHUF_GIT_BRANCH "master" CACHE STRING "" FORCE) -set (BSHUF_TGZ_NAME "bitshuffle.tar.gz" CACHE STRING "Use BSHUF from compressed file" FORCE) +set (BSHUF_TGZ_ORIGPATH "https://github.com/kiyo-masui/bitshuffle/archive/refs/tags" CACHE STRING "Use PLUGINS from original location" FORCE) +set (BSHUF_TGZ_NAME "bitshuffle-0.5.1.tar.gz" CACHE STRING "Use BSHUF from compressed file" FORCE) set (BSHUF_PACKAGE_NAME "bshuf" CACHE STRING "Name of BSHUF package" FORCE) @@ -89,17 +88,19 @@ set (BSHUF_PACKAGE_NAME "bshuf" CACHE STRING "Name of BSHUF package" FORCE) # blosc ######## -set (BLOSC_GIT_URL "https://github.com/Blosc/c-blosc.git" CACHE STRING "Use BLOSC from GitHub repository" FORCE) -set (BLOSC_GIT_BRANCH "master" CACHE STRING "" FORCE) +set (BLOSC_GIT_URL "https://github.com/Blosc/c-blosc.git" CACHE STRING "Use BLOSC from Github repository" FORCE) +set (BLOSC_GIT_BRANCH "main" CACHE STRING "" FORCE) -set (BLOSC_TGZ_NAME "c-blosc.tar.gz" CACHE STRING "Use BLOSC from compressed file" FORCE) +set (BLOSC_TGZ_ORIGPATH "https://github.com/Blosc/c-blosc/archive/refs/tags" CACHE STRING "Use PLUGINS from original location" FORCE) +set (BLOSC_TGZ_NAME "c-blosc-1.21.5.tar.gz" CACHE STRING "Use BLOSC from compressed file" FORCE) set (BLOSC_PACKAGE_NAME "blosc" CACHE STRING "Name of BLOSC package" FORCE) -set (BLOSC_ZLIB_GIT_URL "https://someurl/zlib.git" CACHE STRING "Use BLOSC_ZLIB from HDF Group repository" FORCE) -set (BLOSC_ZLIB_GIT_BRANCH "master" CACHE STRING "" FORCE) +set (BLOSC_ZLIB_GIT_URL "https://github.com/madler/zlib.git" CACHE STRING "Use ZLIB from GitHub repository" FORCE) +set (BLOSC_ZLIB_GIT_BRANCH "develop" CACHE STRING "" FORCE) -set (BLOSC_ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use BLOSC_ZLib from compressed file" FORCE) +set (BLOSC_ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.3" CACHE STRING "Use PLUGINS from original location" FORCE) +set (BLOSC_ZLIB_TGZ_NAME "zlib-1.3.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) set (BLOSC_ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of BLOSC_ZLIB package" FORCE) @@ -107,10 +108,11 @@ set (BLOSC_ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of BLOSC_ZLIB package" FO # bzip2 ######## -set (BZ2_GIT_URL "https://someurl/bzip2.git" CACHE STRING "Use BZ2 from HDF Group repository" FORCE) +set (BZ2_GIT_URL "https://github.com/libarchive/bzip2.git" CACHE STRING "Use BZ2 from GitHub repository" FORCE) set (BZ2_GIT_BRANCH "master" CACHE STRING "" FORCE) -set (BZ2_TGZ_NAME "BZ2.tar.gz" CACHE STRING "Use BZ2 from compressed file" FORCE) +set (BZ2_TGZ_ORIGPATH "https://github.com/libarchive/bzip2/archive/refs/tags" CACHE STRING "Use PLUGINS from original location" FORCE) +set (BZ2_TGZ_NAME "bzip2-bzip2-1.0.8.tar.gz" CACHE STRING 
"Use BZ2 from compressed file" FORCE) set (BZ2_PACKAGE_NAME "bz2" CACHE STRING "Name of BZ2 package" FORCE) @@ -118,10 +120,11 @@ set (BZ2_PACKAGE_NAME "bz2" CACHE STRING "Name of BZ2 package" FORCE) # fpzip ######## -set (FPZIP_GIT_URL "https://https://github.com/LLNL/fpzip" CACHE STRING "Use FPZIP from GitHub repository" FORCE) -set (FPZIP_GIT_BRANCH "master" CACHE STRING "" FORCE) +set (FPZIP_GIT_URL "https://github.com/LLNL/fpzip.git" CACHE STRING "Use FPZIP from GitHub repository" FORCE) +set (FPZIP_GIT_BRANCH "develop" CACHE STRING "" FORCE) -set (FPZIP_TGZ_NAME "fpzip.tar.gz" CACHE STRING "Use FPZIP from compressed file" FORCE) +set (FPZIP_TGZ_ORIGPATH "https://github.com/LLNL/fpzip/releases/download/1.3.0" CACHE STRING "Use PLUGINS from original location" FORCE) +set (FPZIP_TGZ_NAME "fpzip-1.3.0.tar.gz" CACHE STRING "Use FPZIP from compressed file" FORCE) set (FPZIP_PACKAGE_NAME "fpzip" CACHE STRING "Name of FPZIP package" FORCE) @@ -129,11 +132,11 @@ set (FPZIP_PACKAGE_NAME "fpzip" CACHE STRING "Name of FPZIP package" FORCE) # jpeg ####### -set (JPEG_GIT_URL "https://someurl/jpeg.git" CACHE STRING "Use JPEG from HDF Group repository" FORCE) -set (JPEG_GIT_BRANCH "jpeg9c" CACHE STRING "" FORCE) +set (JPEG_GIT_URL "No repo www.ijg.org/files" CACHE STRING "Use JPEG from ILG" FORCE) +set (JPEG_GIT_BRANCH "v9e" CACHE STRING "" FORCE) -#set (JPEG_TGZ_NAME "JPEG9c.tar.gz" CACHE STRING "Use JPEG from compressed file" FORCE) -set (JPEG_TGZ_NAME "JPEG.tar.gz" CACHE STRING "Use JPEG from compressed file" FORCE) +set (JPEG_TGZ_ORIGPATH "https://www.ijg.org/files" CACHE STRING "Use PLUGINS from original location" FORCE) +set (JPEG_TGZ_NAME "jpegsrc.v9e.tar.gz" CACHE STRING "Use JPEG from compressed file" FORCE) set (JPEG_PACKAGE_NAME "jpeg" CACHE STRING "Name of JPEG package" FORCE) @@ -143,10 +146,11 @@ set (JPEG_PACKAGE_NAME "jpeg" CACHE STRING "Name of JPEG package" FORCE) set (BUILD_LZ4_LIBRARY_SOURCE ON CACHE BOOL "build the lz4 library within the plugin" FORCE) -set (LZ4_GIT_URL "https://someurl/lz4.git" CACHE STRING "Use LZ4 from HDF Group repository" FORCE) -set (LZ4_GIT_BRANCH "master" CACHE STRING "" FORCE) +set (LZ4_GIT_URL "https://github.com/lz4/lz4.git" CACHE STRING "Use LZ4 from GitHub repository" FORCE) +set (LZ4_GIT_BRANCH "dev" CACHE STRING "" FORCE) -set (LZ4_TGZ_NAME "lz4.tar.gz" CACHE STRING "Use LZ4 from compressed file" FORCE) +set (LZ4_TGZ_ORIGPATH "https://github.com/lz4/lz4/releases/download/v1.9.4" CACHE STRING "Use PLUGINS from original location" FORCE) +set (LZ4_TGZ_NAME "lz4-1.9.4.tar.gz" CACHE STRING "Use LZ4 from compressed file" FORCE) set (LZ4_PACKAGE_NAME "lz4" CACHE STRING "Name of LZ4 package" FORCE) @@ -154,10 +158,10 @@ set (LZ4_PACKAGE_NAME "lz4" CACHE STRING "Name of LZ4 package" FORCE) # lzf ###### -set (LZF_GIT_URL "https://someurl/lzf.git" CACHE STRING "Use LZF from HDF Group repository" FORCE) -set (LZF_GIT_BRANCH "master" CACHE STRING "" FORCE) +set (LZF_URL "http://software.schmorp.de/pkg/liblzf.html" CACHE STRING "LZF home" FORCE) -set (LZF_TGZ_NAME "lzf.tar.gz" CACHE STRING "Use LZF from compressed file" FORCE) +set (LZF_TGZ_ORIGPATH "http://dist.schmorp.de/liblzf" CACHE STRING "Use LZF from original location" FORCE) +set (LZF_TGZ_NAME "liblzf-3.6.tar.gz" CACHE STRING "Use LZF from compressed file" FORCE) set (LZF_PACKAGE_NAME "lzf" CACHE STRING "Name of LZF package" FORCE) @@ -167,16 +171,19 @@ set (LZF_PACKAGE_NAME "lzf" CACHE STRING "Name of LZF package" FORCE) #set (BUILD_MAFISC_LIBRARY_SOURCE OFF CACHE BOOL "build the mafisc library within 
the plugin" FORCE) +#set (MAFISC_TGZ_ORIGPATH "" CACHE STRING "Use PLUGINS from original location" FORCE) +#set (MAFISC_TGZ_NAME ".tar.gz" CACHE STRING "Use MAFISC from compressed file" FORCE) #set (MAFISC_PACKAGE_NAME "mafisc" CACHE STRING "Name of MAFISC package" FORCE) ##### # sz ##### -set (SZ_GIT_URL "https://github.com/disheng222/SZ" CACHE STRING "Use SZ filter from GitHub repository" FORCE) +set (SZ_GIT_URL "https://github.com/szcompressor/SZ.git" CACHE STRING "Use SZ filter from GitHub repository" FORCE) set (SZ_GIT_BRANCH "master" CACHE STRING "" FORCE) -set (SZ_TGZ_NAME "szf.tar.gz" CACHE STRING "Use SZ filter from compressed file" FORCE) +set (SZ_TGZ_ORIGPATH "https://github.com/szcompressor/SZ/releases/download/v2.1.12.5" CACHE STRING "Use PLUGINS from original location" FORCE) +set (SZ_TGZ_NAME "SZ-2.1.12.5.tar.gz" CACHE STRING "Use SZ filter from compressed file" FORCE) set (SZ_PACKAGE_NAME "SZ" CACHE STRING "Name of SZ filter package" FORCE) @@ -184,10 +191,11 @@ set (SZ_PACKAGE_NAME "SZ" CACHE STRING "Name of SZ filter package" FORCE) # zfp ###### -set (ZFP_GIT_URL "https://github.com/LLNL/zfp.git" CACHE STRING "Use ZFP from GitHub repository" FORCE) -set (ZFP_GIT_BRANCH "master" CACHE STRING "" FORCE) +set (ZFP_GIT_URL "https://github.com/LLNL/zfp.git" CACHE STRING "Use ZFP from GitHub repository" FORCE) +set (ZFP_GIT_BRANCH "develop" CACHE STRING "" FORCE) -set (ZFP_TGZ_NAME "zfp.tar.gz" CACHE STRING "Use ZFP from compressed file" FORCE) +set (ZFP_TGZ_ORIGPATH "https://github.com/LLNL/zfp/releases/download/1.0.0" CACHE STRING "Use PLUGINS from original location" FORCE) +set (ZFP_TGZ_NAME "zfp-1.0.0.tar.gz" CACHE STRING "Use ZFP from compressed file" FORCE) set (ZFP_PACKAGE_NAME "zfp" CACHE STRING "Name of ZFP package" FORCE) @@ -195,10 +203,10 @@ set (ZFP_PACKAGE_NAME "zfp" CACHE STRING "Name of ZFP package" FORCE) # zstd ###### -set (ZSTD_GIT_URL "https://github.com/facebook/zstd" CACHE STRING "Use ZSTD from repository" FORCE) +set (ZSTD_GIT_URL "https://github.com/facebook/zstd.git" CACHE STRING "Use ZSTD from GitHub repository" FORCE) set (ZSTD_GIT_BRANCH "dev" CACHE STRING "" FORCE) -set (ZSTD_TGZ_NAME "zstd.tar.gz" CACHE STRING "Use ZSTD from compressed file" FORCE) +set (ZSTD_TGZ_ORIGPATH "https://github.com/facebook/zstd/releases/download/v1.5.5" CACHE STRING "Use PLUGINS from original location" FORCE) +set (ZSTD_TGZ_NAME "zstd-1.5.5.tar.gz" CACHE STRING "Use ZSTD from compressed file" FORCE) set (ZSTD_PACKAGE_NAME "zstd" CACHE STRING "Name of ZSTD package" FORCE) - diff --git a/config/cmake/mccacheinit.cmake b/config/cmake/mccacheinit.cmake index 068adff6269..b499371a022 100644 --- a/config/cmake/mccacheinit.cmake +++ b/config/cmake/mccacheinit.cmake @@ -54,7 +54,7 @@ set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) -set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) +set (LIBAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) set (CMAKE_BUILD_TYPE "Debug" CACHE STRING "Build Debug" FORCE) diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1 index e72d371f2ec..db201cc3381 100644 --- a/config/linux-gnulibc1 +++ b/config/linux-gnulibc1 @@ -95,7 +95,7 @@ else fc_version_info=`$FC -V | grep Absoft` ;; # The NAG compiler - nagfor*|nagftn*) + *nagfor*|*nagftn*) RM='rm -f' tmpfile=/tmp/cmpver.$$ $FC -V >& 
$tmpfile
@@ -137,8 +137,6 @@ case $FC_BASENAME in
             H5_CFLAGS="$H5_CFLAGS"
             F9XSUFFIXFLAG=""
-# We force compiler to use upper case for external names
-# (just in case since this should be a default EIP)
             H5_FCFLAGS="$H5_FCFLAGS"
             FSEARCH_DIRS=""
@@ -168,9 +166,10 @@ case $FC_BASENAME in
     nagfor)
             F9XSUFFIXFLAG=""
-# We force compiler to use upper case for external names
-# (just in case since this should be a default EIP)
-            H5_FCFLAGS="$H5_FCFLAGS"
+            # NOTE: The default is -ieee=stop, which will cause problems
+            #       when the H5T module performs floating-point type
+            #       introspection
+            AM_FCFLAGS="$AM_FCFLAGS -ieee=full"
             FSEARCH_DIRS=""
 
 # Production
diff --git a/configure.ac b/configure.ac
index 37252d9a76a..9e823ceb3f7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2654,6 +2654,20 @@ if test "X${ALLOW_UNSUPPORTED}" != "Xyes"; then
     fi
 fi
 
+# Requires MPI standard 3.0 and greater
+if test "X${enable_parallel}" = "Xyes"; then
+    AC_MSG_CHECKING([whether MPI meets the minimum 3.0 standard])
+    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+        #include <mpi.h>
+        #if MPI_VERSION < 3
+        #error, found MPI_VERSION < 3
+        #endif]])],
+        [AC_MSG_RESULT([yes])],
+        [AC_MSG_RESULT([no])
+         AC_MSG_ERROR([HDF5 requires MPI standard 3.0 or greater])]
+    )
+fi
+
 AC_MSG_CHECKING([for parallel support files])
 case "X-$enable_parallel" in
     X-|X-no|X-none)
diff --git a/doc/getting-started-with-hdf5-development.md b/doc/getting-started-with-hdf5-development.md
index 3754df7f342..b6771dca76e 100644
--- a/doc/getting-started-with-hdf5-development.md
+++ b/doc/getting-started-with-hdf5-development.md
@@ -496,7 +496,7 @@ files at configure time by the `bin/make_err` script. The original intent was fo
 codes to be strongly associated. i.e., a given minor code would *only* be
 used with its associated major code. Unfortunately, this has not been the
 case in practice, and the emitted text can appear nonsensical in error
-stack dumps. Even worse, the major and minor error codes are used inconsitently
+stack dumps. Even worse, the major and minor error codes are used inconsistently
 throughout the library, making interpreting them almost impossible for
 external users. We hope to address this deficiency in the near future.
@@ -556,7 +556,7 @@
 configure/CMake options, but it's normally easier to use external tools like
 valgrind or the compiler's memory debugging options.
 
 `H5FL` provides memory pools (*Free Lists*) that create a set of fixed-size allocations
-of a certain type that the library will re-use as needed. They use `H5MM` calls
+of a certain type that the library will reuse as needed. They use `H5MM` calls
 under the hood and can be useful when the library creates and frees a lot of
 objects of that type.
It's difficult to give a good guideline as to when to use the `H5FL` calls and when to use the `H5MM` calls, but it's probably best to diff --git a/doc/parallel-compression.md b/doc/parallel-compression.md index e4fa8228628..efe685c3a65 100644 --- a/doc/parallel-compression.md +++ b/doc/parallel-compression.md @@ -61,8 +61,8 @@ H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); H5Dwrite(..., dxpl_id, ...); ``` -The following are two simple examples of using the parallel compression -feature: +The following are two simple examples of using the parallel +compression feature: [ph5_filtered_writes.c](https://github.com/HDFGroup/hdf5/blob/develop/examples/ph5_filtered_writes.c) @@ -76,9 +76,30 @@ Remember that the feature requires these writes to use collective I/O, so the MPI ranks which have nothing to contribute must still participate in the collective write call. +## Multi-dataset I/O support + +The parallel compression feature is supported when using the +multi-dataset I/O API routines ([H5Dwrite_multi](https://hdfgroup.github.io/hdf5/group___h5_d.html#gaf6213bf3a876c1741810037ff2bb85d8)/[H5Dread_multi](https://hdfgroup.github.io/hdf5/group___h5_d.html#ga8eb1c838aff79a17de385d0707709915)), but the +following should be kept in mind: + + - Parallel writes to filtered datasets **must** still be collective, + even when using the multi-dataset I/O API routines + + - When the multi-dataset I/O API routines are passed a mixture of + filtered and unfiltered datasets, the library currently has to + perform I/O on them separately in two phases. Since there is + some slight complexity involved in this, it may be best (depending + on the number of datasets, number of selected chunks, number of + filtered vs. unfiltered datasets, etc.) to make two individual + multi-dataset I/O calls, one for the filtered datasets and one + for the unfiltered datasets. When performing writes to the datasets, + this would also allow independent write access to the unfiltered + datasets if desired, while still performing collective writes to + the filtered datasets. + ## Incremental file space allocation support -HDF5's [file space allocation time](https://portal.hdfgroup.org/display/HDF5/H5P_SET_ALLOC_TIME) +HDF5's [file space allocation time](https://hdfgroup.github.io/hdf5/group___d_c_p_l.html#ga85faefca58387bba409b65c470d7d851) is a dataset creation property that can have significant effects on application performance, especially if the application uses parallel HDF5. In a serial HDF5 application, the default file space @@ -97,7 +118,7 @@ While this strategy has worked in the past, it has some noticeable drawbacks. For one, the larger the chunked dataset being created, the more noticeable overhead there will be during dataset creation as all of the data chunks are being allocated in the HDF5 file. -Further, these data chunks will, by default, be [filled](https://portal.hdfgroup.org/display/HDF5/H5P_SET_FILL_VALUE) +Further, these data chunks will, by default, be [filled](https://hdfgroup.github.io/hdf5/group___d_c_p_l.html#ga4335bb45b35386daa837b4ff1b9cd4a4) with HDF5's default fill data value, leading to extraordinary dataset creation overhead and resulting in pre-filling large portions of a dataset that the application might have been planning @@ -105,7 +126,7 @@ to overwrite anyway. Even worse, there will be more initial overhead from compressing that fill data before writing it out, only to have it read back in, unfiltered and modified the first time a chunk is written to. 
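To ground the allocation-time discussion, the following is a minimal, hypothetical sketch (the chunk shape and compression level are invented, not recommendations) of a dataset creation property list combining chunking, deflate compression, and incremental file space allocation through the standard H5P routines:

```c
#include "hdf5.h"

/* Hypothetical sketch: a DCPL for a chunked, deflate-compressed dataset
 * using incremental file space allocation, so chunks are allocated (and
 * fill data written) only when each chunk is first written to. */
static hid_t make_incremental_dcpl(void)
{
    hsize_t chunk_dims[2] = {64, 64}; /* invented chunk shape */
    hid_t   dcpl_id       = H5Pcreate(H5P_DATASET_CREATE);

    if (dcpl_id < 0)
        return H5I_INVALID_HID;
    H5Pset_chunk(dcpl_id, 2, chunk_dims);            /* chunked layout */
    H5Pset_deflate(dcpl_id, 6);                      /* gzip, level 6 */
    H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_INCR); /* incremental allocation */
    return dcpl_id;
}
```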
In the past, it was typically suggested that parallel -HDF5 applications should use [H5Pset_fill_time](https://portal.hdfgroup.org/display/HDF5/H5P_SET_FILL_TIME) +HDF5 applications should use [H5Pset_fill_time](https://hdfgroup.github.io/hdf5/group___d_c_p_l.html#ga6bd822266b31f86551a9a1d79601b6a2) with a value of `H5D_FILL_TIME_NEVER` in order to disable writing of the fill value to dataset chunks, but this isn't ideal if the application actually wishes to make use of fill values. @@ -199,14 +220,14 @@ chunks to end up at addresses in the file that do not align well with the underlying file system, possibly leading to poor performance. As an example, Lustre performance is generally good when writes are aligned with the chosen stripe size. -The HDF5 application can use [H5Pset_alignment](https://portal.hdfgroup.org/display/HDF5/H5P_SET_ALIGNMENT) +The HDF5 application can use [H5Pset_alignment](https://hdfgroup.github.io/hdf5/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a) to have a bit more control over where objects in the HDF5 file end up. However, do note that setting the alignment of objects generally wastes space in the file and has the potential to dramatically increase its resulting size, so caution should be used when choosing the alignment parameters. -[H5Pset_alignment](https://portal.hdfgroup.org/display/HDF5/H5P_SET_ALIGNMENT) +[H5Pset_alignment](https://hdfgroup.github.io/hdf5/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a) has two parameters that control the alignment of objects in the HDF5 file, the "threshold" value and the alignment value. The threshold value specifies that any object greater @@ -243,19 +264,19 @@ in a file, this can create significant amounts of free space in the file over its lifetime and eventually cause performance issues. -An HDF5 application can use [H5Pset_file_space_strategy](http://portal.hdfgroup.org/display/HDF5/H5P_SET_FILE_SPACE_STRATEGY) +An HDF5 application can use [H5Pset_file_space_strategy](https://hdfgroup.github.io/hdf5/group___f_c_p_l.html#ga167ff65f392ca3b7f1933b1cee1b9f70) with a value of `H5F_FSPACE_STRATEGY_PAGE` to enable the paged aggregation feature, which can accumulate metadata and raw data for dataset data chunks into well-aligned, configurably sized "pages" for better performance. However, note that using the paged aggregation feature will cause any setting from -[H5Pset_alignment](https://portal.hdfgroup.org/display/HDF5/H5P_SET_ALIGNMENT) +[H5Pset_alignment](https://hdfgroup.github.io/hdf5/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a) to be ignored. While an application should be able to get -comparable performance effects by [setting the size of these pages](http://portal.hdfgroup.org/display/HDF5/H5P_SET_FILE_SPACE_PAGE_SIZE) to be equal to the value that -would have been set for [H5Pset_alignment](https://portal.hdfgroup.org/display/HDF5/H5P_SET_ALIGNMENT), +comparable performance effects by [setting the size of these pages](https://hdfgroup.github.io/hdf5/group___f_c_p_l.html#gad012d7f3c2f1e1999eb1770aae3a4963) to be equal to the value that +would have been set for [H5Pset_alignment](https://hdfgroup.github.io/hdf5/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a), this may not necessarily be the case and should be studied. 
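As a rough illustration of the two knobs discussed above, the sketch below (the threshold, alignment, and page-size values are invented placeholders that should be tuned to the file system, e.g. the Lustre stripe size) applies an object alignment to a file access property list and, as an alternative, paged aggregation to a file creation property list:

```c
#include "hdf5.h"

/* Hypothetical tuning sketch; all values are placeholders. */
static void tune_file_layout(hid_t fapl_id, hid_t fcpl_id)
{
    /* Align every object of at least 1 MiB to a 16 MiB boundary. */
    H5Pset_alignment(fapl_id, 1048576, 16777216);

    /* Alternative: paged aggregation with a matching page size
     * (note that this causes the alignment setting above to be
     * ignored); persist = 0, free-space threshold = 1. */
    H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, 0, 1);
    H5Pset_file_space_page_size(fcpl_id, 16777216);
}
```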
-Note that [H5Pset_file_space_strategy](http://portal.hdfgroup.org/display/HDF5/H5P_SET_FILE_SPACE_STRATEGY) +Note that [H5Pset_file_space_strategy](https://hdfgroup.github.io/hdf5/group___f_c_p_l.html#ga167ff65f392ca3b7f1933b1cee1b9f70) has a `persist` parameter. This determines whether or not the file free space manager should include extra metadata in the HDF5 file about free space sections in the file. If this @@ -279,12 +300,12 @@ hid_t file_id = H5Fcreate("file.h5", H5F_ACC_TRUNC, fcpl_id, fapl_id); While the parallel compression feature requires that the HDF5 application set and maintain collective I/O at the application -interface level (via [H5Pset_dxpl_mpio](https://portal.hdfgroup.org/display/HDF5/H5P_SET_DXPL_MPIO)), +interface level (via [H5Pset_dxpl_mpio](https://hdfgroup.github.io/hdf5/group___d_x_p_l.html#ga001a22b64f60b815abf5de8b4776f09e)), it does not require that the actual MPI I/O that occurs at the lowest layers of HDF5 be collective; independent I/O may perform better depending on the application I/O patterns and parallel file system performance, among other factors. The -application may use [H5Pset_dxpl_mpio_collective_opt](https://portal.hdfgroup.org/display/HDF5/H5P_SET_DXPL_MPIO_COLLECTIVE_OPT) +application may use [H5Pset_dxpl_mpio_collective_opt](https://hdfgroup.github.io/hdf5/group___d_x_p_l.html#gacb30d14d1791ec7ff9ee73aa148a51a3) to control this setting and see which I/O method provides the best performance. @@ -297,7 +318,7 @@ H5Dwrite(..., dxpl_id, ...); ### Runtime HDF5 Library version -An HDF5 application can use the [H5Pset_libver_bounds](http://portal.hdfgroup.org/display/HDF5/H5P_SET_LIBVER_BOUNDS) +An HDF5 application can use the [H5Pset_libver_bounds](https://hdfgroup.github.io/hdf5/group___f_a_p_l.html#gacbe1724e7f70cd17ed687417a1d2a910) routine to set the upper and lower bounds on library versions to use when creating HDF5 objects. For parallel compression specifically, setting the library version to the latest available diff --git a/doxygen/examples/H5.format.1.0.html b/doxygen/examples/H5.format.1.0.html index 26d04213d84..5002695de76 100644 --- a/doxygen/examples/H5.format.1.0.html +++ b/doxygen/examples/H5.format.1.0.html @@ -2020,7 +2020,7 @@

Name: Complex Dataspace (Fiber Bundle?)

Mesh Type: (unsigned 32-bit integer)
This value indicates whether the grid is - polar/spherical/cartesion, + polar/spherical/cartesian, structured/unstructured and regular/irregular.
The mesh type value is broken up as follows:
diff --git a/doxygen/examples/H5.format.1.1.html b/doxygen/examples/H5.format.1.1.html index 3af50d66194..f5e4c4e0fe5 100644 --- a/doxygen/examples/H5.format.1.1.html +++ b/doxygen/examples/H5.format.1.1.html @@ -2596,7 +2596,7 @@

Name: Complex Dataspace (Fiber Bundle?)

Mesh Type: (unsigned 32-bit integer)
This value indicates whether the grid is - polar/spherical/cartesion, + polar/spherical/cartesian, structured/unstructured and regular/irregular.
The mesh type value is broken up as follows:
diff --git a/doxygen/examples/H5.format.2.0.html b/doxygen/examples/H5.format.2.0.html index d2979e18ba1..bde030f3853 100644 --- a/doxygen/examples/H5.format.2.0.html +++ b/doxygen/examples/H5.format.2.0.html @@ -8458,7 +8458,7 @@

Header Message Name: Complex Dataspace (Fiber Bun
Mesh Type: (unsigned 32-bit integer)
This value indicates whether the grid is - polar/spherical/cartesion, + polar/spherical/cartesian, structured/unstructured and regular/irregular.
The mesh type value is broken up as follows:
diff --git a/doxygen/examples/H5.format.html b/doxygen/examples/H5.format.html index c52e8ea3b8c..832e3fcd79b 100644 --- a/doxygen/examples/H5.format.html +++ b/doxygen/examples/H5.format.html @@ -9123,7 +9123,7 @@

Header Message Name: Complex Dataspace (Fiber Bun
Mesh Type: (unsigned 32-bit integer)
This value indicates whether the grid is - polar/spherical/cartesion, + polar/spherical/cartesian, structured/unstructured and regular/irregular.
The mesh type value is broken up as follows:
diff --git a/fortran/examples/CMakeLists.txt b/fortran/examples/CMakeLists.txt index 3a16c23a0c1..6cbe8d6daac 100644 --- a/fortran/examples/CMakeLists.txt +++ b/fortran/examples/CMakeLists.txt @@ -35,7 +35,10 @@ set (F2003_examples foreach (example ${examples}) add_executable (f90_ex_${example} ${HDF5_F90_EXAMPLES_SOURCE_DIR}/${example}.f90) - target_compile_options(f90_ex_${example} PRIVATE $<$:${WIN_COMPILE_FLAGS}>) + target_compile_options(f90_ex_${example} + PRIVATE + "${HDF5_CMAKE_Fortran_FLAGS}" + $<$:${WIN_COMPILE_FLAGS}>) # set_property(TARGET f90_ex_${example} APPEND PROPERTY LINK_FLAGS $<$:"-SUBSYSTEM:CONSOLE">) # set_property(TARGET f90_ex_${example} APPEND PROPERTY LINK_FLAGS $<$:${WIN_LINK_FLAGS}>) if(MSVC) @@ -76,7 +79,10 @@ endforeach () foreach (example ${F2003_examples}) add_executable (f03_ex_${example} ${HDF5_F90_EXAMPLES_SOURCE_DIR}/${example}.f90) - target_compile_options(f03_ex_${example} PRIVATE $<$:${WIN_COMPILE_FLAGS}>) + target_compile_options(f03_ex_${example} + PRIVATE + "${HDF5_CMAKE_Fortran_FLAGS}" + $<$:${WIN_COMPILE_FLAGS}>) # set_property(TARGET f03_ex_${example} APPEND PROPERTY LINK_FLAGS $<$:"-SUBSYSTEM:CONSOLE">) # set_property(TARGET f03_ex_${example} APPEND PROPERTY LINK_FLAGS $<$:${WIN_LINK_FLAGS}>) if(MSVC) @@ -117,7 +123,10 @@ endforeach () if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND) add_executable (f90_ex_ph5example ${HDF5_F90_EXAMPLES_SOURCE_DIR}/ph5example.f90) - target_compile_options(f90_ex_ph5example PRIVATE $<$:${WIN_COMPILE_FLAGS}>) + target_compile_options(f90_ex_ph5example + PRIVATE + "${HDF5_CMAKE_Fortran_FLAGS}" + $<$:${WIN_COMPILE_FLAGS}>) # set_property(TARGET f90_ex_ph5example APPEND PROPERTY LINK_FLAGS $<$:"-SUBSYSTEM:CONSOLE">) # set_property(TARGET f90_ex_ph5example APPEND PROPERTY LINK_FLAGS $<$:${WIN_LINK_FLAGS}>) if(MSVC) diff --git a/fortran/src/H5Dff.F90 b/fortran/src/H5Dff.F90 index f4fe4ac5e88..5d6ff524bb2 100644 --- a/fortran/src/H5Dff.F90 +++ b/fortran/src/H5Dff.F90 @@ -1816,7 +1816,6 @@ END SUBROUTINE h5dwrite_reference_obj SUBROUTINE h5dwrite_reference_dsetreg(dset_id, mem_type_id, buf, dims, hdferr, & mem_space_id, file_space_id, xfer_prp) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id INTEGER(HID_T), INTENT(IN) :: mem_type_id diff --git a/fortran/src/H5Sff.F90 b/fortran/src/H5Sff.F90 index 5a1ca53cd46..e734c03d1f9 100644 --- a/fortran/src/H5Sff.F90 +++ b/fortran/src/H5Sff.F90 @@ -71,7 +71,7 @@ INTEGER FUNCTION h5screate_simple_c(rank, dims, maxdims, space_id) BIND(C,NAME=' IMPLICIT NONE INTEGER, INTENT(IN) :: rank INTEGER(HSIZE_T), INTENT(IN) :: dims(rank) - INTEGER(HSIZE_T), DIMENSION(:),INTENT(IN) :: maxdims(rank) + INTEGER(HSIZE_T), INTENT(IN) :: maxdims(rank) INTEGER(HID_T), INTENT(OUT) :: space_id END FUNCTION h5screate_simple_c END INTERFACE diff --git a/fortran/src/H5_buildiface.F90 b/fortran/src/H5_buildiface.F90 index cd4580b8c04..62ced23cf45 100644 --- a/fortran/src/H5_buildiface.F90 +++ b/fortran/src/H5_buildiface.F90 @@ -43,7 +43,6 @@ #include PROGRAM H5_buildiface - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR, C_CHAR, C_LOC IMPLICIT NONE ! 
These values are valid REAL KINDs (with corresponding C float) found during configure diff --git a/fortran/test/tH5D.F90 b/fortran/test/tH5D.F90 index 8c1484f3ddc..c38123518fe 100644 --- a/fortran/test/tH5D.F90 +++ b/fortran/test/tH5D.F90 @@ -644,7 +644,6 @@ SUBROUTINE test_dset_fill(cleanup, total_error) INTEGER, PARAMETER :: int_kind_16 = SELECTED_INT_KIND(18) !should map to INTEGER*8 on most modern processors INTEGER(KIND=int_kind_1) , DIMENSION(1:DIM0), TARGET :: data_i1 INTEGER(KIND=int_kind_4) , DIMENSION(1:DIM0), TARGET :: data_i4 - INTEGER(KIND=int_kind_8) , DIMENSION(1:DIM0), TARGET :: data_i8 INTEGER(KIND=int_kind_16), DIMENSION(1:DIM0), TARGET :: data_i16 INTEGER(KIND=int_kind_1) , TARGET :: data0_i1 = 4 INTEGER(KIND=int_kind_4) , TARGET :: data0_i4 = 4 @@ -683,7 +682,6 @@ SUBROUTINE test_dset_fill(cleanup, total_error) ! Initialize memory buffer data_i1 = -2 data_i4 = -2 - data_i8 = -2 data_i16 = -2 data_int = -2 #if H5_HAVE_Fortran_INTEGER_SIZEOF_16!=0 @@ -798,7 +796,6 @@ SUBROUTINE test_dset_fill(cleanup, total_error) ! Initialize memory buffer data_i1 = -2 data_i4 = -2 - data_i8 = -2 data_i16 = -2 #if H5_HAVE_Fortran_INTEGER_SIZEOF_16!=0 data_i32 = -2 diff --git a/fortran/test/tH5G_1_8.F90 b/fortran/test/tH5G_1_8.F90 index dca4cf2a956..cd354d81d2b 100644 --- a/fortran/test/tH5G_1_8.F90 +++ b/fortran/test/tH5G_1_8.F90 @@ -192,48 +192,18 @@ SUBROUTINE group_info(cleanup, fapl, total_error) IF(idx_type == H5_INDEX_CRT_ORDER_F)THEN IF(iorder == H5_ITER_INC_F)THEN order = H5_ITER_INC_F -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in increasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in increasing order w/o creation order index" -!!$ ENDIF ELSE IF (iorder == H5_ITER_DEC_F) THEN order = H5_ITER_DEC_F -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in decreasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in decreasing order w/o creation order index" -!!$ ENDIF ELSE order = H5_ITER_NATIVE_F -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in native order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in native order w/o creation order index" -!!$ ENDIF ENDIF ELSE IF(iorder == H5_ITER_INC_F)THEN order = H5_ITER_INC_F -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in increasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in increasing order w/o creation order index" -!!$ ENDIF ELSE IF (iorder == H5_ITER_DEC_F) THEN order = H5_ITER_DEC_F -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in decreasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in decreasing order w/o creation order index" -!!$ ENDIF ELSE order = H5_ITER_NATIVE_F -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in native order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"query group info by creation order index in native order w/o creation order index" -!!$ ENDIF ENDIF END IF @@ -1263,56 +1233,16 @@ SUBROUTINE delete_by_idx(cleanup, fapl, total_error) CHARACTER(LEN=2) :: chr2 INTEGER :: error INTEGER :: id_type - ! - ! - ! 
- CHARACTER(LEN=80) :: fix_filename1 - CHARACTER(LEN=80) :: fix_filename2 INTEGER(HSIZE_T) :: htmp LOGICAL :: cleanup - DO i = 1, 80 - fix_filename1(i:i) = " " - fix_filename2(i:i) = " " - ENDDO - ! Loop over operating on different indices on link fields DO idx_type = H5_INDEX_NAME_F, H5_INDEX_CRT_ORDER_F ! Loop over operating in different orders DO iorder = H5_ITER_INC_F, H5_ITER_DEC_F ! Loop over using index for creation order value DO i = 1, 2 - ! Print appropriate test message -!!$ IF(idx_type == H5_INDEX_CRT_ORDER_F)THEN -!!$ IF(iorder == H5_ITER_INC_F)THEN -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"deleting links by creation order index in increasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"deleting links by creation order index in increasing order w/o creation order index" -!!$ ENDIF -!!$ ELSE -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"deleting links by creation order index in decreasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"deleting links by creation order index in decreasing order w/o creation order index" -!!$ ENDIF -!!$ ENDIF -!!$ ELSE -!!$ IF(iorder == H5_ITER_INC_F)THEN -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"deleting links by name index in increasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"deleting links by name index in increasing order w/o creation order index" -!!$ ENDIF -!!$ ELSE -!!$ IF(use_index(i))THEN -!!$ WRITE(*,'(5x,A)')"deleting links by name index in decreasing order w/creation order index" -!!$ ELSE -!!$ WRITE(*,'(5x,A)')"deleting links by name index in decreasing order w/o creation order index" -!!$ ENDIF -!!$ ENDIF -!!$ ENDIF ! Create file CALL H5Fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error, access_prp=fapl) diff --git a/hl/fortran/examples/CMakeLists.txt b/hl/fortran/examples/CMakeLists.txt index 293bc2b096f..caa5e78ec02 100644 --- a/hl/fortran/examples/CMakeLists.txt +++ b/hl/fortran/examples/CMakeLists.txt @@ -10,6 +10,7 @@ foreach (example ${examples}) add_executable (hl_f90_ex_${example} ${HDF5_HL_F90_EXAMPLES_SOURCE_DIR}/${example}.f90) target_compile_options(hl_f90_ex_${example} PRIVATE + "${HDF5_CMAKE_Fortran_FLAGS}" $<$:${WIN_COMPILE_FLAGS}> ) # set_property(TARGET hl_f90_ex_${example} APPEND PROPERTY LINK_FLAGS $<$:"-SUBSYSTEM:CONSOLE">) diff --git a/hl/fortran/src/H5IMff.F90 b/hl/fortran/src/H5IMff.F90 index a1be745d099..5354dd0ee9c 100644 --- a/hl/fortran/src/H5IMff.F90 +++ b/hl/fortran/src/H5IMff.F90 @@ -380,7 +380,7 @@ END SUBROUTINE h5imlink_palette_f !> !! \ingroup FH5IM !! -!! \brief This function dettaches a palette to an existing image dataset. +!! \brief This function detaches a palette from an existing image dataset. !! !! \param loc_id Location identifier. The identifier may be that of a file or group. !! \param image_name The name of the image dataset.
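For reference alongside these palette documentation fixes, a hypothetical C usage of the corresponding HL image routines (dataset names, dimensions, and buffers are invented) might look like:

```c
#include "hdf5.h"
#include "hdf5_hl.h"

/* Hypothetical sketch: attach a palette to an image dataset, then detach
 * it. Unlinking only removes the association; the palette dataset itself
 * remains in the file. */
static void palette_example(hid_t file_id, const unsigned char *image_buf,
                            const unsigned char *pal_buf)
{
    hsize_t pal_dims[2] = {256, 3}; /* 256 RGB triplets */

    H5IMmake_image_8bit(file_id, "image1", 640, 480, image_buf);
    H5IMmake_palette(file_id, "palette1", pal_dims, pal_buf);
    H5IMlink_palette(file_id, "image1", "palette1");   /* attach */
    H5IMunlink_palette(file_id, "image1", "palette1"); /* detach */
}
```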
diff --git a/hl/src/H5IM.c b/hl/src/H5IM.c index 42a5feb4abe..b5476c6e93a 100644 --- a/hl/src/H5IM.c +++ b/hl/src/H5IM.c @@ -600,7 +600,7 @@ H5IMlink_palette(hid_t loc_id, const char *image_name, const char *pal_name) /*------------------------------------------------------------------------- * Function: H5IMunlink_palette * - * Purpose: This function dettaches a palette from an existing image dataset + * Purpose: This function detaches a palette from an existing image dataset * * Return: Success: 0, Failure: -1 * diff --git a/hl/src/H5IMpublic.h b/hl/src/H5IMpublic.h index 81dbb623740..0ba9d648cff 100644 --- a/hl/src/H5IMpublic.h +++ b/hl/src/H5IMpublic.h @@ -66,7 +66,7 @@ extern "C" { * - \ref H5IMread_image * \n Reads image data from disk. * - \ref H5IMunlink_palette - * \n Dettaches a palette from an image. + * \n Detaches a palette from an image. * */ @@ -229,7 +229,7 @@ H5_HLDLL herr_t H5IMlink_palette(hid_t loc_id, const char *image_name, const cha * -------------------------------------------------------------------------- * \ingroup H5IM * - * \brief Dettaches a palette from an image. + * \brief Detaches a palette from an image. * * \fg_loc_id * \param[in] image_name The name of the image dataset @@ -237,7 +237,7 @@ H5_HLDLL herr_t H5IMlink_palette(hid_t loc_id, const char *image_name, const cha * * \return \herr_t * - * \details H5IMunlink_palette() dettaches a palette from an image + * \details H5IMunlink_palette() detaches a palette from an image * specified by \p image_name. * */ diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java index 77029904297..388ba18f87a 100644 --- a/java/src/hdf/hdf5lib/H5.java +++ b/java/src/hdf/hdf5lib/H5.java @@ -9434,10 +9434,10 @@ public synchronized static native void H5Pset_meta_block_size(long fapl_id, long /** * @ingroup JH5P * - * H5Pset_sieve_buf_size Sets the maximum size of the data seive buffer used for file + * H5Pset_sieve_buf_size Sets the maximum size of the data sieve buffer used for file * drivers which are capable of using data sieving. The data sieve * buffer is used when performing I/O on datasets in the file. Using a - * buffer which is large anough to hold several pieces of the dataset + * buffer which is large enough to hold several pieces of the dataset * being read in for hyperslab selections boosts performance by quite a * bit. *

@@ -9448,7 +9448,7 @@ public synchronized static native void H5Pset_meta_block_size(long fapl_id, long * @param fapl_id * IN: Identifier of property list to modify. * @param size - * IN: maximum size of the data seive buffer. + * IN: maximum size of the data sieve buffer. * * @exception HDF5LibraryException * Error from the HDF5 Library. diff --git a/release_docs/HISTORY-1_0-1_8_0.txt b/release_docs/HISTORY-1_0-1_8_0.txt index 6d9148d5e5e..afe200b0b9d 100644 --- a/release_docs/HISTORY-1_0-1_8_0.txt +++ b/release_docs/HISTORY-1_0-1_8_0.txt @@ -1443,7 +1443,7 @@ Known Problems filter script. * H5Ocopy() does not copy reg_ref attributes correctly when shared-message - is turn on. The value of the reference in the destination attriubte is + is turn on. The value of the reference in the destination attribute is wrong. This H5Ocopy problem will affect h5copy tool %%%%1.6.7%%%% Release Information for hdf5-1.6.7 (31/January/08) diff --git a/release_docs/HISTORY-1_10_0-1_12_0.txt b/release_docs/HISTORY-1_10_0-1_12_0.txt index a83e58db858..4649a319523 100644 --- a/release_docs/HISTORY-1_10_0-1_12_0.txt +++ b/release_docs/HISTORY-1_10_0-1_12_0.txt @@ -409,7 +409,7 @@ Bug Fixes since HDF5-1.10.3 release - Fixed a bug caused by bad tag value when condensing object header messages - There was an assertion failure when moving meessages from running a + There was an assertion failure when moving messages from running a user test program with library release hdf5.1.10.4. It was because the tag value (object header's address) was not set up when entering the library routine H5O__chunk_update_idx(), which will eventually diff --git a/release_docs/HISTORY-1_12_0-1_14_0.txt b/release_docs/HISTORY-1_12_0-1_14_0.txt index 11ca947af9d..c48517c7c75 100644 --- a/release_docs/HISTORY-1_12_0-1_14_0.txt +++ b/release_docs/HISTORY-1_12_0-1_14_0.txt @@ -3163,7 +3163,7 @@ New Features Some of the tools accepted shortened versions of the long options (ex: --datas instead of --dataset). These were implemented inconsistently, - are difficult to maintian, and occasionally block useful long option + are difficult to maintain, and occasionally block useful long option names. These partial long options have been removed from all the tools. (DER - 2021/08/03) diff --git a/release_docs/HISTORY-1_8_0-1_10_0.txt b/release_docs/HISTORY-1_8_0-1_10_0.txt index cc42d3bc7a5..255ccca07c3 100644 --- a/release_docs/HISTORY-1_8_0-1_10_0.txt +++ b/release_docs/HISTORY-1_8_0-1_10_0.txt @@ -837,7 +837,7 @@ Bug Fixes since HDF5-1.8.0 release - Support for TFLOPS, config/intel-osf1, is removed since the TFLOPS machine has long retired. AKC - 2009/10/06. - Added $(EXEEXT) extension to H5detect when it's executed in the - src/Makfile to generate H5Tinit.c so it works correctly on platforms + src/Makefile to generate H5Tinit.c so it works correctly on platforms that require the full extension when running executables. MAM - 2009/10/01 - BZ #1613 - Configure will now set FC and CXX to "no" when fortran and c++ @@ -1734,6 +1734,6 @@ Known Problems filter script. * H5Ocopy() does not copy reg_ref attributes correctly when shared-message - is turn on. The value of the reference in the destination attriubte is + is turn on. The value of the reference in the destination attribute is wrong. This H5Ocopy problem will affect h5copy tool diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index f979ff315bc..835892471c2 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -307,7 +307,7 @@ IV. 
Further considerations CMake options: HDF5_ALLOW_EXTERNAL_SUPPORT:STRING="TGZ" ZLIB_TGZ_NAME:STRING="zlib_src.ext" - SZAEC_TGZ_NAME:STRING="szaec_src.ext" + LIBAEC_TGZ_NAME:STRING="szaec_src.ext" TGZPATH:STRING="some_location" where "some_location" is the URL or full path to the compressed file and ext is the type of compression file. Also set CMAKE_BUILD_TYPE @@ -316,12 +316,10 @@ IV. Further considerations D. Use original source packages from a compressed file by adding the following CMake options: - BUILD_SZIP_WITH_FETCHCONTENT:BOOL=ON - LIBAEC_TGZ_ORIGNAME:STRING="szip_src.ext" + LIBAEC_TGZ_NAME:STRING="szip_src.ext" LIBAEC_TGZ_ORIGPATH:STRING="some_location" - BUILD_ZLIB_WITH_FETCHCONTENT:BOOL=ON - ZLIB_TGZ_ORIGNAME:STRING="zlib_src.ext" + ZLIB_TGZ_NAME:STRING="zlib_src.ext" ZLIB_TGZ_ORIGPATH:STRING="some_location" HDF5_ALLOW_EXTERNAL_SUPPORT:STRING="TGZ" @@ -329,8 +327,6 @@ IV. Further considerations ZLIB_USE_LOCALCONTENT:BOOL=OFF LIBAEC_USE_LOCALCONTENT:BOOL=OFF or full path to the compressed file and ext is the type of compression file. - The individual filters are enabled by setting the BUILD__WITH_FETCHCONTENT - CMake variable to ON. Also set CMAKE_BUILD_TYPE to the configuration type during configuration. See the settings in the config/cmake/cacheinit.cmake file HDF uses for testing. @@ -527,75 +523,82 @@ These five steps are described in detail below. set (HDF5_ALLOW_EXTERNAL_SUPPORT "TGZ" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE) set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) - set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE) - set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.2.13" CACHE STRING "Use ZLIB from original location" FORCE) - set (ZLIB_TGZ_ORIGNAME "zlib-1.2.13.tar.gz" CACHE STRING "Use ZLIB from original compressed file" FORCE) + set (ZLIB_TGZ_NAME "zlib-1.3.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE) + set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.3" CACHE STRING "Use ZLIB from original location" FORCE) + set (ZLIB_TGZ_NAME "zlib-1.3.tar.gz" CACHE STRING "Use ZLIB from original compressed file" FORCE) set (ZLIB_USE_LOCALCONTENT ON CACHE BOOL "Use local file for ZLIB FetchContent" FORCE) set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE) - set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE) set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6" CACHE STRING "Use LIBAEC from original location" FORCE) - set (LIBAEC_TGZ_ORIGNAME "libaec-1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) + set (LIBAEC_TGZ_NAME "libaec-1.0.6.tar.gz" CACHE STRING "Use LIBAEC from original compressed file" FORCE) set (LIBAEC_USE_LOCALCONTENT ON CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) ####################### # filter plugin options ####################### - set (PLUGIN_TGZ_NAME "hdf5_plugins.tar.gz" CACHE STRING "Use PLUGINS from compressed file" FORCE) + set (PLUGIN_TGZ_ORIGPATH "https://github.com/HDFGroup/hdf5_plugins/releases/download/snapshots" CACHE STRING "Use PLUGINS from original location" FORCE) + set (PLUGIN_TGZ_NAME "hdf5_plugins-master.tar.gz" CACHE STRING "Use PLUGINS from compressed file" FORCE) + set (PLUGIN_USE_LOCALCONTENT ON CACHE BOOL "Use local file for PLUGIN FetchContent" 
FORCE) set (PLUGIN_PACKAGE_NAME "pl" CACHE STRING "Name of PLUGIN package" FORCE) ############ # bitshuffle ########### - set (BSHUF_GIT_URL "https://someurl/bitshuffle.git" CACHE STRING "Use BSHUF from HDF repository" FORCE) + set (BSHUF_GIT_URL "https://github.com/kiyo-masui/bitshuffle.git" CACHE STRING "Use BSHUF from HDF repository" FORCE) set (BSHUF_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (BSHUF_TGZ_NAME "bitshuffle.tar.gz" CACHE STRING "Use BSHUF from compressed file" FORCE) + set (BSHUF_TGZ_ORIGPATH "https://github.com/kiyo-masui/bitshuffle/archive/refs/tags" CACHE STRING "Use PLUGINS from original location" FORCE) + set (BSHUF_TGZ_NAME "bitshuffle-0.5.1.tar.gz" CACHE STRING "Use BSHUF from compressed file" FORCE) set (BSHUF_PACKAGE_NAME "bshuf" CACHE STRING "Name of BSHUF package" FORCE) ####### # blosc ####### - set (BLOSC_GIT_URL "https://github.com/Blosc/c-blosc.git" CACHE STRING "Use BLOSC from Github" FORCE) - set (BLOSC_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (BLOSC_TGZ_NAME "c-blosc.tar.gz" CACHE STRING "Use BLOSC from compressed file" FORCE) + set (BLOSC_GIT_URL "https://github.com/Blosc/c-blosc.git" CACHE STRING "Use BLOSC from Github repository" FORCE) + set (BLOSC_GIT_BRANCH "main" CACHE STRING "" FORCE) + set (BLOSC_TGZ_ORIGPATH "https://github.com/Blosc/c-blosc/archive/refs/tags" CACHE STRING "Use PLUGINS from original location" FORCE) + set (BLOSC_TGZ_NAME "c-blosc-1.21.5.tar.gz" CACHE STRING "Use BLOSC from compressed file" FORCE) set (BLOSC_PACKAGE_NAME "blosc" CACHE STRING "Name of BLOSC package" FORCE) - set (ZLIB_GIT_URL "https://someurl/zlib.git" CACHE STRING "Use ZLIB from HDF repo" FORCE) - set (ZLIB_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) - set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE) + set (BLOSC_ZLIB_GIT_URL "https://github.com/madler/zlib.git" CACHE STRING "Use ZLIB from GitHub repository" FORCE) + set (BLOSC_ZLIB_GIT_BRANCH "develop" CACHE STRING "" FORCE) + set (BLOSC_ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.3" CACHE STRING "Use PLUGINS from original location" FORCE) + set (BLOSC_ZLIB_TGZ_NAME "zlib-1.3.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) + set (BLOSC_ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of BLOSC_ZLIB package" FORCE) ####### # bzip2 ###### # - set (BZ2_GIT_URL "https://someurl/bzip2.git" CACHE STRING "Use BZ2 from HDF repository" FORCE) + set (BZ2_GIT_URL "https://github.com/libarchive/bzip2.git" CACHE STRING "Use BZ2 from GitHub repository" FORCE) set (BZ2_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (BZ2_TGZ_NAME "BZ2.tar.gz" CACHE STRING "Use BZ2 from compressed file" FORCE) + set (BZ2_TGZ_ORIGPATH "https://github.com/libarchive/bzip2/archive/refs/tags" CACHE STRING "Use PLUGINS from original location" FORCE) + set (BZ2_TGZ_NAME "bzip2-bzip2-1.0.8.tar.gz" CACHE STRING "Use BZ2 from compressed file" FORCE) set (BZ2_PACKAGE_NAME "bz2" CACHE STRING "Name of BZ2 package" FORCE) ####### # fpzip ####### - set (FPZIP_GIT_URL "https://github.com/LLNL/fpzip" CACHE STRING "Use FPZIP from github repository" FORCE) - set (FPZIP_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (FPZIP_TGZ_NAME "fpzip.tar.gz" CACHE STRING "Use FPZIP from compressed file" FORCE) + set (FPZIP_GIT_URL "https://github.com/LLNL/fpzip.git" CACHE STRING "Use FPZIP from GitHub repository" FORCE) + set (FPZIP_GIT_BRANCH "develop" CACHE STRING "" FORCE) + set (FPZIP_TGZ_ORIGPATH 
"https://github.com/LLNL/fpzip/releases/download/1.3.0" CACHE STRING "Use PLUGINS from original location" FORCE) + set (FPZIP_TGZ_NAME "fpzip-1.3.0.tar.gz" CACHE STRING "Use FPZIP from compressed file" FORCE) set (FPZIP_PACKAGE_NAME "fpzip" CACHE STRING "Name of FPZIP package" FORCE) ###### # jpeg ###### - set (JPEG_GIT_URL "https://someurl/jpeg.git" CACHE STRING "Use JPEG from HDF repository" FORCE) - set (JPEG_GIT_BRANCH "jpeg9c" CACHE STRING "" FORCE) - #set (JPEG_TGZ_NAME "JPEG9c.tar.gz" CACHE STRING "Use JPEG from compressed file" FORCE) - set (JPEG_TGZ_NAME "JPEG.tar.gz" CACHE STRING "Use JPEG from compressed file" FORCE) + set (JPEG_GIT_URL "No repo www.ijg.org/files" CACHE STRING "Use JPEG from ILG" FORCE) + set (JPEG_GIT_BRANCH "v9e" CACHE STRING "" FORCE) + set (JPEG_TGZ_ORIGPATH "https://www.ijg.org/files" CACHE STRING "Use PLUGINS from original location" FORCE) + set (JPEG_TGZ_NAME "jpegsrc.v9e.tar.gz" CACHE STRING "Use JPEG from compressed file" FORCE) set (JPEG_PACKAGE_NAME "jpeg" CACHE STRING "Name of JPEG package" FORCE) ###### # lz4 ###### set (BUILD_LZ4_LIBRARY_SOURCE ON CACHE BOOL "build the lz4 library within the plugin" FORCE) - set (LZ4_GIT_URL "https://someurl/lz4.git" CACHE STRING "Use LZ4 from HDF repository" FORCE) - set (LZ4_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (LZ4_TGZ_NAME "lz4.tar.gz" CACHE STRING "Use LZ4 from compressed file" FORCE) + set (LZ4_GIT_URL "https://github.com/lz4/lz4.git" CACHE STRING "Use LZ4 from GitHub repository" FORCE) + set (LZ4_GIT_BRANCH "dev" CACHE STRING "" FORCE) + set (LZ4_TGZ_ORIGPATH "https://github.com/lz4/lz4/releases/download/v1.9.4" CACHE STRING "Use PLUGINS from original location" FORCE) + set (LZ4_TGZ_NAME "lz4-1.9.4.tar.gz" CACHE STRING "Use LZ4 from compressed file" FORCE) set (LZ4_PACKAGE_NAME "lz4" CACHE STRING "Name of LZ4 package" FORCE) ###### # lzf ###### - set (LZF_GIT_URL "https://someurl/lzf.git" CACHE STRING "Use LZF from HDF repository" FORCE) - set (LZF_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (LZF_TGZ_NAME "lzf.tar.gz" CACHE STRING "Use LZF from compressed file" FORCE) + set (LZF_URL "http://software.schmorp.de/pkg/liblzf.html" CACHE STRING "LZF home" FORCE) + set (LZF_TGZ_ORIGPATH "http://dist.schmorp.de/liblzf" CACHE STRING "Use LZF from original location" FORCE) + set (LZF_TGZ_NAME "liblzf-3.6.tar.gz" CACHE STRING "Use LZF from compressed file" FORCE) set (LZF_PACKAGE_NAME "lzf" CACHE STRING "Name of LZF package" FORCE) ######## # mafisc @@ -605,23 +608,26 @@ These five steps are described in detail below. 
###### # szf ###### - set (SZF_GIT_URL "https://github.com/disheng222/SZ" CACHE STRING "Use SZ from github repository" FORCE) - set (SZF_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (SZF_TGZ_NAME "szf.tar.gz" CACHE STRING "Use SZ from compressed file" FORCE) + set (SZ_GIT_URL "https://github.com/szcompressor/SZ.git" CACHE STRING "Use SZ filter from GitHub repository" FORCE) + set (SZ_GIT_BRANCH "master" CACHE STRING "" FORCE) + set (SZ_TGZ_ORIGPATH "https://github.com/szcompressor/SZ/releases/download/v2.1.12.5" CACHE STRING "Use PLUGINS from original location" FORCE) + set (SZ_TGZ_NAME "SZ-2.1.12.5.tar.gz" CACHE STRING "Use SZ filter from compressed file" FORCE) set (SZF_PACKAGE_NAME "szf" CACHE STRING "Name of SZ package" FORCE) ###### # zfp ###### - set (ZFP_GIT_URL "https://github.com/LLNL/zfp.git" CACHE STRING "Use ZFP from Github" FORCE) - set (ZFP_GIT_BRANCH "master" CACHE STRING "" FORCE) - set (ZFP_TGZ_NAME "zfp.tar.gz" CACHE STRING "Use ZFP from compressed file" FORCE) + set (ZFP_GIT_URL "https://github.com/LLNL/zfp.git" CACHE STRING "Use ZFP from GitHub repository" FORCE) + set (ZFP_GIT_BRANCH "develop" CACHE STRING "" FORCE) + set (ZFP_TGZ_ORIGPATH "https://github.com/LLNL/zfp/releases/download/1.0.0" CACHE STRING "Use PLUGINS from original location" FORCE) + set (ZFP_TGZ_NAME "zfp-1.0.0.tar.gz" CACHE STRING "Use ZFP from compressed file" FORCE) set (ZFP_PACKAGE_NAME "zfp" CACHE STRING "Name of ZFP package" FORCE) ###### # zstd ###### - set (ZSTD_GIT_URL "https://github.com/facebook/zstd" CACHE STRING "Use ZSTD from repository" FORCE) + set (ZSTD_GIT_URL "https://github.com/facebook/zstd.git" CACHE STRING "Use ZSTD from GitHub repository" FORCE) set (ZSTD_GIT_BRANCH "dev" CACHE STRING "" FORCE) - set (ZSTD_TGZ_NAME "zstd.tar.gz" CACHE STRING "Use ZSTD from compressed file" FORCE) + set (ZSTD_TGZ_ORIGPATH "https://github.com/facebook/zstd/releases/download/v1.5.5" CACHE STRING "Use PLUGINS from original location" FORCE) + set (ZSTD_TGZ_NAME "zstd-1.5.5.tar.gz" CACHE STRING "Use ZSTD from compressed file" FORCE) set (ZSTD_PACKAGE_NAME "zstd" CACHE STRING "Name of ZSTD package" FORCE) 2. 
Configure the cache settings @@ -878,24 +884,25 @@ HDF5_ALLOW_EXTERNAL_SUPPORT "Allow External Library Building (NO GIT TGZ)" HDF5_ENABLE_PLUGIN_SUPPORT "Enable PLUGIN Filters" OFF HDF5_ENABLE_SZIP_SUPPORT "Use SZip Filter" ON HDF5_ENABLE_Z_LIB_SUPPORT "Enable Zlib Filters" ON -PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGINS" 0 -ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0 -SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0 + +ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0 +ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" +ZLIB_TGZ_NAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" +ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" ON + +SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0 if (HDF5_ENABLE_SZIP_SUPPORT) HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" ON +LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" +LIBAEC_TGZ_NAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" +LIBAEC_USE_LOCALCONTENT "Use local file for LIBAEC FetchContent" ON + +PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGINS" 0 if (WINDOWS) - H5_DEFAULT_PLUGINDIR "%ALLUSERSPROFILE%/hdf5/lib/plugin" + H5_DEFAULT_PLUGINDIR "%ALLUSERSPROFILE%/hdf5/lib/plugin" else () - H5_DEFAULT_PLUGINDIR "/usr/local/hdf5/lib/plugin" + H5_DEFAULT_PLUGINDIR "/usr/local/hdf5/lib/plugin" endif () -if (BUILD_SZIP_WITH_FETCHCONTENT) - LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" - LIBAEC_TGZ_ORIGNAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" - LIBAEC_USE_LOCALCONTENT "Use local file for LIBAEC FetchContent" ON -if (BUILD_ZLIB_WITH_FETCHCONTENT) - ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" - ZLIB_TGZ_ORIGNAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" - ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" ON NOTE: The BUILD_STATIC_EXECS ("Build Static Executables") option is only valid @@ -1060,15 +1067,15 @@ Available configurations presets can be displayed by executing: Using individual command presets (where <compiler-type> is GNUC or MSVC or Clang): change directory to the hdf5 source folder - cmake --presets=ci-StdShar-<compiler-type> - cmake --build --presets=ci-StdShar-<compiler-type> - ctest --presets=ci-StdShar-<compiler-type> - cpack --presets=ci-StdShar-<compiler-type> + cmake --preset ci-StdShar-<compiler-type> + cmake --build --preset ci-StdShar-<compiler-type> + ctest --preset ci-StdShar-<compiler-type> + cpack --preset ci-StdShar-<compiler-type> Using the workflow preset to configure, build, test and package the standard configuration is: change directory to the hdf5 source folder - execute "cmake --workflow --presets=ci-StdShar-<compiler-type> --fresh" + execute "cmake --workflow --preset ci-StdShar-<compiler-type> --fresh" where <compiler-type> is GNUC or MSVC or Clang Creating your own configurations @@ -1088,7 +1095,7 @@ For instance, to change the support files to use a local directory, edit CMakeUs "inherits": "ci-base", "cacheVariables": { "HDF5_ALLOW_EXTERNAL_SUPPORT": {"type": "STRING", "value": "TGZ"}, - "TGZPATH": {"type": "STRING", "value": "${sourceParentDir}/temp"} + "TGZPATH": {"type": "PATH", "value": "${sourceParentDir}/temp"} } }, { diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 0e1dbe42b3b..bcc91df70d2 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt
@@ -47,6 +47,29 @@ New Features Configuration: ------------- + - Converted the build of libaec and zlib to use FETCH_CONTENT with CMake. + + Using the CMake FetchContent module, the external filters can populate + content at configure time via any method supported by the ExternalProject + module. Whereas ExternalProject_Add() downloads at build time, the + FetchContent module makes content available immediately, allowing the + configure step to use the content in commands like add_subdirectory(), + include() or file() operations. + + Removed HDF options for using FETCH_CONTENT explicitly: + BUILD_SZIP_WITH_FETCHCONTENT:BOOL + BUILD_ZLIB_WITH_FETCHCONTENT:BOOL + - Thread-safety + static library disabled on Windows w/ CMake + + The thread-safety feature requires hooks in DllMain(), which is only + present in the shared library. + + We previously just warned about this, but now any CMake configuration + that tries to build thread-safety and the static library will fail. + This cannot be overridden with ALLOW_UNSUPPORTED. + + Fixes GitHub issue #3613 + - Autotools builds now build the szip filter by default when an appropriate library is found @@ -196,7 +219,7 @@ New Features selection I/O should improve performance and reduce memory uses in some cases. - - Change the error handling for a not found path in the find plugin process. + - Changed the error handling for a not found path in the find plugin process. While attempting to load a plugin the HDF5 library will fail if one of the directories in the plugin paths does not exist, even if there are more paths @@ -235,7 +258,16 @@ New Features Parallel Library: ----------------- - - + - Added optimized support for the parallel compression feature when + using the multi-dataset I/O API routines collectively + + Previously, calling H5Dwrite_multi/H5Dread_multi collectively in parallel + with a list containing one or more filtered datasets would cause HDF5 to + break out of the optimized multi-dataset I/O mode and instead perform I/O + by looping over each dataset in the I/O request. The library has now been + updated to perform I/O in a more optimized manner in this case by first + performing I/O on all the filtered datasets at once and then performing + I/O on all the unfiltered datasets at once. Fortran Library: @@ -309,6 +341,12 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + + - Dropped support for MPI-2 + + The MPI-2 supporting artifacts have been removed due to the cessation + of MPI-2 maintenance and testing since version HDF5 1.12. + - Fixed a bug with the way the Subfiling VFD assigns I/O concentrators During a file open operation, the Subfiling VFD determines the topology @@ -653,7 +691,14 @@ Bug Fixes since HDF5-1.14.0 release Checks were added to the CMake and Autotools code to verify that CLOCK_MONOTONIC_COARSE, PTHREAD_MUTEX_ADAPTIVE_NP and pthread_condattr_setclock() are available before attempting to use them in Subfiling VFD-related utility code. Without these checks, attempting - to build the Subfiling VFD on macOS would fail. + to build the Subfiling VFD on macOS would fail. + + - Fixes the ordering of INCLUDES when building with CMake + + Include directories in the source or build tree should come before other + directories to prioritize headers in the sources over installed ones. 
+ + Fixes GitHub #1027 - The accum test now passes on macOS 12+ (Monterey) w/ CMake @@ -957,6 +1002,12 @@ Platforms Tested Known Problems ============== + IEEE standard arithmetic enables software to raise exceptions such as overflow, + division by zero, and other illegal operations without interrupting or halting + the program flow. The HDF5 C library intentionally performs these exceptions. + Therefore, the "-ieee=full" nagfor switch is necessary when compiling a program + to avoid stopping on an exception. + CMake files do not behave correctly with paths containing spaces. Do not use spaces in paths because the required escaping for handling spaces results in very complex and fragile build files. diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 77c7fdb65ec..cdebe80a3b6 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -971,7 +971,7 @@ H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr) * dirty bytes count. * * The rank 0 process then removes any references to the - * entry under its old address from the cleands and dirtied + * entry under its old address from the clean and dirtied * lists, and inserts an entry in the dirtied list under the * new address. * diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h index 010fdc706ec..5e23036784b 100644 --- a/src/H5ACprivate.h +++ b/src/H5ACprivate.h @@ -25,7 +25,7 @@ #include "H5ACpublic.h" /*public prototypes */ -/* Pivate headers needed by this header */ +/* Private headers needed by this header */ #include "H5private.h" /* Generic Functions */ #include "H5Cprivate.h" /* Cache */ #include "H5Fprivate.h" /* File access */ diff --git a/src/H5Centry.c b/src/H5Centry.c index 353cc1f8d71..2bbf9acdbb5 100644 --- a/src/H5Centry.c +++ b/src/H5Centry.c @@ -1737,7 +1737,7 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e * deserialized entry after it is inserted in the cache. * * Since deserializing a prefetched entry is semantically - * equivalent to a load, issue an entry loaded nofification + * equivalent to a load, issue an entry loaded notification * if the notify callback is defined. * * Return: SUCCEED on success, and FAIL on failure. diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 5aadd9e2939..3477e75d7e5 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -537,7 +537,7 @@ typedef struct H5C_t H5C_t; * ensure that the entry is ready to be flushed -- in particular, * if the entry contains references to other entries that are in * temporary file space, the pre-serialize callback must move those - * entries into real file space so that the serialzed entry will + * entries into real file space so that the serialized entry will * contain no invalid data. * * One would think that the base address and length of diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index cabdcfb996c..9f4bd90b68a 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -1114,6 +1114,31 @@ H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) } } +#ifdef H5_HAVE_PARALLEL + /* + * If collective metadata reads are enabled, ensure all ranks + * have the dataset's chunk index open (if it was created) to + * prevent possible metadata inconsistency issues or unintentional + * independent metadata reads later on. 
+ */ + if (H5F_SHARED_HAS_FEATURE(io_info->f_sh, H5FD_FEAT_HAS_MPI) && + H5F_shared_get_coll_metadata_reads(io_info->f_sh) && + H5D__chunk_is_space_alloc(&dataset->shared->layout.storage)) { + H5D_chunk_ud_t udata; + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + + /* + * TODO: Until the dataset chunk index callback structure has + * callbacks for checking if an index is opened and also for + * directly opening the index, the following fake chunk lookup + * serves the purpose of forcing a chunk index open operation + * on all ranks + */ + if (H5D__chunk_lookup(dataset, scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to collectively open dataset chunk index"); + } +#endif + done: if (file_space_normalized == true) if (H5S_hyper_denormalize_offset(dinfo->file_space, old_offset) < 0) @@ -1556,6 +1581,9 @@ H5D__create_piece_map_single(H5D_dset_io_info_t *di, H5D_io_info_t *io_info) piece_info->in_place_tconv = false; piece_info->buf_off = 0; + /* Check if chunk is in a dataset with filters applied */ + piece_info->filtered_dset = di->dset->shared->dcpl_cache.pline.nused > 0; + /* make connection to related dset info from this piece_info */ piece_info->dset_info = di; @@ -1591,6 +1619,7 @@ H5D__create_piece_file_map_all(H5D_dset_io_info_t *di, H5D_io_info_t *io_info) hsize_t curr_partial_clip[H5S_MAX_RANK]; /* Current partial dimension sizes to clip against */ hsize_t partial_dim_size[H5S_MAX_RANK]; /* Size of a partial dimension */ bool is_partial_dim[H5S_MAX_RANK]; /* Whether a dimension is currently a partial chunk */ + bool filtered_dataset; /* Whether the dataset in question has filters applied */ unsigned num_partial_dims; /* Current number of partial dimensions */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1640,6 +1669,9 @@ H5D__create_piece_file_map_all(H5D_dset_io_info_t *di, H5D_io_info_t *io_info) /* Set the index of this chunk */ chunk_index = 0; + /* Check whether dataset has filters applied */ + filtered_dataset = di->dset->shared->dcpl_cache.pline.nused > 0; + /* Create "temporary" chunk for selection operations (copy file space) */ if (NULL == (tmp_fchunk = H5S_create_simple(fm->f_ndims, fm->chunk_dim, NULL))) HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "unable to create dataspace for chunk"); @@ -1686,6 +1718,8 @@ H5D__create_piece_file_map_all(H5D_dset_io_info_t *di, H5D_io_info_t *io_info) new_piece_info->in_place_tconv = false; new_piece_info->buf_off = 0; + new_piece_info->filtered_dset = filtered_dataset; + /* Insert the new chunk into the skip list */ if (H5SL_insert(fm->dset_sel_pieces, new_piece_info, &new_piece_info->index) < 0) { H5D__free_piece_info(new_piece_info, NULL, NULL); @@ -1798,6 +1832,7 @@ H5D__create_piece_file_map_hyper(H5D_dset_io_info_t *dinfo, H5D_io_info_t *io_in hsize_t chunk_index; /* Index of chunk */ hsize_t start_scaled[H5S_MAX_RANK]; /* Starting scaled coordinates of selection */ hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ + bool filtered_dataset; /* Whether the dataset in question has filters applied */ int curr_dim; /* Current dimension to increment */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1831,6 +1866,9 @@ H5D__create_piece_file_map_hyper(H5D_dset_io_info_t *dinfo, H5D_io_info_t *io_in /* Calculate the index of this chunk */ chunk_index = H5VM_array_offset_pre(fm->f_ndims, dinfo->layout->u.chunk.down_chunks, scaled); + /* Check whether dataset has filters applied */ + filtered_dataset = 
dinfo->dset->shared->dcpl_cache.pline.nused > 0; + /* Iterate through each chunk in the dataset */ while (sel_points) { /* Check for intersection of current chunk and file selection */ @@ -1885,6 +1923,8 @@ H5D__create_piece_file_map_hyper(H5D_dset_io_info_t *dinfo, H5D_io_info_t *io_in new_piece_info->in_place_tconv = false; new_piece_info->buf_off = 0; + new_piece_info->filtered_dset = filtered_dataset; + /* Add piece to global piece_count */ io_info->piece_count++; @@ -2257,6 +2297,8 @@ H5D__piece_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, piece_info->in_place_tconv = false; piece_info->buf_off = 0; + piece_info->filtered_dset = dinfo->dset->shared->dcpl_cache.pline.nused > 0; + /* Make connection to related dset info from this piece_info */ piece_info->dset_info = dinfo; @@ -2417,6 +2459,9 @@ H5D__chunk_mdio_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) /* Add to sel_pieces and update pieces_added */ io_info->sel_pieces[io_info->pieces_added++] = piece_info; + + if (piece_info->filtered_dset) + io_info->filtered_pieces_added++; } /* Advance to next skip list node */ @@ -2728,6 +2773,9 @@ H5D__chunk_read(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) if (io_info->sel_pieces) io_info->sel_pieces[io_info->pieces_added] = chunk_info; io_info->pieces_added++; + + if (io_info->sel_pieces && chunk_info->filtered_dset) + io_info->filtered_pieces_added++; } } /* end if */ else if (!skip_missing_chunks) { @@ -3142,6 +3190,9 @@ H5D__chunk_write(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info) if (io_info->sel_pieces) io_info->sel_pieces[io_info->pieces_added] = chunk_info; io_info->pieces_added++; + + if (io_info->sel_pieces && chunk_info->filtered_dset) + io_info->filtered_pieces_added++; } } /* end else */ diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index db156fd348a..2a9f178ce8c 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -644,6 +644,8 @@ H5D__contig_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) new_piece_info->in_place_tconv = false; new_piece_info->buf_off = 0; + new_piece_info->filtered_dset = dinfo->dset->shared->dcpl_cache.pline.nused > 0; + /* Calculate type conversion buffer size and check for in-place conversion if necessary. Currently * only implemented for selection I/O. */ if (io_info->use_select_io != H5D_SELECTION_IO_MODE_OFF && diff --git a/src/H5Dio.c b/src/H5Dio.c index 543bb56b2ec..2134ce1c79a 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -107,6 +107,17 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) FUNC_ENTER_NOAPI(FAIL) +#ifdef H5_HAVE_PARALLEL + /* Reset the actual io mode properties to the default values in case + * the DXPL (if it's non-default) was previously used in a collective + * I/O operation. 
+ */ + if (!H5CX_is_def_dxpl()) { + H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_NO_CHUNK_OPTIMIZATION); + H5CX_set_mpio_actual_io_mode(H5D_MPIO_NO_COLLECTIVE); + } /* end if */ +#endif + /* Init io_info */ if (H5D__ioinfo_init(count, H5D_IO_OP_READ, dset_info, &io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info"); @@ -222,6 +233,14 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) dset_info[i].buf.vp = (void *)(((uint8_t *)dset_info[i].buf.vp) + buf_adj); } /* end if */ + /* Set up I/O operation */ + if (H5D__dset_ioinfo_init(dset_info[i].dset, &(dset_info[i]), &(store[i])) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up I/O operation"); + + /* Check if any filters are applied to the dataset */ + if (dset_info[i].dset->shared->dcpl_cache.pline.nused > 0) + io_info.filtered_count++; + /* If space hasn't been allocated and not using external storage, * return fill value to buffer if fill time is upon allocation, or * do nothing if fill time is never. If the dataset is compact and @@ -259,10 +278,6 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) io_skipped = io_skipped + 1; } /* end if */ else { - /* Set up I/O operation */ - if (H5D__dset_ioinfo_init(dset_info[i].dset, &(dset_info[i]), &(store[i])) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation"); - /* Sanity check that space is allocated, if there are elements */ if (dset_info[i].nelmts > 0) assert( @@ -273,22 +288,23 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) dset_info[i].dset->shared->dcpl_cache.efl.nused > 0 || dset_info[i].dset->shared->layout.type == H5D_COMPACT); - /* Call storage method's I/O initialization routine */ - if (dset_info[i].layout_ops.io_init && - (dset_info[i].layout_ops.io_init)(&io_info, &(dset_info[i])) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info"); dset_info[i].skip_io = false; - io_op_init++; - - /* Reset metadata tagging */ - H5AC_tag(prev_tag, NULL); } + + /* Call storage method's I/O initialization routine */ + if (dset_info[i].layout_ops.io_init && + (dset_info[i].layout_ops.io_init)(&io_info, &(dset_info[i])) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info"); + io_op_init++; + + /* Reset metadata tagging */ + H5AC_tag(prev_tag, NULL); } /* end of for loop */ - assert(io_op_init + io_skipped == count); + assert(io_op_init == count); /* If no datasets have I/O, we're done */ - if (io_op_init == 0) + if (io_skipped == count) HGOTO_DONE(SUCCEED); /* Perform second phase of type info initialization */ @@ -323,7 +339,11 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) } /* MDIO-specific second phase initialization */ - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { + /* Check for skipped I/O */ + if (dset_info[i].skip_io) + continue; + if (dset_info[i].layout_ops.mdio_init) { haddr_t prev_tag = HADDR_UNDEF; @@ -337,6 +357,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) /* Reset metadata tagging */ H5AC_tag(prev_tag, NULL); } + } /* Invoke correct "high level" I/O routine */ if ((*io_info.md_io_ops.multi_read_md)(&io_info) < 0) @@ -430,7 +451,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) done: /* Shut down the I/O op information */ for (i = 0; i < io_op_init; i++) - if (!dset_info[i].skip_io && dset_info[i].layout_ops.io_term && + if (dset_info[i].layout_ops.io_term && (*dset_info[i].layout_ops.io_term)(&io_info, &(dset_info[i])) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, 
FAIL, "unable to shut down I/O op info"); @@ -512,6 +533,17 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) FUNC_ENTER_NOAPI(FAIL) +#ifdef H5_HAVE_PARALLEL + /* Reset the actual io mode properties to the default values in case + * the DXPL (if it's non-default) was previously used in a collective + * I/O operation. + */ + if (!H5CX_is_def_dxpl()) { + H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_NO_CHUNK_OPTIMIZATION); + H5CX_set_mpio_actual_io_mode(H5D_MPIO_NO_COLLECTIVE); + } /* end if */ +#endif + /* Init io_info */ if (H5D__ioinfo_init(count, H5D_IO_OP_WRITE, dset_info, &io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info"); @@ -586,7 +618,7 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (NULL == dset_info[i].buf.cvp) { /* Check for any elements selected (which is invalid) */ if (dset_info[i].nelmts > 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer"); + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no input buffer"); /* If the buffer is nil, and 0 element is selected, make a fake buffer. * This is for some MPI package like ChaMPIon on NCSA's tungsten which @@ -655,6 +687,10 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (H5D__dset_ioinfo_init(dset_info[i].dset, &(dset_info[i]), &(store[i])) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up I/O operation"); + /* Check if any filters are applied to the dataset */ + if (dset_info[i].dset->shared->dcpl_cache.pline.nused > 0) + io_info.filtered_count++; + /* Allocate dataspace and initialize it if it hasn't been. */ should_alloc_space = dset_info[i].dset->shared->dcpl_cache.efl.nused == 0 && !(*dset_info[i].dset->shared->layout.ops->is_space_alloc)( @@ -1225,15 +1261,6 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) dset0 = io_info->dsets_info[0].dset; assert(dset0->oloc.file); - /* Reset the actual io mode properties to the default values in case - * the DXPL (if it's non-default) was previously used in a collective - * I/O operation. 
- */ - if (!H5CX_is_def_dxpl()) { - H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_NO_CHUNK_OPTIMIZATION); - H5CX_set_mpio_actual_io_mode(H5D_MPIO_NO_COLLECTIVE); - } /* end if */ - /* Make any parallel I/O adjustments */ if (io_info->using_mpi_vfd) { H5FD_mpio_xfer_t xfer_mode; /* Parallel transfer for this request */ diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 3bab6be8c15..0ef6542fcdb 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -82,21 +82,10 @@ */ #define H5D_MPIO_INIT_CHUNK_IDX_INFO(index_info, dset) \ do { \ - index_info.f = (dset)->oloc.file; \ - index_info.pline = &((dset)->shared->dcpl_cache.pline); \ - index_info.layout = &((dset)->shared->layout.u.chunk); \ - index_info.storage = &((dset)->shared->layout.storage.u.chunk); \ - } while (0) - -/* - * Macro to initialize a H5D_chunk_ud_t structure - * given a pointer to a H5D_chk_idx_info_t structure - */ -#define H5D_MPIO_INIT_CHUNK_UD_INFO(chunk_ud, index_info_ptr) \ - do { \ - memset(&chunk_ud, 0, sizeof(H5D_chunk_ud_t)); \ - chunk_ud.common.layout = (index_info_ptr)->layout; \ - chunk_ud.common.storage = (index_info_ptr)->storage; \ + (index_info).f = (dset)->oloc.file; \ + (index_info).pline = &((dset)->shared->dcpl_cache.pline); \ + (index_info).layout = &((dset)->shared->layout.u.chunk); \ + (index_info).storage = &((dset)->shared->layout.storage.u.chunk); \ } while (0) /******************/ @@ -129,14 +118,43 @@ typedef struct H5D_chunk_alloc_info_t { H5F_block_t chunk_current; H5F_block_t chunk_new; hsize_t chunk_idx; + haddr_t dset_oloc_addr; } H5D_chunk_alloc_info_t; /* * Information for a chunk pertaining to the dataset's chunk - * index entry for the chunk + * index entry for the chunk. + * + * NOTE: To support efficient lookups of H5D_filtered_collective_chunk_info_t + * structures during parallel writes to filtered chunks, the + * chunk_idx and dset_oloc_addr fields of this structure are used + * together as a key for a hash table by following the approach + * outlined at https://troydhanson.github.io/uthash/userguide.html#_compound_keys. + * This means the following: + * + * - Instances of this structure should be memset to 0 when + * used for hashing to ensure that any padding between the + * chunk_idx and dset_oloc_addr fields does not affect the + * generated key. + * + * - The chunk_idx and dset_oloc_addr fields should be arranged + * in that specific order, as the code currently relies on + * this ordering when calculating the key length and it + * performs memory operations on the structure starting from + * the chunk_idx field and using the calculated key length. + * + * - The chunk_idx and dset_oloc_addr fields should ideally + * be arranged next to each other in the structure to minimize + * the calculated key length. */ typedef struct H5D_chunk_index_info_t { - hsize_t chunk_idx; + /* + * These two fields must come in this order and next to + * each other for proper and efficient hashing + */ + hsize_t chunk_idx; + haddr_t dset_oloc_addr; + unsigned filter_mask; bool need_insert; } H5D_chunk_index_info_t; @@ -231,6 +249,24 @@ typedef struct H5D_filtered_collective_chunk_info_t { UT_hash_handle hh; } H5D_filtered_collective_chunk_info_t; +/* + * Information cached about each dataset involved when performing + * collective I/O on filtered chunks. 
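+ *
+ * Caching the fill buffer (fb_info) and fill dataspace (fill_space)
+ * per dataset lets them be initialized once and reused for every chunk
+ * processed from that dataset, rather than being rebuilt per chunk.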
+ */ +typedef struct H5D_mpio_filtered_dset_info_t { + const H5D_dset_io_info_t *dset_io_info; + H5D_fill_buf_info_t fb_info; + H5D_chk_idx_info_t chunk_idx_info; + hsize_t file_chunk_size; + haddr_t dset_oloc_addr; + H5S_t *fill_space; + bool should_fill; + bool fb_info_init; + bool index_empty; + + UT_hash_handle hh; +} H5D_mpio_filtered_dset_info_t; + /* * Top-level structure that contains an array of H5D_filtered_collective_chunk_info_t * chunk info structures for collective filtered I/O, as well as other useful information. @@ -249,6 +285,10 @@ typedef struct H5D_filtered_collective_chunk_info_t { * will contain the chunk's "chunk index" value that can be used for chunk * lookup operations. * + * chunk_hash_table_keylen - The calculated length of the key used for the chunk info hash + * table, depending on whether collective I/O is being performed + * on a single filtered dataset or on multiple filtered datasets. + * * num_chunks_infos - The number of entries in the `chunk_infos` array. * * num_chunks_to_read - The number of entries (or chunks) in the `chunk_infos` array that @@ -263,12 +303,39 @@ typedef struct H5D_filtered_collective_chunk_info_t { * of chunk info structures to determine how big of I/O vectors to * allocate during read operations, as an example. * + * all_dset_indices_empty - A boolean indicating whether all the datasets involved in the + * I/O operation have empty chunk indices. If this is the case, + * collective read operations can be skipped during processing + * of chunks. + * + * no_dset_index_insert_methods - A boolean indicating whether all the datasets involved + * in the I/O operation have no chunk index insertion + * methods. If this is the case, collective chunk reinsertion + * operations can be skipped during processing of chunks. + * + * single_dset_info - A pointer to a H5D_mpio_filtered_dset_info_t structure containing + * information that is used when performing collective I/O on a single + * filtered dataset. + * + * dset_info_hash_table - A hash table storing H5D_mpio_filtered_dset_info_t structures + * that is populated when performing collective I/O on multiple + * filtered datasets at a time using the multi-dataset I/O API + * routines.
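+ *
+ * A minimal sketch of the compound-key insertion (uthash assumed;
+ * `entry` is a hypothetical H5D_filtered_collective_chunk_info_t
+ * pointer) under this key length; the memset zeroes any padding so
+ * the key bytes hash deterministically, and the remaining index_info
+ * fields would be set afterwards:
+ *
+ *   memset(&entry->index_info, 0, sizeof(entry->index_info));
+ *   entry->index_info.chunk_idx      = chunk_idx;
+ *   entry->index_info.dset_oloc_addr = dset_oloc_addr;
+ *   HASH_ADD(hh, chunk_list->chunk_hash_table, index_info.chunk_idx,
+ *            chunk_list->chunk_hash_table_keylen, entry);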
+ * */ typedef struct H5D_filtered_collective_io_info_t { H5D_filtered_collective_chunk_info_t *chunk_infos; H5D_filtered_collective_chunk_info_t *chunk_hash_table; + size_t chunk_hash_table_keylen; size_t num_chunk_infos; size_t num_chunks_to_read; + bool all_dset_indices_empty; + bool no_dset_index_insert_methods; + + union { + H5D_mpio_filtered_dset_info_t *single_dset_info; + H5D_mpio_filtered_dset_info_t *dset_info_hash_table; + } dset_info; } H5D_filtered_collective_io_info_t; /* @@ -278,6 +345,7 @@ typedef struct H5D_filtered_collective_io_info_t { typedef struct H5D_chunk_redistribute_info_t { H5F_block_t chunk_block; hsize_t chunk_idx; + haddr_t dset_oloc_addr; int orig_owner; int new_owner; int num_writers; @@ -299,11 +367,11 @@ typedef struct H5D_chunk_insert_info_t { static herr_t H5D__piece_io(H5D_io_info_t *io_info); static herr_t H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank, int mpi_size); -static herr_t H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, - int mpi_rank, int mpi_size); +static herr_t H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_infos, + size_t num_dset_infos, int mpi_rank, int mpi_size); static herr_t H5D__link_piece_collective_io(H5D_io_info_t *io_info, int mpi_rank); -static herr_t H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, - int mpi_rank, int mpi_size); +static herr_t H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_infos, + size_t num_dset_infos, int mpi_rank, int mpi_size); static herr_t H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, H5S_t *file_space, H5S_t *mem_space); static herr_t H5D__final_collective_io(H5D_io_info_t *io_info, hsize_t mpi_buf_count, @@ -314,7 +382,8 @@ static herr_t H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, int *sum_chu static herr_t H5D__mpio_get_sum_chunk_dset(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, int *sum_chunkf); static herr_t H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, - const H5D_dset_io_info_t *di, int mpi_rank, + const H5D_dset_io_info_t *di, + size_t num_dset_infos, int mpi_rank, H5D_filtered_collective_io_info_t *chunk_list); static herr_t H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_list, const H5D_io_info_t *io_info, int mpi_rank, int mpi_size, @@ -324,28 +393,25 @@ static herr_t H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_i bool all_ranks_involved, const H5D_io_info_t *io_info, int mpi_rank, int mpi_size); static herr_t H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk_list, - H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, - int mpi_rank, int H5_ATTR_NDEBUG_UNUSED mpi_size, - unsigned char ***chunk_msg_bufs, - int *chunk_msg_bufs_len); + H5D_io_info_t *io_info, int mpi_rank, + int H5_ATTR_NDEBUG_UNUSED mpi_size, + unsigned char ***chunk_msg_bufs, + int *chunk_msg_bufs_len); static herr_t H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chunk_list, - const H5D_io_info_t *io_info, - const H5D_dset_io_info_t *di, int mpi_rank); + const H5D_io_info_t *io_info, size_t num_dset_infos, + int mpi_rank); static herr_t H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *chunk_list, unsigned char **chunk_msg_bufs, int chunk_msg_bufs_len, const H5D_io_info_t 
*io_info, - const H5D_dset_io_info_t *di, - int H5_ATTR_NDEBUG_UNUSED mpi_rank); + size_t num_dset_infos, int mpi_rank); static herr_t H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t *chunk_list, - size_t *num_chunks_assigned_map, - H5D_io_info_t *io_info, - H5D_chk_idx_info_t *idx_info, int mpi_rank, - int mpi_size); + size_t *num_chunks_assigned_map, + H5D_io_info_t *io_info, size_t num_dset_infos, + int mpi_rank, int mpi_size); static herr_t H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, - H5D_io_info_t *io_info, H5D_dset_io_info_t *di, - H5D_chk_idx_info_t *idx_info, int mpi_rank, - int mpi_size); + H5D_io_info_t *io_info, size_t num_dset_infos, + int mpi_rank, int mpi_size); static herr_t H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, bool *contig_type_derived, MPI_Datatype *resized_type, @@ -636,8 +702,8 @@ H5D__mpio_opt_possible(H5D_io_info_t *io_info) } /* Check whether these are both simple or scalar dataspaces */ - if (!((H5S_SIMPLE == H5S_GET_EXTENT_TYPE(mem_space) || - H5S_SCALAR == H5S_GET_EXTENT_TYPE(mem_space)) && + if (!((H5S_SIMPLE == H5S_GET_EXTENT_TYPE(mem_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(mem_space) || + H5S_NULL == H5S_GET_EXTENT_TYPE(mem_space)) && (H5S_SIMPLE == H5S_GET_EXTENT_TYPE(file_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(file_space)))) local_cause[0] |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; @@ -1143,13 +1209,6 @@ H5D__piece_io(H5D_io_info_t *io_info) /* Use multi dataset path for now */ use_multi_dset = true; - /* Check for filtered datasets */ - for (i = 0; i < io_info->count; i++) - if (io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused > 0) { - use_multi_dset = false; - break; - } - /* Check if this I/O exceeds one linked chunk threshold */ if (recalc_io_option && use_multi_dset) { /* Get the chunk optimization option threshold */ @@ -1173,26 +1232,40 @@ H5D__piece_io(H5D_io_info_t *io_info) } } } + } - /* Perform multi dataset I/O if appropriate */ - if (use_multi_dset) { + /* Perform multi dataset I/O if appropriate */ + if (use_multi_dset) { #ifdef H5_HAVE_INSTRUMENTED_LIBRARY - /*** Set collective chunk user-input optimization API. ***/ - if (H5D_ONE_LINK_CHUNK_IO == io_option) { - if (H5CX_test_set_mpio_coll_chunk_link_hard(0) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value"); - } /* end if */ -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + /*** Set collective chunk user-input optimization API. ***/ + if (H5D_ONE_LINK_CHUNK_IO == io_option) { + if (H5CX_test_set_mpio_coll_chunk_link_hard(0) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value"); + } /* end if */ +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + /* Process all the filtered datasets first */ + if (io_info->filtered_count > 0) { + if (H5D__link_chunk_filtered_collective_io(io_info, io_info->dsets_info, io_info->count, mpi_rank, + mpi_size) < 0) + HGOTO_ERROR(H5E_IO, (H5D_IO_OP_READ == io_info->op_type ? H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish filtered linked chunk MPI-IO"); + } + /* Process all the unfiltered datasets */ + if ((io_info->filtered_count == 0) || (io_info->filtered_count < io_info->count)) { /* Perform unfiltered link chunk collective IO */ if (H5D__link_piece_collective_io(io_info, mpi_rank) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish linked chunk MPI-IO"); + HGOTO_ERROR(H5E_IO, (H5D_IO_OP_READ == io_info->op_type ? 
H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish linked chunk MPI-IO"); } } - - if (!use_multi_dset) { + else { /* Loop over datasets */ for (i = 0; i < io_info->count; i++) { + if (io_info->dsets_info[i].skip_io) + continue; + if (io_info->dsets_info[i].layout->type == H5D_CONTIGUOUS) { /* Contiguous: call H5D__inter_collective_io() directly */ H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CONTIGUOUS_COLLECTIVE; @@ -1203,7 +1276,8 @@ H5D__piece_io(H5D_io_info_t *io_info) if (H5D__inter_collective_io(io_info, &io_info->dsets_info[i], io_info->dsets_info[i].file_space, io_info->dsets_info[i].mem_space) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO"); + HGOTO_ERROR(H5E_IO, (H5D_IO_OP_READ == io_info->op_type ? H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish shared collective MPI-IO"); /* Set the actual I/O mode property. internal_collective_io will not break to * independent I/O, so we set it here. @@ -1248,10 +1322,12 @@ H5D__piece_io(H5D_io_info_t *io_info) case H5D_ONE_LINK_CHUNK_IO_MORE_OPT: /* Check if there are any filters in the pipeline */ if (io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused > 0) { - if (H5D__link_chunk_filtered_collective_io(io_info, &io_info->dsets_info[i], + if (H5D__link_chunk_filtered_collective_io(io_info, &io_info->dsets_info[i], 1, mpi_rank, mpi_size) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, - "couldn't finish filtered linked chunk MPI-IO"); + HGOTO_ERROR( + H5E_IO, + (H5D_IO_OP_READ == io_info->op_type ? H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish filtered linked chunk MPI-IO"); } /* end if */ else { /* If there is more than one dataset we cannot make the multi dataset call here, @@ -1262,14 +1338,18 @@ H5D__piece_io(H5D_io_info_t *io_info) if (H5D__multi_chunk_collective_io(io_info, &io_info->dsets_info[i], mpi_rank, mpi_size) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, - "couldn't finish optimized multiple chunk MPI-IO"); + HGOTO_ERROR( + H5E_IO, + (H5D_IO_OP_READ == io_info->op_type ? H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish optimized multiple chunk MPI-IO"); } else { /* Perform unfiltered link chunk collective IO */ if (H5D__link_piece_collective_io(io_info, mpi_rank) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, - "couldn't finish linked chunk MPI-IO"); + HGOTO_ERROR( + H5E_IO, + (H5D_IO_OP_READ == io_info->op_type ? H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish linked chunk MPI-IO"); } } @@ -1279,17 +1359,21 @@ H5D__piece_io(H5D_io_info_t *io_info) default: /* multiple chunk IO via threshold */ /* Check if there are any filters in the pipeline */ if (io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused > 0) { - if (H5D__multi_chunk_filtered_collective_io(io_info, &io_info->dsets_info[i], + if (H5D__multi_chunk_filtered_collective_io(io_info, &io_info->dsets_info[i], 1, mpi_rank, mpi_size) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, - "couldn't finish optimized multiple filtered chunk MPI-IO"); + HGOTO_ERROR( + H5E_IO, + (H5D_IO_OP_READ == io_info->op_type ? H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish optimized multiple filtered chunk MPI-IO"); } /* end if */ else { /* Perform unfiltered multi chunk collective IO */ if (H5D__multi_chunk_collective_io(io_info, &io_info->dsets_info[i], mpi_rank, mpi_size) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, - "couldn't finish optimized multiple chunk MPI-IO"); + HGOTO_ERROR( + H5E_IO, + (H5D_IO_OP_READ == io_info->op_type ? 
H5E_READERROR : H5E_WRITEERROR), + FAIL, "couldn't finish optimized multiple chunk MPI-IO"); } break; @@ -1423,14 +1507,24 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran int mpi_code; /* MPI return code */ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode = H5D_MPIO_LINK_CHUNK; H5D_mpio_actual_io_mode_t actual_io_mode = 0; - size_t i; /* Local index variable */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE /* set actual_io_mode */ - for (i = 0; i < io_info->count; i++) { - assert(io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused == 0); + for (size_t i = 0; i < io_info->count; i++) { + /* Skip this dataset if no I/O is being performed */ + if (io_info->dsets_info[i].skip_io) + continue; + + /* Filtered datasets are processed elsewhere. A contiguous dataset + * could possibly have filters in the DCPL pipeline, but the library + * will currently ignore optional filters in that case. + */ + if ((io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused > 0) && + (io_info->dsets_info[i].layout->type != H5D_CONTIGUOUS)) + continue; + if (io_info->dsets_info[i].layout->type == H5D_CHUNKED) actual_io_mode |= H5D_MPIO_CHUNK_COLLECTIVE; else if (io_info->dsets_info[i].layout->type == H5D_CONTIGUOUS) @@ -1457,8 +1551,9 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran H5_flexible_const_ptr_t base_buf_addr; base_buf_addr.cvp = NULL; - /* Get the number of chunks with a selection */ - num_chunk = io_info->pieces_added; + /* Get the number of unfiltered chunks with a selection */ + assert(io_info->filtered_pieces_added <= io_info->pieces_added); + num_chunk = io_info->pieces_added - io_info->filtered_pieces_added; H5_CHECK_OVERFLOW(num_chunk, size_t, int); #ifdef H5Dmpio_DEBUG @@ -1471,7 +1566,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran /* Check if sel_pieces array is sorted */ assert(io_info->sel_pieces[0]->faddr != HADDR_UNDEF); - for (i = 1; i < num_chunk; i++) { + for (size_t i = 1; i < io_info->pieces_added; i++) { assert(io_info->sel_pieces[i]->faddr != HADDR_UNDEF); if (io_info->sel_pieces[i]->faddr < io_info->sel_pieces[i - 1]->faddr) { @@ -1508,11 +1603,20 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file is derived datatype flags buffer"); - /* save lowest file address */ - ctg_store.contig.dset_addr = io_info->sel_pieces[0]->faddr; - - /* save base mem addr of piece for read/write */ - base_buf_addr = io_info->sel_pieces[0]->dset_info->buf; + /* + * After sorting sel_pieces according to file address, locate + * the first unfiltered chunk and save its file address and + * base memory address for read/write + */ + ctg_store.contig.dset_addr = HADDR_UNDEF; + for (size_t i = 0; i < io_info->pieces_added; i++) { + if (!io_info->sel_pieces[i]->filtered_dset) { + ctg_store.contig.dset_addr = io_info->sel_pieces[i]->faddr; + base_buf_addr = io_info->sel_pieces[i]->dset_info->buf; + break; + } + } + assert(ctg_store.contig.dset_addr != HADDR_UNDEF); #ifdef H5Dmpio_DEBUG H5D_MPIO_DEBUG(mpi_rank, "before iterate over selected pieces\n"); @@ -1520,7 +1624,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran /* Obtain MPI derived datatype from all individual pieces */ /* Iterate over selected pieces for this process */ - for (i = 0; i < num_chunk; i++) { + for (size_t i = 0, curr_idx = 0; i < 
io_info->pieces_added; i++) { hsize_t *permute_map = NULL; /* array that holds the mapping from the old, out-of-order displacements to the in-order displacements of the MPI datatypes of the @@ -1530,24 +1634,28 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran /* Assign convenience pointer to piece info */ piece_info = io_info->sel_pieces[i]; + /* Skip over filtered pieces as they are processed elsewhere */ + if (piece_info->filtered_dset) + continue; + /* Obtain disk and memory MPI derived datatype */ /* NOTE: The permute_map array can be allocated within H5S_mpio_space_type * and will be fed into the next call to H5S_mpio_space_type * where it will be freed. */ if (H5S_mpio_space_type(piece_info->fspace, piece_info->dset_info->type_info.src_type_size, - &chunk_ftype[i], /* OUT: datatype created */ - &chunk_mpi_file_counts[i], /* OUT */ - &(chunk_mft_is_derived_array[i]), /* OUT */ - true, /* this is a file space, - so permute the - datatype if the point - selections are out of - order */ - &permute_map, /* OUT: a map to indicate the - permutation of points - selected in case they - are out of order */ + &chunk_ftype[curr_idx], /* OUT: datatype created */ + &chunk_mpi_file_counts[curr_idx], /* OUT */ + &(chunk_mft_is_derived_array[curr_idx]), /* OUT */ + true, /* this is a file space, + so permute the + datatype if the point + selections are out of + order */ + &permute_map, /* OUT: a map to indicate the + permutation of points + selected in case they + are out of order */ &is_permuted /* OUT */) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type"); @@ -1555,20 +1663,20 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran if (is_permuted) assert(permute_map); if (H5S_mpio_space_type(piece_info->mspace, piece_info->dset_info->type_info.dst_type_size, - &chunk_mtype[i], &chunk_mpi_mem_counts[i], - &(chunk_mbt_is_derived_array[i]), false, /* this is a memory - space, so if the file - space is not - permuted, there is no - need to permute the - datatype if the point - selections are out of - order*/ - &permute_map, /* IN: the permutation map - generated by the - file_space selection - and applied to the - memory selection */ + &chunk_mtype[curr_idx], &chunk_mpi_mem_counts[curr_idx], + &(chunk_mbt_is_derived_array[curr_idx]), false, /* this is a memory + space, so if the + file space is not + permuted, there is + no need to permute + the datatype if the + point selections + are out of order */ + &permute_map, /* IN: the permutation map + generated by the + file_space selection + and applied to the + memory selection */ &is_permuted /* IN */) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buf type"); /* Sanity check */ @@ -1578,16 +1686,19 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran /* Piece address relative to the first piece addr * Assign piece address to MPI displacement * (assume MPI_Aint big enough to hold it) */ - chunk_file_disp_array[i] = (MPI_Aint)piece_info->faddr - (MPI_Aint)ctg_store.contig.dset_addr; + chunk_file_disp_array[curr_idx] = + (MPI_Aint)piece_info->faddr - (MPI_Aint)ctg_store.contig.dset_addr; if (io_info->op_type == H5D_IO_OP_WRITE) { - chunk_mem_disp_array[i] = + chunk_mem_disp_array[curr_idx] = (MPI_Aint)piece_info->dset_info->buf.cvp - (MPI_Aint)base_buf_addr.cvp; } else if (io_info->op_type == H5D_IO_OP_READ) { - chunk_mem_disp_array[i] = + chunk_mem_disp_array[curr_idx] = 
(MPI_Aint)piece_info->dset_info->buf.vp - (MPI_Aint)base_buf_addr.vp; } + + curr_idx++; } /* end for */ /* Create final MPI derived datatype for the file */ @@ -1610,7 +1721,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran chunk_final_mtype_is_derived = true; /* Free the file & memory MPI datatypes for each chunk */ - for (i = 0; i < num_chunk; i++) { + for (size_t i = 0; i < num_chunk; i++) { if (chunk_mbt_is_derived_array[i]) if (MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_mtype + i))) HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) @@ -1655,6 +1766,9 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran ret_value); #endif + if (ret_value < 0) + H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_NO_CHUNK_OPTIMIZATION); + /* Release resources */ if (chunk_mtype) H5MM_xfree(chunk_mtype); @@ -1751,8 +1865,8 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran *------------------------------------------------------------------------- */ static herr_t -H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank, - int mpi_size) +H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_infos, + size_t num_dset_infos, int mpi_rank, int mpi_size) { H5D_filtered_collective_io_info_t chunk_list = {0}; unsigned char **chunk_msg_bufs = NULL; @@ -1760,7 +1874,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ int chunk_msg_bufs_len = 0; herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_TAG(dset_info->dset->oloc.addr) + FUNC_ENTER_PACKAGE assert(io_info); @@ -1781,18 +1895,15 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ /* Build a list of selected chunks in the collective io operation */ - if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, dset_info, mpi_rank, &chunk_list) < 0) + if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, dset_infos, num_dset_infos, mpi_rank, + &chunk_list) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list"); if (io_info->op_type == H5D_IO_OP_READ) { /* Filtered collective read */ - if (H5D__mpio_collective_filtered_chunk_read(&chunk_list, io_info, dset_info, mpi_rank) < 0) + if (H5D__mpio_collective_filtered_chunk_read(&chunk_list, io_info, num_dset_infos, mpi_rank) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks"); } else { /* Filtered collective write */ - H5D_chk_idx_info_t index_info; - - H5D_MPIO_INIT_CHUNK_IDX_INFO(index_info, dset_info->dset); - if (mpi_size > 1) { /* Redistribute shared chunks being written to */ if (H5D__mpio_redistribute_shared_chunks(&chunk_list, io_info, mpi_rank, mpi_size, @@ -1800,7 +1911,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks"); /* Send any chunk modification messages for chunks this rank no longer owns */ - if (H5D__mpio_share_chunk_modification_data(&chunk_list, io_info, dset_info, mpi_rank, mpi_size, + if (H5D__mpio_share_chunk_modification_data(&chunk_list, io_info, mpi_rank, mpi_size, &chunk_msg_bufs, &chunk_msg_bufs_len) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to send chunk modification data between MPI ranks"); @@ -1815,7 +1926,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ * must participate. 
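 * (Even a rank whose chunk list is empty makes the call below, which
 * keeps the collective MPI operations inside it matched across ranks.)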
*/ if (H5D__mpio_collective_filtered_chunk_update(&chunk_list, chunk_msg_bufs, chunk_msg_bufs_len, - io_info, dset_info, mpi_rank) < 0) + io_info, num_dset_infos, mpi_rank) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks"); /* Free up resources used by chunk hash table now that we're done updating chunks */ @@ -1823,7 +1934,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ /* All ranks now collectively re-allocate file space for all chunks */ if (H5D__mpio_collective_filtered_chunk_reallocate(&chunk_list, rank_chunks_assigned_map, io_info, - &index_info, mpi_rank, mpi_size) < 0) + num_dset_infos, mpi_rank, mpi_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't collectively re-allocate file space for chunks"); @@ -1843,12 +1954,15 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ * into the chunk index */ if (H5D__mpio_collective_filtered_chunk_reinsert(&chunk_list, rank_chunks_assigned_map, io_info, - dset_info, &index_info, mpi_rank, mpi_size) < 0) + num_dset_infos, mpi_rank, mpi_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't collectively re-insert modified chunks into chunk index"); } done: + if (ret_value < 0) + H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_NO_CHUNK_OPTIMIZATION); + if (chunk_msg_bufs) { for (size_t i = 0; i < (size_t)chunk_msg_bufs_len; i++) H5MM_free(chunk_msg_bufs[i]); @@ -1858,6 +1972,9 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ HASH_CLEAR(hh, chunk_list.chunk_hash_table); + if (rank_chunks_assigned_map) + H5MM_free(rank_chunks_assigned_map); + /* Free resources used by a rank which had some selection */ if (chunk_list.chunk_infos) { for (size_t i = 0; i < chunk_list.num_chunk_infos; i++) @@ -1867,15 +1984,42 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_ H5MM_free(chunk_list.chunk_infos); } /* end if */ - if (rank_chunks_assigned_map) - H5MM_free(rank_chunks_assigned_map); + /* Free resources used by cached dataset info */ + if ((num_dset_infos == 1) && (chunk_list.dset_info.single_dset_info)) { + H5D_mpio_filtered_dset_info_t *curr_dset_info = chunk_list.dset_info.single_dset_info; + + if (curr_dset_info->fb_info_init && H5D__fill_term(&curr_dset_info->fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release fill buffer info"); + if (curr_dset_info->fill_space && H5S_close(curr_dset_info->fill_space) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space"); + + H5MM_free(chunk_list.dset_info.single_dset_info); + chunk_list.dset_info.single_dset_info = NULL; + } + else if ((num_dset_infos > 1) && (chunk_list.dset_info.dset_info_hash_table)) { + H5D_mpio_filtered_dset_info_t *curr_dset_info; + H5D_mpio_filtered_dset_info_t *tmp; + + HASH_ITER(hh, chunk_list.dset_info.dset_info_hash_table, curr_dset_info, tmp) + { + HASH_DELETE(hh, chunk_list.dset_info.dset_info_hash_table, curr_dset_info); + + if (curr_dset_info->fb_info_init && H5D__fill_term(&curr_dset_info->fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release fill buffer info"); + if (curr_dset_info->fill_space && H5S_close(curr_dset_info->fill_space) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space"); + + H5MM_free(curr_dset_info); + curr_dset_info = NULL; + } + } #ifdef H5Dmpio_DEBUG H5D_MPIO_TIME_STOP(mpi_rank); H5D_MPIO_TRACE_EXIT(mpi_rank); #endif - FUNC_LEAVE_NOAPI_TAG(ret_value) + 
FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__link_chunk_filtered_collective_io() */ /*------------------------------------------------------------------------- @@ -2079,6 +2223,9 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_ H5CX_set_mpio_actual_io_mode(actual_io_mode); done: + if (ret_value < 0) + H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_NO_CHUNK_OPTIMIZATION); + /* Reset collective opt mode */ if (H5CX_set_mpio_coll_opt(orig_coll_opt_mode) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't reset MPI-I/O collective_op property"); @@ -2171,8 +2318,8 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_ *------------------------------------------------------------------------- */ static herr_t -H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank, - int mpi_size) +H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_infos, + size_t num_dset_infos, int mpi_rank, int mpi_size) { H5D_filtered_collective_io_info_t chunk_list = {0}; unsigned char **chunk_msg_bufs = NULL; @@ -2182,9 +2329,10 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info int mpi_code; herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_TAG(dset_info->dset->oloc.addr) + FUNC_ENTER_PACKAGE_TAG(dset_infos->dset->oloc.addr) assert(io_info); + assert(num_dset_infos == 1); /* Currently only supported with 1 dataset at a time */ #ifdef H5Dmpio_DEBUG H5D_MPIO_TRACE_ENTER(mpi_rank); @@ -2202,7 +2350,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info H5CX_set_mpio_actual_io_mode(H5D_MPIO_CHUNK_COLLECTIVE); /* Build a list of selected chunks in the collective IO operation */ - if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, dset_info, mpi_rank, &chunk_list) < 0) + if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, dset_infos, 1, mpi_rank, &chunk_list) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list"); /* Retrieve the maximum number of chunks selected for any rank */ @@ -2216,7 +2364,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info if (io_info->op_type == H5D_IO_OP_READ) { /* Filtered collective read */ for (size_t i = 0; i < max_num_chunks; i++) { - H5D_filtered_collective_io_info_t single_chunk_list = {0}; + H5D_filtered_collective_io_info_t single_chunk_list = chunk_list; /* Check if this rank has a chunk to work on for this iteration */ have_chunk_to_process = (i < chunk_list.num_chunk_infos); @@ -2236,8 +2384,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info single_chunk_list.num_chunks_to_read = 0; } - if (H5D__mpio_collective_filtered_chunk_read(&single_chunk_list, io_info, dset_info, mpi_rank) < - 0) + if (H5D__mpio_collective_filtered_chunk_read(&single_chunk_list, io_info, 1, mpi_rank) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks"); if (have_chunk_to_process && chunk_list.chunk_infos[i].buf) { @@ -2247,18 +2394,13 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info } } else { /* Filtered collective write */ - H5D_chk_idx_info_t index_info; - - /* Construct chunked index info */ - H5D_MPIO_INIT_CHUNK_IDX_INFO(index_info, dset_info->dset); - if (mpi_size > 1) { /* Redistribute shared chunks being written to */ if (H5D__mpio_redistribute_shared_chunks(&chunk_list, io_info, mpi_rank, mpi_size, NULL) < 0) 
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks"); /* Send any chunk modification messages for chunks this rank no longer owns */ - if (H5D__mpio_share_chunk_modification_data(&chunk_list, io_info, dset_info, mpi_rank, mpi_size, + if (H5D__mpio_share_chunk_modification_data(&chunk_list, io_info, mpi_rank, mpi_size, &chunk_msg_bufs, &chunk_msg_bufs_len) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to send chunk modification data between MPI ranks"); @@ -2269,7 +2411,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info * collective re-allocation and re-insertion of chunks modified by other ranks. */ for (size_t i = 0; i < max_num_chunks; i++) { - H5D_filtered_collective_io_info_t single_chunk_list = {0}; + H5D_filtered_collective_io_info_t single_chunk_list = chunk_list; /* Check if this rank has a chunk to work on for this iteration */ have_chunk_to_process = @@ -2281,13 +2423,11 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info */ if (have_chunk_to_process) { single_chunk_list.chunk_infos = &chunk_list.chunk_infos[i]; - single_chunk_list.chunk_hash_table = chunk_list.chunk_hash_table; single_chunk_list.num_chunk_infos = 1; single_chunk_list.num_chunks_to_read = chunk_list.chunk_infos[i].need_read ? 1 : 0; } else { single_chunk_list.chunk_infos = NULL; - single_chunk_list.chunk_hash_table = chunk_list.chunk_hash_table; single_chunk_list.num_chunk_infos = 0; single_chunk_list.num_chunks_to_read = 0; } @@ -2297,13 +2437,13 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info * the chunks. As chunk reads are done collectively here, all ranks * must participate. */ - if (H5D__mpio_collective_filtered_chunk_update( - &single_chunk_list, chunk_msg_bufs, chunk_msg_bufs_len, io_info, dset_info, mpi_rank) < 0) + if (H5D__mpio_collective_filtered_chunk_update(&single_chunk_list, chunk_msg_bufs, + chunk_msg_bufs_len, io_info, 1, mpi_rank) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks"); /* All ranks now collectively re-allocate file space for all chunks */ - if (H5D__mpio_collective_filtered_chunk_reallocate(&single_chunk_list, NULL, io_info, &index_info, - mpi_rank, mpi_size) < 0) + if (H5D__mpio_collective_filtered_chunk_reallocate(&single_chunk_list, NULL, io_info, 1, mpi_rank, + mpi_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't collectively re-allocate file space for chunks"); @@ -2321,14 +2461,17 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info /* Participate in the collective re-insertion of all chunks modified * in this iteration into the chunk index */ - if (H5D__mpio_collective_filtered_chunk_reinsert(&single_chunk_list, NULL, io_info, dset_info, - &index_info, mpi_rank, mpi_size) < 0) + if (H5D__mpio_collective_filtered_chunk_reinsert(&single_chunk_list, NULL, io_info, 1, mpi_rank, + mpi_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't collectively re-insert modified chunks into chunk index"); } /* end for */ } done: + if (ret_value < 0) + H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_NO_CHUNK_OPTIMIZATION); + if (chunk_msg_bufs) { for (size_t i = 0; i < (size_t)chunk_msg_bufs_len; i++) H5MM_free(chunk_msg_bufs[i]); @@ -2347,6 +2490,36 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info H5MM_free(chunk_list.chunk_infos); } /* end if */ + /* Free resources used by cached dataset info */ + if 
((num_dset_infos == 1) && (chunk_list.dset_info.single_dset_info)) { + H5D_mpio_filtered_dset_info_t *curr_dset_info = chunk_list.dset_info.single_dset_info; + + if (curr_dset_info->fb_info_init && H5D__fill_term(&curr_dset_info->fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release fill buffer info"); + if (curr_dset_info->fill_space && H5S_close(curr_dset_info->fill_space) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space"); + + H5MM_free(chunk_list.dset_info.single_dset_info); + chunk_list.dset_info.single_dset_info = NULL; + } + else if ((num_dset_infos > 1) && (chunk_list.dset_info.dset_info_hash_table)) { + H5D_mpio_filtered_dset_info_t *curr_dset_info; + H5D_mpio_filtered_dset_info_t *tmp; + + HASH_ITER(hh, chunk_list.dset_info.dset_info_hash_table, curr_dset_info, tmp) + { + HASH_DELETE(hh, chunk_list.dset_info.dset_info_hash_table, curr_dset_info); + + if (curr_dset_info->fb_info_init && H5D__fill_term(&curr_dset_info->fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release fill buffer info"); + if (curr_dset_info->fill_space && H5S_close(curr_dset_info->fill_space) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space"); + + H5MM_free(curr_dset_info); + curr_dset_info = NULL; + } + } + #ifdef H5Dmpio_DEBUG H5D_MPIO_TIME_STOP(mpi_rank); H5D_MPIO_TRACE_EXIT(mpi_rank); @@ -2583,20 +2756,25 @@ H5D__cmp_filtered_collective_io_info_entry(const void *filtered_collective_io_in addr2 = entry2->chunk_new.offset; /* - * If both chunk addresses are defined, H5_addr_cmp is safe to use. - * Otherwise, if both addresses aren't defined, compared chunk - * entries based on their chunk index. Finally, if only one chunk - * address is defined, return the appropriate value based on which - * is defined. + * If both chunks' file addresses are defined, H5_addr_cmp is safe to use. + * If only one chunk's file address is defined, return the appropriate + * value based on which is defined. If neither chunk's file address is + * defined, compare chunk entries based on their dataset object header + * address, then by their chunk index value. */ if (H5_addr_defined(addr1) && H5_addr_defined(addr2)) { ret_value = H5_addr_cmp(addr1, addr2); } else if (!H5_addr_defined(addr1) && !H5_addr_defined(addr2)) { - hsize_t chunk_idx1 = entry1->index_info.chunk_idx; - hsize_t chunk_idx2 = entry2->index_info.chunk_idx; + haddr_t oloc_addr1 = entry1->index_info.dset_oloc_addr; + haddr_t oloc_addr2 = entry2->index_info.dset_oloc_addr; + + if (0 == (ret_value = H5_addr_cmp(oloc_addr1, oloc_addr2))) { + hsize_t chunk_idx1 = entry1->index_info.chunk_idx; + hsize_t chunk_idx2 = entry2->index_info.chunk_idx; - ret_value = (chunk_idx1 > chunk_idx2) - (chunk_idx1 < chunk_idx2); + ret_value = (chunk_idx1 > chunk_idx2) - (chunk_idx1 < chunk_idx2); + } } else ret_value = H5_addr_defined(addr1) ?
1 : -1; @@ -2622,8 +2800,8 @@ H5D__cmp_chunk_redistribute_info(const void *_entry1, const void *_entry2) { const H5D_chunk_redistribute_info_t *entry1; const H5D_chunk_redistribute_info_t *entry2; - hsize_t chunk_index1; - hsize_t chunk_index2; + haddr_t oloc_addr1; + haddr_t oloc_addr2; int ret_value; FUNC_ENTER_PACKAGE_NOERR @@ -2631,17 +2809,26 @@ H5D__cmp_chunk_redistribute_info(const void *_entry1, const void *_entry2) entry1 = (const H5D_chunk_redistribute_info_t *)_entry1; entry2 = (const H5D_chunk_redistribute_info_t *)_entry2; - chunk_index1 = entry1->chunk_idx; - chunk_index2 = entry2->chunk_idx; + oloc_addr1 = entry1->dset_oloc_addr; + oloc_addr2 = entry2->dset_oloc_addr; + + /* Sort first by dataset object header address */ + if (0 == (ret_value = H5_addr_cmp(oloc_addr1, oloc_addr2))) { + hsize_t chunk_index1 = entry1->chunk_idx; + hsize_t chunk_index2 = entry2->chunk_idx; - if (chunk_index1 == chunk_index2) { - int orig_owner1 = entry1->orig_owner; - int orig_owner2 = entry2->orig_owner; + /* Then by chunk index value */ + if (chunk_index1 == chunk_index2) { + int orig_owner1 = entry1->orig_owner; + int orig_owner2 = entry2->orig_owner; - ret_value = (orig_owner1 > orig_owner2) - (orig_owner1 < orig_owner2); + /* And finally by original owning MPI rank for the chunk */ + + ret_value = (orig_owner1 > orig_owner2) - (orig_owner1 < orig_owner2); + } + else + ret_value = (chunk_index1 > chunk_index2) - (chunk_index1 < chunk_index2); } - else - ret_value = (chunk_index1 > chunk_index2) - (chunk_index1 < chunk_index2); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__cmp_chunk_redistribute_info() */ @@ -2656,6 +2843,16 @@ H5D__cmp_chunk_redistribute_info(const void *_entry1, const void *_entry2) * rank for two H5D_chunk_redistribute_info_t * structures * + * NOTE: The inner logic used in this sorting callback (inside the + * block where the original owners are equal) is intended to + * cause the given array of H5D_chunk_redistribute_info_t + * structures to be sorted back exactly as it was sorted + * before a shared chunk redistribution operation, according + * to the logic in H5D__cmp_filtered_collective_io_info_entry. + * Since the two sorting callbacks are currently tied directly + * to each other, both should be updated in the same way when + * changes are made. + * * Return: -1, 0, 1 * *------------------------------------------------------------------------- @@ -2682,20 +2879,25 @@ H5D__cmp_chunk_redistribute_info_orig_owner(const void *_entry1, const void *_en haddr_t addr2 = entry2->chunk_block.offset; /* - * If both chunk addresses are defined, H5_addr_cmp is safe to use. - * Otherwise, if both addresses aren't defined, compared chunk - * entries based on their chunk index. Finally, if only one chunk - * address is defined, return the appropriate value based on which - * is defined. + * If both chunks' file addresses are defined, H5_addr_cmp is safe to use. + * If only one chunk's file address is defined, return the appropriate + * value based on which is defined. If neither chunk's file address is + * defined, compare chunk entries based on their dataset object header + * address, then by their chunk index value.
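+ * (For example, two not-yet-allocated chunks from different datasets
+ * order by their datasets' object header addresses; two such chunks
+ * from the same dataset order by chunk index.)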
*/ if (H5_addr_defined(addr1) && H5_addr_defined(addr2)) { ret_value = H5_addr_cmp(addr1, addr2); } else if (!H5_addr_defined(addr1) && !H5_addr_defined(addr2)) { - hsize_t chunk_idx1 = entry1->chunk_idx; - hsize_t chunk_idx2 = entry2->chunk_idx; + haddr_t oloc_addr1 = entry1->dset_oloc_addr; + haddr_t oloc_addr2 = entry2->dset_oloc_addr; - ret_value = (chunk_idx1 > chunk_idx2) - (chunk_idx1 < chunk_idx2); + if (0 == (ret_value = H5_addr_cmp(oloc_addr1, oloc_addr2))) { + hsize_t chunk_idx1 = entry1->chunk_idx; + hsize_t chunk_idx2 = entry2->chunk_idx; + + ret_value = (chunk_idx1 > chunk_idx2) - (chunk_idx1 < chunk_idx2); + } } else ret_value = H5_addr_defined(addr1) ? 1 : -1; @@ -2927,20 +3129,21 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as */ static herr_t H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, - int mpi_rank, H5D_filtered_collective_io_info_t *chunk_list) + size_t num_dset_infos, int mpi_rank, + H5D_filtered_collective_io_info_t *chunk_list) { - H5D_filtered_collective_chunk_info_t *local_info_array = NULL; - H5D_chunk_ud_t udata; - bool filter_partial_edge_chunks; - size_t num_chunks_selected; - size_t num_chunks_to_read = 0; - herr_t ret_value = SUCCEED; + H5D_filtered_collective_chunk_info_t *local_info_array = NULL; + H5D_mpio_filtered_dset_info_t *curr_dset_info = NULL; + size_t num_chunks_selected = 0; + size_t num_chunks_to_read = 0; + size_t buf_idx = 0; + bool need_sort = false; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE assert(io_info); assert(di); - assert(di->layout->type == H5D_CHUNKED); assert(chunk_list); #ifdef H5Dmpio_DEBUG @@ -2948,166 +3151,330 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const H5D_MPIO_TIME_START(mpi_rank, "Filtered Collective I/O Setup"); #endif - /* Each rank builds a local list of the chunks they have selected */ - if ((num_chunks_selected = H5SL_count(di->layout_io_info.chunk_map->dset_sel_pieces))) { - H5D_piece_info_t *chunk_info; - H5SL_node_t *chunk_node; - hsize_t select_npoints; - bool need_sort = false; + /* Calculate hash key length for chunk hash table */ + if (num_dset_infos > 1) { + /* Just in case the structure changes... */ + HDcompile_assert(offsetof(H5D_chunk_index_info_t, dset_oloc_addr) > + offsetof(H5D_chunk_index_info_t, chunk_idx)); - /* Determine whether partial edge chunks should be filtered */ - filter_partial_edge_chunks = - !(di->dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS); + /* Calculate key length using uthash compound key example */ + chunk_list->chunk_hash_table_keylen = offsetof(H5D_chunk_index_info_t, dset_oloc_addr) + + sizeof(haddr_t) - offsetof(H5D_chunk_index_info_t, chunk_idx); + } + else + chunk_list->chunk_hash_table_keylen = sizeof(hsize_t); + + chunk_list->all_dset_indices_empty = true; + chunk_list->no_dset_index_insert_methods = true; + /* Calculate size needed for total chunk list */ + for (size_t dset_idx = 0; dset_idx < num_dset_infos; dset_idx++) { + /* Skip this dataset if no I/O is being performed */ + if (di[dset_idx].skip_io) + continue; + + /* Only process filtered, chunked datasets. A contiguous dataset + * could possibly have filters in the DCPL pipeline, but the library + * will currently ignore optional filters in that case. 
+ */ + if ((di[dset_idx].dset->shared->dcpl_cache.pline.nused == 0) || + (di[dset_idx].layout->type == H5D_CONTIGUOUS)) + continue; + + assert(di[dset_idx].layout->type == H5D_CHUNKED); + assert(di[dset_idx].layout->storage.type == H5D_CHUNKED); + + num_chunks_selected += H5SL_count(di[dset_idx].layout_io_info.chunk_map->dset_sel_pieces); + } + + if (num_chunks_selected) if (NULL == (local_info_array = H5MM_malloc(num_chunks_selected * sizeof(*local_info_array)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate local io info array buffer"); - chunk_node = H5SL_first(di->layout_io_info.chunk_map->dset_sel_pieces); - for (size_t i = 0; chunk_node; i++) { - chunk_info = (H5D_piece_info_t *)H5SL_item(chunk_node); + for (size_t dset_idx = 0; dset_idx < num_dset_infos; dset_idx++) { + H5D_chunk_ud_t udata; + H5O_fill_t *fill_msg; + haddr_t prev_tag = HADDR_UNDEF; - /* Obtain this chunk's address */ - if (H5D__chunk_lookup(di->dset, chunk_info->scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address"); + /* Skip this dataset if no I/O is being performed */ + if (di[dset_idx].skip_io) + continue; - /* Initialize rank-local chunk info */ - local_info_array[i].chunk_info = chunk_info; - local_info_array[i].chunk_buf_size = 0; - local_info_array[i].num_writers = 0; - local_info_array[i].orig_owner = mpi_rank; - local_info_array[i].new_owner = mpi_rank; - local_info_array[i].buf = NULL; + /* Only process filtered, chunked datasets. A contiguous dataset + * could possibly have filters in the DCPL pipeline, but the library + * will currently ignore optional filters in that case. + */ + if ((di[dset_idx].dset->shared->dcpl_cache.pline.nused == 0) || + (di[dset_idx].layout->type == H5D_CONTIGUOUS)) + continue; - select_npoints = H5S_GET_SELECT_NPOINTS(chunk_info->fspace); - local_info_array[i].io_size = (size_t)select_npoints * di->type_info.dst_type_size; + assert(di[dset_idx].layout->storage.type == H5D_CHUNKED); + assert(di[dset_idx].layout->storage.u.chunk.idx_type != H5D_CHUNK_IDX_NONE); - /* - * Determine whether this chunk will need to be read from the file. If this is - * a read operation, the chunk will be read. If this is a write operation, we - * generally need to read a filtered chunk from the file before modifying it, - * unless the chunk is being fully overwritten. - * - * TODO: Currently the full overwrite status of a chunk is only obtained on a - * per-rank basis. This means that if the total selection in the chunk, as - * determined by the combination of selections of all of the ranks interested in - * the chunk, covers the entire chunk, the performance optimization of not reading - * the chunk from the file is still valid, but is not applied in the current - * implementation. 
- * - * To implement this case, a few approaches were considered: - * - * - Keep a running total (distributed to each rank) of the number of chunk - * elements selected during chunk redistribution and compare that to the total - * number of elements in the chunk once redistribution is finished - * - * - Process all incoming chunk messages before doing I/O (these are currently - * processed AFTER doing I/O), combine the owning rank's selection in a chunk - * with the selections received from other ranks and check to see whether that - * combined selection covers the entire chunk - * - * The first approach will be dangerous if the application performs an overlapping - * write to a chunk, as the number of selected elements can equal or exceed the - * number of elements in the chunk without the whole chunk selection being covered. - * While it might be considered erroneous for an application to do an overlapping - * write, we don't explicitly disallow it. - * - * The second approach contains a bit of complexity in that part of the chunk - * messages will be needed before doing I/O and part will be needed after doing I/O. - * Since modification data from chunk messages can't be applied until after any I/O - * is performed (otherwise, we'll overwrite any applied modification data), chunk - * messages are currently entirely processed after I/O. However, in order to determine - * if a chunk is being fully overwritten, we need the dataspace portion of the chunk - * messages before doing I/O. The naive way to do this is to process chunk messages - * twice, using just the relevant information from the message before and after I/O. - * The better way would be to avoid processing chunk messages twice by extracting (and - * keeping around) the dataspace portion of the message before I/O and processing the - * rest of the chunk message after I/O. Note that the dataspace portion of each chunk - * message is used to correctly apply chunk modification data from the message, so - * must be kept around both before and after I/O in this case. - */ - if (io_info->op_type == H5D_IO_OP_READ) - local_info_array[i].need_read = true; - else { - local_info_array[i].need_read = - local_info_array[i].io_size < (size_t)di->dset->shared->layout.u.chunk.size; - } + /* + * To support the multi-dataset I/O case, cache some info (chunk size, + * fill buffer and fill dataspace, etc.) about each dataset involved + * in the I/O operation for use when processing chunks. If only one + * dataset is involved, this information is the same for every chunk + * processed. Otherwise, if multiple datasets are involved, a hash + * table is used to quickly match a particular chunk with the cached + * information pertaining to the dataset it resides in. 
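+ *
+ * A hedged sketch of that per-chunk match (uthash API; `chunk_entry`
+ * stands in for a H5D_filtered_collective_chunk_info_t pointer):
+ *
+ *   H5D_mpio_filtered_dset_info_t *cached = NULL;
+ *   HASH_FIND(hh, chunk_list->dset_info.dset_info_hash_table,
+ *             &chunk_entry->index_info.dset_oloc_addr,
+ *             sizeof(haddr_t), cached);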
+ */ + if (NULL == (curr_dset_info = H5MM_malloc(sizeof(H5D_mpio_filtered_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate space for dataset info"); + + memset(&curr_dset_info->fb_info, 0, sizeof(H5D_fill_buf_info_t)); + + H5D_MPIO_INIT_CHUNK_IDX_INFO(curr_dset_info->chunk_idx_info, di[dset_idx].dset); + + curr_dset_info->dset_io_info = &di[dset_idx]; + curr_dset_info->file_chunk_size = di[dset_idx].dset->shared->layout.u.chunk.size; + curr_dset_info->dset_oloc_addr = di[dset_idx].dset->oloc.addr; + curr_dset_info->fill_space = NULL; + curr_dset_info->fb_info_init = false; + curr_dset_info->index_empty = false; + + /* Determine if fill values should be written to chunks */ + fill_msg = &di[dset_idx].dset->shared->dcpl_cache.fill; + curr_dset_info->should_fill = + (fill_msg->fill_time == H5D_FILL_TIME_ALLOC) || + ((fill_msg->fill_time == H5D_FILL_TIME_IFSET) && fill_msg->fill_defined); + + if (curr_dset_info->should_fill) { + hsize_t chunk_dims[H5S_MAX_RANK]; + + assert(di[dset_idx].dset->shared->ndims == di[dset_idx].dset->shared->layout.u.chunk.ndims - 1); + for (size_t dim_idx = 0; dim_idx < di[dset_idx].dset->shared->layout.u.chunk.ndims - 1; dim_idx++) + chunk_dims[dim_idx] = (hsize_t)di[dset_idx].dset->shared->layout.u.chunk.dim[dim_idx]; + + /* Get a dataspace for filling chunk memory buffers */ + if (NULL == (curr_dset_info->fill_space = H5S_create_simple( + di[dset_idx].dset->shared->layout.u.chunk.ndims - 1, chunk_dims, NULL))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create chunk fill dataspace"); + + /* Initialize fill value buffer */ + if (H5D__fill_init(&curr_dset_info->fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc, + (void *)&di[dset_idx].dset->shared->dcpl_cache.pline, + (H5MM_free_t)H5D__chunk_mem_free, + (void *)&di[dset_idx].dset->shared->dcpl_cache.pline, + &di[dset_idx].dset->shared->dcpl_cache.fill, di[dset_idx].dset->shared->type, + di[dset_idx].dset->shared->type_id, 0, curr_dset_info->file_chunk_size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill value buffer"); + + curr_dset_info->fb_info_init = true; + } + + /* + * If the dataset is incrementally allocated and hasn't been written + * to yet, the chunk index should be empty. In this case, a collective + * read of its chunks is essentially a no-op, so we can avoid that read + * later. If all datasets have empty chunk indices, we can skip the + * collective read entirely. + */ + if (fill_msg->alloc_time == H5D_ALLOC_TIME_INCR) + if (H5D__chunk_index_empty(di[dset_idx].dset, &curr_dset_info->index_empty) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty"); + + if ((fill_msg->alloc_time != H5D_ALLOC_TIME_INCR) || !curr_dset_info->index_empty) + chunk_list->all_dset_indices_empty = false; + + if (curr_dset_info->chunk_idx_info.storage->ops->insert) + chunk_list->no_dset_index_insert_methods = false; + + /* + * For multi-dataset I/O, use a hash table to keep a mapping between + * chunks and the cached info for the dataset that they're in. Otherwise, + * we can just use the info object directly if only one dataset is being + * worked on. 
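+ *
+ * (Unlike the compound chunk key, the table below keys on the single
+ * dset_oloc_addr field, so its key length is simply sizeof(haddr_t).)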
+         */
+        if (num_dset_infos > 1) {
+            HASH_ADD(hh, chunk_list->dset_info.dset_info_hash_table, dset_oloc_addr, sizeof(haddr_t),
+                     curr_dset_info);
+        }
+        else
+            chunk_list->dset_info.single_dset_info = curr_dset_info;
+        curr_dset_info = NULL;
+
+        /*
+         * Now, each rank builds a local list of info about the chunks
+         * it has selected among the chunks in the current dataset
+         */
-        if (local_info_array[i].need_read)
-            num_chunks_to_read++;
+        /* Set metadata tagging with dataset oheader addr */
+        H5AC_tag(di[dset_idx].dset->oloc.addr, &prev_tag);
+
+        if (H5SL_count(di[dset_idx].layout_io_info.chunk_map->dset_sel_pieces)) {
+            H5SL_node_t *chunk_node;
+            bool filter_partial_edge_chunks;
+
+            /* Determine whether partial edge chunks should be filtered */
+            filter_partial_edge_chunks = !(di[dset_idx].dset->shared->layout.u.chunk.flags &
+                                           H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+            chunk_node = H5SL_first(di[dset_idx].layout_io_info.chunk_map->dset_sel_pieces);
+            while (chunk_node) {
+                H5D_piece_info_t *chunk_info;
+                hsize_t select_npoints;
+
+                chunk_info = (H5D_piece_info_t *)H5SL_item(chunk_node);
+                assert(chunk_info->filtered_dset);
+
+                /* Obtain this chunk's address */
+                if (H5D__chunk_lookup(di[dset_idx].dset, chunk_info->scaled, &udata) < 0)
+                    HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
+
+                /* Initialize rank-local chunk info */
+                local_info_array[buf_idx].chunk_info = chunk_info;
+                local_info_array[buf_idx].chunk_buf_size = 0;
+                local_info_array[buf_idx].num_writers = 0;
+                local_info_array[buf_idx].orig_owner = mpi_rank;
+                local_info_array[buf_idx].new_owner = mpi_rank;
+                local_info_array[buf_idx].buf = NULL;
+
+                select_npoints = H5S_GET_SELECT_NPOINTS(chunk_info->fspace);
+                local_info_array[buf_idx].io_size =
+                    (size_t)select_npoints * di[dset_idx].type_info.dst_type_size;
-            local_info_array[i].skip_filter_pline = false;
-            if (!filter_partial_edge_chunks) {
                /*
-                * If this is a partial edge chunk and the "don't filter partial edge
-                * chunks" flag is set, make sure not to apply filters to the chunk.
+                * Determine whether this chunk will need to be read from the file. If this is
+                * a read operation, the chunk will be read. If this is a write operation, we
+                * generally need to read a filtered chunk from the file before modifying it,
+                * unless the chunk is being fully overwritten.
+                *
+                * TODO: Currently the full overwrite status of a chunk is only obtained on a
+                * per-rank basis. This means that if the total selection in the chunk, as
+                * determined by the combination of selections of all of the ranks interested in
+                * the chunk, covers the entire chunk, the performance optimization of not reading
+                * the chunk from the file is still valid, but is not applied in the current
+                * implementation.
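+                * For example, if one rank's selection covers exactly the first half
+                * of a chunk's elements and another rank's covers the second half, the
+                * combined selection fully overwrites the chunk, yet each rank
+                * individually sees only a partial selection and the chunk is still
+                * read from the file.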
+ * + * To implement this case, a few approaches were considered: + * + * - Keep a running total (distributed to each rank) of the number of chunk + * elements selected during chunk redistribution and compare that to the total + * number of elements in the chunk once redistribution is finished + * + * - Process all incoming chunk messages before doing I/O (these are currently + * processed AFTER doing I/O), combine the owning rank's selection in a chunk + * with the selections received from other ranks and check to see whether that + * combined selection covers the entire chunk + * + * The first approach will be dangerous if the application performs an overlapping + * write to a chunk, as the number of selected elements can equal or exceed the + * number of elements in the chunk without the whole chunk selection being covered. + * While it might be considered erroneous for an application to do an overlapping + * write, we don't explicitly disallow it. + * + * The second approach contains a bit of complexity in that part of the chunk + * messages will be needed before doing I/O and part will be needed after doing I/O. + * Since modification data from chunk messages can't be applied until after any I/O + * is performed (otherwise, we'll overwrite any applied modification data), chunk + * messages are currently entirely processed after I/O. However, in order to determine + * if a chunk is being fully overwritten, we need the dataspace portion of the chunk + * messages before doing I/O. The naive way to do this is to process chunk messages + * twice, using just the relevant information from the message before and after I/O. + * The better way would be to avoid processing chunk messages twice by extracting (and + * keeping around) the dataspace portion of the message before I/O and processing the + * rest of the chunk message after I/O. Note that the dataspace portion of each chunk + * message is used to correctly apply chunk modification data from the message, so + * must be kept around both before and after I/O in this case. */ - if (H5D__chunk_is_partial_edge_chunk(di->dset->shared->ndims, - di->dset->shared->layout.u.chunk.dim, chunk_info->scaled, - di->dset->shared->curr_dims)) - local_info_array[i].skip_filter_pline = true; - } + if (io_info->op_type == H5D_IO_OP_READ) + local_info_array[buf_idx].need_read = true; + else { + local_info_array[buf_idx].need_read = + local_info_array[buf_idx].io_size < + (size_t)di[dset_idx].dset->shared->layout.u.chunk.size; + } - /* Initialize the chunk's shared info */ - local_info_array[i].chunk_current = udata.chunk_block; - local_info_array[i].chunk_new = udata.chunk_block; + if (local_info_array[buf_idx].need_read) + num_chunks_to_read++; - /* - * Check if the list is not in ascending order of offset in the file - * or has unallocated chunks. In either case, the list should get - * sorted. - */ - if (i) { - haddr_t curr_chunk_offset = local_info_array[i].chunk_current.offset; - haddr_t prev_chunk_offset = local_info_array[i - 1].chunk_current.offset; + local_info_array[buf_idx].skip_filter_pline = false; + if (!filter_partial_edge_chunks) { + /* + * If this is a partial edge chunk and the "don't filter partial edge + * chunks" flag is set, make sure not to apply filters to the chunk. 
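+                * (A partial edge chunk is a chunk at the dataset's boundary that is
+                * not completely contained within the dataset's current extent.)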
+ */ + if (H5D__chunk_is_partial_edge_chunk( + di[dset_idx].dset->shared->ndims, di[dset_idx].dset->shared->layout.u.chunk.dim, + chunk_info->scaled, di[dset_idx].dset->shared->curr_dims)) + local_info_array[buf_idx].skip_filter_pline = true; + } - if (!H5_addr_defined(prev_chunk_offset) || !H5_addr_defined(curr_chunk_offset) || - (curr_chunk_offset < prev_chunk_offset)) - need_sort = true; + /* Initialize the chunk's shared info */ + local_info_array[buf_idx].chunk_current = udata.chunk_block; + local_info_array[buf_idx].chunk_new = udata.chunk_block; + + /* + * Check if the list is not in ascending order of offset in the file + * or has unallocated chunks. In either case, the list should get + * sorted. + */ + if (!need_sort && buf_idx) { + haddr_t curr_chunk_offset = local_info_array[buf_idx].chunk_current.offset; + haddr_t prev_chunk_offset = local_info_array[buf_idx - 1].chunk_current.offset; + + if (!H5_addr_defined(prev_chunk_offset) || !H5_addr_defined(curr_chunk_offset) || + (curr_chunk_offset < prev_chunk_offset)) + need_sort = true; + } + + /* Needed for proper hashing later on */ + memset(&local_info_array[buf_idx].index_info, 0, sizeof(H5D_chunk_index_info_t)); + + /* + * Extensible arrays may calculate a chunk's index a little differently + * than normal when the dataset's unlimited dimension is not the + * slowest-changing dimension, so set the index here based on what the + * extensible array code calculated instead of what was calculated + * in the chunk file mapping. + */ + if (di[dset_idx].dset->shared->layout.u.chunk.idx_type == H5D_CHUNK_IDX_EARRAY) + local_info_array[buf_idx].index_info.chunk_idx = udata.chunk_idx; + else + local_info_array[buf_idx].index_info.chunk_idx = chunk_info->index; + + assert(H5_addr_defined(di[dset_idx].dset->oloc.addr)); + local_info_array[buf_idx].index_info.dset_oloc_addr = di[dset_idx].dset->oloc.addr; + + local_info_array[buf_idx].index_info.filter_mask = udata.filter_mask; + local_info_array[buf_idx].index_info.need_insert = false; + + buf_idx++; + + chunk_node = H5SL_next(chunk_node); } + } + else if (H5F_get_coll_metadata_reads(di[dset_idx].dset->oloc.file)) { + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; /* - * Extensible arrays may calculate a chunk's index a little differently - * than normal when the dataset's unlimited dimension is not the - * slowest-changing dimension, so set the index here based on what the - * extensible array code calculated instead of what was calculated - * in the chunk file mapping. + * If this rank has no selection in the dataset and collective + * metadata reads are enabled, do a fake lookup of a chunk to + * ensure that this rank has the chunk index opened. Otherwise, + * only the ranks that had a selection will have opened the + * chunk index and they will have done so independently. Therefore, + * when ranks with no selection participate in later collective + * metadata reads, they will try to open the chunk index collectively + * and issues will occur since other ranks won't participate. + * + * In the future, we should consider having a chunk index "open" + * callback that can be used to ensure collectivity between ranks + * in a more natural way, but this hack should suffice for now. 
*/ - if (di->dset->shared->layout.u.chunk.idx_type == H5D_CHUNK_IDX_EARRAY) - local_info_array[i].index_info.chunk_idx = udata.chunk_idx; - else - local_info_array[i].index_info.chunk_idx = chunk_info->index; - - local_info_array[i].index_info.filter_mask = udata.filter_mask; - local_info_array[i].index_info.need_insert = false; - - chunk_node = H5SL_next(chunk_node); + if (H5D__chunk_lookup(di[dset_idx].dset, scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address"); } - /* Ensure the chunk list is sorted in ascending order of offset in the file */ - if (need_sort) - qsort(local_info_array, num_chunks_selected, sizeof(H5D_filtered_collective_chunk_info_t), - H5D__cmp_filtered_collective_io_info_entry); + /* Reset metadata tagging */ + H5AC_tag(prev_tag, NULL); } - else if (H5F_get_coll_metadata_reads(di->dset->oloc.file)) { - hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; - /* - * If this rank has no selection in the dataset and collective - * metadata reads are enabled, do a fake lookup of a chunk to - * ensure that this rank has the chunk index opened. Otherwise, - * only the ranks that had a selection will have opened the - * chunk index and they will have done so independently. Therefore, - * when ranks with no selection participate in later collective - * metadata reads, they will try to open the chunk index collectively - * and issues will occur since other ranks won't participate. - * - * In the future, we should consider having a chunk index "open" - * callback that can be used to ensure collectivity between ranks - * in a more natural way, but this hack should suffice for now. - */ - if (H5D__chunk_lookup(di->dset, scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address"); - } + /* Ensure the chunk list is sorted in ascending order of offset in the file */ + if (local_info_array && need_sort) + qsort(local_info_array, num_chunks_selected, sizeof(H5D_filtered_collective_chunk_info_t), + H5D__cmp_filtered_collective_io_info_entry); chunk_list->chunk_infos = local_info_array; chunk_list->num_chunk_infos = num_chunks_selected; @@ -3119,6 +3486,37 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const done: if (ret_value < 0) { + /* Free temporary cached dataset info object */ + if (curr_dset_info) { + if (curr_dset_info->fb_info_init && H5D__fill_term(&curr_dset_info->fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release fill buffer info"); + if (curr_dset_info->fill_space && H5S_close(curr_dset_info->fill_space) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space"); + + H5MM_free(curr_dset_info); + curr_dset_info = NULL; + + if (num_dset_infos == 1) + chunk_list->dset_info.single_dset_info = NULL; + } + + /* Free resources used by cached dataset info hash table */ + if (num_dset_infos > 1) { + H5D_mpio_filtered_dset_info_t *tmp; + + HASH_ITER(hh, chunk_list->dset_info.dset_info_hash_table, curr_dset_info, tmp) + { + HASH_DELETE(hh, chunk_list->dset_info.dset_info_hash_table, curr_dset_info); + H5MM_free(curr_dset_info); + curr_dset_info = NULL; + } + } + + if (num_dset_infos == 1) + chunk_list->dset_info.single_dset_info = NULL; + else + chunk_list->dset_info.dset_info_hash_table = NULL; + H5MM_free(local_info_array); } @@ -3158,7 +3556,6 @@ H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_li bool redistribute_on_all_ranks; size_t *num_chunks_map = NULL; size_t coll_chunk_list_size = 0; - size_t i; 
    int mpi_code;
    herr_t ret_value = SUCCEED;
@@ -3186,8 +3583,8 @@ H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_li
                                         num_chunks_map, 1, H5_SIZE_T_AS_MPI_TYPE, io_info->comm)))
        HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)

-    for (i = 0; i < (size_t)mpi_size; i++)
-        coll_chunk_list_size += num_chunks_map[i];
+    for (int curr_rank = 0; curr_rank < mpi_size; curr_rank++)
+        coll_chunk_list_size += num_chunks_map[curr_rank];

    /*
     * Determine whether we should perform chunk redistribution on all
@@ -3257,21 +3654,23 @@ H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_li
 *
 *        - All MPI ranks send their list of selected chunks to the
 *          ranks involved in chunk redistribution. Then, the
- *          involved ranks sort this new list in order of chunk
- *          index.
+ *          involved ranks sort this new list in order of:
+ *
+ *            dataset object header address -> chunk index value ->
+ *            original owning MPI rank for chunk
 *
 *        - The involved ranks scan the list looking for matching
- *          runs of chunk index values (corresponding to a shared
- *          chunk which has been selected by more than one rank in
- *          the I/O operation) and for each shared chunk,
- *          redistribute the chunk to the MPI rank writing to the
- *          chunk which currently has the least amount of chunks
- *          assigned to it. This is done by modifying the "new_owner"
- *          field in each of the list entries corresponding to that
- *          chunk. The involved ranks then re-sort the list in order
- *          of original chunk owner so that each rank's section of
- *          contributed chunks is contiguous in the collective chunk
- *          list.
+ *          runs of (dataset object header address, chunk index value)
+ *          pairs (corresponding to a shared chunk which has been
+ *          selected by more than one rank in the I/O operation) and
+ *          for each shared chunk, redistribute the chunk to the MPI
+ *          rank writing to the chunk which currently has the fewest
+ *          chunks assigned to it. This is done by modifying the
+ *          "new_owner" field in each of the list entries
+ *          corresponding to that chunk. The involved ranks then
+ *          re-sort the list in order of original chunk owner so that
+ *          each rank's section of contributed chunks is contiguous
+ *          in the collective chunk list.
* * - If chunk redistribution occurred on all ranks, each rank * scans through the collective chunk list to find their @@ -3293,9 +3692,8 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun { MPI_Datatype struct_type; MPI_Datatype packed_type; - bool struct_type_derived = false; - bool packed_type_derived = false; - size_t i; + bool struct_type_derived = false; + bool packed_type_derived = false; size_t coll_chunk_list_num_entries = 0; void *coll_chunk_list = NULL; int *counts_disps_array = NULL; @@ -3346,15 +3744,15 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun /* Set the receive counts from the assigned chunks map */ counts_ptr = counts_disps_array; - for (i = 0; i < (size_t)mpi_size; i++) - H5_CHECKED_ASSIGN(counts_ptr[i], int, num_chunks_assigned_map[i], size_t); + for (int curr_rank = 0; curr_rank < mpi_size; curr_rank++) + H5_CHECKED_ASSIGN(counts_ptr[curr_rank], int, num_chunks_assigned_map[curr_rank], size_t); /* Set the displacements into the receive buffer for the gather operation */ displacements_ptr = &counts_disps_array[mpi_size]; *displacements_ptr = 0; - for (i = 1; i < (size_t)mpi_size; i++) - displacements_ptr[i] = displacements_ptr[i - 1] + counts_ptr[i - 1]; + for (int curr_rank = 1; curr_rank < mpi_size; curr_rank++) + displacements_ptr[curr_rank] = displacements_ptr[curr_rank - 1] + counts_ptr[curr_rank - 1]; } } @@ -3363,9 +3761,11 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun * necessary for MPI communication */ if (H5D__mpio_get_chunk_redistribute_info_types(&packed_type, &packed_type_derived, &struct_type, - &struct_type_derived) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, + &struct_type_derived) < 0) { + /* Push an error, but still participate in collective gather operation */ + HDONE_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived datatypes for chunk redistribution info"); + } /* Perform gather operation */ if (H5_mpio_gatherv_alloc(chunk_list->chunk_infos, num_chunks_int, struct_type, counts_ptr, @@ -3389,15 +3789,14 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun if (all_ranks_involved || (mpi_rank == 0)) { H5D_chunk_redistribute_info_t *chunk_entry; - hsize_t curr_chunk_idx; - size_t set_begin_index; - int num_writers; - int new_chunk_owner; /* Clear the mapping from rank value -> number of assigned chunks */ memset(num_chunks_assigned_map, 0, (size_t)mpi_size * sizeof(*num_chunks_assigned_map)); - /* Sort collective chunk list according to chunk index */ + /* + * Sort collective chunk list according to: + * dataset object header address -> chunk index value -> original owning MPI rank for chunk + */ qsort(coll_chunk_list, coll_chunk_list_num_entries, sizeof(H5D_chunk_redistribute_info_t), H5D__cmp_chunk_redistribute_info); @@ -3410,21 +3809,30 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun * chunks). 
*/ chunk_entry = &((H5D_chunk_redistribute_info_t *)coll_chunk_list)[0]; - for (i = 0; i < coll_chunk_list_num_entries;) { + for (size_t entry_idx = 0; entry_idx < coll_chunk_list_num_entries;) { + haddr_t curr_oloc_addr; + hsize_t curr_chunk_idx; + size_t set_begin_index; + bool keep_processing; + int num_writers; + int new_chunk_owner; + /* Set chunk's initial new owner to its original owner */ new_chunk_owner = chunk_entry->orig_owner; /* - * Set the current chunk index so we know when we've processed - * all duplicate entries for a particular shared chunk + * Set the current dataset object header address and chunk + * index value so we know when we've processed all duplicate + * entries for a particular shared chunk */ + curr_oloc_addr = chunk_entry->dset_oloc_addr; curr_chunk_idx = chunk_entry->chunk_idx; /* Reset the initial number of writers to this chunk */ num_writers = 0; /* Set index for the beginning of this section of duplicate chunk entries */ - set_begin_index = i; + set_begin_index = entry_idx; /* * Process each chunk entry in the set for the current @@ -3445,13 +3853,21 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun num_writers++; chunk_entry++; - } while (++i < coll_chunk_list_num_entries && chunk_entry->chunk_idx == curr_chunk_idx); + + keep_processing = + /* Make sure we haven't run out of chunks in the chunk list */ + (++entry_idx < coll_chunk_list_num_entries) && + /* Make sure the chunk we're looking at is in the same dataset */ + (H5_addr_eq(chunk_entry->dset_oloc_addr, curr_oloc_addr)) && + /* Make sure the chunk we're looking at is the same chunk */ + (chunk_entry->chunk_idx == curr_chunk_idx); + } while (keep_processing); /* We should never have more writers to a chunk than the number of MPI ranks */ assert(num_writers <= mpi_size); /* Set all processed chunk entries' "new_owner" and "num_writers" fields */ - for (; set_begin_index < i; set_begin_index++) { + for (; set_begin_index < entry_idx; set_begin_index++) { H5D_chunk_redistribute_info_t *entry; entry = &((H5D_chunk_redistribute_info_t *)coll_chunk_list)[set_begin_index]; @@ -3485,29 +3901,32 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun } if (all_ranks_involved) { + size_t entry_idx; + /* * If redistribution occurred on all ranks, search for the section * in the collective chunk list corresponding to this rank's locally * selected chunks and update the local list after redistribution. 
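+         * (Because the list was re-sorted by original chunk owner after
+         * redistribution, this rank's section is contiguous and begins at the
+         * first entry whose orig_owner matches this rank.)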
*/ - for (i = 0; i < coll_chunk_list_num_entries; i++) - if (mpi_rank == ((H5D_chunk_redistribute_info_t *)coll_chunk_list)[i].orig_owner) + for (entry_idx = 0; entry_idx < coll_chunk_list_num_entries; entry_idx++) + if (mpi_rank == ((H5D_chunk_redistribute_info_t *)coll_chunk_list)[entry_idx].orig_owner) break; - for (size_t j = 0; j < (size_t)num_chunks_int; j++) { + for (size_t info_idx = 0; info_idx < (size_t)num_chunks_int; info_idx++) { H5D_chunk_redistribute_info_t *coll_entry; - coll_entry = &((H5D_chunk_redistribute_info_t *)coll_chunk_list)[i++]; + coll_entry = &((H5D_chunk_redistribute_info_t *)coll_chunk_list)[entry_idx++]; - chunk_list->chunk_infos[j].new_owner = coll_entry->new_owner; - chunk_list->chunk_infos[j].num_writers = coll_entry->num_writers; + chunk_list->chunk_infos[info_idx].new_owner = coll_entry->new_owner; + chunk_list->chunk_infos[info_idx].num_writers = coll_entry->num_writers; /* * Check if the chunk list struct's `num_chunks_to_read` field * needs to be updated */ - if (chunk_list->chunk_infos[j].need_read && (chunk_list->chunk_infos[j].new_owner != mpi_rank)) { - chunk_list->chunk_infos[j].need_read = false; + if (chunk_list->chunk_infos[info_idx].need_read && + (chunk_list->chunk_infos[info_idx].new_owner != mpi_rank)) { + chunk_list->chunk_infos[info_idx].need_read = false; assert(chunk_list->num_chunks_to_read > 0); chunk_list->num_chunks_to_read--; @@ -3530,9 +3949,10 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun * their chunk list struct's `num_chunks_to_read` field since it * may now be out of date. */ - for (i = 0; i < chunk_list->num_chunk_infos; i++) { - if ((chunk_list->chunk_infos[i].new_owner != mpi_rank) && chunk_list->chunk_infos[i].need_read) { - chunk_list->chunk_infos[i].need_read = false; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + if ((chunk_list->chunk_infos[info_idx].new_owner != mpi_rank) && + chunk_list->chunk_infos[info_idx].need_read) { + chunk_list->chunk_infos[info_idx].need_read = false; assert(chunk_list->num_chunks_to_read > 0); chunk_list->num_chunks_to_read--; @@ -3597,9 +4017,10 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun * owned by that rank, the rank sends the data it wishes to * update the chunk with to the MPI rank that now has * ownership of that chunk. To do this, it encodes the - * chunk's index, its selection in the chunk and its - * modification data into a buffer and then posts a - * non-blocking MPI_Issend to the owning rank. + * chunk's index value, the dataset's object header address + * (only for the multi-dataset I/O case), its selection in + * the chunk and its modification data into a buffer and + * then posts a non-blocking MPI_Issend to the owning rank. 
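+            *
+            * As a rough picture (the key portion's size is the chunk hash
+            * table key length), each message buffer is laid out as:
+            *
+            *   [ chunk index value (+ dataset object header address
+            *     in the multi-dataset case)                         ]
+            *   [ encoded chunk file dataspace (H5S_encode)          ]
+            *   [ chunk modification data gathered from write buffer ]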
* * Once this step is complete, all MPI ranks allocate arrays * to hold chunk message receive buffers and MPI request @@ -3641,11 +4062,9 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun */ static herr_t H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk_list, H5D_io_info_t *io_info, - H5D_dset_io_info_t *dset_info, int mpi_rank, - int H5_ATTR_NDEBUG_UNUSED mpi_size, unsigned char ***chunk_msg_bufs, - int *chunk_msg_bufs_len) + int mpi_rank, int H5_ATTR_NDEBUG_UNUSED mpi_size, + unsigned char ***chunk_msg_bufs, int *chunk_msg_bufs_len) { -#if H5_CHECK_MPI_VERSION(3, 0) H5D_filtered_collective_chunk_info_t *chunk_table = NULL; H5S_sel_iter_t *mem_iter = NULL; unsigned char **msg_send_bufs = NULL; @@ -3659,8 +4078,8 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk size_t num_send_requests = 0; size_t num_recv_requests = 0; size_t num_msgs_incoming = 0; + size_t hash_keylen = 0; size_t last_assigned_idx; - size_t i; int mpi_code; herr_t ret_value = SUCCEED; @@ -3668,7 +4087,6 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk assert(chunk_list); assert(io_info); - assert(dset_info); assert(mpi_size > 1); assert(chunk_msg_bufs); assert(chunk_msg_bufs_len); @@ -3682,6 +4100,9 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk H5CX_set_libver_bounds(NULL); if (chunk_list->num_chunk_infos > 0) { + hash_keylen = chunk_list->chunk_hash_table_keylen; + assert(hash_keylen > 0); + /* Allocate a selection iterator for iterating over chunk dataspaces */ if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dataspace selection iterator"); @@ -3713,8 +4134,9 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk * synchronous sends to send the data this rank is writing to * the rank that does own the chunk. */ - for (i = 0, last_assigned_idx = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + last_assigned_idx = 0; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[info_idx]; if (mpi_rank == chunk_entry->new_owner) { num_msgs_incoming += (size_t)(chunk_entry->num_writers - 1); @@ -3724,19 +4146,24 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk * does own, since it has sent the necessary data and is no longer * interested in the chunks it doesn't own. */ - chunk_list->chunk_infos[last_assigned_idx] = chunk_list->chunk_infos[i]; + chunk_list->chunk_infos[last_assigned_idx] = chunk_list->chunk_infos[info_idx]; /* * Since, at large scale, a chunk's index value may be larger than * the maximum value that can be stored in an int, we cannot rely * on using a chunk's index value as the tag for the MPI messages - * sent/received for a chunk. Therefore, add this chunk to a hash - * table with the chunk's index as a key so that we can quickly find - * the chunk when processing chunk messages that were received. The - * message itself will contain the chunk's index so we can update - * the correct chunk with the received data. + * sent/received for a chunk. 
Further, to support the multi-dataset + * I/O case, we can't rely on being able to distinguish between + * chunks by their chunk index value alone since two chunks from + * different datasets could have the same chunk index value. + * Therefore, add this chunk to a hash table with the dataset's + * object header address + the chunk's index value as a key so that + * we can quickly find the chunk when processing chunk messages that + * were received. The message itself will contain the dataset's + * object header address and the chunk's index value so we can + * update the correct chunk with the received data. */ - HASH_ADD(hh, chunk_table, index_info.chunk_idx, sizeof(hsize_t), + HASH_ADD(hh, chunk_table, index_info.chunk_idx, hash_keylen, &chunk_list->chunk_infos[last_assigned_idx]); last_assigned_idx++; @@ -3748,8 +4175,8 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk size_t mod_data_size = 0; size_t space_size = 0; - /* Add the size of the chunk index to the encoded size */ - mod_data_size += sizeof(hsize_t); + /* Add the size of the chunk hash table key to the encoded size */ + mod_data_size += hash_keylen; /* Determine size of serialized chunk file dataspace */ if (H5S_encode(chunk_info->fspace, &mod_data_p, &space_size) < 0) @@ -3760,7 +4187,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace); H5_CHECK_OVERFLOW(iter_nelmts, hsize_t, size_t); - mod_data_size += (size_t)iter_nelmts * dset_info->type_info.src_type_size; + mod_data_size += (size_t)iter_nelmts * chunk_info->dset_info->type_info.src_type_size; if (NULL == (msg_send_bufs[num_send_requests] = H5MM_malloc(mod_data_size))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, @@ -3768,23 +4195,28 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk mod_data_p = msg_send_bufs[num_send_requests]; - /* Store the chunk's index into the buffer */ - H5MM_memcpy(mod_data_p, &chunk_entry->index_info.chunk_idx, sizeof(hsize_t)); - mod_data_p += sizeof(hsize_t); + /* + * Add the chunk hash table key (chunk index value + possibly + * dataset object header address) into the buffer + */ + H5MM_memcpy(mod_data_p, &chunk_entry->index_info.chunk_idx, hash_keylen); + mod_data_p += hash_keylen; /* Serialize the chunk's file dataspace into the buffer */ if (H5S_encode(chunk_info->fspace, &mod_data_p, &mod_data_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to encode dataspace"); /* Initialize iterator for memory selection */ - if (H5S_select_iter_init(mem_iter, chunk_info->mspace, dset_info->type_info.src_type_size, + if (H5S_select_iter_init(mem_iter, chunk_info->mspace, + chunk_info->dset_info->type_info.src_type_size, H5S_SEL_ITER_SHARE_WITH_DATASPACE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information"); mem_iter_init = true; /* Collect the modification data into the buffer */ - if (0 == H5D__gather_mem(dset_info->buf.cvp, mem_iter, (size_t)iter_nelmts, mod_data_p)) + if (0 == + H5D__gather_mem(chunk_info->dset_info->buf.cvp, mem_iter, (size_t)iter_nelmts, mod_data_p)) HGOTO_ERROR(H5E_IO, H5E_CANTGATHER, FAIL, "couldn't gather from write buffer"); /* @@ -3867,20 +4299,12 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk * post a non-blocking receive to receive it */ if (msg_flag) { -#if H5_CHECK_MPI_VERSION(3, 0) MPI_Count msg_size = 0; if (MPI_SUCCESS != (mpi_code = 
MPI_Get_elements_x(&status, MPI_BYTE, &msg_size))) HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements_x failed", mpi_code) H5_CHECK_OVERFLOW(msg_size, MPI_Count, int); -#else - int msg_size = 0; - - if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&status, MPI_BYTE, &msg_size))) - HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) -#endif - if (msg_size <= 0) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid chunk modification message size"); @@ -3934,7 +4358,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk * send buffers used in the non-blocking operations */ if (msg_send_bufs) { - for (i = 0; i < num_send_requests; i++) { + for (size_t i = 0; i < num_send_requests; i++) { if (msg_send_bufs[i]) H5MM_free(msg_send_bufs[i]); } @@ -3969,7 +4393,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk /* Set the new number of locally-selected chunks */ chunk_list->num_chunk_infos = last_assigned_idx; - /* Set chunk hash table pointer for future use */ + /* Set chunk hash table information for future use */ chunk_list->chunk_hash_table = chunk_table; /* Return chunk message buffers if any were received */ @@ -3985,19 +4409,19 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk } if (num_send_requests) { - for (i = 0; i < num_send_requests; i++) { + for (size_t i = 0; i < num_send_requests; i++) { MPI_Cancel(&send_requests[i]); } } if (recv_requests) { - for (i = 0; i < num_recv_requests; i++) { + for (size_t i = 0; i < num_recv_requests; i++) { MPI_Cancel(&recv_requests[i]); } } if (msg_recv_bufs) { - for (i = 0; i < num_recv_requests; i++) { + for (size_t i = 0; i < num_recv_requests; i++) { H5MM_free(msg_recv_bufs[i]); } @@ -4013,7 +4437,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk H5MM_free(send_requests); if (msg_send_bufs) { - for (i = 0; i < num_send_requests; i++) { + for (size_t i = 0; i < num_send_requests; i++) { if (msg_send_bufs[i]) H5MM_free(msg_send_bufs[i]); } @@ -4033,13 +4457,6 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk #endif FUNC_LEAVE_NOAPI(ret_value) -#else - FUNC_ENTER_PACKAGE - HERROR( - H5E_DATASET, H5E_WRITEERROR, - "unable to send chunk modification data between MPI ranks - MPI version < 3 (MPI_Ibarrier missing)") - FUNC_LEAVE_NOAPI(FAIL) -#endif } /* end H5D__mpio_share_chunk_modification_data() */ /*------------------------------------------------------------------------- @@ -4056,26 +4473,16 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk */ static herr_t H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chunk_list, - const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, - int mpi_rank) + const H5D_io_info_t *io_info, size_t num_dset_infos, int mpi_rank) { - H5D_fill_buf_info_t fb_info; - H5Z_EDC_t err_detect; /* Error detection info */ - H5Z_cb_t filter_cb; /* I/O filter callback function */ - hsize_t file_chunk_size = 0; - hsize_t iter_nelmts; /* Number of points to iterate over for the chunk IO operation */ - bool should_fill = false; - bool fb_info_init = false; - bool index_empty = false; - H5S_t *fill_space = NULL; - void *base_read_buf = NULL; - herr_t ret_value = SUCCEED; + H5Z_EDC_t err_detect; /* Error detection info */ + H5Z_cb_t filter_cb; /* I/O filter callback function */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE assert(chunk_list); assert(io_info); - assert(di); #ifdef H5Dmpio_DEBUG 
H5D_MPIO_TRACE_ENTER(mpi_rank); @@ -4084,22 +4491,6 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun (void)mpi_rank; #endif - if (chunk_list->num_chunk_infos) { - /* Retrieve filter settings from API context */ - if (H5CX_get_err_detect(&err_detect) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info"); - if (H5CX_get_filter_cb(&filter_cb) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function"); - - /* Set size of full chunks in dataset */ - file_chunk_size = di->dset->shared->layout.u.chunk.size; - - /* Determine if fill values should be "read" for unallocated chunks */ - should_fill = (di->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC) || - ((di->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET) && - di->dset->shared->dcpl_cache.fill.fill_defined); - } - /* * Allocate memory buffers for all chunks being read. Chunk data buffers are of * the largest size between the chunk's current filtered size and the chunk's true @@ -4113,29 +4504,61 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun * size; reading into a (smaller) buffer of size equal to the unfiltered * chunk size would of course be bad. */ - for (size_t i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[info_idx]; + H5D_mpio_filtered_dset_info_t *cached_dset_info; + hsize_t file_chunk_size; assert(chunk_entry->need_read); + /* Find the cached dataset info for the dataset this chunk is in */ + if (num_dset_infos > 1) { + HASH_FIND(hh, chunk_list->dset_info.dset_info_hash_table, &chunk_entry->index_info.dset_oloc_addr, + sizeof(haddr_t), cached_dset_info); + if (cached_dset_info == NULL) { + if (chunk_list->all_dset_indices_empty) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find cached dataset info entry"); + else { + /* Push an error, but participate in collective read */ + HDONE_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find cached dataset info entry"); + break; + } + } + } + else + cached_dset_info = chunk_list->dset_info.single_dset_info; + assert(cached_dset_info); + + file_chunk_size = cached_dset_info->file_chunk_size; + chunk_entry->chunk_buf_size = MAX(chunk_entry->chunk_current.length, file_chunk_size); if (NULL == (chunk_entry->buf = H5MM_malloc(chunk_entry->chunk_buf_size))) { - /* Push an error, but participate in collective read */ - HDONE_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer"); - break; + if (chunk_list->all_dset_indices_empty) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer"); + else { + /* Push an error, but participate in collective read */ + HDONE_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer"); + break; + } } /* - * Check if chunk is currently allocated. If not, don't try to - * read it from the file. Instead, just fill the chunk buffer - * with the fill value if necessary. + * Check whether the chunk needs to be read from the file, based + * on whether the dataset's chunk index is empty or the chunk has + * a defined address in the file. If the chunk doesn't need to be + * read from the file, just fill the chunk buffer with the fill + * value if necessary. 
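+         *
+         * In short, the logic below amounts to:
+         *
+         *   index empty OR no file address  -> skip the read; fill the
+         *                                      buffer if fill values are
+         *                                      to be used
+         *   otherwise                       -> read the chunk and
+         *                                      unfilter it afterwards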
*/ - if (H5_addr_defined(chunk_entry->chunk_current.offset)) { - /* Set first read buffer */ - if (!base_read_buf) - base_read_buf = chunk_entry->buf; + if (cached_dset_info->index_empty || !H5_addr_defined(chunk_entry->chunk_current.offset)) { + chunk_entry->need_read = false; + /* Update field keeping track of number of chunks to read */ + assert(chunk_list->num_chunks_to_read > 0); + chunk_list->num_chunks_to_read--; + } + + if (chunk_entry->need_read) { /* Set chunk's new length for eventual filter pipeline calls */ if (chunk_entry->skip_filter_pline) chunk_entry->chunk_new.length = file_chunk_size; @@ -4143,77 +4566,58 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun chunk_entry->chunk_new.length = chunk_entry->chunk_current.length; } else { - chunk_entry->need_read = false; - - /* Update field keeping track of number of chunks to read */ - assert(chunk_list->num_chunks_to_read > 0); - chunk_list->num_chunks_to_read--; - /* Set chunk's new length for eventual filter pipeline calls */ chunk_entry->chunk_new.length = file_chunk_size; - if (should_fill) { - /* Initialize fill value buffer if not already initialized */ - if (!fb_info_init) { - hsize_t chunk_dims[H5S_MAX_RANK]; - - assert(di->dset->shared->ndims == di->dset->shared->layout.u.chunk.ndims - 1); - for (size_t j = 0; j < di->dset->shared->layout.u.chunk.ndims - 1; j++) - chunk_dims[j] = (hsize_t)di->dset->shared->layout.u.chunk.dim[j]; - - /* Get a dataspace for filling chunk memory buffers */ - if (NULL == (fill_space = H5S_create_simple(di->dset->shared->layout.u.chunk.ndims - 1, - chunk_dims, NULL))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create chunk fill dataspace"); - - /* Initialize fill value buffer */ - if (H5D__fill_init( - &fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc, - (void *)&di->dset->shared->dcpl_cache.pline, (H5MM_free_t)H5D__chunk_mem_free, - (void *)&di->dset->shared->dcpl_cache.pline, &di->dset->shared->dcpl_cache.fill, - di->dset->shared->type, di->dset->shared->type_id, 0, file_chunk_size) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill value buffer"); - - fb_info_init = true; - } + /* Determine if fill values should be "read" for this unallocated chunk */ + if (cached_dset_info->should_fill) { + assert(cached_dset_info->fb_info_init); + assert(cached_dset_info->fb_info.fill_buf); /* Write fill value to memory buffer */ - assert(fb_info.fill_buf); - if (H5D__fill(fb_info.fill_buf, di->dset->shared->type, chunk_entry->buf, - di->type_info.mem_type, fill_space) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, - "couldn't fill chunk buffer with fill value"); + if (H5D__fill(cached_dset_info->fb_info.fill_buf, + cached_dset_info->dset_io_info->type_info.dset_type, chunk_entry->buf, + cached_dset_info->dset_io_info->type_info.mem_type, + cached_dset_info->fill_space) < 0) { + if (chunk_list->all_dset_indices_empty) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "couldn't fill chunk buffer with fill value"); + else { + /* Push an error, but participate in collective read */ + HDONE_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "couldn't fill chunk buffer with fill value"); + break; + } + } } } } - /* - * If dataset is incrementally allocated and hasn't been written to - * yet, the chunk index should be empty. In this case, a collective - * read of chunks is essentially a no-op, so avoid it here. 
- */ - index_empty = false; - if (di->dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR) - if (H5D__chunk_index_empty(di->dset, &index_empty) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty"); - - if (!index_empty) { - /* Perform collective vector read */ + /* Perform collective vector read if necessary */ + if (!chunk_list->all_dset_indices_empty) if (H5D__mpio_collective_filtered_vec_io(chunk_list, io_info->f_sh, H5D_IO_OP_READ) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't perform vector I/O on filtered chunks"); + + if (chunk_list->num_chunk_infos) { + /* Retrieve filter settings from API context */ + if (H5CX_get_err_detect(&err_detect) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info"); + if (H5CX_get_filter_cb(&filter_cb) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function"); } /* * Iterate through all the read chunks, unfiltering them and scattering their * data out to the application's read buffer. */ - for (size_t i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[info_idx]; H5D_piece_info_t *chunk_info = chunk_entry->chunk_info; + hsize_t iter_nelmts; /* Unfilter the chunk, unless we didn't read it from the file */ if (chunk_entry->need_read && !chunk_entry->skip_filter_pline) { - if (H5Z_pipeline(&di->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE, + if (H5Z_pipeline(&chunk_info->dset_info->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE, &(chunk_entry->index_info.filter_mask), err_detect, filter_cb, (size_t *)&chunk_entry->chunk_new.length, &chunk_entry->chunk_buf_size, &chunk_entry->buf) < 0) @@ -4223,26 +4627,21 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun /* Scatter the chunk data to the read buffer */ iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->fspace); - if (H5D_select_io_mem(di->buf.vp, chunk_info->mspace, chunk_entry->buf, chunk_info->fspace, - di->type_info.src_type_size, (size_t)iter_nelmts) < 0) + if (H5D_select_io_mem(chunk_info->dset_info->buf.vp, chunk_info->mspace, chunk_entry->buf, + chunk_info->fspace, chunk_info->dset_info->type_info.src_type_size, + (size_t)iter_nelmts) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't copy chunk data to read buffer"); } done: /* Free all resources used by entries in the chunk list */ - for (size_t i = 0; i < chunk_list->num_chunk_infos; i++) { - if (chunk_list->chunk_infos[i].buf) { - H5MM_free(chunk_list->chunk_infos[i].buf); - chunk_list->chunk_infos[i].buf = NULL; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + if (chunk_list->chunk_infos[info_idx].buf) { + H5MM_free(chunk_list->chunk_infos[info_idx].buf); + chunk_list->chunk_infos[info_idx].buf = NULL; } } - /* Release the fill buffer info, if it's been initialized */ - if (fb_info_init && H5D__fill_term(&fb_info) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info"); - if (fill_space && (H5S_close(fill_space) < 0)) - HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space"); - #ifdef H5Dmpio_DEBUG H5D_MPIO_TIME_STOP(mpi_rank); H5D_MPIO_TRACE_EXIT(mpi_rank); @@ -4266,58 +4665,27 @@ 
H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun static herr_t H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *chunk_list, unsigned char **chunk_msg_bufs, int chunk_msg_bufs_len, - const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, - int H5_ATTR_NDEBUG_UNUSED mpi_rank) + const H5D_io_info_t *io_info, size_t num_dset_infos, int mpi_rank) { - const H5D_type_info_t *type_info = NULL; - H5D_fill_buf_info_t fb_info; - H5S_sel_iter_t *sel_iter = NULL; /* Dataspace selection iterator for H5D__scatter_mem */ - H5Z_EDC_t err_detect; /* Error detection info */ - H5Z_cb_t filter_cb; /* I/O filter callback function */ - hsize_t file_chunk_size = 0; - hsize_t iter_nelmts; /* Number of points to iterate over for the chunk IO operation */ - bool should_fill = false; - bool fb_info_init = false; - bool sel_iter_init = false; - bool index_empty = false; - size_t i; - H5S_t *dataspace = NULL; - H5S_t *fill_space = NULL; - void *base_read_buf = NULL; - herr_t ret_value = SUCCEED; + H5S_sel_iter_t *sel_iter = NULL; /* Dataspace selection iterator for H5D__scatter_mem */ + H5Z_EDC_t err_detect; /* Error detection info */ + H5Z_cb_t filter_cb; /* I/O filter callback function */ + uint8_t *key_buf = NULL; + H5S_t *dataspace = NULL; + bool sel_iter_init = false; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE assert(chunk_list); assert((chunk_msg_bufs && chunk_list->chunk_hash_table) || 0 == chunk_msg_bufs_len); assert(io_info); - assert(di); #ifdef H5Dmpio_DEBUG H5D_MPIO_TRACE_ENTER(mpi_rank); H5D_MPIO_TIME_START(mpi_rank, "Filtered collective chunk update"); #endif - /* Set convenience pointers */ - type_info = &(di->type_info); - assert(type_info); - - if (chunk_list->num_chunk_infos > 0) { - /* Retrieve filter settings from API context */ - if (H5CX_get_err_detect(&err_detect) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info"); - if (H5CX_get_filter_cb(&filter_cb) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function"); - - /* Set size of full chunks in dataset */ - file_chunk_size = di->dset->shared->layout.u.chunk.size; - - /* Determine if fill values should be written to chunks */ - should_fill = (di->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC) || - ((di->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET) && - di->dset->shared->dcpl_cache.fill.fill_defined); - } - /* * Allocate memory buffers for all owned chunks. Chunk data buffers are of the * largest size between the chunk's current filtered size and the chunk's true @@ -4337,11 +4705,33 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch * size; reading into a (smaller) buffer of size equal to the unfiltered * chunk size would of course be bad. 
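+     *
+     * For example, a chunk whose true (unfiltered) size is 1 MiB but whose
+     * current filtered size in the file is 1.2 MiB gets a 1.2 MiB buffer,
+     * which is large enough both for the filtered read and for the
+     * unfiltered data after the reverse filter pass.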
*/ - for (i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[info_idx]; + H5D_mpio_filtered_dset_info_t *cached_dset_info; + hsize_t file_chunk_size; assert(mpi_rank == chunk_entry->new_owner); + /* Find the cached dataset info for the dataset this chunk is in */ + if (num_dset_infos > 1) { + HASH_FIND(hh, chunk_list->dset_info.dset_info_hash_table, &chunk_entry->index_info.dset_oloc_addr, + sizeof(haddr_t), cached_dset_info); + if (cached_dset_info == NULL) { + if (chunk_list->all_dset_indices_empty) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find cached dataset info entry"); + else { + /* Push an error, but participate in collective read */ + HDONE_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find cached dataset info entry"); + break; + } + } + } + else + cached_dset_info = chunk_list->dset_info.single_dset_info; + assert(cached_dset_info); + + file_chunk_size = cached_dset_info->file_chunk_size; + chunk_entry->chunk_buf_size = MAX(chunk_entry->chunk_current.length, file_chunk_size); /* @@ -4349,29 +4739,41 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch * out fill values to it, make sure to 0-fill its memory buffer * so we don't use uninitialized memory. */ - if (!H5_addr_defined(chunk_entry->chunk_current.offset) && !should_fill) + if (!H5_addr_defined(chunk_entry->chunk_current.offset) && !cached_dset_info->should_fill) chunk_entry->buf = H5MM_calloc(chunk_entry->chunk_buf_size); else chunk_entry->buf = H5MM_malloc(chunk_entry->chunk_buf_size); if (NULL == chunk_entry->buf) { - /* Push an error, but participate in collective read */ - HDONE_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer"); - break; + if (chunk_list->all_dset_indices_empty) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer"); + else { + /* Push an error, but participate in collective read */ + HDONE_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer"); + break; + } } - /* Set chunk's new length for eventual filter pipeline calls */ - if (chunk_entry->need_read) { + if (!chunk_entry->need_read) + /* Set chunk's new length for eventual filter pipeline calls */ + chunk_entry->chunk_new.length = file_chunk_size; + else { /* - * Check if chunk is currently allocated. If not, don't try to - * read it from the file. Instead, just fill the chunk buffer - * with the fill value if fill values are to be written. + * Check whether the chunk needs to be read from the file, based + * on whether the dataset's chunk index is empty or the chunk has + * a defined address in the file. If the chunk doesn't need to be + * read from the file, just fill the chunk buffer with the fill + * value if necessary. 
*/ - if (H5_addr_defined(chunk_entry->chunk_current.offset)) { - /* Set first read buffer */ - if (!base_read_buf) - base_read_buf = chunk_entry->buf; + if (cached_dset_info->index_empty || !H5_addr_defined(chunk_entry->chunk_current.offset)) { + chunk_entry->need_read = false; + + /* Update field keeping track of number of chunks to read */ + assert(chunk_list->num_chunks_to_read > 0); + chunk_list->num_chunks_to_read--; + } + if (chunk_entry->need_read) { /* Set chunk's new length for eventual filter pipeline calls */ if (chunk_entry->skip_filter_pline) chunk_entry->chunk_new.length = file_chunk_size; @@ -4379,81 +4781,57 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch chunk_entry->chunk_new.length = chunk_entry->chunk_current.length; } else { - chunk_entry->need_read = false; - - /* Update field keeping track of number of chunks to read */ - assert(chunk_list->num_chunks_to_read > 0); - chunk_list->num_chunks_to_read--; - /* Set chunk's new length for eventual filter pipeline calls */ chunk_entry->chunk_new.length = file_chunk_size; - if (should_fill) { - /* Initialize fill value buffer if not already initialized */ - if (!fb_info_init) { - hsize_t chunk_dims[H5S_MAX_RANK]; + /* Determine if fill values should be "read" for this unallocated chunk */ + if (cached_dset_info->should_fill) { + assert(cached_dset_info->fb_info_init); + assert(cached_dset_info->fb_info.fill_buf); - assert(di->dset->shared->ndims == di->dset->shared->layout.u.chunk.ndims - 1); - for (size_t j = 0; j < di->dset->shared->layout.u.chunk.ndims - 1; j++) - chunk_dims[j] = (hsize_t)di->dset->shared->layout.u.chunk.dim[j]; - - /* Get a dataspace for filling chunk memory buffers */ - if (NULL == (fill_space = H5S_create_simple( - di->dset->shared->layout.u.chunk.ndims - 1, chunk_dims, NULL))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, - "unable to create chunk fill dataspace"); - - /* Initialize fill value buffer */ - if (H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc, - (void *)&di->dset->shared->dcpl_cache.pline, - (H5MM_free_t)H5D__chunk_mem_free, - (void *)&di->dset->shared->dcpl_cache.pline, - &di->dset->shared->dcpl_cache.fill, di->dset->shared->type, - di->dset->shared->type_id, 0, file_chunk_size) < 0) + /* Write fill value to memory buffer */ + if (H5D__fill(cached_dset_info->fb_info.fill_buf, + cached_dset_info->dset_io_info->type_info.dset_type, chunk_entry->buf, + cached_dset_info->dset_io_info->type_info.mem_type, + cached_dset_info->fill_space) < 0) { + if (chunk_list->all_dset_indices_empty) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, - "can't initialize fill value buffer"); - - fb_info_init = true; + "couldn't fill chunk buffer with fill value"); + else { + /* Push an error, but participate in collective read */ + HDONE_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "couldn't fill chunk buffer with fill value"); + break; + } } - - /* Write fill value to memory buffer */ - assert(fb_info.fill_buf); - if (H5D__fill(fb_info.fill_buf, di->dset->shared->type, chunk_entry->buf, - type_info->mem_type, fill_space) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, - "couldn't fill chunk buffer with fill value"); } } } - else - chunk_entry->chunk_new.length = file_chunk_size; } - /* - * If dataset is incrementally allocated and hasn't been written to - * yet, the chunk index should be empty. In this case, a collective - * read of chunks is essentially a no-op, so avoid it here. 
- */ - index_empty = false; - if (di->dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR) - if (H5D__chunk_index_empty(di->dset, &index_empty) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty"); - - if (!index_empty) { - /* Perform collective vector read */ + /* Perform collective vector read if necessary */ + if (!chunk_list->all_dset_indices_empty) if (H5D__mpio_collective_filtered_vec_io(chunk_list, io_info->f_sh, H5D_IO_OP_READ) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't perform vector I/O on filtered chunks"); - } /* * Now that all owned chunks have been read, update the chunks * with modification data from the owning rank and other ranks. */ + if (chunk_list->num_chunk_infos > 0) { + /* Retrieve filter settings from API context */ + if (H5CX_get_err_detect(&err_detect) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info"); + if (H5CX_get_filter_cb(&filter_cb) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function"); + } + /* Process all chunks with data from the owning rank first */ - for (i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[info_idx]; H5D_piece_info_t *chunk_info = chunk_entry->chunk_info; + hsize_t iter_nelmts; assert(mpi_rank == chunk_entry->new_owner); @@ -4462,7 +4840,7 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch * the file, so we need to unfilter it */ if (chunk_entry->need_read && !chunk_entry->skip_filter_pline) { - if (H5Z_pipeline(&di->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE, + if (H5Z_pipeline(&chunk_info->dset_info->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE, &(chunk_entry->index_info.filter_mask), err_detect, filter_cb, (size_t *)&chunk_entry->chunk_new.length, &chunk_entry->chunk_buf_size, &chunk_entry->buf) < 0) @@ -4471,28 +4849,35 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace); - if (H5D_select_io_mem(chunk_entry->buf, chunk_info->fspace, di->buf.cvp, chunk_info->mspace, - type_info->dst_type_size, (size_t)iter_nelmts) < 0) + if (H5D_select_io_mem(chunk_entry->buf, chunk_info->fspace, chunk_info->dset_info->buf.cvp, + chunk_info->mspace, chunk_info->dset_info->type_info.dst_type_size, + (size_t)iter_nelmts) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't copy chunk data to write buffer"); } /* Allocate iterator for memory selection */ - if (NULL == (sel_iter = H5FL_MALLOC(H5S_sel_iter_t))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate memory iterator"); + if (chunk_msg_bufs_len > 0) { + assert(chunk_list->chunk_hash_table_keylen > 0); + if (NULL == (key_buf = H5MM_malloc(chunk_list->chunk_hash_table_keylen))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate hash table key buffer"); + + if (NULL == (sel_iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate memory iterator"); + } /* Now process all received chunk message buffers */ - for (i = 0; i < (size_t)chunk_msg_bufs_len; i++) { + for (size_t buf_idx = 0; buf_idx < (size_t)chunk_msg_bufs_len; buf_idx++) { H5D_filtered_collective_chunk_info_t *chunk_entry = NULL; - const 
unsigned char *msg_ptr = chunk_msg_bufs[i]; - hsize_t chunk_idx; + const unsigned char *msg_ptr = chunk_msg_bufs[buf_idx]; if (msg_ptr) { - /* Retrieve the chunk's index value */ - H5MM_memcpy(&chunk_idx, msg_ptr, sizeof(hsize_t)); - msg_ptr += sizeof(hsize_t); + /* Retrieve the chunk hash table key from the chunk message buffer */ + H5MM_memcpy(key_buf, msg_ptr, chunk_list->chunk_hash_table_keylen); + msg_ptr += chunk_list->chunk_hash_table_keylen; - /* Find the chunk entry according to its chunk index */ - HASH_FIND(hh, chunk_list->chunk_hash_table, &chunk_idx, sizeof(hsize_t), chunk_entry); + /* Find the chunk entry according to its chunk hash table key */ + HASH_FIND(hh, chunk_list->chunk_hash_table, key_buf, chunk_list->chunk_hash_table_keylen, + chunk_entry); if (chunk_entry == NULL) HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find chunk entry"); if (mpi_rank != chunk_entry->new_owner) @@ -4507,11 +4892,14 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch if (!chunk_entry->buf) continue; else { + hsize_t iter_nelmts; + /* Decode the chunk file dataspace from the message */ if (NULL == (dataspace = H5S_decode(&msg_ptr))) HGOTO_ERROR(H5E_DATASET, H5E_CANTDECODE, FAIL, "unable to decode dataspace"); - if (H5S_select_iter_init(sel_iter, dataspace, type_info->dst_type_size, + if (H5S_select_iter_init(sel_iter, dataspace, + chunk_entry->chunk_info->dset_info->type_info.dst_type_size, H5S_SEL_ITER_SHARE_WITH_DATASPACE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information"); @@ -4533,50 +4921,49 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch dataspace = NULL; } - H5MM_free(chunk_msg_bufs[i]); - chunk_msg_bufs[i] = NULL; + H5MM_free(chunk_msg_bufs[buf_idx]); + chunk_msg_bufs[buf_idx] = NULL; } } } /* Finally, filter all the chunks */ - for (i = 0; i < chunk_list->num_chunk_infos; i++) { - if (!chunk_list->chunk_infos[i].skip_filter_pline) { - if (H5Z_pipeline(&di->dset->shared->dcpl_cache.pline, 0, - &(chunk_list->chunk_infos[i].index_info.filter_mask), err_detect, filter_cb, - (size_t *)&chunk_list->chunk_infos[i].chunk_new.length, - &chunk_list->chunk_infos[i].chunk_buf_size, &chunk_list->chunk_infos[i].buf) < 0) + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + if (!chunk_list->chunk_infos[info_idx].skip_filter_pline) { + if (H5Z_pipeline( + &chunk_list->chunk_infos[info_idx].chunk_info->dset_info->dset->shared->dcpl_cache.pline, + 0, &(chunk_list->chunk_infos[info_idx].index_info.filter_mask), err_detect, filter_cb, + (size_t *)&chunk_list->chunk_infos[info_idx].chunk_new.length, + &chunk_list->chunk_infos[info_idx].chunk_buf_size, + &chunk_list->chunk_infos[info_idx].buf) < 0) HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed"); } #if H5_SIZEOF_SIZE_T > 4 /* Check for the chunk expanding too much to encode in a 32-bit value */ - if (chunk_list->chunk_infos[i].chunk_new.length > ((size_t)0xffffffff)) + if (chunk_list->chunk_infos[info_idx].chunk_new.length > ((size_t)0xffffffff)) HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length"); #endif } done: + if (dataspace && (H5S_close(dataspace) < 0)) + HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace"); + if (sel_iter) { if (sel_iter_init && H5S_SELECT_ITER_RELEASE(sel_iter) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator"); sel_iter = H5FL_FREE(H5S_sel_iter_t, sel_iter); } - 
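/*
 * A minimal standalone analogue of the keyed lookup pattern in the hunk
 * above, assuming uthash.h is available; every name below is illustrative
 * and keylen <= sizeof(entry->key) is assumed. The patch widens the hash
 * key from a bare hsize_t chunk index to a variable-length byte key
 * (dataset object header address plus chunk index) so chunks from several
 * datasets can share one table; uthash supports this directly because
 * HASH_ADD_KEYPTR/HASH_FIND hash an arbitrary byte range of a given length.
 */
#include <string.h>
#include "uthash.h"

typedef struct {
    unsigned char  key[16];   /* packed (dset oheader addr, chunk index) */
    void          *chunk_buf; /* payload carried by the table */
    UT_hash_handle hh;        /* makes this struct hashable by uthash */
} chunk_entry_ex_t;

/* Register an entry under the first 'keylen' bytes of its packed key */
static void
chunk_table_insert(chunk_entry_ex_t **table, chunk_entry_ex_t *entry, size_t keylen)
{
    HASH_ADD_KEYPTR(hh, *table, entry->key, keylen, entry);
}

/* Copy the key out of a received message buffer, then look it up,
 * mirroring the H5MM_memcpy + HASH_FIND sequence above */
static chunk_entry_ex_t *
chunk_table_find(chunk_entry_ex_t *table, const unsigned char *msg_ptr, size_t keylen)
{
    unsigned char     key_buf[16];
    chunk_entry_ex_t *entry = NULL;

    memcpy(key_buf, msg_ptr, keylen);
    HASH_FIND(hh, table, key_buf, keylen, entry);
    return entry;
}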
if (dataspace && (H5S_close(dataspace) < 0)) - HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace"); - if (fill_space && (H5S_close(fill_space) < 0)) - HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space"); - /* Release the fill buffer info, if it's been initialized */ - if (fb_info_init && H5D__fill_term(&fb_info) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info"); + H5MM_free(key_buf); /* On failure, try to free all resources used by entries in the chunk list */ if (ret_value < 0) { - for (i = 0; i < chunk_list->num_chunk_infos; i++) { - if (chunk_list->chunk_infos[i].buf) { - H5MM_free(chunk_list->chunk_infos[i].buf); - chunk_list->chunk_infos[i].buf = NULL; + for (size_t info_idx = 0; info_idx < chunk_list->num_chunk_infos; info_idx++) { + if (chunk_list->chunk_infos[info_idx].buf) { + H5MM_free(chunk_list->chunk_infos[info_idx].buf); + chunk_list->chunk_infos[info_idx].buf = NULL; } } } @@ -4605,7 +4992,7 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch static herr_t H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, H5D_io_info_t *io_info, - H5D_chk_idx_info_t *idx_info, int mpi_rank, int mpi_size) + size_t num_dset_infos, int mpi_rank, int mpi_size) { H5D_chunk_alloc_info_t *collective_list = NULL; MPI_Datatype send_type; @@ -4615,11 +5002,10 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t bool need_sort = false; size_t collective_num_entries = 0; size_t num_local_chunks_processed = 0; - size_t i; - void *gathered_array = NULL; - int *counts_disps_array = NULL; - int *counts_ptr = NULL; - int *displacements_ptr = NULL; + void *gathered_array = NULL; + int *counts_disps_array = NULL; + int *counts_ptr = NULL; + int *displacements_ptr = NULL; int mpi_code; herr_t ret_value = SUCCEED; @@ -4627,8 +5013,6 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t assert(chunk_list); assert(io_info); - assert(idx_info); - assert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE); #ifdef H5Dmpio_DEBUG H5D_MPIO_TRACE_ENTER(mpi_rank); @@ -4667,15 +5051,15 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t /* Set the receive counts from the assigned chunks map */ counts_ptr = counts_disps_array; - for (i = 0; i < (size_t)mpi_size; i++) - H5_CHECKED_ASSIGN(counts_ptr[i], int, num_chunks_assigned_map[i], size_t); + for (int curr_rank = 0; curr_rank < mpi_size; curr_rank++) + H5_CHECKED_ASSIGN(counts_ptr[curr_rank], int, num_chunks_assigned_map[curr_rank], size_t); /* Set the displacements into the receive buffer for the gather operation */ displacements_ptr = &counts_disps_array[mpi_size]; *displacements_ptr = 0; - for (i = 1; i < (size_t)mpi_size; i++) - displacements_ptr[i] = displacements_ptr[i - 1] + counts_ptr[i - 1]; + for (int curr_rank = 1; curr_rank < mpi_size; curr_rank++) + displacements_ptr[curr_rank] = displacements_ptr[curr_rank - 1] + counts_ptr[curr_rank - 1]; } /* Perform gather operation */ @@ -4701,14 +5085,27 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t } /* Collectively re-allocate the modified chunks (from each rank) in the file */ - collective_list = (H5D_chunk_alloc_info_t *)gathered_array; - for (i = 0, num_local_chunks_processed = 0; i < collective_num_entries; i++) { - H5D_chunk_alloc_info_t *coll_entry = &collective_list[i]; - bool need_insert; - bool 
update_local_chunk; - - if (H5D__chunk_file_alloc(idx_info, &coll_entry->chunk_current, &coll_entry->chunk_new, &need_insert, - NULL) < 0) + collective_list = (H5D_chunk_alloc_info_t *)gathered_array; + num_local_chunks_processed = 0; + for (size_t entry_idx = 0; entry_idx < collective_num_entries; entry_idx++) { + H5D_mpio_filtered_dset_info_t *cached_dset_info; + H5D_chunk_alloc_info_t *coll_entry = &collective_list[entry_idx]; + bool need_insert; + bool update_local_chunk; + + /* Find the cached dataset info for the dataset this chunk is in */ + if (num_dset_infos > 1) { + HASH_FIND(hh, chunk_list->dset_info.dset_info_hash_table, &coll_entry->dset_oloc_addr, + sizeof(haddr_t), cached_dset_info); + if (cached_dset_info == NULL) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find cached dataset info entry"); + } + else + cached_dset_info = chunk_list->dset_info.single_dset_info; + assert(cached_dset_info); + + if (H5D__chunk_file_alloc(&cached_dset_info->chunk_idx_info, &coll_entry->chunk_current, + &coll_entry->chunk_new, &need_insert, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk"); /* @@ -4716,9 +5113,12 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t * rank, make sure to update the chunk entry in the local * chunk list */ - update_local_chunk = (num_local_chunks_processed < chunk_list->num_chunk_infos) && - (coll_entry->chunk_idx == - chunk_list->chunk_infos[num_local_chunks_processed].index_info.chunk_idx); + update_local_chunk = + (num_local_chunks_processed < chunk_list->num_chunk_infos) && + (coll_entry->dset_oloc_addr == + chunk_list->chunk_infos[num_local_chunks_processed].index_info.dset_oloc_addr) && + (coll_entry->chunk_idx == + chunk_list->chunk_infos[num_local_chunks_processed].index_info.chunk_idx); if (update_local_chunk) { H5D_filtered_collective_chunk_info_t *local_chunk; @@ -4798,38 +5198,35 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t static herr_t H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, H5D_io_info_t *io_info, - H5D_dset_io_info_t *di, H5D_chk_idx_info_t *idx_info, - int mpi_rank, int mpi_size) + size_t num_dset_infos, int mpi_rank, int mpi_size) { - H5D_chunk_ud_t chunk_ud; - MPI_Datatype send_type; - MPI_Datatype recv_type; - bool send_type_derived = false; - bool recv_type_derived = false; - hsize_t scaled_coords[H5O_LAYOUT_NDIMS]; - size_t collective_num_entries = 0; - size_t i; - void *gathered_array = NULL; - int *counts_disps_array = NULL; - int *counts_ptr = NULL; - int *displacements_ptr = NULL; - int mpi_code; - herr_t ret_value = SUCCEED; + MPI_Datatype send_type; + MPI_Datatype recv_type; + size_t collective_num_entries = 0; + bool send_type_derived = false; + bool recv_type_derived = false; + void *gathered_array = NULL; + int *counts_disps_array = NULL; + int *counts_ptr = NULL; + int *displacements_ptr = NULL; + int mpi_code; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE assert(chunk_list); assert(io_info); - assert(di); - assert(idx_info); #ifdef H5Dmpio_DEBUG H5D_MPIO_TRACE_ENTER(mpi_rank); H5D_MPIO_TIME_START(mpi_rank, "Reinsertion of modified chunks into chunk index"); #endif - /* Only re-insert chunks if index has an insert method */ - if (!idx_info->storage->ops->insert) + /* + * If no datasets involved have a chunk index 'insert' + * operation, this function is a no-op + */ + if (chunk_list->no_dset_index_insert_methods) 
HGOTO_DONE(SUCCEED); /* @@ -4864,15 +5261,15 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * /* Set the receive counts from the assigned chunks map */ counts_ptr = counts_disps_array; - for (i = 0; i < (size_t)mpi_size; i++) - H5_CHECKED_ASSIGN(counts_ptr[i], int, num_chunks_assigned_map[i], size_t); + for (int curr_rank = 0; curr_rank < mpi_size; curr_rank++) + H5_CHECKED_ASSIGN(counts_ptr[curr_rank], int, num_chunks_assigned_map[curr_rank], size_t); /* Set the displacements into the receive buffer for the gather operation */ displacements_ptr = &counts_disps_array[mpi_size]; *displacements_ptr = 0; - for (i = 1; i < (size_t)mpi_size; i++) - displacements_ptr[i] = displacements_ptr[i - 1] + counts_ptr[i - 1]; + for (int curr_rank = 1; curr_rank < mpi_size; curr_rank++) + displacements_ptr[curr_rank] = displacements_ptr[curr_rank - 1] + counts_ptr[curr_rank - 1]; } /* Perform gather operation */ @@ -4897,11 +5294,12 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * "can't gather chunk index re-insertion info to/from ranks"); } - /* Initialize static chunk udata fields from chunk index info */ - H5D_MPIO_INIT_CHUNK_UD_INFO(chunk_ud, idx_info); - - for (i = 0; i < collective_num_entries; i++) { - H5D_chunk_insert_info_t *coll_entry = &((H5D_chunk_insert_info_t *)gathered_array)[i]; + for (size_t entry_idx = 0; entry_idx < collective_num_entries; entry_idx++) { + H5D_mpio_filtered_dset_info_t *cached_dset_info; + H5D_chunk_insert_info_t *coll_entry = &((H5D_chunk_insert_info_t *)gathered_array)[entry_idx]; + H5D_chunk_ud_t chunk_ud; + haddr_t prev_tag = HADDR_UNDEF; + hsize_t scaled_coords[H5O_LAYOUT_NDIMS]; /* * We only need to reinsert this chunk if we had to actually @@ -4910,13 +5308,28 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * if (!coll_entry->index_info.need_insert) continue; - chunk_ud.chunk_block = coll_entry->chunk_block; - chunk_ud.chunk_idx = coll_entry->index_info.chunk_idx; - chunk_ud.filter_mask = coll_entry->index_info.filter_mask; - chunk_ud.common.scaled = scaled_coords; + /* Find the cached dataset info for the dataset this chunk is in */ + if (num_dset_infos > 1) { + HASH_FIND(hh, chunk_list->dset_info.dset_info_hash_table, &coll_entry->index_info.dset_oloc_addr, + sizeof(haddr_t), cached_dset_info); + if (cached_dset_info == NULL) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find cached dataset info entry"); + } + else + cached_dset_info = chunk_list->dset_info.single_dset_info; + assert(cached_dset_info); + + chunk_ud.common.layout = cached_dset_info->chunk_idx_info.layout; + chunk_ud.common.storage = cached_dset_info->chunk_idx_info.storage; + chunk_ud.common.scaled = scaled_coords; + + chunk_ud.chunk_block = coll_entry->chunk_block; + chunk_ud.chunk_idx = coll_entry->index_info.chunk_idx; + chunk_ud.filter_mask = coll_entry->index_info.filter_mask; /* Calculate scaled coordinates for the chunk */ - if (idx_info->layout->idx_type == H5D_CHUNK_IDX_EARRAY && idx_info->layout->u.earray.unlim_dim > 0) { + if (cached_dset_info->chunk_idx_info.layout->idx_type == H5D_CHUNK_IDX_EARRAY && + cached_dset_info->chunk_idx_info.layout->u.earray.unlim_dim > 0) { /* * Extensible arrays where the unlimited dimension is not * the slowest-changing dimension "swizzle" the coordinates @@ -4930,17 +5343,20 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * * callback that accepts a chunk index and provides the * caller with the scaled 
coordinates for that chunk. */ - H5VM_array_calc_pre(chunk_ud.chunk_idx, di->dset->shared->ndims, - idx_info->layout->u.earray.swizzled_down_chunks, scaled_coords); + H5VM_array_calc_pre(chunk_ud.chunk_idx, cached_dset_info->dset_io_info->dset->shared->ndims, + cached_dset_info->chunk_idx_info.layout->u.earray.swizzled_down_chunks, + scaled_coords); - H5VM_unswizzle_coords(hsize_t, scaled_coords, idx_info->layout->u.earray.unlim_dim); + H5VM_unswizzle_coords(hsize_t, scaled_coords, + cached_dset_info->chunk_idx_info.layout->u.earray.unlim_dim); } else { - H5VM_array_calc_pre(chunk_ud.chunk_idx, di->dset->shared->ndims, - di->dset->shared->layout.u.chunk.down_chunks, scaled_coords); + H5VM_array_calc_pre(chunk_ud.chunk_idx, cached_dset_info->dset_io_info->dset->shared->ndims, + cached_dset_info->dset_io_info->dset->shared->layout.u.chunk.down_chunks, + scaled_coords); } - scaled_coords[di->dset->shared->ndims] = 0; + scaled_coords[cached_dset_info->dset_io_info->dset->shared->ndims] = 0; #ifndef NDEBUG /* @@ -4952,10 +5368,18 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * * they match. */ for (size_t dbg_idx = 0; dbg_idx < chunk_list->num_chunk_infos; dbg_idx++) { - if (coll_entry->index_info.chunk_idx == chunk_list->chunk_infos[dbg_idx].index_info.chunk_idx) { + bool same_chunk; + + /* Chunks must have the same index and reside in the same dataset */ + same_chunk = (0 == H5_addr_cmp(coll_entry->index_info.dset_oloc_addr, + chunk_list->chunk_infos[dbg_idx].index_info.dset_oloc_addr)); + same_chunk = same_chunk && (coll_entry->index_info.chunk_idx == + chunk_list->chunk_infos[dbg_idx].index_info.chunk_idx); + + if (same_chunk) { bool coords_match = !memcmp(scaled_coords, chunk_list->chunk_infos[dbg_idx].chunk_info->scaled, - di->dset->shared->ndims * sizeof(hsize_t)); + cached_dset_info->dset_io_info->dset->shared->ndims * sizeof(hsize_t)); assert(coords_match && "Calculated scaled coordinates for chunk didn't match " "chunk's actual scaled coordinates!"); @@ -4964,8 +5388,15 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t * } #endif - if ((idx_info->storage->ops->insert)(idx_info, &chunk_ud, di->dset) < 0) + /* Set metadata tagging with dataset oheader addr */ + H5AC_tag(cached_dset_info->dset_io_info->dset->oloc.addr, &prev_tag); + + if ((cached_dset_info->chunk_idx_info.storage->ops->insert)( + &cached_dset_info->chunk_idx_info, &chunk_ud, cached_dset_info->dset_io_info->dset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk address into index"); + + /* Reset metadata tagging */ + H5AC_tag(prev_tag, NULL); } done: @@ -5021,9 +5452,9 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, bool *con bool struct_type_derived = false; MPI_Datatype chunk_block_type = MPI_DATATYPE_NULL; bool chunk_block_type_derived = false; - MPI_Datatype types[5]; - MPI_Aint displacements[5]; - int block_lengths[5]; + MPI_Datatype types[6]; + MPI_Aint displacements[6]; + int block_lengths[6]; int field_count; int mpi_code; herr_t ret_value = SUCCEED; @@ -5042,29 +5473,32 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, bool *con if (H5F_mpi_get_file_block_type(false, &chunk_block_type, &chunk_block_type_derived) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description"); - field_count = 5; + field_count = 6; assert(field_count == (sizeof(types) / sizeof(MPI_Datatype))); /* * Create structure type to pack chunk H5F_block_t 
structure - * next to chunk_idx, orig_owner, new_owner and num_writers - * fields + * next to chunk_idx, dset_oloc_addr, orig_owner, new_owner + * and num_writers fields */ block_lengths[0] = 1; block_lengths[1] = 1; block_lengths[2] = 1; block_lengths[3] = 1; block_lengths[4] = 1; + block_lengths[5] = 1; displacements[0] = offsetof(H5D_chunk_redistribute_info_t, chunk_block); displacements[1] = offsetof(H5D_chunk_redistribute_info_t, chunk_idx); - displacements[2] = offsetof(H5D_chunk_redistribute_info_t, orig_owner); - displacements[3] = offsetof(H5D_chunk_redistribute_info_t, new_owner); - displacements[4] = offsetof(H5D_chunk_redistribute_info_t, num_writers); + displacements[2] = offsetof(H5D_chunk_redistribute_info_t, dset_oloc_addr); + displacements[3] = offsetof(H5D_chunk_redistribute_info_t, orig_owner); + displacements[4] = offsetof(H5D_chunk_redistribute_info_t, new_owner); + displacements[5] = offsetof(H5D_chunk_redistribute_info_t, num_writers); types[0] = chunk_block_type; types[1] = HSIZE_AS_MPI_TYPE; - types[2] = MPI_INT; + types[2] = HADDR_AS_MPI_TYPE; types[3] = MPI_INT; types[4] = MPI_INT; + types[5] = MPI_INT; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, contig_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) @@ -5073,25 +5507,28 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, bool *con if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(contig_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) - /* Create struct type to extract the chunk_current, chunk_idx, orig_owner, - * new_owner and num_writers fields from a H5D_filtered_collective_chunk_info_t - * structure + /* Create struct type to extract the chunk_current, chunk_idx, + * dset_oloc_addr, orig_owner, new_owner and num_writers fields + * from a H5D_filtered_collective_chunk_info_t structure */ block_lengths[0] = 1; block_lengths[1] = 1; block_lengths[2] = 1; block_lengths[3] = 1; block_lengths[4] = 1; + block_lengths[5] = 1; displacements[0] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_current); displacements[1] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.chunk_idx); - displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, orig_owner); - displacements[3] = offsetof(H5D_filtered_collective_chunk_info_t, new_owner); - displacements[4] = offsetof(H5D_filtered_collective_chunk_info_t, num_writers); + displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.dset_oloc_addr); + displacements[3] = offsetof(H5D_filtered_collective_chunk_info_t, orig_owner); + displacements[4] = offsetof(H5D_filtered_collective_chunk_info_t, new_owner); + displacements[5] = offsetof(H5D_filtered_collective_chunk_info_t, num_writers); types[0] = chunk_block_type; types[1] = HSIZE_AS_MPI_TYPE; - types[2] = MPI_INT; + types[2] = HADDR_AS_MPI_TYPE; types[3] = MPI_INT; types[4] = MPI_INT; + types[5] = MPI_INT; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) @@ -5162,9 +5599,9 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, bool *contig_typ bool struct_type_derived = false; MPI_Datatype chunk_block_type = MPI_DATATYPE_NULL; bool chunk_block_type_derived = false; - MPI_Datatype types[3]; - MPI_Aint displacements[3]; - int block_lengths[3]; + MPI_Datatype types[4]; + MPI_Aint displacements[4]; + int block_lengths[4]; int 
field_count; int mpi_code; herr_t ret_value = SUCCEED; @@ -5183,22 +5620,25 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, bool *contig_typ if (H5F_mpi_get_file_block_type(false, &chunk_block_type, &chunk_block_type_derived) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description"); - field_count = 3; + field_count = 4; assert(field_count == (sizeof(types) / sizeof(MPI_Datatype))); /* * Create structure type to pack both chunk H5F_block_t structures - * next to chunk_idx field + * next to chunk_idx and dset_oloc_addr fields */ block_lengths[0] = 1; block_lengths[1] = 1; block_lengths[2] = 1; + block_lengths[3] = 1; displacements[0] = offsetof(H5D_chunk_alloc_info_t, chunk_current); displacements[1] = offsetof(H5D_chunk_alloc_info_t, chunk_new); displacements[2] = offsetof(H5D_chunk_alloc_info_t, chunk_idx); + displacements[3] = offsetof(H5D_chunk_alloc_info_t, dset_oloc_addr); types[0] = chunk_block_type; types[1] = chunk_block_type; types[2] = HSIZE_AS_MPI_TYPE; + types[3] = HADDR_AS_MPI_TYPE; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, contig_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) @@ -5208,18 +5648,22 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, bool *contig_typ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) /* - * Create struct type to extract the chunk_current, chunk_new and chunk_idx - * fields from a H5D_filtered_collective_chunk_info_t structure + * Create struct type to extract the chunk_current, chunk_new, chunk_idx + * and dset_oloc_addr fields from a H5D_filtered_collective_chunk_info_t + * structure */ block_lengths[0] = 1; block_lengths[1] = 1; block_lengths[2] = 1; + block_lengths[3] = 1; displacements[0] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_current); displacements[1] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_new); displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.chunk_idx); + displacements[3] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.dset_oloc_addr); types[0] = chunk_block_type; types[1] = chunk_block_type; types[2] = HSIZE_AS_MPI_TYPE; + types[3] = HADDR_AS_MPI_TYPE; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) @@ -5293,9 +5737,9 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, bool *contig_ty MPI_Datatype chunk_block_type = MPI_DATATYPE_NULL; bool chunk_block_type_derived = false; MPI_Aint contig_type_extent; - MPI_Datatype types[4]; - MPI_Aint displacements[4]; - int block_lengths[4]; + MPI_Datatype types[5]; + MPI_Aint displacements[5]; + int block_lengths[5]; int field_count; int mpi_code; herr_t ret_value = SUCCEED; @@ -5314,7 +5758,7 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, bool *contig_ty if (H5F_mpi_get_file_block_type(false, &chunk_block_type, &chunk_block_type_derived) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description"); - field_count = 4; + field_count = 5; assert(field_count == (sizeof(types) / sizeof(MPI_Datatype))); /* @@ -5327,14 +5771,17 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, bool *contig_ty block_lengths[1] = 1; block_lengths[2] = 1; block_lengths[3] = 1; + block_lengths[4] = 1; displacements[0] = 
offsetof(H5D_chunk_insert_info_t, chunk_block); displacements[1] = offsetof(H5D_chunk_insert_info_t, index_info.chunk_idx); - displacements[2] = offsetof(H5D_chunk_insert_info_t, index_info.filter_mask); - displacements[3] = offsetof(H5D_chunk_insert_info_t, index_info.need_insert); + displacements[2] = offsetof(H5D_chunk_insert_info_t, index_info.dset_oloc_addr); + displacements[3] = offsetof(H5D_chunk_insert_info_t, index_info.filter_mask); + displacements[4] = offsetof(H5D_chunk_insert_info_t, index_info.need_insert); types[0] = chunk_block_type; types[1] = HSIZE_AS_MPI_TYPE; - types[2] = MPI_UNSIGNED; - types[3] = MPI_C_BOOL; + types[2] = HADDR_AS_MPI_TYPE; + types[3] = MPI_UNSIGNED; + types[4] = MPI_C_BOOL; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) @@ -5360,8 +5807,9 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, bool *contig_ty */ displacements[0] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_new); displacements[1] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.chunk_idx); - displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.filter_mask); - displacements[3] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.need_insert); + displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.dset_oloc_addr); + displacements[3] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.filter_mask); + displacements[4] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.need_insert); if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) @@ -5568,6 +6016,8 @@ H5D__mpio_dump_collective_filtered_chunk_list(H5D_filtered_collective_io_info_t chunk_rank < 3 ? 0 : chunk_entry->chunk_info->scaled[2], chunk_rank < 4 ? 0 : chunk_entry->chunk_info->scaled[3]); H5D_MPIO_DEBUG_VA(mpi_rank, " Chunk Index: %" PRIuHSIZE, chunk_entry->index_info.chunk_idx); + H5D_MPIO_DEBUG_VA(mpi_rank, " Dataset Object Header Address: %" PRIuHADDR, + chunk_entry->index_info.dset_oloc_addr); H5D_MPIO_DEBUG_VA(mpi_rank, " Filter Mask: %u", chunk_entry->index_info.filter_mask); H5D_MPIO_DEBUG_VA(mpi_rank, " Need Insert: %s", chunk_entry->index_info.need_insert ? 
"YES" : "NO"); diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 0dbcb64c9e0..82fec0ea1ff 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -254,6 +254,7 @@ typedef struct H5D_piece_info_t { unsigned mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */ bool in_place_tconv; /* Whether to perform type conversion in-place */ size_t buf_off; /* Buffer offset for in-place type conversion */ + bool filtered_dset; /* Whether the dataset this chunk is in has filters applied */ struct H5D_dset_io_info_t *dset_info; /* Pointer to dset_info */ } H5D_piece_info_t; @@ -292,26 +293,28 @@ typedef struct H5D_io_info_t { #endif /* H5_HAVE_PARALLEL */ H5D_md_io_ops_t md_io_ops; /* Multi dataset I/O operation function pointers */ H5D_io_op_type_t op_type; - size_t count; /* Number of datasets in I/O request */ - H5D_dset_io_info_t *dsets_info; /* dsets info where I/O is done to/from */ - size_t piece_count; /* Number of pieces in I/O request */ - size_t pieces_added; /* Number of pieces added so far to arrays */ - H5D_piece_info_t **sel_pieces; /* Array of info struct for all pieces in I/O */ - H5S_t **mem_spaces; /* Array of chunk memory spaces */ - H5S_t **file_spaces; /* Array of chunk file spaces */ - haddr_t *addrs; /* Array of chunk addresses */ - size_t *element_sizes; /* Array of element sizes */ - void **rbufs; /* Array of read buffers */ - const void **wbufs; /* Array of write buffers */ - haddr_t store_faddr; /* lowest file addr for read/write */ - H5_flexible_const_ptr_t base_maddr; /* starting mem address */ - H5D_selection_io_mode_t use_select_io; /* Whether to use selection I/O */ - uint8_t *tconv_buf; /* Datatype conv buffer */ - bool tconv_buf_allocated; /* Whether the type conversion buffer was allocated */ - size_t tconv_buf_size; /* Size of type conversion buffer */ - uint8_t *bkg_buf; /* Background buffer */ - bool bkg_buf_allocated; /* Whether the background buffer was allocated */ - size_t bkg_buf_size; /* Size of background buffer */ + size_t count; /* Number of datasets in I/O request */ + size_t filtered_count; /* Number of datasets with filters applied in I/O request */ + H5D_dset_io_info_t *dsets_info; /* dsets info where I/O is done to/from */ + size_t piece_count; /* Number of pieces in I/O request */ + size_t pieces_added; /* Number of pieces added so far to arrays */ + size_t filtered_pieces_added; /* Number of filtered pieces in I/O request */ + H5D_piece_info_t **sel_pieces; /* Array of info struct for all pieces in I/O */ + H5S_t **mem_spaces; /* Array of chunk memory spaces */ + H5S_t **file_spaces; /* Array of chunk file spaces */ + haddr_t *addrs; /* Array of chunk addresses */ + size_t *element_sizes; /* Array of element sizes */ + void **rbufs; /* Array of read buffers */ + const void **wbufs; /* Array of write buffers */ + haddr_t store_faddr; /* lowest file addr for read/write */ + H5_flexible_const_ptr_t base_maddr; /* starting mem address */ + H5D_selection_io_mode_t use_select_io; /* Whether to use selection I/O */ + uint8_t *tconv_buf; /* Datatype conv buffer */ + bool tconv_buf_allocated; /* Whether the type conversion buffer was allocated */ + size_t tconv_buf_size; /* Size of type conversion buffer */ + uint8_t *bkg_buf; /* Background buffer */ + bool bkg_buf_allocated; /* Whether the background buffer was allocated */ + size_t bkg_buf_size; /* Size of background buffer */ size_t max_tconv_type_size; /* Largest of all source and destination type sizes involved in type conversion */ bool must_fill_bkg; /* Whether any 
datasets need a background buffer filled with destination contents */ @@ -538,13 +541,12 @@ struct H5D_shared_t { /* Buffered/cached information for types of raw data storage*/ struct { - H5D_rdcdc_t contig; /* Information about contiguous data */ - /* (Note that the "contig" cache - * information can be used by a chunked - * dataset in certain circumstances) - */ - H5D_rdcc_t chunk; /* Information about chunked data */ - H5SL_t *sel_pieces; /* Skip list containing information for each piece selected */ + H5D_rdcdc_t contig; /* Information about contiguous data */ + /* (Note that the "contig" cache + * information can be used by a chunked + * dataset in certain circumstances) + */ + H5D_rdcc_t chunk; /* Information about chunked data */ } cache; H5D_append_flush_t append_flush; /* Append flush property information */ diff --git a/src/H5Dscatgath.c b/src/H5Dscatgath.c index cd2027bdb07..9b60d81b2b0 100644 --- a/src/H5Dscatgath.c +++ b/src/H5Dscatgath.c @@ -718,7 +718,7 @@ H5D__scatgath_write(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset /* Use "vp" field of union to twiddle away const. OK because if we're doing this it means the * user explicitly allowed us to modify this buffer via H5Pset_modify_write_buf(). */ tmp_buf = (uint8_t *)dset_info->buf.vp + dset_info->layout_io_info.contig_piece_info->buf_off + - (smine_start * dset_info->type_info.dst_type_size); + (smine_start * dset_info->type_info.src_type_size); } else { /* Do type conversion using intermediate buffer */ @@ -1335,7 +1335,7 @@ H5D__scatgath_write_select(H5D_io_info_t *io_info) write_mem_spaces = NULL; } - /* Free bakcground buffer parameter arrays */ + /* Free background buffer parameter arrays */ H5MM_free(bkg_mem_spaces); bkg_mem_spaces = NULL; H5MM_free(bkg_file_spaces); diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c index 6f2c559f155..3bd2502e232 100644 --- a/src/H5FDhdfs.c +++ b/src/H5FDhdfs.c @@ -926,7 +926,7 @@ hdfs__fprint_stats(FILE *stream, const H5FD_hdfs_t *file) unsigned long long max_raw = 0; unsigned long long bytes_raw = 0; unsigned long long bytes_meta = 0; - double re_dub = 0.0; /* re-usable double variable */ + double re_dub = 0.0; /* reusable double variable */ unsigned suffix_i = 0; const char suffixes[] = {' ', 'K', 'M', 'G', 'T', 'P'}; diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index f5711225b1d..7141550f40a 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -11,7 +11,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: This is the MPI-2 I/O driver. + * Purpose: This is the MPI I/O driver. */ #include "H5FDdrvr_module.h" /* This source code file is part of the H5FD driver module */ @@ -363,12 +363,12 @@ H5FD__mpio_term(void) * only in the parallel HDF5 library and is not collective. * * comm is the MPI communicator to be used for file open as - * defined in MPI_FILE_OPEN of MPI-2. This function makes a + * defined in MPI_FILE_OPEN of MPI. This function makes a * duplicate of comm. Any modification to comm after this function * call returns has no effect on the access property list. * * info is the MPI Info object to be used for file open as - * defined in MPI_FILE_OPEN of MPI-2. This function makes a + * defined in MPI_FILE_OPEN of MPI. This function makes a * duplicate of info. Any modification to info after this * function call returns has no effect on the access property * list. 
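For context, the comm/info duplication semantics documented in the comment above look like this from the application side. A minimal usage sketch, assuming a parallel HDF5 build; the file name is illustrative and error checking is omitted:

#include <mpi.h>
#include "hdf5.h"

int
main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    /* The fapl stores duplicates of comm and info, so the caller's handles
     * may be modified or freed after this call without affecting the list */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    hid_t file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    H5Fclose(file);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}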
@@ -946,7 +946,7 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR if (H5_mpio_get_file_sync_required(fh, &file->mpi_file_sync_required) < 0) HGOTO_ERROR(H5E_VFL, H5E_CANTGET, NULL, "unable to get mpi_file_sync_required hint"); - /* Only processor p0 will get the filesize and broadcast it. */ + /* Only processor p0 will get the file size and broadcast it. */ if (mpi_rank == 0) { /* If MPI_File_get_size fails, broadcast file size as -1 to signal error */ if (MPI_SUCCESS != (mpi_code = MPI_File_get_size(fh, &file_size))) @@ -1222,20 +1222,13 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU MPI_Status mpi_stat; /* Status from I/O operation */ MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */ int size_i; /* Integer copy of 'size' to read */ -#if H5_CHECK_MPI_VERSION(3, 0) - MPI_Count bytes_read = 0; /* Number of bytes read in */ - MPI_Count type_size; /* MPI datatype used for I/O's size */ - MPI_Count io_size; /* Actual number of bytes requested */ - MPI_Count n; -#else - int bytes_read = 0; /* Number of bytes read in */ - int type_size; /* MPI datatype used for I/O's size */ - int io_size; /* Actual number of bytes requested */ - int n; -#endif - bool use_view_this_time = false; - bool derived_type = false; - bool rank0_bcast = false; /* If read-with-rank0-and-bcast flag was used */ + MPI_Count bytes_read = 0; /* Number of bytes read in */ + MPI_Count type_size; /* MPI datatype used for I/O's size */ + MPI_Count io_size; /* Actual number of bytes requested */ + MPI_Count n; + bool use_view_this_time = false; + bool derived_type = false; + bool rank0_bcast = false; /* If read-with-rank0-and-bcast flag was used */ #ifdef H5FDmpio_DEBUG bool H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file)); bool H5FD_mpio_debug_r_flag = (H5FD_mpio_debug_flags_s[(int)'r'] && H5FD_MPIO_TRACE_THIS_RANK(file)); @@ -1283,7 +1276,7 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU /* Remember that views are used */ use_view_this_time = true; - /* Prepare for a full-blown xfer using btype, ftype, and disp */ + /* Prepare for a full-blown xfer using btype, ftype, and displacement */ if (H5CX_get_mpi_coll_datatypes(&buf_type, &file_type) < 0) HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O datatypes"); @@ -1393,18 +1386,14 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU /* Only retrieve bytes read if this rank _actually_ participated in I/O */ if (!rank0_bcast || (rank0_bcast && file->mpi_rank == 0)) { /* How many bytes were actually read? */ -#if H5_CHECK_MPI_VERSION(3, 0) if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read))) { -#else - if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read))) { -#endif if (rank0_bcast && file->mpi_rank == 0) { /* If MPI_Get_elements(_x) fails for a rank 0 bcast strategy, * push an error, but continue to participate in the following * MPI_Bcast. */ bytes_read = -1; - HMPI_DONE_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) + HMPI_DONE_ERROR(FAIL, "MPI_Get_elements failed for rank 0", mpi_code) } else HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) @@ -1418,19 +1407,11 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU * of the data. 
(QAK - 2019/1/2) */ if (rank0_bcast) -#if H5_CHECK_MPI_VERSION(3, 0) if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_COUNT, 0, file->comm)) -#else - if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_INT, 0, file->comm)) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", 0) - /* Get the type's size */ -#if H5_CHECK_MPI_VERSION(3, 0) + /* Get the type's size */ if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code) /* Compute the actual number of bytes requested */ @@ -1486,19 +1467,13 @@ static herr_t H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, haddr_t addr, size_t size, const void *buf) { - H5FD_mpio_t *file = (H5FD_mpio_t *)_file; - MPI_Offset mpi_off; - MPI_Status mpi_stat; /* Status from I/O operation */ - MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */ -#if H5_CHECK_MPI_VERSION(3, 0) - MPI_Count bytes_written; - MPI_Count type_size; /* MPI datatype used for I/O's size */ - MPI_Count io_size; /* Actual number of bytes requested */ -#else - int bytes_written; - int type_size; /* MPI datatype used for I/O's size */ - int io_size; /* Actual number of bytes requested */ -#endif + H5FD_mpio_t *file = (H5FD_mpio_t *)_file; + MPI_Offset mpi_off; + MPI_Status mpi_stat; /* Status from I/O operation */ + MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */ + MPI_Count bytes_written; + MPI_Count type_size; /* MPI datatype used for I/O's size */ + MPI_Count io_size; /* Actual number of bytes requested */ int size_i; bool use_view_this_time = false; bool derived_type = false; @@ -1642,19 +1617,11 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h } /* end else */ /* How many bytes were actually written? */ -#if H5_CHECK_MPI_VERSION(3, 0) if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_written))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_written))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) - /* Get the type's size */ -#if H5_CHECK_MPI_VERSION(3, 0) + /* Get the type's size */ if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code) /* Compute the actual number of bytes requested */ @@ -1858,7 +1825,7 @@ H5FD__mpio_vector_build_types(uint32_t count, H5FD_mem_t types[], haddr_t addrs[ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address for s_bufs[] - mpi_bufs_base failed", mpi_code) /*... and then subtract mpi_bufs_base_Aint from it. 
*/ -#if ((MPI_VERSION > 3) || ((MPI_VERSION == 3) && (MPI_SUBVERSION >= 1))) +#if H5_CHECK_MPI_VERSION(3, 1) mpi_bufs[i] = MPI_Aint_diff(mpi_bufs[i], mpi_bufs_base_Aint); #else mpi_bufs[i] = mpi_bufs[i] - mpi_bufs_base_Aint; @@ -2085,18 +2052,11 @@ H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t cou H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */ H5FD_mpio_collective_opt_t coll_opt_mode; /* whether we are doing collective or independent I/O */ int size_i; -#if MPI_VERSION >= 3 - MPI_Count bytes_read = 0; /* Number of bytes read in */ - MPI_Count type_size; /* MPI datatype used for I/O's size */ - MPI_Count io_size; /* Actual number of bytes requested */ - MPI_Count n; -#else - int bytes_read = 0; /* Number of bytes read in */ - int type_size; /* MPI datatype used for I/O's size */ - int io_size; /* Actual number of bytes requested */ - int n; -#endif - bool rank0_bcast = false; /* If read-with-rank0-and-bcast flag was used */ + MPI_Count bytes_read = 0; /* Number of bytes read in */ + MPI_Count type_size; /* MPI datatype used for I/O's size */ + MPI_Count io_size; /* Actual number of bytes requested */ + MPI_Count n; + bool rank0_bcast = false; /* If read-with-rank0-and-bcast flag was used */ #ifdef H5FDmpio_DEBUG bool H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file)); bool H5FD_mpio_debug_r_flag = (H5FD_mpio_debug_flags_s[(int)'r'] && H5FD_MPIO_TRACE_THIS_RANK(file)); @@ -2226,11 +2186,7 @@ H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t cou /* Only retrieve bytes read if this rank _actually_ participated in I/O */ if (!rank0_bcast || (rank0_bcast && file->mpi_rank == 0)) { /* How many bytes were actually read? */ -#if MPI_VERSION >= 3 if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) } /* end if */ @@ -2243,19 +2199,11 @@ H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t cou * the bcast. (NAF - 2021/9/15) */ if (rank0_bcast) -#if MPI_VERSION >= 3 if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_COUNT, 0, file->comm)) -#else - if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_INT, 0, file->comm)) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", 0) - /* Get the type's size */ -#if MPI_VERSION >= 3 + /* Get the type's size */ if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code) /* Compute the actual number of bytes requested */ @@ -2275,13 +2223,8 @@ H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t cou do { assert(i >= 0); -#if MPI_VERSION >= 3 io_size = MIN(n, (MPI_Count)s_sizes[i]); bytes_read = (MPI_Count)s_sizes[i] - io_size; -#else - io_size = MIN(n, (int)s_sizes[i]); - bytes_read = (int)s_sizes[i] - io_size; -#endif assert(bytes_read >= 0); memset((char *)s_bufs[i] + bytes_read, 0, (size_t)io_size); @@ -2359,20 +2302,12 @@ H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t cou HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code) - /* How many bytes were actually read? */ -#if MPI_VERSION >= 3 + /* How many bytes were actually read? 
*/ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, MPI_BYTE, &bytes_read))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) - /* Compute the actual number of bytes requested */ -#if MPI_VERSION >= 3 + /* Compute the actual number of bytes requested */ io_size = (MPI_Count)size; -#else - io_size = (int)size; -#endif /* Check for read failure */ if (bytes_read < 0 || bytes_read > io_size) @@ -2537,7 +2472,7 @@ H5FD__mpio_write_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t co &buf_type_created, &file_type, &file_type_created, &unused) < 0) HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't build MPI datatypes for I/O"); - /* Compute max addr writted to */ + /* Compute max address written to */ if (count > 0) max_addr = s_addrs[count - 1] + (haddr_t)(s_sizes[count - 1]); @@ -3008,18 +2943,11 @@ H5FD__mpio_read_selection(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED d void *mpi_bufs_base = NULL; char unused = 0; /* Unused, except for non-NULL pointer value */ -#if H5_CHECK_MPI_VERSION(3, 0) MPI_Count bytes_read = 0; /* Number of bytes read in */ MPI_Count type_size; /* MPI datatype used for I/O's size */ MPI_Count io_size; /* Actual number of bytes requested */ MPI_Count n; -#else - int bytes_read = 0; /* Number of bytes read in */ - int type_size; /* MPI datatype used for I/O's size */ - int io_size; /* Actual number of bytes requested */ - int n; -#endif - bool rank0_bcast = false; /* If read-with-rank0-and-bcast flag was used */ + bool rank0_bcast = false; /* If read-with-rank0-and-bcast flag was used */ #ifdef H5FDmpio_DEBUG bool H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file)); bool H5FD_mpio_debug_r_flag = (H5FD_mpio_debug_flags_s[(int)'r'] && H5FD_MPIO_TRACE_THIS_RANK(file)); @@ -3097,9 +3025,9 @@ H5FD__mpio_read_selection(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED d * s_bufs[] to find the smallest value, and choose that for * mpi_bufs_base. */ - j = 0; /* guess at the index of the smallest value of s_bufs[] */ - if (s_bufs[j + 1].vp != NULL) { + j = 0; /* guess at the index of the smallest value of s_bufs[] */ + if ((count > 1) && (s_bufs[1].vp != NULL)) { for (i = 1; i < count; i++) if (s_bufs[i].vp < s_bufs[j].vp) j = i; @@ -3209,11 +3137,7 @@ H5FD__mpio_read_selection(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED d /* Only retrieve bytes read if this rank _actually_ participated in I/O */ if (!rank0_bcast || (rank0_bcast && file->mpi_rank == 0)) { /* How many bytes were actually read? */ -#if H5_CHECK_MPI_VERSION(3, 0) if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, final_mtype, &bytes_read))) { -#else - if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read))) { -#endif if (rank0_bcast && file->mpi_rank == 0) { /* If MPI_Get_elements(_x) fails for a rank 0 bcast strategy, * push an error, but continue to participate in the following @@ -3234,19 +3158,11 @@ H5FD__mpio_read_selection(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED d * of the data. 
(QAK - 2019/1/2) */ if (rank0_bcast) -#if H5_CHECK_MPI_VERSION(3, 0) if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_COUNT, 0, file->comm)) -#else - if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_INT, 0, file->comm)) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", 0); - /* Get the type's size */ -#if H5_CHECK_MPI_VERSION(3, 0) + /* Get the type's size */ if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(final_mtype, &type_size))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Type_size(final_mtype, &type_size))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code); /* Compute the actual number of bytes requested */ @@ -3382,15 +3298,9 @@ H5FD__mpio_write_selection(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED char unused = 0; /* Unused, except for non-NULL pointer value */ H5_flexible_const_ptr_t mbb; -#if H5_CHECK_MPI_VERSION(3, 0) MPI_Count bytes_written; MPI_Count type_size; /* MPI datatype used for I/O's size */ MPI_Count io_size; /* Actual number of bytes requested */ -#else - int bytes_written; - int type_size; /* MPI datatype used for I/O's size */ - int io_size; /* Actual number of bytes requested */ -#endif #ifdef H5FDmpio_DEBUG bool H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file)); @@ -3465,9 +3375,9 @@ H5FD__mpio_write_selection(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED * s_bufs[] to find the smallest value, and choose that for * mpi_bufs_base. */ - j = 0; /* guess at the index of the smallest value of s_bufs[] */ - if (s_bufs[j + 1].cvp != NULL) { + j = 0; /* guess at the index of the smallest value of s_bufs[] */ + if ((count > 1) && (s_bufs[1].cvp != NULL)) { for (i = 1; i < count; i++) if (s_bufs[i].cvp < s_bufs[j].cvp) j = i; @@ -3559,20 +3469,12 @@ H5FD__mpio_write_selection(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED H5FD_mpi_native_g, file->info))) HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code); - /* How many bytes were actually written */ -#if H5_CHECK_MPI_VERSION(3, 0) + /* How many bytes were actually written */ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, final_mtype, &bytes_written))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_written))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code); - /* Get the type's size */ -#if H5_CHECK_MPI_VERSION(3, 0) + /* Get the type's size */ if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(final_mtype, &type_size))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Type_size(final_mtype, &type_size))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code); /* Compute the actual number of bytes requested */ diff --git a/src/H5FDmpio.h b/src/H5FDmpio.h index 96a93668ea7..60deec2c07b 100644 --- a/src/H5FDmpio.h +++ b/src/H5FDmpio.h @@ -48,8 +48,8 @@ H5_DLL hid_t H5FD_mpio_init(void); * \brief Stores MPI IO communicator information to the file access property list * * \fapl_id - * \param[in] comm MPI-2 communicator - * \param[in] info MPI-2 info object + * \param[in] comm MPI communicator + * \param[in] info MPI info object * \returns \herr_t * * \details H5Pset_fapl_mpio() stores the user-supplied MPI IO parameters \p @@ -61,12 +61,12 @@ H5_DLL hid_t H5FD_mpio_init(void); * and is not a collective function. * * \p comm is the MPI communicator to be used for file open, as defined - * in \c MPI_File_open of MPI-2. This function makes a duplicate of the + * in \c MPI_File_open of MPI. 
This function makes a duplicate of the * communicator, so modifications to \p comm after this function call * returns have no effect on the file access property list. * * \p info is the MPI Info object to be used for file open, as defined - * in MPI_File_open() of MPI-2. This function makes a duplicate copy of + * in MPI_File_open() of MPI. This function makes a duplicate copy of * the Info object, so modifications to the Info object after this * function call returns will have no effect on the file access * property list. @@ -96,8 +96,8 @@ H5_DLL herr_t H5Pset_fapl_mpio(hid_t fapl_id, MPI_Comm comm, MPI_Info info); * \brief Returns MPI IO communicator information * * \fapl_id - * \param[out] comm MPI-2 communicator - * \param[out] info MPI-2 info object + * \param[out] comm MPI communicator + * \param[out] info MPI info object * \returns \herr_t * * \details If the file access property list is set to the #H5FD_MPIO driver, diff --git a/src/H5FDonion.c b/src/H5FDonion.c index 5c0994cab3c..a16338b1dc4 100644 --- a/src/H5FDonion.c +++ b/src/H5FDonion.c @@ -492,7 +492,7 @@ H5FD__onion_commit_new_revision_record(H5FD_onion_t *file) /* Update history info to accommodate new revision */ if (history->n_revisions == 0) { - unsigned char *ptr = buf; /* re-use buffer space to compute checksum */ + unsigned char *ptr = buf; /* reuse buffer space to compute checksum */ assert(history->record_locs == NULL); history->n_revisions = 1; @@ -508,7 +508,7 @@ H5FD__onion_commit_new_revision_record(H5FD_onion_t *file) file->header.history_size += H5FD_ONION_ENCODED_SIZE_RECORD_POINTER; } /* end if no extant revisions in history */ else { - unsigned char *ptr = buf; /* re-use buffer space to compute checksum */ + unsigned char *ptr = buf; /* reuse buffer space to compute checksum */ assert(history->record_locs != NULL); diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h index 0eceb2fa63a..5f40bff6845 100644 --- a/src/H5FDpublic.h +++ b/src/H5FDpublic.h @@ -76,7 +76,7 @@ /* * Defining H5FD_FEAT_DATA_SIEVE for a VFL driver means that * the library will attempt to cache raw data as it is read from/written to - * a file in a "data seive" buffer. See Rajeev Thakur's papers: + * a file in a "data sieve" buffer. See Rajeev Thakur's papers: * http://www.mcs.anl.gov/~thakur/papers/romio-coll.ps.gz * http://www.mcs.anl.gov/~thakur/papers/mpio-high-perf.ps.gz */ diff --git a/src/H5FDros3.c b/src/H5FDros3.c index 2137703d51a..3f3413c6d0e 100644 --- a/src/H5FDros3.c +++ b/src/H5FDros3.c @@ -960,7 +960,7 @@ H5FD__ros3_open(const char *url, unsigned flags, hid_t fapl_id, haddr_t maxaddr) if (fa.authenticate == true) { /* compute signing key (part of AWS/S3 REST API) * can be re-used by user/key for 7 days after creation. - * find way to re-use/share + * find way to reuse/share */ now = gmnow(); assert(now != NULL); @@ -1084,7 +1084,7 @@ ros3_fprint_stats(FILE *stream, const H5FD_ros3_t *file) unsigned long long max_raw = 0; unsigned long long bytes_raw = 0; unsigned long long bytes_meta = 0; - double re_dub = 0.0; /* re-usable double variable */ + double re_dub = 0.0; /* reusable double variable */ unsigned suffix_i = 0; const char suffixes[] = {' ', 'K', 'M', 'G', 'T', 'P'}; diff --git a/src/H5FDs3comms.c b/src/H5FDs3comms.c index 2255ec00179..58fc4355e63 100644 --- a/src/H5FDs3comms.c +++ b/src/H5FDs3comms.c @@ -571,7 +571,7 @@ H5FD_s3comms_hrb_node_set(hrb_node_t **L, const char *name, const char *value) * * Headers list at `first_header` is not touched. 
* - * - Programmer should re-use or destroy `first_header` pointer + * - Programmer should reuse or destroy `first_header` pointer * (hrb_node_t *) as suits their purposes. * - Recommend fetching prior to destroy() * e.g., `reuse_node = hrb_to_die->first_header; destroy(hrb_to_die);` @@ -2531,7 +2531,7 @@ H5FD_s3comms_percent_encode_char(char *repr, const unsigned char c, size_t *repr * Create AWS4 "Signing Key" from secret key, AWS region, and timestamp. * * Sequentially runs HMAC_SHA256 on strings in specified order, - * generating re-usable checksum (according to documentation, valid for + * generating reusable checksum (according to documentation, valid for * 7 days from time given). * * `secret` is `access key id` for targeted service/bucket/resource. diff --git a/src/H5FDs3comms.h b/src/H5FDs3comms.h index b29d2d8c58d..120a71a9c85 100644 --- a/src/H5FDs3comms.h +++ b/src/H5FDs3comms.h @@ -211,7 +211,7 @@ * * `magic` (unsigned long) * - * "unique" idenfier number for the structure type + * "unique" identifier number for the structure type * * `name` (char *) * @@ -458,7 +458,7 @@ typedef struct { * * `signing_key` (unsigned char *) * - * Pointer to `SHA256_DIGEST_LENGTH`-long string for "re-usable" signing + * Pointer to `SHA256_DIGEST_LENGTH`-long string for "reusable" signing * key, generated via * `HMAC-SHA256(HMAC-SHA256(HMAC-SHA256(HMAC-SHA256("AWS4", * ""), ""), "aws4_request")` diff --git a/src/H5FDsubfiling/H5FDioc_threads.c b/src/H5FDsubfiling/H5FDioc_threads.c index 0b82b8f15a9..c86157bb6f2 100644 --- a/src/H5FDsubfiling/H5FDioc_threads.c +++ b/src/H5FDsubfiling/H5FDioc_threads.c @@ -33,15 +33,15 @@ typedef struct ioc_data_t { hg_thread_pool_t *io_thread_pool; int64_t sf_context_id; - /* sf_io_ops_pending is use to track the number of I/O operations pending so that we can wait + atomic_int sf_ioc_ready; + atomic_int sf_shutdown_flag; + /* sf_io_ops_pending tracks the number of I/O operations pending so that we can wait * until all I/O operations have been serviced before shutting down the worker thread pool. * The value of this variable must always be non-negative. * * Note that this is a convenience variable -- we could use io_queue.q_len instead. * However, accessing this field requires locking io_queue.q_mutex. */ - atomic_int sf_ioc_ready; - atomic_int sf_shutdown_flag; atomic_int sf_io_ops_pending; atomic_int sf_work_pending; } ioc_data_t; diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index c15fd2c01fd..d8616c8c1b4 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -2031,7 +2031,7 @@ H5FD__subfiling_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_ * The contents of supplied buffers are undefined. * * Notes: This function doesn't actually implement vector read. - * Instead, it comverts the vector read call into a series + * Instead, it converts the vector read call into a series * of scalar read calls. Fix this when time permits. * * Also, it doesn't support the sizes and types optimization. @@ -2191,7 +2191,7 @@ H5FD__subfiling_read_vector(H5FD_t *_file, hid_t dxpl_id, uint32_t count, H5FD_m * subfiling writes have failed for some reason. * * Notes: This function doesn't actually implement vector write. - * Instead, it comverts the vector write call into a series + * Instead, it converts the vector write call into a series * of scalar write calls. Fix this when time permits. * * Also, it doesn't support the sizes and types optimization. 
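The "vector converted into scalar calls" note above boils down to the shape below. A minimal sketch under stated assumptions: scalar_read_fn and all argument names are illustrative stand-ins rather than the real H5FD hooks, and the sizes-and-types shorthand the note mentions is deliberately not handled.

#include <stddef.h>
#include <stdint.h>

typedef uint64_t addr_ex_t; /* illustrative stand-in for haddr_t */

/* One scalar read; illustrative signature, not the real H5FD callback */
typedef int (*scalar_read_fn)(void *ctx, addr_ex_t addr, size_t size, void *buf);

/* Serve a "vector" read by looping scalar reads, one per element */
static int
read_vector_as_scalars(void *ctx, scalar_read_fn read_one, uint32_t count,
                       const addr_ex_t addrs[], const size_t sizes[], void *bufs[])
{
    for (uint32_t i = 0; i < count; i++)
        if (read_one(ctx, addrs[i], sizes[i], bufs[i]) < 0)
            return -1; /* stop at the first failed element */
    return 0;
}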
diff --git a/src/H5FDsubfiling/H5subfiling_common.c b/src/H5FDsubfiling/H5subfiling_common.c index 37fd5efad08..f9cc0dccf88 100644 --- a/src/H5FDsubfiling/H5subfiling_common.c +++ b/src/H5FDsubfiling/H5subfiling_common.c @@ -618,7 +618,7 @@ H5_open_subfiling_stub_file(const char *name, unsigned flags, MPI_Comm file_comm * new MPI communicators that facilitate messaging between * HDF5 clients and the IOCs. * - * Return: Success (0) or Faiure (non-zero) + * Return: Success (0) or Failure (non-zero) * Errors: If MPI operations fail for some reason. * *------------------------------------------------------------------------- @@ -1951,7 +1951,7 @@ init_subfiling_context(subfiling_context_t *sf_context, const char *base_filenam * discovery. The number and mapping of IOC to MPI_rank * is part of the sf_context->topology structure. * - * Return: Success (0) or Faiure (non-zero) + * Return: Success (0) or Failure (non-zero) * Errors: If MPI operations fail for some reason. * *------------------------------------------------------------------------- @@ -2686,7 +2686,7 @@ H5_resolve_pathname(const char *filepath, MPI_Comm comm, char **resolved_filepat * which actually manages all subfile closing via commands * to the set of IO Concentrators. * - * Return: Success (0) or Faiure (non-zero) + * Return: Success (0) or Failure (non-zero) * Errors: If MPI operations fail for some reason. * *------------------------------------------------------------------------- @@ -2707,7 +2707,7 @@ H5_resolve_pathname(const char *filepath, MPI_Comm comm, char **resolved_filepat * Once the subfiles are closed, we initiate a teardown of * the IOC and associated thread_pool threads. * - * Return: Success (0) or Faiure (non-zero) + * Return: Success (0) or Failure (non-zero) * Errors: If MPI operations fail for some reason. * *------------------------------------------------------------------------- diff --git a/src/H5FLprivate.h b/src/H5FLprivate.h index 3b9a84a4b59..348cfda542d 100644 --- a/src/H5FLprivate.h +++ b/src/H5FLprivate.h @@ -201,7 +201,7 @@ typedef struct H5FL_blk_head_t { #define H5FL_BLK_REALLOC(t, blk, new_size) \ (uint8_t *)H5FL_blk_realloc(&(H5FL_BLK_NAME(t)), blk, new_size H5FL_TRACK_INFO) -/* Check if there is a free block available to re-use */ +/* Check if there is a free block available to reuse */ #define H5FL_BLK_AVAIL(t, size) H5FL_blk_free_block_avail(&(H5FL_BLK_NAME(t)), size) #else /* H5_NO_BLK_FREE_LISTS */ diff --git a/src/H5Fmodule.h b/src/H5Fmodule.h index 8a3f8df9520..40ff6b4d703 100644 --- a/src/H5Fmodule.h +++ b/src/H5Fmodule.h @@ -1045,7 +1045,7 @@ * The file access properties managed by #H5Pset_fapl_mpio and retrieved by * #H5Pget_fapl_mpio are the MPI communicator, comm, and the MPI info object, info. comm and * info are used for file open. info is an information object much like an HDF5 property list. Both - * are defined in MPI_FILE_OPEN of MPI-2. + * are defined in MPI_FILE_OPEN of MPI. * * The communicator and the info object are saved in the file access property list fapl_id. * fapl_id can then be passed to MPI_FILE_OPEN to create and/or open the file. 
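The comm/info duplication semantics documented above reduce to the following usage pattern. A minimal sketch assuming a parallel (MPI-enabled) HDF5 build; the file name is arbitrary and error checking is omitted for brevity:

#include "hdf5.h"
#include <mpi.h>

int
main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    /* The library duplicates both comm and info, so the caller may modify
     * or free them after H5Pset_fapl_mpio() returns without affecting fapl */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    /* Collective create; the duplicated comm/info are handed to MPI_File_open() internally */
    hid_t file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    H5Fclose(file);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}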
diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c index 4abc2265d45..8a8fdc135c8 100644 --- a/src/H5Fmpi.c +++ b/src/H5Fmpi.c @@ -407,16 +407,39 @@ H5F_mpi_retrieve_comm(hid_t loc_id, hid_t acspl_id, MPI_Comm *mpi_comm) */ bool H5F_get_coll_metadata_reads(const H5F_t *file) +{ + FUNC_ENTER_NOAPI_NOERR + + assert(file && file->shared); + + FUNC_LEAVE_NOAPI(H5F_shared_get_coll_metadata_reads(file->shared)); +} /* end H5F_get_coll_metadata_reads() */ + +/*------------------------------------------------------------------------- + * Function: H5F_shared_get_coll_metadata_reads + * + * Purpose: Determines whether collective metadata reads should be + * performed. This routine is meant to be the single source of + * truth for the collective metadata reads status, as it + * coordinates between the file-global flag and the flag set + * for the current operation in the current API context. + * + * Return: true/false (can't fail) + * + *------------------------------------------------------------------------- + */ +bool +H5F_shared_get_coll_metadata_reads(const H5F_shared_t *f_sh) { H5P_coll_md_read_flag_t file_flag = H5P_USER_FALSE; bool ret_value = false; FUNC_ENTER_NOAPI_NOERR - assert(file && file->shared); + assert(f_sh); /* Retrieve the file-global flag */ - file_flag = H5F_COLL_MD_READ(file); + file_flag = H5F_SHARED_COLL_MD_READ(f_sh); /* If file flag is set to H5P_FORCE_FALSE, exit early * with false, since collective metadata reads have @@ -442,7 +465,7 @@ H5F_get_coll_metadata_reads(const H5F_t *file) } FUNC_LEAVE_NOAPI(ret_value) -} /* end H5F_get_coll_metadata_reads() */ +} /* end H5F_shared_get_coll_metadata_reads() */ /*------------------------------------------------------------------------- * Function: H5F_set_coll_metadata_reads diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 5b232c5444b..9adbf3a0258 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -85,7 +85,8 @@ typedef struct H5F_t H5F_t; #define H5F_USE_TMP_SPACE(F) ((F)->shared->fs.use_tmp_space) #define H5F_IS_TMP_ADDR(F, ADDR) (H5_addr_le((F)->shared->fs.tmp_addr, (ADDR))) #ifdef H5_HAVE_PARALLEL -#define H5F_COLL_MD_READ(F) ((F)->shared->coll_md_read) +#define H5F_COLL_MD_READ(F) ((F)->shared->coll_md_read) +#define H5F_SHARED_COLL_MD_READ(F_SH) ((F_SH)->coll_md_read) #endif /* H5_HAVE_PARALLEL */ #define H5F_USE_MDC_LOGGING(F) ((F)->shared->use_mdc_logging) #define H5F_START_MDC_LOG_ON_ACCESS(F) ((F)->shared->start_mdc_log_on_access) @@ -148,7 +149,8 @@ typedef struct H5F_t H5F_t; #define H5F_USE_TMP_SPACE(F) (H5F_use_tmp_space(F)) #define H5F_IS_TMP_ADDR(F, ADDR) (H5F_is_tmp_addr((F), (ADDR))) #ifdef H5_HAVE_PARALLEL -#define H5F_COLL_MD_READ(F) (H5F_coll_md_read(F)) +#define H5F_COLL_MD_READ(F) (H5F_coll_md_read(F)) +#define H5F_SHARED_COLL_MD_READ(F_SH) (H5F_shared_coll_md_read(F_SH)) #endif /* H5_HAVE_PARALLEL */ #define H5F_USE_MDC_LOGGING(F) (H5F_use_mdc_logging(F)) #define H5F_START_MDC_LOG_ON_ACCESS(F) (H5F_start_mdc_log_on_access(F)) @@ -556,6 +558,7 @@ H5_DLL hsize_t H5F_get_alignment(const H5F_t *f); H5_DLL hsize_t H5F_get_threshold(const H5F_t *f); #ifdef H5_HAVE_PARALLEL H5_DLL H5P_coll_md_read_flag_t H5F_coll_md_read(const H5F_t *f); +H5_DLL H5P_coll_md_read_flag_t H5F_shared_coll_md_read(const H5F_shared_t *f_sh); #endif /* H5_HAVE_PARALLEL */ H5_DLL bool H5F_use_mdc_logging(const H5F_t *f); H5_DLL bool H5F_start_mdc_log_on_access(const H5F_t *f); @@ -642,6 +645,7 @@ H5_DLL int H5F_mpi_get_size(const H5F_t *f); H5_DLL herr_t H5F_mpi_retrieve_comm(hid_t loc_id, hid_t acspl_id, MPI_Comm *mpi_comm); H5_DLL
herr_t H5F_mpi_get_file_block_type(bool commit, MPI_Datatype *new_type, bool *new_type_derived); H5_DLL bool H5F_get_coll_metadata_reads(const H5F_t *f); +H5_DLL bool H5F_shared_get_coll_metadata_reads(const H5F_shared_t *f_sh); H5_DLL void H5F_set_coll_metadata_reads(H5F_t *f, H5P_coll_md_read_flag_t *file_flag, bool *context_flag); H5_DLL herr_t H5F_shared_get_mpi_file_sync_required(const H5F_shared_t *f_sh, bool *flag); #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Fquery.c b/src/H5Fquery.c index af120a43a78..44a52c8dbfc 100644 --- a/src/H5Fquery.c +++ b/src/H5Fquery.c @@ -1054,11 +1054,31 @@ H5F_coll_md_read(const H5F_t *f) /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ FUNC_ENTER_NOAPI_NOINIT_NOERR - assert(f); + assert(f && f->shared); FUNC_LEAVE_NOAPI(f->shared->coll_md_read) } /* end H5F_coll_md_read() */ +/*------------------------------------------------------------------------- + * Function: H5F_shared_coll_md_read + * + * Purpose: Retrieve the 'collective metadata reads' flag for the file. + * + * Return: Success: Non-negative, the 'collective metadata reads' flag + * Failure: (can't happen) + *------------------------------------------------------------------------- + */ +H5P_coll_md_read_flag_t +H5F_shared_coll_md_read(const H5F_shared_t *f_sh) +{ + /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + assert(f_sh); + + FUNC_LEAVE_NOAPI(f_sh->coll_md_read) +} /* end H5F_shared_coll_md_read() */ + /*------------------------------------------------------------------------- * Function: H5F_shared_get_mpi_file_sync_required * diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index a21ffa2a847..cc046803162 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -809,7 +809,7 @@ H5_DLL herr_t H5Gmove2(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, * any object identifier is open for the object, the space will not be * released until after the object identifier is closed. * - * Note that space identified as freespace is available for re-use only + * Note that space identified as freespace is available for reuse only * as long as the file remains open; once a file has been closed, the * HDF5 library loses track of freespace. See “Freespace Management” in * the \ref UG for further details. diff --git a/src/H5HFcache.c b/src/H5HFcache.c index 9f2fa0a56fc..ad8e4d24227 100644 --- a/src/H5HFcache.c +++ b/src/H5HFcache.c @@ -2521,7 +2521,7 @@ H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size) * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. * - * This means that the fractal heap is no longer ncessarily + * This means that the fractal heap is no longer necessarily * flushed from the bottom up. * * For example, it is now possible for a dirty fractal heap @@ -2881,7 +2881,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, bool *fd_cle * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. * - * This means that the fractal heap is no longer ncessarily + * This means that the fractal heap is no longer necessarily * flushed from the bottom up. * * For example, it is now possible for a dirty fractal heap @@ -2994,7 +2994,7 @@ H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, haddr_t fd_parent_addr, H5 * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. 
* - * This means that the fractal heap is no longer ncessarily + * This means that the fractal heap is no longer necessarily * flushed from the bottom up. * * For example, it is now possible for a dirty fractal heap @@ -3153,7 +3153,7 @@ H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr, H5HF_ * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. * - * This means that the fractal heap is no longer ncessarily + * This means that the fractal heap is no longer necessarily * flushed from the bottom up. * * For example, it is now possible for a dirty fractal heap diff --git a/src/H5Iint.c b/src/H5Iint.c index 63557c5e6ed..7d8b4acd0cc 100644 --- a/src/H5Iint.c +++ b/src/H5Iint.c @@ -1149,7 +1149,7 @@ H5I_dec_app_ref_async(hid_t id, void **token) /* Sanity check */ assert(id >= 0); - /* [Possibly] aynchronously decrement refcount on ID */ + /* [Possibly] asynchronously decrement refcount on ID */ if ((ret_value = H5I__dec_app_ref(id, token)) < 0) HGOTO_ERROR(H5E_ID, H5E_CANTDEC, (-1), "can't asynchronously decrement ID ref count"); @@ -1254,7 +1254,7 @@ H5I_dec_app_ref_always_close_async(hid_t id, void **token) /* Sanity check */ assert(id >= 0); - /* [Possibly] aynchronously decrement refcount on ID */ + /* [Possibly] asynchronously decrement refcount on ID */ if ((ret_value = H5I__dec_app_ref_always_close(id, token)) < 0) HGOTO_ERROR(H5E_ID, H5E_CANTDEC, (-1), "can't asynchronously decrement ID ref count"); diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c index 80d25f26a3b..fa39c7256ef 100644 --- a/src/H5MFaggr.c +++ b/src/H5MFaggr.c @@ -182,7 +182,7 @@ H5MF__aggr_alloc(H5F_t *f, H5F_blk_aggr_t *aggr, H5F_blk_aggr_t *other_aggr, H5F if ((f->shared->feature_flags & aggr->feature_flag) && f->shared->fs_strategy != H5F_FSPACE_STRATEGY_NONE && !f->shared->closing) { #endif - haddr_t aggr_frag_addr = HADDR_UNDEF; /* Address of aggregrator fragment */ + haddr_t aggr_frag_addr = HADDR_UNDEF; /* Address of aggregator fragment */ hsize_t aggr_frag_size = 0; /* Size of aggregator fragment */ hsize_t alignment; /* Alignment of this section */ hsize_t aggr_mis_align = 0; /* Misalignment of aggregator */ @@ -472,7 +472,7 @@ H5MF__aggr_alloc(H5F_t *f, H5F_blk_aggr_t *aggr, H5F_blk_aggr_t *other_aggr, H5F } /* end else */ } /* end if */ else { - /* The aggreator is not at end of file */ + /* The aggregator is not at end of file */ /* Check if aggregator has enough internal space to satisfy the extension. */ if (aggr->size >= extra_requested) { /* Extend block into aggregator */ diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c index d7ede47623c..4eadc315e05 100644 --- a/src/H5Oalloc.c +++ b/src/H5Oalloc.c @@ -1810,7 +1810,7 @@ H5O__move_msgs_forward(H5F_t *f, H5O_t *oh) null_msg->raw + null_msg->raw_size, gap_size) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk"); - /* Re-use message # for new null message taking place of non-null message */ + /* Reuse message # for new null message taking place of non-null message */ new_null_msg = v; } /* end if */ else { diff --git a/src/H5Oefl.c b/src/H5Oefl.c index 571c8da44cd..c06ecf68694 100644 --- a/src/H5Oefl.c +++ b/src/H5Oefl.c @@ -346,7 +346,7 @@ H5O__efl_size(const H5F_t *f, bool H5_ATTR_UNUSED disable_shared, const void *_m * Function: H5O__efl_reset * * Purpose: Frees internal pointers and resets the message to an - * initialial state. + * initial state. 
* * Return: Non-negative on success/Negative on failure * diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index bf448a6bc4a..5f5782cae3b 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -2596,10 +2596,10 @@ H5Pget_meta_block_size(hid_t plist_id, hsize_t *size /*out*/) /*------------------------------------------------------------------------- * Function: H5Pset_sieve_buf_size * - * Purpose: Sets the maximum size of the data seive buffer used for file + * Purpose: Sets the maximum size of the data sieve buffer used for file * drivers which are capable of using data sieving. The data sieve * buffer is used when performing I/O on datasets in the file. Using a - * buffer which is large anough to hold several pieces of the dataset + * buffer which is large enough to hold several pieces of the dataset * being read in for hyperslab selections boosts performance by quite a * bit. * diff --git a/src/H5Pint.c b/src/H5Pint.c index da7f8870bde..f6dbb2706c1 100644 --- a/src/H5Pint.c +++ b/src/H5Pint.c @@ -4102,7 +4102,7 @@ H5P_object_verify(hid_t plist_id, hid_t pclass_id) /* Compare the property list's class against the other class */ if (H5P_isa_class(plist_id, pclass_id) != true) - HGOTO_ERROR(H5E_PLIST, H5E_CANTREGISTER, NULL, "property list is not a member of the class"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTCOMPARE, NULL, "property list is not a member of the class"); /* Get the plist structure */ if (NULL == (ret_value = (H5P_genplist_t *)H5I_object(plist_id))) diff --git a/src/H5Smpio.c b/src/H5Smpio.c index 10a9cd5b38f..1e0fef5c3b7 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -197,13 +197,9 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint * int *inner_blocks = NULL; /* Arrays for MPI datatypes when "large" datatype needed */ MPI_Aint *inner_disps = NULL; MPI_Datatype *inner_types = NULL; -#if MPI_VERSION < 3 - int *blocks = NULL; /* Array of block sizes for MPI hindexed create call */ - hsize_t u; /* Local index variable */ -#endif - hsize_t bigio_count; /* Transition point to create derived type */ - int mpi_code; /* MPI error code */ - herr_t ret_value = SUCCEED; /* Return value */ + hsize_t bigio_count; /* Transition point to create derived type */ + int mpi_code; /* MPI error code */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -216,24 +212,10 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint * /* Check whether standard or BIGIO processing will be employeed */ if (bigio_count >= num_points) { -#if H5_CHECK_MPI_VERSION(3, 0) /* Create an MPI datatype for the whole point selection */ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code) -#else - /* Allocate block sizes for MPI datatype call */ - if (NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks"); - - for (u = 0; u < num_points; u++) - blocks[u] = 1; - - /* Create an MPI datatype for the whole point selection */ - if (MPI_SUCCESS != - (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) -#endif /* Commit MPI datatype for later use */ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type))) @@ -267,43 +249,20 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint * if (NULL == (inner_disps = 
(MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * (size_t)total_types))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks"); -#if MPI_VERSION < 3 - /* Allocate block sizes for MPI datatype call */ - if (NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * bigio_count))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks"); - - for (u = 0; u < bigio_count; u++) - blocks[u] = 1; -#endif - for (i = 0; i < num_big_types; i++) { -#if H5_CHECK_MPI_VERSION(3, 0) if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)bigio_count, 1, &disp[(hsize_t)i * bigio_count], elmt_type, &inner_types[i]))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code); -#else - if (MPI_SUCCESS != - (mpi_code = MPI_Type_create_hindexed((int)bigio_count, blocks, &disp[i * bigio_count], - elmt_type, &inner_types[i]))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) -#endif inner_blocks[i] = 1; inner_disps[i] = 0; } /* end for*/ if (remaining_points) { -#if H5_CHECK_MPI_VERSION(3, 0) if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block( remaining_points, 1, &disp[(hsize_t)num_big_types * bigio_count], elmt_type, &inner_types[num_big_types]))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code); -#else - if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)remaining_points, blocks, - &disp[num_big_types * bigio_count], - elmt_type, &inner_types[num_big_types]))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) -#endif inner_blocks[num_big_types] = 1; inner_disps[num_big_types] = 0; } @@ -323,10 +282,6 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint * done: if (elmt_type_created) MPI_Type_free(&elmt_type); -#if MPI_VERSION < 3 - if (blocks) - H5MM_free(blocks); -#endif if (inner_types) H5MM_free(inner_types); if (inner_blocks) diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c index 4f43a79cf99..70b0930fa90 100644 --- a/src/H5Tcommit.c +++ b/src/H5Tcommit.c @@ -1296,7 +1296,7 @@ H5T_get_actual_type(H5T_t *dt) /*------------------------------------------------------------------------- * Function: H5T_save_refresh_state * - * Purpose: Save state for datatype reconstuction after a refresh. + * Purpose: Save state for datatype reconstruction after a refresh. * * Return: SUCCEED/FAIL * @@ -1336,7 +1336,7 @@ H5T_save_refresh_state(hid_t tid, H5O_shared_t *cached_H5O_shared) /*------------------------------------------------------------------------- * Function: H5T_restore_refresh_state * - * Purpose: Restore state for datatype reconstuction after a refresh. + * Purpose: Restore state for datatype reconstruction after a refresh. * * Return: SUCCEED/FAIL * diff --git a/src/H5Tnative.c b/src/H5Tnative.c index 757474819c6..f83e9c3a646 100644 --- a/src/H5Tnative.c +++ b/src/H5Tnative.c @@ -579,7 +579,7 @@ H5T__get_native_integer(size_t prec, H5T_sign_t sign, H5T_direction_t direction, match = H5T_NATIVE_INT_MATCH_LLONG; native_size = sizeof(long long); } - else { /* If no native type matches the querried datatype, simply choose the type of biggest size. */ + else { /* If no native type matches the queried datatype, simply choose the type of biggest size. 
*/ match = H5T_NATIVE_INT_MATCH_LLONG; native_size = sizeof(long long); } @@ -838,7 +838,7 @@ H5T__get_native_bitfield(size_t prec, H5T_direction_t direction, size_t *struct_ native_size = 8; align = H5T_NATIVE_UINT64_ALIGN_g; } - else { /* If no native type matches the querried datatype, simply choose the type of biggest size. */ + else { /* If no native type matches the queried datatype, simply choose the type of biggest size. */ tid = H5T_NATIVE_B64; native_size = 8; align = H5T_NATIVE_UINT64_ALIGN_g; diff --git a/src/H5VLnative_dataset.c b/src/H5VLnative_dataset.c index 90541571bc7..a58eb51b9a2 100644 --- a/src/H5VLnative_dataset.c +++ b/src/H5VLnative_dataset.c @@ -97,6 +97,16 @@ H5VL__native_dataset_io_setup(size_t count, void *obj[], hid_t mem_type_id[], hi /* Iterate over datasets */ for (i = 0; i < count; i++) { + /* Initialize fields not set here to prevent use of uninitialized */ + memset(&dinfo[i].layout_ops, 0, sizeof(dinfo[i].layout_ops)); + memset(&dinfo[i].io_ops, 0, sizeof(dinfo[i].io_ops)); + memset(&dinfo[i].layout_io_info, 0, sizeof(dinfo[i].layout_io_info)); + memset(&dinfo[i].type_info, 0, sizeof(dinfo[i].type_info)); + dinfo[i].store = NULL; + dinfo[i].layout = NULL; + dinfo[i].nelmts = 0; + dinfo[i].skip_io = false; + /* Set up dset */ dinfo[i].dset = (H5D_t *)obj[i]; assert(dinfo[i].dset); diff --git a/src/H5WB.c b/src/H5WB.c index 7be258d8f91..d9b9c5e9ef3 100644 --- a/src/H5WB.c +++ b/src/H5WB.c @@ -153,10 +153,10 @@ H5WB_actual(H5WB_t *wb, size_t need) /* Sanity check */ assert(wb->actual_size > wb->wrapped_size); - /* Check if we can re-use existing buffer */ + /* Check if we can reuse existing buffer */ if (need <= wb->alloc_size) HGOTO_DONE(wb->actual_buf); - /* Can't re-use existing buffer, free it and proceed */ + /* Can't reuse existing buffer, free it and proceed */ else wb->actual_buf = H5FL_BLK_FREE(extra_buf, wb->actual_buf); } /* end if */ diff --git a/src/H5Zfletcher32.c b/src/H5Zfletcher32.c index e5b98be66d5..37f13819ab9 100644 --- a/src/H5Zfletcher32.c +++ b/src/H5Zfletcher32.c @@ -104,7 +104,7 @@ H5Z__filter_fletcher32(unsigned flags, size_t H5_ATTR_UNUSED cd_nelmts, } /* Set return values */ - /* (Re-use the input buffer, just note that the size is smaller by the size of the checksum) */ + /* (Reuse the input buffer, just note that the size is smaller by the size of the checksum) */ ret_value = nbytes - FLETCHER_LEN; } else { /* Write */ diff --git a/src/H5mpi.c b/src/H5mpi.c index 005a99aa6ae..2725ec5bf19 100644 --- a/src/H5mpi.c +++ b/src/H5mpi.c @@ -619,17 +619,12 @@ H5_mpio_gatherv_alloc(void *send_buf, int send_count, MPI_Datatype send_type, co const int displacements[], MPI_Datatype recv_type, bool allgather, int root, MPI_Comm comm, int mpi_rank, int mpi_size, void **out_buf, size_t *out_buf_num_entries) { - size_t recv_buf_num_entries = 0; - void *recv_buf = NULL; -#if H5_CHECK_MPI_VERSION(3, 0) + size_t recv_buf_num_entries = 0; + void *recv_buf = NULL; MPI_Count type_lb; MPI_Count type_extent; -#else - MPI_Aint type_lb; - MPI_Aint type_extent; -#endif - int mpi_code; - herr_t ret_value = SUCCEED; + int mpi_code; + herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI(FAIL) @@ -637,12 +632,8 @@ H5_mpio_gatherv_alloc(void *send_buf, int send_count, MPI_Datatype send_type, co if (allgather || (mpi_rank == root)) assert(out_buf && out_buf_num_entries); - /* Retrieve the extent of the MPI Datatype being used */ -#if H5_CHECK_MPI_VERSION(3, 0) + /* Retrieve the extent of the MPI Datatype being used */ if (MPI_SUCCESS != (mpi_code = 
MPI_Type_get_extent_x(recv_type, &type_lb, &type_extent))) -#else - if (MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(recv_type, &type_lb, &type_extent))) -#endif HMPI_GOTO_ERROR(FAIL, "MPI_Type_get_extent(_x) failed", mpi_code) if (type_extent < 0) diff --git a/src/H5private.h b/src/H5private.h index a77624aea69..14a0ac3225f 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -1582,7 +1582,7 @@ H5_DLL herr_t H5CX_pop(bool update_dxpl_props); H5_PUSH_FUNC \ { -/* Use this macro for package-level functions which propgate errors, but don't issue them */ +/* Use this macro for package-level functions which propagate errors, but don't issue them */ #define FUNC_ENTER_PACKAGE_NOERR \ { \ FUNC_ENTER_COMMON_NOERR(H5_IS_PKG(__func__)); \ H5_PUSH_FUNC \ { @@ -1600,7 +1600,7 @@ H5_DLL herr_t H5CX_pop(bool update_dxpl_props); H5_PUSH_FUNC \ { -/* Use this macro for staticly-scoped functions which propgate errors, but don't issue them */ +/* Use this macro for statically-scoped functions which propagate errors, but don't issue them */ /* And that shouldn't push their name on the function stack */ #define FUNC_ENTER_PACKAGE_NOERR_NOFS \ { \ diff --git a/test/API/driver/CMakeLists.txt b/test/API/driver/CMakeLists.txt index 23ba0535b0f..5993a447978 100644 --- a/test/API/driver/CMakeLists.txt +++ b/test/API/driver/CMakeLists.txt @@ -2,9 +2,9 @@ cmake_minimum_required (VERSION 3.18) project(H5_API_TEST_DRIVER CXX) if (NOT KWSYS_USE_LOCALCONTENT) - set (KWSYS_URL ${KWSYS_TGZ_ORIGPATH}/${KWSYS_TGZ_ORIGNAME}) + set (KWSYS_URL ${KWSYS_TGZ_ORIGPATH}/${KWSYS_TGZ_NAME}) else () - set (KWSYS_URL ${TGZPATH}/${KWSYS_TGZ_ORIGNAME}) + set (KWSYS_URL ${TGZPATH}/${KWSYS_TGZ_NAME}) endif () # Only tgz files FetchContent_Declare (KWSYS diff --git a/test/API/tfile.c b/test/API/tfile.c index a3b2f6cd2fa..6b316d47259 100644 --- a/test/API/tfile.c +++ b/test/API/tfile.c @@ -2657,7 +2657,7 @@ test_file_double_file_dataset_open(bool new_format) hsize_t e_ext_dims[1] = {7}; /* Expanded dimension sizes */ hsize_t s_ext_dims[1] = {3}; /* Shrunk dimension sizes */ hsize_t max_dims0[1] = {8}; /* Maximum dimension sizes */ - hsize_t max_dims1[1] = {H5S_UNLIMITED}; /* Maximum dimesion sizes for extensible array index */ + hsize_t max_dims1[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes for extensible array index */ hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes for v2 B-tree index */ hsize_t chunks[1] = {2}, chunks2[2] = {4, 5}; /* Chunk dimension sizes */ #if 0 diff --git a/test/API/tselect.c b/test/API/tselect.c index 17da4c6f726..9d398be7916 100644 --- a/test/API/tselect.c +++ b/test/API/tselect.c @@ -11334,7 +11334,7 @@ test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, i /* Now select the checkerboard selection in the (possibly larger) n-cube. * * Since we have already calculated the base start, stride, count, - * and block, re-use the values in setting up start, stride, count, + * and block, reuse the values in setting up start, stride, count, * and block.
*/ for (i = 0; i < SS_DR_MAX_RANK; i++) { @@ -12790,7 +12790,7 @@ test_space_update_diminfo(void) hid_t space_id; /* Dataspace id */ #if 0 H5S_diminfo_valid_t diminfo_valid; /* Diminfo status */ - H5S_diminfo_valid_t rebuild_status; /* Diminfo status after rebuid */ + H5S_diminfo_valid_t rebuild_status; /* Diminfo status after rebuild */ #endif H5S_sel_type sel_type; /* Selection type */ herr_t ret; /* Return value */ diff --git a/test/cache_api.c b/test/cache_api.c index 746f2f0e155..de636cf4bb2 100644 --- a/test/cache_api.c +++ b/test/cache_api.c @@ -243,7 +243,7 @@ check_fapl_mdc_api_calls(unsigned paged, hid_t fcpl_id) } } - /* conpare the cache's internal configuration with the expected value */ + /* compare the cache's internal configuration with the expected value */ if (pass) { if (!resize_configs_are_equal(&default_auto_size_ctl, &cache_ptr->resize_ctl, true)) { @@ -386,7 +386,7 @@ check_fapl_mdc_api_calls(unsigned paged, hid_t fcpl_id) } } - /* conpare the cache's internal configuration with the expected value */ + /* compare the cache's internal configuration with the expected value */ if (pass) { if (!resize_configs_are_equal(&mod_auto_size_ctl, &cache_ptr->resize_ctl, true)) { diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c index 3ead12f7d96..02dbde3759a 100644 --- a/test/cmpd_dset.c +++ b/test/cmpd_dset.c @@ -19,7 +19,7 @@ #include "h5test.h" -static const char *FILENAME[] = {"cmpd_dset", "src_subset", "dst_subset", NULL}; +static const char *FILENAME[] = {"cmpd_dset", "src_subset", "dst_subset", "select_cmpd_dset", NULL}; const char *DSET_NAME[] = {"contig_src_subset", "chunk_src_subset", "contig_dst_subset", "chunk_dst_subset", NULL}; @@ -78,6 +78,17 @@ typedef struct s6_t { unsigned int post; } s6_t; +typedef struct s7_t { + int32_t a; + int32_t d; +} s7_t; + +typedef struct s8_t { + int64_t a; + int64_t b; + int64_t c; +} s8_t; + /* Structures for testing the optimization for the Chicago company. 
*/ typedef struct { int a, b, c[8], d, e; @@ -85,28 +96,1186 @@ typedef struct { double k, l, m, n; } stype1; -typedef struct { - int a, b, c[8], d, e; - float f, g, h[16], i, j; - double k, l, m, n; - long o, p, q; -} stype2; +typedef struct { + int a, b, c[8], d, e; + float f, g, h[16], i, j; + double k, l, m, n; + long o, p, q; +} stype2; + +typedef struct { + int a, b, c[8], d, e; +} stype3; + +typedef struct { + int a, b, c[8], d, e; + float f, g, h[16], i, j; + double k, l, m, n; + long o, p, q; + long long r, s, t; +} stype4; + +#define NX 100U +#define NY 2000U +#define PACK_NMEMBS 100 + +static void initialize_stype1(unsigned char *buf, size_t num); +static void initialize_stype2(unsigned char *buf, size_t num); +static void initialize_stype3(unsigned char *buf, size_t num); +static void initialize_stype4(unsigned char *buf, size_t num); +static hid_t create_stype1(void); +static hid_t create_stype2(void); +static hid_t create_stype3(void); +static hid_t create_stype4(void); +static int compare_data(void *src_data, void *dst_data, hbool_t src_subset); +static int compare_stype4_data(void *expect_buf, void *rbuf); +static int compare_s1_data(void *expect_buf, void *rbuf); +static int compare_s1_s3_data(void *expect_buf, void *rbuf); +static int compare_s7_data(void *expect_buf, void *rbuf); +static int compare_a_d_data(void *exp1_buf, void *exp2_buf, void *rbuf); +static int compare_a_b_c_data(void *exp1_buf, void *exp2_buf, void *rbuf); + +/*------------------------------------------------------------------------- + * Function: compare_stype4_data + * + * Purpose: Compare data (the common fields in stype4/stype2) read in rbuf with expected data + * in expect_buf. + * + * Return: Success: 0 + * + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static int +compare_stype4_data(void *expect_buf, void *rbuf) +{ + int i; + + for (i = 0; i < (int)(NX * NY); i++) { + stype4 *s1_ptr; + stype4 *s2_ptr; + s1_ptr = ((stype4 *)expect_buf) + i; + s2_ptr = ((stype4 *)rbuf) + i; + + if (s1_ptr->a != s2_ptr->a || s1_ptr->b != s2_ptr->b || s1_ptr->c[0] != s2_ptr->c[0] || + s1_ptr->c[1] != s2_ptr->c[1] || s1_ptr->c[2] != s2_ptr->c[2] || s1_ptr->c[3] != s2_ptr->c[3] || + s1_ptr->c[4] != s2_ptr->c[4] || s1_ptr->c[5] != s2_ptr->c[5] || s1_ptr->c[6] != s2_ptr->c[6] || + s1_ptr->c[7] != s2_ptr->c[7] || s1_ptr->d != s2_ptr->d || s1_ptr->e != s2_ptr->e || + !H5_FLT_ABS_EQUAL(s1_ptr->f, s2_ptr->f) || !H5_FLT_ABS_EQUAL(s1_ptr->g, s2_ptr->g) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[0], s2_ptr->h[0]) || !H5_FLT_ABS_EQUAL(s1_ptr->h[1], s2_ptr->h[1]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[2], s2_ptr->h[2]) || !H5_FLT_ABS_EQUAL(s1_ptr->h[3], s2_ptr->h[3]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[4], s2_ptr->h[4]) || !H5_FLT_ABS_EQUAL(s1_ptr->h[5], s2_ptr->h[5]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[6], s2_ptr->h[6]) || !H5_FLT_ABS_EQUAL(s1_ptr->h[7], s2_ptr->h[7]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[8], s2_ptr->h[8]) || !H5_FLT_ABS_EQUAL(s1_ptr->h[9], s2_ptr->h[9]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[10], s2_ptr->h[10]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[11], s2_ptr->h[11]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[12], s2_ptr->h[12]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[13], s2_ptr->h[13]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[14], s2_ptr->h[14]) || + !H5_FLT_ABS_EQUAL(s1_ptr->h[15], s2_ptr->h[15]) || !H5_FLT_ABS_EQUAL(s1_ptr->i, s2_ptr->i) || + !H5_FLT_ABS_EQUAL(s1_ptr->j, s2_ptr->j) || !H5_DBL_ABS_EQUAL(s1_ptr->k, s2_ptr->k) || + !H5_DBL_ABS_EQUAL(s1_ptr->l, s2_ptr->l) || !H5_DBL_ABS_EQUAL(s1_ptr->m, 
s2_ptr->m) || + !H5_DBL_ABS_EQUAL(s1_ptr->n, s2_ptr->n) || s1_ptr->o != s2_ptr->o || s1_ptr->p != s2_ptr->p || + s1_ptr->q != s2_ptr->q) { + H5_FAILED(); + printf(" i=%d\n", i); + printf(" exp_buf={a=%d, b=%d, c=[%d,%d,%d,%d,%d,%d,%d,%d], d=%d, e=%d, f=%f, g=%f, " + "h=[%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f], i=%f, j=%f, k=%f, l=%f, m=%f, n=%f, " + "o=%ld, p=%ld, q=%ld}\n", + s1_ptr->a, s1_ptr->b, s1_ptr->c[0], s1_ptr->c[1], s1_ptr->c[2], s1_ptr->c[3], s1_ptr->c[4], + s1_ptr->c[5], s1_ptr->c[6], s1_ptr->c[7], s1_ptr->d, s1_ptr->e, (double)s1_ptr->f, + (double)s1_ptr->g, (double)s1_ptr->h[0], (double)s1_ptr->h[1], (double)s1_ptr->h[2], + (double)s1_ptr->h[3], (double)s1_ptr->h[4], (double)s1_ptr->h[5], (double)s1_ptr->h[6], + (double)s1_ptr->h[7], (double)s1_ptr->h[8], (double)s1_ptr->h[9], (double)s1_ptr->h[10], + (double)s1_ptr->h[11], (double)s1_ptr->h[12], (double)s1_ptr->h[13], (double)s1_ptr->h[14], + (double)s1_ptr->h[15], (double)s1_ptr->i, (double)s1_ptr->j, s1_ptr->k, s1_ptr->l, + s1_ptr->m, s1_ptr->n, s1_ptr->o, s1_ptr->p, s1_ptr->q); + printf(" rbuf={a=%d, b=%d, c=[%d,%d,%d,%d,%d,%d,%d,%d], d=%d, e=%d, f=%f, g=%f, " + "h=[%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f], i=%f, j=%f, k=%f, l=%f, m=%f, n=%f, " + "o=%ld, p=%ld, q=%ld}\n", + s2_ptr->a, s2_ptr->b, s2_ptr->c[0], s2_ptr->c[1], s2_ptr->c[2], s2_ptr->c[3], s2_ptr->c[4], + s2_ptr->c[5], s2_ptr->c[6], s2_ptr->c[7], s2_ptr->d, s2_ptr->e, (double)s2_ptr->f, + (double)s2_ptr->g, (double)s2_ptr->h[0], (double)s2_ptr->h[1], (double)s2_ptr->h[2], + (double)s2_ptr->h[3], (double)s2_ptr->h[4], (double)s2_ptr->h[5], (double)s2_ptr->h[6], + (double)s2_ptr->h[7], (double)s2_ptr->h[8], (double)s2_ptr->h[9], (double)s2_ptr->h[10], + (double)s2_ptr->h[11], (double)s2_ptr->h[12], (double)s2_ptr->h[13], (double)s2_ptr->h[14], + (double)s2_ptr->h[15], (double)s2_ptr->i, (double)s2_ptr->j, s2_ptr->k, s2_ptr->l, + s2_ptr->m, s2_ptr->n, s2_ptr->o, s2_ptr->p, s2_ptr->q); + + goto error; + } + } /* end for */ + + return SUCCEED; + +error: + return FAIL; + +} /* compare_stype4_data() */ + +/*------------------------------------------------------------------------- + * Function: compare_s1_data + * + * Purpose: Compare data (s1_t) read in rbuf with expected data in expect_buf. + * + * Return: Success: 0 + * + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static int +compare_s1_data(void *expect_buf, void *rbuf) +{ + int i; + s1_t *s1_ptr; + s1_t *s2_ptr; + + /* Compare save_s1 with rbuf1. They should be the same */ + for (i = 0; i < (int)(NX * NY); i++) { + s1_ptr = ((s1_t *)expect_buf) + i; + s2_ptr = ((s1_t *)rbuf) + i; + + if (s1_ptr->a != s2_ptr->a || s1_ptr->b != s2_ptr->b || s1_ptr->c[0] != s2_ptr->c[0] || + s1_ptr->c[1] != s2_ptr->c[1] || s1_ptr->c[2] != s2_ptr->c[2] || s1_ptr->c[3] != s2_ptr->c[3] || + s1_ptr->d != s2_ptr->d || s1_ptr->e != s2_ptr->e) { + H5_FAILED(); + printf(" i=%d\n", i); + puts(" Incorrect values read from the file"); + goto error; + } + } + + return SUCCEED; + +error: + return FAIL; + +} /* compare_s1_data() */ + +/*------------------------------------------------------------------------- + * Function: compare_s1_s3_data + * + * Purpose: Compare data (s1_t/s3_t) read in rbuf with expected data in expect_buf.
+ * + * Return: Success: 0 + * + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static int +compare_s1_s3_data(void *expect_buf, void *rbuf) +{ + int i; + s1_t *s1_ptr; + s3_t *s2_ptr; + + for (i = 0; i < (int)(NX * NY); i++) { + s1_ptr = ((s1_t *)expect_buf) + i; + s2_ptr = ((s3_t *)rbuf) + i; + + if (s1_ptr->a != s2_ptr->a || s1_ptr->b != s2_ptr->b || s1_ptr->c[0] != s2_ptr->c[0] || + s1_ptr->c[1] != s2_ptr->c[1] || s1_ptr->c[2] != s2_ptr->c[2] || s1_ptr->c[3] != s2_ptr->c[3] || + s1_ptr->d != s2_ptr->d || s1_ptr->e != s2_ptr->e) { + H5_FAILED(); + printf(" i=%d\n", i); + puts(" Incorrect values read from the file"); + goto error; + } + } + + return SUCCEED; + +error: + return FAIL; + +} /* compare_s1_s3_data() */ + +/*------------------------------------------------------------------------- + * Function: compare_s7_data + * + * Purpose: Compare data (s7_t) read in rbuf with expected data in expect_buf. + * + * Return: Success: 0 + * + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static int +compare_s7_data(void *expect_buf, void *rbuf) +{ + int i; + s7_t *s1_ptr; + s7_t *s2_ptr; + + for (i = 0; i < (int)(NX * NY); i++) { + s1_ptr = ((s7_t *)expect_buf) + i; + s2_ptr = ((s7_t *)rbuf) + i; + + /* Compare only the data */ + if (s1_ptr->a != s2_ptr->a || s1_ptr->d != s2_ptr->d) { + H5_FAILED(); + printf(" i=%d\n", i); + printf(" expect_buf:a=%d, d=%d\n", s1_ptr->a, s1_ptr->d); + printf(" rbuf:a=%d, d=%d", s2_ptr->a, s2_ptr->d); + goto error; + } + } /* end for */ + + return SUCCEED; + +error: + return FAIL; + +} /* compare_s7_data() */ + +/*------------------------------------------------------------------------- + * Function: compare_a_d_data + * + * Purpose: Compare data read in rbuf with expected data + * in expect_buf: save_s7, save_s8. + * + * Return: Success: 0 + * + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static int +compare_a_d_data(void *exp1_buf, void *exp2_buf, void *rbuf) +{ + int i; + s7_t *s1_ptr; + s8_t *s2_ptr; + s7_t *rbuf_ptr; + + for (i = 0; i < (int)(NX * NY); i++) { + s1_ptr = ((s7_t *)exp1_buf) + i; + s2_ptr = ((s8_t *)exp2_buf) + i; + rbuf_ptr = ((s7_t *)rbuf) + i; + + if (s2_ptr->a != rbuf_ptr->a || s1_ptr->d != rbuf_ptr->d) { + H5_FAILED(); + printf(" i=%d\n", i); + printf(" expect_buf:a=%d, d=%d\n", (int32_t)s2_ptr->a, s1_ptr->d); + printf(" rbuf: a=%d, d=%d", rbuf_ptr->a, rbuf_ptr->d); + goto error; + } + } /* end for */ + + return SUCCEED; + +error: + return FAIL; + +} /* compare_a_d_data() */ + +/*------------------------------------------------------------------------- + * Function: compare_a_b_c_data + * + * Purpose: Compare data read in rbuf with expected data + * in expect_buf: save_s8, save_rbuf8.
+ * + * Return: Success: 0 + * + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static int +compare_a_b_c_data(void *exp1_buf, void *exp2_buf, void *rbuf) +{ + int i; + s8_t *s1_ptr; + s8_t *s2_ptr; + s8_t *rbuf_ptr; + + for (i = 0; i < (int)(NX * NY); i++) { + s1_ptr = ((s8_t *)exp1_buf) + i; + s2_ptr = ((s8_t *)exp2_buf) + i; + rbuf_ptr = ((s8_t *)rbuf) + i; + + if (s1_ptr->a != rbuf_ptr->a || s2_ptr->b != rbuf_ptr->b || s2_ptr->c != rbuf_ptr->c) { + H5_FAILED(); + printf(" i=%d\n", i); + printf(" expect_buf:a=%ld, b=%ld, c=%ld\n", s1_ptr->a, s2_ptr->b, s2_ptr->c); + printf(" rbuf: a=%ld, b=%ld, c=%ld", rbuf_ptr->a, rbuf_ptr->b, rbuf_ptr->c); + goto error; + } + } /* end for */ + + return SUCCEED; + +error: + return FAIL; + +} /* compare_a_b_c_data() */ + +/*------------------------------------------------------------------------- + * Function: test_select_src_subset + * + * Purpose: This is derived from test_hdf5_src_subset() for selection + * I/O testing: + * + * Test the optimization of compound data writing, rewriting, + * and reading when the source type is a subset of the destination + * type. For example: + * struct source { struct destination { + * TYPE1 A; --> TYPE1 A; + * TYPE2 B; --> TYPE2 B; + * TYPE3 C; --> TYPE3 C; + * }; TYPE4 D; + * TYPE5 E; + * }; + * + * Return: Success: 0 + * Failure: 1 + * + *------------------------------------------------------------------------- + */ +static unsigned +test_select_src_subset(char *fname, hid_t fapl, hid_t in_dxpl, unsigned set_fillvalue, unsigned set_buf) +{ + hid_t fid = H5I_INVALID_HID; + hid_t rew_tid = H5I_INVALID_HID, src_tid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[2] = {NX, NY}; + hsize_t chunk_dims[2] = {NX / 10, NY / 10}; + unsigned char *rew_buf = NULL, *save_rew_buf = NULL, *rbuf = NULL; + int fillvalue = (-1); + size_t ss, ss1, ss2; + + /* Create the file for this test */ + if ((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + goto error; + + /* Build hdf5 datatypes */ + if ((src_tid = create_stype1()) < 0) + goto error; + + if ((rew_tid = create_stype3()) < 0) + goto error; + + /* Create the data space */ + if ((sid = H5Screate_simple(2, dims, NULL)) < 0) + goto error; + + /* Allocate space and initialize data */ + rbuf = (unsigned char *)calloc(NX * NY, sizeof(stype3)); + + rew_buf = (unsigned char *)calloc(NX * NY, sizeof(stype3)); + initialize_stype3(rew_buf, (size_t)NX * NY); + + /* Save a copy as the buffer may be clobbered due to H5Pset_modify_write_buf() */ + save_rew_buf = (unsigned char *)calloc(NX * NY, sizeof(stype3)); + initialize_stype3(save_rew_buf, (size_t)NX * NY); + + /* Create dataset creation property list */ + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + goto error; + + /* + * Create contiguous and chunked datasets. 
+ * Write to the datasets in a different compound subset order + */ + printf(" test_select_src_subset(): writing data to contiguous and chunked datasets"); + + if (set_fillvalue) { + if (H5Pset_fill_value(dcpl, src_tid, &fillvalue) < 0) + goto error; + } + + dxpl = H5Pcopy(in_dxpl); + if (set_buf) { + ss1 = H5Tget_size(rew_tid); + ss2 = H5Tget_size(src_tid); + ss = MAX(ss1, ss2) * NX * NY; + + if (H5Pset_buffer(dxpl, ss, NULL, NULL) < 0) + goto error; + } + + /* Create contiguous data set */ + if ((did = H5Dcreate2(fid, DSET_NAME[0], src_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; + + /* Write to the dataset with rew_tid */ + if (H5Dwrite(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rew_buf) < 0) + goto error; + + if (H5Dread(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + goto error; + + if (memcmp(save_rew_buf, rbuf, sizeof(stype3) * NX * NY) != 0) + goto error; + + if (H5Dclose(did) < 0) + goto error; + + /* Set chunking */ + if (H5Pset_chunk(dcpl, 2, chunk_dims) < 0) + goto error; + + /* Create chunked data set */ + if ((did = H5Dcreate2(fid, DSET_NAME[1], src_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; + + /* Write to the dataset with rew_tid */ + if (H5Dwrite(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rew_buf) < 0) + FAIL_STACK_ERROR; + + if (H5Dread(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + goto error; + + if (memcmp(save_rew_buf, rbuf, sizeof(stype3) * NX * NY) != 0) + goto error; + + if (H5Dclose(did) < 0) + goto error; + + /* Finishing test and release resources */ + if (H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + + if (H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + if (H5Tclose(src_tid) < 0) + FAIL_STACK_ERROR; + + if (H5Tclose(rew_tid) < 0) + FAIL_STACK_ERROR; + + if (H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + free(rbuf); + free(rew_buf); + free(save_rew_buf); + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Dclose(did); + H5Fclose(fid); + H5Tclose(src_tid); + H5Tclose(rew_tid); + } + H5E_END_TRY + + if (rbuf) + free(rbuf); + if (rew_buf) + free(rew_buf); + if (save_rew_buf) + free(save_rew_buf); + + printf("\n*** SELECT SRC SUBSET TEST FAILED ***\n"); + return 1; +} /* test_select_src_subset() */ + +/*------------------------------------------------------------------------- + * Function: test_select_dst_subset + * + * Purpose: This is derived from test_hdf5_dst_subset() for selection + * I/O testing: + + * Test the optimization of compound data writing, rewriting, + * and reading when the destination type is a subset of the + * source type. 
For example: + * struct source { struct destination { + * TYPE1 A; --> TYPE1 A; + * TYPE2 B; --> TYPE2 B; + * TYPE3 C; --> TYPE3 C; + * TYPE4 D; } + * TYPE5 E; + * }; + * + * Return: Success: 0 + * Failure: 1 + * + *------------------------------------------------------------------------- + */ +static unsigned +test_select_dst_subset(char *fname, hid_t fapl, hid_t in_dxpl, unsigned set_fillvalue, unsigned set_buf) +{ + hid_t fid = H5I_INVALID_HID; + hid_t rew_tid = H5I_INVALID_HID, src_tid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hsize_t dims[2] = {NX, NY}; + hsize_t chunk_dims[2] = {NX / 10, NY / 10}; + unsigned char *rew_buf = NULL, *save_rew_buf = NULL, *rbuf = NULL; + int fillvalue = (-1); + size_t ss, ss1, ss2; + + /* Create the file for this test */ + if ((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + goto error; + + /* Build hdf5 datatypes */ + if ((src_tid = create_stype2()) < 0) + goto error; + + if ((rew_tid = create_stype4()) < 0) + goto error; + + /* Create the data space */ + if ((sid = H5Screate_simple(2, dims, NULL)) < 0) + goto error; + + rbuf = (unsigned char *)calloc(NX * NY, sizeof(stype4)); + + rew_buf = (unsigned char *)calloc(NX * NY, sizeof(stype4)); + initialize_stype4(rew_buf, (size_t)NX * NY); + + /* Save a copy as the buffer may be clobbered due to H5Pset_modify_write_buf() */ + save_rew_buf = (unsigned char *)calloc(NX * NY, sizeof(stype4)); + initialize_stype4(save_rew_buf, (size_t)NX * NY); + + /* Create dataset creation property list */ + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + goto error; + + /* + * Write data to contiguous and chunked datasets. + */ + printf(" test_select_dst_subset(): writing data to contiguous and chunked datasets"); + + if (set_fillvalue) { + if (H5Pset_fill_value(dcpl, src_tid, &fillvalue) < 0) + goto error; + } + + dxpl = H5Pcopy(in_dxpl); + if (set_buf) { + ss1 = H5Tget_size(rew_tid); + ss2 = H5Tget_size(src_tid); + ss = MAX(ss1, ss2) * NX * NY; + + if (H5Pset_buffer(dxpl, ss, NULL, NULL) < 0) + goto error; + } + + /* Create contiguous data set */ + if ((did = H5Dcreate2(fid, DSET_NAME[2], src_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; + + /* Write to the dataset with rew_tid */ + if (H5Dwrite(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rew_buf) < 0) + goto error; + + /* Read from the dataset with rew_tid */ + if (H5Dread(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + goto error; + + if (compare_stype4_data(save_rew_buf, rbuf) < 0) + goto error; + + if (H5Dclose(did) < 0) + goto error; + + /* Set chunking */ + if (H5Pset_chunk(dcpl, 2, chunk_dims) < 0) + goto error; + + /* Create chunked data set */ + if ((did = H5Dcreate2(fid, DSET_NAME[3], src_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; + + initialize_stype4(rew_buf, (size_t)NX * NY); + + /* Write data to the dataset with rew_tid */ + if (H5Dwrite(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rew_buf) < 0) + goto error; + + /* Read from the dataset with rew_tid */ + if (H5Dread(did, rew_tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) + goto error; + + if (compare_stype4_data(save_rew_buf, rbuf) < 0) + goto error; + + if (H5Dclose(did) < 0) + goto error; + + /* Finishing test and release resources */ + if (H5Sclose(sid) < 0) + goto error; + + if (H5Pclose(dcpl) < 0) + goto error; + + if (H5Pclose(dxpl) < 0) + FAIL_STACK_ERROR; + + if (H5Tclose(src_tid) < 0) + goto error; + + if (H5Tclose(rew_tid) < 0) + goto error; + if
(H5Fclose(fid) < 0) + goto error; + + free(rbuf); + free(rew_buf); + free(save_rew_buf); + + PASSED(); + return 0; -typedef struct { - int a, b, c[8], d, e; -} stype3; +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Dclose(did); + H5Fclose(fid); + H5Tclose(src_tid); + H5Tclose(rew_tid); + } + H5E_END_TRY -typedef struct { - int a, b, c[8], d, e; - float f, g, h[16], i, j; - double k, l, m, n; - long o, p, q; - long long r, s, t; -} stype4; + if (rbuf) + free(rbuf); + if (rew_buf) + free(rew_buf); + if (save_rew_buf) + free(save_rew_buf); -#define NX 100U -#define NY 2000U -#define PACK_NMEMBS 100 + printf("\n*** SELECT DST SUBSET TEST FAILED ***\n"); + return 1; +} /* test_select_dst_subset */ + +/*------------------------------------------------------------------------- + * Function: test_select_compound + * + * Purpose: This is derived from test_compound() for selection I/O + * testing: + * + * --Creates a simple dataset of a compound type and then + * writes it in original and reverse order. + * --Creates another dataset to verify the CI window + * is fixed. + * + * Return: Success: 0 + * Failure: 1 + * + *------------------------------------------------------------------------- + */ +static unsigned +test_select_compound(char *fname, hid_t fapl, hid_t in_dxpl, unsigned set_fillvalue, unsigned set_buf) +{ + hid_t s1_tid = H5I_INVALID_HID; + hid_t s3_tid = H5I_INVALID_HID; + hid_t s7_tid = H5I_INVALID_HID; + hid_t s8_tid = H5I_INVALID_HID; + + /* Buffers */ + s1_t *s1 = NULL; + s1_t *save_s1 = NULL; + s3_t *s3 = NULL; + s3_t *save_s3 = NULL; + s1_t *rbuf1 = NULL; + s3_t *rbuf3 = NULL; + + s7_t *s7 = NULL; + s7_t *save_s7 = NULL; + s7_t *rbuf7 = NULL; + + s8_t *s8 = NULL; + s8_t *save_s8 = NULL; + s8_t *rbuf8 = NULL; + s8_t *save_rbuf8 = NULL; + + /* Other variables */ + unsigned int i; + hid_t fid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t array_dt = H5I_INVALID_HID; + static hsize_t dim[] = {NX, NY}; + int fillvalue = (-1); + size_t ss = 0, ss1 = 0, ss2 = 0; + hsize_t memb_size[1] = {4}; + + /* Allocate buffers */ + if (NULL == (s1 = (s1_t *)calloc(NX * NY, sizeof(s1_t)))) + goto error; + if (NULL == (save_s1 = (s1_t *)calloc(NX * NY, sizeof(s1_t)))) + goto error; + if (NULL == (rbuf1 = (s1_t *)calloc(NX * NY, sizeof(s1_t)))) + goto error; + if (NULL == (s3 = (s3_t *)calloc(NX * NY, sizeof(s3_t)))) + goto error; + if (NULL == (save_s3 = (s3_t *)calloc(NX * NY, sizeof(s3_t)))) + goto error; + if (NULL == (rbuf3 = (s3_t *)calloc(NX * NY, sizeof(s3_t)))) + goto error; + + if (NULL == (s7 = (s7_t *)calloc(NX * NY, sizeof(s7_t)))) + goto error; + if (NULL == (save_s7 = (s7_t *)calloc(NX * NY, sizeof(s7_t)))) + goto error; + if (NULL == (rbuf7 = (s7_t *)calloc(NX * NY, sizeof(s7_t)))) + goto error; + + if (NULL == (s8 = (s8_t *)calloc(NX * NY, sizeof(s8_t)))) + goto error; + if (NULL == (save_s8 = (s8_t *)calloc(NX * NY, sizeof(s8_t)))) + goto error; + if (NULL == (rbuf8 = (s8_t *)calloc(NX * NY, sizeof(s8_t)))) + goto error; + if (NULL == (save_rbuf8 = (s8_t *)calloc(NX * NY, sizeof(s8_t)))) + goto error; + + /* Create the file */ + if ((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) { + goto error; + } + + /* Create the data space */ + if ((sid = H5Screate_simple(2, dim, NULL)) < 0) + goto error; + + /* Create dataset creation property list */ + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + goto error; + + /* Create a copy of
the incoming dataset transfer property list */ + if ((dxpl = H5Pcopy(in_dxpl)) < 0) + goto error; + + /* + * Create and write to the dataset in original compound struct members order + */ + printf(" test_select_compound(): basic compound write"); + + /* Initialize buffer with s1_t */ + for (i = 0; i < NX * NY; i++) { + s1[i].a = 8 * i + 0; + s1[i].b = 2000 + 2 * i; + s1[i].c[0] = 8 * i + 2; + s1[i].c[1] = 8 * i + 3; + s1[i].c[2] = 8 * i + 4; + s1[i].c[3] = 8 * i + 5; + s1[i].d = 2001 + 2 * i; + s1[i].e = 8 * i + 7; + } + memcpy(save_s1, s1, sizeof(s1_t) * NX * NY); + + /* Create file type s1_t */ + if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) + goto error; + array_dt = H5Tarray_create2(H5T_NATIVE_INT, 1, memb_size); + if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), array_dt) < 0 || + H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0 || + H5Tinsert(s1_tid, "e", HOFFSET(s1_t, e), H5T_NATIVE_INT) < 0) + goto error; + H5Tclose(array_dt); + + /* Set fill value accordingly */ + if (set_fillvalue) { + if (H5Pset_fill_value(dcpl, s1_tid, &fillvalue) < 0) + goto error; + } + + /* Create the dataset with file type s1_tid */ + if ((did = H5Dcreate2(fid, "s1", s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; + + /* Set buffer size accordingly */ + if (set_buf) { + ss1 = H5Tget_size(s1_tid); + + if (H5Pset_buffer(dxpl, ss1, NULL, NULL) < 0) + goto error; + } + + /* Write to the dataset with file type s1_tid */ + if (H5Dwrite(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, s1) < 0) + goto error; + + /* Read from the dataset with file type s1_tid */ + if (H5Dread(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, rbuf1) < 0) + goto error; + + /* Verify data is correct */ + if (compare_s1_data(save_s1, rbuf1) < 0) + goto error; + + PASSED(); + + /* + * Write to the dataset with s3 memory buffer. This buffer + * has the same data space but the data type is different: the + * data type is a struct whose members are in the opposite order. 
+ */ + printf(" test_select_compound(): reversal of struct members"); + + /* Create mem type s3_tid */ + if ((s3_tid = H5Tcreate(H5T_COMPOUND, sizeof(s3_t))) < 0) + goto error; + array_dt = H5Tarray_create2(H5T_NATIVE_INT, 1, memb_size); + if (H5Tinsert(s3_tid, "a", HOFFSET(s3_t, a), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "b", HOFFSET(s3_t, b), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "c", HOFFSET(s3_t, c), array_dt) < 0 || + H5Tinsert(s3_tid, "d", HOFFSET(s3_t, d), H5T_NATIVE_INT) < 0 || + H5Tinsert(s3_tid, "e", HOFFSET(s3_t, e), H5T_NATIVE_INT) < 0) + goto error; + H5Tclose(array_dt); + + /* Initialize buffer with s3_t */ + for (i = 0; i < NX * NY; i++) { + s3[i].a = 8 * i + 0; + s3[i].b = 2000 + 2 * i; + s3[i].c[0] = 8 * i + 2; + s3[i].c[1] = 8 * i + 3; + s3[i].c[2] = 8 * i + 4; + s3[i].c[3] = 8 * i + 5; + s3[i].d = 2001 + 2 * i; + s3[i].e = 8 * i + 7; + } + + memcpy(save_s3, s3, sizeof(s3_t) * NX * NY); + + /* Set buffer size accordingly */ + if (set_buf) { + /* ss1 is set already previously */ + ss2 = H5Tget_size(s3_tid); + ss = MAX(ss1, ss2) * NX * NY; + + if (H5Pset_buffer(dxpl, ss, NULL, NULL) < 0) + goto error; + } + + /* Read from the dataset with mem type s3_tid */ + if (H5Dread(did, s3_tid, H5S_ALL, H5S_ALL, dxpl, rbuf3) < 0) + goto error; + + if (compare_s1_s3_data(save_s1, rbuf3) < 0) + goto error; + + if (H5Dclose(did) < 0) + goto error; + + PASSED(); + + printf(" test_select_compound(): verify fix for non-optimized compound conversions with memory type " + "larger than file "); + + /* Create file type s7_tid */ + if ((s7_tid = H5Tcreate(H5T_COMPOUND, sizeof(s7_t))) < 0) + goto error; + + if (H5Tinsert(s7_tid, "a", HOFFSET(s7_t, a), H5T_NATIVE_INT32) < 0 || + H5Tinsert(s7_tid, "d", HOFFSET(s7_t, d), H5T_NATIVE_INT32) < 0) + goto error; + + /* Initialize buffer with s7_t */ + for (i = 0; i < NX * NY; i++) { + s7[i].a = (int32_t)(2 * i); + s7[i].d = (int32_t)(2 * i + 1); + } + memcpy(save_s7, s7, sizeof(s7_t) * NX * NY); + + /* Create mem type s8_tid */ + if ((s8_tid = H5Tcreate(H5T_COMPOUND, sizeof(s8_t))) < 0) + goto error; + + if (H5Tinsert(s8_tid, "a", HOFFSET(s8_t, a), H5T_NATIVE_INT64) < 0 || + H5Tinsert(s8_tid, "b", HOFFSET(s8_t, b), H5T_NATIVE_INT64) < 0 || + H5Tinsert(s8_tid, "c", HOFFSET(s8_t, c), H5T_NATIVE_INT64) < 0) + goto error; + + /* Initialize buffer with s8_t */ + for (i = 0; i < NX * NY; i++) { + s8[i].a = (int64_t)(2 * NX * NY + 3 * i); + s8[i].b = (int64_t)(2 * NX * NY + 3 * i + 1); + s8[i].c = (int64_t)(2 * NX * NY + 3 * i + 2); + } + memcpy(save_s8, s8, sizeof(s8_t) * NX * NY); + + /* Set fill value accordingly */ + if (set_fillvalue) { + if (H5Pset_fill_value(dcpl, s7_tid, &fillvalue) < 0) + goto error; + } + + /* Set buffer size accordingly */ + if (set_buf) { + ss1 = H5Tget_size(s7_tid); + ss2 = H5Tget_size(s8_tid); + ss = MAX(ss1, ss2) * NX * NY; + + if (H5Pset_buffer(dxpl, ss, NULL, NULL) < 0) + goto error; + } + + /* Create dataset with file type s7_tid */ + if ((did = H5Dcreate2(fid, "ss", s7_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; + + /* Write to the dataset with mem type s7_tid */ + if (H5Dwrite(did, s7_tid, H5S_ALL, H5S_ALL, dxpl, s7) < 0) + goto error; + + /* Read from the dataset with mem type s7_tid */ + if (H5Dread(did, s7_tid, H5S_ALL, H5S_ALL, dxpl, rbuf7) < 0) + goto error; + + /* Verify data read is correct */ + if (compare_s7_data(save_s7, rbuf7) < 0) + goto error; + + /* Write to the dataset with mem type s8_tid */ + if (H5Dwrite(did, s8_tid, H5S_ALL, H5S_ALL, dxpl, s8) < 0) + goto error; + + /* Read from 
the dataset with mem type s7_tid */ + memset(rbuf7, 0, NX * NY * sizeof(s7_t)); + if (H5Dread(did, s7_tid, H5S_ALL, H5S_ALL, dxpl, rbuf7) < 0) + goto error; + + /* Verify a: save_s8, d: save_s7 */ + if (compare_a_d_data(save_s7, save_s8, rbuf7) < 0) + goto error; + + /* Initialize read buffer of s8_t with unique values */ + for (i = 0; i < NX * NY; i++) { + rbuf8[i].a = (int64_t)(5 * NX * NY + 3 * i); + rbuf8[i].b = (int64_t)(5 * NX * NY + 3 * i + 1); + rbuf8[i].c = (int64_t)(5 * NX * NY + 3 * i + 2); + } + memcpy(save_rbuf8, rbuf8, sizeof(s8_t) * NX * NY); + + /* Read from the dataset with mem type s8_tid */ + if (H5Dread(did, s8_tid, H5S_ALL, H5S_ALL, dxpl, rbuf8) < 0) + goto error; + + /* Verify a: save_s8; b, c: save_rbuf8 */ + if (compare_a_b_c_data(save_s8, save_rbuf8, rbuf8) < 0) + goto error; + + if (H5Dclose(did) < 0) + goto error; + + PASSED(); + + /* + * Release resources. + */ + if (H5Sclose(sid) < 0) + goto error; + + if (H5Pclose(dcpl) < 0) + goto error; + + if (H5Pclose(dxpl) < 0) + goto error; + + if (H5Tclose(s1_tid) < 0) + goto error; + + if (H5Tclose(s3_tid) < 0) + goto error; + + if (H5Tclose(s7_tid) < 0) + goto error; + + if (H5Tclose(s8_tid) < 0) + goto error; + + if (H5Fclose(fid) < 0) + goto error; + + /* Release buffers */ + free(s1); + free(s3); + free(save_s3); + free(rbuf1); + free(rbuf3); + free(s7); + free(save_s7); + free(s8); + free(save_s8); + free(rbuf7); + free(rbuf8); + free(save_rbuf8); + + return 0; + +error: + H5E_BEGIN_TRY + { + H5Sclose(sid); + H5Pclose(dcpl); + H5Pclose(dxpl); + H5Dclose(did); + H5Fclose(fid); + H5Tclose(s1_tid); + H5Tclose(s3_tid); + H5Tclose(s7_tid); + H5Tclose(s8_tid); + } + H5E_END_TRY + + /* Release resources */ + if (s1) + free(s1); + if (s3) + free(s3); + if (save_s3) + free(save_s3); + if (rbuf1) + free(rbuf1); + if (rbuf3) + free(rbuf3); + if (s7) + free(s7); + if (save_s7) + free(save_s7); + if (s8) + free(s8); + if (save_s8) + free(save_s8); + if (rbuf7) + free(rbuf7); + if (rbuf8) + free(rbuf8); + if (save_rbuf8) + free(save_rbuf8); + + printf("\n*** SELECT COMPOUND DATASET TESTS FAILED ***\n"); + return 1; +} /* test_select_compound() */ + +/* + * Purpose: Tests for selection I/O with compound types: + * --set_cache: set chunk cache to 0 or not + * via H5Pset_cache(fapl...) + * --set_fillvalue: set fill value or not + * via H5Pset_fill_value(dcpl...) + * --select_io: enable selection I/O or not + * via H5Pset_selection_io(dxpl...) + * --mwbuf: with or without modifying write buffers + * via H5Pset_modify_write_buf(dxpl...) + * --set_buf: with or without setting the maximum size + * for the type conversion buffer and background buffer + * via H5Pset_buffer(dxpl...) + * + * These tests will test the selection I/O pipeline in particular + * triggering H5D__scatgath_read()/write(), + * H5D__scatgath_write_select_read()/write(), + * and with/without the optimized compound read/write. 
+ */ +static unsigned +test_compounds_selection_io(void) +{ + unsigned nerrs = 0; + unsigned set_cache; /* Set cache to 0 or not */ + unsigned set_fillvalue; /* Set fill value or not */ + unsigned select_io; /* Enable selection I/O or not */ + unsigned mwbuf; /* With or without modifying write buffers */ + unsigned set_buf; /* With or without H5Pset_buffer */ + hid_t fapl = -1; + hid_t dxpl = -1; + char fname[256]; + + fapl = h5_fileaccess(); + h5_fixname(FILENAME[3], fapl, fname, sizeof(fname)); + + for (set_cache = FALSE; set_cache <= TRUE; set_cache++) { + for (set_fillvalue = FALSE; set_fillvalue <= TRUE; set_fillvalue++) { + for (select_io = FALSE; select_io <= TRUE; select_io++) { + for (mwbuf = FALSE; mwbuf <= TRUE; mwbuf++) { + for (set_buf = FALSE; set_buf <= TRUE; set_buf++) { + + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + goto error; + + if (set_cache) { + printf(" With chunk cache set to 0, "); + if (H5Pset_cache(fapl, 0, (size_t)0, (size_t)0, 0.0) < 0) + goto error; + } + else + printf(" With default chunk cache, "); + + if (set_fillvalue) + printf("fill value set, "); + else + printf("fill value not set, "); + + if (select_io) { + printf("selection I/O ON, "); + if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) + goto error; + } + else + printf("selection I/O OFF, "); + + if (mwbuf) { + printf("with modify write buf, "); + if (H5Pset_modify_write_buf(dxpl, TRUE) < 0) + goto error; + } + else + printf("without modify write buf, "); + + if (set_buf) + printf("with H5Pset_buffer:\n"); + else + printf("without H5Pset_buffer:\n"); + + nerrs += test_select_compound(fname, fapl, dxpl, set_fillvalue, set_buf); + nerrs += test_select_src_subset(fname, fapl, dxpl, set_fillvalue, set_buf); + nerrs += test_select_dst_subset(fname, fapl, dxpl, set_fillvalue, set_buf); + + if (H5Pclose(dxpl) < 0) + goto error; + + } /* set_buf */ + } /* mwbuf */ + } /* select_io */ + } /* set_fillvalue */ + } /* set_cache */ + + if (H5Pclose(fapl) < 0) + goto error; + + if (nerrs) + goto error; + + return 0; + +error: + printf("*** COMPOUNDS TESTS FOR SELECTION I/O FAILED ***"); + + return 1; +} /* test_compounds_selection_io() */ /*------------------------------------------------------------------------- * Function: test_compound @@ -1220,7 +2389,7 @@ compare_data(void *src_data, void *dst_data, bool src_subset) * Function: test_hdf5_src_subset * * Purpose: Test the optimization of compound data writing, rewriting, - * and reading when the source type is a subset of destination + * and reading when the source type is a subset of the destination * type. For example: * struct source { struct destination { * TYPE1 A; --> TYPE1 A; @@ -1432,7 +2601,7 @@ test_hdf5_src_subset(char *filename, hid_t fapl) * TYPE5 E; * }; * This optimization is for the Chicago company. This test - * is in opposite of test_hdf5_src_subset. + * is the opposite of test_hdf5_src_subset. 
* * Return: Success: 0 * @@ -2173,6 +3342,9 @@ main(int argc, char *argv[]) (H5T_conv_t)((void (*)(void))H5T__conv_struct_opt)); } + printf("Testing compound dataset for selection I/O cases----\n"); + nerrors += test_compounds_selection_io(); + /* Create the file */ fapl_id = h5_fileaccess(); diff --git a/test/direct_chunk.c b/test/direct_chunk.c index 548bfc3f5c8..bde67dbdb5c 100644 --- a/test/direct_chunk.c +++ b/test/direct_chunk.c @@ -407,7 +407,7 @@ test_direct_chunk_overwrite_data(hid_t fid) if ((sid = H5Screate_simple(OVERWRITE_NDIMS, dset_dims, dset_max_dims)) < 0) FAIL_STACK_ERROR; - /* Set chunk size and filll value */ + /* Set chunk size and fill value */ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR; if (H5Pset_fill_value(dcpl_id, tid, &fill_value) < 0) diff --git a/test/external.c b/test/external.c index 30abc0061c7..bf3c5bdf8e0 100644 --- a/test/external.c +++ b/test/external.c @@ -513,7 +513,7 @@ test_multiple_files(hid_t file) FAIL_STACK_ERROR; if (H5Pclose(dcpl) < 0) FAIL_STACK_ERROR; - /* Re-use space below */ + /* Reuse space below */ /* ---------------------------------------------- * Verify that too-small external files will fail diff --git a/test/mf.c b/test/mf.c index d58cbfda067..269419f546c 100644 --- a/test/mf.c +++ b/test/mf.c @@ -1979,7 +1979,7 @@ test_mf_fs_extend(hid_t fapl) *------------------------------------------------------------------------- * To verify that an aggregator is absorbed into a section. * - * Test 1: To aborb the aggregator onto the beginning of the section + * Test 1: To absorb the aggregator onto the beginning of the section * Allocate block A from meta_aggr * Create a free-space section node with an address that adjoins * the end of meta_aggr and a size to make the aggregator diff --git a/test/onion.c b/test/onion.c index a2bdda3723b..5b9bb929dc6 100644 --- a/test/onion.c +++ b/test/onion.c @@ -1678,7 +1678,7 @@ verify_history_as_expected_onion(H5FD_t *raw_file, struct expected_history *filt if (H5FD__onion_history_decode(buf, &history_out) != readsize) TEST_ERROR; - /* Re-use buffer space to sanity-check checksum for record pointer(s). */ + /* Reuse buffer space to sanity-check checksum for record pointer(s). 
*/ assert(readsize >= sizeof(H5FD_onion_record_loc_t)); for (size_t i = 0; i < history_out.n_revisions; i++) { diff --git a/test/select_io_dset.c b/test/select_io_dset.c index d75b76becdb..79449aac070 100644 --- a/test/select_io_dset.c +++ b/test/select_io_dset.c @@ -265,6 +265,7 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) char *wbuf_bak = NULL; char *rbuf = NULL; char dset_name[DSET_NAME_LEN]; + int fillvalue = (-1); if ((wbuf = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) FAIL_STACK_ERROR; @@ -289,9 +290,13 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR; + if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillvalue) < 0) + FAIL_STACK_ERROR; + if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) @@ -721,6 +726,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) s2_t *s2_wbuf_bak = NULL; s2_t *s2_rbuf = NULL; char dset_name[DSET_NAME_LEN]; + int fillvalue = -1; /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) @@ -762,9 +768,13 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) FAIL_STACK_ERROR; + if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR; + if (H5Pset_fill_value(dcpl, s1_tid, &fillvalue) < 0) + FAIL_STACK_ERROR; + if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) @@ -2729,8 +2739,9 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) * Verify H5Pset/get_selection_io API works as expected */ static herr_t -test_set_get_select_io_mode(hid_t fid) +test_set_get_select_io_mode(const char *filename, hid_t fapl) { + hid_t fid = H5I_INVALID_HID; hid_t did = H5I_INVALID_HID; hid_t sid = H5I_INVALID_HID; hid_t dcpl = H5I_INVALID_HID; @@ -2744,6 +2755,9 @@ test_set_get_select_io_mode(hid_t fid) printf("\n"); TESTING("H5Pget/set_selection_io_mode()"); + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR; + if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) TEST_ERROR; @@ -2808,6 +2822,8 @@ test_set_get_select_io_mode(hid_t fid) FAIL_STACK_ERROR; if (H5Sclose(sid) < 0) FAIL_STACK_ERROR; + if (H5Fclose(fid) < 0) + FAIL_STACK_ERROR; PASSED(); @@ -2820,6 +2836,7 @@ test_set_get_select_io_mode(hid_t fid) H5Dclose(did); H5Pclose(dcpl); H5Pclose(dxpl); + H5Fclose(fid); } H5E_END_TRY @@ -3123,150 +3140,176 @@ main(void) { int nerrors = 0; char filename[FILENAME_BUF_SIZE]; - hid_t fapl = H5I_INVALID_HID; - hid_t fid = H5I_INVALID_HID; + hid_t fapl = H5I_INVALID_HID; + hid_t fapl2 = H5I_INVALID_HID; + hid_t fid = H5I_INVALID_HID; int test_select_config; - unsigned chunked; - unsigned dtrans; - unsigned mwbuf; + unsigned set_cache; /* Set chunk cache to 0 or not */ + unsigned chunked; /* Set to chunked dataset or not */ + unsigned dtrans; /* Set to using data transform or not */ + unsigned mwbuf; /* With/without modifying write buffer */ /* Testing setup */ h5_reset(); fapl = h5_fileaccess(); - h5_fixname(FILENAME[0], fapl, filename, sizeof filename); - if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + if ((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR; - /* Test with contiguous or chunked dataset */ - for (chunked = false; chunked <= true; chunked++) { + for (set_cache = 
FALSE; set_cache <= TRUE; set_cache++) { - /* Data transforms only apply to integer or floating-point datasets */ - /* therefore, not all tests are run with data transform */ - for (dtrans = false; dtrans <= true; dtrans++) { + /* Disable chunk caching on fapl2 */ + if (set_cache) { + if (H5Pset_cache(fapl2, 0, (size_t)0, (size_t)0, 0.0) < 0) + TEST_ERROR; + } - /* Test with and without modify_write_buf turned on */ - for (mwbuf = false; mwbuf <= true; mwbuf++) { - /* Print configuration message */ - printf("Testing for selection I/O "); - if (chunked) - printf("with chunked dataset, "); - else - printf("with contiguous dataset, "); - if (dtrans) - printf("data transform, "); - else - printf("without data transform, "); - if (mwbuf) - printf("and with modifying write buffers\n"); - else - printf("and without modifying write buffers\n"); + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl2)) < 0) + TEST_ERROR; - for (test_select_config = (int)TEST_NO_TYPE_CONV; - test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { + /* Test with contiguous or chunked dataset */ + for (chunked = false; chunked <= true; chunked++) { - switch (test_select_config) { - case TEST_NO_TYPE_CONV: /* case 1 */ - TESTING_2("No type conversion (null case)"); + /* Data transforms only apply to integer or floating-point datasets */ + /* therefore, not all tests are run with data transform */ + for (dtrans = false; dtrans <= true; dtrans++) { - nerrors += (test_no_type_conv(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + /* Test with and without modify_write_buf turned on */ + for (mwbuf = false; mwbuf <= true; mwbuf++) { - break; + /* Print configuration message */ + printf("Testing for selection I/O "); - case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ - TESTING_2("No size change, no background buffer"); + if (set_cache) + printf("with 0 chunk cache, "); + else + printf("with default chunk cache, "); - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) - SKIPPED(); - else - nerrors += (test_no_size_change_no_bkg(fid, chunked, mwbuf) < 0 ? 1 : 0); + if (chunked) + printf("with chunked dataset, "); + else + printf("with contiguous dataset, "); - break; + if (dtrans) + printf("data transform, "); + else + printf("without data transform, "); - case TEST_LARGER_MEM_NO_BKG: /* case 3 */ - TESTING_2("Larger memory type, no background buffer"); + if (mwbuf) + printf("and with modifying write buffers\n"); + else + printf("and without modifying write buffers\n"); - nerrors += (test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + for (test_select_config = (int)TEST_NO_TYPE_CONV; + test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { - break; + switch (test_select_config) { + case TEST_NO_TYPE_CONV: /* case 1 */ + TESTING_2("No type conversion (null case)"); - case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ - TESTING_2("Smaller memory type, no background buffer"); + nerrors += (test_no_type_conv(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); - nerrors += - (test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + break; - break; + case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ + TESTING_2("No size change, no background buffer"); - case TEST_CMPD_WITH_BKG: /* case 5 */ - TESTING_2("Compound types with background buffer"); + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += (test_no_size_change_no_bkg(fid, chunked, mwbuf) < 0 ? 
1 : 0); - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) - SKIPPED(); - else - nerrors += (test_cmpd_with_bkg(fid, chunked, mwbuf) < 0 ? 1 : 0); + break; - break; + case TEST_LARGER_MEM_NO_BKG: /* case 3 */ + TESTING_2("Larger memory type, no background buffer"); - case TEST_MULTI_CONV_NO_BKG: /* case 6 */ - TESTING_2("multi-datasets: type conv + no bkg buffer"); + nerrors += + (test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); - nerrors += test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); - break; + break; - case TEST_MULTI_CONV_BKG: /* case 7 */ - TESTING_2("multi-datasets: type conv + bkg buffer"); + case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ + TESTING_2("Smaller memory type, no background buffer"); - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) - SKIPPED(); - else - nerrors += test_multi_dsets_cmpd_with_bkg(fid, chunked, mwbuf); + nerrors += + (test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); - break; + break; - case TEST_MULTI_CONV_SIZE_CHANGE: /* case 8 */ - TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); + case TEST_CMPD_WITH_BKG: /* case 5 */ + TESTING_2("Compound types with background buffer"); - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) - SKIPPED(); - else - nerrors += test_multi_dsets_size_change_no_bkg(fid, chunked, mwbuf); + /* Data transforms do not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += (test_cmpd_with_bkg(fid, chunked, mwbuf) < 0 ? 1 : 0); - break; + break; - case TEST_MULTI_ALL: /* case 9 */ - TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); + case TEST_MULTI_CONV_NO_BKG: /* case 6 */ + TESTING_2("multi-datasets: type conv + no bkg buffer"); - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) - SKIPPED(); - else - nerrors += test_multi_dsets_all(10, fid, chunked, mwbuf); + nerrors += test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); - break; + break; - case TEST_SELECT_NTESTS: - default: - TEST_ERROR; + case TEST_MULTI_CONV_BKG: /* case 7 */ + TESTING_2("multi-datasets: type conv + bkg buffer"); - } /* end switch */ - } /* end for test_select_config */ + /* Data transforms do not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += test_multi_dsets_cmpd_with_bkg(fid, chunked, mwbuf); - } /* end mwbuf */ + break; - } /* end dtrans */ + case TEST_MULTI_CONV_SIZE_CHANGE: /* case 8 */ + TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); - } /* end chunked */ + /* Data transforms do not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += test_multi_dsets_size_change_no_bkg(fid, chunked, mwbuf); - nerrors += test_set_get_select_io_mode(fid); + break; - if (H5Fclose(fid) < 0) - TEST_ERROR; + case TEST_MULTI_ALL: /* case 9 */ + TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); + + /* Data transforms do not apply to the dataset datatype for this test */ + if (dtrans) + SKIPPED(); + else + nerrors += test_multi_dsets_all(10, fid, chunked, mwbuf); + + break; + + case TEST_SELECT_NTESTS: + default: + TEST_ERROR; + + } /* end switch */ + } /* end for test_select_config */ + + } /* end mwbuf */ + + } /* end dtrans */ + + } /* end chunked */ + + if (H5Fclose(fid) < 0) + TEST_ERROR; + + } /* end set_cache */ + + /* Use own file */ + 
nerrors += test_set_get_select_io_mode(filename, fapl); /* Use own file */ nerrors += test_get_no_selection_io_cause(filename, fapl); diff --git a/test/swmr.c b/test/swmr.c index 68bf024d418..aacf498b22e 100644 --- a/test/swmr.c +++ b/test/swmr.c @@ -4054,7 +4054,7 @@ test_append_flush_dataset_chunked(hid_t in_fapl) hsize_t dims[2] = {100, 0}; /* The dataset dimension sizes */ hsize_t maxdims[2] = {100, H5S_UNLIMITED}; /* The dataset maximum dimension sizes */ - hsize_t chunk_dims[2] = {5, 2}; /* The chunk dimesion sizes */ + hsize_t chunk_dims[2] = {5, 2}; /* The chunk dimension sizes */ TESTING("H5Fget/set_append_flush() for a chunked dataset's access property list"); @@ -4498,7 +4498,7 @@ test_append_flush_dataset_multiple(hid_t in_fapl) hsize_t dims[2] = {0, 0}; /* The dataset dimension sizes */ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* The dataset maximum dimension sizes */ - hsize_t chunk_dims[2] = {5, 2}; /* The chunk dimesion sizes */ + hsize_t chunk_dims[2] = {5, 2}; /* The chunk dimension sizes */ TESTING("H5Fget/set_append_flush() for multiple opens of a chunked dataset"); diff --git a/test/tfile.c b/test/tfile.c index 3e1fa852246..1c5196acf60 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -2602,7 +2602,7 @@ test_file_double_file_dataset_open(bool new_format) hsize_t e_ext_dims[1] = {7}; /* Expanded dimension sizes */ hsize_t s_ext_dims[1] = {3}; /* Shrunk dimension sizes */ hsize_t max_dims0[1] = {8}; /* Maximum dimension sizes */ - hsize_t max_dims1[1] = {H5S_UNLIMITED}; /* Maximum dimesion sizes for extensible array index */ + hsize_t max_dims1[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes for extensible array index */ hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes for v2 B-tree index */ hsize_t chunks[1] = {2}, chunks2[2] = {4, 5}; /* Chunk dimension sizes */ hsize_t size; /* File size */ diff --git a/test/tselect.c b/test/tselect.c index f3d08efdf93..55599b3324e 100644 --- a/test/tselect.c +++ b/test/tselect.c @@ -11323,7 +11323,7 @@ test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, i /* Now select the checkerboard selection in the (possibly larger) n-cube. * * Since we have already calculated the base start, stride, count, - * and block, re-use the values in setting up start, stride, count, + * and block, reuse the values in setting up start, stride, count, * and block. */ for (i = 0; i < SS_DR_MAX_RANK; i++) { @@ -12763,7 +12763,7 @@ test_space_update_diminfo(void) { hid_t space_id; /* Dataspace id */ H5S_diminfo_valid_t diminfo_valid; /* Diminfo status */ - H5S_diminfo_valid_t rebuild_status; /* Diminfo status after rebuid */ + H5S_diminfo_valid_t rebuild_status; /* Diminfo status after rebuild */ H5S_sel_type sel_type; /* Selection type */ herr_t ret; /* Return value */ diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c index 31422d357a4..0da25b06463 100644 --- a/testpar/API/t_dset.c +++ b/testpar/API/t_dset.c @@ -3020,7 +3020,7 @@ none_selection_chunk(void) * * TEST_ACTUAL_IO_RESET: * Performs collective and then independent I/O with hthe same dxpl to - * make sure the peroperty is correctly reset to the default on each use. + * make sure the property is correctly reset to the default on each use. * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE * (The most complex case that works on all builds) and then performs * an independent read and write with the same dxpls. 
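For readers skimming this patch: every selection I/O switch exercised above is a dataset transfer property list (DXPL) setting. What follows is a minimal, hypothetical sketch (not part of the patch) of the DXPL setup the new test loops cycle through; the helper name make_selection_io_dxpl and the max_buf_size parameter are illustrative only, and an HDF5 1.14+ build is assumed.

    #include "hdf5.h"

    /* Sketch: configure a DXPL the way the new selection I/O test loops do.
     * max_buf_size is an illustrative cap on the type-conversion and
     * background buffers. */
    static hid_t
    make_selection_io_dxpl(size_t max_buf_size)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        if (dxpl < 0)
            return H5I_INVALID_HID;

        /* Request the selection I/O path instead of letting the library decide */
        if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0)
            goto error;

        /* Allow the library to modify the caller's write buffer in place */
        if (H5Pset_modify_write_buf(dxpl, true) < 0)
            goto error;

        /* Cap the conversion/background buffers so larger I/O must iterate */
        if (H5Pset_buffer(dxpl, max_buf_size, NULL, NULL) < 0)
            goto error;

        return dxpl;

    error:
        H5Pclose(dxpl);
        return H5I_INVALID_HID;
    }

Each test then hands the returned DXPL to H5Dread/H5Dwrite and closes it with H5Pclose when done.
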
diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c index fd6a8ddcc07..004ce1e35b2 100644 --- a/testpar/API/t_shapesame.c +++ b/testpar/API/t_shapesame.c @@ -2479,14 +2479,14 @@ do { good_data = false; } - /* zero out buffer for re-use */ + /* zero out buffer for reuse */ *val_ptr = 0; } else if (*val_ptr != 0) { good_data = false; - /* zero out buffer for re-use */ + /* zero out buffer for reuse */ *val_ptr = 0; } diff --git a/testpar/API/t_span_tree.c b/testpar/API/t_span_tree.c index a744905933a..e2f148c9e4b 100644 --- a/testpar/API/t_span_tree.c +++ b/testpar/API/t_span_tree.c @@ -1453,14 +1453,14 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr, good_data = false; } - /* zero out buffer for re-use */ + /* zero out buffer for reuse */ *val_ptr = 0; } else if (*val_ptr != 0) { good_data = false; - /* zero out buffer for re-use */ + /* zero out buffer for reuse */ *val_ptr = 0; } diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c index 9c9953cd38c..c2aac771b29 100644 --- a/testpar/t_2Gio.c +++ b/testpar/t_2Gio.c @@ -3323,7 +3323,7 @@ none_selection_chunk(void) * * TEST_ACTUAL_IO_RESET: * Performs collective and then independent I/O with the same dxpl to - * make sure the peroperty is correctly reset to the default on each use. + * make sure the property is correctly reset to the default on each use. * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE * (The most complex case that works on all builds) and then performs * an independent read and write with the same dxpls. diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c index c331f4c31cf..5de615038b2 100644 --- a/testpar/t_cache_image.c +++ b/testpar/t_cache_image.c @@ -1995,7 +1995,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank) * On failure, print an appropriate error message and * return false. * - * Return: true if succussful, false otherwise. + * Return: true if successful, false otherwise. * *------------------------------------------------------------------------- */ diff --git a/testpar/t_dset.c b/testpar/t_dset.c index 8998112328f..83d751120e6 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -2881,7 +2881,7 @@ none_selection_chunk(void) * * TEST_ACTUAL_IO_RESET: * Performs collective and then independent I/O with hthe same dxpl to - * make sure the peroperty is correctly reset to the default on each use. + * make sure the property is correctly reset to the default on each use. * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE * (The most complex case that works on all builds) and then performs * an independent read and write with the same dxpls. 
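The t_filters_parallel.c rework below threads a test_mode_t through every test so the same case can run against a single dataset (H5Dwrite/H5Dread) or several datasets at once (H5Dwrite_multi/H5Dread_multi). As a hypothetical sketch of the multi-dataset call shape the new write_datasets() helper wraps (two already-open datasets of H5T_NATIVE_INT sharing one memory dataspace; all names are illustrative, not the patch's code):

    #include "hdf5.h"

    /* Sketch: one H5Dwrite_multi call covering two open datasets, mirroring
     * what write_datasets() does for the USE_MULTIPLE_DATASETS mode. */
    static herr_t
    write_two_datasets(hid_t dset_ids[2], hid_t mspace_id, hid_t fspace_ids[2],
                       hid_t dxpl_id, const void *bufs[2])
    {
        hid_t mem_types[2]  = {H5T_NATIVE_INT, H5T_NATIVE_INT};
        hid_t mem_spaces[2] = {mspace_id, mspace_id};

        /* A single call covers both datasets; under collective I/O this is
         * what lets the library link the chunk I/O into one operation, the
         * condition verify_chunk_opt_status() later checks for. */
        return H5Dwrite_multi(2, dset_ids, mem_types, mem_spaces, fspace_ids,
                              dxpl_id, bufs);
    }
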
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 92c7f425a1d..198201abf7b 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -19,26 +19,22 @@ #include "t_filters_parallel.h" static const char *FILENAME[] = {"t_filters_parallel", NULL}; -char filenames[1][256]; +static char filenames[1][256]; -static MPI_Comm comm = MPI_COMM_WORLD; -static MPI_Info info = MPI_INFO_NULL; -static int mpi_rank; -static int mpi_size; +static MPI_Comm comm = MPI_COMM_WORLD; +static MPI_Info info = MPI_INFO_NULL; +static int mpi_rank = 0; +static int mpi_size = 0; int nerrors = 0; /* Arrays of filter ID values and filter names (should match each other) */ -H5Z_filter_t filterIDs[] = { +static H5Z_filter_t filterIDs[] = { H5Z_FILTER_DEFLATE, H5Z_FILTER_SHUFFLE, H5Z_FILTER_FLETCHER32, H5Z_FILTER_SZIP, H5Z_FILTER_NBIT, H5Z_FILTER_SCALEOFFSET, }; -const char *filterNames[] = {"Deflate", "Shuffle", "Fletcher32", "SZIP", "Nbit", "ScaleOffset"}; - -/* Function pointer typedef for test functions */ -typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id); +static const char *filterNames[] = {"Deflate", "Shuffle", "Fletcher32", "SZIP", "Nbit", "ScaleOffset"}; /* Typedef for filter arguments for user-defined filters */ typedef struct filter_options_t { @@ -47,6 +43,15 @@ typedef struct filter_options_t { const unsigned int cd_values[]; } filter_options_t; +/* Enum for running these tests in different modes */ +typedef enum test_mode_t { + USE_SINGLE_DATASET, /* Operate on a single dataset with H5Dwrite/read */ + USE_MULTIPLE_DATASETS, /* Operate on multiple datasets with H5Dwrite_multi/read_multi */ + USE_MULTIPLE_DATASETS_MIXED_FILTERED, /* Operate on multiple datasets with H5Dwrite_multi/read_multi + and with some of the datasets being unfiltered */ + TEST_MODE_SENTINEL +} test_mode_t; + /* * Enum for verify_space_alloc_status which specifies * how many chunks have been written to in a dataset @@ -58,102 +63,160 @@ typedef enum num_chunks_written_t { ALL_CHUNKS_WRITTEN } num_chunks_written_t; -static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options); -static herr_t verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written); +/* Function pointer typedef for test functions */ +typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id, test_mode_t test_mode); + +static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options); +static void verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, + num_chunks_written_t chunks_written); +static void verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id); +static const char *test_mode_to_string(test_mode_t test_mode); + +static void create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t filespace_id, + hid_t dcpl_id, test_mode_t test_mode, size_t *num_dsets, hid_t *dset_ids); +static void open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test_mode_t test_mode, + hid_t *dset_ids); +static void write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, + hid_t *fspace_ids, hid_t dxpl_id, const void **bufs, test_mode_t test_mode); +static void read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id, + hid_t dxpl_id, void **bufs, test_mode_t 
test_mode); + +static void select_hyperslab(size_t num_dsets, hid_t *dset_ids, hsize_t *start, hsize_t *stride, + hsize_t *count, hsize_t *block, hid_t *fspace_ids); +static void select_all(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids); +static void select_none(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids); +static void select_elements(size_t num_dsets, hid_t *dset_ids, size_t num_points, hsize_t *coords, + hid_t *fspace_ids); #ifdef H5_HAVE_PARALLEL_FILTERED_WRITES /* Tests for writing data in parallel */ static void test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t 
filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); #endif /* Tests for reading data in parallel */ static void test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void 
test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); static void test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode); /* * Tests for attempting to round-trip the data going from @@ -165,27 +228,25 @@ static void test_read_cmpd_filtered_dataset_type_conversion_shared(const char * * written in parallel -> read serially */ static void test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); #ifdef H5_HAVE_PARALLEL_FILTERED_WRITES static void test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); /* Other miscellaneous tests */ static void test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); static void test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); static void test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); -static void test_edge_chunks_partial_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); static void test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id); + hid_t dxpl_id, test_mode_t test_mode); static void test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); static void test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id); + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); #endif static test_func tests[] = { @@ -232,7 +293,6 @@ static test_func tests[] = { test_shrinking_growing_chunks, test_edge_chunks_no_overlap, test_edge_chunks_overlap, - test_edge_chunks_partial_write, test_fill_values, test_fill_value_undefined, test_fill_time_never, @@ -259,11 +319,12 
@@ set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_ case H5Z_FILTER_SZIP: { unsigned pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK; hsize_t chunk_dims[H5S_MAX_RANK] = {0}; - size_t i, chunk_nelemts; + size_t chunk_nelemts; VRFY(H5Pget_chunk(dcpl_id, H5S_MAX_RANK, chunk_dims) >= 0, "H5Pget_chunk succeeded"); - for (i = 0, chunk_nelemts = 1; i < H5S_MAX_RANK; i++) + chunk_nelemts = 1; + for (size_t i = 0; i < H5S_MAX_RANK; i++) if (chunk_dims[i] > 0) chunk_nelemts *= chunk_dims[i]; @@ -305,26 +366,37 @@ set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_ * based on the dataset's allocation time setting and how many chunks * in the dataset have been written to. */ -static herr_t -verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written) +static void +verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, + num_chunks_written_t chunks_written) { - int nfilters; - herr_t ret_value = SUCCEED; + H5D_space_status_t space_status; + H5D_alloc_time_t alloc_time; - VRFY(((nfilters = H5Pget_nfilters(dcpl_id)) >= 0), "H5Pget_nfilters succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + hid_t dset_dcpl; + int nfilters; - /* - * Only verify space allocation status when there are filters - * in the dataset's filter pipeline. When filters aren't in the - * pipeline, the space allocation time and status can vary based - * on whether the file was created in parallel or serial mode. - */ - if (nfilters > 0) { - H5D_space_status_t space_status; - H5D_alloc_time_t alloc_time; + /* Check if this particular dataset has any filters applied */ + dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]); + VRFY((dset_dcpl >= 0), "H5Dget_create_plist"); + + nfilters = H5Pget_nfilters(dset_dcpl); + VRFY((nfilters >= 0), "H5Pget_nfilters"); + + VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose"); + + /* + * Only verify space allocation status when there are filters + * in the dataset's filter pipeline. When filters aren't in the + * pipeline, the space allocation time and status can vary based + * on whether the file was created in parallel or serial mode. + */ + if (nfilters == 0) + return; VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); - VRFY((H5Dget_space_status(dset_id, &space_status) >= 0), "H5Dget_space_status succeeded"); + VRFY((H5Dget_space_status(dset_ids[dset_idx], &space_status) >= 0), "H5Dget_space_status succeeded"); switch (alloc_time) { case H5D_ALLOC_TIME_EARLY: @@ -347,7 +419,7 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED) || (space_status == H5D_SPACE_STATUS_PART_ALLOCATED), "verified space allocation status"); - else if (chunks_written == NO_CHUNKS_WRITTEN) + else if (chunks_written == NO_CHUNKS_WRITTEN) { /* * A special case where we wrote to a dataset that * uses late space allocation, but the write was @@ -358,6 +430,7 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu * been allocated. 
*/ VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status"); + } else VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status"); break; @@ -385,8 +458,388 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu MPI_Abort(MPI_COMM_WORLD, 1); } } +} + +/* + * Function to verify the status of the chunk I/O optimization method + * used when the multi-dataset I/O API routines were used. As long as + * multi-dataset I/O was actually performed, the library should return + * that linked-chunk I/O was performed. Otherwise, if datasets were + * processed one at a time, the library should return that multi-chunk + * I/O was performed. + */ +static void +verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id) +{ + H5D_mpio_actual_chunk_opt_mode_t chunk_opt_mode; + H5D_selection_io_mode_t sel_io_mode; + uint32_t no_sel_io_cause = 0; + herr_t ret; + + if (H5P_DEFAULT != dxpl_id) { + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_id, &chunk_opt_mode); + VRFY((ret >= 0), "H5Pget_mpio_actual_chunk_opt_mode succeeded"); + + ret = H5Pget_selection_io(dxpl_id, &sel_io_mode); + VRFY((ret >= 0), "H5Pget_selection_io succeeded"); + + if (sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) { + ret = H5Pget_no_selection_io_cause(dxpl_id, &no_sel_io_cause); + VRFY((ret >= 0), "H5Pget_no_selection_io_cause succeeded"); + } + + if (num_dsets == 0) { + /* + * num_dsets == 0 implies that the write call was expected to + * fail and did so. Verify that the library returns + * H5D_MPIO_NO_CHUNK_OPTIMIZATION as the chunk I/O optimization + * method + */ + VRFY((H5D_MPIO_NO_CHUNK_OPTIMIZATION == chunk_opt_mode), + "verified I/O optimization was H5D_MPIO_NO_CHUNK_OPTIMIZATION"); + } + else if (num_dsets == 1) { + /* + * If selection I/O is set to ON and was actually performed, just + * verify that the library returns that either linked-chunk or + * multi-chunk I/O was performed. Otherwise, any of the optimization + * methods could potentially be returned by the library. + */ + if ((sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) && + !no_sel_io_cause) { + VRFY((H5D_MPIO_NO_CHUNK_OPTIMIZATION != chunk_opt_mode), + "verified I/O optimization wasn't H5D_MPIO_NO_CHUNK_OPTIMIZATION"); + VRFY((H5D_MPIO_LINK_CHUNK == chunk_opt_mode || H5D_MPIO_MULTI_CHUNK == chunk_opt_mode), + "verified I/O optimization was linked-chunk I/O or multi-chunk I/O"); + } + } + else { + /* + * If selection I/O is set to ON and was actually performed, verify + * that the library returns that linked-chunk I/O was performed. + * Otherwise, any of the optimization methods could potentially be + * returned by the library. 
+ */ + if ((sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) && + !no_sel_io_cause) { + VRFY((H5D_MPIO_LINK_CHUNK == chunk_opt_mode), + "verified I/O optimization was linked-chunk I/O"); + } + } + } +} + +static const char * +test_mode_to_string(test_mode_t test_mode) +{ + switch (test_mode) { + case USE_SINGLE_DATASET: + return "USE_SINGLE_DATASET"; + case USE_MULTIPLE_DATASETS: + return "USE_MULTIPLE_DATASETS"; + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + return "USE_MULTIPLE_DATASETS_MIXED_FILTERED"; + case TEST_MODE_SENTINEL: + default: + return "INVALID"; + } +} + +/* + * Utility routine to create the datasets for each test, + * after adjusting for the current test mode + */ +static void +create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t filespace_id, hid_t dcpl_id, + test_mode_t test_mode, size_t *num_dsets, hid_t *dset_ids) +{ + const char *dset_name_ptr = NULL; + hid_t unfiltered_dcpl = H5I_INVALID_HID; + char dset_name_multi_buf[512]; + int n_dsets = 0; + int n_unfiltered = 0; + + VRFY((num_dsets != NULL), "verify num_dsets"); + VRFY((dset_ids != NULL), "verify dset_ids"); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + dset_ids[dset_idx] = H5I_INVALID_HID; + + switch (test_mode) { + case USE_SINGLE_DATASET: + dset_name_ptr = dset_name; + n_dsets = 1; + break; + + case USE_MULTIPLE_DATASETS: + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + dset_name_ptr = dset_name_multi_buf; + n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2; + + /* Select between 1 and (n_dsets - 1) datasets to NOT be filtered */ + if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) { + n_unfiltered = (rand() % (n_dsets - 1)) + 1; + + unfiltered_dcpl = H5Pcopy(dcpl_id); + VRFY((unfiltered_dcpl >= 0), "H5Pcopy succeeded"); + + VRFY((H5Premove_filter(unfiltered_dcpl, H5Z_FILTER_ALL) >= 0), "H5Premove_filter succeeded"); + } + break; + + case TEST_MODE_SENTINEL: + default: + if (MAINPROCESS) + printf("Invalid test mode\n"); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + for (size_t dset_idx = 0; dset_idx < (size_t)n_dsets; dset_idx++) { + hid_t curr_dcpl = dcpl_id; + + /* Add suffix to dataset name for multi-dataset tests */ + if (test_mode == USE_MULTIPLE_DATASETS || test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) + snprintf(dset_name_multi_buf, 512, "%s_%d", dset_name, (int)dset_idx); + + /* Determine if this should be an unfiltered dataset */ + if ((test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) && (n_unfiltered > 0)) { + size_t dsets_left = (size_t)n_dsets - dset_idx; + bool unfiltered; + + /* + * The number of unfiltered datasets should never be + * greater than the number of datasets left to create + */ + VRFY(((size_t)n_unfiltered <= dsets_left), "number of unfiltered datasets sanity check"); + + /* + * If the number of unfiltered datasets left is the + * same as the number of datasets left, create the + * remaining datasets as unfiltered datasets. Otherwise, + * randomly determine if a dataset will be unfiltered. 
+ */ + unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0); + + if (unfiltered) { + curr_dcpl = unfiltered_dcpl; + n_unfiltered--; + } + } + + dset_ids[dset_idx] = H5Dcreate2(parent_obj_id, dset_name_ptr, type_id, filespace_id, H5P_DEFAULT, + curr_dcpl, H5P_DEFAULT); + + VRFY((dset_ids[dset_idx] >= 0), "Dataset creation succeeded"); + } + + if (unfiltered_dcpl >= 0) + VRFY((H5Pclose(unfiltered_dcpl) >= 0), "H5Pclose succeeded"); + + *num_dsets = (size_t)n_dsets; +} + +/* + * Utility routine to open the datasets that were created + * for each test, after adjusting for the current test mode + */ +static void +open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test_mode_t test_mode, + hid_t *dset_ids) +{ + const char *dset_name_ptr = NULL; + char dset_name_multi_buf[512]; + + VRFY((dset_ids != NULL), "verify dset_ids"); + VRFY((num_dsets <= INT_MAX), "verify num_dsets value"); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + dset_ids[dset_idx] = H5I_INVALID_HID; + + switch (test_mode) { + case USE_SINGLE_DATASET: + dset_name_ptr = dset_name; + break; + + case USE_MULTIPLE_DATASETS: + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + dset_name_ptr = dset_name_multi_buf; + break; + + case TEST_MODE_SENTINEL: + default: + if (MAINPROCESS) + printf("Invalid test mode\n"); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + /* Add suffix to dataset name for multi-dataset tests */ + if (test_mode == USE_MULTIPLE_DATASETS || test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) + snprintf(dset_name_multi_buf, 512, "%s_%d", dset_name, (int)dset_idx); + + dset_ids[dset_idx] = H5Dopen2(parent_obj_id, dset_name_ptr, H5P_DEFAULT); + + VRFY((dset_ids[dset_idx] >= 0), "Dataset open succeeded"); + } +} + +/* + * Utility routine to write to the datasets that were created + * for each test, after adjusting for the current test mode + */ +static void +write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t *fspace_ids, + hid_t dxpl_id, const void **bufs, test_mode_t test_mode) +{ + hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mem_type_ids[dset_idx] = type_id; + mem_space_ids[dset_idx] = mspace_id; + } + + switch (test_mode) { + case USE_SINGLE_DATASET: + VRFY((H5Dwrite(dset_ids[0], type_id, mspace_id, fspace_ids[0], dxpl_id, bufs[0]) >= 0), + "Dataset write succeeded"); + break; + + case USE_MULTIPLE_DATASETS: + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + VRFY((H5Dwrite_multi(num_dsets, dset_ids, mem_type_ids, mem_space_ids, fspace_ids, dxpl_id, + bufs) >= 0), + "Dataset write succeeded"); + break; + + case TEST_MODE_SENTINEL: + default: + if (MAINPROCESS) + printf("Invalid test mode\n"); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + verify_chunk_opt_status(num_dsets, dxpl_id); +} + +/* + * Utility routine to read from the datasets that were created + * for each test, after adjusting for the current test mode + */ +static void +read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id, + hid_t dxpl_id, void **bufs, test_mode_t test_mode) +{ + hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_space_ids[MAX_NUM_DSETS_MULTI]; + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mem_type_ids[dset_idx] = type_id; + 
mem_space_ids[dset_idx] = mspace_id; + file_space_ids[dset_idx] = fspace_id; + } + + switch (test_mode) { + case USE_SINGLE_DATASET: + VRFY((H5Dread(dset_ids[0], type_id, mspace_id, fspace_id, dxpl_id, bufs[0]) >= 0), + "Dataset read succeeded"); + break; + + case USE_MULTIPLE_DATASETS: + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + VRFY((H5Dread_multi(num_dsets, dset_ids, mem_type_ids, mem_space_ids, file_space_ids, dxpl_id, + bufs) >= 0), + "Dataset read succeeded"); + break; + + case TEST_MODE_SENTINEL: + default: + if (MAINPROCESS) + printf("Invalid test mode\n"); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + verify_chunk_opt_status(num_dsets, dxpl_id); +} + +static void +select_hyperslab(size_t num_dsets, hid_t *dset_ids, hsize_t *start, hsize_t *stride, hsize_t *count, + hsize_t *block, hid_t *fspace_ids) +{ + VRFY((fspace_ids != NULL), "verify fspace_ids"); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + fspace_ids[dset_idx] = H5I_INVALID_HID; + + if (VERBOSE_MED) { + printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + fflush(stdout); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]); + VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(fspace_ids[dset_idx], H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + } +} + +static void +select_all(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids) +{ + VRFY((fspace_ids != NULL), "verify fspace_ids"); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + fspace_ids[dset_idx] = H5I_INVALID_HID; + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]); + VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_all(fspace_ids[dset_idx]) >= 0), "H5Sselect_all succeeded"); + } +} + +static void +select_none(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids) +{ + VRFY((fspace_ids != NULL), "verify fspace_ids"); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + fspace_ids[dset_idx] = H5I_INVALID_HID; + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]); + VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_none(fspace_ids[dset_idx]) >= 0), "H5Sselect_none succeeded"); + } +} + +static void +select_elements(size_t num_dsets, hid_t *dset_ids, size_t num_points, hsize_t *coords, hid_t *fspace_ids) +{ + VRFY((fspace_ids != NULL), "verify fspace_ids"); - return ret_value; + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + fspace_ids[dset_idx] = H5I_INVALID_HID; + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]); + VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_elements(fspace_ids[dset_idx], H5S_SELECT_SET, num_points, coords) >= 0), + "Point selection succeeded"); + } } #ifdef H5_HAVE_PARALLEL_FILTERED_WRITES @@ -397,11 +850,12 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, 
num_chunks_written_t chu */ static void test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t chunk_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t sel_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; @@ -409,10 +863,13 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil hsize_t stride[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to one-chunk filtered dataset"); @@ -434,9 +891,6 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil filespace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -446,12 +900,12 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id, + test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -467,73 +921,73 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil start[0] = ((hsize_t)mpi_rank * sel_dims[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File 
dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ data_size = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS * - (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = ((C_DATATYPE)i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * - WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + - ((C_DATATYPE)i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * - WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + ((C_DATATYPE)j % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + + ((C_DATATYPE)j / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + + (C_DATATYPE)dset_idx; + } - dset_id = H5Dopen2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - 
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -549,11 +1003,12 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil */ static void test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; @@ -561,10 +1016,13 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to unshared filtered chunks"); @@ -586,9 +1044,6 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -599,12 +1054,12 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, 
filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -621,70 +1076,69 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + - (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, 
num_dsets, test_mode, dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); + } - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -698,11 +1152,13 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi */ static void test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; @@ -710,8 +1166,11 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = 
H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -746,12 +1205,12 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -768,78 +1227,77 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * count[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); - - for (i = 0; i < (size_t)mpi_size; i++) { - size_t rank_n_elems = (size_t)(mpi_size * (WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * - WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS)); - size_t data_idx = i; - - for (size_t j = 0; j < rank_n_elems; j++) { - if ((j % WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS) == 0) { - correct_buf[(i * rank_n_elems) + j] = (C_DATATYPE)data_idx; - data_idx++; - } - } + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + 
write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); + + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, num_dsets, test_mode, + dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < (size_t)mpi_size; j++) { + size_t data_idx = j; + size_t rank_n_elems = (size_t)(mpi_size * (WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * + WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS)); + + for (size_t k = 0; k < rank_n_elems; k++) { + if ((k % WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS) == 0) { + correct_bufs[dset_idx][(j * rank_n_elems) + k] = (C_DATATYPE)(data_idx + dset_idx); + data_idx++; + } + } + } + } - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -856,11 +1314,12 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil */ static void test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE 
*correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; @@ -868,10 +1327,13 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to shared filtered chunks"); @@ -893,9 +1355,6 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -906,12 +1365,12 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -927,72 +1386,71 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte start[0] = (hsize_t)mpi_rank * block[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * 
sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = - (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + - (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - /* Verify correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + /* Verify the correct data was written */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) + + (j % dataset_dims[1]) + + (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % + dataset_dims[1]) + + dset_idx); + } - dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + 
free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -1009,10 +1467,12 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte */ static void test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t max_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; @@ -1021,8 +1481,11 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group hsize_t stride[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; - size_t i, data_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -1058,32 +1521,35 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - read_buf = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t j = 
0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; - for (i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) { - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + read_bufs[dset_idx] = tmp_buf; + } + + for (size_t i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) { /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ @@ -1097,56 +1563,55 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group start[0] = ((hsize_t)mpi_rank * block[0] * count[0]); start[1] = i * count[1] * block[1]; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], - block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids); - memset(read_buf, 255, data_size); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + memset(read_bufs[dset_idx], 255, data_size); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); /* Verify the correct data was written */ - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) { - /* Extend the dataset by count[1] chunks in the extensible dimension */ + /* Extend the dataset(s) by count[1] chunks in the extensible dimension */ dataset_dims[1] += count[1] * block[1]; - VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 
0, "H5Dset_extent succeeded"); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); } - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(data_bufs_nc[dset_idx]); } - if (data) - free(data); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -1163,10 +1628,12 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group */ static void test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t max_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; @@ -1175,8 +1642,11 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H hsize_t stride[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t count[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; - size_t i, data_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -1212,32 +1682,35 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + 
VRFY((NULL != tmp_buf), "calloc succeeded"); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - read_buf = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != read_buf), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) { - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + read_bufs[dset_idx] = tmp_buf; + } + for (size_t i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) { /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ @@ -1250,56 +1723,55 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H start[0] = (hsize_t)mpi_rank * block[0]; start[1] = i * count[1] * block[1]; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], - block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - dset_id = H5Dopen2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids); - memset(read_buf, 255, data_size); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + memset(read_bufs[dset_idx], 255, data_size); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - /* Verify correct data was written */ - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + /* Verify the correct data was written */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) { - /* Extend the dataset by count[1] chunks in the extensible dimension */ + /* Extend the dataset(s) by count[1] chunks in 
the extensible dimension */ dataset_dims[1] += count[1] * block[1]; - VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 0, "H5Dset_extent succeeded"); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); } - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(data_bufs_nc[dset_idx]); } - if (data) - free(data); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -1318,10 +1790,12 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H */ static void test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t max_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; @@ -1330,8 +1804,11 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, hsize_t stride[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; - size_t i, data_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -1367,40 +1844,38 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close 
succeeded"); - for (i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) { - C_DATATYPE *tmp_realloc = NULL; - size_t j; - + for (size_t i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) { /* Set selected dimensions */ sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS; /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - tmp_realloc = (C_DATATYPE *)realloc(data, data_size); - VRFY((NULL != tmp_realloc), "realloc succeeded"); - data = tmp_realloc; + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = realloc(data_bufs_nc[dset_idx], data_size); + VRFY((NULL != tmp_buf), "realloc succeeded"); - tmp_realloc = (C_DATATYPE *)realloc(read_buf, data_size); - VRFY((NULL != tmp_realloc), "realloc succeeded"); - read_buf = tmp_realloc; + for (size_t k = 0; k < data_size / sizeof(C_DATATYPE); k++) + tmp_buf[k] = (C_DATATYPE)(GEN_DATA(k) + dset_idx); - for (j = 0; j < data_size / sizeof(*data); j++) - data[j] = (C_DATATYPE)GEN_DATA(j); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + tmp_buf = realloc(read_bufs[dset_idx], data_size); + VRFY((NULL != tmp_buf), "realloc succeeded"); + + read_bufs[dset_idx] = tmp_buf; + } /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file @@ -1414,61 +1889,60 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, start[0] = ((hsize_t)mpi_rank * block[0] * count[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], - block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids); - memset(read_buf, 255, data_size); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + memset(read_bufs[dset_idx], 255, data_size); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, 
HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); /* Verify the correct data was written */ - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) { /* - * Extend the dataset by the size of one chunk per rank - * in the first extensible dimension. Extend the dataset + * Extend the dataset(s) by the size of one chunk per rank + * in the first extensible dimension. Extend the dataset(s) * by the size of chunk in the second extensible dimension. */ dataset_dims[0] += (hsize_t)mpi_size * block[0]; dataset_dims[1] += block[1]; - VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 0, "H5Dset_extent succeeded"); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); } - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(data_bufs_nc[dset_idx]); } - if (data) - free(data); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -1485,10 +1959,12 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, */ static void test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t max_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; @@ -1497,8 +1973,11 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 hsize_t stride[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t count[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; - size_t i, data_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -1534,40 +2013,38 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 /* Add test filter to the 
pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - for (i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) { - C_DATATYPE *tmp_realloc = NULL; - size_t j; - + for (size_t i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) { /* Set selected dimensions */ sel_dims[0] = (i + 1); sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - tmp_realloc = (C_DATATYPE *)realloc(data, data_size); - VRFY((NULL != tmp_realloc), "realloc succeeded"); - data = tmp_realloc; + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = realloc(data_bufs_nc[dset_idx], data_size); + VRFY((NULL != tmp_buf), "realloc succeeded"); - tmp_realloc = (C_DATATYPE *)realloc(read_buf, data_size); - VRFY((NULL != tmp_realloc), "realloc succeeded"); - read_buf = tmp_realloc; + for (size_t k = 0; k < data_size / sizeof(C_DATATYPE); k++) + tmp_buf[k] = (C_DATATYPE)(GEN_DATA(k) + dset_idx); - for (j = 0; j < data_size / sizeof(*data); j++) - data[j] = (C_DATATYPE)GEN_DATA(j); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + tmp_buf = realloc(read_bufs[dset_idx], data_size); + VRFY((NULL != tmp_buf), "realloc succeeded"); + + read_bufs[dset_idx] = tmp_buf; + } /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file @@ -1581,57 +2058,56 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], - block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; 
dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - dset_id = H5Dopen2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids); - memset(read_buf, 255, data_size); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + memset(read_bufs[dset_idx], 255, data_size); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - /* Verify correct data was written */ - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + /* Verify the correct data was written */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) { - /* Extend the dataset by the size of a chunk in each extensible dimension */ + /* Extend the dataset(s) by the size of a chunk in each extensible dimension */ dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS; dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; - VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 0, "H5Dset_extent succeeded"); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); } - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(data_bufs_nc[dset_idx]); } - if (data) - free(data); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -1650,11 +2126,13 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 */ static void test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; @@ -1662,11 +2140,13 @@ 
test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - size_t segment_length; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to filtered chunks with a single process having no selection"); @@ -1691,9 +2171,6 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi filespace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -1704,12 +2181,12 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -1726,80 +2203,85 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi start[0] = (hsize_t)mpi_rank * (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) - VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + select_none(num_dsets, dset_ids, fspace_ids); else - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); - correct_buf_size 
= dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); if (mpi_rank != WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) { - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); - } - - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + - (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + } - /* Compute the correct offset into the buffer for the process having no selection and clear it */ - segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size; - memset(correct_buf + ((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), - 0, segment_length * sizeof(*data)); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status - data should only have been written if MPI size > 1 */ - verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1 ? SOME_CHUNKS_WRITTEN : NO_CHUNKS_WRITTEN)); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, + (mpi_size > 1 ? 
SOME_CHUNKS_WRITTEN : NO_CHUNKS_WRITTEN));

-    if (data)
-        free(data);
+    /* Close and re-open datasets */
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+        VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");

-    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+    open_datasets(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+                  dset_ids);

     /* Verify the correct data was written */
-    read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
-    VRFY((NULL != read_buf), "calloc succeeded");
+    correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        size_t segment_length;
+
+        correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+        VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+        read_bufs[dset_idx] = calloc(1, correct_buf_size);
+        VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+        for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) {
+            correct_bufs[dset_idx][j] =
+                (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+                             (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx);
+        }
+
+        /* Compute the correct offset into the buffer for the process having no selection and clear it */
+        segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size;
+        memset(correct_bufs[dset_idx] +
+                   ((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+               0, segment_length * sizeof(C_DATATYPE));
+    }

-    dset_id = H5Dopen2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
-    VRFY((dset_id >= 0), "Dataset open succeeded");
+    read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);

-    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
-         "Dataset read succeeded");
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+        VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+             "Data verification succeeded");

-    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        free(read_bufs[dset_idx]);
+        free(correct_bufs[dset_idx]);
+    }

-    if (correct_buf)
-        free(correct_buf);
-    if (read_buf)
-        free(read_buf);
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+        VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+    }

     VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
-    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
-    VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-    VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
     VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
     VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1819,18 +2301,21 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi
  */
 static void
 test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
-                                             hid_t dcpl_id, hid_t dxpl_id)
+                                             hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
 {
-    C_DATATYPE *data = NULL;
-    C_DATATYPE *read_buf = NULL;
-    C_DATATYPE *correct_buf = NULL;
+    C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+    const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+    
void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to filtered chunks with all processes having no selection"); @@ -1846,14 +2331,10 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte dataset_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; chunk_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; chunk_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; - sel_dims[0] = sel_dims[1] = 0; filespace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -1864,65 +2345,73 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + select_none(num_dsets, dset_ids, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + 
data_bufs_nc[dset_idx] = tmp_buf;
+    }

-    for (i = 0; i < data_size / sizeof(*data); i++)
-        data[i] = (C_DATATYPE)GEN_DATA(i);
+    write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+                   test_mode);

-    VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
-         "Dataset write succeeded");
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+        free(data_bufs_nc[dset_idx]);

-    /* Verify space allocation status - no ranks should have written any data */
-    verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+    verify_space_alloc_status(num_dsets, dset_ids, plist_id, NO_CHUNKS_WRITTEN); /* no ranks wrote data */

-    if (data)
-        free(data);
+    /* Close and re-open datasets */
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+        VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");

-    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+    open_datasets(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+                  dset_ids);

     /* Verify the correct data was written */
-    read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
-    VRFY((NULL != read_buf), "calloc succeeded");
+    correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+        VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+        read_bufs[dset_idx] = calloc(1, correct_buf_size);
+        VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+    }

-    dset_id = H5Dopen2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
-    VRFY((dset_id >= 0), "Dataset open succeeded");
+    read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);

-    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
-         "Dataset read succeeded");
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+        VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+             "Data verification succeeded");

-    VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        free(read_bufs[dset_idx]);
+        free(correct_bufs[dset_idx]);
+    }

-    if (correct_buf)
-        free(correct_buf);
-    if (read_buf)
-        free(read_buf);
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+        VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+    }

     VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
-    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
-    VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-    VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
     VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
     VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1936,20 +2425,24 @@ test_write_filtered_dataset_all_no_selecte
  */
 static void
 test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
-                                            hid_t dcpl_id, hid_t dxpl_id)
+                                            hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
 {
-    C_DATATYPE *data = NULL;
-    C_DATATYPE *correct_buf = NULL;
-    C_DATATYPE *read_buf = NULL;
-    hsize_t *coords = NULL;
+    C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+    const void 
*data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + hsize_t *coords = NULL; hsize_t dataset_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t i, j, data_size, correct_buf_size; + size_t data_size, correct_buf_size; size_t num_points; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to filtered chunks with point selection"); @@ -1971,9 +2464,6 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -1984,87 +2474,96 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); /* Set up point selection */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - num_points = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size; coords = (hsize_t *)calloc(1, 2 * num_points * sizeof(*coords)); VRFY((NULL != coords), "Coords calloc succeeded"); - for (i = 0; i < num_points; i++) - for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) + for (size_t i = 0; i < num_points; i++) + for (size_t j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) coords[(i * WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] = (j > 0) ? 
(i % (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS) : ((hsize_t)mpi_rank + ((hsize_t)mpi_size * (i / (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS))); - VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0), - "Point selection succeeded"); + select_elements(num_dsets, dset_ids, num_points, coords, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = - (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + - (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) + + (j % dataset_dims[1]) + + (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % + dataset_dims[1]) + + dset_idx); + } - dset_id = H5Dopen2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, 
H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + free(coords); - if (coords) - free(coords); - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2082,11 +2581,12 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter */ static void test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; hsize_t chunk_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; hsize_t sel_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; @@ -2094,10 +2594,13 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt hsize_t stride[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing interleaved write to filtered chunks"); @@ -2119,9 +2622,6 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt filespace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -2132,12 +2632,12 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) 
>= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2155,80 +2655,81 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); - - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - /* Add Column Index */ - correct_buf[i] = - (C_DATATYPE)((i % (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS) + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - /* Add the Row Index */ - + ((i % (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / - (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS) + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - /* Add the amount that gets added when a rank moves down to its next section - vertically in the dataset */ - + ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * - (i / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)))); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + 
for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + /* Add the Column Index */ + (C_DATATYPE)((j % (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS) + + /* Add the Row Index */ + + ((j % (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / + (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS) + + /* Add the amount that gets added when a rank moves down to its next section + vertically in the dataset */ + + ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * + (j / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS))) + + /* Add an increment factor for the multi-dataset case */ + + dset_idx); + } - dset_id = H5Dopen2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2251,11 +2752,13 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt */ static void test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer 
pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; @@ -2263,10 +2766,13 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z hsize_t stride[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to unshared transformed and filtered chunks"); @@ -2288,9 +2794,6 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z filespace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -2301,12 +2804,12 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, - HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2324,37 +2827,21 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + 
select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); - - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + - (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } /* Create property list for data transform */ plist_id = H5Pcopy(dxpl_id); @@ -2363,41 +2850,57 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z /* Set data transform expression */ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, plist_id, data_bufs, + test_mode); - if (data) - free(data); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); - /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + /* Verify space allocation status */ + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), - "Dataset read succeeded"); + open_datasets(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); + + /* Verify the correct data was written */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (j / (dataset_dims[0] / (hsize_t)mpi_size * 
dataset_dims[1])) + dset_idx); + } - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - /* Verify space allocation status */ - plist_id = H5Dget_create_plist(dset_id); - VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2413,11 +2916,13 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z */ static void test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; @@ -2425,10 +2930,13 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to unshared filtered chunks on separate pages in 3D dataset"); @@ -2453,9 +2961,6 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation 
succeeded"); - memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -2466,12 +2971,12 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, - HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2493,71 +2998,69 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou start[1] = 0; start[2] = (hsize_t)mpi_rank; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE - ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], - start[2], block[0], block[1], block[2]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size)); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify 
space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, num_dsets, test_mode, + dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + (C_DATATYPE)((j % (hsize_t)mpi_size) + (j / (hsize_t)mpi_size) + dset_idx); + } + + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2573,11 +3076,13 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou */ static void test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t 
chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; @@ -2585,10 +3090,13 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to unshared filtered chunks on the same pages in 3D dataset"); @@ -2614,9 +3122,6 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -2627,12 +3132,12 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, - HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2653,72 +3158,69 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H start[1] = 0; start[2] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE - ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], - start[2], block[0], block[1], block[2]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); - correct_buf_size = 
dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) + - (i / (dataset_dims[0] * dataset_dims[1]))); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, num_dsets, test_mode, + dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = (C_DATATYPE)((j % (dataset_dims[0] * dataset_dims[1])) + + (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx); + } - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; 
dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2734,11 +3236,12 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H */ static void test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; @@ -2746,10 +3249,13 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to shared filtered chunks in 3D dataset"); @@ -2774,9 +3280,6 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -2787,12 +3290,12 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, 
DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -2812,86 +3315,86 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi start[1] = 0; start[2] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE - ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], - start[2], block[0], block[1], block[2]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); - - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); - - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - /* Add the Column Index */ - correct_buf[i] = (C_DATATYPE)((i % (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * - WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - /* Add the Row Index */ - + ((i % (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * - WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) / - (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * - WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - /* Add the amount that gets added when a rank moves down to its next - section vertically in the dataset */ - + ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * - WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) * - (i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * - WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)))); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, 
WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, num_dsets, test_mode, dset_ids); /* Verify the correct data was written */ - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + /* Add the Column Index */ + (C_DATATYPE)((j % (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the Row Index */ + + ((j % (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) / + (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the amount that gets added when a rank moves down to its next + section vertically in the dataset */ + + ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) * + (j / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))) + + /* Add an increment factor for the multi-dataset case */ + + dset_idx); + } - dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -2906,23 +3409,27 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi */ static void test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *data = NULL; - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void 
*data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - size_t i, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, - memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t memtype = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype " @@ -2946,17 +3453,11 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS; chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; - sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; - sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = - H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -2979,12 +3480,12 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); - dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, - memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3000,76 +3501,77 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group start[0] = 0; start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); - if 
(VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - data = (COMPOUND_C_DATATYPE *)calloc( - 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); - - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) { - data[i].field1 = (short)GEN_DATA(i); - data[i].field2 = (int)GEN_DATA(i); - data[i].field3 = (long)GEN_DATA(i); - } - - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { - correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1])); - - correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1])); + data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * + sizeof(COMPOUND_C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; + j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; j++) { + tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx); + } - correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1])); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } - VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); /* Verify the correct data was written */ - read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = 
(COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(COMPOUND_C_DATATYPE); j++) { + size_t val = (j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx; - dset_id = - H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + correct_bufs[dset_idx][j].field1 = (short)val; + correct_bufs[dset_idx][j].field2 = (int)val; + correct_bufs[dset_idx][j].field3 = (long)val; + } + } - VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -3085,23 +3587,27 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group */ static void test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *data = NULL; - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - size_t i, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, - memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = 
H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t memtype = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype " @@ -3125,17 +3631,11 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS; chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; - sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; - sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = - H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -3158,12 +3658,12 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); - dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3179,82 +3679,80 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - data = (COMPOUND_C_DATATYPE *)calloc( - 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); - - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), 
"calloc succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) { - data[i].field1 = (short)GEN_DATA(i); - data[i].field2 = (int)GEN_DATA(i); - data[i].field3 = (long)GEN_DATA(i); - } - - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { - correct_buf[i].field1 = - (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); - - correct_buf[i].field2 = - (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * + sizeof(COMPOUND_C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + j++) { + tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx); + } - correct_buf[i].field3 = - (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } - VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); /* Verify the correct data was written */ - read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + + for (size_t j = 0; j < correct_buf_size / sizeof(COMPOUND_C_DATATYPE); j++) { + size_t val1 = (dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))); + size_t val2 = (j % dataset_dims[1]); + size_t val3 = (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]); + size_t val = val1 + val2 + val3 + dset_idx; + + correct_bufs[dset_idx][j].field1 = 
(short)val; + correct_bufs[dset_idx][j].field2 = (int)val; + correct_bufs[dset_idx][j].field3 = (long)val; + } + } - dset_id = - H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -3276,23 +3774,27 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, */ static void test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *data = NULL; - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - size_t i, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, - filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; + hid_t filespace = 
H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype " @@ -3322,17 +3824,11 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS; chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; - sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; - sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, - sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -3363,12 +3859,12 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); - dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, - filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3384,73 +3880,147 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro start[0] = 0; start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); + + /* Fill data buffer */ + data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * + sizeof(COMPOUND_C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; + j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; j++) { + tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx); + } + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } - /* Select hyperslab in the file */ - filespace = 
H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + /* + * Ensure that this test currently fails in most cases since type + * conversions break collective mode when selection I/O is disabled + * and the library will currently disable selection I/O when filters + * are applied to a dataset. + */ + + /* NOTE: Once type conversions no longer break collective mode, remove + * the H5E_BEGIN/END_TRY block and switch to the following code instead + * of the H5Dwrite loop: + */ + /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, + dxpl_id, data_bufs, test_mode); */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + herr_t expected = FAIL; + herr_t ret; - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + /* + * Since this currently writes datasets one by one regardless of + * test mode, the write call could succeed if the dataset doesn't + * have any filters applied to it (can currently only happen when + * testing a mix of filtered and unfiltered datasets with the + * multi-dataset APIs). + */ + if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) { + hid_t dset_dcpl; + int nfilters; - data = (COMPOUND_C_DATATYPE *)calloc( - 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); + dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]); + VRFY((dset_dcpl >= 0), "H5Dget_create_plist"); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + nfilters = H5Pget_nfilters(dset_dcpl); + VRFY((nfilters >= 0), "H5Pget_nfilters"); - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + if (nfilters == 0) + expected = SUCCEED; - /* Fill data buffer */ - for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) { - data[i].field1 = (short)GEN_DATA(i); - data[i].field2 = (int)GEN_DATA(i); - data[i].field3 = (long)GEN_DATA(i); - } + VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose"); + } - /* Ensure that this test currently fails since type conversions break collective mode */ - H5E_BEGIN_TRY - { - VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write succeeded"); + if (expected == SUCCEED) + ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id, + data_bufs[dset_idx]); + else { + H5E_BEGIN_TRY + { + ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id, + data_bufs[dset_idx]); + } + H5E_END_TRY + } + + VRFY((ret == expected), "Dataset write"); + + if (expected == SUCCEED) + verify_chunk_opt_status(1, dxpl_id); + else + verify_chunk_opt_status(0, dxpl_id); } - H5E_END_TRY + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, NO_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); /* Verify that no data was written */ - VRFY((H5Dclose(dset_id) >= 0), "Dataset 
close succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } + + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + hid_t dset_dcpl; + int nfilters; + + dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]); + VRFY((dset_dcpl >= 0), "H5Dget_create_plist"); - dset_id = - H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + nfilters = H5Pget_nfilters(dset_dcpl); + VRFY((nfilters >= 0), "H5Pget_nfilters"); + + VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose"); + + /* + * TODO: For now, skip data verification for the datasets where + * writes with type conversion succeeded due to selection + * I/O being enabled. + */ + if (nfilters == 0) + continue; - VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); + } - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); @@ -3473,23 +4043,27 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro */ static void test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *data = NULL; - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; hsize_t 
start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - size_t i, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion"); @@ -3518,17 +4092,11 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS; chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS; chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; - sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; - sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = - H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -3559,12 +4127,12 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); - dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, - filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -3580,73 +4148,147 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); + select_hyperslab(num_dsets, 
dset_ids, start, stride, count, block, fspace_ids); + + /* Fill data buffer */ + data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * + sizeof(COMPOUND_C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; + j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; j++) { + tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx); + tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx); + } + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + /* + * Ensure that this test currently fails in most cases since type + * conversions break collective mode when selection I/O is disabled + * and the library will currently disable selection I/O when filters + * are applied to a dataset. + */ + + /* NOTE: Once type conversions no longer break collective mode, remove + * the H5E_BEGIN/END_TRY block and switch to the following code instead + * of the H5Dwrite loop: + */ + /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, + dxpl_id, data_bufs, test_mode); */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + herr_t expected = FAIL; + herr_t ret; - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + /* + * Since this currently writes datasets one by one regardless of + * test mode, the write call could succeed if the dataset doesn't + * have any filters applied to it (can currently only happen when + * testing a mix of filtered and unfiltered datasets with the + * multi-dataset APIs). 
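+         * (The DCPL check just below is what determines each write's
+         * expected status: H5Pget_nfilters() returning 0 marks a dataset
+         * as unfiltered, so its write is expected to succeed.)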
+ */ + if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) { + hid_t dset_dcpl; + int nfilters; - data = (COMPOUND_C_DATATYPE *)calloc( - 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); - VRFY((NULL != data), "calloc succeeded"); + dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]); + VRFY((dset_dcpl >= 0), "H5Dget_create_plist"); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + nfilters = H5Pget_nfilters(dset_dcpl); + VRFY((nfilters >= 0), "H5Pget_nfilters"); - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + if (nfilters == 0) + expected = SUCCEED; - /* Fill data buffer */ - for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) { - data[i].field1 = (short)GEN_DATA(i); - data[i].field2 = (int)GEN_DATA(i); - data[i].field3 = (long)GEN_DATA(i); - } + VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose"); + } - /* Ensure that this test currently fails since type conversions break collective mode */ - H5E_BEGIN_TRY - { - VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write succeeded"); + if (expected == SUCCEED) + ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id, + data_bufs[dset_idx]); + else { + H5E_BEGIN_TRY + { + ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id, + data_bufs[dset_idx]); + } + H5E_END_TRY + } + + VRFY((ret == expected), "Dataset write"); + + if (expected == SUCCEED) + verify_chunk_opt_status(1, dxpl_id); + else + verify_chunk_opt_status(0, dxpl_id); } - H5E_END_TRY + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, NO_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); /* Verify that no data was written */ - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - dset_id = - H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + hid_t dset_dcpl; + int nfilters; - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + dset_dcpl = 
H5Dget_create_plist(dset_ids[dset_idx]); + VRFY((dset_dcpl >= 0), "H5Dget_create_plist"); - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + nfilters = H5Pget_nfilters(dset_dcpl); + VRFY((nfilters >= 0), "H5Pget_nfilters"); + + VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose"); + + /* + * TODO: For now, skip data verification for the datasets where + * writes with type conversion succeeded due to selection + * I/O being enabled. + */ + if (nfilters == 0) + continue; + + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); @@ -3668,11 +4310,12 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group */ static void test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t chunk_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t sel_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; @@ -3681,12 +4324,16 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt hsize_t count[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from one-chunk filtered dataset"); @@ -3695,16 +4342,61 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt dataset_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), 
"calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = ((C_DATATYPE)i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = ((C_DATATYPE)j % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + + ((C_DATATYPE)j / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + - ((C_DATATYPE)i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * - READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)); + (C_DATATYPE)dset_idx; + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id, + test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -3721,52 +4413,37 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); - - VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); + open_datasets(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + 
select_all(num_dsets, dset_ids, fspace_ids); - dset_id = H5Dcreate2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids); sel_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size; sel_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS; @@ -3774,13 +4451,6 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -3794,61 +4464,54 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt start[0] = ((hsize_t)mpi_rank * sel_dims[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - read_buf_size = flat_dims[0] * sizeof(*read_buf); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf_size = flat_dims[0] * 
sizeof(C_DATATYPE); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)flat_dims[0]; + displs[i] = (int)(i * flat_dims[0]); + } - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0]); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, - displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -3867,11 +4530,12 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt */ static void test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void 
*read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; @@ -3880,12 +4544,16 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from unshared filtered chunks"); @@ -3894,15 +4562,61 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS * - (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf); + data_size = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS * + sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = + (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = 
(C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + - (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -3919,52 +4633,37 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); + select_all(num_dsets, dset_ids, fspace_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); sel_dims[0] = 
(hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS; @@ -3972,13 +4671,6 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and reads * it to the selection in memory @@ -3992,61 +4684,54 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - read_buf_size = flat_dims[0] * sizeof(*read_buf); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)flat_dims[0]; + displs[i] = (int)(i * flat_dims[0]); + } - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0]); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, - displs, C_DATATYPE_MPI, comm)), - 
"MPI_Allgatherv succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -4066,11 +4751,12 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil */ static void test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; @@ -4079,12 +4765,16 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter hsize_t count[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from shared filtered chunks"); @@ -4093,16 +4783,62 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = - (C_DATATYPE)((dataset_dims[1] * (i / 
((hsize_t)mpi_size * dataset_dims[1]))) + - (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) + + (j % dataset_dims[1]) + + (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % + dataset_dims[1]) + + dset_idx); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -4119,52 +4855,37 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); + select_all(num_dsets, dset_ids, fspace_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + 
test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR; @@ -4172,13 +4893,6 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -4192,77 +4906,71 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter start[0] = (hsize_t)mpi_rank * block[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, 
HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + global_buf = calloc(1, data_size); VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); /* * Since these chunks are shared, run multiple rounds of MPI_Allgatherv * to collect all of the pieces into their appropriate locations. The - * number of times MPI_Allgatherv is run should be equal to the number - * of chunks in the first dimension of the dataset. + * number of times MPI_Allgatherv is run for each dataset should be equal + * to the number of chunks in the first dimension of the dataset. */ - { - size_t loop_count = count[0]; + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { size_t total_recvcounts = 0; - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); - - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t j = 0; j < (size_t)mpi_size; j++) { + recvcounts[j] = (int)dataset_dims[1]; + total_recvcounts += (size_t)recvcounts[j]; - for (i = 0; i < (size_t)mpi_size; i++) { - recvcounts[i] = (int)dataset_dims[1]; - total_recvcounts += (size_t)recvcounts[i]; + displs[j] = (int)(j * dataset_dims[1]); } - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * dataset_dims[1]); + for (size_t loop_count = count[0]; loop_count; loop_count--) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)read_bufs[dset_idx]; + C_DATATYPE *tmp_glob_buf = (C_DATATYPE *)global_buf; - for (; loop_count; loop_count--) { - VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], - recvcounts[mpi_rank], C_DATATYPE_MPI, - &global_buf[(count[0] - loop_count) * total_recvcounts], - recvcounts, displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + mpi_code = + MPI_Allgatherv(&tmp_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], + C_DATATYPE_MPI, &tmp_glob_buf[(count[0] - loop_count) * total_recvcounts], + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); } + + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); } - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) 
>= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -4282,11 +4990,13 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter */ static void test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; @@ -4295,13 +5005,16 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil hsize_t count[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - size_t segment_length; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from filtered chunks with a single process having no selection"); @@ -4310,19 +5023,67 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil dataset_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + size_t segment_length; - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + - (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = + (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); + + /* Compute the correct offset into the buffer for the process having no selection and clear it */ + segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size; + memset(tmp_buf + ((size_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), + 0, segment_length * sizeof(C_DATATYPE)); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - /* Compute the correct offset into the buffer for the process having no 
selection and clear it */ - segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size; - memset(correct_buf + ((size_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), - 0, segment_length * sizeof(*correct_buf)); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -4339,53 +5100,39 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = - H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + open_datasets(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + select_all(num_dsets, dset_ids, fspace_ids); - VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, - HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - 
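The memset above pairs with the gather logic later in this test: the no-selection rank contributes zero elements to MPI_Allgatherv while the displacements keep their fixed stride, so the corresponding hole in the gathered buffer stays zero-filled and matches the cleared segment in the expected data. A standalone sketch of that pattern follows; the rank number, element counts, and MPI_INT (standing in for C_DATATYPE_MPI) are illustrative, not taken from this test.

#include <mpi.h>
#include <stdlib.h>

static void
gather_with_hole(MPI_Comm comm, int no_select_rank)
{
    int mpi_rank, mpi_size;

    MPI_Comm_rank(comm, &mpi_rank);
    MPI_Comm_size(comm, &mpi_size);

    int  local_val  = mpi_rank + 1;                           /* one element per rank */
    int *recvcounts = calloc((size_t)mpi_size, sizeof(int));
    int *displs     = calloc((size_t)mpi_size, sizeof(int));
    int *global_buf = calloc((size_t)mpi_size, sizeof(int));  /* zeros fill the hole */

    for (int i = 0; i < mpi_size; i++) {
        recvcounts[i] = (i == no_select_rank) ? 0 : 1; /* no data from this rank */
        displs[i]     = i;                             /* stride kept fixed */
    }

    /* The no-select rank passes a send count of 0, so its slot in
     * global_buf is never written and keeps its calloc'd zero value */
    MPI_Allgatherv(&local_val, recvcounts[mpi_rank], MPI_INT, global_buf, recvcounts, displs, MPI_INT,
                   comm);

    free(global_buf);
    free(displs);
    free(recvcounts);
}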
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); sel_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; sel_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; @@ -4396,13 +5143,6 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -4417,78 +5157,70 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil start[0] = (hsize_t)mpi_rank * (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) - VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + select_none(num_dsets, dset_ids, fspace_ids); else - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - read_buf_size = flat_dims[0] * sizeof(*read_buf); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) { - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, NULL) >= 0), - "Dataset 
read succeeded"); - } - else { - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + if (mpi_rank != READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) { + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } } - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS); + displs[i] = (int)(i * (size_t)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * + READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS)); + } + recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0; - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], 0, C_DATATYPE_MPI, global_buf, recvcounts, displs, + C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); + } + else { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); + } - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * (size_t)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * - READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS)); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } + + free(displs); + free(recvcounts); + free(global_buf); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs, - C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); - else - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, - recvcounts, displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); - - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); - - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - 
free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); - - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -4509,29 +5241,81 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil */ static void test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + puts("Testing read from filtered chunks with all processes having no selection"); + + dataset_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + /* Fill buffer with garbage data before write call */ + memset(tmp_buf, 255, data_size); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); - if (MAINPROCESS) - puts("Testing read from filtered chunks with all processes having no selection"); + VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); - dataset_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS; - dataset_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 
0), "Filter set"); - /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -4548,82 +5332,75 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); + open_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + select_all(num_dsets, dset_ids, fspace_ids); - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); - - VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + 
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); sel_dims[0] = sel_dims[1] = 0; - memspace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + select_none(num_dsets, dset_ids, fspace_ids); - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + /* Clear data buffer that will be used for comparison since + * no data should end up being read + */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + memset(data_bufs_nc[dset_idx], 0, data_size); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -4643,23 +5420,28 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter */ static void test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *global_buf = NULL; - hsize_t *coords = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; + hsize_t *coords = NULL; 
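The coords array declared here is filled in further below with (row, column) pairs flattened into a single hsize_t array: each rank selects whole rows of the dataset in round-robin order across ranks. A compact sketch of that layout for H5Sselect_elements(); the dimension sizes are illustrative, and fspace stands in for an open file dataspace from the test harness.

size_t   ncols      = 4;                          /* stand-in for NCOLS */
size_t   num_points = (8 * 4) / (size_t)mpi_size; /* NROWS * NCOLS / mpi_size */
hsize_t *pts        = calloc(2 * num_points, sizeof(hsize_t));

for (size_t i = 0; i < num_points; i++) {
    pts[2 * i]     = (hsize_t)mpi_rank + ((hsize_t)mpi_size * (i / ncols)); /* row: rank, rank+size, ... */
    pts[2 * i + 1] = (hsize_t)(i % ncols);                                  /* column sweeps each row */
}

VRFY((H5Sselect_elements(fspace, H5S_SELECT_SET, num_points, pts) >= 0), "Point selection succeeded");
free(pts);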
hsize_t dataset_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, j, read_buf_size, correct_buf_size; + size_t data_size, read_buf_size; size_t num_points; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from filtered chunks with point selection"); @@ -4668,16 +5450,62 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ dataset_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) + + (j % dataset_dims[1]) + + (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % + dataset_dims[1]) + + dset_idx); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS; - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 
0), "File close succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = - (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + - (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -4694,52 +5522,39 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); + open_datasets(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + select_all(num_dsets, dset_ids, fspace_ids); - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); - - VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); sel_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size; sel_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; @@ -4747,92 +5562,87 @@ 
test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Set up point selection */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - num_points = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size; coords = (hsize_t *)calloc(1, 2 * num_points * sizeof(*coords)); VRFY((NULL != coords), "Coords calloc succeeded"); - for (i = 0; i < num_points; i++) - for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) + for (size_t i = 0; i < num_points; i++) + for (size_t j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) coords[(i * READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] = (j > 0) ? (i % (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS) : ((hsize_t)mpi_rank + ((hsize_t)mpi_size * (i / (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS))); - VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0), - "Point selection succeeded"); + select_elements(num_dsets, dset_ids, num_points, coords, fspace_ids); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + global_buf = calloc(1, data_size); VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); /* * Since these chunks are shared, run multiple rounds of MPI_Allgatherv * to collect all of the pieces into their appropriate locations. The - * number of times MPI_Allgatherv is run should be equal to the number - * of chunks in the first dimension of the dataset. + * number of times MPI_Allgatherv is run for each dataset should be equal + * to the number of chunks in the first dimension of the dataset. 
*/ - { + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { size_t original_loop_count = dataset_dims[0] / (hsize_t)mpi_size; - size_t cur_loop_count = original_loop_count; size_t total_recvcounts = 0; - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); + for (size_t j = 0; j < (size_t)mpi_size; j++) { + recvcounts[j] = (int)dataset_dims[1]; + total_recvcounts += (size_t)recvcounts[j]; - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); - - for (i = 0; i < (size_t)mpi_size; i++) { - recvcounts[i] = (int)dataset_dims[1]; - total_recvcounts += (size_t)recvcounts[i]; + displs[j] = (int)(j * dataset_dims[1]); } - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * dataset_dims[1]); + for (size_t cur_loop_count = original_loop_count; cur_loop_count; cur_loop_count--) { + C_DATATYPE *tmp_buf = read_bufs[dset_idx]; + C_DATATYPE *tmp_glob_buf = (C_DATATYPE *)global_buf; - for (; cur_loop_count; cur_loop_count--) { - VRFY((MPI_SUCCESS == - MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], - recvcounts[mpi_rank], C_DATATYPE_MPI, - &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts], - recvcounts, displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + mpi_code = MPI_Allgatherv( + &tmp_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], recvcounts[mpi_rank], + C_DATATYPE_MPI, &tmp_glob_buf[(original_loop_count - cur_loop_count) * total_recvcounts], + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); } + + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); } - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); free(coords); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -4855,11 +5665,12 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ */ static void test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t 
dataset_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; hsize_t chunk_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; hsize_t sel_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; @@ -4868,12 +5679,16 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter hsize_t count[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing interleaved read from filtered chunks"); @@ -4882,24 +5697,71 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter dataset_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = + /* Add the Column Index */ + (C_DATATYPE)((j % (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS) + + /* Add the Row Index */ + + ((j % (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) / + (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS) - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + /* Add the amount that gets added when a rank moves down to its next section + vertically in the dataset */ + + ((hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS * + (j / (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS))) - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - /* Add Column Index */ - correct_buf[i] = - (C_DATATYPE)((i % (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS) + /* Add an increment factor for the multi-dataset case */ + + dset_idx); - /* Add the Row Index */ - + ((i % (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) / - (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS) + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); - /* Add the 
amount that gets added when a rank moves down to its next section - vertically in the dataset */ - + ((hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS * - (i / (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)))); + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id, + test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -4916,52 +5778,37 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); + open_datasets(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids); - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS; - chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + select_all(num_dsets, dset_ids, fspace_ids); - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); - - VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + 
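The shared-chunk read tests in this area reassemble their verification data with one MPI_Allgatherv per chunk-row: in round r, every rank contributes its r-th locally read row, and rank j's row lands at offset j * ncols within that round's slice of the global buffer. Below is a minimal standalone sketch of that pattern, assuming one long per element; the helper name is invented here and is not part of the test harness.

#include <mpi.h>
#include <stdlib.h>

/* Gather row-interleaved local rows into a row-major global buffer.
 * local_rows holds local_nrows rows of ncols longs; call between
 * MPI_Init()/MPI_Finalize(). Hypothetical helper, not from the test. */
static void
gather_interleaved_rows(const long *local_rows, long *global_buf, int local_nrows, int ncols, MPI_Comm comm)
{
    int  comm_size;
    int *recvcounts, *displs;

    MPI_Comm_size(comm, &comm_size);

    recvcounts = malloc((size_t)comm_size * sizeof(int));
    displs     = malloc((size_t)comm_size * sizeof(int));

    for (int j = 0; j < comm_size; j++) {
        recvcounts[j] = ncols;     /* each rank sends exactly one row per round */
        displs[j]     = j * ncols; /* rank j's row goes j rows into the round's slice */
    }

    /* Round r fills global rows [r * comm_size, (r + 1) * comm_size) */
    for (int r = 0; r < local_nrows; r++)
        MPI_Allgatherv(&local_rows[(size_t)r * (size_t)ncols], ncols, MPI_LONG,
                       &global_buf[(size_t)r * (size_t)comm_size * (size_t)ncols], recvcounts, displs,
                       MPI_LONG, comm);

    free(displs);
    free(recvcounts);
}

The per-round receive offset r * comm_size * ncols corresponds to the (original_loop_count - cur_loop_count) * total_recvcounts term in the test code.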
MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids); sel_dims[0] = (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size); sel_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS; @@ -4969,13 +5816,6 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -4991,27 +5831,25 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + global_buf = calloc(1, data_size); VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); /* * Since these chunks are shared, run multiple rounds of MPI_Allgatherv @@ -5019,49 +5857,45 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter * number of times MPI_Allgatherv is run should be equal to the number * of chunks in the first dimension of the dataset. 
*/ - { - size_t loop_count = count[0]; + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { size_t total_recvcounts = 0; - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); - - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t j = 0; j < (size_t)mpi_size; j++) { + recvcounts[j] = (int)dataset_dims[1]; + total_recvcounts += (size_t)recvcounts[j]; - for (i = 0; i < (size_t)mpi_size; i++) { - recvcounts[i] = (int)dataset_dims[1]; - total_recvcounts += (size_t)recvcounts[i]; + displs[j] = (int)(j * dataset_dims[1]); } - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * dataset_dims[1]); + for (size_t loop_count = count[0]; loop_count; loop_count--) { + C_DATATYPE *tmp_buf = read_bufs[dset_idx]; + C_DATATYPE *tmp_glob_buf = (C_DATATYPE *)global_buf; - for (; loop_count; loop_count--) { - VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], - recvcounts[mpi_rank], C_DATATYPE_MPI, - &global_buf[(count[0] - loop_count) * total_recvcounts], - recvcounts, displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + mpi_code = + MPI_Allgatherv(&tmp_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], + C_DATATYPE_MPI, &tmp_glob_buf[(count[0] - loop_count) * total_recvcounts], + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); } + + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); } - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -5081,13 +5915,15 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter */ static void test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { MPI_Datatype vector_type; MPI_Datatype resized_vector_type; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t 
chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; @@ -5096,10 +5932,14 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int mpi_code; if (MAINPROCESS) puts("Testing read from unshared filtered chunks on separate pages in 3D dataset"); @@ -5109,13 +5949,59 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)((j % (hsize_t)mpi_size) + (j / (hsize_t)mpi_size) + dset_idx); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group 
close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size)); + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -5132,55 +6018,39 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = - H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; - chunk_dims[2] = 1; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, num_dsets, test_mode, + dset_ids); - VRFY( - (H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); + select_all(num_dsets, dset_ids, fspace_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, - HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, num_dsets, test_mode, + dset_ids); sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS; sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS; @@ -5189,13 +6059,6 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char 
*parent_group /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -5215,63 +6078,66 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group start[1] = 0; start[2] = (hsize_t)mpi_rank; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + global_buf = calloc(1, data_size); VRFY((NULL != global_buf), "calloc succeeded"); /* * Due to the nature of 3-dimensional reading, create an MPI vector type that allows each * rank to write to the nth position of the global data buffer, where n is the rank number. 
*/ - VRFY((MPI_SUCCESS == MPI_Type_vector((int)flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)), - "MPI_Type_vector succeeded"); - VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded"); + mpi_code = MPI_Type_vector((int)flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_vector succeeded"); + mpi_code = MPI_Type_commit(&vector_type); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded"); /* * Resize the type to allow interleaving, * so make it only one MPI_LONG wide */ - VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type)), - "MPI_Type_create_resized"); - VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded"); + mpi_code = MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_create_resized succeeded"); + mpi_code = MPI_Type_commit(&resized_vector_type); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded"); - VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1, - resized_vector_type, comm)), - "MPI_Allgather succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgather(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1, + resized_vector_type, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgather succeeded"); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded"); - VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded"); + mpi_code = MPI_Type_free(&vector_type); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded"); + mpi_code = MPI_Type_free(&resized_vector_type); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded"); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + free(global_buf); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -5298,11 +6164,13 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group */ static void test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] =
{0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; @@ -5311,12 +6179,16 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ hsize_t count[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t block[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from unshared transformed and filtered chunks"); @@ -5325,15 +6197,61 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ dataset_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS * - (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf); + data_size = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS * + (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = + (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < 
num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + - (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -5350,34 +6268,8 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = - H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); - - VRFY( - (H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, - HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); - - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + open_datasets(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); /* Create property list for collective dataset read */ plist_id = H5Pcreate(H5P_DATASET_XFER); @@ -5386,30 +6278,38 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ /* Set data transform expression */ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded"); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, correct_buf) >= 0), - "Dataset write succeeded"); + select_all(num_dsets, dset_ids, fspace_ids); + + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, plist_id, data_bufs, + test_mode); - VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); /* Verify space allocation status */ - plist_id = H5Dget_create_plist(dset_id); + plist_id = H5Dget_create_plist(dset_ids[0]); VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close
succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, + dset_ids); sel_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS; sel_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS; @@ -5417,13 +6317,6 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and reads * it to the selection in memory @@ -5438,16 +6331,7 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Create property list for data transform */ plist_id = H5Pcopy(dxpl_id); @@ -5456,50 +6340,52 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ /* Set data transform expression */ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded"); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); - - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), 
"calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)flat_dims[0]; + displs[i] = (int)(i * flat_dims[0]); + } - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0]); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, - displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -5521,11 +6407,13 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ */ static void test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; @@ -5534,12 +6422,16 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = 
H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from unshared filtered chunks on the same pages in 3D dataset"); @@ -5549,14 +6441,60 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)((j % (dataset_dims[0] * dataset_dims[1])) + + (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) + - (i / (dataset_dims[0] * dataset_dims[1]))); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -5573,55 +6511,39 @@ 
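The separate-pages read hunk above uses a different reassembly trick than the Allgatherv loops: element k of each rank's read buffer belongs at global index rank + k * mpi_size, so a strided MPI vector type whose extent is resized down to a single element lets one MPI_Allgather interleave all contributions. A minimal standalone sketch follows, assuming one long per element; the helper name is invented and is not part of the test harness.

#include <mpi.h>

/* Interleave each rank's n longs into global[rank], global[rank + comm_size], ...
 * Hypothetical helper illustrating the MPI_Type_create_resized() pattern. */
static void
gather_strided(const long *local, long *global, int n, MPI_Comm comm)
{
    MPI_Datatype vec_type, interleaved_type;
    int          comm_size;

    MPI_Comm_size(comm, &comm_size);

    /* n single-element blocks, comm_size elements apart in the global buffer */
    MPI_Type_vector(n, 1, comm_size, MPI_LONG, &vec_type);

    /* Shrink the extent to one long so consecutive ranks start one slot apart */
    MPI_Type_create_resized(vec_type, 0, (MPI_Aint)sizeof(long), &interleaved_type);
    MPI_Type_commit(&interleaved_type);

    /* Send n contiguous longs; receive one interleaved vector from each rank */
    MPI_Allgather(local, n, MPI_LONG, global, 1, interleaved_type, comm);

    MPI_Type_free(&interleaved_type);
    MPI_Type_free(&vec_type);
}

Without the resize, receive slot j would start j full-vector extents into the buffer instead of j elements in; shrinking the extent to one element is what makes rank j's contribution begin at global[j].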
test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = - H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; - chunk_dims[2] = 1; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); - - VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= - 0), - "Chunk size set"); + open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, num_dsets, test_mode, + dset_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + select_all(num_dsets, dset_ids, fspace_ids); - dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, - HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, num_dsets, test_mode, + dset_ids); sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; @@ -5630,13 +6552,6 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file 
*/ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -5655,61 +6570,54 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 start[1] = 0; start[2] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - read_buf_size = flat_dims[0] * sizeof(*read_buf); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf = (C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf_size = flat_dims[0] * sizeof(C_DATATYPE); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)flat_dims[0]; + displs[i] = (int)(i * flat_dims[0]); + } - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0]); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, - displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; 
dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -5730,13 +6638,14 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 */ static void test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { MPI_Datatype vector_type; MPI_Datatype resized_vector_type; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; - C_DATATYPE *global_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; @@ -5745,10 +6654,14 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int mpi_code; if (MAINPROCESS) puts("Testing read from shared filtered chunks in 3D dataset"); @@ -5758,28 +6671,78 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil dataset_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); + + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = + /* Add the Column Index */ + (C_DATATYPE)((j % (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the Row Index */ + + ((j % (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) / + (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the amount that gets added when a rank moves down to its next + section vertically in the dataset */ + + ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) * + (j / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))) + + 
/* Add an increment factor for the multi-dataset case */ + + dset_idx); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - /* Add the Column Index */ - correct_buf[i] = (C_DATATYPE)((i % (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * - READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - /* Add the Row Index */ - + ((i % (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * - READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) / - (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * - READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); - /* Add the amount that gets added when a rank moves down to its next - section vertically in the dataset */ - + ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * - READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) * - (i / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * - READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)))); + MPI_Barrier(comm); if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -5796,53 +6759,37 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; - chunk_dims[2] = 1; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, num_dsets, test_mode, dset_ids); - 
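The three-term index arithmetic above is easier to audit with the terms named. An equivalent self-contained formulation (a sketch: depth/ncols/nranks stand in for the READ_SHARED_FILTERED_CHUNKS_3D_* constants, and the test stores C_DATATYPE rather than long):

#include <stddef.h>

/* Expected value at flat index j of the gathered buffer when the ranks'
 * row sections interleave: column index + owning-rank row index +
 * per-vertical-section increment, plus the multi-dataset offset. */
static long
expected_value_3d(size_t j, size_t depth, size_t ncols, size_t nranks, size_t dset_idx)
{
    size_t row_len = depth * ncols;                      /* elements per row section */
    size_t col     = j % row_len;                        /* Column Index             */
    size_t row     = (j % (nranks * row_len)) / row_len; /* Row Index (owning rank)  */
    size_t vert    = row_len * (j / (nranks * row_len)); /* next vertical section    */

    return (long)(col + row + vert + dset_idx);
}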
VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); + select_all(num_dsets, dset_ids, fspace_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, num_dsets, test_mode, dset_ids); sel_dims[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size); sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS; @@ -5851,13 +6798,6 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -5875,26 +6815,20 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil start[1] = 0; start[2] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, 
count, block, fspace_ids);

-    read_buf_size = flat_dims[0] * sizeof(*read_buf);
+    read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);

-    read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
-    VRFY((NULL != read_buf), "calloc succeeded");
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        read_bufs[dset_idx] = calloc(1, read_buf_size);
+        VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+    }

-    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
-         "Dataset read succeeded");
+    read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+                  test_mode);

-    global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
+    /* Collect each piece of data from all ranks into a global buffer on all ranks */
+    global_buf = calloc(1, data_size);
     VRFY((NULL != global_buf), "calloc succeeded");

     {
@@ -5906,41 +6840,49 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil
         * Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
         * rank to write to the nth position of the global data buffer, where n is the rank number.
         */
-        VRFY(
-            (MPI_SUCCESS == MPI_Type_vector((int)num_blocks, (int)run_length,
-                                            (int)(mpi_size * (int)run_length), C_DATATYPE_MPI, &vector_type)),
-            "MPI_Type_vector succeeded");
-        VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+        mpi_code = MPI_Type_vector((int)num_blocks, (int)run_length, (int)(mpi_size * (int)run_length),
+                                   C_DATATYPE_MPI, &vector_type);
+        VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_vector succeeded");
+        mpi_code = MPI_Type_commit(&vector_type);
+        VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded");

         /*
          * Resize the type to allow interleaving,
          * so make it "run_length" MPI_LONGs wide
          */
-        VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint)(run_length * sizeof(long)),
-                                                     &resized_vector_type)),
-             "MPI_Type_create_resized");
-        VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+        mpi_code = MPI_Type_create_resized(vector_type, 0, (MPI_Aint)(run_length * sizeof(long)),
+                                           &resized_vector_type);
+        VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_create_resized succeeded");
+        mpi_code = MPI_Type_commit(&resized_vector_type);
+        VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded");
     }

-    VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
-                                       resized_vector_type, comm)),
-         "MPI_Allgatherv succeeded");
+    for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+        mpi_code = MPI_Allgather(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
+                                 resized_vector_type, comm);
+        VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgather succeeded");

-    VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+        VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+    }

-    VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
-    VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+    mpi_code = MPI_Type_free(&vector_type);
+    VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded");
+    mpi_code = MPI_Type_free(&resized_vector_type);
+    VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded");

-    if (global_buf)
-        free(global_buf);
-    if (read_buf)
-        free(read_buf);
-    if (correct_buf)
-        free(correct_buf);
+    free(global_buf);
+
+    for (size_t dset_idx = 0; dset_idx < num_dsets; 
dset_idx++) + free(read_bufs[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -5960,26 +6902,32 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil */ static void test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; - COMPOUND_C_DATATYPE *global_buf = NULL; - hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, - memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; - int *recvcounts = NULL; - int *displs = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t memtype = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype " @@ -5996,17 +6944,20 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, dataset_dims[1] = 
(hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { - correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1])); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1])); + for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) { + tmp_buf[j].field1 = (short)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx); + tmp_buf[j].field2 = (int)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx); + tmp_buf[j].field3 = (long)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx); + } - correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1])); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } /* Create the compound type for memory. */ @@ -6020,6 +6971,48 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); + if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); @@ -6035,55 +7028,38 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, group_id = 
H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, - dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; - chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); - VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, - chunk_dims) >= 0), - "Chunk size set"); + select_all(num_dsets, dset_ids, fspace_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, - memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = - H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; @@ -6091,13 +7067,6 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), 
"File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -6111,60 +7080,53 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, start[0] = 0; start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE); - read_buf = (COMPOUND_C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); - - VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); + + for (size_t i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + } - for (i = 0; i < (size_t)mpi_size; i++) - recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), + MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, - global_buf, recvcounts, displs, MPI_BYTE, comm)), - "MPI_Allgatherv succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); - if 
(displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -6185,26 +7147,32 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, */ static void test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; - COMPOUND_C_DATATYPE *global_buf = NULL; - hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, - memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; - int *recvcounts = NULL; - int *displs = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t memtype = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype " @@ -6221,23 +7189,25 @@ 
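For reference, the 3-D overlap read above interleaves each rank's runs in the gathered buffer by resizing a vector type to a one-run extent, so rank r's contribution starts r runs in. A standalone sketch of the same trick (MPI_LONG and the parameter names are illustrative):

#include <mpi.h>

/* Gather num_blocks runs of run_length longs from every rank so the
 * runs interleave by rank: the resized extent of one type element is
 * run_length longs, so rank r's data lands at offset r * run_length. */
static void
gather_interleaved(const long *mine, long *global, int num_blocks, int run_length,
                   int nranks, MPI_Comm comm)
{
    MPI_Datatype vec, interleaved;

    MPI_Type_vector(num_blocks, run_length, nranks * run_length, MPI_LONG, &vec);
    MPI_Type_create_resized(vec, 0, (MPI_Aint)((size_t)run_length * sizeof(long)), &interleaved);
    MPI_Type_commit(&interleaved);

    /* Send plain longs; receive exactly one resized vector per rank */
    MPI_Allgather(mine, num_blocks * run_length, MPI_LONG, global, 1, interleaved, comm);

    MPI_Type_free(&vec);
    MPI_Type_free(&interleaved);
}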
test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { - correct_buf[i].field1 = - (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) { + size_t val1 = (dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))); + size_t val2 = (j % dataset_dims[1]); + size_t val3 = (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]); + size_t val = val1 + val2 + val3 + dset_idx; - correct_buf[i].field2 = - (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + tmp_buf[j].field1 = (short)val; + tmp_buf[j].field2 = (int)val; + tmp_buf[j].field3 = (long)val; + } - correct_buf[i].field3 = - (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } /* Create the compound type for memory. 
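These compound tests each register the same three-field memory type. Assembled from scratch it looks like this (a sketch: the struct tag is a stand-in for the test's COMPOUND_C_DATATYPE, though the member names match the diff):

#include "hdf5.h"

typedef struct {
    short field1;
    int   field2;
    long  field3;
} cmpd_t; /* stand-in for COMPOUND_C_DATATYPE */

/* HOFFSET keeps member offsets in sync with the C struct layout,
 * padding included, so H5Dread/H5Dwrite can use the buffer directly. */
static hid_t
make_memtype(void)
{
    hid_t memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_t));

    H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_t, field1), H5T_NATIVE_SHORT);
    H5Tinsert(memtype, "IntData", HOFFSET(cmpd_t, field2), H5T_NATIVE_INT);
    H5Tinsert(memtype, "LongData", HOFFSET(cmpd_t, field3), H5T_NATIVE_LONG);

    return memtype; /* caller releases with H5Tclose */
}

The type-conversion variants pair this memory type with a packed big-endian file type (members at offsets 0, 8, and 16, each H5T_STD_I64BE), which is what forces a datatype conversion during I/O.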
*/ @@ -6251,6 +7221,48 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), "Datatype insertion succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = + H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); + if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); @@ -6266,55 +7278,38 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, - dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); - - VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, - chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, - memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); - - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); - VRFY((H5Sclose(filespace) >= 
0), "File dataspace close succeeded"); + select_all(num_dsets, dset_ids, fspace_ids); - VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = - H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; @@ -6322,13 +7317,6 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -6342,60 +7330,53 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE); - read_buf = (COMPOUND_C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); - - VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != 
read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); + + for (size_t i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + } - for (i = 0; i < (size_t)mpi_size; i++) - recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), + MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, - global_buf, recvcounts, displs, MPI_BYTE, comm)), - "MPI_Allgatherv succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -6416,26 +7397,32 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H */ static void test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; - COMPOUND_C_DATATYPE *global_buf = NULL; - hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t 
chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; - hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; - int *recvcounts = NULL; - int *displs = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype " @@ -6452,17 +7439,20 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); - - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { - correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1])); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1])); + for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) { + tmp_buf[j].field1 = (short)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx); + tmp_buf[j].field2 = (int)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx); + tmp_buf[j].field3 = (long)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx); + } - correct_buf[i].field3 = 
(long)((i % dataset_dims[1]) + (i / dataset_dims[1])); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } /* Create the compound type for memory. */ @@ -6484,6 +7474,48 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); + if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); @@ -6499,55 +7531,38 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, - dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; - chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; - - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, + num_dsets, test_mode, dset_ids); - VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, - chunk_dims) >= 0), - "Chunk size set"); + select_all(num_dsets, dset_ids, fspace_ids); - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, 
READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, - filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = - H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; @@ -6555,13 +7570,6 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -6575,60 +7583,53 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou start[0] = 0; start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); - if (VERBOSE_MED) { - printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf_size = flat_dims[0] * sizeof(*read_buf); + read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE); - read_buf = 
(COMPOUND_C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); - - VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) - recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + for (size_t i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + } - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), + MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, - global_buf, recvcounts, displs, MPI_BYTE, comm)), - "MPI_Allgatherv succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); @@ -6650,26 +7651,32 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou */ static void test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, - hid_t fapl_id, hid_t dcpl_id, 
hid_t dxpl_id) + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, + test_mode_t test_mode) { - COMPOUND_C_DATATYPE *read_buf = NULL; - COMPOUND_C_DATATYPE *correct_buf = NULL; - COMPOUND_C_DATATYPE *global_buf = NULL; - hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; - hsize_t flat_dims[1]; - size_t i, read_buf_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; - int *recvcounts = NULL; - int *displs = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts( @@ -6686,23 +7693,25 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS; /* Setup the buffer for writing and for comparison */ - correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); - correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { - correct_buf[i].field1 = - (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) { + size_t val1 = (dataset_dims[1] * (j / 
((hsize_t)mpi_size * dataset_dims[1]))); + size_t val2 = (j % dataset_dims[1]); + size_t val3 = (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]); + size_t val = val1 + val2 + val3 + dset_idx; - correct_buf[i].field2 = - (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + tmp_buf[j].field1 = (short)val; + tmp_buf[j].field2 = (int)val; + tmp_buf[j].field3 = (long)val; + } - correct_buf[i].field3 = - (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + - (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; } /* Create the compound type for memory. */ @@ -6724,6 +7733,48 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, + filespace, plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); + if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); @@ -6739,55 +7790,38 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, - dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); - - /* Create chunked dataset */ - chunk_dims[0] = 
(hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS; - chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + select_all(num_dsets, dset_ids, fspace_ids); - VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, - chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, - filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); - - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - - VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), - "Dataset write succeeded"); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } + MPI_Barrier(comm); + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = - H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, num_dsets, + test_mode, dset_ids); sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; @@ -6795,13 +7829,6 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ flat_dims[0] = sel_dims[0] * sel_dims[1]; - memspace = H5Screate_simple(1, flat_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - /* * Each process defines the dataset selection in the file and * reads it to the selection in memory @@ -6815,60 +7842,53 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d 
is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - read_buf_size = flat_dims[0] * sizeof(*read_buf); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - read_buf = (COMPOUND_C_DATATYPE *)calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE); - VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } - global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != global_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); /* Collect each piece of data from all ranks into a global buffer on all ranks */ - recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + global_buf = calloc(1, data_size); + VRFY((NULL != global_buf), "calloc succeeded"); + recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); VRFY((NULL != recvcounts), "calloc succeeded"); + displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) - recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + for (size_t i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)); + } - displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), + MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) - displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded"); + } - VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, - global_buf, recvcounts, displs, MPI_BYTE, comm)), - "MPI_Allgatherv succeeded"); + free(displs); + free(recvcounts); + free(global_buf); - VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(read_bufs[dset_idx]); - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (global_buf) - free(global_buf); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) + free(data_bufs_nc[dset_idx]); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } - 
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -6886,15 +7906,19 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, */ static void test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -6905,6 +7929,47 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id dataset_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NCOLS; dataset_dims[2] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_DEPTH; + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); + + /* Verify space allocation status */ + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + MPI_Barrier(comm); + /* Write the file on the MAINPROCESS rank */ if (MAINPROCESS) { /* Set up file access property list */ @@ -6922,66 +7987,58 
@@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - /* Create the dataspace for the dataset */ - chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS; - chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS; - chunk_dims[2] = 1; - - filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL); - VRFY((filespace >= 0), "File dataspace creation succeeded"); + open_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, num_dsets, test_mode, dset_ids); - /* Create chunked dataset */ - plist_id = H5Pcopy(dcpl_id); - VRFY((plist_id >= 0), "DCPL copy succeeded"); + data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); - VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0), - "Chunk size set"); - - /* Add test filter to the pipeline */ - VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - - dset_id = H5Dcreate2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); - - /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + select_all(num_dsets, dset_ids, fspace_ids); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + plist_id = H5Dget_create_plist(dset_ids[0]); + VRFY((plist_id >= 0), "H5Dget_create_plist succeeded"); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); } - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + MPI_Barrier(comm); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * 
sizeof(C_DATATYPE); - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (long)i; + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = (long)(j + dset_idx); + } /* All ranks open the file and verify their "portion" of the dataset is correct */ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); @@ -6990,20 +8047,22 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - if (correct_buf) - free(correct_buf); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -7021,11 +8080,12 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id */ static void test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, - hid_t dcpl_id, hid_t dxpl_id) + hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; hsize_t chunk_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; hsize_t sel_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; @@ -7033,10 +8093,13 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id hsize_t stride[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; - size_t i, data_size, correct_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t 
dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write file in parallel; read serially"); @@ -7061,9 +8124,6 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -7074,12 +8134,12 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -7099,48 +8159,42 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id offset[1] = 0; offset[2] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], offset[ %" PRIuHSIZE - ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE - " ]\n", - mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], - offset[2], block[0], block[1], block[2]); - fflush(stdout); - } + select_hyperslab(num_dsets, dset_ids, offset, stride, count, block, fspace_ids); - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); - VRFY((H5Dwrite(dset_id, 
HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + free(data_bufs_nc[dset_idx]); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (data) - free(data); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + MPI_Barrier(comm); + if (MAINPROCESS) { plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id >= 0), "FAPL creation succeeded"); @@ -7156,34 +8210,43 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); VRFY((group_id >= 0), "H5Gopen2 succeeded"); - dset_id = H5Dopen2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, num_dsets, test_mode, dset_ids); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE); - correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + correct_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, correct_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); - correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = + (C_DATATYPE)((j % (dataset_dims[0] * dataset_dims[1])) + + (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx); + } - read_buf = (C_DATATYPE *)calloc(1, correct_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_bufs, + test_mode); - for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) - correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) + - (i / (dataset_dims[0] * dataset_dims[1]))); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), + "Data verification succeeded"); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } - VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close 
succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); - - free(correct_buf); - free(read_buf); } + MPI_Barrier(comm); + return; } @@ -7196,21 +8259,25 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id */ static void test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id) + hid_t dxpl_id, test_mode_t test_mode) { - double *data = NULL; - double *read_buf = NULL; - hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; - hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; - hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; - hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; - hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; - size_t i, data_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + size_t data_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing continually shrinking/growing chunks"); @@ -7232,9 +8299,6 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, filespace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); - memspace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, sel_dims, NULL); - VRFY((memspace >= 0), "Memory dataspace creation succeeded"); - /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -7244,12 +8308,12 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace, plist_id, + test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -7266,68 +8330,74 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, start[0] = ((hsize_t)mpi_rank * 
(hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]); start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((dset_id >= 0), "File dataspace retrieval succeeded"); + data_size = sel_dims[0] * sel_dims[1] * sizeof(double); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + double *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - data_size = sel_dims[0] * sel_dims[1] * sizeof(double); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (double)(GEN_DATA(j) + dset_idx); - data = (double *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; - read_buf = (double *)calloc(1, data_size); - VRFY((NULL != read_buf), "calloc succeeded"); + tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) { - /* Continually write random float data, followed by zeroed-out data */ - if (i % 2) - memset(data, 0, data_size); - else { - size_t j; - for (j = 0; j < data_size / sizeof(*data); j++) { - data[j] = (rand() / (double)(RAND_MAX / (double)1.0L)); + read_bufs[dset_idx] = tmp_buf; + } + + for (size_t i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) { + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + /* Continually write random float data, followed by zeroed-out data */ + if (i % 2) + memset(data_bufs_nc[dset_idx], 0, data_size); + else { + double *tmp_buf = data_bufs_nc[dset_idx]; + + for (size_t k = 0; k < data_size / sizeof(double); k++) { + tmp_buf[k] = (rand() / (double)(RAND_MAX / (double)1.0L)); + } } } - VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - if (i % 2) { - memset(read_buf, 255, data_size); - } - else { - memset(read_buf, 0, data_size); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + if (i % 2) { + memset(read_bufs[dset_idx], 255, data_size); + } + else { + memset(read_bufs[dset_idx], 0, data_size); + } } - VRFY((H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "data verification succeeded"); + } - VRFY((0 == memcmp(read_buf, data, data_size)), "data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + 
free(read_bufs[dset_idx]); + free(data_bufs_nc[dset_idx]); } - if (read_buf) - free(read_buf); - if (data) - free(data); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -7346,10 +8416,11 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, */ static void test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id) + hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; @@ -7357,8 +8428,11 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi hsize_t stride[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t count[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t block[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; - size_t i, data_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -7392,12 +8466,12 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -7414,64 +8488,68 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi start[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], 
block[1]); - fflush(stdout); - } + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; - read_buf = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != read_buf), "calloc succeeded"); + tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + read_bufs[dset_idx] = tmp_buf; + } - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, + (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + /* Close and re-open datasets */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } /* Repeat the previous, but set option to not filter partial edge chunks */ if (MAINPROCESS) puts("Testing write to unshared unfiltered edge chunks"); + filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); - dset_id = H5Dcreate2(group_id, 
WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME, + filespace, plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -7488,48 +8566,43 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi start[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, + (mpi_size > 1) ? 
SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, num_dsets, test_mode, + dset_ids); - memset(read_buf, 255, data_size); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + memset(read_bufs[dset_idx], 255, data_size); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + free(read_bufs[dset_idx]); + free(data_bufs_nc[dset_idx]); + } - if (data) - free(data); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -7548,10 +8621,11 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi */ static void test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id) + hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t sel_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; @@ -7559,8 +8633,11 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t hsize_t stride[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t count[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; hsize_t block[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; - size_t i, data_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -7594,12 +8671,12 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t /* Add test filter to the pipeline */ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); - dset_id = H5Dcreate2(group_id, 
WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -7617,64 +8694,66 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t start[1] = (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; - read_buf = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != read_buf), "calloc succeeded"); + tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + read_bufs[dset_idx] = tmp_buf; + } - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, 
H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } /* Repeat the previous, but set option to not filter partial edge chunks */ if (MAINPROCESS) puts("Testing write to shared unfiltered edge chunks"); + filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); - dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME, - filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, + plist_id, test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); @@ -7692,73 +8771,47 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t start[1] = (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); - - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify the correct data was written */ - dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, num_dsets, test_mode, dset_ids); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + 
memset(read_bufs[dset_idx], 255, data_size); - memset(read_buf, 255, data_size); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, + test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), + "Data verification succeeded"); - VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + free(read_bufs[dset_idx]); + free(data_bufs_nc[dset_idx]); + } - if (data) - free(data); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); return; } -/* - * Tests that filtered and unfiltered partial edge chunks can be - * written to and read from correctly in parallel when only one - * MPI rank writes to a particular edge chunk in the dataset and - * only performs a partial write to the edge chunk. - * - * The dataset contains partial edge chunks in the second dimension. - * Each MPI rank selects a hyperslab in the shape of part of a single - * edge chunk and writes to just a portion of the edge chunk. - */ -static void -test_edge_chunks_partial_write(const char H5_ATTR_PARALLEL_UNUSED *parent_group, - H5Z_filter_t H5_ATTR_PARALLEL_UNUSED filter_id, - hid_t H5_ATTR_PARALLEL_UNUSED fapl_id, hid_t H5_ATTR_PARALLEL_UNUSED dcpl_id, - hid_t H5_ATTR_PARALLEL_UNUSED dxpl_id) -{ - /* TODO */ -} - /* * Tests that the parallel compression feature correctly handles * writing fill values to a dataset and reading fill values from @@ -7766,11 +8819,12 @@ test_edge_chunks_partial_write(const char H5_ATTR_PARALLEL_UNUSED *parent_group */ static void test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id) + hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *correct_buf = NULL; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; C_DATATYPE fill_value; hsize_t dataset_dims[FILL_VALUES_TEST_DATASET_DIMS]; hsize_t chunk_dims[FILL_VALUES_TEST_DATASET_DIMS]; @@ -7779,12 +8833,16 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id hsize_t stride[FILL_VALUES_TEST_DATASET_DIMS]; hsize_t count[FILL_VALUES_TEST_DATASET_DIMS]; hsize_t block[FILL_VALUES_TEST_DATASET_DIMS]; - size_t i, data_size, read_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = 
H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing fill values"); @@ -7819,32 +8877,35 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id fill_value = FILL_VALUES_TEST_FILL_VAL; VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set"); - dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, - plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id, + test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); /* Allocate buffer for reading entire dataset */ - read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - read_buf = calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); - - correct_buf = calloc(1, read_buf_size); - VRFY((NULL != correct_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + correct_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded"); + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } /* Read entire dataset and verify that the fill value is returned */ - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) - correct_buf[i] = FILL_VALUES_TEST_FILL_VAL; + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = FILL_VALUES_TEST_FILL_VAL; - VRFY((0 == memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)), + "Data verification succeeded"); + } /* * Write to part of the first chunk in the dataset with @@ -7862,44 +8923,35 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE 
*)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); + + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); /* * Each MPI rank communicates their written piece of data @@ -7911,16 +8963,22 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) { + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)(count[1] * block[1]); displs[i] = (int)(i * dataset_dims[1]); } - VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts, - displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI, + correct_bufs[dset_idx], recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - VRFY((0 == memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)), + "Data verification succeeded"); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); /* * Write to whole dataset and ensure fill value isn't returned @@ -7939,60 +8997,62 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id start[0] = (hsize_t)mpi_rank * block[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab 
selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) - VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = read_bufs[dset_idx]; + + for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) + VRFY((tmp_buf[j] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded"); + } - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } /******************************************************************** * Set the fill time to H5D_FILL_TIME_ALLOC and repeat the previous * ********************************************************************/ + filespace = H5Screate_simple(FILL_VALUES_TEST_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_ALLOC) >= 0), "H5Pset_fill_time succeeded"); - dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, - plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, plist_id, + test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); /* Read entire dataset and verify that the fill value is returned */ - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) - correct_buf[i] = FILL_VALUES_TEST_FILL_VAL; + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) + correct_bufs[dset_idx][j] = 
FILL_VALUES_TEST_FILL_VAL; - VRFY((0 == memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)), + "Data verification succeeded"); + } /* * Write to part of the first chunk in the dataset with @@ -8010,40 +9070,30 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = data_bufs_nc[dset_idx]; - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); + } - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - for (i = 0; i < (size_t)mpi_size; i++) { + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)(count[1] * block[1]); displs[i] = (int)(i * dataset_dims[1]); } @@ -8052,11 +9102,17 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id * Each MPI rank communicates their written piece of data * into each other rank's correctness-checking buffer */ - VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts, - displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI, + correct_bufs[dset_idx], recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); + + VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)), + "Data verification succeeded"); + } - VRFY((0 == 
memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); /* * Write to whole dataset and ensure fill value isn't returned @@ -8075,49 +9131,44 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id start[0] = (hsize_t)mpi_rank * block[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); - - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); - - for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) - VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded"); - - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (data) - free(data); - if (read_buf) - free(read_buf); - if (correct_buf) - free(correct_buf); + open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids); + + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = read_bufs[dset_idx]; + + for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) + VRFY((tmp_buf[j] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded"); + } + + free(displs); + free(recvcounts); + + for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) { + free(data_bufs_nc[dset_idx]); + free(read_bufs[dset_idx]); + free(correct_bufs[dset_idx]); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -8131,11 +9182,12 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id */ static void 
test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id) + hid_t dxpl_id, test_mode_t test_mode) { H5D_alloc_time_t alloc_time; - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; hsize_t chunk_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; hsize_t sel_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; @@ -8143,8 +9195,11 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ hsize_t stride[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; hsize_t count[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; hsize_t block[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; - size_t i, data_size, read_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; @@ -8182,37 +9237,93 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ /* Set an undefined fill value */ VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, NULL) >= 0), "Fill Value set"); - dset_id = H5Dcreate2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id, + test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + /* + * Since we aren't writing fill values to the chunks of the + * datasets we just created, close and re-open file to ensure + * that file size is updated so we don't read past the end of + * the file later if doing multi-dataset I/O. + */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); + /* Allocate buffer for reading entire dataset */ - read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - read_buf = calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } /* * Read entire dataset - nothing to verify since there's no fill value. 
- * If not using early space allocation, the read should fail since storage - * isn't allocated yet and no fill value is defined. + * If not using early space allocation, the read should fail for filtered + * datasets since storage isn't allocated yet and no fill value is defined. + * For unfiltered datasets, the library will still be forcing early space + * allocation in parallel, so the read should succeed in that case. */ if (alloc_time == H5D_ALLOC_TIME_EARLY) { - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, + test_mode); } else { - H5E_BEGIN_TRY - { - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) < 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + herr_t expected = FAIL; + herr_t ret; + + if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) { + hid_t dset_dcpl; + int nfilters; + + dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]); + VRFY((dset_dcpl >= 0), "H5Dget_create_plist"); + + nfilters = H5Pget_nfilters(dset_dcpl); + VRFY((nfilters >= 0), "H5Pget_nfilters"); + + if (nfilters == 0) + expected = SUCCEED; + + VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose"); + } + + if (expected == SUCCEED) + ret = H5Dread(dset_ids[dset_idx], HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, + read_bufs[dset_idx]); + else { + H5E_BEGIN_TRY + { + ret = H5Dread(dset_ids[dset_idx], HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, + read_bufs[dset_idx]); + } + H5E_END_TRY + } + + VRFY((ret == expected), "Dataset read"); + + if (expected == SUCCEED) + verify_chunk_opt_status(1, dxpl_id); + else + verify_chunk_opt_status(0, dxpl_id); } - H5E_END_TRY } /* @@ -8229,43 +9340,37 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - data = (C_DATATYPE *)calloc(1, data_size); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); - VRFY((NULL != data), "calloc succeeded"); + data_bufs[dset_idx] = tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id,
data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); /* * Write to whole dataset and ensure data is correct @@ -8284,40 +9389,33 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ start[0] = (hsize_t)mpi_rank * block[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - if (data) - free(data); - if (read_buf) - free(read_buf); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(data_bufs_nc[dset_idx]); + free(read_bufs[dset_idx]); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close 
succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -8331,11 +9429,12 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ */ static void test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, - hid_t dxpl_id) + hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *data = NULL; - C_DATATYPE *read_buf = NULL; - C_DATATYPE *fill_buf = NULL; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + C_DATATYPE *fill_buf = NULL; C_DATATYPE fill_value; hsize_t dataset_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS]; hsize_t chunk_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS]; @@ -8344,12 +9443,16 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap hsize_t stride[FILL_TIME_NEVER_TEST_DATASET_DIMS]; hsize_t count[FILL_TIME_NEVER_TEST_DATASET_DIMS]; hsize_t block[FILL_TIME_NEVER_TEST_DATASET_DIMS]; - size_t i, data_size, read_buf_size; - hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + size_t data_size, read_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; hid_t group_id = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; int *recvcounts = NULL; int *displs = NULL; + int mpi_code; if (MAINPROCESS) puts("Testing fill time H5D_FILL_TIME_NEVER"); @@ -8405,29 +9508,49 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap /* Set fill time of 'never' */ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_NEVER) >= 0), "H5Pset_fill_time succeeded"); - dset_id = H5Dcreate2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, - H5P_DEFAULT, plist_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset creation succeeded"); + /* Create datasets depending on the current test mode */ + create_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id, + test_mode, &num_dsets, dset_ids); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED); VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + /* + * Since we aren't writing fill values to the chunks of the + * datasets we just created, close and re-open file to ensure + * that file size is updated so we don't read past the end of + * the file later if doing multi-dataset I/O. 
+ */ + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); + /* Allocate buffer for reading entire dataset */ - read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - read_buf = calloc(1, read_buf_size); - VRFY((NULL != read_buf), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } fill_buf = calloc(1, read_buf_size); VRFY((NULL != fill_buf), "calloc succeeded"); /* Read entire dataset and verify that the fill value isn't returned */ - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) + for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++) fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL; /* @@ -8435,7 +9558,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap * values to all be the fill value, so this should be * a safe comparison in theory. */ - VRFY((0 != memcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded"); /* * Write to part of the first chunk in the dataset with @@ -8453,44 +9577,35 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap start[0] = (hsize_t)mpi_rank; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - /* Select hyperslab in the file */ - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "File dataspace retrieval succeeded"); - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); /* Fill data buffer */ - data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE); - data = (C_DATATYPE *)calloc(1, data_size); - VRFY((NULL != data), "calloc succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = calloc(1, data_size); + VRFY((NULL != tmp_buf), "calloc succeeded"); - for (i = 0; i < data_size / sizeof(*data); i++) - data[i] = (C_DATATYPE)GEN_DATA(i); + for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++) + tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); + + data_bufs[dset_idx] = 
tmp_buf; + data_bufs_nc[dset_idx] = tmp_buf; + } - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); + open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); /* * Each MPI rank communicates their written piece of data @@ -8502,21 +9617,26 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); VRFY((NULL != displs), "calloc succeeded"); - for (i = 0; i < (size_t)mpi_size; i++) { + for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)(count[1] * block[1]); displs[i] = (int)(i * dataset_dims[1]); } - VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, recvcounts, - displs, C_DATATYPE_MPI, comm)), - "MPI_Allgatherv succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, + recvcounts, displs, C_DATATYPE_MPI, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - /* - * It should be very unlikely for the dataset's random - * values to all be the fill value, so this should be - * a safe comparison in theory. - */ - VRFY((0 != memcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded"); + /* + * It should be very unlikely for the dataset's random + * values to all be the fill value, so this should be + * a safe comparison in theory. 
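+ * (Editorial note: the write buffers are filled from GEN_DATA(), which these tests appear to derive from the rand() stream seeded in main(), so a spurious match would require every element to equal the fill value.)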
+ */ + VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded"); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); /* * Write to whole dataset and ensure fill value isn't returned @@ -8535,49 +9655,45 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap start[0] = (hsize_t)mpi_rank * block[0]; start[1] = 0; - if (VERBOSE_MED) { - printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE - ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE - ", %" PRIuHSIZE " ]\n", - mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); - fflush(stdout); - } - - VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), - "Hyperslab selection succeeded"); + select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), - "Dataset write succeeded"); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, + test_mode); /* Verify space allocation status */ - verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); /* Verify correct data was written */ - dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT); - VRFY((dset_id >= 0), "Dataset open succeeded"); - - VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), - "Dataset read succeeded"); - - for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) - VRFY((read_buf[i] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded"); - - if (displs) - free(displs); - if (recvcounts) - free(recvcounts); - if (data) - free(data); - if (read_buf) - free(read_buf); - if (fill_buf) - free(fill_buf); + open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); + + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + C_DATATYPE *tmp_buf = read_bufs[dset_idx]; + + for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) + VRFY((tmp_buf[j] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded"); + } + + free(displs); + free(recvcounts); + + free(fill_buf); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + free(data_bufs_nc[dset_idx]); + free(read_bufs[dset_idx]); + } + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded"); + } VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); - VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); @@ -8588,28 +9704,44 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap int 
main(int argc, char **argv) { - size_t cur_filter_idx = 0; - size_t num_filters = 0; - hid_t file_id = H5I_INVALID_HID; - hid_t fcpl_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t fapl_id = H5I_INVALID_HID; - hid_t dxpl_id = H5I_INVALID_HID; - hid_t dcpl_id = H5I_INVALID_HID; - int mpi_code; + unsigned seed; + size_t cur_filter_idx = 0; + size_t num_filters = 0; + hid_t file_id = H5I_INVALID_HID; + hid_t fcpl_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + int mpi_code; /* Initialize MPI */ - MPI_Init(&argc, &argv); - MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); + if (MPI_SUCCESS != (mpi_code = MPI_Init(&argc, &argv))) { + printf("Failed to initialize MPI: MPI error code %d\n", mpi_code); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + if (MPI_SUCCESS != (mpi_code = MPI_Comm_size(comm, &mpi_size))) { + printf("Failed to retrieve MPI communicator size: MPI error code %d\n", mpi_code); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + if (MPI_SUCCESS != (mpi_code = MPI_Comm_rank(comm, &mpi_rank))) { + printf("Failed to retrieve MPI communicator rank: MPI error code %d\n", mpi_code); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } if (mpi_size <= 0) { if (MAINPROCESS) { printf("The Parallel Filters tests require at least 1 rank.\n"); printf("Quitting...\n"); + fflush(stdout); } - MPI_Abort(MPI_COMM_WORLD, 1); + MPI_Abort(MPI_COMM_WORLD, -1); } if (H5dont_atexit() < 0) { @@ -8631,6 +9763,30 @@ main(int argc, char **argv) TestAlarmOn(); + /* + * Obtain and broadcast seed value since ranks + * aren't guaranteed to arrive here at exactly + * the same time and could end up out of sync + * with each other in regards to random number + * generation + */ + if (MAINPROCESS) + seed = (unsigned)time(NULL); + + if (mpi_size > 1) { + if (MPI_SUCCESS != (mpi_code = MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, comm))) { + if (MAINPROCESS) + printf("MPI_Bcast failed with error code %d\n", mpi_code); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } + + srand(seed); + + if (MAINPROCESS) + printf("Using seed: %u\n\n", seed); + num_filters = ARRAY_SIZE(filterIDs); /* Set up file access property list with parallel I/O access, @@ -8642,7 +9798,6 @@ main(int argc, char **argv) VRFY((H5Pset_fapl_mpio(fapl_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "H5Pset_all_coll_metadata_ops succeeded"); VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "H5Pset_coll_metadata_write succeeded"); - VRFY((H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), "Set libver bounds succeeded"); @@ -8676,108 +9831,160 @@ main(int argc, char **argv) /* Run tests with all available filters */ for (cur_filter_idx = 0; cur_filter_idx < num_filters; cur_filter_idx++) { - H5FD_mpio_chunk_opt_t chunk_opt; - H5Z_filter_t cur_filter = filterIDs[cur_filter_idx]; - - /* Run tests with both linked-chunk and multi-chunk I/O */ - for (chunk_opt = H5FD_MPIO_CHUNK_ONE_IO; chunk_opt <= H5FD_MPIO_CHUNK_MULTI_IO; chunk_opt++) { - H5D_alloc_time_t space_alloc_time; - - /* Run tests with all available space allocation times */ - for (space_alloc_time = H5D_ALLOC_TIME_EARLY; space_alloc_time <= H5D_ALLOC_TIME_INCR; - space_alloc_time++) { - const char *alloc_time; - unsigned filter_config; - htri_t filter_avail; - size_t i; - char group_name[512]; - - switch (space_alloc_time) { - case 
H5D_ALLOC_TIME_EARLY: - alloc_time = "Early"; - break; - case H5D_ALLOC_TIME_LATE: - alloc_time = "Late"; - break; - case H5D_ALLOC_TIME_INCR: - alloc_time = "Incremental"; - break; - case H5D_ALLOC_TIME_DEFAULT: - case H5D_ALLOC_TIME_ERROR: - default: - alloc_time = "Unknown"; - } - - if (MAINPROCESS) - printf("== Running tests with filter '%s' using '%s' and '%s' allocation time ==\n\n", - filterNames[cur_filter_idx], - H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "Linked-Chunk I/O" : "Multi-Chunk I/O", - alloc_time); - - /* Make sure current filter is available before testing with it */ - filter_avail = H5Zfilter_avail(cur_filter); - VRFY((filter_avail >= 0), "H5Zfilter_avail succeeded"); - - if (!filter_avail) { - if (MAINPROCESS) - printf(" ** SKIPPED tests with filter '%s' - filter unavailable **\n\n", - filterNames[cur_filter_idx]); - continue; - } - - /* Get the current filter's info */ - VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), "H5Zget_filter_info succeeded"); - - /* Determine if filter is encode-enabled */ - if (0 == (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)) { - if (MAINPROCESS) - printf(" ** SKIPPED tests with filter '%s' - filter not encode-enabled **\n\n", - filterNames[cur_filter_idx]); - continue; - } - - /* Set space allocation time */ - VRFY((H5Pset_alloc_time(dcpl_id, space_alloc_time) >= 0), "H5Pset_alloc_time succeeded"); - - /* Set chunk I/O optimization method */ - VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0), - "H5Pset_dxpl_mpio_chunk_opt succeeded"); - - /* Create a group to hold all the datasets for this combination - * of filter and chunk optimization mode. Then, close the file - * again since some tests may need to open the file in a special - * way, like on rank 0 only */ - file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); - VRFY((file_id >= 0), "H5Fopen succeeded"); + H5D_selection_io_mode_t sel_io_mode; + + /* Run tests with different selection I/O modes */ + for (sel_io_mode = H5D_SELECTION_IO_MODE_DEFAULT; sel_io_mode <= H5D_SELECTION_IO_MODE_ON; + sel_io_mode++) { + H5FD_mpio_chunk_opt_t chunk_opt; + + /* Run tests with both linked-chunk and multi-chunk I/O */ + for (chunk_opt = H5FD_MPIO_CHUNK_ONE_IO; chunk_opt <= H5FD_MPIO_CHUNK_MULTI_IO; chunk_opt++) { + H5D_alloc_time_t space_alloc_time; + + /* Run tests with all available space allocation times */ + for (space_alloc_time = H5D_ALLOC_TIME_EARLY; space_alloc_time <= H5D_ALLOC_TIME_INCR; + space_alloc_time++) { + test_mode_t test_mode; + + /* Run with each of the test modes (single dataset, multiple datasets, etc.) 
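+ * (i.e., USE_SINGLE_DATASET, USE_MULTIPLE_DATASETS and USE_MULTIPLE_DATASETS_MIXED_FILTERED, iterating until TEST_MODE_SENTINEL)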
*/ + for (test_mode = USE_SINGLE_DATASET; test_mode < TEST_MODE_SENTINEL; test_mode++) { + H5Z_filter_t cur_filter = filterIDs[cur_filter_idx]; + const char *sel_io_str; + const char *alloc_time; + const char *mode; + unsigned filter_config; + htri_t filter_avail; + char group_name[512]; + + switch (sel_io_mode) { + case H5D_SELECTION_IO_MODE_DEFAULT: + sel_io_str = "default"; + break; + case H5D_SELECTION_IO_MODE_OFF: + sel_io_str = "off"; + break; + case H5D_SELECTION_IO_MODE_ON: + sel_io_str = "on"; + break; + default: + sel_io_str = "unknown"; + } + + switch (space_alloc_time) { + case H5D_ALLOC_TIME_EARLY: + alloc_time = "Early"; + break; + case H5D_ALLOC_TIME_LATE: + alloc_time = "Late"; + break; + case H5D_ALLOC_TIME_INCR: + alloc_time = "Incremental"; + break; + case H5D_ALLOC_TIME_DEFAULT: + case H5D_ALLOC_TIME_ERROR: + default: + alloc_time = "Unknown"; + } + + switch (test_mode) { + case USE_SINGLE_DATASET: + mode = "single"; + break; + case USE_MULTIPLE_DATASETS: + mode = "multi"; + break; + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + mode = "multi-mixed-filtered"; + break; + case TEST_MODE_SENTINEL: + default: + mode = "unknown"; + } - snprintf(group_name, sizeof(group_name), "%s_%s_%s", filterNames[cur_filter_idx], - H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "linked-chunk-io" : "multi-chunk-io", - alloc_time); - - group_id = H5Gcreate2(file_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((group_id >= 0), "H5Gcreate2 succeeded"); - - VRFY((H5Gclose(group_id) >= 0), "H5Gclose failed"); - group_id = H5I_INVALID_HID; - - VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); - file_id = H5I_INVALID_HID; - - for (i = 0; i < ARRAY_SIZE(tests); i++) { - test_func func = tests[i]; + if (MAINPROCESS) + printf("== Running tests in mode '%s' with filter '%s' using selection I/O mode " + "'%s', '%s' and '%s' allocation time ==\n\n", + test_mode_to_string(test_mode), filterNames[cur_filter_idx], sel_io_str, + H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "Linked-Chunk I/O" + : "Multi-Chunk I/O", + alloc_time); + + /* Make sure current filter is available before testing with it */ + filter_avail = H5Zfilter_avail(cur_filter); + VRFY((filter_avail >= 0), "H5Zfilter_avail succeeded"); + + if (!filter_avail) { + if (MAINPROCESS) + printf(" ** SKIPPED tests with filter '%s' - filter unavailable **\n\n", + filterNames[cur_filter_idx]); + continue; + } + + /* Get the current filter's info */ + VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), + "H5Zget_filter_info succeeded"); + + /* Determine if filter is encode-enabled */ + if (0 == (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)) { + if (MAINPROCESS) + printf( + " ** SKIPPED tests with filter '%s' - filter not encode-enabled **\n\n", + filterNames[cur_filter_idx]); + continue; + } + + /* Set space allocation time */ + VRFY((H5Pset_alloc_time(dcpl_id, space_alloc_time) >= 0), + "H5Pset_alloc_time succeeded"); + + /* Set selection I/O mode */ + VRFY((H5Pset_selection_io(dxpl_id, sel_io_mode) >= 0), + "H5Pset_selection_io succeeded"); + + /* Set chunk I/O optimization method */ + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + /* Create a group to hold all the datasets for this combination + * of filter and chunk optimization mode. 
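+ * (The group name assembled below encodes the filter, selection I/O mode, chunk I/O strategy, allocation time and test mode.)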
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 56e2396bee7..98e307772a9 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -2452,14 +2452,14 @@ do {
             good_data = false;
         }
 
-        /* zero out buffer for re-use */
+        /* zero out buffer for reuse */
         *val_ptr = 0;
     }
     else if (*val_ptr != 0) {
         good_data = false;
 
-        /* zero out buffer for re-use */
+        /* zero out buffer for reuse */
         *val_ptr = 0;
     }
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 9dd56367ab7..e4ff25836d7 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -1354,14 +1354,14 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
             good_data = false;
         }
 
-        /* zero out buffer for re-use */
+        /* zero out buffer for reuse */
         *val_ptr = 0;
     }
     else if (*val_ptr != 0) {
         good_data = false;
 
-        /* zero out buffer for re-use */
+        /* zero out buffer for reuse */
         *val_ptr = 0;
     }
diff --git a/tools/src/h5repack/h5repack_copy.c b/tools/src/h5repack/h5repack_copy.c
index 942e266b1a1..392191330d8 100644
--- a/tools/src/h5repack/h5repack_copy.c
+++ b/tools/src/h5repack/h5repack_copy.c
@@ -416,7 +416,7 @@ copy_objects(const char *fnamein, const char *fnameout, pack_opt_t *options)
  *
  * hslab_nbytes_p : [OUT] total byte of the hyperslab
  *
  * Update:
- * The hyperslab calucation would be depend on if the dataset is chunked
+ * The hyperslab calculation would depend on if the dataset is chunked
  * or not.
 *
 * There care 3 conditions to cover:
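On the comment corrected above: the hyperslab sizing differs for chunked versus contiguous layouts. A hedged sketch of that idea, under stated assumptions (pick_hslab_nbytes() and the 32 MiB ceiling are invented for illustration and are not h5repack's actual implementation):

#include "hdf5.h"

/* Hypothetical I/O ceiling for the sketch (h5tools defines its own constant) */
#define REPACK_IO_CEILING ((hsize_t)(32 * 1024 * 1024))

static hsize_t
pick_hslab_nbytes(hid_t dcpl_id, hsize_t chunk_nbytes, hsize_t elem_nbytes)
{
    hsize_t nbytes;

    if (H5D_CHUNKED == H5Pget_layout(dcpl_id)) {
        /* Chunked: use the largest whole-chunk multiple that fits */
        nbytes = (REPACK_IO_CEILING / chunk_nbytes) * chunk_nbytes;
        if (0 == nbytes)
            nbytes = chunk_nbytes; /* a single chunk exceeds the ceiling */
    }
    else {
        /* Contiguous/compact: cap at the ceiling, element-aligned */
        nbytes = (REPACK_IO_CEILING / elem_nbytes) * elem_nbytes;
    }

    return nbytes;
}

Aligning the hyperslab to whole chunks keeps each repack read/write from splitting chunks, which would otherwise force the filter pipeline to decode the same chunk more than once.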
diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c
index ddbd3af3c23..aed3eda04e2 100644
--- a/tools/test/h5dump/h5dumpgentest.c
+++ b/tools/test/h5dump/h5dumpgentest.c
@@ -11262,7 +11262,7 @@ gent_err_attr_dspace(void)
     hid_t   fcpl     = H5I_INVALID_HID; /* File access property list */
     hid_t   sid      = H5I_INVALID_HID; /* Dataspace identifier */
     hid_t   aid      = H5I_INVALID_HID; /* Attribute identifier */
-    hsize_t dims     = 2;               /* Dimensino size */
+    hsize_t dims     = 2;               /* Dimension size */
     int     wdata[2] = {7, 42};         /* The buffer to write */
     int     fd       = -1;              /* The file descriptor */
     char    val      = 6;               /* An invalid version */
diff --git a/tools/test/h5repack/h5repackgentest.c b/tools/test/h5repack/h5repackgentest.c
index 688ee699980..8fbfab5e420 100644
--- a/tools/test/h5repack/h5repackgentest.c
+++ b/tools/test/h5repack/h5repackgentest.c
@@ -16,7 +16,7 @@
 * + h5repack_<NAME>.h5
 * + h5repack_<NAME>_ex.h5
 * + h5repack_<NAME>_ex-<N>.dat
- * ...where NAME idenfities the type, and N is a positive decimal number;
+ * ...where NAME identifies the type, and N is a positive decimal number;
 * multiple external files (*.dat) are allowed per file, but they must
 * follow the pattern and be in contiguous numerical sequence starting at 0.
 *
diff --git a/tools/test/h5stat/CMakeTests.cmake b/tools/test/h5stat/CMakeTests.cmake
index 31b3ae34051..9035eafd29b 100644
--- a/tools/test/h5stat/CMakeTests.cmake
+++ b/tools/test/h5stat/CMakeTests.cmake
@@ -160,7 +160,7 @@
 ADD_H5_TEST (h5stat_notexist 1 notexist.h5)
 ADD_H5_TEST (h5stat_nofile 1 '')
 
-# Test file with groups, compressed datasets, user-applied fileters, etc.
+# Test file with groups, compressed datasets, user-applied filters, etc.
 # h5stat_filters.h5 is a copy of ../../testfiles/tfilters.h5 as of release 1.8.0-alpha4
 ADD_H5_TEST (h5stat_filters 0 h5stat_filters.h5)
 ADD_H5_TEST (h5stat_filters-file 0 -f h5stat_filters.h5)
diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in
index 1223c8fc5d4..7ce0ad495e3 100644
--- a/tools/test/h5stat/testh5stat.sh.in
+++ b/tools/test/h5stat/testh5stat.sh.in
@@ -256,7 +256,7 @@ TOOLTEST h5stat_help2.ddl --help
 TOOLTEST h5stat_notexist.ddl notexist.h5
 TOOLTEST h5stat_nofile.ddl ''
 
-# Test file with groups, compressed datasets, user-applied fileters, etc.
+# Test file with groups, compressed datasets, user-applied filters, etc.
 # h5stat_filters.h5 is a copy of ../../testfiles/tfilters.h5 as of release 1.8.0-alpha4
 TOOLTEST h5stat_filters.ddl h5stat_filters.h5
 TOOLTEST h5stat_filters-file.ddl -f h5stat_filters.h5
diff --git a/utils/mirror_vfd/mirror_writer.c b/utils/mirror_vfd/mirror_writer.c
index 28f7e1f251d..a5a1d27f90b 100644
--- a/utils/mirror_vfd/mirror_writer.c
+++ b/utils/mirror_vfd/mirror_writer.c
@@ -57,7 +57,7 @@
 *     guard against commands from the wrong entity.
 *
 * xmit_count (uint32_t)
- *     Record of trasmissions received from the Driver. While the transmission
+ *     Record of transmissions received from the Driver. While the transmission
 *     protocol should be trustworthy, this serves as an additional guard.
 *     Starts a 0 and should be incremented for each one-way transmission.
 *
@@ -728,7 +728,7 @@ do_write(struct mirror_session *session, const unsigned char *xmit_buf)
     addr = (haddr_t)xmit_write.offset;
     type = (H5FD_mem_t)xmit_write.type;
 
-    /* Allocate the buffer once -- re-use between loops.
+    /* Allocate the buffer once -- reuse between loops.
     */
    buf = (char *)malloc(sizeof(char) * H5FD_MIRROR_DATA_BUFFER_MAX);
    if (NULL == buf) {
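On the mirror writer's allocate-once comment: the buffer is malloc'd a single time at the protocol maximum and then reused for every chunk of the incoming write, which avoids a malloc/free pair per loop iteration. A self-contained sketch of that pattern (not the VFD's real socket loop; drain_payload(), recv_chunk(), and the DATA_BUFFER_MAX value are stand-ins for illustration):

#include <stdlib.h>

/* Stand-in for H5FD_MIRROR_DATA_BUFFER_MAX; value invented for the sketch */
#define DATA_BUFFER_MAX (4 * 1024 * 1024)

static int
drain_payload(size_t total, int (*recv_chunk)(char *dst, size_t cap))
{
    char  *buf  = malloc(DATA_BUFFER_MAX); /* allocate the buffer once */
    size_t done = 0;

    if (NULL == buf)
        return -1;

    while (done < total) {
        /* reuse the same buffer for every chunk of the transmission */
        int n = recv_chunk(buf, DATA_BUFFER_MAX);

        if (n <= 0) {
            free(buf);
            return -1;
        }
        done += (size_t)n;
    }

    free(buf);
    return 0;
}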