diff --git a/Makefile.am b/Makefile.am index ce5a01ff81..fd5330ac7d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -13,7 +13,7 @@ else JAVA_DIR= endif -SUBDIRS = hdf mfhdf $(JAVA_DIR) man +SUBDIRS = hdf mfhdf $(JAVA_DIR) # install libhdf4.settings in lib directory settingsdir = $(libdir) settings_DATA = libhdf4.settings diff --git a/README.md b/README.md index dc4a810b8e..cee7ae32b1 100644 --- a/README.md +++ b/README.md @@ -109,8 +109,6 @@ Source Distribution Layout mfhdf -- The netCDF(mfhdf) part of the HDF/mfhdf distribution and additional HDF utilities, such as hdp, hrepack, hdfimport, etc. - man -- An incomplete set of man page(s) for HDF. - release\_notes -- Installation instructions for UNIX and Windows. Descriptions of new features and bug fixes in this release. diff --git a/configure.ac b/configure.ac index a5f9544677..84eabc6e9d 100644 --- a/configure.ac +++ b/configure.ac @@ -1251,7 +1251,6 @@ AC_CONFIG_FILES([Makefile hdf/util/h4fc hdf/util/h4redeploy hdf/util/testutil.sh - man/Makefile mfhdf/fortran/ftest.f mfhdf/fortran/jackets.c mfhdf/fortran/netcdf.inc diff --git a/man/Makefile.am b/man/Makefile.am deleted file mode 100644 index a2c642fef8..0000000000 --- a/man/Makefile.am +++ /dev/null @@ -1,3 +0,0 @@ -include $(top_srcdir)/config/commence.am - -man1_MANS = hdf.1 diff --git a/man/gr_chunk.3 b/man/gr_chunk.3 deleted file mode 100644 index 26965ccf51..0000000000 --- a/man/gr_chunk.3 +++ /dev/null @@ -1,244 +0,0 @@ -.\" WARNING! THIS FILE WAS GENERATED AUTOMATICALLY BY c2man! -.\" DO NOT EDIT! CHANGES MADE TO THIS FILE WILL BE LOST! 
-.TH "mfhdf" 3 "16 October 1997" "c2man hproto.h" -.SH "NAME" -GRsetchunk, -GRgetchunkinfo, -GRetchunkcache \- GR Chunking Routines -.SH "SYNOPSIS" -.ft B -#include -.sp -extern intn GRsetchunk -.br -( -.br - int32 riid, -.br - HDF_CHUNK_DEF chunk_def, -.br - int32 flags -.br -); -.sp -extern intn GRgetchunkinfo -.br -( -.br - int32 riid, -.br - HDF_CHUNK_DEF *chunk_def, -.br - int32 *flags -.br -); -.sp -extern intn GRsetchunkcache -.br -( -.br - int32 riid, -.br - int32 maxcache, -.br - int32 flags -.br -); -.ft R -.SH "PARAMETERS" -.TP -.B "int32 riid" -IN: raseter access id. -.TP -.B "HDF_CHUNK_DEF chunk_def" -IN: chunk definition. -.TP -.BR "int32 flags" " (GRsetchunk)" -IN: flags. -.TP -.B "HDF_CHUNK_DEF *chunk_def" -IN/OUT: chunk definition. -.TP -.B "int32 *flags" -IN/OUT: flags. -.TP -.B "int32 maxcache" -IN: max number of chunks to cache. -.TP -.BR "int32 flags" " (GRsetchunkcache)" -IN: flags = 0, HDF_CACHEALL. -.SH "DESCRIPTION" -.SS "GRsetchunk" -This routine makes the GR a chunked GR according to the chunk -definition passed in. - -The dataset currently cannot be special already i.e. NBIT, -COMPRESSED, or EXTERNAL. This is an Error. - -The definition of the "HDF_CHUNK_DEF" union with relevant fields is: -.nf - -typedef union hdf_chunk_def_u -{ -int32 chunk_lengths[2]; Chunk lengths along each dimension. - { - int32 chunk_lengths[2]; Chunk lengths along each dimension. - int32 comp_type; Compression type - comp_info cinfo; Compression info struct - }comp; -} HDF_CHUNK_DEF -.fi - -The variable agruement 'flags' is a bit-or'd value which can currently be 'HDF_CHUNK' or 'HDF_CHUNK | HDF_COMP'. - -The simplest is the 'chunk_lengths' array specifying chunk -lengths for each dimension where the 'flags' argument set to -'HDF_CHUNK'; - -COMPRESSION is set by using the 'HDF_CHUNK_DEF' union to set the -appropriate compression information along with the required chunk lengths -for each dimension. 
The compression information is the same as -that set in 'GRsetcompress()'. The bit-or'd 'flags' argument' is set to -'HDF_CHUNK | HDF_COMP'. - -See the example in pseudo-C below for further usage. - -The maximum number of Chunks in an HDF file is 65,535. - -The performance of the GRxxx interface with chunking is greatly -affected by the users access pattern over the image and by -the maximum number of chunks set in the chunk cache. The cache contains -the Least Recently Used(LRU cache replacement policy) chunks. See the -routine GRsetchunkcache() for further info on the chunk cache and how -to set the maximum number of chunks in the chunk cache. A default chunk -cache is always created. - -The following example shows the organization of chunks for a 2D array. - -e.g. 4x4 array with 2x2 chunks. The array shows the layout of -chunks in the chunk array. - -.nf - - 4 --------------------- - | | | -Y | (0,1) | (1,1) | -^ | | | -| 2 --------------------- -| | | | -| | (0,0) | (1,0) | -| | | | -| --------------------- -| 0 2 4 ----------------> X - - --Without compression--: -{ - HDF_CHUNK_DEF chunk_def; - ....... - -- Set chunk lengths -- - chunk_def.chunk_lengths[0]= 2; - chunk_def.chunk_lengths[1]= 2; - - -- Set Chunking -- - GRsetchunk(riid, chunk_def, HDF_CHUNK); - ...... -} - - --With compression--: -{ - HDF_CHUNK_DEF chunk_def; - ....... - -- Set chunk lengths first -- - chunk_def.chunk_lengths[0]= 2; - chunk_def.chunk_lengths[1]= 2; - - -- Set compression -- - chunk_def.comp.cinfo.deflate.level = 9; - chunk_def.comp.comp_type = COMP_CODE_DEFLATE; - -- Set Chunking with Compression -- - GRsetchunk(riid, chunk_def, HDF_CHUNK | HDF_COMP); - ...... -}. -.fi - -.SS "GRgetchunkinfo" -This routine gets any special information on the GR. If its chunked, -chunked and compressed or just a regular GR. Currently it will only -fill the array of chunk lengths for each dimension as specified in -the "HDF_CHUNK_DEF" union. 
It does not tell you the type of compression -or the compression parameters used. You can pass in a NULL for "chunk_def" -if don't want the chunk lengths for each dimension. -Additionally if successful it will return a bit-or'd value in "flags" -indicating if the GR is: -.nf - - Chunked -> flags = HDF_CHUNK - Chunked and compressed -> flags = HDF_CHUNK | HDF_COMP - Non-chunked -> flags = HDF_NONE - -e.g. 4x4 array - Pseudo-C -{ - int32 rcdims[3]; - HDF_CHUNK_DEF rchunk_def; - int32 cflags; - ... - rchunk_def.chunk_lengths = rcdims; - GRgetchunkinfo(sdsid, &rchunk_def, &cflags); - ... -}. -.fi - -.SS "GRsetchunkcache" -Set the maximum number of chunks to cache. - -The cache contains the Least Recently Used(LRU cache replacement policy) -chunks. This routine allows the setting of maximum number of chunks that -can be cached, "maxcache". - -The performance of the GRxxx interface with chunking is greatly -affected by the users access pattern over the dataset and by -the maximum number of chunks set in the chunk cache. The number chunks -that can be set in the cache is process memory limited. It is a good -idea to always set the maximum number of chunks in the cache as the -default heuristic does not take into account the memory available for -the application. - -By default when the GR is created as a chunked element the -maximum number of chunks in the cache "maxcache" is set to the number of -chunks along the last dimension. - -The values set here affects the current object's caching behaviour. - -If the chunk cache is full and "maxcache" is greater then the -current "maxcache" value, then the chunk cache is reset to the -new "maxcache" value, else the chunk cache remains at the -current "maxcache" value. - -If the chunk cache is not full, then the chunk cache is set to the -new "maxcache" value only if the new "maxcache" value is greater than the -current number of chunks in the cache. 
- -Use flags argument of "HDF_CACHEALL" if the whole object is to be cached -in memory, otherwise pass in zero(0). Currently you can only -pass in zero. - -See GRsetchunk() for a description of the organization of chunks in a GR . - -.SH "RETURNS" -.SS "GRsetchunk" -SUCCEED/FAIL. -.SS "GRgetchunkinfo" -SUCCEED/FAIL. -.SS "GRsetchunkcache" -Returns the 'maxcache' value for the chunk cache if successful -and FAIL otherwise. -.SH "NAME" -.SS "GRsetchunk" -GRsetchunk -- make GR a chunked GR. -.SS "GRgetchunkinfo" -GRgetchunkinfo -- get Info on GR. -.SS "GRsetchunkcache" -GRsetchunkcache -- maximum number of chunks to cache. - - diff --git a/man/hdf.1 b/man/hdf.1 deleted file mode 100644 index 8255e9dd97..0000000000 --- a/man/hdf.1 +++ /dev/null @@ -1,170 +0,0 @@ -.TH HDF 1 "March 2023" "THG HDF 4.2.17-1" -.SH NAME -hdf \- Hierarchical Data Format library -.SH SYNOPSIS -.ft B --lmfhdf -ldf -ljpeg -lz [ -lsz ] -.sp 0 -{HDFLIBDIR}/libmfhdf.a {HDFLIBDIR}/libdf.a {JPEGLIBDIR}/libjpeg.a -.sp 0 -{GZIPLIBDIR}/libz.a [{SZIPLIBDIR}/libsz.a] - -.SH DESCRIPTION -HDF is a multi-object file format that facilitates the transfer of various -types of scientific data between machines and operating systems. See -http://www.hdfgroup.org/release4/platforms.html for a list of platforms -currently supported. - -HDF allows self-definitions of data content and easy extensibility for -future enhancements or compatibility with other standard formats. HDF -includes Fortran and C calling interfaces, and utilities for manipulating, -viewing, and analyzing data in HDF files. The HDF library contains -interfaces for storing and retrieving compressed or uncompressed 8-bit -and 24-bit raster images with palettes, n-Dimensional scientific datasets -and binary tables. An interface is also included that allows arbitrary -grouping of other HDF objects. - -.SS "HDF Raster Images" -HDF supports the storing of both 8-bit and 24-bit raster images. 
Beside -storing information about the dimensions and palette of a raster image, HDF -supports raster image compression. In previous versions of HDF -(DF interfaces,) Run-length encoding and Imcomp compression were both -supported. The currently supported compression methods include simple -RLE, N-bit, Skipping huffman, Gzip, Szip, and JPEG. Although no longer -support Imcomp compression, the library can read images with Imcomp compression. - -.SS "HDF Scientific Data Sets" -Scientific Data Sets (SDSs) are useful for storing n-Dimensional gridded data. -The actual data in the dataset can be of any of the "standard" number types: -8, 16 and 32bit signed and unsigned integers and 32 and 64bit floating point -values. In addition, a certain amount of meta-data can be stored with an -SDS including: -.nf - - o The coordinate system to use when interpreting or displaying the data. - o Scales to be used for each dimension. - o Labels for each dimension and the dataset as a whole. - o Units for each dimension and the data. - o The valid max and min values for the data. - o Calibration information for the data. - o Fill or missing value information. - o Ability of having more than one file open at a time. - o A more general framework for meta-data within the SDS data-model - (allowing 'name = value' style meta-data.) - o Support for an "unlimited dimension" in the SDS data-model, making - it possible to append planes to an array along one dimension. -.fi -.SS "HDF Annotations" -Any object in an HDF file can have annotations associated with it. There are a -number of types of annotations: -.nf - - o Labels are assumed to be short strings giving the "name" of a - data object. - o Descriptions are longer text segments that are useful for giving - more indepth information about a data object - o File annotations are assumed to apply to all of the objects in a - single file. -.fi -.SS "HDF Vset Interfaces" -The Vset module provides -interfaces to two basic HDF building blocks. 
Vgroups are generic grouping -elements allowing a user to associate related objects within an HDF file. As -Vgroups can contain other Vgroups, it is possible to build a hierarchical file. -Vdatas are data structures made up of fields and records. Data is organized into "fields" within each -Vdata. Each field is identified by a unique "fieldname". The type of each -field may be any of the basic number types that HDF supports. Fields of -different types may exist within the same Vdata. - -By combining Vdatas in Vgroups it is possible to represent higher level data -constructs: mesh data, multi-variate datasets, sparse matrices, finite-element -data, spreadsheets, splines, non-Cartesian coordinate data, etc. - -.SS "HDF and netCDF" -Starting with HDF version 3.3, netCDF v.2.3.2 of Unidata is supported with the -SD multifile interface. SD and netCDF interfaces can read both netCDF files -and multi-dimensional arrays (SDS) stored in the HDF4 files transparently. -For more information, see Chapter 3, "Scientific Data Sets", of the HDF -User's Guide. - -To disable netCDF interfaces in the HDF library, configure the library using ---disable-netcdf flag and rebuild it. - -.SS "EXAMPLES" -All HDF routines require the header "hdf.h" to be included in the C -source file, unless if the SD routines are used, then the header "mfhdf.h" -should be included instead. - -Fortran programs should use "dffunc.inc" for all interfaces, "mffunc.inc" -for the SD interfaces, and "hdf.inc" for non-SD interfaces. - -To compile a program that makes HDF calls on most Unix platforms. - - (FORTRAN): -.na - {HDFLIBDIR}/bin/h4fc myprog.f - - (C): -.na - {HDFLIBDIR}/bin/h4cc myprog.c - -.fi - -.SH DOCUMENTATION - -The HDF web site is located at http://www.hdfgroup.org/. - -For the vast majority of users, the "HDF User's Guide" and -"HDF Reference Manual" should be sufficient. - -These documents can be viewed or downloaded at -.sp 0 -http://www.hdfgroup.org/products/hdf4/. 
- - -.SH VENDOR -The HDF Group -.sp 0 -1800 South Oak Street, Suite 203 -.sp 0 -Champaign, IL 61820 -.sp 0 -USA -.sp 0 -www.hdfgroup.org - -.SH VERSION -4.2.17-1 currently under development -.SH LICENSE & SOURCE AVAILABILITY -Copyright by The HDF Group. -.sp 0 -Copyright by the Board of Trustees of the University of Illinois. - -All rights reserved. - -This file is part of HDF. The full HDF copyright notice, including -terms governing use, modification, and redistribution, is contained in -the COPYING file, which can be found at the root of the source code -distribution tree, or in https://support.hdfgroup.org/ftp/HDF/releases/. -.sp 0 -If you do not have access to either file, you may request a copy from -help@hdfgroup.org. - -.SH CONTACT & HELP -The HDF Group -.sp 0 -Email: help@hdfgroup.org - -.SH FILES -.PD 0 -.TP 30 -.B /usr/local/lib/hdf/{libmfhdf.a,libdf.a,libjpeg.a,libz.a [,libsz.a]} -hdf libraries -.TP 30 -.B /usr/local/bin -Location of most hdf utilities -.TP 30 -.B /usr/local/include/hdf -Location of include file hdf.h, mfhdf.h, and others -.PD - diff --git a/man/hdfunpac.1 b/man/hdfunpac.1 deleted file mode 100644 index 61d4b5ca6f..0000000000 --- a/man/hdfunpac.1 +++ /dev/null @@ -1,26 +0,0 @@ -.TH HDFUNPAC 1 "July 1995" "NCSA HDF 4.0" -.SH NAME -hdfunpac \- Unpack an HDF file -.SH SYNOPSIS -.B hdfunpac -[ -.B \-d -.I datafile -] -.I hdffile -.SH DESCRIPTION -.B hdfunpac -unpacks an HDF file by exporting the scientific data elements (DFTAG_SD) -to external object elements. -The external file is -.B DataFile -by default. -.SH OPTIONS -.TP -.BI \-d " datafile" -Use -.I datafile -as the external file name. Default is -.B DataFile. 
-.SH SEE ALSO -.BR hdf (1) diff --git a/release_notes/RELEASE.txt b/release_notes/RELEASE.txt index 9300354a0c..345f5df572 100644 --- a/release_notes/RELEASE.txt +++ b/release_notes/RELEASE.txt @@ -38,6 +38,14 @@ New features and changes ======================== Configuration: ------------- + - Man pages were removed + + The few man pages in the HDF4 library are woefully incomplete and/or + outdated. We have no resources to properly update them or to add the + missing ones. Instead, we will try to create better man pages from Doxygen. + + (BMR - 2024/02/04) + - Incorporated HDF4 examples repository into HDF4 library. The HDF4Examples folder is equivalent to the repository hdf4-examples.