This hands-on collection of C++ H5Pset_chunk code examples is intended to be practical; we hope it helps you.
This article collects typical usage examples of the C++ H5Pset_chunk function. If you have been wondering how to use H5Pset_chunk in C++, what it does, or what working code looks like, the curated examples below should help. A total of 28 code examples of H5Pset_chunk are shown, sorted by popularity by default.

Example 1: create_chunked_dataset

/*
 * This creates a dataset serially with 'nchunks' chunks, each of CHUNKSIZE
 * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
 * routine will open this in parallel for extension test.
 */
void
create_chunked_dataset(const char *filename, int nchunks, write_type write_pattern)
{
    hid_t file_id, dataset;                 /* handles */
    hid_t dataspace, memspace;
    hid_t cparms;
    hsize_t dims[1];
    hsize_t maxdims[1] = {H5S_UNLIMITED};
    hsize_t chunk_dims[1] = {CHUNKSIZE};
    hsize_t count[1];
    hsize_t stride[1];
    hsize_t block[1];
    hsize_t offset[1];                      /* Selection offset within dataspace */
    /* Variables used in reading data back */
    char buffer[CHUNKSIZE];
    int i;
    herr_t hrc;
    MPI_Offset filesize,                    /* actual file size */
               est_filesize;                /* estimated file size */

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* Only MAINPROCESS should create the file. Others just wait. */
    if (MAINPROCESS) {
        dims[0] = nchunks * CHUNKSIZE;

        /* Create the data space with unlimited dimensions. */
        dataspace = H5Screate_simple(1, dims, maxdims);
        VRFY((dataspace >= 0), "");

        memspace = H5Screate_simple(1, chunk_dims, NULL);
        VRFY((memspace >= 0), "");

        /* Create a new file. If file exists its contents will be overwritten. */
        file_id = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        VRFY((file_id >= 0), "H5Fcreate");

        /* Modify dataset creation properties, i.e. enable chunking */
        cparms = H5Pcreate(H5P_DATASET_CREATE);
        VRFY((cparms >= 0), "");

        hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
        VRFY((hrc >= 0), "");

        hrc = H5Pset_chunk(cparms, 1, chunk_dims);
        VRFY((hrc >= 0), "");

        /* Create a new dataset within the file using cparms creation properties. */
        dataset = H5Dcreate(file_id, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, cparms);
        VRFY((dataset >= 0), "");

        switch (write_pattern) {
            /* writes only the second to last chunk */
            case sec_last:
                memset(buffer, 100, CHUNKSIZE);

                count[0]  = 1;
                stride[0] = 1;
                block[0]  = chunk_dims[0];
                offset[0] = (nchunks - 2) * chunk_dims[0];

                hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
                VRFY((hrc >= 0), "");

                /* Write sec_last chunk */
                hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
                VRFY((hrc >= 0), "H5Dwrite");

                break;

            /* doesn't write anything */
            case none:
                break;
        }

        /* Close resources */
        hrc = H5Dclose(dataset);
        VRFY((hrc >= 0), "");
        dataset = -1;

        hrc = H5Sclose(dataspace);
        VRFY((hrc >= 0), "");

        hrc = H5Sclose(memspace);
        VRFY((hrc >= 0), "");
//......... some code omitted here .........

Developer ID: einon, Project: affymetrix-power-tools, Lines of code: 101
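For orientation before the longer project examples, here is a minimal, self-contained sketch of the pattern they all share: create a dataset creation property list, set a chunk shape with H5Pset_chunk (the rank must match the dataspace, and chunking is mandatory for datasets with unlimited dimensions), and pass the list to H5Dcreate2. The file name "example.h5" and dataset name "dset" are placeholders, not taken from any project quoted on this page.

#include "hdf5.h"

/* Minimal sketch: create a 2-D chunked (and extendible) dataset. */
int main(void)
{
    hsize_t dims[2]    = {100, 200};
    hsize_t maxdims[2] = {H5S_UNLIMITED, 200};   /* unlimited dims require chunking */
    hsize_t chunk[2]   = {20, 20};

    hid_t file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(2, dims, maxdims);

    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk);                /* rank must match the dataspace */

    hid_t dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}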
Example 2: PHDF5fileClass::WritePHDF5dataset

int PHDF5fileClass::WritePHDF5dataset(string grpname, string datasetname,
                                      double ***data, int nx, int ny, int nz)
{
    /* -------------------------- */
    /* Local variables and arrays */
    /* -------------------------- */
    string dname;
    double *buffer;
    hid_t const h5type = H5T_NATIVE_DOUBLE;
    hid_t glob_dspace;
    hid_t locl_dspace;
    hid_t dataset_prop;
    hid_t dataset;
    hid_t dataspace;
    hid_t dataset_xfer;

    /* --------------------------------- */
    /* Check that dimensions are correct */
    /* --------------------------------- */
    if (bparticles && grpname == "Particles") {   /* compare the string, not the pointer */
        cout << " WARNING(phdf5): Particle data is not going to be written as the 'bparticles' flag is currently turned to FALSE" << endl;
        return (2);
    }

    for (int i=0; i<ndim; i++) {
        if (dim[i]%chdim[i]!=0) {
            cout << " ERROR(phdf5): Grid size is not a multiple of the chunk size in the " << i << " dimension." << endl;
            cout << " Glob: " << dim[0]   << " " << dim[1]   << " " << dim[2]   << endl;
            cout << " Locl: " << chdim[0] << " " << chdim[1] << " " << chdim[2] << endl;
            return 1;
        }
    }

    /* ----------------------- */
    /* Copy raw data to buffer */
    /* ----------------------- */
    if (nx!=chdim[0] || ny!=chdim[1] || nz!=chdim[2]) {
        cout << " ERROR(phdf5): data size is not equal to HDF5 chunk size " << endl;
        return 1;
    }

    buffer = new double[nx*ny*nz];
    int l = 0;
    for (int i = 0; i < nx; i++)
        for (int j = 0; j < ny; j++)
            for (int k = 0; k < nz; k++)
                buffer[l++] = data[i][j][k];

    /* -------------------------------------------------------- */
    /* 5- Set the stride, count and block values for each chunk */
    /*    And set the offset for each chunk                     */
    /* -------------------------------------------------------- */
    hsize_t *stride = new hsize_t[ndim];
    hsize_t *count  = new hsize_t[ndim];
    hsize_t *block  = new hsize_t[ndim];
    hsize_t *offset = new hsize_t[ndim];

    for (int i=0; i<ndim; i++) {
        stride[i] = 1;
        count[i]  = 1;
        block[i]  = chdim[i];
        offset[i] = mpicoord[i]*chdim[i];
    }

    /* ---------------------------------- */
    /* 6- Create data spaces for our data */
    /* ---------------------------------- */
    glob_dspace = H5Screate_simple(ndim, dim,   NULL);
    locl_dspace = H5Screate_simple(ndim, chdim, NULL);

    /* --------------------------------------- */
    /* 7- Create the dataset for the HDF5 file */
    /* --------------------------------------- */
    dataset_prop = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dataset_prop, ndim, chdim);

    dname = "/"+grpname+"/"+datasetname;

    dataset = H5Dcreate2(file_id, dname.c_str(), h5type, glob_dspace,
                         H5P_DEFAULT, dataset_prop, H5P_DEFAULT);

    H5Pclose(dataset_prop);

    dataspace = H5Dget_space(dataset);
    H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);

    /* --------------------------------- */
    /* 8- Set the parallel transfer mode */
    /* --------------------------------- */
    dataset_xfer = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dataset_xfer, H5FD_MPIO_COLLECTIVE);

    /* ---------------------------- */
//......... some code omitted here .........

Developer ID: FusionPlasma, Project: iPic3D_VR, Lines of code: 101
Example 3: main

int main(int argc, char *argv[])
{
    hid_t fid = -1;
    hid_t access_plist = -1;
    hid_t create_plist = -1;
    hid_t cparm = -1;
    hid_t datatype = -1;
    hid_t dataspace = -1;
    hid_t dataset = -1;
    hid_t memspace = -1;
    hid_t groupDetector = -1;
    int rank = 1;
    hsize_t chunk[2] = {10,10};
    hsize_t dims[2] = {1,1};
    hsize_t elementSize[2] = {1,1};
    hsize_t maxdims[2] = {H5S_UNLIMITED,H5S_UNLIMITED};
    int ivalue[2];
    int fillValue = 0;

    /* Open the source file and dataset */
    /* All SWMR files need to use the latest file format */
    access_plist = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fclose_degree(access_plist, H5F_CLOSE_STRONG);
#if H5_VERSION_GE(1,9,178)
    H5Pset_object_flush_cb(access_plist, cFlushCallback, NULL);
#endif
    H5Pset_libver_bounds(access_plist, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
    create_plist = H5Pcreate(H5P_FILE_CREATE);
    fid = H5Fcreate("test_string_swmr.h5", H5F_ACC_TRUNC, create_plist, access_plist);

    /* Data */
    rank = 2;
    dims[0] = 10;
    dims[1] = 10;
    dataspace = H5Screate_simple(rank, dims, dims);
    cparm = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(cparm, rank, chunk);
    datatype = H5Tcopy(H5T_NATIVE_INT8);
    H5Pset_fill_value(cparm, datatype, &fillValue);
    groupDetector = H5Gcreate(fid, "detector", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    access_plist = H5Pcreate(H5P_DATASET_ACCESS);
    H5Pset_chunk_cache(access_plist, 503, 100, 1.0);
    dataset = H5Dcreate2(groupDetector, "data1", datatype, dataspace,
                         H5P_DEFAULT, cparm, access_plist);
    ivalue[0] = 1;
    ivalue[1] = 1;
    writeInt32Attribute(dataset, "NDArrayDimBinning", 2, ivalue);
    ivalue[0] = 0;
    ivalue[1] = 0;
    writeInt32Attribute(dataset, "NDArrayDimOffset", 2, ivalue);
    writeInt32Attribute(dataset, "NDArrayDimReverse", 2, ivalue);
    ivalue[0] = 2;
    writeInt32Attribute(dataset, "NDArrayNumDims", 1, ivalue);
    H5Gclose(groupDetector);
    dims[0] = 1;
    dims[1] = 1;
    chunk[0] = 1;
    rank = 1;

    /* Unique ID */
    datatype = H5T_NATIVE_INT32;
    cparm = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_fill_value(cparm, datatype, &fillValue);
    H5Pset_chunk(cparm, rank, chunk);
    dataspace = H5Screate_simple(rank, dims, maxdims);
    dataset = H5Dcreate2(fid, "NDArrayUniqueId", datatype, dataspace,
                         H5P_DEFAULT, cparm, H5P_DEFAULT);
    memspace = H5Screate_simple(rank, elementSize, NULL);
    writeStringAttribute(dataset, "NDAttrName",        "NDArrayUniqueId");
    writeStringAttribute(dataset, "NDAttrDescription", "The unique ID of the NDArray");
    writeStringAttribute(dataset, "NDAttrSourceType",  "NDAttrSourceDriver");
    writeStringAttribute(dataset, "NDAttrSource",      "Driver");

    /* EPICS Timestamp */
    datatype = H5T_NATIVE_DOUBLE;
    cparm = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_fill_value(cparm, datatype, &fillValue);
    H5Pset_chunk(cparm, rank, chunk);
    dataspace = H5Screate_simple(rank, dims, maxdims);
    dataset = H5Dcreate2(fid, "NDArrayTimeStamp", datatype, dataspace,
                         H5P_DEFAULT, cparm, H5P_DEFAULT);
    memspace = H5Screate_simple(rank, elementSize, NULL);
    writeStringAttribute(dataset, "NDAttrName",        "NDArrayTimeStamp");
    writeStringAttribute(dataset, "NDAttrDescription", "The timestamp of the NDArray as float64");
    writeStringAttribute(dataset, "NDAttrSourceType",  "NDAttrSourceDriver");
    writeStringAttribute(dataset, "NDAttrSource",      "Driver");

    /* EPICS TS sec */
    datatype = H5T_NATIVE_UINT32;
    cparm = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_fill_value(cparm, datatype, &fillValue);
    H5Pset_chunk(cparm, rank, chunk);
    dataspace = H5Screate_simple(rank, dims, maxdims);
    dataset = H5Dcreate2(fid, "NDArrayEpicsTSSec", datatype, dataspace,
//......... some code omitted here .........

Developer ID: areaDetector, Project: ADCore, Lines of code: 101
Example 4: _io_write_prim_h5mpi

void _io_write_prim_h5mpi(const char *fname, const char **pnames, const double *data)
// -----------------------------------------------------------------------------
// This function uses a collective MPI-IO procedure to write the contents of
// 'data' to the HDF5 file named 'fname', which is assumed to have been created
// already. The dataset with name 'dname', which is being written to, must not
// exist already. Chunking is enabled as per the module-wide ChunkSize variable,
// and is disabled by default. Recommended chunk size is local subdomain
// size. This will result in optimized read/write on the same decomposition
// layout, but poor performance for different access patterns, for example the
// slabs used by cluster-FFT functions.
//
// WARNING!
//
// All processors must define the same chunk size, the behavior of this function
// is not defined otherwise. This implies that chunking should be disabled when
// running on a strange number of cores, and subdomain sizes are non-uniform.
// -----------------------------------------------------------------------------
{
    hsize_t ndp1 = n_dims + 1;
    hsize_t *a_nint = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
    hsize_t *l_ntot = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
    hsize_t *l_strt = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
    hsize_t *stride = (hsize_t*) malloc(ndp1*sizeof(hsize_t));
    int i;

    for (i=0; i<n_dims; ++i) {
        a_nint[i] = A_nint[i];   // Selection size, target and destination
        l_ntot[i] = L_ntot[i];   // Memory space total size
        l_strt[i] = L_strt[i];   // Memory space selection start
        stride[i] = 1;
    }
    a_nint[ndp1 - 1] = 1;
    l_ntot[ndp1 - 1] = n_prim;
    stride[ndp1 - 1] = n_prim;

    // Here we create the following property lists:
    //
    // file access property list   ........ for the call to H5Fopen
    // dset creation property list ........ for the call to H5Dcreate
    // dset transfer property list ........ for the call to H5Dwrite
    // ---------------------------------------------------------------------------
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    // Here we define collective (MPI) access to the file with alignment
    // properties optimized for the local file system, according to DiskBlockSize.
    // ---------------------------------------------------------------------------
    if (EnableChunking) {
        H5Pset_chunk(dcpl, n_dims, ChunkSize);
    }
    if (EnableAlignment) {
        H5Pset_alignment(fapl, AlignThreshold, DiskBlockSize);
    }

    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    hid_t file = H5Fopen(fname, H5F_ACC_RDWR, fapl);
    const int overwrite = H5Lexists(file, "prim", H5P_DEFAULT);
    hid_t prim = overwrite ? H5Gopen(file, "prim", H5P_DEFAULT) :
        H5Gcreate(file, "prim", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    hid_t mspc = H5Screate_simple(ndp1,   l_ntot, NULL);
    hid_t fspc = H5Screate_simple(n_dims, G_ntot, NULL);

    // Call signature to H5Sselect_hyperslab is (start, stride, count, chunk)
    // ---------------------------------------------------------------------------
    const clock_t start_all = clock();

    for (i=0; i<n_prim; ++i) {
        hid_t dset = overwrite ? H5Dopen(prim, pnames[i], H5P_DEFAULT) :
            H5Dcreate(prim, pnames[i], H5T_NATIVE_DOUBLE, fspc,
                      H5P_DEFAULT, dcpl, H5P_DEFAULT);
        l_strt[ndp1 - 1] = i;
        H5Sselect_hyperslab(mspc, H5S_SELECT_SET, l_strt, stride, a_nint, NULL);
        H5Sselect_hyperslab(fspc, H5S_SELECT_SET, G_strt, NULL, A_nint, NULL);
        H5Dwrite(dset, H5T_NATIVE_DOUBLE, mspc, fspc, dxpl, data);
        H5Dclose(dset);
    }
    if (iolog) {
        const double sec = (double)(clock() - start_all) / CLOCKS_PER_SEC;
        fprintf(iolog, "[h5mpi] write to %s took %f minutes\n", fname, sec/60.0);
        fflush(iolog);
    }

    free(a_nint);
    free(l_ntot);
    free(l_strt);
    free(stride);

    // Always close the hid_t handles in the reverse order they were opened in.
    // ---------------------------------------------------------------------------
    H5Sclose(fspc);
    H5Sclose(mspc);
    H5Gclose(prim);
    H5Fclose(file);
    H5Pclose(dxpl);
    H5Pclose(dcpl);
    H5Pclose(fapl);
}

Developer ID: darien0, Project: Mara, Lines of code: 100
Example 5: FTI_WriteHDF5Var

int FTI_WriteHDF5Var(FTIT_dataset *FTI_DataVar)
{
    int j;
    hsize_t dimLength[32];
    char str[FTI_BUFS];
    int res;
    hid_t dcpl;

    for (j = 0; j < FTI_DataVar->rank; j++) {
        dimLength[j] = FTI_DataVar->dimLength[j];
    }

    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    res = H5Pset_fletcher32(dcpl);
    res = H5Pset_chunk(dcpl, FTI_DataVar->rank, dimLength);

    hid_t dataspace = H5Screate_simple(FTI_DataVar->rank, dimLength, NULL);
    hid_t dataset = H5Dcreate2(FTI_DataVar->h5group->h5groupID, FTI_DataVar->name,
                               FTI_DataVar->type->h5datatype, dataspace,
                               H5P_DEFAULT, dcpl, H5P_DEFAULT);

    // If my data are stored in the CPU side
    // Just store the data to the file and return;
#ifdef GPUSUPPORT
    if (!FTI_DataVar->isDevicePtr) {
#endif
        res = H5Dwrite(dataset, FTI_DataVar->type->h5datatype, H5S_ALL, H5S_ALL,
                       H5P_DEFAULT, FTI_DataVar->ptr);
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        res = H5Pclose(dcpl);
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        res = H5Dclose(dataset);
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        res = H5Sclose(dataspace);
        if (res < 0) {
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        return FTI_SCES;
#ifdef GPUSUPPORT
    }

    // This code is only executed in the GPU case.
    hsize_t *count  = (hsize_t*) malloc(sizeof(hsize_t)*FTI_DataVar->rank);
    hsize_t *offset = (hsize_t*) calloc(FTI_DataVar->rank, sizeof(hsize_t));

    if (!count || !offset) {
        sprintf(str, "Could Not allocate count and offset regions");
        FTI_Print(str, FTI_EROR);
        return FTI_NSCS;
    }

    hsize_t seperator;
    hsize_t fetchBytes = FTI_getHostBuffSize();
    fetchBytes = FTI_calculateCountDim(FTI_DataVar->eleSize, fetchBytes, count,
                                       FTI_DataVar->rank, dimLength, &seperator);
    sprintf(str, "GPU-Device Message: I Will Fetch %lld Bytes Per Stream Request", fetchBytes);
    FTI_Print(str, FTI_DBUG);

    FTIT_data_prefetch prefetcher;
    prefetcher.fetchSize = fetchBytes;
    prefetcher.totalBytesToFetch = FTI_DataVar->size;
    prefetcher.isDevice = FTI_DataVar->isDevicePtr;
    prefetcher.dptr = FTI_DataVar->devicePtr;
    size_t bytesToWrite;
    FTI_InitPrefetcher(&prefetcher);
    unsigned char *basePtr = NULL;

    if (FTI_Try(FTI_getPrefetchedData(&prefetcher, &bytesToWrite, &basePtr),
                "Fetch next memory block from GPU to write to HDF5") != FTI_SCES) {
        return FTI_NSCS;
    }

    while (basePtr) {
        res = FTI_WriteElements(dataspace, FTI_DataVar->type->h5datatype, dataset,
                                count, offset, FTI_DataVar->rank, basePtr);
        if (res != FTI_SCES) {
            free(offset);
            free(count);
            sprintf(str, "Dataset #%d could not be written", FTI_DataVar->id);
            FTI_Print(str, FTI_EROR);
            return FTI_NSCS;
        }
        FTI_AdvanceOffset(seperator, offset, count, dimLength, FTI_DataVar->rank);

        if (FTI_Try(FTI_getPrefetchedData(&prefetcher, &bytesToWrite, &basePtr),
                    "Fetch next memory block from GPU to write to HDF5") != FTI_SCES) {
//......... some code omitted here .........

Developer ID: leobago, Project: fti, Lines of code: 101
Example 6: create_deflate_dsets_float

/*-------------------------------------------------------------------------
 * Function:    create_deflate_dsets_float
 *
 * Purpose:     Create a dataset of FLOAT datatype with deflate filter
 *
 * Return:      Success:        0
 *              Failure:        -1
 *
 * Programmer:  Raymond Lu
 *              29 March 2011
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
int
create_deflate_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
#ifdef H5_HAVE_FILTER_DEFLATE
    hid_t    dataset = -1;        /* dataset handles */
    hid_t    dcpl = -1;
    float    data[NX][NY];        /* data to write */
    float    fillvalue = -2.2f;
    hsize_t  chunk[RANK] = {CHUNK0, CHUNK1};
    int      i, j;

    /*
     * Data and output buffer initialization.
     */
    for (j = 0; j < NX; j++) {
        for (i = 0; i < NY; i++)
            data[j][i] = ((float)(i + j + 1))/3;
    }

    /*
     * Create the dataset creation property list, add the Deflate
     * filter, set the chunk size, and set the fill value.
     */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_deflate(dcpl, 6) < 0)
        TEST_ERROR
    if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(dcpl, H5T_NATIVE_FLOAT, &fillvalue) < 0)
        TEST_ERROR

    /*
     * Create a new dataset within the file using defined dataspace, little
     * endian datatype and default dataset creation properties.
     */
    if((dataset = H5Dcreate2(fid, DATASETNAME16, H5T_IEEE_F32LE, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR

    /*
     * Write the data to the dataset using default transfer properties.
     */
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR

    /* Close dataset */
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /* Now create a dataset with a big-endian type */
    if((dataset = H5Dcreate2(fid, DATASETNAME17, H5T_IEEE_F32BE, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /*
     * Close/release resources.
     */
    if(H5Pclose(dcpl) < 0)
        TEST_ERROR

#else /* H5_HAVE_FILTER_DEFLATE */
    const char *not_supported = "Deflate filter is not enabled. Can't create the dataset.";
    puts(not_supported);
#endif /* H5_HAVE_FILTER_DEFLATE */

    return 0;

#ifdef H5_HAVE_FILTER_DEFLATE
error:
    H5E_BEGIN_TRY {
        H5Pclose(dcpl);
        H5Dclose(dataset);
    } H5E_END_TRY;
    return -1;
#endif /* H5_HAVE_FILTER_DEFLATE */
}

Developer ID: Starlink, Project: hdf5, Lines of code: 98
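As a companion to the filter examples, the chunk settings of an existing dataset can be read back through its creation property list; H5Pget_chunk fills the output array and returns the chunk rank. The following is a hedged sketch using standard HDF5 calls (H5Dopen2, H5Dget_create_plist, H5Pget_layout, H5Pget_chunk); the dataset name argument is simply whatever name was used at creation time.

#include <stdio.h>
#include "hdf5.h"

/* Sketch: report whether a dataset is chunked and, if so, its chunk shape. */
static void print_chunk_shape(hid_t fid, const char *name)
{
    hid_t dset = H5Dopen2(fid, name, H5P_DEFAULT);
    hid_t dcpl = H5Dget_create_plist(dset);   /* copy of the creation plist */
    if (H5Pget_layout(dcpl) == H5D_CHUNKED) {
        hsize_t chunk_out[2];
        int rank = H5Pget_chunk(dcpl, 2, chunk_out);  /* returns chunk rank */
        printf("%s: chunked, rank %d, chunk %llu x %llu\n", name, rank,
               (unsigned long long)chunk_out[0], (unsigned long long)chunk_out[1]);
    }
    H5Pclose(dcpl);
    H5Dclose(dset);
}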
Example 7: main

//......... some code omitted here .........
      int ncid, grpid, nvars, ngatts, ndims, unlimdimid, ngrps;
      char name_in[NC_MAX_NAME + 1];
      nc_type xtype_in;
      int ndims_in, natts_in, dimid_in[NDIMS];

/*      nc_set_log_level(5);*/
      if (nc_open(FILE_NAME, NC_NOWRITE, &ncid)) ERR;
      if (nc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid)) ERR;
      if (ndims != 2 || nvars != 0 || ngatts != 0 || unlimdimid != -1) ERR;
      if (nc_inq_grps(ncid, &ngrps, &grpid)) ERR;
      if (ngrps != 1) ERR;
      if (nc_inq(grpid, &ndims, &nvars, &ngatts, &unlimdimid)) ERR;
      if (ndims != 0 || nvars != 1 || ngatts != 0 || unlimdimid != -1) ERR;
      if (nc_inq_var(grpid, 0, name_in, &xtype_in, &ndims_in, dimid_in,
                     &natts_in)) ERR;
      if (strcmp(name_in, VAR_NAME) || xtype_in != NC_SHORT || ndims_in != NDIMS ||
          dimid_in[0] != 0 || dimid_in[1] != 1 || natts_in != 0) ERR;
      if (nc_close(ncid)) ERR;
      }
   }
   SUMMARIZE_ERR;
#ifdef USE_SZIP
   printf("*** testing HDF5 compatibility with szip...");
   {
#define DEFLATE_LEVEL 9
#define MAX_NAME 100
#define NUM_CD_ELEM 10
/* HDF5 defines this... */
#define DEFLATE_NAME "deflate"
#define DIM1_LEN 3000
#define GRP_NAME "George_Washington"
#define BATTLE_RECORD "Battle_Record"
      hid_t fileid, grpid, spaceid, datasetid;
      int data_out[DIM1_LEN], data_in[DIM1_LEN];
      hsize_t dims[1] = {DIM1_LEN};
      hid_t propid;
      char name_in[MAX_NAME + 1];
      int ncid, ndims_in, nvars_in, ngatts_in, unlimdimid_in, ngrps_in;
      int nc_grpid;
      int dimid_in[1], natts_in;
      nc_type xtype_in;
      int i;

      for (i = 0; i < DIM1_LEN; i++)
         data_out[i] = i;

      /* Open file and create group. */
      if ((fileid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT,
                              H5P_DEFAULT)) < 0) ERR;
      if ((grpid = H5Gcreate(fileid, GRP_NAME, 0)) < 0) ERR;

      /* Write an array of bools, with szip compression. */
      if ((propid = H5Pcreate(H5P_DATASET_CREATE)) < 0) ERR;
      if (H5Pset_layout(propid, H5D_CHUNKED)) ERR;
      if (H5Pset_chunk(propid, 1, dims)) ERR;
      if (H5Pset_szip(propid, H5_SZIP_EC_OPTION_MASK, 32)) ERR;
      if ((spaceid = H5Screate_simple(1, dims, dims)) < 0) ERR;
      if ((datasetid = H5Dcreate(grpid, BATTLE_RECORD, H5T_NATIVE_INT,
                                 spaceid, propid)) < 0) ERR;
      if (H5Dwrite(datasetid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
                   data_out) < 0) ERR;
      if (H5Dclose(datasetid) < 0 ||
          H5Pclose(propid) < 0 ||
          H5Sclose(spaceid) < 0 ||
          H5Gclose(grpid) < 0 ||
          H5Fclose(fileid) < 0) ERR;

      /* Open the file with netCDF and check it. */
      if (nc_open(FILE_NAME, NC_NOWRITE, &ncid)) ERR;
      if (nc_inq(ncid, &ndims_in, &nvars_in, &ngatts_in, &unlimdimid_in)) ERR;
      if (ndims_in != 0 || nvars_in != 0 || ngatts_in != 0 || unlimdimid_in != -1) ERR;
      if (nc_inq_grps(ncid, &ngrps_in, &nc_grpid)) ERR;
      if (ngrps_in != 1) ERR;
      if (nc_inq(nc_grpid, &ndims_in, &nvars_in, &ngatts_in, &unlimdimid_in)) ERR;
      if (ndims_in != 1 || nvars_in != 1 || ngatts_in != 0 || unlimdimid_in != -1) ERR;

      /* Check the variable. */
      if (nc_inq_var(nc_grpid, 0, name_in, &xtype_in, &ndims_in, dimid_in,
                     &natts_in)) ERR;
      if (strcmp(name_in, BATTLE_RECORD) || xtype_in != NC_INT || ndims_in != 1 ||
          dimid_in[0] != 0 || natts_in != 0) ERR;

      /* Check the data. */
      if (nc_get_var(nc_grpid, 0, data_in)) ERR;
      for (i = 0; i < DIM1_LEN; i++)
         if (data_in[i] != data_out[i]) ERR;

      if (nc_close(ncid)) ERR;
   }
   SUMMARIZE_ERR;
#endif /* USE_SZIP */
   FINAL_RESULTS;
}

Developer ID: Unidata, Project: netcdf-c, Lines of code: 101
Example 8: AccessTraceWriter

AccessTraceWriter::AccessTraceWriter(g_string _fname, uint32_t numChildren) : fname(_fname) {
    // Create record structure
    hid_t accType = H5Tenum_create(H5T_NATIVE_USHORT);
    uint16_t val;
    H5Tenum_insert(accType, "GETS", (val=GETS,&val));
    H5Tenum_insert(accType, "GETX", (val=GETX,&val));
    H5Tenum_insert(accType, "PUTS", (val=PUTS,&val));
    H5Tenum_insert(accType, "PUTX", (val=PUTX,&val));

    size_t offset = 0;
    size_t size = H5Tget_size(H5T_NATIVE_ULONG)*2 + H5Tget_size(H5T_NATIVE_UINT) +
                  H5Tget_size(H5T_NATIVE_USHORT) + H5Tget_size(accType);
    hid_t recType = H5Tcreate(H5T_COMPOUND, size);

    auto insertType = [&](const char* name, hid_t type) {
        H5Tinsert(recType, name, offset, type);
        offset += H5Tget_size(type);
    };

    insertType("lineAddr", H5T_NATIVE_ULONG);
    insertType("cycle", H5T_NATIVE_ULONG);
    insertType("lat", H5T_NATIVE_UINT);
    insertType("childId", H5T_NATIVE_USHORT);
    insertType("accType", accType);

    hid_t fid = H5Fcreate(fname.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    if (fid == H5I_INVALID_HID) panic("Could not create HDF5 file %s", fname.c_str());

    // HACK: We want to use the SHUF filter... create the raw dataset instead of the packet table
    // hid_t table = H5PTcreate_fl(fid, "accs", recType, PT_CHUNKSIZE, 9);
    // if (table == H5I_INVALID_HID) panic("Could not create HDF5 packet table");
    hsize_t dims[1] = {0};
    hsize_t dims_chunk[1] = {PT_CHUNKSIZE};
    hsize_t maxdims[1] = {H5S_UNLIMITED};
    hid_t space_id = H5Screate_simple(1, dims, maxdims);

    hid_t plist_id = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(plist_id, 1, dims_chunk);
    H5Pset_shuffle(plist_id);
    H5Pset_deflate(plist_id, 9);

    hid_t table = H5Dcreate2(fid, "accs", recType, space_id, H5P_DEFAULT, plist_id, H5P_DEFAULT);
    if (table == H5I_INVALID_HID) panic("Could not create HDF5 dataset");
    H5Dclose(table);

    // info("%ld %ld %ld %ld", sizeof(PackedAccessRecord), size, offset, H5Tget_size(recType));
    assert(offset == size);
    assert(size == sizeof(PackedAccessRecord));

    hid_t ncAttr = H5Acreate2(fid, "numChildren", H5T_NATIVE_UINT, H5Screate(H5S_SCALAR), H5P_DEFAULT, H5P_DEFAULT);
    H5Awrite(ncAttr, H5T_NATIVE_UINT, &numChildren);
    H5Aclose(ncAttr);

    hid_t fAttr = H5Acreate2(fid, "finished", H5T_NATIVE_UINT, H5Screate(H5S_SCALAR), H5P_DEFAULT, H5P_DEFAULT);
    uint32_t finished = 0;
    H5Awrite(fAttr, H5T_NATIVE_UINT, &finished);
    H5Aclose(fAttr);

    H5Fclose(fid);

    // Initialize buffer
    buf = gm_calloc<PackedAccessRecord>(PT_CHUNKSIZE);
    cur = 0;
    max = PT_CHUNKSIZE;
    assert((uint32_t)(((char*) &buf[1]) - ((char*) &buf[0])) == sizeof(PackedAccessRecord));
}

Developer ID: Luffy0011, Project: zsim, Lines of code: 64
Example 9: PYTABLE_make_array

/*+++++++++++++++++++++++++
.IDENTifer   PYTABLE_make_array
.PURPOSE     create extensible HDF5 dataset
.INPUT/OUTPUT
  call as    stat = PYTABLE_make_array( locID, dset_name, title, rank, dims,
                                        extdim, typeID, dims_chunk, fill_data,
                                        compress, shuffle, fletcher32, buff );
     input:
            hid_t locID           : HDF5 identifier of file or group
            char *dset_name       : name of dataset
            char *title           :
            int rank              : number of dimensions
            hsize_t *dims         : size of each dimension
            int extdim            : index of extendable dimension
            hid_t typeID          : data type (HDF5 identifier)
            hsize_t *dims_chunk   : chunk sizes
            void *fill_data       : Fill value for data
            unsigned int compress : compression level (zero for no compression)
            bool shuffle          : shuffle data for better compression
            bool fletcher32       :
            void *buffer          : buffer with data to write (or NULL)

.RETURNS     A negative value is returned on failure.
.COMMENTS    none
-------------------------*/
herr_t PYTABLE_make_array( hid_t locID, const char *dset_name, const char *title,
                           const int rank, const hsize_t *dims, int extdim,
                           hid_t typeID, const hsize_t *dims_chunk, void *fill_data,
                           unsigned int compress, bool shuffle, bool fletcher32,
                           const void *buffer )
{
    register int ni;
    hid_t  dataID = -1, spaceID = -1;
    herr_t stat;

/* check if the array has to be chunked or not */
    if ( dims_chunk != NULL ) {
        hid_t plistID;

        hsize_t *maxdims = (hsize_t *) malloc( rank * sizeof(hsize_t) );
        if ( maxdims == NULL )
            NADC_GOTO_ERROR( NADC_ERR_ALLOC, "maxdims" );

        for ( ni = 0; ni < rank; ni++ ) {
            if ( ni == extdim )
                maxdims[ni] = H5S_UNLIMITED;
            else
                maxdims[ni] = dims[ni] < dims_chunk[ni] ? dims_chunk[ni] : dims[ni];
        }
        spaceID = H5Screate_simple( rank, dims, maxdims );
        free( maxdims );
        if ( spaceID < 0 ) NADC_GOTO_ERROR( NADC_ERR_HDF_SPACE, "" );

/* Modify dataset creation properties, i.e. enable chunking */
        plistID = H5Pcreate( H5P_DATASET_CREATE );
        if ( H5Pset_chunk( plistID, rank, dims_chunk ) < 0 ) goto done;

/* set the fill value using a struct as the data type */
        if ( fill_data != NULL
             && H5Pset_fill_value( plistID, typeID, fill_data ) < 0 )
            goto done;

/* dataset creation property list is modified to use */
/* fletcher must be first */
        if ( fletcher32 ) {
            if ( H5Pset_fletcher32( plistID ) < 0 ) goto done;
        }
/* then shuffle */
        if ( shuffle ) {
            if ( H5Pset_shuffle( plistID ) < 0 ) goto done;
        }
/* finally compression */
        if ( compress > 0 ) {
            if ( H5Pset_deflate( plistID, compress ) < 0 ) goto done;
        }

/* create the (chunked) dataset */
        dataID = H5Dcreate( locID, dset_name, typeID, spaceID,
                            H5P_DEFAULT, plistID, H5P_DEFAULT );
        if ( dataID < 0 )
            NADC_GOTO_ERROR( NADC_ERR_HDF_DATA, dset_name );

/* end access to the property list */
        if ( H5Pclose( plistID ) < 0 ) goto done;
    } else {
        spaceID = H5Screate_simple( rank, dims, NULL );
        if ( spaceID < 0 ) return -1;

/* create the dataset (not chunked) */
        dataID = H5Dcreate( locID, dset_name, typeID, spaceID,
                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT );
        if ( dataID < 0 )
            NADC_GOTO_ERROR( NADC_ERR_HDF_DATA, dset_name );
    }
/*
 * write the data
 */
    stat = H5Dwrite( dataID, typeID, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer );
//......... some code omitted here .........

Developer ID: rmvanhees, Project: nadc_tools, Lines of code: 101
Example 10: H5CreateOrOpenGroup

void RegionalSummary::writeH5(hid_t &file_id, string group_name) {
  herr_t status;
  hid_t group_id = H5CreateOrOpenGroup(file_id, group_name);
  hid_t dataset_id;
  hid_t dataspace_id;
  hsize_t h5_dims[1];

  // region_origin
  vector<unsigned int> coord_buf(2,0);
  coord_buf[0] = origin_.first;
  coord_buf[1] = origin_.second;
  h5_dims[0] = 2;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "region_origin", H5T_NATIVE_UINT, dataspace_id,
                           H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &coord_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // region_dim
  coord_buf[0] = dim_.first;
  coord_buf[1] = dim_.second;
  h5_dims[0] = 2;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "region_dim", H5T_NATIVE_UINT, dataspace_id,
                           H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &coord_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // n_err_
  h5_dims[0] = 1;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "n_err", H5T_NATIVE_UINT_LEAST64, dataspace_id,
                           H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &n_err_);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // n_aligned_
  h5_dims[0] = 1;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "n_aligned", H5T_NATIVE_UINT_LEAST64, dataspace_id,
                           H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &n_aligned_);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // data_dim
  AssertDims();
  coord_buf[0] = n_flow_;
  coord_buf[1] = max_hp_;
  h5_dims[0] = 2;
  dataspace_id = H5Screate_simple (1, h5_dims, NULL);
  dataset_id = H5Dcreate2 (group_id, "data_dim", H5T_NATIVE_UINT, dataspace_id,
                           H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &coord_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Sclose (dataspace_id);

  // hp_count_ and hp_err_
  hsize_t dims[2];
  dims[0] = n_flow_;
  dims[1] = max_hp_;
  vector<uint64_t> data_buf;
  hsize_t cdims[2];
  hid_t plist_id;

  // hp_count_
  LoadDataBuffer(max_hp_, n_flow_, data_buf, hp_count_);
  dataspace_id = H5Screate_simple (2, dims, NULL);
  plist_id = H5Pcreate (H5P_DATASET_CREATE);
  cdims[0] = min(n_flow_, (unsigned int) 20);
  cdims[1] = min(max_hp_, (unsigned int) 20);
  status = H5Pset_chunk (plist_id, 2, cdims);
  status = H5Pset_deflate (plist_id, 9);
  dataset_id = H5Dcreate2 (group_id, "hp_count", H5T_NATIVE_UINT_LEAST64, dataspace_id,
                           H5P_DEFAULT, plist_id, H5P_DEFAULT);
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Pclose (plist_id);
  status = H5Sclose (dataspace_id);

  // hp_err_
  LoadDataBuffer(max_hp_, n_flow_, data_buf, hp_err_);
  dataspace_id = H5Screate_simple (2, dims, NULL);
  plist_id = H5Pcreate (H5P_DATASET_CREATE);
  cdims[0] = min(n_flow_, (unsigned int) 20);
  cdims[1] = min(max_hp_, (unsigned int) 20);
  status = H5Pset_chunk (plist_id, 2, cdims);
  status = H5Pset_deflate (plist_id, 9);
  dataset_id = H5Dcreate2 (group_id, "hp_err", H5T_NATIVE_UINT_LEAST64, dataspace_id,
                           H5P_DEFAULT, plist_id, H5P_DEFAULT);
  status = H5Dwrite (dataset_id, H5T_NATIVE_UINT_LEAST64, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data_buf[0]);
  status = H5Dclose (dataset_id);
  status = H5Pclose (plist_id);
  status = H5Sclose (dataspace_id);

  status = H5Gclose (group_id);
}

Developer ID: Brainiarc7, Project: TS, Lines of code: 95
Example 11: H5Eset_auto2

//......... some code omitted here .........
    for (int n = 0; n != nuc_size; n++) {
        if (0 < comp.count(nuclides[n]))
            (*mat_data).comp[n] = comp[nuclides[n]];
        else
            (*mat_data).comp[n] = 0.0;
    };

    // get / make the data set
    bool datapath_exists = h5wrap::path_exists(db, datapath);
    if (datapath_exists) {
        data_set = H5Dopen2(db, datapath.c_str(), H5P_DEFAULT);
        data_space = H5Dget_space(data_set);
        data_rank = H5Sget_simple_extent_dims(data_space, data_dims, data_max_dims);

        // Determine the row size.
        if (std::signbit(row))
            row_num = data_dims[0] + row;  // careful, row is negative

        if (data_dims[0] <= row_num) {
            // row == -0, extend to data set so that we can append, or
            // row_num is larger than current dimension, resize to accommodate.
            data_dims[0] = row_num + 1;
            H5Dset_extent(data_set, data_dims);
        }

        data_offset[0] = row_num;
    } else {
        // Get full space
        data_space = H5Screate_simple(1, data_dims, data_max_dims);

        // Make data set properties to enable chunking
        hid_t data_set_params = H5Pcreate(H5P_DATASET_CREATE);
        hsize_t chunk_dims[1] = {chunksize};
        H5Pset_chunk(data_set_params, 1, chunk_dims);
        H5Pset_deflate(data_set_params, 1);

        material_struct * data_fill_value = new material_struct[material_struct_size];
        (*data_fill_value).mass = -1.0;
        (*data_fill_value).density = -1.0;
        (*data_fill_value).atoms_per_mol = -1.0;
        for (int n = 0; n != nuc_size; n++)
            (*data_fill_value).comp[n] = 0.0;
        H5Pset_fill_value(data_set_params, desc, &data_fill_value);

        // Create the data set
        data_set = H5Dcreate2(db, datapath.c_str(), desc, data_space, H5P_DEFAULT,
                              data_set_params, H5P_DEFAULT);
        H5Dset_extent(data_set, data_dims);

        // Add attribute pointing to nuc path
        hid_t nuc_attr_type = H5Tcopy(H5T_C_S1);
        H5Tset_size(nuc_attr_type, nucpath.length());
        hid_t nuc_attr_space = H5Screate(H5S_SCALAR);
        hid_t nuc_attr = H5Acreate2(data_set, "nucpath", nuc_attr_type, nuc_attr_space,
                                    H5P_DEFAULT, H5P_DEFAULT);
        H5Awrite(nuc_attr, nuc_attr_type, nucpath.c_str());
        H5Fflush(db, H5F_SCOPE_GLOBAL);

        // Remember to de-allocate
        delete[] data_fill_value;
    };

    // Get the data hyperslab
    data_hyperslab = H5Dget_space(data_set);
    hsize_t data_count[1] = {1};
    H5Sselect_hyperslab(data_hyperslab, H5S_SELECT_SET, data_offset, NULL,
                        data_count, NULL);

Developer ID: crbates, Project: pyne, Lines of code: 67
Example 12: main

int
main(void)
{
    hid_t faplid = -1;      /* file access property list ID (all files) */
    hid_t src_sid = -1;     /* source dataset's dataspace ID */
    hid_t src_dcplid = -1;  /* source dataset property list ID */
    hid_t vds_sid = -1;     /* VDS dataspace ID */
    hid_t vds_dcplid = -1;  /* VDS dataset property list ID */
    hid_t fid = -1;         /* HDF5 file ID */
    hid_t did = -1;         /* dataset ID */
    hsize_t start[RANK];    /* starting point for hyperslab */
    int map_start = -1;     /* starting point in the VDS map */
    int i;                  /* iterator */

    /* Start by creating the virtual dataset (VDS) dataspace and creation
     * property list. The individual source datasets are then created
     * and the VDS map (stored in the VDS property list) is updated.
     */

    /* Create VDS dcpl */
    if((vds_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(vds_dcplid, VDS_DATATYPE, &VDS_FILL_VALUE) < 0)
        TEST_ERROR

    /* Create VDS dataspace */
    if((vds_sid = H5Screate_simple(RANK, VDS_DIMS, VDS_MAX_DIMS)) < 0)
        TEST_ERROR

    /************************************
     * Create source files and datasets *
     ************************************/

    start[0] = 0;
    start[1] = 0;
    start[2] = 0;
    map_start = 0;

    /* All SWMR files need to use the latest file format */
    if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        TEST_ERROR
    if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
        TEST_ERROR

    for(i = 0; i < N_SOURCES; i++) {
        /* source dataset dcpl */
        if((src_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
            TEST_ERROR
        if(H5Pset_chunk(src_dcplid, RANK, PLANES[i]) < 0)
            TEST_ERROR
        if(H5Pset_fill_value(src_dcplid, SOURCE_DATATYPE, &FILL_VALUES[i]) < 0)
            TEST_ERROR

        /* Use a mix of compressed and uncompressed datasets */
        if(0 != i % 2)
            if(H5Pset_deflate(src_dcplid, COMPRESSION_LEVEL) < 0)
                TEST_ERROR

        /* Create source file, dataspace, and dataset */
        if((fid = H5Fcreate(FILE_NAMES[i], H5F_ACC_TRUNC,
                            H5P_DEFAULT, faplid)) < 0)
            TEST_ERROR
        if((src_sid = H5Screate_simple(RANK, DIMS[i], MAX_DIMS[i])) < 0)
            TEST_ERROR
        if((did = H5Dcreate2(fid, SOURCE_DSET_NAME, SOURCE_DATATYPE,
                             src_sid, H5P_DEFAULT, src_dcplid, H5P_DEFAULT)) < 0)
            TEST_ERROR

        /* set up hyperslabs for source and destination datasets */
        start[1] = 0;
        if(H5Sselect_hyperslab(src_sid, H5S_SELECT_SET, start, NULL,
                               MAX_DIMS[i], NULL) < 0)
            TEST_ERROR
        start[1] = map_start;
        if(H5Sselect_hyperslab(vds_sid, H5S_SELECT_SET, start, NULL,
                               MAX_DIMS[i], NULL) < 0)
            TEST_ERROR
        map_start += PLANES[i][1];

        /* Add VDS mapping */
        if(H5Pset_virtual(vds_dcplid, vds_sid, FILE_NAMES[i],
                          SOURCE_DSET_PATH, src_sid) < 0)
            TEST_ERROR

        /* close */
        if(H5Sclose(src_sid) < 0)
            TEST_ERROR
        if(H5Pclose(src_dcplid) < 0)
//......... some code omitted here .........

Developer ID: Starlink, Project: hdf5, Lines of code: 101
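The loop above builds one virtual-to-source mapping per source file. Stripped of the test scaffolding, a single mapping looks like the following hedged sketch (HDF5 1.10 or later; file and dataset names are placeholders). Note that a virtual dataset has a VDS layout of its own, so H5Pset_chunk applies to the source datasets, not to the VDS itself.

#include "hdf5.h"

/* Sketch: map one whole source dataset into a same-shaped virtual dataset. */
int make_simple_vds(void)
{
    hsize_t dims[1] = {100};
    hid_t src_space = H5Screate_simple(1, dims, NULL);
    hid_t vds_space = H5Screate_simple(1, dims, NULL);

    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    /* both dataspaces select everything by default, so this maps the
     * whole of "source.h5:/dset" onto the whole virtual dataset */
    H5Pset_virtual(dcpl, vds_space, "source.h5", "/dset", src_space);

    hid_t f   = H5Fcreate("vds.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t vds = H5Dcreate2(f, "vds", H5T_NATIVE_INT, vds_space,
                           H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dclose(vds);
    H5Fclose(f);
    H5Pclose(dcpl);
    H5Sclose(vds_space);
    H5Sclose(src_space);
    return 0;
}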
Example 13: create_nbit_dsets_float

/*-------------------------------------------------------------------------
 * Function:    create_nbit_dsets_float
 *
 * Purpose:     Create a dataset of FLOAT datatype with nbit filter
 *
 * Return:      Success:        0
 *              Failure:        -1
 *
 * Programmer:  Raymond Lu
 *              29 March 2011
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
int
create_nbit_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
    hid_t    dataset = -1;        /* dataset handles */
    hid_t    datatype = -1;
    hid_t    dcpl = -1;
    size_t   precision, offset;
    float    data[NX][NY];        /* data to write */
    float    fillvalue = -2.2f;
    hsize_t  chunk[RANK] = {CHUNK0, CHUNK1};
    int      i, j;

    /*
     * Data and output buffer initialization.
     */
    for (j = 0; j < NX; j++) {
        for (i = 0; i < NY; i++)
            data[j][i] = ((float)(i + j + 1))/3;
    }

    /*
     * Create the dataset creation property list, add the N-bit
     * filter, set the chunk size, and set the fill value.
     */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_nbit(dcpl) < 0)
        TEST_ERROR
    if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(dcpl, H5T_NATIVE_FLOAT, &fillvalue) < 0)
        TEST_ERROR

    /* Define user-defined single-precision floating-point type for dataset.
     * A 20-bit little-endian data type. */
    if((datatype = H5Tcopy(H5T_IEEE_F32LE)) < 0)
        TEST_ERROR
    if(H5Tset_fields(datatype, (size_t)26, (size_t)20, (size_t)6, (size_t)7, (size_t)13) < 0)
        TEST_ERROR
    offset = 7;
    if(H5Tset_offset(datatype, offset) < 0)
        TEST_ERROR
    precision = 20;
    if(H5Tset_precision(datatype, precision) < 0)
        TEST_ERROR
    if(H5Tset_size(datatype, (size_t)4) < 0)
        TEST_ERROR
    if(H5Tset_ebias(datatype, (size_t)31) < 0)
        TEST_ERROR

    /*
     * Create a new dataset within the file using defined dataspace,
     * user-defined datatype, and default dataset creation properties.
     */
    if((dataset = H5Dcreate2(fid, DATASETNAME22, datatype, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR

    /*
     * Write the data to the dataset using default transfer properties.
     */
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR

    /* Close dataset */
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /* Now create a dataset with a big-endian type */
    if(H5Tset_order(datatype, H5T_ORDER_BE) < 0)
        TEST_ERROR
    if((dataset = H5Dcreate2(fid, DATASETNAME23, datatype, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /*
     * Close/release resources.
     */
    if(H5Pclose(dcpl) < 0)
        TEST_ERROR
//......... some code omitted here .........

Developer ID: Starlink, Project: hdf5, Lines of code: 101
Example 14: H5Fcreate

//......... some code omitted here .........
    if (datasetlength > 0) {
        // Try to open dataset if it exists

        // turn off errors when we query the group, using open
        hid_t error_stack = H5Eget_current_stack();
        H5E_auto2_t oldfunc;
        void *old_client_data;
        H5Eget_auto(error_stack, &oldfunc, &old_client_data);
        H5Eset_auto(error_stack, NULL, NULL);

        // query or open dataset
        curdataset = H5Dopen(channelgroup, dname.toStdString().c_str(), H5P_DEFAULT);

        // turn errors back on.
        H5Eset_auto(error_stack, oldfunc, old_client_data);

        // if cannot open dataset, create it, and make it chunked.
        if (curdataset <= 0) {
            // set up size info, chunks etc...
            maxdims[0] = H5S_UNLIMITED;
            rank = 1;
            d_dims[0] = datasetlength;

            curdataspace = H5Screate_simple(rank, d_dims, maxdims);

            prop = H5Pcreate(H5P_DATASET_CREATE);
            status = H5Pset_chunk(prop, rank, d_dims);
            if (status) trap();

            curdataset = H5Dcreate(channelgroup,
                                   dname.toStdString().c_str(),
                                   H5T_NATIVE_FLOAT,
                                   curdataspace,
                                   H5P_DEFAULT, prop, H5P_DEFAULT);

            hid_t aid3 = H5Screate(H5S_SCALAR);
            hid_t atype = H5Tcopy(H5T_C_S1);
            H5Tset_size(atype, 5);
            hid_t attr3 = H5Acreate1(curdataset, "type", atype, aid3, H5P_DEFAULT);
            status = H5Awrite(attr3, atype, "array");
            H5Aclose(attr3);
            H5Tclose(atype);
            H5Sclose(aid3);
            H5Pclose(prop);
        } // if (curdataset<=0)
        else {
            // get dataspace from extant dataset

Developer ID: argonnexraydetector, Project: RoachFirmPy, Lines of code: 67
Example 15: create_scale_offset_dsets_long_long

/*-------------------------------------------------------------------------
 * Function:    create_scale_offset_dsets_long_long
 *
 * Purpose:     Create a dataset of LONG LONG datatype with scale-offset
 *              filter
 *
 * Return:      Success:        0
 *              Failure:        -1
 *
 * Programmer:  Neil Fortner
 *              27 January 2011
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
int
create_scale_offset_dsets_long_long(hid_t fid, hid_t fsid, hid_t msid)
{
    hid_t      dataset = -1;        /* dataset handles */
    hid_t      dcpl = -1;
    long long  data[NX][NY];        /* data to write */
    long long  fillvalue = -2;
    hsize_t    chunk[RANK] = {CHUNK0, CHUNK1};
    int        i, j;

    /*
     * Data and output buffer initialization.
     */
    for (j = 0; j < NX; j++) {
        for (i = 0; i < NY; i++)
            data[j][i] = i + j;
    }
    /*
     * 0 1 2 3 4 5
     * 1 2 3 4 5 6
     * 2 3 4 5 6 7
     * 3 4 5 6 7 8
     * 4 5 6 7 8 9
     * 5 6 7 8 9 10
     */

    /*
     * Create the dataset creation property list, add the Scale-Offset
     * filter, set the chunk size, and set the fill value.
     */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_scaleoffset(dcpl, H5Z_SO_INT, H5Z_SO_INT_MINBITS_DEFAULT) < 0)
        TEST_ERROR
    if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(dcpl, H5T_NATIVE_LLONG, &fillvalue) < 0)
        TEST_ERROR

    /*
     * Create a new dataset within the file using defined dataspace, little
     * endian datatype and default dataset creation properties.
     */
    if((dataset = H5Dcreate2(fid, DATASETNAME12, H5T_STD_I64LE, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR

    /*
     * Write the data to the dataset using default transfer properties.
     */
    if(H5Dwrite(dataset, H5T_NATIVE_LLONG, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR

    /* Close dataset */
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /* Now create a dataset with a big-endian type */
    if((dataset = H5Dcreate2(fid, DATASETNAME13, H5T_STD_I64BE, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR
    if(H5Dwrite(dataset, H5T_NATIVE_LLONG, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /*
     * Close/release resources.
     */
    if(H5Pclose(dcpl) < 0)
        TEST_ERROR

    return 0;

error:
    H5E_BEGIN_TRY {
        H5Pclose(dcpl);
        H5Dclose(dataset);
    } H5E_END_TRY;
    return -1;
}

Developer ID: Starlink, Project: hdf5, Lines of code: 98
Example 16: main

int main(int argc, char* argv[])
{
    char c;
    int ix, iy, iz, i;
    MPI_Comm mpicomm;
    MPI_Info mpiinfo;
    int mpirank;
    int mpisize;
    double *data3d, *data2d, *x, *y, *z, t;
    int localx, localy, localwidth, localheight;
    int maxwidth, maxheight;
    const char* filename = "output.h5";
    hid_t fileid, plist, filespace, memspace, dimvar, varid;
    hsize_t size[NDIMS], maxsize[NDIMS], chunksize[NDIMS];
    hsize_t start[NDIMS], count[NDIMS];
    char varname[32];

    mpicomm = MPI_COMM_WORLD;
    mpiinfo = MPI_INFO_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(mpicomm, &mpisize);
    MPI_Comm_rank(mpicomm, &mpirank);

    if (!mpirank) printf("Creating some data...\n");

    // Distribute our data values in a pism-y way
    GetLocalBounds(XSIZE, YSIZE, mpirank, mpisize,
                   &localx, &localy, &localwidth, &localheight);
    printf("Rank%02d: x=%d, y=%d, width=%d, height=%d\n",
           mpirank, localx, localy, localwidth, localheight);

    data2d = (double*)malloc(localwidth * localheight * sizeof(double));
    data3d = (double*)malloc(localwidth * localheight * ZSIZE * sizeof(double));
    x = (double*)malloc(localwidth * sizeof(double));
    y = (double*)malloc(localheight * sizeof(double));
    z = (double*)malloc(ZSIZE * sizeof(double));
    t = 0.0;
    for (ix = 0; ix < localwidth; ix++) {
        x[ix] = ix + localx;
        for (iy = 0; iy < localheight; iy++) {
            y[iy] = iy + localy;
            data2d[ix*localheight + iy] = (ix+localx)*localheight + iy+localy;
            for (iz = 0; iz < ZSIZE; iz++) {
                z[iz] = iz;
                data3d[ix*localheight*ZSIZE + iy*ZSIZE + iz] =
                    (ix+localx)*YSIZE*ZSIZE + (iy+localy)*ZSIZE + iz;
            }
        }
    }

    if (!mpirank) printf("Creating HDF5 file...\n");
    plist = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plist, mpicomm, mpiinfo);
    // TODO: this seems like a good place to put optimizations, and indeed
    // PISM is adding several additional properties, like setting block sizes,
    // cache eviction policies, fs striping parameters, etc.
    fileid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    H5Pclose(plist);

    if (!mpirank) printf("Setting up dimensions...\n");
    if (!mpirank) printf("Creating time dimension...\n");

    // Define the time dimension
    size[0] = 1;
    maxsize[0] = H5S_UNLIMITED;
    chunksize[0] = 1;
    filespace = H5Screate_simple(1, size, maxsize);
    plist = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(plist, 1, chunksize);
    // It is strictly required to set chunksize when using
    // the low-level api.  Contiguous datasets are not allowed
    // to use the unlimited dimension.
    dimvar = H5Dcreate(fileid, TNAME, H5T_NATIVE_DOUBLE, filespace,
                       H5P_DEFAULT, plist, H5P_DEFAULT);
    H5Pclose(plist);
    H5DSset_scale(dimvar, TNAME);
    H5Dclose(dimvar);
    H5Sclose(filespace);

#ifdef OLD_WRITE_PATTERN
    if (!mpirank) printf("Writing time dimension...\n");
    dimvar = H5Dopen(fileid, TNAME, H5P_DEFAULT);
    filespace = H5Dget_space(dimvar);
    memspace = H5Screate_simple(1, size, 0);
    plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
    // TODO: Pism does this, but comments suggest it is questionable
    start[0] = 0;
    count[0] = 1;
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0);
    H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, &t);
    H5Pclose(plist);
    H5Sclose(filespace);
//......... some code omitted here .........

Developer ID: timmorey, Project: sandbox, Lines of code: 101
Example 17: create_szip_dsets_float

/*-------------------------------------------------------------------------
 * Function:    create_szip_dsets_float
 *
 * Purpose:     Create a dataset of FLOAT datatype with szip filter
 *
 * Return:      Success:        0
 *              Failure:        -1
 *
 * Programmer:  Raymond Lu
 *              29 March 2011
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
int
create_szip_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
{
    hid_t    dataset;             /* dataset handles */
    hid_t    dcpl;
    float    data[NX][NY];        /* data to write */
    float    fillvalue = -2.2f;
    hsize_t  chunk[RANK] = {CHUNK0, CHUNK1};
    int      i, j;

    /*
     * Data and output buffer initialization.
     */
    for (j = 0; j < NX; j++) {
        for (i = 0; i < NY; i++)
            data[j][i] = ((float)(i + j + 1))/3;
    }

    /*
     * Create the dataset creation property list, add the Szip
     * filter, set the chunk size, and set the fill value.
     */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR
    if(H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 4) < 0)
        TEST_ERROR
    if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
        TEST_ERROR
    if(H5Pset_fill_value(dcpl, H5T_NATIVE_FLOAT, &fillvalue) < 0)
        TEST_ERROR

    /*
     * Create a new dataset within the file using defined dataspace, little
     * endian datatype and default dataset creation properties.
     */
    if((dataset = H5Dcreate2(fid, DATASETNAME18, H5T_IEEE_F32LE, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR

    /*
     * Write the data to the dataset using default transfer properties.
     */
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR

    /* Close dataset */
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /* Now create a dataset with a big-endian type */
    if((dataset = H5Dcreate2(fid, DATASETNAME19, H5T_IEEE_F32BE, fsid,
                             H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        TEST_ERROR
    if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
        TEST_ERROR
    if(H5Dclose(dataset) < 0)
        TEST_ERROR

    /*
     * Close/release resources.
     */
    if(H5Pclose(dcpl) < 0)
        TEST_ERROR

    return 0;

error:
    H5E_BEGIN_TRY {
        H5Pclose(dcpl);
        H5Dclose(dataset);
    } H5E_END_TRY;
    return -1;
}

Developer ID: Starlink, Project: hdf5, Lines of code: 89
Example 18: test_filters_for_datasets

/*-------------------------------------------------------------------------
 * Function:  test_filters_for_datasets
 *
 * Purpose:   Tests creating datasets and writing data with dynamically
 *            loaded filters
 *
 * Return:    Success:    0
 *            Failure:    -1
 *
 * Programmer:  Raymond Lu
 *              14 March 2013
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_filters_for_datasets(hid_t file)
{
    hid_t         dc;           /* Dataset creation property list ID */
    const hsize_t chunk_size[2] = {FILTER_CHUNK_DIM1, FILTER_CHUNK_DIM2};  /* Chunk dimensions */
    unsigned int  compress_level = 9;

    /*----------------------------------------------------------
     * STEP 1: Test deflation by itself.
     *----------------------------------------------------------
     */
#ifdef H5_HAVE_FILTER_DEFLATE
    puts("Testing deflate filter");
    if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
    if(H5Pset_chunk(dc, 2, chunk_size) < 0) goto error;
    if(H5Pset_deflate(dc, 6) < 0) goto error;

    if(test_filter_internal(file, DSET_DEFLATE_NAME, dc) < 0) goto error;
    /* Clean up objects used for this test */
    if(H5Pclose(dc) < 0) goto error;
#else /* H5_HAVE_FILTER_DEFLATE */
    TESTING("deflate filter");
    SKIPPED();
    puts("    Deflate filter not enabled");
#endif /* H5_HAVE_FILTER_DEFLATE */

    /*----------------------------------------------------------
     * STEP 2: Test DYNLIB1 by itself.
     *----------------------------------------------------------
     */
    puts("Testing DYNLIB1 filter");
    if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
    if(H5Pset_chunk(dc, 2, chunk_size) < 0) goto error;
    if(H5Pset_filter(dc, H5Z_FILTER_DYNLIB1, H5Z_FLAG_MANDATORY, (size_t)1, &compress_level) < 0) goto error;

    if(test_filter_internal(file, DSET_DYNLIB1_NAME, dc) < 0) goto error;
    /* Clean up objects used for this test */
    if(H5Pclose(dc) < 0) goto error;

    /* Unregister the dynamic filter DYNLIB1 for testing purpose. The next time when this test is run for
     * the new file format, the library's H5PL code has to search in the table of loaded plugin libraries
     * for this filter. */
    if(H5Zunregister(H5Z_FILTER_DYNLIB1) < 0) goto error;

    /*----------------------------------------------------------
     * STEP 3: Test DYNLIB2 by itself.
     *----------------------------------------------------------
     */
    puts("Testing DYNLIB2 filter");
    if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
    if(H5Pset_chunk(dc, 2, chunk_size) < 0) goto error;
    if(H5Pset_filter(dc, H5Z_FILTER_DYNLIB2, H5Z_FLAG_MANDATORY, 0, NULL) < 0) goto error;

    if(test_filter_internal(file, DSET_DYNLIB2_NAME, dc) < 0) goto error;
    /* Clean up objects used for this test */
    if(H5Pclose(dc) < 0) goto error;

    /* Unregister the dynamic filter DYNLIB2 for testing purpose. The next time when this test is run for
     * the new file format, the library's H5PL code has to search in the table of loaded plugin libraries
     * for this filter. */
    if(H5Zunregister(H5Z_FILTER_DYNLIB2) < 0) goto error;

    return 0;

error:
    return -1;
}

Developer ID: schwehr, Project: hdf5, Lines of code: 83
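When a creation property list names a filter that may not be present, as with the dynamically loaded DYNLIB filters above, it can be useful to probe availability first. This sketch uses H5Zfilter_avail, a standard library call; the plugin ID 257 is a hypothetical stand-in for a registered third-party filter, not an ID used by the example above.

#include <stdio.h>
#include "hdf5.h"

/* Sketch: ask the library whether a filter can be applied before using it. */
void check_filters(void)
{
    htri_t avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);   /* built-in filter */
    printf("deflate available: %s\n", avail > 0 ? "yes" : "no");

    avail = H5Zfilter_avail((H5Z_filter_t)257);           /* hypothetical plugin ID */
    printf("plugin 257 available: %s\n", avail > 0 ? "yes" : "no");
}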
Example 19: apply_filters

//......... some code omitted here .........
            hsize_t size = H5TOOLS_BUFSIZE / sm_nbytes;
            if ( size == 0) /* datum size > H5TOOLS_BUFSIZE */
                size = 1;
            sm_size[i - 1] = MIN(dims[i - 1], size);
            sm_nbytes *= sm_size[i - 1];
            assert(sm_nbytes > 0);
        }

        for ( i = 0; i < rank; i++) {
            obj.chunk.chunk_lengths[i] = sm_size[i];
        }
    }

    for ( i=0; i<obj.nfilters; i++) {
        switch (obj.filter[i].filtn) {
        default:
            break;

        /*-------------------------------------------------------------------------
         * H5Z_FILTER_DEFLATE 1 , deflation like gzip
         *-------------------------------------------------------------------------
         */
        case H5Z_FILTER_DEFLATE:
        {
            unsigned aggression; /* the deflate level */
            aggression = obj.filter[i].cd_values[0];

            /* set up for deflated data */
            if(H5Pset_chunk(dcpl_id, obj.chunk.rank, obj.chunk.chunk_lengths)<0)
                return -1;
            if(H5Pset_deflate(dcpl_id,aggression)<0)
                return -1;
        }
            break;

        /*-------------------------------------------------------------------------
         * H5Z_FILTER_SZIP 4 , szip compression
         *-------------------------------------------------------------------------
         */
        case H5Z_FILTER_SZIP:
        {
            unsigned options_mask;
            unsigned pixels_per_block;

            options_mask = obj.filter[i].cd_values[0];
            pixels_per_block = obj.filter[i].cd_values[1];

            /* set up for szip data */
            if(H5Pset_chunk(dcpl_id,obj.chunk.rank,obj.chunk.chunk_lengths)<0)
                return -1;
            if (H5Pset_szip(dcpl_id,options_mask,pixels_per_block)<0)
                return -1;
        }
            break;

        /*-------------------------------------------------------------------------
         * H5Z_FILTER_SHUFFLE 2 , shuffle the data
         *-------------------------------------------------------------------------
         */
        case H5Z_FILTER_SHUFFLE:

Developer ID: svn2github, Project: hdf5, Lines of code: 67
Example 20: H5ARRAYmake

herr_t H5ARRAYmake( hid_t loc_id,
                    const char *dset_name,
                    const char *obversion,
                    const int rank,
                    const hsize_t *dims,
                    int   extdim,
                    hid_t type_id,
                    hsize_t *dims_chunk,
                    void  *fill_data,
                    int   compress,
                    char  *complib,
                    int   shuffle,
                    int   fletcher32,
                    const void *data)
{
    hid_t   dataset_id, space_id;
    hsize_t *maxdims = NULL;
    hid_t   plist_id = 0;
    unsigned int cd_values[6];
    int     chunked = 0;
    int     i;

    /* Check whether the array has to be chunked or not */
    if (dims_chunk) {
        chunked = 1;
    }

    if (chunked) {
        maxdims = malloc(rank*sizeof(hsize_t));
        if (!maxdims) return -1;

        for (i=0; i<rank; i++) {
            if (i == extdim) {
                maxdims[i] = H5S_UNLIMITED;
            } else {
                maxdims[i] = dims[i] < dims_chunk[i] ? dims_chunk[i] : dims[i];
            }
        }
    }

    /* Create the data space for the dataset. */
    if ( (space_id = H5Screate_simple( rank, dims, maxdims )) < 0 )
        return -1;

    if (chunked) {
        /* Modify dataset creation properties, i.e. enable chunking */
        plist_id = H5Pcreate(H5P_DATASET_CREATE);
        if ( H5Pset_chunk( plist_id, rank, dims_chunk ) < 0 )
            return -1;

        /* Set the fill value using a struct as the data type. */
        if (fill_data) {
            if ( H5Pset_fill_value( plist_id, type_id, fill_data ) < 0 )
                return -1;
        } else {
            if ( H5Pset_fill_time(plist_id, H5D_FILL_TIME_ALLOC) < 0 )
                return -1;
        }

        /* Dataset creation property list is modified to use */

        /* Fletcher must be first */
        if (fletcher32) {
            if ( H5Pset_fletcher32( plist_id) < 0 )
                return -1;
        }
        /* Then shuffle (not if blosc is activated) */
        if ((shuffle) && (strcmp(complib, "blosc") != 0)) {
            if ( H5Pset_shuffle( plist_id) < 0 )
                return -1;
        }
        /* Finally compression */
        if (compress) {
            cd_values[0] = compress;
            cd_values[1] = (int)(atof(obversion) * 10);
            if (extdim < 0)
                cd_values[2] = CArray;
            else
                cd_values[2] = EArray;

            /* The default compressor in HDF5 (zlib) */
            if (strcmp(complib, "zlib") == 0) {
                if ( H5Pset_deflate( plist_id, compress) < 0 )
                    return -1;
            }
            /* The Blosc compressor does accept parameters */
            else if (strcmp(complib, "blosc") == 0) {
                cd_values[4] = compress;
                cd_values[5] = shuffle;
                if ( H5Pset_filter( plist_id, FILTER_BLOSC, H5Z_FLAG_OPTIONAL, 6, cd_values) < 0 )
                    return -1;
            }
            /* The LZO compressor does accept parameters */
            else if (strcmp(complib, "lzo") == 0) {
                if ( H5Pset_filter( plist_id, FILTER_LZO, H5Z_FLAG_OPTIONAL, 3, cd_values) < 0 )
//......... some code omitted here .........

Developer ID: 87, Project: PyTables, Lines of code: 101
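H5ARRAYmake sets one dimension to H5S_UNLIMITED precisely so that PyTables can append to the array later. The append step itself is not shown on this page; the following hedged sketch shows the usual pattern, assuming a rank-1 chunked dataset created with an unlimited maximum dimension: grow the extent with H5Dset_extent, then write a hyperslab into the new region.

#include "hdf5.h"

/* Sketch: append 'nelem' doubles to the end of an extendible 1-D dataset. */
herr_t append_doubles(hid_t dset, const double *buf, hsize_t nelem)
{
    hid_t fspace = H5Dget_space(dset);
    hsize_t old_size;
    H5Sget_simple_extent_dims(fspace, &old_size, NULL);   /* current length */
    H5Sclose(fspace);

    hsize_t new_size = old_size + nelem;
    if (H5Dset_extent(dset, &new_size) < 0)               /* grow the dataset */
        return -1;

    fspace = H5Dget_space(dset);                          /* refresh the extent */
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, &old_size, NULL, &nelem, NULL);

    hid_t mspace = H5Screate_simple(1, &nelem, NULL);
    herr_t st = H5Dwrite(dset, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, buf);

    H5Sclose(mspace);
    H5Sclose(fspace);
    return st;
}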
示例21: ISView_General_HDF5PetscErrorCode ISView_General_HDF5(IS is, PetscViewer viewer){ hid_t filespace; /* file dataspace identifier */ hid_t chunkspace; /* chunk dataset property identifier */ hid_t plist_id; /* property list identifier */ hid_t dset_id; /* dataset identifier */ hid_t memspace; /* memory dataspace identifier */ hid_t inttype; /* int type (H5T_NATIVE_INT or H5T_NATIVE_LLONG) */ hid_t file_id, group; herr_t status; hsize_t dim, maxDims[3], dims[3], chunkDims[3], count[3],offset[3]; PetscInt bs, N, n, timestep, low; const PetscInt *ind; const char *isname; PetscErrorCode ierr; PetscFunctionBegin; ierr = ISGetBlockSize(is,&bs);CHKERRQ(ierr); ierr = PetscViewerHDF5OpenGroup(viewer, &file_id, &group);CHKERRQ(ierr); ierr = PetscViewerHDF5GetTimestep(viewer, ×tep);CHKERRQ(ierr); /* Create the dataspace for the dataset. * * dims - holds the current dimensions of the dataset * * maxDims - holds the maximum dimensions of the dataset (unlimited * for the number of time steps with the current dimensions for the * other dimensions; so only additional time steps can be added). * * chunkDims - holds the size of a single time step (required to * permit extending dataset). */ dim = 0; if (timestep >= 0) { dims[dim] = timestep+1; maxDims[dim] = H5S_UNLIMITED; chunkDims[dim] = 1; ++dim; } ierr = ISGetSize(is, &N);CHKERRQ(ierr); ierr = ISGetLocalSize(is, &n);CHKERRQ(ierr); ierr = PetscHDF5IntCast(N/bs,dims + dim);CHKERRQ(ierr); maxDims[dim] = dims[dim]; chunkDims[dim] = dims[dim]; ++dim; if (bs >= 1) { dims[dim] = bs; maxDims[dim] = dims[dim]; chunkDims[dim] = dims[dim]; ++dim; } filespace = H5Screate_simple(dim, dims, maxDims); if (filespace == -1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Cannot H5Screate_simple()");#if defined(PETSC_USE_64BIT_INDICES) inttype = H5T_NATIVE_LLONG;#else inttype = H5T_NATIVE_INT;#endif /* Create the dataset with default properties and close filespace */ ierr = PetscObjectGetName((PetscObject) is, &isname);CHKERRQ(ierr); if (!H5Lexists(group, isname, H5P_DEFAULT)) { /* Create chunk */ chunkspace = H5Pcreate(H5P_DATASET_CREATE); if (chunkspace == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Pcreate()"); status = H5Pset_chunk(chunkspace, dim, chunkDims);CHKERRQ(status);#if (H5_VERS_MAJOR * 10000 + H5_VERS_MINOR * 100 + H5_VERS_RELEASE >= 10800) dset_id = H5Dcreate2(group, isname, inttype, filespace, H5P_DEFAULT, chunkspace, H5P_DEFAULT);#else dset_id = H5Dcreate(group, isname, inttype, filespace, H5P_DEFAULT);#endif if (dset_id == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Dcreate2()"); status = H5Pclose(chunkspace);CHKERRQ(status); } else { dset_id = H5Dopen2(group, isname, H5P_DEFAULT); status = H5Dset_extent(dset_id, dims);CHKERRQ(status); } status = H5Sclose(filespace);CHKERRQ(status); /* Each process defines a dataset and writes it to the hyperslab in the file */ dim = 0; if (timestep >= 0) { count[dim] = 1; ++dim; } ierr = PetscHDF5IntCast(n/bs,count + dim);CHKERRQ(ierr); ++dim; if (bs >= 1) { count[dim] = bs; ++dim; } if (n > 0) { memspace = H5Screate_simple(dim, count, NULL); if (memspace == -1) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Cannot H5Screate_simple()"); } else { /* Can't create dataspace with zero for any dimension, so create null dataspace. */ memspace = H5Screate(H5S_NULL);//.........这里部分代码省略.........
Developer ID: lw4992, Project: petsc, Code lines: 101
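Example 21 grows a chunked dataset one time step at a time: when the dataset already exists it calls H5Dset_extent with the enlarged dimensions before writing the new slab. A hedged sketch of that extend-then-write idiom (invented names, plain int data; not PETSc code):

#include "hdf5.h"

/* Append one row to a dataset whose first dimension is H5S_UNLIMITED
 * (and therefore chunked). nrows is the current number of rows. */
static herr_t append_row(hid_t dset, hsize_t nrows, hsize_t ncols, const int *row)
{
    hsize_t new_dims[2] = {nrows + 1, ncols};
    if (H5Dset_extent(dset, new_dims) < 0) return -1;

    /* Re-fetch the file dataspace so it reflects the new extent */
    hid_t fspace = H5Dget_space(dset);
    hsize_t offset[2] = {nrows, 0};
    hsize_t count[2]  = {1, ncols};
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, offset, NULL, count, NULL);

    hid_t  mspace = H5Screate_simple(2, count, NULL);
    herr_t status = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, row);

    H5Sclose(mspace);
    H5Sclose(fspace);
    return status;
}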
Example 22: main

int
main (void)
{
    hid_t        vfile, file, src_space, mem_space, vspace,
                 vdset, dset;                       /* Handles */
    hid_t        dcpl, dapl;
    herr_t       status;
    hsize_t      vdsdims[3] = {4*DIM0_1, VDSDIM1, VDSDIM2},
                 vdsdims_max[3] = {VDSDIM0, VDSDIM1, VDSDIM2},
                 dims[3] = {DIM0_1, DIM1, DIM2},
                 memdims[3] = {DIM0_1, DIM1, DIM2},
                 extdims[3] = {0, DIM1, DIM2},      /* Dimensions of the extended source datasets */
                 chunk_dims[3] = {DIM0_1, DIM1, DIM2},
                 dims_max[3] = {DIM0, DIM1, DIM2},
                 vdsdims_out[3],
                 vdsdims_max_out[3],
                 start[3],                          /* Hyperslab parameters */
                 stride[3],
                 count[3],
                 src_count[3],
                 block[3];
    hsize_t      start_out[3],                      /* Hyperslab parameter out */
                 stride_out[3],
                 count_out[3],
                 block_out[3];
    int          i, j;
    H5D_layout_t layout;                            /* Storage layout */
    size_t       num_map;                           /* Number of mappings */
    ssize_t      len;                               /* Length of the string; also a return value */
    char         *filename;
    char         *dsetname;
    int          wdata[DIM0_1*DIM1*DIM2];

    /*
     * Create source files and datasets. This step is optional.
     */
    for (i=0; i < PLANE_STRIDE; i++) {
        /*
         * Initialize data for i-th source dataset.
         */
        for (j = 0; j < DIM0_1*DIM1*DIM2; j++) wdata[j] = i+1;

        /*
         * Create the source files and datasets. Write data to each dataset and
         * close all resources.
         */
        file = H5Fcreate (SRC_FILE[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        src_space = H5Screate_simple (RANK, dims, dims_max);
        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        status = H5Pset_chunk (dcpl, RANK, chunk_dims);
        dset = H5Dcreate2 (file, SRC_DATASET[i], H5T_NATIVE_INT, src_space,
                           H5P_DEFAULT, dcpl, H5P_DEFAULT);
        status = H5Dwrite (dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
        status = H5Sclose (src_space);
        status = H5Pclose (dcpl);
        status = H5Dclose (dset);
        status = H5Fclose (file);
    }

    vfile = H5Fcreate (VFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

    /* Create VDS dataspace. */
    vspace = H5Screate_simple (RANK, vdsdims, vdsdims_max);

    /* Create dataspaces for the source dataset. */
    src_space = H5Screate_simple (RANK, dims, dims_max);

    /* Create VDS creation property */
    dcpl = H5Pcreate (H5P_DATASET_CREATE);

    /* Initialize hyperslab values */
    start[0] = 0;
    start[1] = 0;
    start[2] = 0;
    stride[0] = PLANE_STRIDE; /* we will select every fifth plane in VDS */
    stride[1] = 1;
    stride[2] = 1;
    count[0] = H5S_UNLIMITED;
    count[1] = 1;
    count[2] = 1;
    src_count[0] = H5S_UNLIMITED;
    src_count[1] = 1;
    src_count[2] = 1;
    block[0] = 1;
    block[1] = DIM1;
    block[2] = DIM2;

    /*
     * Build the mappings
     */
    status = H5Sselect_hyperslab (src_space, H5S_SELECT_SET, start, NULL, src_count, block);
    for (i=0; i < PLANE_STRIDE; i++) {
        status = H5Sselect_hyperslab (vspace, H5S_SELECT_SET, start, stride, count, block);
        status = H5Pset_virtual (dcpl, vspace, SRC_FILE[i], SRC_DATASET[i], src_space);
        start[0]++;
    }
//......... part of the code omitted .........
Developer ID: Starlink, Project: hdf5, Code lines: 101
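The part of Example 22 elided above reads the virtual dataset back and walks its mappings. As an illustrative sketch (assuming HDF5 1.10 or later, where the VDS API was introduced), the storage layout and the number of mappings can be recovered from the dataset's creation property list:

#include "hdf5.h"
#include <stdio.h>

/* Print the mapping count if the given dataset is a VDS */
void inspect_vds(hid_t dset)
{
    hid_t dcpl = H5Dget_create_plist(dset);

    if (H5Pget_layout(dcpl) == H5D_VIRTUAL) {
        size_t num_map = 0;
        H5Pget_virtual_count(dcpl, &num_map);
        printf("virtual dataset with %zu mapping(s)\n", num_map);
    }
    H5Pclose(dcpl);
}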
Example 23: size

//......... part of the code omitted .........

    /* create variable length string datatype */
    types[8] = tid_vlen_s = H5Tcopy (H5T_C_S1);
    H5Tset_size (tid_vlen_s, H5T_VARIABLE);

    /* create compound datatypes */
    cmp_tid = H5Tcreate (H5T_COMPOUND, sizeof (test_comp_t));
    offset = 0;
    for (i=0; i<NTYPES-2; i++) {
        H5Tinsert(cmp_tid, names[i], offset, types[i]);
        offset += H5Tget_size(types[i]);
    }

    H5Tinsert(cmp_tid, names[7], offset, types[7]);
    offset += sizeof (hvl_t);
    H5Tinsert(cmp_tid, names[8], offset, types[8]);

    /* create dataspace */
    sid_1d = H5Screate_simple (1, dims, NULL);
    sid_2d = H5Screate_simple (2, dims2d, NULL);
    sid_2 = H5Screate_simple (1, dim1, NULL);
    sid_large = H5Screate_simple (1, &nrows, NULL);
    sid_null = H5Screate (H5S_NULL);
    sid_scalar = H5Screate (H5S_SCALAR);

    /* create fid access property */
    fapl = H5Pcreate (H5P_FILE_ACCESS);
    H5Pset_libver_bounds (fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

    /* create dataset creation property */
    dcpl = H5Pcreate (H5P_DATASET_CREATE);

    /* set dataset chunk */
    if (chunk>0) {
        H5Pset_chunk (dcpl, 1, &chunk);
    }

    /* set dataset compression */
    if (compressed) {
        if (chunk<=0) {
            chunk = dim0/10+1;
            H5Pset_chunk (dcpl, 1, &chunk);
        }
        H5Pset_shuffle (dcpl);
        H5Pset_deflate (dcpl, 6);
    }

    /* allocate buffers */
    buf_comp       = (test_comp_t *)calloc(dim0, sizeof(test_comp_t));
    buf_comp_large = (test_comp_t *)calloc(nrows, sizeof(test_comp_t));
    buf_int        = (int *)calloc(dim0, sizeof(int));
    buf_float_a    = malloc(dim0*sizeof(*buf_float_a));
    buf_vlen_i     = (hvl_t *)calloc(dim0, sizeof (hvl_t));
    buf_vlen_s     = (char **)calloc(dim0, sizeof(char *));
    buf_str        = malloc(dim0*sizeof (*buf_str));

    /* allocate array of double pointers */
    buf_double2d = (double **)calloc(dims2d[0],sizeof(double *));
    /* allocate a contiguous chunk of memory for the data */
    buf_double2d[0] = (double *)calloc( dims2d[0]*dims2d[1],sizeof(double) );
    /* assign memory addresses to the pointer array */
    for (i=1; i <dims2d[0]; i++)
        buf_double2d[i] = buf_double2d[0]+i*dims2d[1];

    /* fill buffer values */
    len = 1;
    for (i=0; i<dims[0]; i++) {
        buf_comp[i].i = buf_int[i] = i-2147483648;
Developer ID: FilipeMaia, Project: hdf5, Code lines: 67
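Example 23 picks its chunk size ad hoc (dim0/10+1 when compression is requested without an explicit chunk). A related tuning knob, shown below as a sketch with invented numbers, is the per-dataset chunk cache set on the dataset access property list; it matters when read patterns cut across chunk boundaries:

#include "hdf5.h"

/* Open a dataset with a 16 MiB chunk cache instead of the default;
 * the slot count should be a prime, per the H5Pset_chunk_cache guidance. */
hid_t open_with_bigger_cache(hid_t file, const char *name)
{
    hid_t dapl = H5Pcreate(H5P_DATASET_ACCESS);
    H5Pset_chunk_cache(dapl, 12421, 16 * 1024 * 1024,
                       H5D_CHUNK_CACHE_W0_DEFAULT);

    hid_t dset = H5Dopen2(file, name, dapl);
    H5Pclose(dapl);
    return dset;
}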
Example 24: test_dataset_write_with_filters

/*-------------------------------------------------------------------------
 * Function:  test_dataset_write_with_filters
 *
 * Purpose:   Tests creating datasets and writing data with dynamically loaded filters
 *
 * Return:    SUCCEED/FAIL
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_dataset_write_with_filters(hid_t fid)
{
    hid_t        dcpl_id = -1;      /* Dataset creation property list ID        */
    unsigned int compress_level;    /* Deflate compression level                */
    unsigned int filter1_data;      /* Data used by filter 1                    */
    unsigned int libver_values[4];  /* Used w/ the filter that makes HDF5 calls */

    /*----------------------------------------------------------
     * STEP 1: Test deflation by itself.
     *----------------------------------------------------------
     */
    HDputs("Testing dataset writes with deflate filter");
#ifdef H5_HAVE_FILTER_DEFLATE
    if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(dcpl_id, 2, chunk_sizes_g) < 0)
        TEST_ERROR;
    compress_level = 6;
    if (H5Pset_deflate(dcpl_id, compress_level) < 0)
        TEST_ERROR;

    /* Ensure the filter works */
    if (ensure_filter_works(fid, DSET_DEFLATE_NAME, dcpl_id) < 0)
        TEST_ERROR;

    /* Clean up objects used for this test */
    if (H5Pclose(dcpl_id) < 0)
        TEST_ERROR;
#else /* H5_HAVE_FILTER_DEFLATE */
    SKIPPED();
    HDputs("    Deflate filter not enabled");
#endif /* H5_HAVE_FILTER_DEFLATE */

    /*----------------------------------------------------------
     * STEP 2: Test filter plugin 1 by itself.
     *----------------------------------------------------------
     */
    HDputs("    dataset writes with filter plugin 1");
    if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(dcpl_id, 2, chunk_sizes_g) < 0)
        TEST_ERROR;

    /* Set up the filter, passing in the amount the filter will add and subtract
     * from each data element. Note that this value has an arbitrary max of 9.
     */
    filter1_data = 9;
    if (H5Pset_filter(dcpl_id, FILTER1_ID, H5Z_FLAG_MANDATORY, (size_t)1, &filter1_data) < 0)
        TEST_ERROR;

    /* Ensure the filter works */
    if (ensure_filter_works(fid, DSET_FILTER1_NAME, dcpl_id) < 0)
        TEST_ERROR;

    /* Clean up objects used for this test */
    if (H5Pclose(dcpl_id) < 0)
        TEST_ERROR;

    /* Unregister the dynamic filter for testing purpose. The next time when this test is run for
     * the new file format, the library's H5PL code has to search in the table of loaded plugin libraries
     * for this filter.
     */
    if (H5Zunregister(FILTER1_ID) < 0)
        TEST_ERROR;

    /*----------------------------------------------------------
     * STEP 3: Test filter plugin 2 by itself.
     *----------------------------------------------------------
     */
    HDputs("    dataset writes with filter plugin 2");
    if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(dcpl_id, 2, chunk_sizes_g) < 0)
        TEST_ERROR;
    if (H5Pset_filter(dcpl_id, FILTER2_ID, H5Z_FLAG_MANDATORY, 0, NULL) < 0)
        TEST_ERROR;

    /* Ensure the filter works */
    if (ensure_filter_works(fid, DSET_FILTER2_NAME, dcpl_id) < 0)
        TEST_ERROR;

    /* Clean up objects used for this test */
    if (H5Pclose(dcpl_id) < 0)
        TEST_ERROR;

    /* Unregister the dynamic filter for testing purpose. The next time when this test is run for
     * the new file format, the library's H5PL code has to search in the table of loaded plugin libraries
     * for this filter.
     */
    if (H5Zunregister(FILTER2_ID) < 0)
//......... part of the code omitted .........
Developer ID: Starlink, Project: hdf5, Code lines: 101
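Example 24 assumes the filters behind FILTER1_ID and FILTER2_ID can be loaded dynamically. As a sketch (H5PLprepend has existed since HDF5 1.10.1; the directory string is a placeholder), the plugin search path can also be extended at run time, in addition to the HDF5_PLUGIN_PATH environment variable:

#include "hdf5.h"

/* Add a directory to the front of the filter plugin search path */
herr_t add_plugin_dir(const char *dir)
{
    return H5PLprepend(dir);
}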
Example 25: main

int main (void)
{
    hid_t    file;
    hid_t    grp;
    hid_t    dataset, dataspace;
    hid_t    plist;

    herr_t   status;
    hsize_t  dims[2];
    hsize_t  cdims[2];

    int      idx_f, idx_g;

    /*
     * Create a file.
     */
    file = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

    /*
     * Create a group in the file.
     */
    grp = H5Gcreate(file, "/Data", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /*
     * Create dataset "Compressed Data" in the group using absolute
     * name. Dataset creation property list is modified to use
     * GZIP compression with the compression effort set to 6.
     * Note that compression can be used only when dataset is chunked.
     */
    dims[0] = 1000;
    dims[1] = 20;
    cdims[0] = 20;
    cdims[1] = 20;
    dataspace = H5Screate_simple(RANK, dims, NULL);
    plist = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(plist, 2, cdims);
    H5Pset_deflate( plist, 6);
    dataset = H5Dcreate(file, "/Data/Compressed_Data", H5T_NATIVE_INT,
                        dataspace, H5P_DEFAULT, plist, H5P_DEFAULT);

    /*
     * Close the first dataset.
     */
    H5Sclose(dataspace);
    H5Dclose(dataset);

    /*
     * Create the second dataset.
     */
    dims[0] = 500;
    dims[1] = 20;
    dataspace = H5Screate_simple(RANK, dims, NULL);
    dataset = H5Dcreate(file, "/Data/Float_Data", H5T_NATIVE_FLOAT,
                        dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /*
     * Close the second dataset and file.
     */
    H5Sclose(dataspace);
    H5Dclose(dataset);
    H5Pclose(plist);
    H5Gclose(grp);
    H5Fclose(file);

    /*
     * Now reopen the file and group in the file.
     */
    file = H5Fopen(H5FILE_NAME, H5F_ACC_RDWR, H5P_DEFAULT);
    grp = H5Gopen(file, "Data", H5P_DEFAULT);

    /*
     * Access "Compressed_Data" dataset in the group.
     */
    dataset = H5Dopen(grp, "Compressed_Data", H5P_DEFAULT);
    if( dataset < 0) printf(" Dataset 'Compressed-Data' is not found. \n");
    printf("\"/Data/Compressed_Data\" dataset is open \n");

    /*
     * Close the dataset.
     */
    status = H5Dclose(dataset);

    /*
     * Create hard link to the Data group.
     */
    status = H5Lcreate_hard(file, "Data", H5L_SAME_LOC, "Data_new", H5P_DEFAULT, H5P_DEFAULT);

    /*
     * We can access "Compressed_Data" dataset using created
     * hard link "Data_new".
     */
    dataset = H5Dopen(file, "/Data_new/Compressed_Data", H5P_DEFAULT);
    if( dataset < 0) printf(" Dataset is not found. \n");
    printf("\"/Data_new/Compressed_Data\" dataset is open \n");

    /*
     * Close the dataset.
     */
    status = H5Dclose(dataset);

//......... part of the code omitted .........
Developer ID: SterVeen, Project: DAL1, Code lines: 101
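Example 25's compressed dataset silently depends on a deflate-enabled HDF5 build. A defensive sketch (illustrative, not part of the original example) checks that the filter is present and can both encode and decode before H5Pset_deflate is relied upon:

#include "hdf5.h"

/* Return nonzero if the deflate filter is available for reading and writing */
int deflate_usable(void)
{
    unsigned int info = 0;

    if (H5Zfilter_avail(H5Z_FILTER_DEFLATE) <= 0)
        return 0;                        /* filter not compiled in */

    H5Zget_filter_info(H5Z_FILTER_DEFLATE, &info);
    return (info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) &&
           (info & H5Z_FILTER_CONFIG_DECODE_ENABLED);
}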
Example 26: test_refresh

/*-------------------------------------------------------------------------
 * Function:    test_refresh
 *
 * Purpose:     This function tests refresh (evict/reload) of individual
 *              objects' metadata from the metadata cache.
 *
 * Return:      0 on Success, 1 on Failure
 *
 * Programmer:  Mike McGreevy
 *              August 17, 2010
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
herr_t test_refresh(void) {

    /**************************************************************************
     *
     * Test Description:
     *
     * This test will build an HDF5 file with several objects in a varying
     * hierarchical layout. It will then flush the entire file to disk. Then,
     * an attribute will be added to each object in the file.
     *
     * One by one, this process will flush each object to disk, individually.
     * It will also be coordinating with another process, which will open
     * the object before it is flushed by this process, and then refresh the
     * object after it's been flushed, comparing the before and after object
     * information to ensure that they are as expected. (i.e., most notably,
     * that an attribute has been added, and is only visible after a
     * successful call to a H5*refresh function).
     *
     * As with the flush case, the implementation is a bit tricky as it's
     * dealing with signals going back and forth between the two processes
     * to ensure the timing is correct, but basically, an example:
     *
     * Step 1. Dataset is created.
     * Step 2. Dataset is flushed.
     * Step 3. Attribute on Dataset is created.
     * Step 4. Another process opens the dataset and verifies that it does
     *         not see an attribute (as the attribute hasn't been flushed yet).
     * Step 5. This process flushes the dataset again (with Attribute attached).
     * Step 6. The other process calls H5Drefresh, which should evict/reload
     *         the object's metadata, and thus pick up the attribute that's
     *         attached to it. Most other before/after object information is
     *         compared for sanity as well.
     * Step 7. Rinse and Repeat for each object in the file.
     *
     **************************************************************************/

    /**************************************************************************
     * Generated Test File will look like this:
     *
     * GROUP "/"
     *   DATASET "Dataset1"
     *   GROUP "Group1" {
     *     DATASET "Dataset2"
     *     GROUP "Group2" {
     *       DATATYPE "CommittedDatatype3"
     *     }
     *   }
     *   GROUP "Group3" {
     *     DATASET "Dataset3"
     *     DATATYPE "CommittedDatatype2"
     *   }
     *   DATATYPE "CommittedDatatype1"
     **************************************************************************/

    /* Variables */
    hid_t aid,fid,sid,tid1,did,dcpl,fapl = 0;
    hid_t gid,gid2,gid3,tid2,tid3,did2,did3,status = 0;
    hsize_t dims[2] = {50,50};
    hsize_t cdims[2] = {1,1};
    int fillval = 2;

    /* Testing Message */
    HDfprintf(stdout, "Testing individual object refresh behavior:\n");

    /* Cleanup any old error or signal files */
    CLEANUP_FILES;

    /* ================ */
    /* CREATE TEST FILE */
    /* ================ */

    /* Create File */
    if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR;
    if (H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR;
    if ((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;

    /* Create data space and types */
    if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR;
    if ( H5Pset_chunk(dcpl, 2, cdims) < 0 ) TEST_ERROR;
    if ( H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval) < 0 ) TEST_ERROR;
    if ((sid = H5Screate_simple(2, dims, dims)) < 0) TEST_ERROR;
    if ((tid1 = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR;
    if ((tid2 = H5Tcopy(H5T_NATIVE_CHAR)) < 0) TEST_ERROR;
    if ((tid3 = H5Tcopy(H5T_NATIVE_LONG)) < 0) TEST_ERROR;
//......... part of the code omitted .........
Developer ID: FilipeMaia, Project: hdf5, Code lines: 101
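Example 26's two-process choreography ultimately rests on one call per side. A minimal sketch of that pair (HDF5 1.10+ SWMR API; the wrapper names are invented):

#include "hdf5.h"

/* Writer side: push the dataset's pending metadata out to the file */
herr_t publish(hid_t did) { return H5Dflush(did); }

/* Reader side: evict cached metadata and reload it from the file,
 * picking up e.g. a newly attached attribute */
herr_t resync(hid_t did) { return H5Drefresh(did); }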
Example 27: test_skip_compress_write2

/*-------------------------------------------------------------------------
 * Function:    test_skip_compress_write2
 *
 * Purpose:     Test skipping compression filter when there are three filters
 *              for the dataset
 *
 * Return:      Success:    0
 *              Failure:    1
 *
 * Programmer:  Raymond Lu
 *              30 November 2012
 *
 *-------------------------------------------------------------------------
 */
static int
test_skip_compress_write2(hid_t file)
{
    hid_t    dataspace = -1, dataset = -1;
    hid_t    mem_space = -1;
    hid_t    cparms = -1, dxpl = -1;
    hsize_t  dims[2]  = {NX, NY};
    hsize_t  maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
    hsize_t  chunk_dims[2] = {CHUNK_NX, CHUNK_NY};
    herr_t   status;
    int      i, j, n;

    unsigned filter_mask = 0;
    int      origin_direct_buf[CHUNK_NX][CHUNK_NY];
    int      direct_buf[CHUNK_NX][CHUNK_NY];
    int      check_chunk[CHUNK_NX][CHUNK_NY];
    hsize_t  offset[2] = {0, 0};
    size_t   buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
    int      aggression = 9;     /* Compression aggression setting */

    hsize_t start[2];  /* Start of hyperslab */
    hsize_t stride[2]; /* Stride of hyperslab */
    hsize_t count[2];  /* Block count */
    hsize_t block[2];  /* Block sizes */

    TESTING("skipping compression filters but keep two other filters");

    /*
     * Create the data space with unlimited dimensions.
     */
    if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
        goto error;

    if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
        goto error;

    /*
     * Modify dataset creation properties, i.e. enable chunking and compression.
     * The order of filters is bogus 1 + deflate + bogus 2.
     */
    if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        goto error;

    if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
        goto error;

    /* Register and enable first bogus filter */
    if(H5Zregister (H5Z_BOGUS1) < 0)
        goto error;

    if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS1, 0, (size_t)0, NULL) < 0)
        goto error;

    /* Enable compression filter */
    if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
        goto error;

    /* Register and enable second bogus filter */
    if(H5Zregister (H5Z_BOGUS2) < 0)
        goto error;

    if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS2, 0, (size_t)0, NULL) < 0)
        goto error;

    /*
     * Create a new dataset within the file using cparms
     * creation properties.
     */
    if((dataset = H5Dcreate2(file, DATASETNAME3, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
                             cparms, H5P_DEFAULT)) < 0)
        goto error;

    if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
        goto error;

    /* Initialize data for one chunk. Apply operations of two bogus filters to the chunk */
    for(i = n = 0; i < CHUNK_NX; i++)
        for(j = 0; j < CHUNK_NY; j++) {
            origin_direct_buf[i][j] = n++;
            direct_buf[i][j] = (origin_direct_buf[i][j] + ADD_ON) * FACTOR;
        }

    /* write the uncompressed chunk data repeatedly to dataset, using the direct writing function.
     * Indicate skipping the compression filter but keep the other two bogus filters */
    offset[0] = CHUNK_NX;
//......... part of the code omitted .........
Developer ID: FilipeMaia, Project: hdf5, Code lines: 101
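The code elided from Example 27 performs the actual direct chunk write with a filter mask that skips only the deflate stage. Below is a self-contained sketch of the mechanism, assuming a recent HDF5 (H5Dwrite_chunk was promoted into the main library around release 1.10.3; older code uses H5DOwrite_chunk from the high-level library), reduced to a single-filter pipeline for brevity:

#include "hdf5.h"
#include <stdint.h>

int main(void)
{
    hsize_t dims[2] = {4, 4}, chunk[2] = {4, 4}, offset[2] = {0, 0};
    int     buf[16] = {0};    /* exactly one chunk of raw (unfiltered) data */

    hid_t file  = H5Fcreate("chunk_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(2, dims, NULL);
    hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk);
    H5Pset_deflate(dcpl, 9);

    hid_t dset = H5Dcreate2(file, "d", H5T_NATIVE_INT, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);

    /* Bit i of filter_mask set => filter i of the pipeline is skipped for
     * this chunk; 0x1 skips deflate (the only filter), so buf lands raw. */
    uint32_t filter_mask = 0x1;
    H5Dwrite_chunk(dset, H5P_DEFAULT, filter_mask, offset, sizeof(buf), buf);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}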
Example 28: do_write

//......... part of the code omitted .........

            VRFY((h5mem_space_id >= 0), "H5Screate_simple");

            /* Create the dataset transfer property list */
            h5dxpl = H5Pcreate(H5P_DATASET_XFER);
            if (h5dxpl < 0) {
                fprintf(stderr, "HDF5 Property List Create failed\n");
                GOTOERROR(FAIL);
            }
            break;

        default:
            HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
            GOTOERROR(FAIL);
            break;
    } /* end switch */

    /* create dataset */
    switch (parms->io_type) {
        case POSIXIO:
            break;

        case HDF5:
            h5dcpl = H5Pcreate(H5P_DATASET_CREATE);
            if (h5dcpl < 0) {
                fprintf(stderr, "HDF5 Property List Create failed\n");
                GOTOERROR(FAIL);
            }

            if(parms->h5_use_chunks) {
                /* Set the chunk size to be the same as the buffer size */
                hrc = H5Pset_chunk(h5dcpl, rank, h5chunk);
                if (hrc < 0) {
                    fprintf(stderr, "HDF5 Property List Set failed\n");
                    GOTOERROR(FAIL);
                } /* end if */
            } /* end if */

            sprintf(dname, "Dataset_%ld", (unsigned long)parms->num_bytes);
            h5ds_id = H5Dcreate2(fd->h5fd, dname, ELMT_H5_TYPE,
                                 h5dset_space_id, H5P_DEFAULT, h5dcpl, H5P_DEFAULT);

            if (h5ds_id < 0) {
                HDfprintf(stderr, "HDF5 Dataset Create failed\n");
                GOTOERROR(FAIL);
            }

            hrc = H5Pclose(h5dcpl);
            /* verifying the close of the dcpl */
            if (hrc < 0) {
                HDfprintf(stderr, "HDF5 Property List Close failed\n");
                GOTOERROR(FAIL);
            }
            break;

        default:
            /* unknown request */
            HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
            GOTOERROR(FAIL);
            break;
    }

    /* Start "raw data" write timer */
    set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, TSTART);
Developer ID: flexi-framework, Project: HDF5, Code lines: 67
Note: The H5Pset_chunk examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to the corresponding project's license. Do not reproduce without permission.