These C++ H5Sclose code examples are meant to be practical; we hope they help you.
This article collects and summarizes typical usage examples of the C++ H5Sclose function. If you have been wondering what exactly H5Sclose does in C++, how H5Sclose is called, or what H5Sclose looks like in real code, the hand-picked examples below should help. A total of 30 H5Sclose code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples. (A minimal, self-contained sketch of the pattern common to most of the examples is given right after Example 1.)

Example 1: H5Dopen

//......... some code omitted here .........

  // we can do the comparison.
  // Count the number of forward slashes in names[string_pos].
  uword count = 0;
  for (uword i = 0; i < search_info->names[string_pos].length(); ++i)
    {
    if ((search_info->names[string_pos])[i] == '/') { ++count; }
    }

  // Count the number of forward slashes in the full name.
  uword name_count = 0;
  const std::string str = std::string(name);
  for (uword i = 0; i < str.length(); ++i)
    {
    if (str[i] == '/') { ++name_count; }
    }

  // If we are asking for more slashes than we have, this can't be a match.
  // Skip to below, where we decide whether or not to keep it anyway based
  // on the exactness condition of the search.
  if (count <= name_count)
    {
    uword start_pos = (count == 0) ? 0 : std::string::npos;

    while (count > 0)
      {
      // Move pointer to previous slash.
      start_pos = str.rfind('/', start_pos);

      // Break if we've run out of slashes.
      if (start_pos == std::string::npos) { break; }

      --count;
      }

    // Now take the substring (this may end up being the full string).
    const std::string substring = str.substr(start_pos);

    // Are they the same?
    if (substring == search_info->names[string_pos])
      {
      // We have found the object; it must be better than our existing match.
      hid_t match_candidate = H5Dopen(loc_id, name, H5P_DEFAULT);

      // arma_check(match_candidate < 0, "Mat::load(): cannot open an HDF5 dataset");
      if (match_candidate < 0)
        {
        return -1;
        }

      // Ensure that the dataset is valid and of the correct dimensionality.
      hid_t filespace = H5Dget_space(match_candidate);
      int num_dims = H5Sget_simple_extent_ndims(filespace);

      if (num_dims <= search_info->num_dims)
        {
        // Valid dataset -- we'll keep it.
        // If we already have an existing match we have to close it.
        if (search_info->best_match != -1)
          {
          H5Dclose(search_info->best_match);
          }

        search_info->best_match_position = string_pos;
        search_info->best_match = match_candidate;
        }

      H5Sclose(filespace);
      }
    }

  // If they are not the same, but we have not found anything and we don't
  // need an exact match, take this.
  if ((search_info->exact == false) && (search_info->best_match == -1))
    {
    hid_t match_candidate = H5Dopen(loc_id, name, H5P_DEFAULT);

    // arma_check(match_candidate < 0, "Mat::load(): cannot open an HDF5 dataset");
    if (match_candidate < 0)
      {
      return -1;
      }

    hid_t filespace = H5Dget_space(match_candidate);
    int num_dims = H5Sget_simple_extent_ndims(filespace);

    if (num_dims <= search_info->num_dims)
      {
      // Valid dataset -- we'll keep it.
      search_info->best_match = H5Dopen(loc_id, name, H5P_DEFAULT);
      }

    H5Sclose(filespace);
    }
    }
  }

  return 0;
  }
Developer: ELEN4002-Lab-Project-2012, Project: ELEN4002-Lab-Project, Lines of code: 101
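The examples on this page come from real projects and carry plenty of project-specific context. As a quick orientation, here is a minimal, self-contained sketch of the pattern most of them share: every dataspace obtained from H5Screate_simple (or H5Dget_space) is eventually released with H5Sclose, together with the dataset and file identifiers created along the way. The file and dataset names below are placeholders; this sketch is not taken from any of the listed projects.

#include <stdio.h>
#include "hdf5.h"

int main(void)
{
    hsize_t dims[2] = {4, 6};
    int     data[4][6];

    /* Fill a small buffer with test values. */
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 6; j++)
            data[i][j] = i * 6 + j;

    /* Create a file, a 2-D dataspace, and a dataset that uses that dataspace. */
    hid_t file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(2, dims, NULL);
    hid_t dset  = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* Write the whole buffer in one call. */
    if (H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
        fprintf(stderr, "H5Dwrite failed\n");

    /* Release every identifier, including the dataspace. */
    H5Dclose(dset);
    H5Sclose(space);   /* the call this page is about */
    H5Fclose(file);

    return 0;
}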
示例2: test_core//.........这里部分代码省略......... /* Check that the values read are the same as the values written */ p1 = points; p2 = check; for(i = 0; i < DSET1_DIM1; i++) for(j = 0; j < DSET1_DIM2; j++) if(*p1++ != *p2++) { H5_FAILED(); printf(" Read different values than written in data set 1./n"); printf(" At index %d,%d/n", i, j); TEST_ERROR; } /* end if */ if(H5Dclose(dset1) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; /* Open the file with backing store on for read and write. * Changes will be saved in file. */ if(H5Pset_fapl_core(fapl, (size_t)CORE_INCREMENT, TRUE) < 0) TEST_ERROR; if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR; /* Create the dset1 */ if((dset1 = H5Dcreate2(file, DSET1_NAME, H5T_NATIVE_INT, space1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; /* Write the data to the dset1 */ if(H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points) < 0) TEST_ERROR; if(H5Dclose(dset1) < 0) TEST_ERROR; if((dset1 = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; /* Reallocate memory for reading buffer. */ HDassert(check); HDfree(check); if(NULL == (check = (int *)HDmalloc(DSET1_DIM1 * DSET1_DIM2 * sizeof(int)))) TEST_ERROR; /* Read the data back from dset1 */ if(H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, check) < 0) TEST_ERROR; /* Check that the values read are the same as the values written */ p1 = points; p2 = check; for(i = 0; i < DSET1_DIM1; i++) for(j = 0; j < DSET1_DIM2; j++) if(*p1++ != *p2++) { H5_FAILED(); printf(" Read different values than written in data set 1./n"); printf(" At index %d,%d/n", i, j); TEST_ERROR; } /* end if */ /* Check file size API */ if(H5Fget_filesize(file, &file_size) < 0) TEST_ERROR; /* There is no garantee the size of metadata in file is constant. * Just try to check if it's reasonable. */ if(file_size<64*KB || file_size>256*KB) TEST_ERROR; if(H5Sclose(space1) < 0) TEST_ERROR; if(H5Dclose(dset1) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; HDassert(points); HDfree(points); HDassert(check); HDfree(check); h5_cleanup(FILENAME, fapl); PASSED(); return 0;error: H5E_BEGIN_TRY { H5Pclose(fapl); H5Fclose(file); } H5E_END_TRY; if(points) HDfree(points); if(check) HDfree(check); return -1;}
Developer: FilipeMaia, Project: hdf5, Lines of code: 101
示例3: test_multi//.........这里部分代码省略......... /* Before any data is written, the raw data file is empty. So * the file size is only the size of b-tree + HADDR_MAX/4. */ if(file_size < HADDR_MAX/4 || file_size > HADDR_MAX/2) TEST_ERROR; if((dset=H5Dcreate2(file, dname, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; for(i=0; i<MULTI_SIZE; i++) for(j=0; j<MULTI_SIZE; j++) buf[i][j] = i*10000+j; if(H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR; if((fapl2=H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR; if(H5Pset_multi_type(fapl2, H5FD_MEM_SUPER) < 0) TEST_ERROR; if(H5Fget_vfd_handle(file, fapl2, (void **)&fhandle) < 0) TEST_ERROR; if(*fhandle<0) TEST_ERROR; if(H5Pset_multi_type(fapl2, H5FD_MEM_DRAW) < 0) TEST_ERROR; if(H5Fget_vfd_handle(file, fapl2, (void **)&fhandle2) < 0) TEST_ERROR; if(*fhandle2<0) TEST_ERROR; /* Check file size API */ if(H5Fget_filesize(file, &file_size) < 0) TEST_ERROR; /* After the data is written, the file size is huge because the * beginning of raw data file is set at HADDR_MAX/2. It's supposed * to be (HADDR_MAX/2 + 128*128*4) */ if(file_size < HADDR_MAX/2 || file_size > HADDR_MAX) TEST_ERROR; if(H5Sclose(space) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Pclose(fapl2) < 0) TEST_ERROR; /* Create and write attribute for the root group. */ if((root = H5Gopen2(file, "/", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR /* Attribute string. */ if((atype = H5Tcopy(H5T_C_S1)) < 0) TEST_ERROR; if(H5Tset_size(atype, strlen(meta) + 1) < 0) TEST_ERROR; if(H5Tset_strpad(atype, H5T_STR_NULLTERM) < 0) TEST_ERROR; /* Create and write attribute */ if((aspace = H5Screate_simple(1, adims, NULL)) < 0) TEST_ERROR; if((attr = H5Acreate2(root, "Metadata", atype, aspace, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; if(H5Awrite(attr, atype, meta) < 0) TEST_ERROR; /* Close IDs */ if(H5Tclose(atype) < 0) TEST_ERROR; if(H5Sclose(aspace) < 0) TEST_ERROR; if(H5Aclose(attr) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; h5_cleanup(FILENAME, fapl); PASSED(); return 0;error: H5E_BEGIN_TRY { H5Sclose(space); H5Dclose(dset); H5Pclose(fapl); H5Pclose(fapl2); H5Fclose(file); } H5E_END_TRY; return -1;}
Developer: FilipeMaia, Project: hdf5, Lines of code: 101
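Examples 2 and 3 above (and several of the test programs further down) end their error path the same way: every identifier that might still be open is closed inside an H5E_BEGIN_TRY / H5E_END_TRY pair, which suppresses the error messages that closing an invalid or already-closed identifier would otherwise print. A condensed, hypothetical sketch of that structure, with placeholder names and not taken from any one of the projects above:

#include "hdf5.h"

/* Returns 0 on success, -1 on failure; a sketch of the cleanup idiom only. */
int write_one_dataset(const char *filename)
{
    hid_t   file = -1, space = -1, dset = -1;
    hsize_t dims[1] = {16};
    int     buf[16] = {0};

    if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        goto error;
    if ((space = H5Screate_simple(1, dims, NULL)) < 0)
        goto error;
    if ((dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space,
                           H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        goto error;
    if (H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
        goto error;

    /* Normal path: close in reverse order of creation. */
    if (H5Dclose(dset) < 0)  goto error;
    dset = -1;
    if (H5Sclose(space) < 0) goto error;
    space = -1;
    if (H5Fclose(file) < 0)  goto error;
    return 0;

error:
    /* Error path: close whatever exists; errors from invalid IDs are silenced. */
    H5E_BEGIN_TRY {
        H5Dclose(dset);
        H5Sclose(space);
        H5Fclose(file);
    } H5E_END_TRY;
    return -1;
}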
示例4: H5Fopen//.........这里部分代码省略......... printf("2dData::readHDF5: unknown floating point type, size=%i/n",(int) size); return; } } else if(dataclass == H5T_INTEGER){ if (size == sizeof(char)) { char* buffer = (char*) calloc(nn, sizeof(char)); H5Dread(dataset_id, datatype_id, H5S_ALL,H5S_ALL, H5P_DEFAULT, buffer); for(long i=0; i<nn; i++) data[i] = buffer[i]; free(buffer); } else if (size == sizeof(short)) { short* buffer = (short*) calloc(nn, sizeof(short)); H5Dread(dataset_id, datatype_id, H5S_ALL,H5S_ALL, H5P_DEFAULT, buffer); for(long i=0; i<nn; i++) data[i] = buffer[i]; free(buffer); } else if (size == sizeof(int)) { int* buffer = (int *) calloc(nn, sizeof(int)); H5Dread(dataset_id, datatype_id, H5S_ALL,H5S_ALL, H5P_DEFAULT, buffer); for(long i=0; i<nn; i++) data[i] = buffer[i]; free(buffer); } else if (size == sizeof(long)) { long* buffer = (long *) calloc(nn, sizeof(long)); H5Dread(dataset_id, datatype_id, H5S_ALL,H5S_ALL, H5P_DEFAULT, buffer); for(long i=0; i<nn; i++) data[i] = buffer[i]; free(buffer); } else { printf("2dData::readHDF5: unknown integer type, size=%lu/n",size); exit(1); } } else { printf("2dData::readHDF5: unknown HDF5 data type/n"); return; } // Read attributes hid_t attr,attr_dtype; htri_t attr_exists; attr_exists = H5Aexists(dataset_id, ATTR_NAME_DETECTOR_NAME); if (attr_exists > 0){ // Attribute exists, read attribute attr = H5Aopen_name(dataset_id,ATTR_NAME_DETECTOR_NAME); attr_dtype = H5Tcopy(H5T_C_S1); H5Tset_size(attr_dtype, 1024); H5Aread(attr,attr_dtype,detectorName); H5Aclose(attr); H5Tclose(attr_dtype); } else { // Attribute does not exist, set detectorName to default value strcpy(detectorName,""); } attr_exists = H5Aexists(dataset_id, ATTR_NAME_DETECTOR_ID); if (attr_exists > 0){ // Attribute exists, read attribute attr = H5Aopen_name(dataset_id,ATTR_NAME_DETECTOR_ID); H5Aread(attr,H5T_NATIVE_INT64,&detectorID); H5Aclose(attr); } else { // Attribute does not exist, set detectorID to default value detectorID = -1; } // Close and cleanup H5Dclose(dataset_id); // Cleanup stale IDs hid_t ids[256]; int n_ids = H5Fget_obj_ids(file_id, H5F_OBJ_ALL, 256, ids); for (long i=0; i<n_ids; i++ ) { hid_t id; H5I_type_t type; id = ids[i]; type = H5Iget_type(id); if ( type == H5I_GROUP ) H5Gclose(id); if ( type == H5I_DATASET ) H5Dclose(id); if ( type == H5I_DATASPACE ) H5Sclose(id); //if ( type == H5I_DATATYPE ) // H5Dclose(id); } H5Fclose(file_id);}
Developer: antonbarty, Project: cheetah, Lines of code: 101
示例5: test_multi_compat//.........这里部分代码省略......... h5_fixname(FILENAME[9], fapl, newname, sizeof newname); /* Make copy for the data file in the build directory, to protect the * original file in the source directory */ sprintf(filename_s, "%s-%c.h5", MULTI_COMPAT_BASENAME, 's'); sprintf(newname_s, "%s-%c.h5", FILENAME[9], 's'); h5_make_local_copy(filename_s, newname_s); sprintf(filename_r, "%s-%c.h5", MULTI_COMPAT_BASENAME, 'r'); sprintf(newname_r, "%s-%c.h5", FILENAME[9], 'r'); h5_make_local_copy(filename_r, newname_r); /* Reopen the file for read only. Verify 1.8 library can open file * created with 1.6 library. */ if((file=H5Fopen(newname, H5F_ACC_RDONLY, fapl)) < 0) TEST_ERROR; if((dset = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; /* Make sure we can reopen the file for read and write */ if((file=H5Fopen(newname, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR; if((dset = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; /* Reopen the file for adding another dataset. The new EOA for metadata file * should be written to the file */ if((file=H5Fopen(newname, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR; /* Create and write data set */ if((space=H5Screate_simple(2, dims, NULL)) < 0) TEST_ERROR; if((dset=H5Dcreate2(file, DSET3_NAME, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; for(i=0; i<MULTI_SIZE; i++) for(j=0; j<MULTI_SIZE; j++) buf[i][j] = i*10000+j; if(H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Sclose(space) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; /* Reopen the file for read only again. Verify the library can handle * the EOA correctly */ if((file=H5Fopen(newname, H5F_ACC_RDONLY, fapl)) < 0) TEST_ERROR; if((dset = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if((dset = H5Dopen2(file, DSET3_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; h5_cleanup(FILENAME, fapl); PASSED(); return 0;error: H5E_BEGIN_TRY { H5Sclose(space); H5Dclose(dset); H5Pclose(fapl); H5Fclose(file); } H5E_END_TRY; return -1;}
Developer: FilipeMaia, Project: hdf5, Lines of code: 101
Example 6: main

int
main(void)
{
    hid_t    fid;
    hid_t    fapl;
    hid_t    did;
    hid_t    space;
    hsize_t  dim[1] = {DIM};
    unsigned data[DIM];
    unsigned u;
    herr_t   ret;        /* Generic return value */

    /* Initialize the data */
    for(u = 0; u < DIM; u++)
        data[u] = u;

    /* Create a FAPL with the metadata and small data aggregators turned off */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    assert(fapl > 0);
    ret = H5Pset_meta_block_size(fapl, (hsize_t)0);
    assert(ret >= 0);
    ret = H5Pset_small_data_block_size(fapl, (hsize_t)0);
    assert(ret >= 0);

    /* Create file */
    fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    assert(fid > 0);

    /* Close FAPL */
    ret = H5Pclose(fapl);
    assert(ret >= 0);

    /* Create dataspace */
    space = H5Screate_simple(1, dim, NULL);
    assert(space > 0);

    /* Create dataset #1 */
    did = H5Dcreate2(fid, "dset1", H5T_NATIVE_UINT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(did > 0);
    ret = H5Dwrite(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    assert(ret >= 0);
    ret = H5Dclose(did);
    assert(ret >= 0);

    /* Create dataset #2 */
    did = H5Dcreate2(fid, "dset2", H5T_NATIVE_UINT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(did > 0);
    ret = H5Dwrite(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    assert(ret >= 0);
    ret = H5Dclose(did);
    assert(ret >= 0);

    /* Close dataspace */
    ret = H5Sclose(space);
    assert(ret >= 0);

    /* Close file */
    ret = H5Fclose(fid);
    assert(ret >= 0);

    return 0;
}
Developer: ElaraFX, Project: hdf5, Lines of code: 62
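Example 6 above is mostly about the file access property list: it disables the metadata and small-data block aggregators by setting both block sizes to zero before creating the file. If that property-list setup is all you need, it reduces to a few lines. The function name and file name in this sketch are placeholders, not part of the original example:

#include <assert.h>
#include "hdf5.h"

/* Sketch: create a file whose FAPL disables both block aggregators, as in Example 6. */
hid_t create_file_without_aggregators(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    assert(fapl >= 0);

    /* A block size of 0 turns the aggregator off. */
    assert(H5Pset_meta_block_size(fapl, (hsize_t)0) >= 0);
    assert(H5Pset_small_data_block_size(fapl, (hsize_t)0) >= 0);

    hid_t fid = H5Fcreate("no_aggregation.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    assert(fid >= 0);

    H5Pclose(fapl);   /* the FAPL is no longer needed once the file is open */
    return fid;
}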
示例7: read_file//.........这里部分代码省略......... hdf5_dataspace_in_file = H5Screate_simple(rank, dims, NULL); dims[0] = pc; hdf5_dataspace_in_memory = H5Screate_simple(rank, dims, NULL); start[0] = pcsum; start[1] = 0; count[0] = pc; count[1] = get_values_per_blockelement(blocknr); pcsum += pc; H5Sselect_hyperslab(hdf5_dataspace_in_file, H5S_SELECT_SET, start, NULL, count, NULL); switch (get_datatype_in_block(blocknr)) { case 0: hdf5_datatype = H5Tcopy(H5T_NATIVE_UINT); break; case 1: hdf5_datatype = H5Tcopy(H5T_NATIVE_FLOAT); break; case 2: hdf5_datatype = H5Tcopy(H5T_NATIVE_UINT64); break; } H5Dread(hdf5_dataset, hdf5_datatype, hdf5_dataspace_in_memory, hdf5_dataspace_in_file, H5P_DEFAULT, CommBuffer); H5Tclose(hdf5_datatype); H5Sclose(hdf5_dataspace_in_memory); H5Sclose(hdf5_dataspace_in_file); H5Dclose(hdf5_dataset); }#endif } if(ThisTask == readTask && task != readTask) MPI_Ssend(CommBuffer, bytes_per_blockelement * pc, MPI_BYTE, task, TAG_PDATA, MPI_COMM_WORLD); if(ThisTask != readTask && task == ThisTask) MPI_Recv(CommBuffer, bytes_per_blockelement * pc, MPI_BYTE, readTask, TAG_PDATA, MPI_COMM_WORLD, &status); if(ThisTask == task) { empty_read_buffer(blocknr, nstart + offset, pc, type); offset += pc; } n_for_this_task -= pc; } while(n_for_this_task > 0); } } } if(ThisTask == readTask) { if(All.ICFormat == 1 || All.ICFormat == 2) { SKIP2;
Developer: zighany, Project: gadget2-AccretionDiscs, Lines of code: 67
示例8: mainintmain(int argc, char **argv){ printf("/n*** Testing HDF5/NetCDF-4 interoperability.../n"); printf("*** testing HDF5 compatibility..."); {#define GRPA_NAME "grpa"#define VAR_NAME "vara"#define NDIMS 2 int nrowCur = 7; /* current size */ int ncolCur = 3; int nrowMax = nrowCur + 0; /* maximum size */ int ncolMax = ncolCur + 0; hid_t xdimId; hid_t ydimId; hsize_t xscaleDims[1]; hsize_t yscaleDims[1]; hid_t xdimSpaceId, spaceId; hid_t fileId; hid_t fapl; hsize_t curDims[2]; hsize_t maxDims[2]; hid_t dataTypeId, dsPropertyId, grpaId, grpaPropId, dsId; hid_t ydimSpaceId; const char * dimNameBase = "This is a netCDF dimension but not a netCDF variable."; char dimNameBuf[1000]; char *varaName = "/grpa/vara"; short amat[nrowCur][ncolCur]; int ii, jj; xscaleDims[0] = nrowCur; yscaleDims[0] = ncolCur; if ((xdimSpaceId = H5Screate_simple(1, xscaleDims, NULL)) < 0) ERR; /* With the SEMI close degree, the HDF5 file close will fail if * anything is left open. */ if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR; if (H5Pset_fclose_degree(fapl, H5F_CLOSE_SEMI)) ERR; /* Create file */ if((fileId = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5Pcreate(H5P_FILE_CREATE), fapl)) < 0) ERR; if (H5Pclose(fapl) < 0) ERR; /* Create data space */ curDims[0] = nrowCur; curDims[1] = ncolCur; maxDims[0] = nrowMax; maxDims[1] = ncolMax; if ((spaceId = H5Screate_simple(2, curDims, maxDims)) < 0) ERR; if ((dataTypeId = H5Tcopy(H5T_NATIVE_SHORT)) < 0) ERR; if ((dsPropertyId = H5Pcreate(H5P_DATASET_CREATE)) < 0) ERR; if ((grpaPropId = H5Pcreate(H5P_GROUP_CREATE)) < 0) ERR; if ((grpaId = H5Gcreate2(fileId, GRPA_NAME, H5P_DEFAULT, grpaPropId, H5P_DEFAULT)) < 0) ERR; if (H5Pclose(grpaPropId) < 0) ERR; /* Create vara dataset */ if ((dsId = H5Dcreate2(fileId, varaName, dataTypeId, spaceId, H5P_DEFAULT, dsPropertyId, H5P_DEFAULT)) < 0) ERR; if (H5Pclose(dsPropertyId) < 0) ERR; if (H5Tclose(dataTypeId) < 0) ERR; if ((ydimSpaceId = H5Screate_simple(1, yscaleDims, NULL)) < 0) ERR; /* Create xdim dimension dataset */ if ((xdimId = H5Dcreate2(fileId, "/xdim", H5T_IEEE_F32BE, xdimSpaceId, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) ERR; if (H5Sclose(xdimSpaceId) < 0) ERR; /* Create ydim dimension dataset */ if ((ydimId = H5Dcreate2(fileId, "/ydim", H5T_IEEE_F32BE, ydimSpaceId, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) ERR; if (H5Sclose(ydimSpaceId) < 0) ERR; /* Create xdim scale */ sprintf(dimNameBuf, "%s%10d", dimNameBase, nrowCur); if (H5DSset_scale(xdimId, dimNameBuf) < 0) ERR; /* Create ydim scale */ sprintf(dimNameBuf, "%s%10d", dimNameBase, ncolCur); if (H5DSset_scale(ydimId, dimNameBuf) < 0) ERR; /* Attach dimension scales to the dataset */ if (H5DSattach_scale(dsId, xdimId, 0) < 0) ERR; if (H5DSattach_scale(dsId, ydimId, 1) < 0) ERR; /* Close stuff. */ if (H5Dclose(xdimId) < 0) ERR; if (H5Dclose(ydimId) < 0) ERR;//.........这里部分代码省略.........
Developer: ArtisticCoding, Project: libmesh, Lines of code: 101
示例9: mainintmain(){ printf("/n*** Checking HDF5 dimension scales./n");#define GRP_NAME "simple_scales"#define DIMSCALE_NAME "dimscale"#define NAME_ATTRIBUTE "Billy-Bob"#define VAR1_NAME "var1"#define VAR2_NAME "var2"#define VAR3_NAME "var3"#define DIM1_LEN 3#define DIM2_LEN 2#define FIFTIES_SONG "Mamma said they'll be days like this. They'll be days like this, my mamma said." printf("*** Creating simple dimension scales file..."); { hid_t fileid, grpid, dimscaleid; hid_t dimscale_spaceid, var1_spaceid, var3_spaceid; hid_t var1_datasetid, var2_datasetid, var3_datasetid; hsize_t dims[2] = {DIM1_LEN, DIM2_LEN}; hsize_t dimscale_dims[1] = {DIM1_LEN}; /* Open file and create group. */ if ((fileid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) ERR; if ((grpid = H5Gcreate(fileid, GRP_NAME, 0)) < 0) ERR; /* Create our dimension scale. Use the built-in NAME attribute * on the dimscale. */ if ((dimscale_spaceid = H5Screate_simple(1, dimscale_dims, dimscale_dims)) < 0) ERR; if ((dimscaleid = H5Dcreate(grpid, DIMSCALE_NAME, H5T_NATIVE_INT, dimscale_spaceid, H5P_DEFAULT)) < 0) ERR; if (H5DSset_scale(dimscaleid, NAME_ATTRIBUTE) < 0) ERR; /* Create a 1D variable which uses the dimscale. Attach a label * to this scale. */ if ((var1_spaceid = H5Screate_simple(1, dims, dims)) < 0) ERR; if ((var1_datasetid = H5Dcreate(grpid, VAR1_NAME, H5T_NATIVE_INT, var1_spaceid, H5P_DEFAULT)) < 0) ERR; if (H5DSattach_scale(var1_datasetid, dimscaleid, 0) < 0) ERR; if (H5DSset_label(var1_datasetid, 0, FIFTIES_SONG) < 0) ERR; /* Create a 1D variabls that doesn't use the dimension scale. */ if ((var2_datasetid = H5Dcreate(grpid, VAR2_NAME, H5T_NATIVE_INT, var1_spaceid, H5P_DEFAULT)) < 0) ERR; /* Create a 2D dataset which uses the scale for one of its * dimensions. */ if ((var3_spaceid = H5Screate_simple(2, dims, dims)) < 0) ERR; if ((var3_datasetid = H5Dcreate(grpid, VAR3_NAME, H5T_NATIVE_INT, var3_spaceid, H5P_DEFAULT)) < 0) ERR; if (H5DSattach_scale(var3_datasetid, dimscaleid, 0) < 0) ERR; /* Close up the shop. */ if (H5Dclose(dimscaleid) < 0 || H5Dclose(var1_datasetid) < 0 || H5Dclose(var2_datasetid) < 0 || H5Dclose(var3_datasetid) < 0 || H5Sclose(var1_spaceid) < 0 || H5Sclose(var3_spaceid) < 0 || H5Sclose(dimscale_spaceid) < 0 || H5Gclose(grpid) < 0 || H5Fclose(fileid) < 0) ERR; /* HELP! If you are reading this in the future, and time * machines have been invented, please come back to July 10, * 2005, the Java Java coffee shop in Lafayette, 8:00 am MST +- * 20 minutes. Bring back some advanced weapons systems to * destroy the sound system here, which is playing 50's rock and * roll. Do-op, do-op, la-ma la-ma, ding dong. Save me!!! (Mind * you, James Brown is a different story!) */ } SUMMARIZE_ERR; printf("*** Checking that simple dimscale file can be read..."); { hid_t fileid, grpid, datasetid = 0; hsize_t num_obj, i; int obj_class; char obj_name[STR_LEN + 1]; htri_t is_scale; int num_scales; /* Reopen the file and group. */ if ((fileid = H5Fopen(FILE_NAME, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) ERR; if ((grpid = H5Gopen(fileid, GRP_NAME)) < 0) ERR; /* Loop through datasets to find variables. */ if (H5Gget_num_objs(grpid, &num_obj) < 0) ERR; for (i=0; i<num_obj; i++) { /* Get the type (i.e. group, dataset, etc.), and the name of the * object. Confusingly, this is a different type than the type * of a variable. 
This type might be better called "class" or * "type of type" */ if ((obj_class = H5Gget_objtype_by_idx(grpid, i)) < 0) ERR; if (H5Gget_objname_by_idx(grpid, i, obj_name, STR_LEN) < 0) ERR; /*printf("/nEncountered: HDF5 object obj_class %d obj_name %s/n", obj_class, obj_name);*/ /* Deal with groups and datasets. *///.........这里部分代码省略.........
Developer: ArielleBassanelli, Project: gempak, Lines of code: 101
示例10: H5TBOdelete_recordsherr_t H5TBOdelete_records( hid_t dataset_id, hid_t mem_type_id, hsize_t ntotal_records, size_t src_size, hsize_t start, hsize_t nrecords, hsize_t maxtuples){ hsize_t nrowsread; hsize_t read_start; hsize_t write_start; hsize_t read_nrecords; hsize_t count[1]; hsize_t offset[1]; hid_t space_id; hid_t mem_space_id; hsize_t mem_size[1]; unsigned char *tmp_buf; hsize_t dims[1]; size_t read_nbuf; /* Shut the compiler up */ tmp_buf = NULL;/*------------------------------------------------------------------------- * Read the records after the deleted one(s) *------------------------------------------------------------------------- */ read_start = start + nrecords; write_start = start; read_nrecords = ntotal_records - read_start; /* This check added for the case that there are no records to be read */ /* F. Alted 2003/07/16 */ if (read_nrecords > 0) { nrowsread = 0; while (nrowsread < read_nrecords) { if (nrowsread + maxtuples < read_nrecords) read_nbuf = (size_t)maxtuples; else read_nbuf = (size_t)(read_nrecords - nrowsread); tmp_buf = (unsigned char *)malloc(read_nbuf * src_size ); if ( tmp_buf == NULL ) return -1; /* Read the records after the deleted one(s) */ if ( H5TBOread_records(dataset_id, mem_type_id, read_start, read_nbuf, tmp_buf ) < 0 ) return -1;/*------------------------------------------------------------------------- * Write the records in another position *------------------------------------------------------------------------- */ /* Get the dataspace handle */ if ( (space_id = H5Dget_space( dataset_id )) < 0 ) goto out; /* Define a hyperslab in the dataset of the size of the records */ offset[0] = write_start; count[0] = read_nbuf; if ( H5Sselect_hyperslab( space_id, H5S_SELECT_SET, offset, NULL, count, NULL) < 0 ) goto out; /* Create a memory dataspace handle */ mem_size[0] = count[0]; if ( (mem_space_id = H5Screate_simple( 1, mem_size, NULL )) < 0 ) goto out; if ( H5Dwrite( dataset_id, mem_type_id, mem_space_id, space_id, H5P_DEFAULT, tmp_buf ) < 0 ) goto out; /* Terminate access to the memory dataspace */ if ( H5Sclose( mem_space_id ) < 0 ) goto out; /* Release the reading buffer */ free( tmp_buf ); /* Terminate access to the dataspace */ if ( H5Sclose( space_id ) < 0 ) goto out; /* Update the counters */ read_start += read_nbuf; write_start += read_nbuf; nrowsread += read_nbuf; } /* while (nrowsread < read_nrecords) */ } /* if (nread_nrecords > 0) *//*------------------------------------------------------------------------- * Change the table dimension *-------------------------------------------------------------------------//.........这里部分代码省略.........
Developer: tomkooij, Project: PyTables, Lines of code: 101
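Example 10 shows a pattern that recurs in many of the other examples: a hyperslab is selected on the file dataspace returned by H5Dget_space, a matching memory dataspace is created with H5Screate_simple, the read or write is issued, and both dataspaces are closed with H5Sclose before moving on. The following stripped-down sketch of a single hyperslab read uses placeholder names and assumes an already-open 1-D integer dataset; it is an illustration, not code from PyTables:

#include "hdf5.h"

/* Read `count` integers starting at `start` from a 1-D dataset of H5T_NATIVE_INT. */
herr_t read_slab(hid_t dset, hsize_t start, hsize_t count, int *buf)
{
    hsize_t offset[1] = {start};
    hsize_t nelem[1]  = {count};
    hid_t   fspace = -1, mspace = -1;
    herr_t  status = -1;

    /* File-side selection: pick `count` elements beginning at `start`. */
    if ((fspace = H5Dget_space(dset)) < 0)
        return -1;
    if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, offset, NULL, nelem, NULL) < 0)
        goto done;

    /* Matching memory-side dataspace with the same number of elements. */
    if ((mspace = H5Screate_simple(1, nelem, NULL)) < 0)
        goto done;

    status = H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);

done:
    /* Both dataspaces must be released, whatever happened above. */
    if (mspace >= 0) H5Sclose(mspace);
    H5Sclose(fspace);
    return status;
}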
示例11: H5TBOmake_table//.........这里部分代码省略......... else { if ( H5Pset_fill_time(plist_id, H5D_FILL_TIME_ALLOC) < 0 ) return -1; } /* Dataset creation property list is modified to use filters */ /* Fletcher must be first */ if (fletcher32) { if ( H5Pset_fletcher32( plist_id) < 0 ) return -1; } /* Then shuffle (blosc shuffles inplace) */ if ((shuffle && compress) && (strncmp(complib, "blosc", 5) != 0)) { if ( H5Pset_shuffle( plist_id) < 0 ) return -1; } /* Finally compression */ if ( compress ) { cd_values[0] = compress; cd_values[1] = (int)(atof(version) * 10); cd_values[2] = Table; /* The default compressor in HDF5 (zlib) */ if (strcmp(complib, "zlib") == 0) { if ( H5Pset_deflate( plist_id, compress) < 0 ) return -1; } /* The Blosc compressor does accept parameters */ else if (strcmp(complib, "blosc") == 0) { cd_values[4] = compress; cd_values[5] = shuffle; if ( H5Pset_filter( plist_id, FILTER_BLOSC, H5Z_FLAG_OPTIONAL, 6, cd_values) < 0 ) return -1; } /* The Blosc compressor can use other compressors */ else if (strncmp(complib, "blosc:", 6) == 0) { cd_values[4] = compress; cd_values[5] = shuffle; blosc_compname = complib + 6; blosc_compcode = blosc_compname_to_compcode(blosc_compname); cd_values[6] = blosc_compcode; if ( H5Pset_filter( plist_id, FILTER_BLOSC, H5Z_FLAG_OPTIONAL, 7, cd_values) < 0 ) return -1; } /* The LZO compressor does accept parameters */ else if (strcmp(complib, "lzo") == 0) { if ( H5Pset_filter( plist_id, FILTER_LZO, H5Z_FLAG_OPTIONAL, 3, cd_values) < 0 ) return -1; } /* The bzip2 compress does accept parameters */ else if (strcmp(complib, "bzip2") == 0) { if ( H5Pset_filter( plist_id, FILTER_BZIP2, H5Z_FLAG_OPTIONAL, 3, cd_values) < 0 ) return -1; } else { /* Compression library not supported */ return -1; } } /* Create the dataset. */ if ( (dataset_id = H5Dcreate( loc_id, dset_name, type_id, space_id, H5P_DEFAULT, plist_id, H5P_DEFAULT )) < 0 ) goto out; /* Only write if there is something to write */ if ( data ) { /* Write data to the dataset. */ if ( H5Dwrite( dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, data ) < 0 ) goto out; } /* Terminate access to the data space. */ if ( H5Sclose( space_id ) < 0 ) goto out; /* End access to the property list */ if ( H5Pclose( plist_id ) < 0 ) goto out; /* Return the object unique ID for future references */ return dataset_id;/* error zone, gracefully close */out: H5E_BEGIN_TRY { H5Dclose(dataset_id); H5Sclose(space_id); H5Pclose(plist_id); } H5E_END_TRY; return -1;}
Developer: tomkooij, Project: PyTables, Lines of code: 101
示例12: test//.........这里部分代码省略......... case FILL_OUTWARD: j = (int)(cur_size[0]-i)+1; hs_start[0] = j%2 ? j/2 : (hssize_t)cur_size[0]-j/2; break; case FILL_RANDOM: for (j=HDrand()%(int)cur_size[0]; had[j]; j=(j+1)%(int)cur_size[0]) /*void*/; hs_start[0] = j; had[j] = 1; break; case FILL_ALL: abort(); default: /* unknown request */ HDfprintf(stderr, "Unknown fill style/n"); goto error; break; } /* Write the chunk */ if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, hs_start, NULL, hs_count, NULL) < 0) goto error; if (H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, xfer, &i) < 0) { goto error; } /* Determine overhead */ if (verbose) { if (H5Fflush(file, H5F_SCOPE_LOCAL) < 0) goto error; if (HDfstat(fd, &sb) < 0) goto error; /* * The extra cast in the following statement is a bug workaround * for the Win32 version 5.0 compiler. * 1998-11-06 ptl */ printf("%4lu %8.3f ***/n", (unsigned long)i, (double)(hssize_t)(sb.st_size-i*sizeof(int))/(hssize_t)i); } } if(had) { free(had); had = NULL; } /* end if */ H5Dclose(dset); H5Sclose(mspace); H5Sclose(fspace); H5Pclose(dcpl); H5Pclose(xfer); H5Fclose(file); if (!verbose) { switch (fill_style) { case FILL_FORWARD: sname = "forward"; break; case FILL_REVERSE: sname = "reverse"; break; case FILL_INWARD: sname = "inward"; break; case FILL_OUTWARD: sname = "outward"; break; case FILL_RANDOM: sname = "random"; break; case FILL_ALL: abort(); default: /* unknown request */ HDfprintf(stderr, "Unknown fill style/n"); goto error; break; } if (HDfstat(fd, &sb) < 0) goto error; printf("%-7s %8.3f/n", sname, (double)(hssize_t)(sb.st_size-cur_size[0]*sizeof(int))/ (hssize_t)cur_size[0]); } HDclose(fd); return 0; error: H5Dclose(dset); H5Sclose(mspace); H5Sclose(fspace); H5Pclose(dcpl); H5Pclose(xfer); H5Fclose(file); if(had) free(had); HDclose(fd); return 1;}
Developer: MichaelToal, Project: hdf5, Lines of code: 101
示例13: mainintmain(void){ hid_t fid = -1; /* HDF5 file ID */ hid_t did = -1; /* dataset ID */ hid_t msid = -1; /* memory dataspace ID */ hid_t fsid = -1; /* file dataspace ID */ hsize_t start[RANK]; /* hyperslab start point */ int n_elements = 0; /* size of buffer (elements) */ size_t size = 0; /* size of buffer (bytes) */ int *buffer = NULL; /* data buffer */ int n_dims = -1; /* # dimensions in dataset */ hsize_t dims[RANK]; /* current size of dataset */ hsize_t max_dims[RANK]; /* max size of dataset */ /* Open the VDS file and dataset */ if((fid = H5Fopen(VDS_FILE_NAME, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0) TEST_ERROR if((did = H5Dopen2(fid, VDS_DSET_NAME, H5P_DEFAULT)) < 0) TEST_ERROR /* Create the read buffer */ n_elements = VDS_PLANE[1] * VDS_PLANE[2]; size = n_elements * sizeof(int); if(NULL == (buffer = (int *)HDmalloc(size))) TEST_ERROR /* Create memory dataspace */ if((msid = H5Screate_simple(RANK, VDS_PLANE, NULL)) < 0) TEST_ERROR /* Read data until the dataset is full (via the writer) */ do { /* Refresh metadata */ if(H5Drefresh(did) < 0) TEST_ERROR /* Get the dataset dimensions */ if((fsid = H5Dget_space(did)) < 0) TEST_ERROR if(H5Sget_simple_extent_dims(fsid, dims, max_dims) < 0) TEST_ERROR /* Check the reported size of the VDS */ if((n_dims = H5Sget_simple_extent_ndims(fsid)) < 0) TEST_ERROR if(n_dims != RANK) TEST_ERROR if(H5Sget_simple_extent_dims(fsid, dims, max_dims) < 0) TEST_ERROR /* NOTE: Don't care what dims[0] is. */ if(dims[1] != FULL_HEIGHT) TEST_ERROR if(dims[2] != WIDTH) TEST_ERROR if(max_dims[0] != H5S_UNLIMITED) TEST_ERROR if(max_dims[1] != FULL_HEIGHT) TEST_ERROR if(max_dims[2] != WIDTH) TEST_ERROR /* Continue if there's nothing to read */ if(0 == dims[0]) { if(H5Sclose(fsid) < 0) TEST_ERROR continue; } /* Read a plane from the VDS */ /* At this time, we just make sure we can read planes without errors. */ start[0] = dims[0] - 1; start[1] = 0; start[2] = 0; if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, VDS_PLANE, NULL) < 0) TEST_ERROR if(H5Dread(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buffer) < 0) TEST_ERROR if(H5Sclose(fsid) < 0) TEST_ERROR } while (dims[0] < N_PLANES_TO_WRITE); /* Close file and dataset */ if(H5Sclose(msid) < 0) TEST_ERROR if(H5Dclose(did) < 0) TEST_ERROR if(H5Fclose(fid) < 0) TEST_ERROR HDfree(buffer); HDfprintf(stderr, "SWMR reader exited successfully/n");//.........这里部分代码省略.........
Developer: Starlink, Project: hdf5, Lines of code: 101
示例14: memsetCPLErr HDF5ImageRasterBand::IReadBlock( int nBlockXOff, int nBlockYOff, void * pImage ){ HDF5ImageDataset *poGDS = reinterpret_cast<HDF5ImageDataset * >(poDS); if( poGDS->eAccess == GA_Update ) { memset( pImage, 0, nBlockXSize * nBlockYSize * GDALGetDataTypeSize( eDataType )/8 ); return CE_None; } hsize_t count[3] = {0, 0, 0}; H5OFFSET_TYPE offset[3] = {0, 0, 0}; hsize_t col_dims[3] = {0, 0, 0}; hsize_t rank = 2; if( poGDS->IsComplexCSKL1A() ) { rank = 3; offset[2] = nBand-1; count[2] = 1; col_dims[2] = 1; } else if( poGDS->ndims == 3 ) { rank = 3; offset[0] = nBand-1; count[0] = 1; col_dims[0] = 1; } // Defaults to rank = 2; offset[poGDS->GetYIndex()] = nBlockYOff*static_cast<hsize_t>(nBlockYSize); offset[poGDS->GetXIndex()] = nBlockXOff*static_cast<hsize_t>(nBlockXSize); count[poGDS->GetYIndex()] = nBlockYSize; count[poGDS->GetXIndex()] = nBlockXSize; const int nSizeOfData = static_cast<int>(H5Tget_size( poGDS->native )); memset( pImage,0,nBlockXSize*nBlockYSize*nSizeOfData ); /* blocksize may not be a multiple of imagesize */ count[poGDS->GetYIndex()] = MIN( size_t(nBlockYSize), poDS->GetRasterYSize() - offset[poGDS->GetYIndex()]); count[poGDS->GetXIndex()] = MIN( size_t(nBlockXSize), poDS->GetRasterXSize()- offset[poGDS->GetXIndex()]);/* -------------------------------------------------------------------- *//* Select block from file space *//* -------------------------------------------------------------------- */ herr_t status = H5Sselect_hyperslab( poGDS->dataspace_id, H5S_SELECT_SET, offset, NULL, count, NULL ); if( status < 0 ) return CE_Failure;/* -------------------------------------------------------------------- *//* Create memory space to receive the data *//* -------------------------------------------------------------------- */ col_dims[poGDS->GetYIndex()]=nBlockYSize; col_dims[poGDS->GetXIndex()]=nBlockXSize; const hid_t memspace = H5Screate_simple( static_cast<int>(rank), col_dims, NULL ); H5OFFSET_TYPE mem_offset[3] = {0, 0, 0}; status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, mem_offset, NULL, count, NULL); if( status < 0 ) return CE_Failure; status = H5Dread ( poGDS->dataset_id, poGDS->native, memspace, poGDS->dataspace_id, H5P_DEFAULT, pImage ); H5Sclose( memspace ); if( status < 0 ) { CPLError( CE_Failure, CPLE_AppDefined, "H5Dread() failed for block." ); return CE_Failure; } return CE_None;}
Developer: nextgis-borsch, Project: lib_gdal, Lines of code: 93
示例15: trav_attrstatic herr_ttrav_attr(hid_t#ifndef H5TRAV_PRINT_SPACE H5_ATTR_UNUSED#endif /* H5TRAV_PRINT_SPACE */ obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ainfo, void *_op_data){ trav_path_op_data_t *op_data = (trav_path_op_data_t *)_op_data; const char *buf = op_data->path; if((strlen(buf)==1) && (*buf=='/')) printf(" %-10s %s%s", "attribute", buf, attr_name); else printf(" %-10s %s/%s", "attribute", buf, attr_name);#ifdef H5TRAV_PRINT_SPACE if(trav_verbosity < 2) {#endif printf("/n");#ifdef H5TRAV_PRINT_SPACE } else { hid_t attr = -1; hid_t space = -1; hsize_t size[H5S_MAX_RANK]; int ndims; int i; H5S_class_t space_type; if((attr = H5Aopen(obj, attr_name, H5P_DEFAULT))) { space = H5Aget_space(attr); /* Data space */ ndims = H5Sget_simple_extent_dims(space, size, NULL); space_type = H5Sget_simple_extent_type(space); switch(space_type) { case H5S_SCALAR: /* scalar dataspace */ printf(" scalar/n"); break; case H5S_SIMPLE: /* simple dataspace */ printf(" {"); for (i = 0; i < ndims; i++) { printf("%s" HSIZE_T_FORMAT, i?", ":"", size[i]); } printf("}/n"); break; case H5S_NULL: /* null dataspace */ printf(" null/n"); break; default: /* Unknown dataspace type */ printf(" unknown/n"); break; } /* end switch */ H5Sclose(space); H5Aclose(attr); } }#endif return(0);}
Developer: GATB, Project: gatb-core, Lines of code: 69
示例16: create_file/* * Create the skeleton use case file for testing. * It has one 3d dataset using chunked storage. * The dataset is (unlimited, chunksize, chunksize). * Dataset type is 2 bytes integer. * It starts out "empty", i.e., first dimension is 0. * * Return: 0 succeed; -1 fail. */static int create_file(void){ hsize_t dims[3]; /* Dataset starting dimensions */ hid_t fid; /* File ID for new HDF5 file */ hid_t dcpl; /* Dataset creation property list */ hid_t sid; /* Dataspace ID */ hid_t dsid; /* Dataset ID */ hid_t fapl; /* File access property list */ H5D_chunk_index_t idx_type; /* Chunk index type */ /* Create the file */ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) return -1; if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) return -1; if((fid = H5Fcreate(filename_g, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) return -1; /* Set up dimension sizes */ dims[0] = 0; dims[1] = dims[2] = max_dims_g[1]; /* Create dataspace for creating datasets */ if((sid = H5Screate_simple(3, dims, max_dims_g)) < 0) return -1; /* Create dataset creation property list */ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) return -1; if(H5Pset_chunk(dcpl, 3, chunkdims_g) < 0) return -1; /* create dataset of progname */ if((dsid = H5Dcreate2(fid, progname_g, UC_DATATYPE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) return -1; /* Check that the chunk index type is not version 1 B-tree. * Version 1 B-trees are not supported under SWMR. */ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) return -1; if(idx_type == H5D_CHUNK_IDX_BTREE) { fprintf(stderr, "ERROR: Chunk index is version 1 B-tree: aborting./n"); return -1; } /* Close everything */ if(H5Dclose(dsid) < 0) return -1; if(H5Pclose(fapl) < 0) return -1; if(H5Pclose(dcpl) < 0) return -1; if(H5Sclose(sid) < 0) return -1; if(H5Fclose(fid) < 0) return -1; return 0;} /* create_file() */
Developer: Starlink, Project: hdf5, Lines of code: 70
示例17: getProvider//.........这里部分代码省略......... nDimFile = nobjEman; else nDimFile = ( rank<3 || !isStack )?1:dims[0] ; if (select_img > nDimFile) REPORT_ERROR(ERR_INDEX_OUTOFBOUNDS, formatString("readHDF5 (%s): Image number %lu exceeds stack size %lu", filename.c_str(), select_img, nDimFile)); aDim.ndim = replaceNsize = (select_img == ALL_IMAGES)? nDimFile :1 ; setDimensions(aDim); //Read header only if(dataMode == HEADER || (dataMode == _HEADER_ALL && aDim.ndim > 1)) return errCode; // EMAN stores each image in a separate dataset if ( provider.first == EMAN ) select_img = 1; size_t imgStart = IMG_INDEX(select_img); size_t imgEnd = (select_img != ALL_IMAGES) ? imgStart + 1 : aDim.ndim; MD.clear(); MD.resize(imgEnd - imgStart,MDL::emptyHeader); if (dataMode < DATA) // Don't read data if not necessary but read the header return errCode; if ( H5Pget_layout(cparms) == H5D_CONTIGUOUS ) //We can read it directly readData(fimg, select_img, datatype, 0); else // We read it by hyperslabs { // Allocate memory for image data (Assume xdim, ydim, zdim and ndim are already set //if memory already allocated use it (no resize allowed) mdaBase->coreAllocateReuse(); hid_t memspace; hsize_t offset[4]; // Hyperslab offset in the file hsize_t count[4]; // Size of the hyperslab in the file // Define the offset and count of the hyperslab to be read. switch (rank) { case 4: count[0] = 1; case 3: // if (stack) count[rank-3] = aDim.zdim; offset[rank-2] = 0; case 2: count[rank-2] = aDim.ydim; offset[rank-2] = 0; break; } count[rank-1] = aDim.xdim; offset[rank-1] = 0; aDim.xdim = dims[rank-1]; aDim.ydim = (rank>1)?dims[rank-2]:1; aDim.zdim = (rank == 4)?dims[1]:1; // size_t nDimFile = (rank>2)?dims[0]:1 ; // Define the memory space to read a hyperslab. memspace = H5Screate_simple(rank,count,NULL); size_t data = (size_t) this->mdaBase->getArrayPointer(); size_t pad = aDim.zyxdim*gettypesize(myT()); for (size_t idx = imgStart, imN = 0; idx < imgEnd; ++idx, ++imN) { // Set the offset of the hyperslab to be read offset[0] = idx; if ( H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL) < 0 ) REPORT_ERROR(ERR_IO_NOREAD, formatString("readHDF5: Error selecting hyperslab %d from filename %s", imgStart, filename.c_str())); // movePointerTo(ALL_SLICES,imN); // Read if ( H5Dread(dataset, H5Datatype(myT()), memspace, filespace, H5P_DEFAULT, (void*)(data + pad*imN)) < 0 ) REPORT_ERROR(ERR_IO_NOREAD,formatString("readHDF5: Error reading hyperslab %d from filename %s", imgStart, filename.c_str())); } H5Sclose(memspace); } H5Pclose(cparms); H5Sclose(filespace); H5Dclose(dataset); return errCode;}
Developer: I2PC, Project: scipion, Lines of code: 101
示例18: write_file//.........这里部分代码省略......... (unsigned long long)(dims[2])); /* verify that file space dims are as expected and are consistent with memory space dims */ if (dims[0] != 0 || dims[1] != memdims[1] || dims[2] != memdims[2]){ fprintf(stderr, "dataset is not empty. Got dims=(%llu,%llu,%llu)/n", (unsigned long long)dims[0], (unsigned long long)dims[1], (unsigned long long)dims[2]); return -1; } /* setup mem-space for buffer */ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){ fprintf(stderr, "H5Screate_simple for memory failed/n"); return -1; }; /* write planes */ count[0]=1; count[1]=dims[1]; count[2]=dims[2]; for (i=0; i<nplanes_g; i++){ /* fill buffer with value i+1 */ bufptr = buffer; for (j=0; j<dims[1]; j++) for (k=0; k<dims[2]; k++) *bufptr++ = i; /* extend the dataset by one for new plane */ dims[0]=i+1; if(H5Dset_extent(dsid, dims) < 0){ fprintf(stderr, "H5Dset_extent failed/n"); return -1; } /* Get the dataset's dataspace */ if((f_sid = H5Dget_space(dsid)) < 0){ fprintf(stderr, "H5Dset_extent failed/n"); return -1; } start[0]=i; /* Choose the next plane to write */ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){ fprintf(stderr, "Failed H5Sselect_hyperslab/n"); return -1; } /* Write plane to the dataset */ if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){ fprintf(stderr, "Failed H5Dwrite/n"); return -1; } /* Flush the dataset for every "chunkplanes_g" planes */ if(!((i + 1) % (hsize_t)chunkplanes_g)) { if(H5Dflush(dsid) < 0) { fprintf(stderr, "Failed to H5Dflush dataset/n"); return -1; } } } if(H5Dflush(dsid) < 0) { fprintf(stderr, "Failed to H5Dflush dataset/n"); return -1; } /* Enable mdc flushes for the dataset */ /* Closing the dataset later will enable mdc flushes automatically if this is not done */ if(disabled) if(H5Oenable_mdc_flushes(dsid) < 0) { fprintf(stderr, "Failed to H5Oenable_mdc_flushes/n"); return -1; } /* Done writing. Free/Close all resources including data file */ HDfree(buffer); if(H5Dclose(dsid) < 0){ fprintf(stderr, "Failed to close datasete/n"); return -1; } if(H5Sclose(m_sid) < 0){ fprintf(stderr, "Failed to close memory space/n"); return -1; } if(H5Sclose(f_sid) < 0){ fprintf(stderr, "Failed to close file space/n"); return -1; } if(H5Pclose(fapl) < 0){ fprintf(stderr, "Failed to property list/n"); return -1; } if(H5Fclose(fid) < 0){ fprintf(stderr, "Failed to close file id/n"); return -1; } return 0;} /* write_file() */
Developer: Starlink, Project: hdf5, Lines of code: 101
示例19: mainintmain (void){ hid_t file, dataset2; /* file and dataset handles */ hid_t datatype16; /* handles */ hid_t dataspace2; /* handles */ hsize_t dimsf2[2]; /* dataset dimensions */ hid_t aid; /* dataspace identifiers */ hid_t attr2; /* attribute identifiers */ herr_t status; int16_t data2[SIZE][SIZE]; /* data to write*/ int i, j, n; n = 0; for(i = 0; i < SIZE; i++) for(j = 0; j < SIZE; j++) data2[i][j] = n++; /* * Assigns minimal and maximal values of int16 to data2 and * they will be used to check boudary values. */ data2[0][0] = -32768; data2[1][1] = 32767; /* * Create a new file using H5F_ACC_TRUNC access, * default file creation properties, and default file * access properties. */ file = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); /* * Set each dimension size to 0. */ dimsf2[0] = 0; dimsf2[1] = 0; dataspace2 = H5Screate_simple(2, dimsf2, NULL); /* * Define datatype for the data in the file. */ datatype16 = H5Tcopy(H5T_NATIVE_SHORT); /* * Create a new dataset within the file using defined dataspace and * datatype and default dataset creation properties. */ dataset2 = H5Dcreate2(file, "dataset_2d", datatype16, dataspace2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); /* * Write the data although it has no effect because each dim size is 0. */ status = H5Dwrite(dataset2, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data2); /* * Create 2D attributes. */ attr2 = H5Acreate2(dataset2, "attribute_2d", datatype16, dataspace2, H5P_DEFAULT, H5P_DEFAULT); /* * Write the data although it has no effect because each dim size is 0. */ status = H5Awrite(attr2, datatype16, data2); H5Aclose(attr2); /* * Close/release resources. */ H5Dclose(dataset2); H5Tclose(datatype16); H5Sclose(dataspace2); H5Fclose(file); return 0;}
Developer: OPENDAP, Project: hdf5_handler, Lines of code: 79
示例20: HDF5_Xfer/* * Write or read access to file using the HDF5 interface. */static IOR_offset_t HDF5_Xfer(int access, void *fd, IOR_size_t * buffer, IOR_offset_t length, IOR_param_t * param){ static int firstReadCheck = FALSE, startNewDataSet; IOR_offset_t segmentPosition, segmentSize; /* * this toggle is for the read check operation, which passes through * this function twice; note that this function will open a data set * only on the first read check and close only on the second */ if (access == READCHECK) { if (firstReadCheck == TRUE) { firstReadCheck = FALSE; } else { firstReadCheck = TRUE; } } /* determine by offset if need to start new data set */ if (param->filePerProc == TRUE) { segmentPosition = (IOR_offset_t) 0; segmentSize = param->blockSize; } else { segmentPosition = (IOR_offset_t) ((rank + rankOffset) % param->numTasks) * param->blockSize; segmentSize = (IOR_offset_t) (param->numTasks) * param->blockSize; } if ((IOR_offset_t) ((param->offset - segmentPosition) % segmentSize) == 0) { /* * ordinarily start a new data set, unless this is the * second pass through during a read check */ startNewDataSet = TRUE; if (access == READCHECK && firstReadCheck != TRUE) { startNewDataSet = FALSE; } } /* create new data set */ if (startNewDataSet == TRUE) { /* if just opened this file, no data set to close yet */ if (newlyOpenedFile != TRUE) { HDF5_CHECK(H5Dclose(dataSet), "cannot close data set"); HDF5_CHECK(H5Sclose(fileDataSpace), "cannot close file data space"); } SetupDataSet(fd, param); } SeekOffset(fd, param->offset, param); /* this is necessary to reset variables for reaccessing file */ startNewDataSet = FALSE; newlyOpenedFile = FALSE; /* access the file */ if (access == WRITE) { /* WRITE */ HDF5_CHECK(H5Dwrite(dataSet, H5T_NATIVE_LLONG, memDataSpace, fileDataSpace, xferPropList, buffer), "cannot write to data set"); } else { /* READ or CHECK */ HDF5_CHECK(H5Dread(dataSet, H5T_NATIVE_LLONG, memDataSpace, fileDataSpace, xferPropList, buffer), "cannot read from data set"); } return (length);}
Developer: roblatham00, Project: ior, Lines of code: 76
Example 21: if

/*
 * Write 2D data to HDF5 file
 */
void cData2d::writeHDF5(char* filename)
{
    // Figure out the HDF5 data type
    hid_t out_type_id = 0;
    if (sizeof(tData2d) == sizeof(float))
        out_type_id = H5T_NATIVE_FLOAT;
    else if (sizeof(tData2d) == sizeof(double))
        out_type_id = H5T_NATIVE_DOUBLE;
    else {
        printf("2dData::writeHDF5: unsupported data type\n");
        exit(1);
    }

    // Create the file and data group
    hid_t file_id;
    file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    H5Gcreate1(file_id, "data", 0);

    // Data space dimensions
    int ndims = 2;
    hsize_t dims[ndims];
    dims[0] = ny;
    dims[1] = nx;

    // Write the data
    hid_t dataspace_id;
    hid_t dataset_id;
    dataspace_id = H5Screate_simple(ndims, dims, NULL);
    dataset_id = H5Dcreate1(file_id, "/data/data", out_type_id, dataspace_id, H5P_DEFAULT);
    if (H5Dwrite(dataset_id, out_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0) {
        printf("2dData::writeHDF5: Error writing data to file\n");
        exit(1);
    }

    // Close and exit
    H5Dclose(dataset_id);

    // Cleanup stale IDs
    hid_t ids[256];
    int n_ids = H5Fget_obj_ids(file_id, H5F_OBJ_ALL, 256, ids);
    for (long i = 0; i < n_ids; i++) {
        hid_t id;
        H5I_type_t type;
        id = ids[i];
        type = H5Iget_type(id);
        if (type == H5I_GROUP)
            H5Gclose(id);
        if (type == H5I_DATASET)
            H5Dclose(id);
        if (type == H5I_DATASPACE)
            H5Sclose(id);
        //if ( type == H5I_DATATYPE )
        //    H5Dclose(id);
    }

    H5Fclose(file_id);
}
Developer: antonbarty, Project: cheetah, Lines of code: 64
示例22: main/*------------------------------------------------------------------------- * Function: main * * Purpose: H5O_mtime_decode() test. * * Return: Success: * * Failure: * * Programmer: Robb Matzke * Thursday, July 30, 1998 * * Modifications: * Added checks for old and new modification time messages * in pre-created datafiles (generated with gen_old_mtime.c and * gen_new_mtime.c). * Quincey Koziol * Friday, January 3, 2003 * *------------------------------------------------------------------------- */intmain(void){ hid_t fapl, file, space, dset; hsize_t size[1] = {2}; time_t now; struct tm *tm; H5O_info_t oi1, oi2; signed char buf1[32], buf2[32]; char filename[1024]; h5_reset(); fapl = h5_fileaccess(); TESTING("modification time messages"); /* Create the file, create a dataset, then close the file */ h5_fixname(FILENAME[0], fapl, filename, sizeof filename); if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR; if((space = H5Screate_simple(1, size, NULL)) < 0) TEST_ERROR; if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_SCHAR, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; now = HDtime(NULL); if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Sclose(space) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; /* * Open the file and get the modification time. We'll test the * H5Oget_info() arguments too: being able to stat something without * knowing its name. */ h5_fixname(FILENAME[0], fapl, filename, sizeof filename); if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) TEST_ERROR; if(H5Oget_info_by_name(file, "dset", &oi1, H5P_DEFAULT) < 0) TEST_ERROR; if((dset = H5Dopen2(file, "dset", H5P_DEFAULT)) < 0) TEST_ERROR; if(H5Oget_info(dset, &oi2) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; /* Compare addresses & times from the two ways of calling H5Oget_info() */ if(oi1.addr != oi2.addr || oi1.ctime != oi2.ctime) { H5_FAILED(); puts(" Calling H5Oget_info() with the dataset ID returned"); puts(" different values than calling it with a file and dataset"); puts(" name."); goto error; } /* Compare times -- they must be within 60 seconds of one another */ if(0 == oi1.ctime) { SKIPPED(); puts(" The modification time could not be decoded on this OS."); puts(" Modification times will be mantained in the file but"); puts(" cannot be queried on this system. See H5O_mtime_decode()."); return 0; } else if(HDfabs(HDdifftime(now, oi1.ctime)) > 60.0F) { H5_FAILED(); tm = HDlocaltime(&(oi1.ctime)); HDstrftime((char*)buf1, sizeof buf1, "%Y-%m-%d %H:%M:%S", tm); tm = HDlocaltime(&now); HDstrftime((char*)buf2, sizeof buf2, "%Y-%m-%d %H:%M:%S", tm); printf(" got: %s/n ans: %s/n", buf1, buf2); goto error; } PASSED(); /* Check opening existing file with old-style modification time information * and make certain that the time is correct */ TESTING("accessing old modification time messages"); { const char *testfile = H5_get_srcdir_filename(TESTFILE1); /* Corrected test file name */ file = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); if(file >= 0){//.........这里部分代码省略.........
Developer: ElaraFX, Project: hdf5, Lines of code: 101
Example 23: arma_H5Sclose

herr_t arma_H5Sclose(hid_t space_id)
{
    return H5Sclose(space_id);
}
Developer: bingo4508, Project: Deep-Learning, Lines of code: 4
示例24: output//.........这里部分代码省略......... H5P_DEFAULT); for (ll = 0; ll < ne; ll++) { dimsf[0] = nx; dimsf[1] = ny; dataspace = H5Screate_simple (RANK, dimsf, NULL); /* * Define datatype for the data in the file. * We will store little endian DOUBLE numbers. */ datatype = H5Tcopy (H5T_NATIVE_DOUBLE); status = H5Tset_order (datatype, H5T_ORDER_LE); /* * Create a new dataset within the file using defined dataspace and * datatype and default dataset creation properties. */ dataset = H5Dcreate (file, names[ll], datatype, dataspace, H5P_DEFAULT); for (jj = 0; jj < ny; jj++) { for (ii = 0; ii < nx; ii++) data[ii][jj] = grid[ii][jj][kk].array[ll]; } /* * Write the data to the dataset using default transfer properties. */ status = H5Dwrite (dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); /* * Close/release resources. */ H5Sclose (dataspace); H5Tclose (datatype); H5Dclose (dataset); } dimsf[0] = nx; dimsf[1] = ny; dataspace = H5Screate_simple (RANK, dimsf, NULL); /* * Define datatype for the data in the file. * We will store little endian DOUBLE numbers. */ datatype = H5Tcopy (H5T_NATIVE_DOUBLE); status = H5Tset_order (datatype, H5T_ORDER_LE); /* * Create a new dataset within the file using defined dataspace and * datatype and default dataset creation properties. */ dataset = H5Dcreate (file, "Pressure", datatype, dataspace, H5P_DEFAULT); for (jj = 0; jj < ny; jj++) { for (ii = 0; ii < nx; ii++) { rl = grid[ii][jj][kk] _MASS; px = grid[ii][jj][kk] _MOMX; py = grid[ii][jj][kk] _MOMY; pz = grid[ii][jj][kk] _MOMZ; et = grid[ii][jj][kk] _ENER; bx = grid[ii][jj][kk] _B_X; by = grid[ii][jj][kk] _B_Y;
Developer: garethcmurphy, Project: mhdvanleer, Lines of code: 67
示例25: test_direct//.........这里部分代码省略......... /* Create the dset1 */ if((dset1 = H5Dcreate2(file, DSET1_NAME, H5T_NATIVE_INT, space1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; /* Write the data to the dset1 */ if(H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points) < 0) TEST_ERROR; if(H5Dclose(dset1) < 0) TEST_ERROR; if((dset1 = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; /* Read the data back from dset1 */ if(H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, check) < 0) TEST_ERROR; /* Check that the values read are the same as the values written */ p1 = points; p2 = check; for(i = 0; i < DSET1_DIM1; i++) for(j = 0; j < DSET1_DIM2; j++) if(*p1++ != *p2++) { H5_FAILED(); printf(" Read different values than written in data set 1./n"); printf(" At index %d,%d/n", i, j); TEST_ERROR; } /* end if */ /* Create the data space2. For data set 2, memory address and data size are not aligned. */ dims2[0] = DSET2_DIM; if((space2 = H5Screate_simple(1, dims2, NULL)) < 0) TEST_ERROR; /* Create the dset2 */ if((dset2 = H5Dcreate2(file, DSET2_NAME, H5T_NATIVE_INT, space2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; /* Write the data to the dset1 */ if(H5Dwrite(dset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2) < 0) TEST_ERROR; if(H5Dclose(dset2) < 0) TEST_ERROR; if((dset2 = H5Dopen2(file, DSET2_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; /* Read the data back from dset1 */ if(H5Dread(dset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata2) < 0) TEST_ERROR; /* Check that the values read are the same as the values written */ for(i = 0; i < DSET2_DIM; i++) if(wdata2[i] != rdata2[i]) { H5_FAILED(); printf(" Read different values than written in data set 2./n"); printf(" At index %d/n", i); TEST_ERROR; } /* end if */ if(H5Sclose(space1) < 0) TEST_ERROR; if(H5Dclose(dset1) < 0) TEST_ERROR; if(H5Sclose(space2) < 0) TEST_ERROR; if(H5Dclose(dset2) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; HDassert(points); HDfree(points); HDassert(check); HDfree(check); h5_cleanup(FILENAME, fapl); PASSED(); return 0;error: H5E_BEGIN_TRY { H5Pclose(fapl); H5Sclose(space1); H5Dclose(dset1); H5Sclose(space2); H5Dclose(dset2); H5Fclose(file); } H5E_END_TRY; if(points) HDfree(points); if(check) HDfree(check); return -1;#endif /*H5_HAVE_DIRECT*/}
Developer: FilipeMaia, Project: hdf5, Lines of code: 101
示例26: forAll forAll(scalarFields_, fieldI) { Info<< " fieldWriteScalar: " << scalarFields_[fieldI] << endl; // Lookup field const volScalarField& field = obr_.lookupObject<volScalarField> ( scalarFields_[fieldI] ); // Initialize a plain continous array for the data ioScalar* scalarData; scalarData = new ioScalar[field.size()]; // Loop through the field and construct the array forAll(field, iter) { scalarData[iter] = field[iter]; } // Create the different datasets (needs to be done collectively) char datasetName[80]; hsize_t dimsf[1]; hid_t fileSpace; hid_t dsetID; hid_t plistID; hid_t plistDCreate; forAll(nCells_, proc) { // Create the dataspace for the dataset dimsf[0] = nCells_[proc]; fileSpace = H5Screate_simple(1, dimsf, NULL); // Set property to create parent groups as neccesary plistID = H5Pcreate(H5P_LINK_CREATE); H5Pset_create_intermediate_group(plistID, 1); // Set chunking, compression and other HDF5 dataset properties plistDCreate = H5Pcreate(H5P_DATASET_CREATE); dsetSetProps(1, sizeof(ioScalar), nCells_[proc], plistDCreate); // Create the dataset for points sprintf ( datasetName, "FIELDS/%s/processor%i/%s", mesh_.time().timeName().c_str(), proc, scalarFields_[fieldI].c_str() ); dsetID = H5Dcreate2 ( fileID_, datasetName, H5T_SCALAR, fileSpace, plistID, plistDCreate, H5P_DEFAULT ); H5Dclose(dsetID); H5Pclose(plistID); H5Pclose(plistDCreate); H5Sclose(fileSpace); }
Developer: hakostra, Project: IOH5Write, Lines of code: 71
示例27: test_family//.........这里部分代码省略......... /* The file size is supposed to be about 800 bytes right now. */ if(file_size < (KB / 2) || file_size > KB) TEST_ERROR; /* Create and write dataset */ if((space=H5Screate_simple(2, dims, NULL)) < 0) TEST_ERROR; /* Retrieve the access property list... */ if ((access_fapl = H5Fget_access_plist(file)) < 0) TEST_ERROR; /* Check that the driver is correct */ if(H5FD_FAMILY != H5Pget_driver(access_fapl)) TEST_ERROR; /* ...and close the property list */ if (H5Pclose(access_fapl) < 0) TEST_ERROR; if((dset=H5Dcreate2(file, dname, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; for(i = 0; i < FAMILY_NUMBER; i++) for(j = 0; j < FAMILY_SIZE; j++) buf[i][j] = (int)((i * 10000) + j); if(H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR; /* check file handle API */ if((fapl2 = H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR; if(H5Pset_family_offset(fapl2, (hsize_t)0) < 0) TEST_ERROR; if(H5Fget_vfd_handle(file, fapl2, (void **)&fhandle) < 0) TEST_ERROR; if(*fhandle < 0) TEST_ERROR; if(H5Pset_family_offset(fapl2, (hsize_t)(FAMILY_SIZE*2)) < 0) TEST_ERROR; if(H5Fget_vfd_handle(file, fapl2, (void **)&fhandle2) < 0) TEST_ERROR; if(*fhandle2 < 0) TEST_ERROR; /* Check file size API */ if(H5Fget_filesize(file, &file_size) < 0) TEST_ERROR; /* Some data has been written. The file size should be bigger (18KB+976) * bytes if int size is 4 bytes) now. */#if H5_SIZEOF_INT <= 4 if(file_size < (18 * KB) || file_size > (20 * KB)) TEST_ERROR;#elif H5_SIZEOF_INT >= 8 if(file_size < (32 * KB) || file_size > (40 * KB)) TEST_ERROR;#endif if(H5Sclose(space) < 0) TEST_ERROR; if(H5Dclose(dset) < 0) TEST_ERROR; if(H5Pclose(fapl2) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; /* Test different wrong ways to reopen family files when there're multiple * member files existing. */ if(test_family_opens(filename, fapl) < 0) TEST_ERROR; /* Reopen the file with correct member file size. */ if(H5Pset_fapl_family(fapl, (hsize_t)FAMILY_SIZE, H5P_DEFAULT) < 0) TEST_ERROR; if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR; if(H5Fclose(file) < 0) TEST_ERROR; h5_cleanup(FILENAME, fapl); PASSED(); return 0;error: H5E_BEGIN_TRY { H5Sclose(space); H5Dclose(dset); H5Pclose (fapl2); H5Fclose(file); } H5E_END_TRY; return -1;}
Developer ID: FilipeMaia, Project: hdf5, Lines of code: 101
Example 28: read

int ias_l1r_read_image
(
    const L1R_BAND_IO *l1r_band, /* I: HDF IO band structure */
    int sca_index,               /* I: SCA to read (0-rel) */
    int line_start,              /* I: Line to start reading (0-rel) */
    int sample_start,            /* I: Sample to start reading (0-rel) */
    int lines,                   /* I: Number of lines to read */
    int samples,                 /* I: Number of samples to read */
    void *data                   /* O: Data buffer */
)
{
    hid_t data_space;            /* dataspace for the data buffer dimensions */
    hsize_t data_dims[2] = {lines, samples};     /* size of the data buffer */
    hsize_t file_size[3] = {1, lines, samples};  /* slab size to read from the file */
    /* location to read in the file */
    hsize_t file_offset[3] = {sca_index, line_start, sample_start};
    int status;

    /* check for various errors in the input */
    if (l1r_band == NULL)
    {
        IAS_LOG_ERROR("NULL band pointer passed in");
        return ERROR;
    }
    if (l1r_band->id < 0)
    {
        IAS_LOG_ERROR("Band is not open for reading in file %s",
            l1r_band->l1r_file->filename);
        return ERROR;
    }

    /* verify the window of data being read actually falls within the band */
    if ((sca_index < 0) || (sca_index >= l1r_band->scas)
        || (line_start < 0) || ((line_start + lines) > l1r_band->lines)
        || (sample_start < 0) || ((sample_start + samples) > l1r_band->samples))
    {
        IAS_LOG_ERROR("Attempted to read imagery from SCA index %d, band "
            "number %d of %s at line %d, sample %d for a window %d lines "
            "x %d samples when the band has %d SCAs, %d lines, %d samples",
            sca_index, l1r_band->number, l1r_band->l1r_file->filename,
            line_start, sample_start, lines, samples, l1r_band->scas,
            l1r_band->lines, l1r_band->samples);
        return ERROR;
    }

    /* define the memory dataspace to read data into */
    data_space = H5Screate_simple(2, data_dims, NULL);
    if (data_space < 0)
    {
        IAS_LOG_ERROR("Creating memory dataspace for file %s",
            l1r_band->l1r_file->filename);
        return ERROR;
    }

    status = H5Sselect_hyperslab(l1r_band->dataspace_id, H5S_SELECT_SET,
        file_offset, NULL, file_size, NULL);
    if (status < 0)
    {
        IAS_LOG_ERROR("Selecting hyperslab for file %s, band %d",
            l1r_band->l1r_file->filename, l1r_band->number);
        H5Sclose(data_space);
        return ERROR;
    }

    /* read the data from the dataset */
    status = H5Dread(l1r_band->id, l1r_band->memory_data_type, data_space,
        l1r_band->dataspace_id, H5P_DEFAULT, data);
    H5Sclose(data_space);
    if (status < 0)
    {
        IAS_LOG_ERROR("Reading from file %s, band number %d, SCA index %d, "
            "line %d, sample %d, number of lines %d, number of samples %d",
            l1r_band->l1r_file->filename, l1r_band->number, sca_index,
            line_start, sample_start, lines, samples);
        return ERROR;
    }

    return SUCCESS;
}
Developer ID: yalinlee, Project: update_longitude_latitude, Lines of code: 81
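One pattern worth highlighting in Example 28: the memory dataspace is closed on every exit path, including the error branches, before the function returns. Below is a minimal sketch of that discipline; the helper name, the 2-D integer dataset, and the argument layout are hypothetical and not part of the project above.

#include "hdf5.h"

/* Hypothetical helper: read a rectangular window from a 2-D integer
 * dataset. The memory dataspace is released on every exit path, so a
 * failed hyperslab selection or read does not leak the handle. */
static herr_t read_window(hid_t dset, hid_t file_space,
                          const hsize_t offset[2], const hsize_t count[2],
                          int *buf)
{
    hid_t mem_space = H5Screate_simple(2, count, NULL);
    if (mem_space < 0)
        return -1;

    if (H5Sselect_hyperslab(file_space, H5S_SELECT_SET,
                            offset, NULL, count, NULL) < 0)
    {
        H5Sclose(mem_space);   /* error path: close before returning */
        return -1;
    }

    herr_t status = H5Dread(dset, H5T_NATIVE_INT, mem_space, file_space,
                            H5P_DEFAULT, buf);
    H5Sclose(mem_space);       /* success path: close as well */
    return status;
}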
Example 29: h5_write_coord_sp_

void h5_write_coord_sp_(hid_t* file_identifier,
                        int* maximum_blocks,
                        int* coordinates,
                        int* local_blocks,
                        int* total_blocks,
                        int* global_offset)
{
    hid_t dataspace, dataset, memspace, dxfer_template;
    herr_t status;

    int rank;
    hsize_t dimens_2d[2];

    hsize_t start_2d[2];
    hsize_t stride_2d[2], count_2d[2];

    int ierr;

    /* set the dimensions of the dataset */
    rank = 2;
    dimens_2d[0] = *total_blocks;
    dimens_2d[1] = NDIM;

    dataspace = H5Screate_simple(rank, dimens_2d, NULL);

    /* create the dataset */
    dataset = H5Dcreate(*file_identifier, "coordinates", H5T_NATIVE_FLOAT,
                        dataspace, H5P_DEFAULT);

    /* create the hyperslab -- this will differ on the different processors */
    start_2d[0] = (hsize_t) (*global_offset);
    start_2d[1] = 0;

    stride_2d[0] = 1;
    stride_2d[1] = 1;

    count_2d[0] = (hsize_t) (*local_blocks);
    count_2d[1] = NDIM;

    status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
                                 start_2d, stride_2d, count_2d, NULL);

    /* create the memory space */
    rank = 2;
    dimens_2d[0] = *maximum_blocks;
    dimens_2d[1] = MDIM;

    memspace = H5Screate_simple(rank, dimens_2d, NULL);

    start_2d[0] = 0;
    start_2d[1] = 0;

    stride_2d[0] = 1;
    stride_2d[1] = 1;

    count_2d[0] = *local_blocks;
    count_2d[1] = NDIM;

    ierr = H5Sselect_hyperslab(memspace, H5S_SELECT_SET,
                               start_2d, stride_2d, count_2d, NULL);

    flash_tune_plist(&dxfer_template);

    /* write the data */
    status = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, dataspace,
                      dxfer_template, coordinates);

    H5Pclose(dxfer_template);
    H5Sclose(memspace);
    H5Sclose(dataspace);
    H5Dclose(dataset);
}
Developer ID: gcongiu, Project: E10, Lines of code: 74
Example 30: main

int
main (void)
{
    hid_t        file, src_space, vspace, dset;  /* Handles */
    hid_t        dcpl;
    herr_t       status;
    hsize_t      vdsdims[3] = {VDSDIM0, VDSDIM1, VDSDIM2},
                 vdsdims_max[3] = {H5S_UNLIMITED, VDSDIM1, VDSDIM1},
                 dims[3] = {DIM0, DIM1, DIM2},
                 start[3],                       /* Hyperslab parameters */
                 stride[3],
                 count[3],
                 block[3];
    hsize_t      start_out[3],                   /* Hyperslab parameter out */
                 stride_out[3],
                 count_out[3],
                 block_out[3];
    int          i;
    H5D_layout_t layout;                         /* Storage layout */
    size_t       num_map;                        /* Number of mappings */
    ssize_t      len;                            /* Length of the string; also a return value */
    char         *filename;
    char         *dsetname;

    file = H5Fcreate (FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

    /* Create VDS dataspace. */
    vspace = H5Screate_simple (RANK, vdsdims, vdsdims_max);

    /* Create dataspaces for the source dataset. */
    src_space = H5Screate_simple (RANK, dims, NULL);

    /* Create VDS creation property */
    dcpl = H5Pcreate (H5P_DATASET_CREATE);

    /* Initialize hyperslab values */
    start[0] = 0;
    start[1] = 0;
    start[2] = 0;
    stride[0] = DIM0;
    stride[1] = 1;
    stride[2] = 1;
    count[0] = H5S_UNLIMITED;
    count[1] = 1;
    count[2] = 1;
    block[0] = DIM0;
    block[1] = DIM1;
    block[2] = DIM2;

    /*
     * Build the mappings
     */
    status = H5Sselect_hyperslab (vspace, H5S_SELECT_SET, start, stride, count, block);
    status = H5Pset_virtual (dcpl, vspace, "f-%b.h5", "/A", src_space);

    /* Create a virtual dataset */
    dset = H5Dcreate2 (file, DATASET, H5T_NATIVE_INT, vspace, H5P_DEFAULT,
                       dcpl, H5P_DEFAULT);
    status = H5Sclose (vspace);
    status = H5Sclose (src_space);
    status = H5Dclose (dset);
    status = H5Fclose (file);

    /*
     * Now we begin the read section of this example.
     */

    /*
     * Open file and dataset using the default properties.
     */
    file = H5Fopen (FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
    dset = H5Dopen2 (file, DATASET, H5P_DEFAULT);

    /*
     * Get creation property list and mapping properties.
     */
    dcpl = H5Dget_create_plist (dset);

    /*
     * Get storage layout.
     */
    layout = H5Pget_layout (dcpl);
    if (H5D_VIRTUAL == layout)
        printf(" Dataset has a virtual layout \n");
    else
        printf(" Wrong layout found \n");

    /*
     * Find number of mappings.
     */
    status = H5Pget_virtual_count (dcpl, &num_map);
    printf(" Number of mappings is %d\n", (int)num_map);

//......... part of the code omitted here .........
Developer ID: ElaraFX, Project: hdf5, Lines of code: 101
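Taken together, the examples above follow the same basic life cycle: every dataspace obtained from H5Screate_simple or H5Dget_space is eventually released with H5Sclose once the dataset operations that use it are done. Below is a minimal, self-contained sketch of that pattern; the file name, dataset path, and buffer contents are hypothetical.

#include "hdf5.h"

int main(void)
{
    hsize_t dims[2] = {4, 6};

    /* Create a file, a simple dataspace and a dataset. */
    hid_t file  = H5Fcreate("sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(2, dims, NULL);
    hid_t dset  = H5Dcreate2(file, "/data", H5T_NATIVE_INT, space,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    int buf[4][6] = {{0}};
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

    /* Release the dataspace as soon as it is no longer needed;
     * the dataset keeps its own copy of the extent internally. */
    H5Sclose(space);
    H5Dclose(dset);
    H5Fclose(file);
    return 0;
}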
Note: The H5Sclose examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not republish without permission.