This article collects typical usage examples of the H5Dopen function in C++. If you have been wondering how exactly H5Dopen is used in C++, how to call it, or what working examples look like, the hand-picked code examples below should help. A total of 28 H5Dopen code examples are shown, listed by popularity by default. For orientation, a minimal sketch of the basic open / query / read / close pattern appears right after Example 1.

Example 1: main

int main (int argc, char* argv[])
{
    // Library initialization.
    MPI_Init(&argc, &argv);
    H5open();

    std::string filename = argv[1];

    // We determine the size and ranks of our process.
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // We distribute the indices among the processors.
    // For proper load balancing, we should have size % 6 = 0.
    std::vector<std::vector<unsigned int> > indices;
    indices.resize(size);

    for (unsigned int i=0; i<6; i++)
    {
        int idx = user_mod(i,size);
        indices[idx].push_back(i);
    }

    // Open existing file.
    hid_t plist_id;
    plist_id = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plist_id,MPI_COMM_WORLD,MPI_INFO_NULL);
    hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);

    // We store the name of the datasets we want to open.
    std::vector<std::string> dataset_names;
    dataset_names.push_back(std::string("/besselJ"));
    dataset_names.push_back(std::string("/besselY"));
    dataset_names.push_back(std::string("/besselI"));
    dataset_names.push_back(std::string("/besselK"));
    dataset_names.push_back(std::string("/hankelH1"));
    dataset_names.push_back(std::string("/hankelH2"));

    // We store the appropriate function pointers in an array.
    std::vector<std::complex<double> (*) (double, std::complex<double>, int)> f_ptr;
    f_ptr.push_back(sp_bessel::besselJp);
    f_ptr.push_back(sp_bessel::besselYp);
    f_ptr.push_back(sp_bessel::besselIp);
    f_ptr.push_back(sp_bessel::besselKp);
    f_ptr.push_back(sp_bessel::hankelH1p);
    f_ptr.push_back(sp_bessel::hankelH2p);

    // We loop over the datasets.
    for (auto iter = indices[rank].begin(); iter != indices[rank].end(); iter++)
    {
        // Open dataset.
        hid_t dataset_id = H5Dopen(file_id, dataset_names[*iter].c_str(), H5P_DEFAULT);

        // Obtain the dataspace.
        hid_t dspace = H5Dget_space(dataset_id);

        // We obtain the dimensions of the dataset.
        const int ndims = H5Sget_simple_extent_ndims(dspace);
        hsize_t dims[ndims];
        H5Sget_simple_extent_dims(dspace, dims, NULL);

        // We read the dataset.
        std::complex<double> values[dims[0]][dims[1]][dims[2]][dims[3]];
        hid_t complex_id = H5Dget_type(dataset_id);
        H5Dread(dataset_id, complex_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, values);

        // We now open/read the attributes.
        double vmax, zimin, zimax, zrmin, zrmax;
        hid_t vmax_id  = H5Aopen(dataset_id, "vmax",  H5P_DEFAULT);
        hid_t zimin_id = H5Aopen(dataset_id, "zimin", H5P_DEFAULT);
        hid_t zimax_id = H5Aopen(dataset_id, "zimax", H5P_DEFAULT);
        hid_t zrmin_id = H5Aopen(dataset_id, "zrmin", H5P_DEFAULT);
        hid_t zrmax_id = H5Aopen(dataset_id, "zrmax", H5P_DEFAULT);

        H5Aread(vmax_id,  H5T_NATIVE_DOUBLE, &vmax);
        H5Aread(zimin_id, H5T_NATIVE_DOUBLE, &zimin);
        H5Aread(zimax_id, H5T_NATIVE_DOUBLE, &zimax);
        H5Aread(zrmin_id, H5T_NATIVE_DOUBLE, &zrmin);
        H5Aread(zrmax_id, H5T_NATIVE_DOUBLE, &zrmax);

        // We now evaluate the Bessel functions at the computed values.
        arma::vec orders = arma::linspace(-vmax, vmax,  dims[1]);
        arma::vec realZ  = arma::linspace(zrmin, zrmax, dims[2]);
        arma::vec imagZ  = arma::linspace(zimin, zimax, dims[3]);

        unsigned int count = 0;

        for (int i=0; i<dims[0]; i++)
        {
            for (int j=0; j<dims[1]; j++)
            {
                for (int k=0; k<dims[2]; k++)
                {
                    for (int l=0; l<dims[3]; l++)
                    {
                        double eps = std::abs(f_ptr[*iter](orders(j), std::complex<double>(realZ(k), imagZ(l)), i)
                                              - values[i][j][k][l]);
                        if (eps > 1.0e-13)
                        {
//......... the rest of this example is omitted .........
Developer: PlutoniumHeart | Project: complex_bessel | Lines: 101
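The examples on this page come from real projects and are fairly involved. For orientation, here is a minimal serial sketch of the basic pattern most of them follow: open the file, open the dataset, query its dataspace to size a buffer, read, and close everything. The file name "example.h5" and dataset name "/mydata" are placeholders, and the sketch assumes an HDF5 1.8+ build where H5Dopen takes an access property list.

#include <hdf5.h>
#include <stdlib.h>

int main(void)
{
    /* Placeholder names -- substitute your own file and dataset. */
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    hid_t dset = H5Dopen(file, "/mydata", H5P_DEFAULT);

    /* Query the dataspace to size the read buffer (assumes a 2-D dataset). */
    hid_t   space = H5Dget_space(dset);
    hsize_t dims[2];
    H5Sget_simple_extent_dims(space, dims, NULL);

    double *buf = (double *) malloc(dims[0] * dims[1] * sizeof(double));
    herr_t status = H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL,
                            H5P_DEFAULT, buf);

    /* ... use buf ... */

    free(buf);
    H5Sclose(space);
    H5Dclose(dset);
    H5Fclose(file);
    return status < 0;
}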
Example 2: main

int
main (void)
{
    hid_t       file, filetype, memtype, space, dset;   /* Handles */
    herr_t      status;
    hvl_t       wdata[2],                /* Array of vlen structures */
                *rdata;                  /* Pointer to vlen structures */
    hsize_t     dims[1] = {2};
    int         *ptr, ndims, i, j;

    /*
     * Initialize variable-length data.  wdata[0] is a countdown of
     * length LEN0, wdata[1] is a Fibonacci sequence of length LEN1.
     */
    wdata[0].len = LEN0;
    ptr = (int *) malloc (wdata[0].len * sizeof (int));
    for (i=0; i<wdata[0].len; i++)
        ptr[i] = wdata[0].len - (size_t)i;      /* 3 2 1 */
    wdata[0].p = (void *) ptr;

    wdata[1].len = LEN1;
    ptr = (int *) malloc (wdata[1].len * sizeof (int));
    ptr[0] = 1;
    ptr[1] = 1;
    for (i=2; i<wdata[1].len; i++)
        ptr[i] = ptr[i-1] + ptr[i-2];           /* 1 1 2 3 5 8 etc. */
    wdata[1].p = (void *) ptr;

    /*
     * Create a new file using the default properties.
     */
    file = H5Fcreate (FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

    /*
     * Create variable-length datatype for file and memory.
     */
    filetype = H5Tvlen_create (H5T_STD_I32LE);
    memtype = H5Tvlen_create (H5T_NATIVE_INT);

    /*
     * Create dataspace.  Setting maximum size to NULL sets the maximum
     * size to be the current size.
     */
    space = H5Screate_simple (1, dims, NULL);

    /*
     * Create the dataset and write the variable-length data to it.
     */
    dset = H5Dcreate (file, DATASET, filetype, space, H5P_DEFAULT, H5P_DEFAULT,
                H5P_DEFAULT);
    status = H5Dwrite (dset, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);

    /*
     * Close and release resources.  Note the use of H5Dvlen_reclaim
     * removes the need to manually free() the previously malloc'ed
     * data.
     */
    status = H5Dvlen_reclaim (memtype, space, H5P_DEFAULT, wdata);
    status = H5Dclose (dset);
    status = H5Sclose (space);
    status = H5Tclose (filetype);
    status = H5Tclose (memtype);
    status = H5Fclose (file);

    /*
     * Now we begin the read section of this example.  Here we assume
     * the dataset has the same name and rank, but can have any size.
     * Therefore we must allocate a new array to read in data using
     * malloc().
     */

    /*
     * Open file and dataset.
     */
    file = H5Fopen (FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
    dset = H5Dopen (file, DATASET, H5P_DEFAULT);

    /*
     * Get dataspace and allocate memory for array of vlen structures.
     * This does not actually allocate memory for the vlen data, that
     * will be done by the library.
     */
    space = H5Dget_space (dset);
    ndims = H5Sget_simple_extent_dims (space, dims, NULL);
    rdata = (hvl_t *) malloc (dims[0] * sizeof (hvl_t));

    /*
     * Create the memory datatype.
     */
    memtype = H5Tvlen_create (H5T_NATIVE_INT);

    /*
     * Read the data.
     */
    status = H5Dread (dset, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
//......... the rest of this example is omitted .........
Developer: LaHaine | Project: ohpc | Lines: 101
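Example 2 above is truncated just after the H5Dread call of its read section. In the standard variable-length example this is followed by printing each sequence and then returning the library-allocated buffers; a sketch of how that tail typically looks is given below, reusing the variable names from Example 2. The key point is H5Dvlen_reclaim, which frees the per-element buffers the library allocated during the read.

    /*
     * Output the variable-length data to the screen (sketch of the omitted tail).
     */
    for (i=0; i<dims[0]; i++) {
        printf ("%s[%d]:", DATASET, i);
        ptr = rdata[i].p;
        for (j=0; j<(int)rdata[i].len; j++)
            printf (" %d", ptr[j]);
        printf ("\n");
    }

    /*
     * Close and release resources.  H5Dvlen_reclaim frees the memory the
     * library allocated while reading the variable-length sequences.
     */
    status = H5Dvlen_reclaim (memtype, space, H5P_DEFAULT, rdata);
    free (rdata);
    status = H5Dclose (dset);
    status = H5Sclose (space);
    status = H5Tclose (memtype);
    status = H5Fclose (file);
    return 0;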
Example 3: H5Dopen

void BAGDataset::LoadMetadata()

{
/* -------------------------------------------------------------------- */
/*      Load the metadata from the file.                                */
/* -------------------------------------------------------------------- */
    hid_t hMDDS = H5Dopen( hHDF5, "/BAG_root/metadata" );
    hid_t datatype = H5Dget_type( hMDDS );
    hid_t dataspace = H5Dget_space( hMDDS );
    hid_t native = H5Tget_native_type( datatype, H5T_DIR_ASCEND );
    hsize_t dims[3], maxdims[3];

    H5Sget_simple_extent_dims( dataspace, dims, maxdims );

    pszXMLMetadata = (char *) CPLCalloc(dims[0]+1,1);

    H5Dread( hMDDS, native, H5S_ALL, dataspace, H5P_DEFAULT, pszXMLMetadata );

    H5Sclose( dataspace );
    H5Tclose( datatype );
    H5Dclose( hMDDS );

    if( strlen(pszXMLMetadata) == 0 )
        return;

/* -------------------------------------------------------------------- */
/*      Try to get the geotransform.                                    */
/* -------------------------------------------------------------------- */
    CPLXMLNode *psRoot = CPLParseXMLString( pszXMLMetadata );

    if( psRoot == NULL )
        return;

    CPLStripXMLNamespace( psRoot, NULL, TRUE );

    CPLXMLNode *psGeo = CPLSearchXMLNode( psRoot, "=MD_Georectified" );

    if( psGeo != NULL )
    {
        char **papszCornerTokens =
            CSLTokenizeStringComplex(
                CPLGetXMLValue( psGeo, "cornerPoints.Point.coordinates", "" ),
                " ,", FALSE, FALSE );

        if( CSLCount(papszCornerTokens ) == 4 )
        {
            double dfLLX = atof( papszCornerTokens[0] );
            double dfLLY = atof( papszCornerTokens[1] );
            double dfURX = atof( papszCornerTokens[2] );
            double dfURY = atof( papszCornerTokens[3] );

            adfGeoTransform[0] = dfLLX;
            adfGeoTransform[1] = (dfURX - dfLLX) / (GetRasterXSize()-1);
            adfGeoTransform[3] = dfURY;
            adfGeoTransform[5] = (dfLLY - dfURY) / (GetRasterYSize()-1);

            adfGeoTransform[0] -= adfGeoTransform[1] * 0.5;
            adfGeoTransform[3] -= adfGeoTransform[5] * 0.5;
        }

        CSLDestroy( papszCornerTokens );
    }

    CPLDestroyXMLNode( psRoot );

/* -------------------------------------------------------------------- */
/*      Try to get the coordinate system.                               */
/* -------------------------------------------------------------------- */
    OGRSpatialReference oSRS;

    if( OGR_SRS_ImportFromISO19115( &oSRS, pszXMLMetadata ) == OGRERR_NONE )
    {
        oSRS.exportToWkt( &pszProjection );
    }
}
Developer: actian-geospatial | Project: ogr-ingres | Lines: 75
示例4: mainint main (int argc, char ** argv) { char filename [256]; int rank, size, i, j; MPI_Comm comm = MPI_COMM_WORLD; uint64_t start[2], count[2], bytes_read = 0; int ndims, nsf; hid_t file; hid_t dataset; hid_t filespace; hid_t memspace; MPI_Init (&argc, &argv); MPI_Comm_rank (comm, &rank); MPI_Comm_size (comm, &size); int data_out[size]; MPI_Barrier (comm); struct timeval t1; gettimeofday (&t1, NULL); strcpy (filename, "pixie3d.bp"); { file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); dataset = H5Dopen(file, "index"); filespace = H5Dget_space(dataset); /* Get filespace handle first. */ ndims = H5Sget_simple_extent_ndims(filespace); hsize_t dims[ndims]; herr_t status_n = H5Sget_simple_extent_dims(filespace, dims, NULL);/* printf("dataset rank %d, dimensions %lu/n", ndims, (unsigned long)(dims[0]));*/ if (dims[0] != size) { printf ("can only support same # of reader as writers/n"); exit (0); } /* * Define the memory space to read dataset. */ memspace = H5Screate_simple(ndims,dims,NULL); /* * Read dataset back and display. */ herr_t status = H5Dread(dataset, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, data_out); } int temp_index[size]; int temp; memcpy (temp_index, data_out, 4 * size); nsf = 0; for (i = 0; i < size - 1; i++) { if (temp_index[i] > temp_index[i + 1]) { temp = temp_index[i]; temp_index[i] = temp_index[i + 1]; temp_index[i + 1] = temp; } } nsf = temp_index[size - 1] + 1; int group_size = size / nsf; int grpid = rank / group_size; int token; MPI_Status status; MPI_Comm new_comm; MPI_Comm_split (comm, grpid, rank, &new_comm); int new_rank; MPI_Comm_rank (new_comm, &new_rank); if (new_rank == 0) { // data is in f_idx subfile. int f_idx = data_out[rank]; char temp_string[100], * fname; strcpy (temp_string, filename); fname = strtok (temp_string, "."); sprintf (temp_string ,"%s_%d.bp" ,fname ,f_idx ); file = H5Fopen(temp_string, H5F_ACC_RDONLY, H5P_DEFAULT); sprintf (temp_string, "Var7_%d", rank); dataset = H5Dopen(file, temp_string); filespace = H5Dget_space(dataset); /* Get filespace handle first. */ ndims = H5Sget_simple_extent_ndims(filespace); hsize_t dims[ndims]; herr_t status_n = H5Sget_simple_extent_dims(filespace, dims, NULL);//.........这里部分代码省略.........
Developer: qliu21 | Project: read-aio | Lines: 101
示例5: ifPyObject *H5UIget_info( hid_t loc_id, const char *dset_name, char *byteorder){ hid_t dataset_id; int rank; hsize_t *dims; hid_t space_id; H5T_class_t class_id; H5T_order_t order; hid_t type_id; PyObject *t; int i; /* Open the dataset. */ if ( (dataset_id = H5Dopen( loc_id, dset_name, H5P_DEFAULT )) < 0 ) { Py_INCREF(Py_None); return Py_None; /* Not chunked, so return None */ } /* Get an identifier for the datatype. */ type_id = H5Dget_type( dataset_id ); /* Get the class. */ class_id = H5Tget_class( type_id ); /* Get the dataspace handle */ if ( (space_id = H5Dget_space( dataset_id )) < 0 ) goto out; /* Get rank */ if ( (rank = H5Sget_simple_extent_ndims( space_id )) < 0 ) goto out; /* Book resources for dims */ dims = (hsize_t *)malloc(rank * sizeof(hsize_t)); /* Get dimensions */ if ( H5Sget_simple_extent_dims( space_id, dims, NULL) < 0 ) goto out; /* Assign the dimensions to a tuple */ t = PyTuple_New(rank); for(i=0;i<rank;i++) { /* I don't know if I should increase the reference count for dims[i]! */ PyTuple_SetItem(t, i, PyInt_FromLong((long)dims[i])); } /* Release resources */ free(dims); /* Terminate access to the dataspace */ if ( H5Sclose( space_id ) < 0 ) goto out; /* Get the byteorder */ /* Only integer, float, time and enum classes can be byteordered */ if ((class_id == H5T_INTEGER) || (class_id == H5T_FLOAT) || (class_id == H5T_BITFIELD) || (class_id == H5T_TIME) || (class_id == H5T_ENUM)) { order = H5Tget_order( type_id ); if (order == H5T_ORDER_LE) strcpy(byteorder, "little"); else if (order == H5T_ORDER_BE) strcpy(byteorder, "big"); else { fprintf(stderr, "Error: unsupported byteorder: %d/n", order); goto out; } } else { strcpy(byteorder, "irrelevant"); } /* End access to the dataset */ H5Dclose( dataset_id ); /* Return the dimensions tuple */ return t;out: H5Tclose( type_id ); H5Dclose( dataset_id ); Py_INCREF(Py_None); return Py_None; /* Not chunked, so return None */}
Developer: andreas-h | Project: PyTables | Lines: 87
示例6: mainint main(int argc, char* argv[]){ char c; int ix, iy, iz, i; MPI_Comm mpicomm; MPI_Info mpiinfo; int mpirank; int mpisize; double *data3d, *data2d, *x, *y, *z, t; int localx, localy, localwidth, localheight; int maxwidth, maxheight; const char* filename = "output.h5"; hid_t fileid, plist, filespace, memspace, dimvar, varid; hsize_t size[NDIMS], maxsize[NDIMS], chunksize[NDIMS]; hsize_t start[NDIMS], count[NDIMS]; char varname[32]; mpicomm = MPI_COMM_WORLD; mpiinfo = MPI_INFO_NULL; MPI_Init(&argc, &argv); MPI_Comm_size(mpicomm, &mpisize); MPI_Comm_rank(mpicomm, &mpirank); if(! mpirank) printf("Creating some data.../n"); // Distribute our data values in a pism-y way GetLocalBounds(XSIZE, YSIZE, mpirank, mpisize, &localx, &localy, &localwidth, &localheight); printf("Rank%02d: x=%d, y=%d, width=%d, height=%d/n", mpirank, localx, localy, localwidth, localheight); data2d = (double*)malloc(localwidth * localheight * sizeof(double)); data3d = (double*)malloc(localwidth * localheight * ZSIZE * sizeof(double)); x = (double*)malloc(localwidth * sizeof(double)); y = (double*)malloc(localheight * sizeof(double)); z = (double*)malloc(ZSIZE * sizeof(double)); t = 0.0; for(ix = 0; ix < localwidth; ix++) { x[ix] = ix + localx; for(iy = 0; iy < localheight; iy++) { y[iy] = iy + localy; data2d[ix*localheight + iy] = (ix+localx)*localheight + iy+localy; for(iz = 0; iz < ZSIZE; iz++) { z[iz] = iz; data3d[ix*localheight*ZSIZE + iy*ZSIZE + iz] = (ix+localx)*YSIZE*ZSIZE + (iy+localy)*ZSIZE + iz; } } } if(! mpirank) printf("Creating HDF5 file.../n"); plist = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(plist, mpicomm, mpiinfo); // TODO: this seems like a good place to put optimizations, and indeed // PISM is adding several additional properties, like setting block sizes, // cache eviction policies, fs striping parameters, etc. fileid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); H5Pclose(plist); if(! mpirank) printf("Setting up dimensions.../n"); if(! mpirank) printf("Creating time dimension.../n"); // Define the time dimension size[0] = 1; maxsize[0] = H5S_UNLIMITED; chunksize[0] = 1; filespace = H5Screate_simple(1, size, maxsize); plist = H5Pcreate(H5P_DATASET_CREATE); H5Pset_chunk(plist, 1, chunksize); // It is strictly required to set chunksize when using // the low-level api. Contiguous datasets are not allowed // to use the unlimited dimension. dimvar = H5Dcreate(fileid, TNAME, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, plist, H5P_DEFAULT); H5Pclose(plist); H5DSset_scale(dimvar, TNAME); H5Dclose(dimvar); H5Sclose(filespace);#ifdef OLD_WRITE_PATTERN if(! mpirank) printf("Writing time dimension.../n"); dimvar = H5Dopen(fileid, TNAME, H5P_DEFAULT); filespace = H5Dget_space(dimvar); memspace = H5Screate_simple(1, size, 0); plist = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE); // TODO: Pism does this, but comments suggest it is questionable start[0] = 0; count[0] = 1; H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dimvar, H5T_NATIVE_DOUBLE, memspace, filespace, plist, &t); H5Pclose(plist); H5Sclose(filespace);//.........这里部分代码省略.........
Developer: timmorey | Project: sandbox | Lines: 101
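Example 6 is cut off before any data variables are attached to the dimension scales it creates with H5DSset_scale. As a hedged follow-up sketch (the dataset name "data3d" is an assumption, not something created in the visible code; hdf5_hl.h is required, as the example's use of H5DSset_scale already implies), attaching the time scale to a variable opened with H5Dopen would look roughly like this:

    /* Hypothetical follow-up: attach the time scale to a data variable. */
    hid_t var    = H5Dopen(fileid, "data3d", H5P_DEFAULT);   /* assumed dataset name */
    hid_t tscale = H5Dopen(fileid, TNAME, H5P_DEFAULT);

    H5DSattach_scale(var, tscale, 0);   /* time is dimension 0 of the variable */

    H5Dclose(tscale);
    H5Dclose(var);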
示例7: main//.........这里部分代码省略.........#ifdef USE_MPE MPE_Log_event(e_write, 0, "end write file");#endif /* USE_MPE */ } write_us = (MPI_Wtime() - ftime) * MILLION; MPI_Reduce(&write_us, &max_write_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD); if (!my_rank) { write_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_write_us; printf("/np=%d, write_rate=%g", p, write_rate); } #ifdef USE_MPE MPE_Log_event(s_close, 0, "start close file");#endif /* USE_MPE */ /* Close. These collective operations will allow every process * to catch up. */ if (H5Dclose(dsid) < 0 || H5Sclose(whole_spaceid) < 0 || H5Sclose(slice_spaceid) < 0 || H5Pclose(fapl_id) < 0 || H5Fclose(fileid) < 0) ERR;#ifdef USE_MPE MPE_Log_event(e_close, 0, "end close file");#endif /* USE_MPE */ /* Open the file. */ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR; if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) ERR; if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) ERR; if ((fileid = H5Fopen(FILE_NAME, H5F_ACC_RDONLY, fapl_id)) < 0) ERR; /* Create a space to deal with one slice in memory. */ dims[0] = SC1; if ((slice_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR; /* Open the dataset. */ if ((dsid = H5Dopen(fileid, VAR_NAME)) < 0) ERR; if ((whole_spaceid1 = H5Dget_space(dsid)) < 0) ERR; ftime = MPI_Wtime(); /* Read the data, a slice at a time. */ for (s = 0; s < num_steps; s++) { /* Select hyperslab for read of one slice. */ start[0] = s * SC1 * p + my_rank * SC1; count[0] = SC1; if (H5Sselect_hyperslab(whole_spaceid1, H5S_SELECT_SET, start, NULL, count, NULL) < 0) { ERR; return 2; } if (H5Dread(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid1, H5P_DEFAULT, data_in) < 0) { ERR; return 2; }/* //* Check the slice of data. *// *//* for (i = 0; i < SC1; i++) *//* if (data[i] != data_in[i]) *//* { *//* ERR; *//* return 2; *//* } */ } read_us = (MPI_Wtime() - ftime) * MILLION; MPI_Reduce(&read_us, &max_read_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD); if (!my_rank) { read_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_read_us; printf(", read_rate=%g/n", read_rate); } /* Close down. */ if (H5Dclose(dsid) < 0 || H5Sclose(slice_spaceid) < 0 || H5Sclose(whole_spaceid1) < 0 || H5Pclose(fapl_id) < 0 || H5Fclose(fileid) < 0) ERR; } if (!my_rank) SUMMARIZE_ERR; MPI_Finalize(); if (!my_rank) FINAL_RESULTS; return 0;}
Developer: dengchangtao | Project: libmesh | Lines: 101
示例8: _io_writeint _io_write(cow_dfield *f, char *fname)// -----------------------------------------------------------------------------// This function uses a collective MPI-IO procedure to write the contents of// 'data' to the HDF5 file named 'fname', which is assumed to have been created// already. The dataset with name 'dname', which is being written to, must not// exist already. Chunking is enabled as per the ChunkSize variable, and is// disabled by default. Recommended chunk size is local subdomain size. This// will result in optimized read/write on the same decomposition layout, but// poor performance for different access patterns, for example the slabs used by// cluster-FFT functions.//// WARNING!//// All processors must define the same chunk size, the behavior of this function// is not defined otherwise. This implies that chunking should be disabled when// running on a strange number of cores, and subdomain sizes are non-uniform.// -----------------------------------------------------------------------------{ cow_domain *d = f->domain; char **pnames = f->members; void *data = f->data; char *gname = f->name; int n_memb = f->n_members; int n_dims = d->n_dims; hsize_t *L_nint = d->L_nint_h5; hsize_t *G_strt = d->G_strt_h5; hsize_t *G_ntot = d->G_ntot_h5; hsize_t ndp1 = n_dims + 1; hsize_t l_nint[4]; hsize_t l_ntot[4]; hsize_t l_strt[4]; hsize_t stride[4]; for (int i=0; i<n_dims; ++i) { l_nint[i] = d->L_nint[i]; // Selection size, target and destination l_ntot[i] = d->L_ntot[i]; // Memory space total size l_strt[i] = d->L_strt[i]; // Memory space selection start stride[i] = 1; } l_nint[ndp1 - 1] = 1; l_ntot[ndp1 - 1] = n_memb; stride[ndp1 - 1] = n_memb; // The loop over processors is needed if COW_MPI support is enabled and // COW_HDF5_MPI is not. If either COW_MPI is disabled, or COW_HDF5_MPI is // enabled, then the write calls occur without the loop. // --------------------------------------------------------------------------- int sequential = 0; int rank = 0;#if (!COW_HDF5_MPI && COW_MPI) sequential = 1; for (rank=0; rank<d->cart_size; ++rank) { if (rank == d->cart_rank) {#endif hid_t file = H5Fopen(fname, H5F_ACC_RDWR, d->fapl); hid_t memb = H5Gopen(file, gname, H5P_DEFAULT); hid_t mspc = H5Screate_simple(ndp1, l_ntot, NULL); hid_t fspc = H5Screate_simple(n_dims, G_ntot, NULL); hid_t dset; for (int n=0; n<n_memb; ++n) { if (sequential && rank != 0) { dset = H5Dopen(memb, pnames[n], H5P_DEFAULT); } else { dset = H5Dcreate(memb, pnames[n], H5T_NATIVE_DOUBLE, fspc, H5P_DEFAULT, d->dcpl, H5P_DEFAULT); } l_strt[ndp1 - 1] = n; H5Sselect_hyperslab(mspc, H5S_SELECT_SET, l_strt, stride, l_nint, NULL); H5Sselect_hyperslab(fspc, H5S_SELECT_SET, G_strt, NULL, L_nint, NULL); H5Dwrite(dset, H5T_NATIVE_DOUBLE, mspc, fspc, d->dxpl, data); H5Dclose(dset); } H5Sclose(fspc); H5Sclose(mspc); H5Gclose(memb); H5Fclose(file);#if (!COW_HDF5_MPI && COW_MPI) } if (cow_mpirunning()) { MPI_Barrier(d->mpi_cart); } }#endif // !COW_HDF5_MPI && COW_MPI return 0;}
Developer: geoffryan | Project: calvis | Lines: 87
Example 9: _io_read

int _io_read(cow_dfield *f, char *fname)
{
  cow_domain *d = f->domain;
  char **pnames = f->members;
  void *data = f->data;
  char *gname = f->name;
  int n_memb = f->n_members;
  int n_dims = d->n_dims;
  hsize_t *L_nint = d->L_nint_h5;
  hsize_t *G_strt = d->G_strt_h5;
  hsize_t *G_ntot = d->G_ntot_h5;

  hsize_t ndp1 = n_dims + 1;
  hsize_t l_nint[4];
  hsize_t l_ntot[4];
  hsize_t l_strt[4];
  hsize_t stride[4];

  for (int i=0; i<n_dims; ++i) {
    l_nint[i] = d->L_nint[i]; // Selection size, target and destination
    l_ntot[i] = d->L_ntot[i]; // Memory space total size
    l_strt[i] = d->L_strt[i]; // Memory space selection start
    stride[i] = 1;
  }
  l_nint[ndp1 - 1] = 1;
  l_ntot[ndp1 - 1] = n_memb;
  stride[ndp1 - 1] = n_memb;

  // The loop over processors is needed if COW_MPI support is enabled and
  // COW_HDF5_MPI is not. If either COW_MPI is disabled, or COW_HDF5_MPI is
  // enabled, then the write calls occur without the loop.
  // ---------------------------------------------------------------------------
#if (!COW_HDF5_MPI && COW_MPI)
  for (int rank=0; rank<d->cart_size; ++rank) {
    if (rank == d->cart_rank) {
#endif
      hid_t file = H5Fopen(fname, H5F_ACC_RDONLY, d->fapl);
      hid_t memb = H5Gopen(file, gname, H5P_DEFAULT);
      hid_t mspc = H5Screate_simple(ndp1, l_ntot, NULL);
      hid_t fspc = H5Screate_simple(n_dims, G_ntot, NULL);
      for (int n=0; n<n_memb; ++n) {
        hid_t dset = H5Dopen(memb, pnames[n], H5P_DEFAULT);
        l_strt[ndp1 - 1] = n;
        H5Sselect_hyperslab(mspc, H5S_SELECT_SET, l_strt, stride, l_nint, NULL);
        H5Sselect_hyperslab(fspc, H5S_SELECT_SET, G_strt, NULL, L_nint, NULL);
        H5Dread(dset, H5T_NATIVE_DOUBLE, mspc, fspc, d->dxpl, data);
        H5Dclose(dset);
      }
      H5Sclose(fspc);
      H5Sclose(mspc);
      H5Gclose(memb);
      H5Fclose(file);
#if (!COW_HDF5_MPI && COW_MPI)
    }
    if (cow_mpirunning()) {
      MPI_Barrier(d->mpi_cart);
    }
  }
#endif // !COW_HDF5_MPI && COW_MPI
  return 0;
}
Developer: geoffryan | Project: calvis | Lines: 61
示例10: main//.........这里部分代码省略......... * 0 0 0 59 0 61 0 0 0 0 0 0 * 0 25 26 0 27 28 0 29 30 0 31 32 * 0 33 34 0 35 36 67 37 38 0 39 40 * 0 41 42 0 43 44 0 45 46 0 47 48 * 0 0 0 0 0 0 0 0 0 0 0 0 * */ /* * Close memory file and memory dataspaces. */ ret = H5Sclose(mid1); ret = H5Sclose(mid2); ret = H5Sclose(fid); /* * Close dataset. */ ret = H5Dclose(dataset); /* * Close the file. */ ret = H5Fclose(file); /* * Open the file. */ file = H5Fopen(H5FILE_NAME, H5F_ACC_RDONLY, H5P_DEFAULT); /* * Open the dataset. */ dataset = H5Dopen(file,"Matrix in file"); /* * Get dataspace of the open dataset. */ fid = H5Dget_space(dataset); /* * Select first hyperslab for the dataset in the file. The following * elements are selected: * 10 0 11 12 * 18 0 19 20 * 0 59 0 61 * */ start[0] = 1; start[1] = 2; block[0] = 1; block[1] = 1; stride[0] = 1; stride[1] = 1; count[0] = 3; count[1] = 4; ret = H5Sselect_hyperslab(fid, H5S_SELECT_SET, start, stride, count, block); /* * Add second selected hyperslab to the selection. * The following elements are selected: * 19 20 0 21 22 * 0 61 0 0 0 * 27 28 0 29 30 * 35 36 67 37 38 * 43 44 0 45 46 * 0 0 0 0 0 * Note that two hyperslabs overlap. Common elements are: * 19 20 * 0 61
Developer: MattNapsAlot | Project: rHDF5 | Lines: 67
示例11: PYTABLE_append_array/*+++++++++++++++++++++++++.IDENTifer PYTABLE_append_array.PURPOSE append data to HDF5 dataset, extending the dimension ''extdim''.INPUT/OUTPUT call as stat = PYTABLE_append_array( locID, dset_name, extdim, count, buffer ); input: hid_t locID : HDF5 identifier of file or group char *dset_name : name of dataset int extdim : dimension to extend int count : number of arrays to write void *buffer : data to write .RETURNS A negative value is returned on failure. .COMMENTS none-------------------------*/herr_t PYTABLE_append_array( hid_t locID, const char *dset_name, int extdim, int count, const void *buffer ){ int rank; hid_t dataID; hid_t spaceID = -1; hid_t mem_spaceID = -1; hid_t typeID = -1; hsize_t *dims = NULL; hsize_t *dims_ext = NULL; hsize_t *offset = NULL; herr_t stat;/* open the dataset. */ if ( (dataID = H5Dopen( locID, dset_name, H5P_DEFAULT )) < 0 ) return -1;/* get the dataspace handle */ if ( (spaceID = H5Dget_space( dataID )) < 0 ) goto done;/* get rank */ if ( (rank = H5Sget_simple_extent_ndims( spaceID )) < 0 ) goto done;/* get dimensions */ dims = (hsize_t *) malloc( rank * sizeof(hsize_t) ); dims_ext = (hsize_t *) malloc( rank * sizeof(hsize_t) ); offset = (hsize_t *) calloc( rank, sizeof(hsize_t) ); if ( H5Sget_simple_extent_dims( spaceID, dims, NULL ) < 0 ) goto done; offset[extdim] = dims[extdim]; (void) memcpy( dims_ext, dims, rank * sizeof(hsize_t) ); dims_ext[extdim] = count; dims[extdim] += count;/* terminate access to the dataspace */ if ( H5Sclose( spaceID ) < 0 ) goto done;/* extend the dataset */ if ( H5Dset_extent( dataID, dims ) < 0 ) goto done;/* select a hyperslab */ if ( (spaceID = H5Dget_space( dataID )) < 0 ) goto done; stat = H5Sselect_hyperslab( spaceID, H5S_SELECT_SET, offset, NULL, dims_ext, NULL ); free( dims ); free( offset ); if ( stat < 0 ) goto done;/* define memory space */ if ( (mem_spaceID = H5Screate_simple( rank, dims_ext, NULL )) < 0 ) goto done; free( dims_ext );/* get an identifier for the datatype. */ if ( (typeID = H5Dget_type( dataID )) < 0 ) goto done;/* write the data to the hyperslab */ stat = H5Dwrite( dataID, typeID, mem_spaceID, spaceID, H5P_DEFAULT, buffer ); if ( stat < 0 ) goto done;/* end access to the dataset */ if ( H5Dclose( dataID ) ) goto done;/* terminate access to the datatype */ if ( H5Tclose( typeID ) < 0 ) goto done;/* terminate access to the dataspace */ if ( H5Sclose( mem_spaceID ) < 0 ) goto done; if ( H5Sclose( spaceID ) < 0 ) goto done; return 0; done: if ( dims != 0 ) free( dims ); if ( dims_ext != 0 ) free( dims_ext ); if ( offset != 0 ) free( offset ); if ( typeID > 0 ) (void) H5Tclose( typeID ); if ( spaceID > 0 ) (void) H5Sclose( spaceID ); if ( mem_spaceID > 0 ) (void) H5Sclose( mem_spaceID ); if ( dataID > 0 ) (void) H5Dclose( dataID ); return -1;}
Developer: rmvanhees | Project: nadc_tools | Lines: 98
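A hedged sketch of calling the helper from Example 11: it assumes loc_id is an open file or group that already contains a chunked dataset named "table" whose first dimension is unlimited, and NUM_COLS is a placeholder for the dataset's row width.

    /* Append three new rows of doubles along dimension 0 (sketch). */
    double new_rows[3][NUM_COLS];            /* NUM_COLS: assumed row width */
    /* ... fill new_rows ... */

    if ( PYTABLE_append_array( loc_id, "table", 0, 3, new_rows ) < 0 )
        fprintf( stderr, "append failed\n" );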
Example 12: main

int main(int argc, char* argv[])
{
    if(argc < 2){
        std::cout << "please specify a file" << std::endl;
        return 0;
    }

    std::string filename = argv[1];

    hsize_t offset[2] = {2,3};
    if (argc == 4){
        offset[0] = atoi(argv[2]);
        offset[1] = atoi(argv[3]);
    }

    int m_rows = 4;
    int m_cols = 4;

    herr_t status;

    hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
    hid_t dset = H5Dopen (file, "DATASET", H5P_DEFAULT);
    hid_t dcpl = H5Dget_create_plist (dset);
    H5D_layout_t layout = H5Pget_layout (dcpl);
    hid_t space = H5Dget_space (dset);

    hsize_t dims[2], mdims[2];
    status = H5Sget_simple_extent_dims(space,dims,mdims);
    int dim1 = dims[0];
    int dim2 = dims[1];

    const int length = m_rows*m_cols;
    double * data = new double[length];

    // Select from the file
    hsize_t count[2] = {4,4};
    status = H5Sselect_hyperslab (space, H5S_SELECT_SET, offset, NULL, count, NULL);

    // Create memory space
    hsize_t dim_out[2] = {m_rows, m_cols};
    hsize_t offset_out[2] = {0,0};
    hsize_t count_out[2] = {m_rows, m_cols};

    hid_t memspace = H5Screate_simple(2,dim_out,NULL);
    status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL);

    status = H5Dread (dset, H5T_NATIVE_DOUBLE, memspace, space, H5P_DEFAULT, data);

    // Print to screen
    for(int r=0; r<m_rows; ++r){
        for(int c=0; c<m_cols; ++c){
            std::cout << std::setfill (' ') << std::setw (8);
            std::cout << data[c + r*m_cols];
        }
        std::cout << std::endl;
    }

    delete [] data;

    status = H5Pclose (dcpl);
    status = H5Dclose (dset);
    status = H5Sclose (space);
    status = H5Fclose (file);

    return 0;
}
Developer: ResearchComputing | Project: HPSC-Fall-2013 | Lines: 69
示例13: HDF5CreateGroupObjs//.........这里部分代码省略......... poHparent->pszPath = CreatePath( poHparent ); } switch ( oStatbuf.type ) { case H5G_LINK: poHchild->nbAttrs = 0; poHchild->nbObjs = 0; poHchild->poHchild = NULL; poHchild->nRank = 0; poHchild->paDims = 0; poHchild->HDatatype = 0; break; case H5G_GROUP: if( ( hGroupID = H5Gopen( hHDF5, pszObjName ) ) == -1 ) { printf( "Error: unable to access /"%s/" group./n", pszObjName ); return -1; } nbAttrs = H5Aget_num_attrs( hGroupID ); ret = H5Gget_num_objs( hGroupID, &nbObjs ); poHchild->nbAttrs= nbAttrs; poHchild->nbObjs = (int) nbObjs; poHchild->nRank = 0; poHchild->paDims = 0; poHchild->HDatatype = 0; if( nbObjs > 0 ) { poHchild->poHchild =( HDF5GroupObjects * ) CPLCalloc( (int)nbObjs, sizeof( HDF5GroupObjects ) ); memset( poHchild->poHchild,0, (size_t) (sizeof( HDF5GroupObjects ) * nbObjs) ); } else poHchild->poHchild = NULL; if( !HDF5GroupCheckDuplicate( poHparent, oStatbuf.objno ) ) H5Giterate( hHDF5, pszObjName, NULL, HDF5CreateGroupObjs, (void*) poHchild ); else CPLDebug( "HDF5", "avoiding link looping on node '%s'.", pszObjName ); H5Gclose( hGroupID ); break; case H5G_DATASET: if( ( hDatasetID = H5Dopen( hHDF5, pszObjName ) ) == -1 ) { printf( "Error: unable to access /"%s/" dataset./n", pszObjName ); return -1; } nbAttrs = H5Aget_num_attrs( hDatasetID ); datatype = H5Dget_type( hDatasetID ); dataspace = H5Dget_space( hDatasetID ); n_dims = H5Sget_simple_extent_ndims( dataspace ); native = H5Tget_native_type( datatype, H5T_DIR_ASCEND ); if( n_dims > 0 ) { dims = (hsize_t *) CPLCalloc( n_dims,sizeof( hsize_t ) ); maxdims = (hsize_t *) CPLCalloc( n_dims,sizeof( hsize_t ) ); } status = H5Sget_simple_extent_dims( dataspace, dims, maxdims ); if( maxdims != NULL ) CPLFree( maxdims ); if( n_dims > 0 ) { poHchild->nRank = n_dims; // rank of the array poHchild->paDims = dims; // dimmension of the array. poHchild->HDatatype = datatype; // HDF5 datatype } else { poHchild->nRank = -1; poHchild->paDims = NULL; poHchild->HDatatype = 0; } poHchild->nbAttrs = nbAttrs; poHchild->nbObjs = 0; poHchild->poHchild = NULL; poHchild->native = native; ret = H5Dclose( hDatasetID ); break; case H5G_TYPE: poHchild->nbAttrs = 0; poHchild->nbObjs = 0; poHchild->poHchild = NULL; poHchild->nRank = 0; poHchild->paDims = 0; poHchild->HDatatype = 0; break; default: break; } return 0;}
Developer: dlsyaim | Project: osgEarthX | Lines: 101
示例14: mainintmain (void){ hid_t file, space, memspace, dset, dset2, attr; /* Handles */ herr_t status; hsize_t dims[1] = {DIM0}, dims2[2] = {DS2DIM0, DS2DIM1}, coords[4][2] = { {0, 1}, {2, 11}, {1, 0}, {2, 4} }, start[2] = {0, 0}, stride[2] = {2, 11}, count[2] = {2, 2}, block[2] = {1, 3}; hssize_t npoints; hdset_reg_ref_t wdata[DIM0], /* Write buffer */ *rdata; /* Read buffer */ ssize_t size; char wdata2[DS2DIM0][DS2DIM1] = {"The quick brown", "fox jumps over ", "the 5 lazy dogs"}, *rdata2, *name; int ndims, i; /* * Create a new file using the default properties. */ file = H5Fcreate (FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); /* * Create a dataset with character data. */ space = H5Screate_simple (2, dims2, NULL); dset2 = H5Dcreate (file, DATASET2, H5T_STD_I8LE, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); status = H5Dwrite (dset2, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2); /* * Create reference to a list of elements in dset2. */ status = H5Sselect_elements (space, H5S_SELECT_SET, 4, coords[0]); status = H5Rcreate (&wdata[0], file, DATASET2, H5R_DATASET_REGION, space); /* * Create reference to a hyperslab in dset2, close dataspace. */ status = H5Sselect_hyperslab (space, H5S_SELECT_SET, start, stride, count, block); status = H5Rcreate (&wdata[1], file, DATASET2, H5R_DATASET_REGION, space); status = H5Sclose (space); /* * Create dataset with a null dataspace to serve as the parent for * the attribute. */ space = H5Screate (H5S_NULL); dset = H5Dcreate (file, DATASET, H5T_STD_I32LE, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); status = H5Sclose (space); /* * Create dataspace. Setting maximum size to NULL sets the maximum * size to be the current size. */ space = H5Screate_simple (1, dims, NULL); /* * Create the attribute and write the region references to it. */ attr = H5Acreate (dset, ATTRIBUTE, H5T_STD_REF_DSETREG, space, H5P_DEFAULT, H5P_DEFAULT); status = H5Awrite (attr, H5T_STD_REF_DSETREG, wdata); /* * Close and release resources. */ status = H5Aclose (attr); status = H5Dclose (dset); status = H5Dclose (dset2); status = H5Sclose (space); status = H5Fclose (file); /* * Now we begin the read section of this example. Here we assume * the attribute has the same name and rank, but can have any size. * Therefore we must allocate a new array to read in data using * malloc(). */ /* * Open file, dataset, and attribute. */ file = H5Fopen (FILE, H5F_ACC_RDONLY, H5P_DEFAULT); dset = H5Dopen (file, DATASET, H5P_DEFAULT);//.........这里部分代码省略.........
Developer: LaHaine | Project: ohpc | Lines: 101
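Example 14 is truncated shortly after reopening the file and dataset in its read section. For reference, once the attribute of region references has been read back into rdata, each reference is typically resolved as sketched below (HDF5 1.8-era calls): dereference it to reopen the dataset it points into, recover the selected region as a dataspace, and read only those elements through a small memory dataspace. The local names src, fspace, mspace and buf are illustrative, not from the original code.

    /* Sketch: resolve one stored dataset-region reference. */
    hid_t src    = H5Rdereference (dset, H5R_DATASET_REGION, &rdata[i]);
    hid_t fspace = H5Rget_region (dset, H5R_DATASET_REGION, &rdata[i]);

    hssize_t npts = H5Sget_select_npoints (fspace);   /* elements in the region */
    char *buf = (char *) malloc (npts + 1);

    hsize_t memdims[1] = { (hsize_t) npts };
    hid_t mspace = H5Screate_simple (1, memdims, NULL);

    H5Dread (src, H5T_NATIVE_CHAR, mspace, fspace, H5P_DEFAULT, buf);
    buf[npts] = '\0';
    printf (" %s\n", buf);

    free (buf);
    H5Sclose (mspace);
    H5Sclose (fspace);
    H5Dclose (src);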
示例15: H5Fcreate//.........这里部分代码省略......... H5Tset_size(atype, 4); hid_t attr3 = H5Acreate1(channelgroup, "type", atype, aid3, H5P_DEFAULT); status = H5Awrite(attr3, atype,"dict"); H5Aclose(attr3); H5Tclose(atype); H5Sclose(aid3); } if (channelgroup!=0) { QList<QString> keys2=(*events)[keys1[i]].keys(); for (int i2=0;i2<keys2.length();i2++) { QString dname = QString("keystr_%1").arg(keys2[i2]); datasetlength = (*events)[keys1[i]][keys2[i2]].length(); if (datasetlength>0) { //Try to open dataset if it exists //turn off errors when we query the group, using open hid_t error_stack = H5Eget_current_stack(); H5E_auto2_t oldfunc; void *old_client_data; H5Eget_auto(error_stack, &oldfunc, &old_client_data); H5Eset_auto(error_stack, NULL, NULL); //query or open dataset curdataset = H5Dopen(channelgroup,dname.toStdString().c_str(),H5P_DEFAULT); //turn errors back on. H5Eset_auto(error_stack, oldfunc, old_client_data); //if cannot open dataset, create it, and make it chunked. if (curdataset<=0) { //set up size info, chunks etc... maxdims[0]=H5S_UNLIMITED; rank = 1; d_dims[0]=datasetlength; curdataspace= H5Screate_simple(rank, d_dims,maxdims); prop = H5Pcreate(H5P_DATASET_CREATE); status = H5Pset_chunk(prop, rank, d_dims); if (status) trap(); curdataset = H5Dcreate( channelgroup, dname.toStdString().c_str(), H5T_NATIVE_FLOAT, curdataspace, H5P_DEFAULT, prop, H5P_DEFAULT);
Developer: argonnexraydetector | Project: RoachFirmPy | Lines: 65
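Example 15 silences the HDF5 error stack so that a failing H5Dopen can serve as an existence test before the dataset is created. A shorter alternative, sketched here under the assumption of an HDF5 1.8+ build, is to ask the library directly with H5Lexists and leave error reporting enabled:

    /* Sketch: probe for the dataset by link name instead of trapping errors. */
    if ( H5Lexists( channelgroup, dname.toStdString().c_str(), H5P_DEFAULT ) > 0 )
    {
        curdataset = H5Dopen( channelgroup, dname.toStdString().c_str(), H5P_DEFAULT );
    }
    else
    {
        /* create the chunked, extendible dataset exactly as in the example */
    }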
示例16: e5_read_grid_listestatus_te5_read_grid_list( eid_t e5_group_id, const char* list_name, e5_grid_dataset* grid_list){ int i; int d; int log_scale; int close_group; hsize_t h5_max_dim[3]; hsize_t h5_min_dim[3]; eid_t e5_list_group_id; eid_t e5_type_id; eid_t e5_dataset_id; eid_t e5_dataspace_id; eid_t e5_memspace_id; hid_t h5_status; estatus_t e5_status = E5_SUCCESS; if(list_name && strlen(list_name)) { e5_list_group_id = e5_create_group(e5_group_id, list_name); close_group = 1; } else { e5_list_group_id = e5_group_id; close_group = 0; } for(i = 0; grid_list && grid_list[i].name != 0; i++) { e5_grid_dataset* grid = &grid_list[i]; if(grid->name == 0 || strlen(grid->name) < 1) continue; e5_dataset_id = H5Dopen(e5_list_group_id, grid->name); if (e5_dataset_id < 0) { e5_status = E5_INVALID_DATASET; e5_error(e5_list_group_id, e5_status, "Failed to open grid dataset '%s'/n", grid->name); return e5_status; } if(!grid->data) { e5_status = E5_INVALID_POINTER; e5_error(e5_dataspace_id, e5_status, "Failed to provide pointer for reading '%s' from E5 data file/n", grid->name); break; } e5_dataspace_id = H5Dget_space(e5_dataset_id); e5_type_id = H5Dget_type(e5_dataset_id); H5Sget_simple_extent_dims(e5_dataspace_id, h5_min_dim, h5_max_dim); for(d = 0; d < 3; d++) { grid->dim[d] = h5_min_dim[d] >= h5_max_dim[d] ? h5_min_dim[d] : h5_max_dim[d]; grid->dim[d] = grid->dim[d] < 1 ? 1 : grid->dim[d]; } grid->type = e5_convert_hdf_type(e5_type_id); switch(grid->type) { case E5_TYPE_FLOAT: { e5_info(e5_group_id, "Reading grid [type='float', name='%s', dim='%u %u %u']/n", grid->name, grid->dim[0], grid->dim[1], grid->dim[2]); e5_memspace_id = H5Screate_simple(3, h5_min_dim, h5_max_dim); h5_status = H5Dread(e5_dataset_id, H5T_NATIVE_FLOAT, e5_memspace_id, e5_dataspace_id, H5P_DEFAULT, (grid->data)); if (h5_status < 0) { e5_status = E5_READ_FAILURE; e5_error(e5_dataset_id, e5_status, "Failed to read '%s' from F5 data file/n", grid->name); } H5Sclose(e5_memspace_id); break; } case E5_TYPE_DOUBLE: { e5_info(e5_group_id, "Reading grid [type='double', name='%s', dim='%u %u %u']/n", grid->name, grid->dim[0], grid->dim[1], grid->dim[2]); e5_memspace_id = H5Screate_simple(3, h5_min_dim, h5_max_dim); h5_status = H5Dread(e5_dataset_id, H5T_NATIVE_DOUBLE, e5_memspace_id, e5_dataspace_id, H5P_DEFAULT, (grid->data)); if (h5_status < 0) { e5_status = E5_READ_FAILURE; e5_error(e5_dataset_id, e5_status, "Failed to read '%s' from F5 data file/n", grid->name); } H5Sclose(e5_memspace_id); break; } case E5_TYPE_INVALID: default://.........这里部分代码省略.........
Developer: voidcycles | Project: void | Lines: 101
示例17: GetMesh /// @todo extend to Wegdes (aka Prisms)void SalomeIO::read(const std::string& name, vector < vector < double> > &coords, const double Lref, std::vector<bool> &type_elem_flag) { Mesh& mesh = GetMesh(); mesh.SetLevel(0); hsize_t dims[2]; // compute number of menus =============== hid_t file_id = H5Fopen(name.c_str(),H5F_ACC_RDWR, H5P_DEFAULT); hid_t gid = H5Gopen(file_id,mesh_ensemble.c_str(),H5P_DEFAULT); hsize_t n_menus; hid_t status= H5Gget_num_objs(gid, &n_menus); // number of menus if(status !=0) { std::cout << "Number of mesh menus not found"; abort(); } // compute number of groups and number of meshes =============== std::vector<std::string> mesh_menus; std::vector<std::string> group_menus; unsigned n_groups = 0; unsigned n_meshes = 0; for (unsigned j=0; j<n_menus; j++) { char * menu_names_j = new char[max_length]; H5Gget_objname_by_idx(gid,j,menu_names_j,max_length); ///@deprecated see the HDF doc to replace this std::string tempj(menu_names_j); if (tempj.substr(0,5).compare("Group") == 0) { n_groups++; group_menus.push_back(tempj); } else if (tempj.substr(0,4).compare("Mesh") == 0) { n_meshes++; mesh_menus.push_back(tempj); } } // compute number of groups and number of meshes =============== unsigned int n_elements_b_bb = 0; // meshes ======================== for (unsigned j=0; j< n_meshes; j++) { std::string tempj = mesh_menus[j];// dimension =============== /// @todo this determination of the dimension from the mesh file would not work with a 2D mesh embedded in 3D std::string my_mesh_name_dir = mesh_ensemble + "/" + tempj + "/" + aux_zeroone + "/" + elem_list + "/"; ///@todo here we have to loop hsize_t n_fem_type; hid_t gid = H5Gopen(file_id,my_mesh_name_dir.c_str(),H5P_DEFAULT); hid_t status0 = H5Gget_num_objs(gid, &n_fem_type); if(status0 !=0) {std::cout << "SalomeIO::read_fem_type: H5Gget_num_objs not found"; abort();} FindDimension(gid,tempj,n_fem_type); H5Gclose(gid); if (mesh.GetDimension() != n_fem_type) { std::cout << "Mismatch between dimension and number of element types" << std::endl; abort(); } // fem type =============== std::vector<std::string> el_fe_type(mesh.GetDimension()); ReadFE(file_id,el_fe_type, n_fem_type, my_mesh_name_dir); // // read NODAL COORDINATES **************** C std::string coord_dataset = mesh_ensemble + "/" + tempj + "/" + aux_zeroone + "/" + node_list + "/" + coord_list + "/"; ///@todo here we have to loop hid_t dtset = H5Dopen(file_id,coord_dataset.c_str(),H5P_DEFAULT); // SET NUMBER OF NODES hid_t filespace = H5Dget_space(dtset); /* Get filespace handle first. */ hid_t status = H5Sget_simple_extent_dims(filespace, dims, NULL); if(status ==0) std::cerr << "SalomeIO::read dims not found"; // reading xyz_med unsigned int n_nodes = dims[0]/mesh.GetDimension(); double *xyz_med = new double[dims[0]]; std::cout << " Number of nodes in med file " << n_nodes << " " << std::endl; mesh.SetNumberOfNodes(n_nodes); // SET NODE COORDINATES coords[0].resize(n_nodes); coords[1].resize(n_nodes); coords[2].resize(n_nodes); status=H5Dread(dtset,H5T_NATIVE_DOUBLE,H5S_ALL,H5S_ALL,H5P_DEFAULT,xyz_med); H5Dclose(dtset); if (mesh.GetDimension()==3) { for (unsigned j=0; j<n_nodes; j++) { coords[0][j] = xyz_med[j]/Lref; coords[1][j] = xyz_med[j+n_nodes]/Lref; coords[2][j] = xyz_med[j+2*n_nodes]/Lref; } }//.........这里部分代码省略.........
Developer: rushs777 | Project: femus | Lines: 101
示例18: e5_read_data_info_listestatus_te5_read_data_info_list( eid_t e5_group_id, const char* list_name, e5_data_info* info_list){ int i; int d; int log_scale; int close_group; estatus_t status; hsize_t h5_min_dim[3]; hsize_t h5_max_dim[3]; eid_t e5_list_group_id; eid_t e5_type_id; eid_t e5_dataset_id; eid_t e5_dataspace_id; status = E5_SUCCESS; if(list_name && strlen(list_name)) { e5_list_group_id = e5_create_group(e5_group_id, list_name); close_group = 1; } else { e5_list_group_id = e5_group_id; close_group = 0; } for(i = 0; info_list && info_list[i].name != 0; i++) { e5_data_info* info = &info_list[i]; e5_dataset_id = H5Dopen(e5_list_group_id, info->name); if (e5_dataset_id < 0) { status = E5_INVALID_DATASET; e5_error(e5_list_group_id, status, "Failed to open info for dataset '%s'/n", info->name); return status; } e5_dataspace_id = H5Dget_space(e5_dataset_id); e5_type_id = H5Dget_type(e5_dataset_id); H5Sget_simple_extent_dims(e5_dataspace_id, h5_min_dim, h5_max_dim); info->type = e5_convert_hdf_type(e5_type_id); for(d = 0; d < 3; d++) { info->dim[d] = h5_min_dim[d] >= h5_max_dim[d] ? h5_min_dim[d] : h5_max_dim[d]; info->dim[d] = info->dim[d] < 1 ? 1 : info->dim[d]; } log_scale = 0; if(e5_is_valid_attr(e5_group_id, "log10")) e5_read_attr_int(e5_dataset_id, "log10", &log_scale); info->scale = log_scale ? E5_VALUE_SCALE_LOG10 : E5_VALUE_SCALE_LINEAR; e5_info(e5_group_id, "Read data info [type='%s', name='%s', dim='%u %u %u']/n", e5_typename(info->type), info->name, info->dim[0], info->dim[1], info->dim[2]); H5Sclose(e5_dataspace_id); H5Dclose(e5_dataset_id); H5Tclose(e5_type_id); } if(close_group) e5_close_group(e5_list_group_id); return E5_SUCCESS;}
Developer: voidcycles | Project: void | Lines: 73
示例19: test_rowmaj/*------------------------------------------------------------------------- * Function: test_rowmaj * * Purpose: Reads the entire dataset using the specified size-squared * I/O requests in row major order. * * Return: Efficiency: data requested divided by data actually read. * * Programmer: Robb Matzke * Thursday, May 14, 1998 * * Modifications: * *------------------------------------------------------------------------- */static doubletest_rowmaj (int op, size_t cache_size, size_t io_size){ hid_t file, dset, mem_space, file_space; signed char *buf = calloc (1, (size_t)(SQUARE(io_size))); hsize_t i, j, hs_size[2]; hsize_t hs_offset[2];#ifdef H5_WANT_H5_V1_4_COMPAT int mdc_nelmts, rdcc_nelmts;#else /* H5_WANT_H5_V1_4_COMPAT */ int mdc_nelmts; size_t rdcc_nelmts;#endif /* H5_WANT_H5_V1_4_COMPAT */ double w0; H5Pget_cache (fapl_g, &mdc_nelmts, &rdcc_nelmts, NULL, &w0);#ifdef RM_W0 w0 = RM_W0;#endif#ifdef RM_NRDCC rdcc_nelmts = RM_NRDCC;#endif H5Pset_cache (fapl_g, mdc_nelmts, rdcc_nelmts, cache_size*SQUARE (CH_SIZE), w0); file = H5Fopen (FILE_NAME, H5F_ACC_RDWR, fapl_g); dset = H5Dopen (file, "dset"); file_space = H5Dget_space (dset); nio_g = 0; for (i=0; i<CH_SIZE*DS_SIZE; i+=io_size) {#if 0 fprintf (stderr, "%5d/b/b/b/b/b", (int)i); fflush (stderr);#endif for (j=0; j<CH_SIZE*DS_SIZE; j+=io_size) { hs_offset[0] = i; hs_size[0] = MIN (io_size, CH_SIZE*DS_SIZE-i); hs_offset[1] = j; hs_size[1] = MIN (io_size, CH_SIZE*DS_SIZE-j); mem_space = H5Screate_simple (2, hs_size, hs_size); H5Sselect_hyperslab (file_space, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL); if (READ==op) { H5Dread (dset, H5T_NATIVE_SCHAR, mem_space, file_space, H5P_DEFAULT, buf); } else { H5Dwrite (dset, H5T_NATIVE_SCHAR, mem_space, file_space, H5P_DEFAULT, buf); } H5Sclose (mem_space); } } free (buf); H5Sclose (file_space); H5Dclose (dset); H5Fclose (file); return (double)SQUARE(CH_SIZE*DS_SIZE)/(double)nio_g;}
Developer: MattNapsAlot | Project: rHDF5 | Lines: 76
示例20: e5_merge_flash_scalarsstatic estatus_te5_merge_flash_scalars( eid_t e5_file_id, hid_t f5_file_id, const char* f5_list_name, f5_scalar_list_type f5_list_type){ size_t s; hsize_t dimens_1d; hsize_t maxdimens_1d; hid_t string_type; hsize_t f5_scalar_bytes; hid_t f5_dataspace; hid_t f5_memspace; hid_t f5_dataset; hid_t f5_type; void* f5_data; hid_t e5_em_group_id; int hstatus; size_t e5_scalar_bytes; estatus_t estatus = E5_SUCCESS; string_type = H5Tcopy(H5T_C_S1); H5Tset_size(string_type, F5_MAX_STRING_LENGTH); f5_dataset = H5Dopen(f5_file_id, f5_list_name); if (f5_dataset < 0) { estatus = E5_INVALID_DATASET; e5_error(f5_file_id, estatus, "Failed to open dataset '%s'/n", f5_list_name); return estatus; } f5_dataspace = H5Dget_space(f5_dataset); H5Sget_simple_extent_dims(f5_dataspace, &dimens_1d, &maxdimens_1d); e5_em_group_id = e5_open_group(e5_file_id, E5_EMISSIVITY_GROUP_NAME_ABS); switch(f5_list_type) { case F5_SCALAR_LIST_INTEGER: case F5_SCALAR_LIST_LOGICAL: { hid_t f5_int_list_type; f5_int_list_t *f5_int_list; f5_scalar_bytes = dimens_1d * sizeof(f5_int_list_t); f5_int_list = (f5_int_list_t *) e5_malloc(f5_scalar_bytes); if(!f5_int_list) { estatus = E5_OUT_OF_MEMORY; e5_error(f5_file_id, estatus, "Failed to allocate memory for reading '%s' from F5 data file/n", f5_list_name); break; } memset(f5_int_list, 0, f5_scalar_bytes); f5_int_list_type = H5Tcreate(H5T_COMPOUND, sizeof(f5_int_list_t)); H5Tinsert(f5_int_list_type, "name", HOFFSET(f5_int_list_t, name), string_type); H5Tinsert(f5_int_list_type, "value", HOFFSET(f5_int_list_t, value), H5T_NATIVE_INT); f5_memspace = H5Screate_simple(1, &dimens_1d, NULL); hstatus = H5Dread(f5_dataset, f5_int_list_type, f5_memspace, f5_dataspace, H5P_DEFAULT, f5_int_list); if (hstatus < 0) { estatus = E5_READ_FAILURE; e5_error(f5_file_id, estatus, "Failed to read '%s' dataset from data file/n", f5_list_name); break; } f5_data = f5_int_list; f5_type = f5_int_list_type; if(f5_data) { // Add the scalar list as attributes for the top-level Emissivity group e5_scalar_bytes = (dimens_1d + 1) * sizeof(e5_mutable_attr_int); e5_mutable_attr_int* e5_int_scalars = (e5_mutable_attr_int*) e5_malloc(e5_scalar_bytes); memset(e5_int_scalars, 0, e5_scalar_bytes); for(s = 0; s < dimens_1d; s++) { e5_int_scalars[s].value = f5_int_list[s].value; e5_int_scalars[s].name = e5_trim(f5_int_list[s].name, E5_TRUE, E5_TRUE, F5_MAX_STRING_LENGTH); } e5_write_attr_list_int(e5_em_group_id, (e5_attr_int*)e5_int_scalars); e5_free(e5_int_scalars); } break; } case F5_SCALAR_LIST_REAL: { hid_t f5_real_list_type; f5_real_list_t *f5_real_list; f5_scalar_bytes = dimens_1d * sizeof(f5_real_list_t); f5_real_list = (f5_real_list_t *) e5_malloc(f5_scalar_bytes); if(!f5_real_list) { estatus = E5_OUT_OF_MEMORY; e5_error(f5_file_id, estatus, "Failed to allocate memory for reading '%s' from F5 data file/n", f5_list_name); break;//.........这里部分代码省略.........
Developer: voidcycles | Project: void | Lines: 101
示例21: AH5_auto_test_filechar *test_write_string_dataset(){#define DIM0 4#define SDIM 8 hid_t file_id, filetype, memtype, space, dset; size_t sdim; hsize_t dims[1] = {DIM0}; int ndims, i, j; /*char wdata[DIM0][SDIM] =*/ char *wdata[] = {"Parting", "is such", "sweet ", "sorrow."}; char **rdata; // Write a simple mesh test. file_id = AH5_auto_test_file(); mu_assert("Write string dataset.", AH5_write_str_dataset(file_id, "dataset_name", DIM0, SDIM, wdata)); // Test the written data using hdf5 API. dset = H5Dopen(file_id, "/dataset_name", H5P_DEFAULT); filetype = H5Dget_type(dset); sdim = H5Tget_size(filetype); space = H5Dget_space(dset); ndims = H5Sget_simple_extent_dims(space, dims, NULL); rdata = (char **) malloc(dims[0] * sizeof (char *)); rdata[0] = (char *) malloc(dims[0] * sdim * sizeof (char)); for (i=1; i<dims[0]; i++) rdata[i] = rdata[0] + i * sdim; memtype = H5Tcopy(H5T_C_S1); mu_assert("HDF5 error in H5Tset_size.", H5Tset_size(memtype, sdim) >= 0); mu_assert("HDF5 error in H5Dread.", H5Dread(dset, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata[0]) >= 0); for (i = 0; i < dims[0]; i++) { printf("%s %s/n", wdata[i], rdata[i]); /*mu_assert_str_equal("Check the first str dataset values.", wdata[i], rdata[i]);*/ j = 0; while (wdata[i][j] != ' ' && wdata[i][j] != '/0') { mu_assert_equal("Check the first str dataset values.", wdata[i][j], rdata[i][j]); ++j; } } // Release resources. free(rdata[0]); free(rdata); mu_assert("HDF5 error in H5Dclose.", H5Dclose(dset) >= 0); mu_assert("HDF5 error in H5Sclose.", H5Sclose(space) >= 0); mu_assert("HDF5 error in H5Tclose.", H5Tclose(filetype) >= 0); mu_assert("HDF5 error in H5Tclose.", H5Tclose(memtype) >= 0); // Write a string dataset using strlen. mu_assert("Write string dataset using strlen.", AH5_write_str_dataset(file_id, "dataset_name_2", DIM0, strlen(wdata[0]) + 1, wdata)); // Test the written data using hdf5 API. dset = H5Dopen(file_id, "/dataset_name", H5P_DEFAULT); filetype = H5Dget_type(dset); sdim = H5Tget_size(filetype); space = H5Dget_space(dset); ndims = H5Sget_simple_extent_dims(space, dims, NULL); rdata = (char **) malloc(dims[0] * sizeof (char *)); rdata[0] = (char *) malloc(dims[0] * sdim * sizeof (char)); for (i=1; i<dims[0]; i++) rdata[i] = rdata[0] + i * sdim; memtype = H5Tcopy(H5T_C_S1); mu_assert("HDF5 error in H5Tset_size.", H5Tset_size(memtype, sdim) >= 0); mu_assert("HDF5 error in H5Dread.", H5Dread(dset, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata[0]) >= 0); for (i = 0; i < dims[0]; i++) { /*mu_assert_str_equal("Check the first str dataset values.", wdata[i], rdata[i]);*/ j = 0; while (wdata[i][j] != ' ' && wdata[i][j] != '/0') { mu_assert_equal("Check the first str dataset values.", wdata[i][j], rdata[i][j]); ++j; } } // Release resources. free(rdata[0]); free(rdata); mu_assert("HDF5 error in H5Dclose.", H5Dclose(dset) >= 0); mu_assert("HDF5 error in H5Sclose.", H5Sclose(space) >= 0); mu_assert("HDF5 error in H5Tclose.", H5Tclose(filetype) >= 0); mu_assert("HDF5 error in H5Tclose.", H5Tclose(memtype) >= 0); // Close file. AH5_close_test_file(file_id); return MU_FINISHED_WITHOUT_ERRORS;}
Developer: ThinkManhattan | Project: amelet-hdf | Lines: 95
示例22: mainint main(void){ /* handles */ hid_t file, dataset; hid_t datatype, dataspace; hid_t memspace; herr_t status; H5T_class_t t_class; /* data type class */ H5T_order_t order; /* data order */ size_t size; /* size of the data element * stored in the file */ hsize_t dimsm[3]; /* memory space dimensions */ hsize_t dims_out[2]; /* dataset dimensions */ int data_out[NX][NY][NZ]; /* output buffer */ hsize_t count[2]; /* size of the hyperslab in the file */ hsize_t offset[2]; /* hyperslab offset in the file */ hsize_t count_out[3]; /* size of the hyperslab in memory */ hsize_t offset_out[3]; /* hyperslab offset in memory */ int i,j,k; int status_n; int rank; for (j = 0; j < NX; j++) { for (i = 0; i < NY; i++) { for (k = 0; k < NZ; k++) { data_out[j][i][k] = 0; } } } /* * Open the file and the dataset. */ file = H5Fopen(H5FILE_NAME, H5F_ACC_RDONLY, H5P_DEFAULT); dataset = H5Dopen(file, DATASET_NAME); /* * Get datatype and dataspace handles and then query * dataset class, order, size, rank and dimensions. */ datatype = H5Dget_type(dataset); /* datatype handle */ t_class = H5Tget_class(datatype); if (t_class == H5T_INTEGER) printf("Dataset has INTEGER type/n"); order = H5Tget_order(datatype); if (order == H5T_ORDER_LE) printf("Little endian order/n"); size = H5Tget_size(datatype); printf("Data size is %d/n", (int)size); dataspace = H5Dget_space(dataset); /* dataspace handle */ rank = H5Sget_simple_extent_ndims(dataspace); status_n = H5Sget_simple_extent_dims(dataspace, dims_out, NULL); printf("rank %d, dimensions %lu x %lu/n", rank, (unsigned long)(dims_out[0]), (unsigned long)(dims_out[1])); /* * Define hyperslab in the dataset. */ offset[0] = 1; offset[1] = 2; count[0] = NX_SUB; count[1] = NY_SUB; status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); /* * Define the memory dataspace. */ dimsm[0] = NX; dimsm[1] = NY; dimsm[2] = NZ; memspace = H5Screate_simple(RANK_OUT, dimsm, NULL);//.........这里部分代码省略.........
Developer: ludwig | Project: examples | Lines: 101
Example 23: arma_H5Dopen

hid_t
arma_H5Dopen(hid_t loc_id, const char* name, hid_t dapl_id)
  {
  return H5Dopen(loc_id, name, dapl_id);
  }
Developer: 2003pro | Project: armadillo | Lines: 4
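Worth noting, since the examples on this page mix two-argument and three-argument calls: H5Dopen is a versioned macro. On an HDF5 1.8+ build it normally expands to H5Dopen2, which takes a dataset-access property list as its third argument (the form Example 23 wraps); builds configured for the 1.6 API map it to the two-argument H5Dopen1 instead. The two spellings below are alternatives for a given build, not code meant to compile together:

    /* 1.6-style API (H5Dopen1): two arguments. */
    hid_t d = H5Dopen(file_id, "/dset");

    /* 1.8-style API (H5Dopen2): explicit dataset-access property list. */
    hid_t d = H5Dopen(file_id, "/dset", H5P_DEFAULT);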
示例24: ReadArray//-*****************************************************************************AbcA::ArraySamplePtrReadArray( AbcA::ReadArraySampleCachePtr iCache, hid_t iParent, const std::string &iName, const AbcA::DataType &iDataType, hid_t iFileType, hid_t iNativeType ){ // Dispatch string stuff. if ( iDataType.getPod() == kStringPOD ) { return ReadStringArray( iCache, iParent, iName, iDataType ); } else if ( iDataType.getPod() == kWstringPOD ) { return ReadWstringArray( iCache, iParent, iName, iDataType ); } assert( iDataType.getPod() != kStringPOD && iDataType.getPod() != kWstringPOD ); // Open the data set. hid_t dsetId = H5Dopen( iParent, iName.c_str(), H5P_DEFAULT ); ABCA_ASSERT( dsetId >= 0, "Cannot open dataset: " << iName ); DsetCloser dsetCloser( dsetId ); // Read the data space. hid_t dspaceId = H5Dget_space( dsetId ); ABCA_ASSERT( dspaceId >= 0, "Could not get dataspace for dataSet: " << iName ); DspaceCloser dspaceCloser( dspaceId ); AbcA::ArraySample::Key key; bool foundDigest = false; // if we are caching, get the key and see if it is being used if ( iCache ) { key.origPOD = iDataType.getPod(); key.readPOD = key.origPOD; key.numBytes = Util::PODNumBytes( key.readPOD ) * H5Sget_simple_extent_npoints( dspaceId ); foundDigest = ReadKey( dsetId, "key", key ); AbcA::ReadArraySampleID found = iCache->find( key ); if ( found ) { AbcA::ArraySamplePtr ret = found.getSample(); assert( ret ); if ( ret->getDataType().getPod() != iDataType.getPod() ) { ABCA_THROW( "ERROR: Read data type for dset: " << iName << ": " << ret->getDataType() << " does not match expected data type: " << iDataType ); } // Got it! return ret; } } // Okay, we haven't found it in a cache. // Read the data type. hid_t dtypeId = H5Dget_type( dsetId ); ABCA_ASSERT( dtypeId >= 0, "Could not get datatype for dataSet: " << iName ); DtypeCloser dtypeCloser( dtypeId ); ABCA_ASSERT( EquivalentDatatypes( iFileType, dtypeId ), "File DataType clash for array dataset: " << iName ); AbcA::ArraySamplePtr ret; H5S_class_t dspaceClass = H5Sget_simple_extent_type( dspaceId ); if ( dspaceClass == H5S_SIMPLE ) { // Get the dimensions int rank = H5Sget_simple_extent_ndims( dspaceId ); ABCA_ASSERT( rank == 1, "H5Sget_simple_extent_ndims() must be 1." ); hsize_t hdim = 0; rank = H5Sget_simple_extent_dims( dspaceId, &hdim, NULL ); Dimensions dims; std::string dimName = iName + ".dims"; if ( H5Aexists( iParent, dimName.c_str() ) ) { ReadDimensions( iParent, dimName, dims ); } else { dims.setRank(1);//.........这里部分代码省略.........
Developer: AWhetter | Project: alembic | Lines: 101
示例25: CPLErrorGDALDataset *BAGDataset::Open( GDALOpenInfo * poOpenInfo ){/* -------------------------------------------------------------------- *//* Confirm that this appears to be a BAG file. *//* -------------------------------------------------------------------- */ if( !Identify( poOpenInfo ) ) return NULL;/* -------------------------------------------------------------------- *//* Confirm the requested access is supported. *//* -------------------------------------------------------------------- */ if( poOpenInfo->eAccess == GA_Update ) { CPLError( CE_Failure, CPLE_NotSupported, "The BAG driver does not support update access." ); return NULL; } /* -------------------------------------------------------------------- *//* Open the file as an HDF5 file. *//* -------------------------------------------------------------------- */ hid_t hHDF5 = H5Fopen( poOpenInfo->pszFilename, H5F_ACC_RDONLY, H5P_DEFAULT ); if( hHDF5 < 0 ) return NULL;/* -------------------------------------------------------------------- *//* Confirm it is a BAG dataset by checking for the *//* BAG_Root/Bag Version attribute. *//* -------------------------------------------------------------------- */ hid_t hBagRoot = H5Gopen( hHDF5, "/BAG_root" ); hid_t hVersion = -1; if( hBagRoot >= 0 ) hVersion = H5Aopen_name( hBagRoot, "Bag Version" ); if( hVersion < 0 ) { H5Fclose( hHDF5 ); return NULL; } H5Aclose( hVersion );/* -------------------------------------------------------------------- *//* Create a corresponding dataset. *//* -------------------------------------------------------------------- */ BAGDataset *poDS = new BAGDataset(); poDS->hHDF5 = hHDF5;/* -------------------------------------------------------------------- *//* Extract version as metadata. *//* -------------------------------------------------------------------- */ CPLString osVersion; if( GH5_FetchAttribute( hBagRoot, "Bag Version", osVersion ) ) poDS->SetMetadataItem( "BagVersion", osVersion ); H5Gclose( hBagRoot );/* -------------------------------------------------------------------- *//* Fetch the elevation dataset and attach as a band. *//* -------------------------------------------------------------------- */ int nNextBand = 1; hid_t hElevation = H5Dopen( hHDF5, "/BAG_root/elevation" ); if( hElevation < 0 ) { delete poDS; return NULL; } BAGRasterBand *poElevBand = new BAGRasterBand( poDS, nNextBand ); if( !poElevBand->Initialize( hElevation, "elevation" ) ) { delete poElevBand; delete poDS; return NULL; } poDS->nRasterXSize = poElevBand->nRasterXSize; poDS->nRasterYSize = poElevBand->nRasterYSize; poDS->SetBand( nNextBand++, poElevBand );/* -------------------------------------------------------------------- *//* Try to do the same for the uncertainty band. *//* -------------------------------------------------------------------- */ hid_t hUncertainty = H5Dopen( hHDF5, "/BAG_root/uncertainty" ); BAGRasterBand *poUBand = new BAGRasterBand( poDS, nNextBand ); if( hUncertainty >= 0 && poUBand->Initialize( hUncertainty, "uncertainty") ) { poDS->SetBand( nNextBand++, poUBand ); } else delete poUBand;//.........这里部分代码省略.........
Developer: actian-geospatial, Project: ogr-ingres, Lines shown: 101
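Example 25 shows a common defensive pattern: probe the HDF5 file for a signature group and attribute before committing to a particular format driver. A minimal sketch of that probe outside GDAL might look like the following; it reuses the /BAG_root and "Bag Version" names from the example, but uses H5Lexists/H5Aexists together with the current three-argument H5Gopen instead of the older H5Gopen/H5Aopen_name calls seen above, and it does not validate the version string itself.

#include <hdf5.h>

// Returns true when the file contains /BAG_root with a "Bag Version"
// attribute, i.e. the same signature Example 25 checks for.
static bool looks_like_bag(const char *filename)
{
    hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
    if (file_id < 0) return false;

    bool is_bag = false;
    if (H5Lexists(file_id, "/BAG_root", H5P_DEFAULT) > 0)
    {
        hid_t grp_id = H5Gopen(file_id, "/BAG_root", H5P_DEFAULT);
        if (grp_id >= 0)
        {
            is_bag = (H5Aexists(grp_id, "Bag Version") > 0);
            H5Gclose(grp_id);
        }
    }
    H5Fclose(file_id);
    return is_bag;
}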
Example 26: main

int
main (void)
{
    hid_t       file, space, dset, obj;     /* Handles */
    herr_t      status;
    hsize_t     dims[1] = {DIM0};
    hobj_ref_t  wdata[DIM0],                /* Write buffer */
                *rdata;                     /* Read buffer */
    H5O_type_t  objtype;
    ssize_t     size;
    char        *name;
    int         ndims, i;

    /*
     * Create a new file using the default properties.
     */
    file = H5Fcreate (FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

    /*
     * Create a dataset with a null dataspace.
     */
    space = H5Screate (H5S_NULL);
    obj = H5Dcreate (file, "DS2", H5T_STD_I32LE, space, H5P_DEFAULT,
                H5P_DEFAULT, H5P_DEFAULT);
    status = H5Dclose (obj);
    status = H5Sclose (space);

    /*
     * Create a group.
     */
    obj = H5Gcreate (file, "G1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    status = H5Gclose (obj);

    /*
     * Create references to the previously created objects.  Passing -1
     * as space_id causes this parameter to be ignored.  Other values
     * besides valid dataspaces result in an error.
     */
    status = H5Rcreate (&wdata[0], file, "G1", H5R_OBJECT, -1);
    status = H5Rcreate (&wdata[1], file, "DS2", H5R_OBJECT, -1);

    /*
     * Create dataspace.  Setting maximum size to NULL sets the maximum
     * size to be the current size.
     */
    space = H5Screate_simple (1, dims, NULL);

    /*
     * Create the dataset and write the object references to it.
     */
    dset = H5Dcreate (file, DATASET, H5T_STD_REF_OBJ, space, H5P_DEFAULT,
                H5P_DEFAULT, H5P_DEFAULT);
    status = H5Dwrite (dset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT,
                wdata);

    /*
     * Close and release resources.
     */
    status = H5Dclose (dset);
    status = H5Sclose (space);
    status = H5Fclose (file);

    /*
     * Now we begin the read section of this example.  Here we assume
     * the dataset has the same name and rank, but can have any size.
     * Therefore we must allocate a new array to read in data using
     * malloc().
     */

    /*
     * Open file and dataset.
     */
    file = H5Fopen (FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
    dset = H5Dopen (file, DATASET, H5P_DEFAULT);

    /*
     * Get dataspace and allocate memory for read buffer.
     */
    space = H5Dget_space (dset);
    ndims = H5Sget_simple_extent_dims (space, dims, NULL);
    rdata = (hobj_ref_t *) malloc (dims[0] * sizeof (hobj_ref_t));

    /*
     * Read the data.
     */
    status = H5Dread (dset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT,
                rdata);

    /*
     * Output the data to the screen.
     */
    for (i=0; i<dims[0]; i++) {
        printf ("%s[%d]:\n  ->", DATASET, i);

        /*
         * Open the referenced object, get its name and type.
         */
        obj = H5Rdereference (dset, H5R_OBJECT, &rdata[i]);
//......... the remainder of this example is omitted .........
Developer: LaHaine, Project: ohpc, Lines shown: 101
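Example 26 is cut off right after H5Rdereference. The read loop typically continues by asking the referenced object for its name before closing it; the sketch below shows that step as a hypothetical standalone helper (it is not the elided original code). It uses the 1.8-style three-argument H5Rdereference seen in the example; newer HDF5 releases also provide H5Rdereference2, which takes an extra object access property list.

#include <hdf5.h>
#include <cstdio>
#include <cstdlib>

// Given the dataset the reference was read from and one object
// reference, open the referenced object and print its full path name.
static void print_referenced_name(hid_t dset, const hobj_ref_t *ref)
{
    hid_t obj = H5Rdereference(dset, H5R_OBJECT, ref);
    if (obj < 0) return;

    // Ask for the name length first, then retrieve the name itself.
    ssize_t len = H5Iget_name(obj, NULL, 0);
    if (len > 0)
    {
        char *name = static_cast<char *>(std::malloc(len + 1));
        H5Iget_name(obj, name, len + 1);
        std::printf("referenced object: %s\n", name);
        std::free(name);
    }
    H5Oclose(obj);
}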
Example 27: ReadStringArrayT

static AbcA::ArraySamplePtr
ReadStringArrayT( AbcA::ReadArraySampleCachePtr iCache,
                  hid_t iParent,
                  const std::string &iName,
                  const AbcA::DataType &iDataType )
{
    assert( iDataType.getExtent() > 0 );

    // Open the data set.
    hid_t dsetId = H5Dopen( iParent, iName.c_str(), H5P_DEFAULT );
    ABCA_ASSERT( dsetId >= 0, "Cannot open dataset: " << iName );
    DsetCloser dsetCloser( dsetId );

    // Read the digest, if there's a cache.
    AbcA::ArraySample::Key key;
    bool foundDigest = ReadKey( dsetId, "key", key );

    // If we found a digest and there's a cache, see
    // if we're in there, and return it if so.
    if ( foundDigest && iCache )
    {
        AbcA::ReadArraySampleID found = iCache->find( key );

        if ( found )
        {
            AbcA::ArraySamplePtr ret = found.getSample();
            assert( ret );
            if ( ret->getDataType() != iDataType )
            {
                ABCA_THROW( "ERROR: Read data type for dset: " << iName
                            << ": " << ret->getDataType()
                            << " does not match expected data type: "
                            << iDataType );
            }

            // Got it!
            return ret;
        }
    }

    // Okay, we haven't found it in a cache.

    // Read the data type.
    // Checking code.
    {
        hid_t dsetFtype = H5Dget_type( dsetId );
        DtypeCloser dtypeCloser( dsetFtype );

        hid_t nativeDtype = GetNativeDtype<CharT>();
        ABCA_ASSERT( H5Tget_class( dsetFtype ) == H5Tget_class( nativeDtype ) &&
                     H5Tget_sign( dsetFtype ) == H5Tget_sign( nativeDtype )

                     // CJH They can now be different
                     // sizes, because wchar_t is sometimes 16-bit,
                     // but we always store 32 bit.
                     // && H5Tget_size( dsetFtype ) ==
                     // H5Tget_size( nativeDtype ),
                     , "Invalid datatype for stringT" );
    }

    // String array datatypes require a "dimensions" to be stored
    // externally, since the strings themselves are stored in a compacted
    // array of rank 1.
    // This is an attribute called "dims" that lives in the dset itself.
    Dimensions realDims;
    ReadDimensions( dsetId, "dims", realDims );
    ABCA_ASSERT( realDims.rank() > 0,
                 "Degenerate rank in Dataset read" );

    // Read the data space.
    hid_t dspaceId = H5Dget_space( dsetId );
    ABCA_ASSERT( dspaceId >= 0,
                 "Could not get dataspace for dataSet: " << iName );
    DspaceCloser dspaceCloser( dspaceId );

    AbcA::ArraySamplePtr ret;

    H5S_class_t dspaceClass = H5Sget_simple_extent_type( dspaceId );
    if ( dspaceClass == H5S_SIMPLE )
    {
        ABCA_ASSERT( realDims.numPoints() > 0,
                     "Degenerate dims in Dataset read" );
        size_t totalNumStrings = realDims.numPoints() * iDataType.getExtent();

        // Get the dimensions
        Dimensions dims;
        int rank = H5Sget_simple_extent_ndims( dspaceId );
        ABCA_ASSERT( rank == realDims.rank(),
                     "H5Sget_simple_extent_ndims() failed." );

        HDimensions hdims;
        hdims.setRank( rank );
        rank = H5Sget_simple_extent_dims( dspaceId, hdims.rootPtr(), NULL );
        ABCA_ASSERT( rank == hdims.rank(),
                     "H5Sget_simple_extent_dims() "
                     "found inconsistent ranks."
//......... the remainder of this example is omitted .........
Developer: ryutaro765, Project: Alembic, Lines shown: 101
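The "checking code" block in Example 27 compares the dataset's file datatype against the expected native type before reading. Stripped of the Alembic wrappers, that check reduces to the sketch below; the file name strings.h5 and dataset /names are placeholders, and error checks are omitted for brevity.

#include <hdf5.h>
#include <cstdio>

int main()
{
    hid_t file_id = H5Fopen("strings.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    hid_t dset_id = H5Dopen(file_id, "/names", H5P_DEFAULT);

    // The file datatype is whatever was used at write time; comparing
    // classes rather than full types tolerates size/endianness changes.
    hid_t ftype_id = H5Dget_type(dset_id);
    if (H5Tget_class(ftype_id) != H5Tget_class(H5T_NATIVE_CHAR))
        std::fprintf(stderr, "unexpected datatype class\n");
    else
        std::printf("datatype class matches, size in file: %zu bytes\n",
                    H5Tget_size(ftype_id));

    H5Tclose(ftype_id);
    H5Dclose(dset_id);
    H5Fclose(file_id);
    return 0;
}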
Example 28: init_aux_existing

/* ------- begin -------------------- init_aux_existing.c --- */
void init_aux_existing(void) {
  const char routineName[] = "init_aux_existing";
  int      i, nlevel, nline, ncont;
  size_t   attr_size;
  hid_t    ncid, ncid_atom, ncid_mol, plist;
  H5T_class_t type_class;
  char     group_name[ARR_STRLEN], *atmosID;
  Atom    *atom;
  Molecule *molecule;

  /* Open the file with parallel MPI-IO access */
  if (( plist = H5Pcreate(H5P_FILE_ACCESS )) < 0) HERR(routineName);
  if (( H5Pset_fapl_mpio(plist, mpi.comm, mpi.info) ) < 0) HERR(routineName);
  if (( ncid = H5Fopen(AUX_FILE, H5F_ACC_RDWR, plist) ) < 0) HERR(routineName);
  if (( H5Pclose(plist) ) < 0) HERR(routineName);
  io.aux_ncid = ncid;

  /* --- Consistency checks --- */
  /* Check that atmosID is the same */
  if (( H5LTget_attribute_info(ncid, "/", "atmosID", NULL, &type_class,
                               &attr_size) ) < 0) HERR(routineName);
  atmosID = (char *) malloc(attr_size + 1);
  if (( H5LTget_attribute_string(ncid, "/", "atmosID", atmosID) ) < 0)
    HERR(routineName);
  if (!strstr(atmosID, atmos.ID)) {
    sprintf(messageStr,
            "Indata file was calculated for different atmosphere (%s) than current",
            atmosID);
    Error(WARNING, routineName, messageStr);
  }
  free(atmosID);

  /* Create arrays for multiple-atom/molecule output */
  io.aux_atom_ncid = (hid_t *) malloc(atmos.Nactiveatom * sizeof(hid_t));
  io.aux_mol_ncid = (hid_t *) malloc(atmos.Nactivemol * sizeof(hid_t));
  if (input.p15d_wpop) {
    io.aux_atom_pop = (hid_t *) malloc(atmos.Nactiveatom * sizeof(hid_t));
    io.aux_atom_poplte = (hid_t *) malloc(atmos.Nactiveatom * sizeof(hid_t));
    io.aux_mol_pop = (hid_t *) malloc(atmos.Nactivemol * sizeof(hid_t));
    io.aux_mol_poplte = (hid_t *) malloc(atmos.Nactivemol * sizeof(hid_t));
  }
  if (input.p15d_wrates) {
    io.aux_atom_RijL = (hid_t *) malloc(atmos.Nactiveatom * sizeof(hid_t));
    io.aux_atom_RjiL = (hid_t *) malloc(atmos.Nactiveatom * sizeof(hid_t));
    io.aux_atom_RijC = (hid_t *) malloc(atmos.Nactiveatom * sizeof(hid_t));
    io.aux_atom_RjiC = (hid_t *) malloc(atmos.Nactiveatom * sizeof(hid_t));
  }

  /* --- Group loop over active ATOMS --- */
  for (i=0; i < atmos.Nactiveatom; i++) {
    atom = atmos.activeatoms[i];

    /* --- Get ncid of the atom group --- */
    sprintf(group_name, (atom->ID[1] == ' ') ? "atom_%.1s" : "atom_%.2s",
            atom->ID);
    if (( io.aux_atom_ncid[i] = H5Gopen(io.aux_ncid, group_name,
                                        H5P_DEFAULT) ) < 0) HERR(routineName);
    ncid_atom = io.aux_atom_ncid[i];

    /* Check that dimension sizes match */
    if (( H5LTget_attribute_int(ncid_atom, ".", "nlevel", &nlevel) ) < 0)
      HERR(routineName);
    if (nlevel != atom->Nlevel) {
      sprintf(messageStr, "Number of levels mismatch: expected %d, found %d.",
              atom->Nlevel, (int)nlevel);
      Error(ERROR_LEVEL_2, routineName, messageStr);
    }
    if (( H5LTget_attribute_int(ncid_atom, ".", "nline", &nline) ) < 0)
      HERR(routineName);
    if (nline != atom->Nline) {
      sprintf(messageStr, "Number of lines mismatch: expected %d, found %d.",
              atom->Nline, (int)nline);
      Error(ERROR_LEVEL_2, routineName, messageStr);
    }
    if (( H5LTget_attribute_int(ncid_atom, ".", "ncontinuum", &ncont) ) < 0)
      HERR(routineName);
    if (ncont != atom->Ncont) {
      sprintf(messageStr, "Number of continua mismatch: expected %d, found %d.",
              atom->Ncont, (int)ncont);
      Error(ERROR_LEVEL_2, routineName, messageStr);
    }

    /* --- Open datasets collectively --- */
    if (input.p15d_wpop) {
      if (( io.aux_atom_pop[i] = H5Dopen(ncid_atom, POP_NAME,
                                         H5P_DEFAULT) ) < 0) HERR(routineName);
      if (( io.aux_atom_poplte[i] = H5Dopen(ncid_atom, POPLTE_NAME,
                                            H5P_DEFAULT) ) < 0) HERR(routineName);
    }
    if (input.p15d_wrates) {
      if (( io.aux_atom_RijL[i] = H5Dopen(ncid_atom, RIJ_L_NAME,
                                          H5P_DEFAULT) ) < 0) HERR(routineName);
      if (( io.aux_atom_RjiL[i] = H5Dopen(ncid_atom, RJI_L_NAME,
                                          H5P_DEFAULT) ) < 0) HERR(routineName);
      if (atom->Ncont > 0) {
        if (( io.aux_atom_RijC[i] = H5Dopen(ncid_atom, RIJ_C_NAME,
                                            H5P_DEFAULT) ) < 0) HERR(routineName);
//......... the remainder of this example is omitted .........
Developer: tiagopereira, Project: rh, Lines shown: 101
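Example 28 opens one file across all MPI ranks through an MPI-IO file access property list and then opens datasets inside it with H5Dopen. A minimal sketch of that parallel open pattern is shown below; it requires an MPI-enabled HDF5 build, the file and dataset paths are placeholders (not the RH macros used above), and the actual collective reads/writes are left out.

#include <hdf5.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    // Every rank opens the same file through an MPI-IO file access
    // property list, as init_aux_existing() does above.
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);

    hid_t file_id = H5Fopen("output_aux.h5", H5F_ACC_RDWR, fapl_id);
    H5Pclose(fapl_id);

    // Opening a dataset is a collective metadata operation here: all
    // ranks must call H5Dopen with the same dataset name.
    hid_t dset_id = H5Dopen(file_id, "/some_group/populations", H5P_DEFAULT);

    // ... collective H5Dread / H5Dwrite calls would go here ...

    if (dset_id >= 0) H5Dclose(dset_id);
    if (file_id >= 0) H5Fclose(file_id);
    MPI_Finalize();
    return 0;
}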
Note: The H5Dopen examples in this article were collected from GitHub, MSDocs, and other source-code and documentation hosting platforms, and the snippets were selected from open-source projects contributed by their respective authors. Copyright in the source code remains with the original authors; redistribution and use should follow the corresponding project's License. Please do not repost without permission.