BES — updated for version 3.21.1
The Backend Server (BES) is the lower two tiers of the Hyrax data server
HDF5CFArray.cc
Go to the documentation of this file.
1// This file is part of the hdf5_handler implementing for the CF-compliant
2// Copyright (c) 2011-2023 The HDF Group, Inc. and OPeNDAP, Inc.
3//
4// This is free software; you can redistribute it and/or modify it under the
5// terms of the GNU Lesser General Public License as published by the Free
6// Software Foundation; either version 2.1 of the License, or (at your
7// option) any later version.
8//
9// This software is distributed in the hope that it will be useful, but
10// WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
12// License for more details.
13//
14// You should have received a copy of the GNU Lesser General Public
15// License along with this library; if not, write to the Free Software
16// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17//
18// You can contact OPeNDAP, Inc. at PO Box 112, Saunderstown, RI. 02874-0112.
19// You can contact The HDF Group, Inc. at 410 E University Ave,
20// Suite 200, Champaign, IL 61820
21
30
31#include <iostream>
32#include <BESDebug.h>
33#include <sys/stat.h>
34#include <libdap/InternalErr.h>
35
36#include <libdap/Str.h>
37#include "HDF5RequestHandler.h"
38#include "HDF5CFArray.h"
39#include "h5cfdaputil.h"
40#include "ObjMemCache.h"
41
42using namespace std;
43using namespace libdap;
44
45
46BaseType *HDF5CFArray::ptr_duplicate()
47{
48 auto HDF5CFArray_unique = make_unique<HDF5CFArray>(*this);
49 return HDF5CFArray_unique.release();
50}
51
// Read in an HDF5 Array.
//
// Decides whether the in-memory data cache can serve/store this variable and
// dispatches accordingly. The decision is recorded in use_cache_flag:
//   0 - no memory cache applies; read straight from disk cache/HDF5 APIs.
//   1 - small raw-data cache (non-lat/lon coordinate variables).
//   2 - large raw-data cache, general lat/lon, keyed by filename + varname.
//   3 - large raw-data cache, lat/lon under a configured common cache dir,
//       keyed by cache_dir + varname (shared across files in that dir).
//   4 - large raw-data cache, variable explicitly listed in the
//       "var cache file" configuration list.
// Always returns true; failures surface as exceptions from the callees.
bool HDF5CFArray::read()
{

    BESDEBUG("h5","Coming to HDF5CFArray read "<<endl);
    if(length() == 0)
        return true;

    // Neither memory cache is configured: bypass all cache-key logic.
    if((nullptr == HDF5RequestHandler::get_lrdata_mem_cache()) &&
        nullptr == HDF5RequestHandler::get_srdata_mem_cache()){
        read_data_NOT_from_mem_cache(false,nullptr);
        return true;
    }

    // Flag to check if using large raw data cache or small raw data cache.
    short use_cache_flag = 0;

    // The small data cache is checked first to reduce the resources to operate the big data cache.
    if(HDF5RequestHandler::get_srdata_mem_cache() != nullptr) {
        // Small cache takes coordinate variables that are NOT lat/lon.
        if(((cvtype == CV_EXIST) && (islatlon != true)) || (cvtype == CV_NONLATLON_MISS)
            || (cvtype == CV_FILLINDEX) ||(cvtype == CV_MODIFY) ||(cvtype == CV_SPECIAL)){

            // Only the numeric datatypes supported by DAP2/CF are cached.
            if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
                use_cache_flag = 1;
        }
    }

    // If this variable doesn't fit the small data cache, let's check if it fits the large data cache.
    if(use_cache_flag !=1) {

        if(HDF5RequestHandler::get_lrdata_mem_cache() != nullptr) {

            // This is the trival case.
            // If no information is provided in the configuration file of large data cache,
            // just cache the lat/lon variable per file.
            if(HDF5RequestHandler::get_common_cache_dirs() == false) {
                if(cvtype == CV_LAT_MISS || cvtype == CV_LON_MISS
                    || (cvtype == CV_EXIST && islatlon == true)) {
#if 0
//cerr<<"coming to use_cache_flag =2 "<<endl;
#endif
                    // Only the data with the numeric datatype DAP2 and CF support are cached.
                    if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
                        use_cache_flag = 2;
                }
            }
            else {// Have large data cache configuration info.

                // Need to check if we don't want to cache some CVs, now
                // this only applies to lat/lon CV.
                if(cvtype == CV_LAT_MISS || cvtype == CV_LON_MISS
                    || (cvtype == CV_EXIST && islatlon == true)) {

                    vector<string> cur_lrd_non_cache_dir_list;
                    HDF5RequestHandler::get_lrd_non_cache_dir_list(cur_lrd_non_cache_dir_list);

                    // Check if this file is included in the non-cache directory
                    if( (cur_lrd_non_cache_dir_list.empty()) ||
                        ("" == check_str_sect_in_list(cur_lrd_non_cache_dir_list,filename,'/'))) {

                        // Only data with the numeric datatype DAP2 and CF support are cached.
                        if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
                            use_cache_flag = 3;
                    }
                }
                // Here we allow all the variable names to be cached.
                // The file path that includes the variables can also be included.
                // Note: this check can upgrade a flag-3 decision to flag 4.
                vector<string> cur_lrd_var_cache_file_list;
                HDF5RequestHandler::get_lrd_var_cache_file_list(cur_lrd_var_cache_file_list);
                if(cur_lrd_var_cache_file_list.empty() == false){
#if 0
//cerr<<"lrd var cache is "<<cur_lrd_var_cache_file_list[i]<<endl;
#endif
                    if(true == check_var_cache_files(cur_lrd_var_cache_file_list,filename,varname)){
#if 0
//cerr<<"varname is "<<varname <<endl;
//cerr<<"have var cached "<<endl;
#endif

                        // Only the data with the numeric datatype DAP2 and CF support are cached.
                        if(HDF5CFUtil::cf_dap2_support_numeric_type(dtype,is_dap4)==true)
                            use_cache_flag = 4;
                    }
                }
            }
        }
    }

    if(0 == use_cache_flag)
        read_data_NOT_from_mem_cache(false,nullptr);
    else {// memory cache cases

        string cache_key;

        // Possibly we have common lat/lon dirs,so check here.
        if( 3 == use_cache_flag){
            vector<string> cur_cache_dlist;
            HDF5RequestHandler::get_lrd_cache_dir_list(cur_cache_dlist);
            string cache_dir = check_str_sect_in_list(cur_cache_dlist,filename,'/');
            if(cache_dir != "")
                cache_key = cache_dir + varname;
            else {
                cache_key = filename + varname;
                // If this lat/lon is not in the common dir. list, it is still cached as a general lat/lon.
                // Change the flag to 2.
                use_cache_flag = 2;
            }

        }
        else
            cache_key = filename + varname;

        handle_data_with_mem_cache(dtype,total_elems,use_cache_flag,cache_key,is_dap4);

    }

    return true;
}
171
// Reading data not from memory cache: The data can be read from the disk cache or can be read via the HDF5 APIs
//
// Parameters:
//   add_mem_cache - when true, the WHOLE variable is additionally read into
//                   'buf' (via an H5S_ALL read) so the caller can insert it
//                   into the data memory cache.
//   buf           - caller-owned buffer for that memory-cache copy; must be
//                   non-null when add_mem_cache is true.
//
// Flow: apply the DAP constraint, try the disk cache (early return on a hit),
// otherwise open the file/dataset, select the hyperslab, read by dtype, hand
// the values to libdap (set_value_ll/val2buf) and optionally write the freshly
// read data back into the disk cache. Throws InternalErr on any HDF5 failure.
void HDF5CFArray::read_data_NOT_from_mem_cache(bool add_mem_cache,void*buf) {

    vector<int64_t>offset;
    vector<int64_t>count;
    vector<int64_t>step;
    // hsize_t copies of the constraint, as required by the HDF5 dataspace APIs.
    vector<hsize_t> hoffset;
    vector<hsize_t>hcount;
    vector<hsize_t>hstep;
    int64_t nelms = 1;

    if (rank <= 0)
        throw InternalErr (__FILE__, __LINE__,
                          "The number of dimension of the variable is <=0 for an array.");
    else {

        offset.resize(rank);
        count.resize(rank);
        step.resize(rank);
        hoffset.resize(rank);
        hcount.resize(rank);
        hstep.resize(rank);
        // Obtain the constraint (start/stride/count) and the number of
        // constrained elements from libdap.
        nelms = format_constraint (offset.data(), step.data(), count.data());
        for (int i = 0; i <rank; i++) {
            hoffset[i] = (hsize_t) offset[i];
            hcount[i] = (hsize_t) count[i];
            hstep[i] = (hsize_t) step[i];
        }
    }

    BESDEBUG("h5","after format_constraint "<<endl);
    hid_t dsetid = -1;
    hid_t dspace = -1;
    hid_t mspace = -1;
    hid_t dtypeid = -1;
    hid_t memtype = -1;

    bool data_from_disk_cache = false;
    bool data_to_disk_cache = false;

    // Check if the disk cache can be applied.
    bool use_disk_cache = valid_disk_cache();

    string cache_fpath;

    if(true == use_disk_cache) {
        BESDEBUG("h5","Coming to use disk cache "<<endl);


        unsigned long long disk_cache_size = HDF5RequestHandler::get_disk_cache_size();
        string diskcache_dir = HDF5RequestHandler::get_disk_cache_dir();
        string diskcache_prefix = HDF5RequestHandler::get_disk_cachefile_prefix();

        string cache_fname=HDF5CFUtil::obtain_cache_fname(diskcache_prefix,filename,varname);
        cache_fpath = diskcache_dir + "/"+ cache_fname;

        // Total number of elements of the WHOLE variable (the cache file
        // stores the full, unconstrained variable).
        int64_t temp_total_elems = 1;
        for (const auto &dimsize:dimsizes)
            temp_total_elems = temp_total_elems*dimsize;

        short dtype_size = HDF5CFUtil::H5_numeric_atomic_type_size(dtype);
        // For DAP2, signed 8-bit data is served as int16, so the cached
        // element size is 2 bytes, not 1.
        // CHECK: I think when signed 8-bit needs to be converted to int16, dtype_size should also change.
        if(is_dap4 == false && dtype==H5CHAR)
            dtype_size = 2;

        int64_t expected_file_size = dtype_size *temp_total_elems;
        int fd = 0;
        HDF5DiskCache *disk_cache = HDF5DiskCache::get_instance(disk_cache_size,diskcache_dir,diskcache_prefix);
        if( true == disk_cache->get_data_from_cache(cache_fpath, expected_file_size,fd)) {

            // Cache hit: compute the linearized first/last offsets of the
            // constrained region so only that byte span is read from the file.
            vector<size_t> offset_size_t;
            offset_size_t.resize(rank);
            for(int i = 0; i <rank;i++)
                offset_size_t[i] = (size_t)offset[i];
            size_t offset_1st = INDEX_nD_TO_1D(dimsizes,offset_size_t);
            vector<size_t>end;
            end.resize(rank);
            for (int i = 0; i < rank; i++)
                end[i] = offset[i] +(count[i]-1)*step[i];
            size_t offset_last = INDEX_nD_TO_1D(dimsizes,end);
#if 0
//cerr<<"offset_1d is "<<offset_1st <<endl;
//cerr<<"offset_last is "<<offset_last <<endl;
#endif
            size_t total_read = dtype_size*(offset_last-offset_1st+1);

            off_t fpos = lseek(fd,dtype_size*offset_1st,SEEK_SET);
            if (-1 == fpos) {
                // Seek failed: treat the cache file as corrupt and evict it.
                disk_cache->unlock_and_close(cache_fpath);
                disk_cache->purge_file(cache_fpath);
            }
            else
                data_from_disk_cache = obtain_cached_data(disk_cache,cache_fpath,fd, step,count,total_read,dtype_size);

        }

        if(true == data_from_disk_cache)
            return;
        else
            data_to_disk_cache = true;  // cache miss: populate it after reading

    }

// END CACHE

    // When pass_fileid is set, 'fileid' was already opened by the handler;
    // otherwise the file is opened (and later closed) here.
    bool pass_fileid = HDF5RequestHandler::get_pass_fileid();
    if(false == pass_fileid) {
        if ((fileid = H5Fopen(filename.c_str(),H5F_ACC_RDONLY,H5P_DEFAULT))<0) {
            ostringstream eherr;
            eherr << "HDF5 File " << filename
                  << " cannot be opened. "<<endl;
            throw InternalErr (__FILE__, __LINE__, eherr.str ());
        }
    }

    if ((dsetid = H5Dopen(fileid,varname.c_str(),H5P_DEFAULT))<0) {
        HDF5CFUtil::close_fileid(fileid,pass_fileid);
        ostringstream eherr;
        eherr << "HDF5 dataset " << varname
              << " cannot be opened. "<<endl;
        throw InternalErr (__FILE__, __LINE__, eherr.str ());
    }

    if ((dspace = H5Dget_space(dsetid))<0) {

        H5Dclose(dsetid);
        HDF5CFUtil::close_fileid(fileid,pass_fileid);
        ostringstream eherr;
        eherr << "Space id of the HDF5 dataset " << varname
              << " cannot be obtained. "<<endl;
        throw InternalErr (__FILE__, __LINE__, eherr.str ());
    }

    // Select the constrained region in the file dataspace.
    if (H5Sselect_hyperslab(dspace, H5S_SELECT_SET,
                            hoffset.data(), hstep.data(),
                            hcount.data(), nullptr) < 0) {

        H5Sclose(dspace);
        H5Dclose(dsetid);
        HDF5CFUtil::close_fileid(fileid,pass_fileid);
        ostringstream eherr;
        eherr << "The selection of hyperslab of the HDF5 dataset " << varname
              << " fails. "<<endl;
        throw InternalErr (__FILE__, __LINE__, eherr.str ());
    }

    // Memory dataspace matching the constrained shape.
    mspace = H5Screate_simple(rank, hcount.data(),nullptr);
    if (mspace < 0) {
        H5Sclose(dspace);
        H5Dclose(dsetid);
        HDF5CFUtil::close_fileid(fileid,pass_fileid);
        ostringstream eherr;
        eherr << "The creation of the memory space of the HDF5 dataset " << varname
              << " fails. "<<endl;
        throw InternalErr (__FILE__, __LINE__, eherr.str ());
    }


    if ((dtypeid = H5Dget_type(dsetid)) < 0) {

        H5Sclose(mspace);
        H5Sclose(dspace);
        H5Dclose(dsetid);
        HDF5CFUtil::close_fileid(fileid,pass_fileid);
        ostringstream eherr;
        eherr << "Obtaining the datatype of the HDF5 dataset " << varname
              << " fails. "<<endl;
        throw InternalErr (__FILE__, __LINE__, eherr.str ());

    }

    if ((memtype = H5Tget_native_type(dtypeid, H5T_DIR_ASCEND))<0) {

        H5Sclose(mspace);
        H5Tclose(dtypeid);
        H5Sclose(dspace);
        H5Dclose(dsetid);
        HDF5CFUtil::close_fileid(fileid,pass_fileid);
        ostringstream eherr;
        eherr << "Obtaining the memory type of the HDF5 dataset " << varname
              << " fails. "<<endl;
        throw InternalErr (__FILE__, __LINE__, eherr.str ());

    }

    hid_t read_ret = -1;

    // Before reading the data, we will check if the memory cache is turned on,
    // The add_mem_cache is only true when the data memory cache keys are on and used.
    if(true == add_mem_cache) {
        if(buf== nullptr) {
            // NOTE(review): memtype is open here but is not closed on this or
            // the following error path — possible HDF5 handle leak; confirm.
            H5Sclose(mspace);
            H5Tclose(dtypeid);
            H5Sclose(dspace);
            H5Dclose(dsetid);
            HDF5CFUtil::close_fileid(fileid,pass_fileid);
            throw InternalErr(__FILE__,__LINE__,"The memory data cache buffer needs to be set");
        }
        // Read the WHOLE variable (H5S_ALL) into the caller's buffer.
        read_ret= H5Dread(dsetid,memtype,H5S_ALL,H5S_ALL,H5P_DEFAULT,buf);
        if(read_ret <0){
            H5Sclose(mspace);
            H5Tclose(dtypeid);
            H5Sclose(dspace);
            H5Dclose(dsetid);
            HDF5CFUtil::close_fileid(fileid,pass_fileid);
            throw InternalErr(__FILE__,__LINE__,"Cannot read the data to the buffer.");
        }
    }


    // Now reading the data, note dtype is not dtypeid.
    // dtype is an enum defined by the handler.

    switch (dtype) {

        case H5CHAR:
        {

            vector<char> val;
            val.resize(nelms);

            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
            if (read_ret < 0) {

                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_CHAR "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }

            // DAP4 has int8; DAP2 does not, so signed chars are widened to short.
            if(is_dap4 == true)
                set_value_ll((dods_int8 *)val.data(),nelms);
            else {

                vector<short>newval;
                newval.resize(nelms);

                for (int64_t counter = 0; counter < nelms; counter++)
                    newval[counter] = (short) (val[counter]);

                set_value_ll(newval.data(), nelms);
            }

            if(true == data_to_disk_cache) {
                try {
                    BESDEBUG("h5","writing data to disk cache "<<endl);
                    // Element size 2: cached as int16 (see dtype_size note above).
                    write_data_to_cache(dsetid,dspace,mspace,memtype,cache_fpath,2,val,nelms);
                }
                catch(...) {
                    H5Sclose(mspace);
                    H5Tclose(memtype);
                    H5Tclose(dtypeid);
                    H5Sclose(dspace);
                    H5Dclose(dsetid);
                    HDF5CFUtil::close_fileid(fileid,pass_fileid);
                    ostringstream eherr;
                    eherr << "write data to cache failed.";
                    throw InternalErr (__FILE__, __LINE__, eherr.str ());

                }
            }

        } // case H5CHAR
            break;

        // Note: for DAP2, H5INT64,H5UINT64 will be ignored.
        // All remaining numeric types share one path: read raw bytes and
        // hand them to libdap via val2buf (no per-type conversion needed).
        case H5UCHAR:
        case H5UINT16:
        case H5INT16:
        case H5INT32:
        case H5UINT32:
        case H5INT64:
        case H5UINT64:
        case H5FLOAT32:
        case H5FLOAT64:


        {
            size_t dtype_size = HDF5CFUtil::H5_numeric_atomic_type_size(dtype);
            vector<char> val;
            val.resize(nelms*dtype_size);

            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
            if (read_ret < 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_UCHAR "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }
            // Not sure if "set_value ((dods_byte *) val.data(), nelms);" works.
            BESDEBUG("h5","after H5Dread "<<endl);
            val2buf(val.data());
            BESDEBUG("h5","after val2buf "<<endl);
            set_read_p(true);

            if(true == data_to_disk_cache) {
                BESDEBUG("h5","writing data to disk cache "<<endl);
                try {
                    write_data_to_cache(dsetid,dspace,mspace,memtype,cache_fpath,dtype_size,val,nelms);
                }
                catch(...) {
                    H5Sclose(mspace);
                    H5Tclose(memtype);
                    H5Tclose(dtypeid);
                    H5Sclose(dspace);
                    H5Dclose(dsetid);
                    HDF5CFUtil::close_fileid(fileid,pass_fileid);
                    ostringstream eherr;
                    eherr << "Write data to cache failed."
                          << "It is very possible the error is caused by the server failure"
                          << " such as filled disk partition at the server rather than Hyrax. Please contact "
                          << " the corresponding data center first. If the issue is not due to "
                          << " the server,";
                    throw InternalErr (__FILE__, __LINE__, eherr.str ());

                }

            }
        } // case H5UCHAR...
            break;



#if 0
        case H5INT16:
        {
            vector<short>val;
            val.resize(nelms);

            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
            if (read_ret < 0) {

                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                //H5Fclose(fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_SHORT "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }
            set_value ((dods_int16 *) val.data(), nelms);
        }// H5INT16
            break;


        case H5UINT16:
        {
            vector<unsigned short> val;
            val.resize(nelms);
            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
            if (read_ret < 0) {

                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_USHORT "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }
            set_value ((dods_uint16 *) val.data(), nelms);
        } // H5UINT16
            break;


        case H5INT32:
        {
            vector<int>val;
            val.resize(nelms);
            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
            if (read_ret < 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_INT "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }
            set_value ((dods_int32 *) val.data(), nelms);
        } // case H5INT32
            break;

        case H5UINT32:
        {
            vector<unsigned int>val;
            val.resize(nelms);
            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
            if (read_ret < 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_UINT "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }
            set_value ((dods_uint32 *) val.data(), nelms);
        }
            break;

        case H5FLOAT32:
        {

            vector<float>val;
            val.resize(nelms);

            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());
            if (read_ret < 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_FLOAT "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }
            set_value ((dods_float32 *) val.data(), nelms);
        }
            break;


        case H5FLOAT64:
        {

            vector<double>val;
            val.resize(nelms);
            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,val.data());

            if (read_ret < 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of H5T_NATIVE_DOUBLE "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());

            }
            set_value ((dods_float64 *) val.data(), nelms);
        } // case H5FLOAT64
            break;

#endif

        case H5FSTRING:
        {
            // Fixed-size strings: each element occupies ty_size bytes.
            size_t ty_size = H5Tget_size(dtypeid);
            if (ty_size == 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot obtain the size of the fixed size HDF5 string of the dataset "
                      << varname <<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());
            }

            vector <char> strval;
            strval.resize(nelms*ty_size);
            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,(void*)strval.data());

            if (read_ret < 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of the fixed size HDF5 string "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());
            }

            // Slice the flat byte buffer into nelms strings of ty_size each.
            string total_string(strval.begin(),strval.end());
            strval.clear(); // May not be necessary
            vector <string> finstrval;
            finstrval.resize(nelms);
            for (int64_t i = 0; i<nelms; i++)
                finstrval[i] = total_string.substr(i*ty_size,ty_size);

            // Check if we should drop the long string

            // If the size of an individual element is longer than the current netCDF JAVA
            // string and the "EnableDropLongString" key is turned on,
            // No string is generated.
            if ((true == HDF5RequestHandler::get_drop_long_string()) &&
                ty_size > NC_JAVA_STR_SIZE_LIMIT) {
                for (int64_t i = 0; i<nelms; i++)
                    finstrval[i] = "";
            }
            set_value_ll(finstrval,nelms);
            total_string.clear();
        }
            break;


        case H5VSTRING:
        {
            // Variable-length strings: each element is a char* into
            // HDF5-allocated memory, reclaimed below with H5Dvlen_reclaim.
            size_t ty_size = H5Tget_size(memtype);
            if (ty_size == 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot obtain the size of the fixed size HDF5 string of the dataset "
                      << varname <<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());
            }
            vector <char> strval;
            strval.resize(nelms*ty_size);
            read_ret = H5Dread(dsetid,memtype,mspace,dspace,H5P_DEFAULT,(void*)strval.data());

            if (read_ret < 0) {
                H5Sclose(mspace);
                H5Tclose(memtype);
                H5Tclose(dtypeid);
                H5Sclose(dspace);
                H5Dclose(dsetid);
                HDF5CFUtil::close_fileid(fileid,pass_fileid);
                ostringstream eherr;
                eherr << "Cannot read the HDF5 dataset " << varname
                      << " with the type of the HDF5 variable length string "<<endl;
                throw InternalErr (__FILE__, __LINE__, eherr.str ());
            }

            vector<string>finstrval;
            finstrval.resize(nelms);
            char*temp_bp = strval.data();
            char*onestring = nullptr;
            for (int64_t i =0;i<nelms;i++) {
                onestring = *(char**)temp_bp;
                if(onestring!=nullptr )
                    finstrval[i] =string(onestring);

                else // We will add a nullptr if onestring is nullptr.
                    finstrval[i]="";
                temp_bp +=ty_size;
            }

            // Free the HDF5-allocated vlen string buffers.
            if (false == strval.empty()) {
                herr_t ret_vlen_claim;
                ret_vlen_claim = H5Dvlen_reclaim(memtype,mspace,H5P_DEFAULT,(void*)strval.data());
                if (ret_vlen_claim < 0){
                    H5Sclose(mspace);
                    H5Tclose(memtype);
                    H5Tclose(dtypeid);
                    H5Sclose(dspace);
                    H5Dclose(dsetid);
                    HDF5CFUtil::close_fileid(fileid,pass_fileid);
                    ostringstream eherr;
                    eherr << "Cannot reclaim the memory buffer of the HDF5 variable length string of the dataset "
                          << varname <<endl;
                    throw InternalErr (__FILE__, __LINE__, eherr.str ());

                }
            }

            // If the size of one string element is longer than the current netCDF JAVA
            // string and the "EnableDropLongString" key is turned on,
            // No string is generated.
            if (true == HDF5RequestHandler::get_drop_long_string()) {
                bool drop_long_str = false;
                for (int64_t i =0;i<nelms;i++) {
                    if(finstrval[i].size() >NC_JAVA_STR_SIZE_LIMIT){
                        drop_long_str = true;
                        break;
                    }
                }
                if (drop_long_str == true) {
                    for (int64_t i =0;i<nelms;i++)
                        finstrval[i] = "";
                }
            }
            set_value_ll(finstrval,nelms);

        }
            break;

        default:
        {
            H5Tclose(memtype);
            H5Tclose(dtypeid);
            H5Sclose(mspace);
            H5Sclose(dspace);
            H5Dclose(dsetid);
            HDF5CFUtil::close_fileid(fileid,pass_fileid);
            ostringstream eherr;
            eherr << "Cannot read the HDF5 dataset " << varname
                  << " with the unsupported HDF5 datatype"<<endl;
            throw InternalErr (__FILE__, __LINE__, eherr.str ());
        }
    }

    // Success path: release all HDF5 handles (and the file, unless it was
    // passed in by the handler).
    H5Tclose(memtype);
    H5Tclose(dtypeid);
    H5Sclose(mspace);
    H5Sclose(dspace);
    H5Dclose(dsetid);
    HDF5CFUtil::close_fileid(fileid,pass_fileid);

}
821
822bool HDF5CFArray::valid_disk_cache() const {
823
824 bool ret_value = false;
825 if(true == HDF5RequestHandler::get_use_disk_cache()) {
826
827 BESDEBUG("h5","Coming to disk cache "<<endl);
828 // Check if this is a valid numeric datatype we want to support
829 if(dtype == H5CHAR || dtype ==H5UCHAR || dtype==H5INT16 || dtype ==H5UINT16 ||
830 dtype == H5INT32 || dtype ==H5UINT32 || dtype ==H5FLOAT32 || dtype==H5FLOAT64 ||
831 dtype == H5INT64 || dtype ==H5UINT64){
832
833 BESDEBUG("h5","Coming to disk cache datatype block"<<endl);
834
835 string diskcache_dir = HDF5RequestHandler::get_disk_cache_dir();
836 string diskcache_prefix = HDF5RequestHandler::get_disk_cachefile_prefix();
837 long diskcache_size = HDF5RequestHandler::get_disk_cache_size();
838
839 if(("" == diskcache_dir)||(""==diskcache_prefix)||(diskcache_size <=0))
840 throw InternalErr (__FILE__, __LINE__, "Either the cached dir is empty or the prefix is nullptr or the cache size is not set.");
841 else {
842 struct stat sb;
843 if(stat(diskcache_dir.c_str(),&sb) !=0) {
844 string err_mesg="The cached directory " + diskcache_dir;
845 err_mesg = err_mesg + " doesn't exist. ";
846 throw InternalErr(__FILE__,__LINE__,err_mesg);
847 }
848 else {
849 if(true == S_ISDIR(sb.st_mode)) {
850 if(access(diskcache_dir.c_str(),R_OK|W_OK|X_OK) == -1) {
851 string err_mesg="The cached directory " + diskcache_dir;
852 err_mesg = err_mesg + " can NOT be read,written or executable.";
853 throw InternalErr(__FILE__,__LINE__,err_mesg);
854 }
855 }
856 else {
857 string err_mesg="The cached directory " + diskcache_dir;
858 err_mesg = err_mesg + " is not a directory.";
859 throw InternalErr(__FILE__,__LINE__,err_mesg);
860 }
861 }
862 }
863
864 short dtype_size = HDF5CFUtil::H5_numeric_atomic_type_size(dtype);
865 // Check if we only need to cache the specific compressed data
866 if(true == HDF5RequestHandler::get_disk_cache_comp_data()){
867 BESDEBUG("h5","Compression disk cache key is true"<<endl);
868 ret_value = valid_disk_cache_for_compressed_data(dtype_size);
869 BESDEBUG("h5","variable disk cache passes the compression parameter check"<<endl);
870 }
871 else {
872 BESDEBUG("h5","Compression disk cache key is NOT set, disk cache key is true."<<endl);
873 ret_value = true;
874 }
875
876 }
877
878 }
879 return ret_value;
880}
881
882bool HDF5CFArray:: valid_disk_cache_for_compressed_data(short dtype_size) const {
883
884 bool ret_value = false;
885 // The compression ratio should be smaller than the threshold(hard to compress)
886 // and the total var size should be bigger than the defined size(bigger)
887#if 0
888 size_t total_byte = total_elems*dtype_size;
889#endif
890 if((comp_ratio < HDF5RequestHandler::get_disk_comp_threshold())
891 && (total_elems*dtype_size >= HDF5RequestHandler::get_disk_var_size())) {
892 if( true == HDF5RequestHandler::get_disk_cache_float_only_comp()) {
893 if(dtype==H5FLOAT32 || dtype == H5FLOAT64)
894 ret_value = true;
895 }
896 else
897 ret_value = true;
898 }
899 return ret_value;
900
901}
902
903bool HDF5CFArray::obtain_cached_data(HDF5DiskCache *disk_cache,const string & cache_fpath, int fd,vector<int64_t> &cd_step, vector<int64_t>&cd_count,size_t total_read,short dtype_size) {
904
905 ssize_t ret_read_val = -1;
906 vector<char>buf;
907
908 buf.resize(total_read);
909 ret_read_val = HDF5CFUtil::read_buffer_from_file(fd,(void*)buf.data(),total_read);
910 disk_cache->unlock_and_close(cache_fpath);
911 if((-1 == ret_read_val) || (ret_read_val != (ssize_t)total_read)) {
912 disk_cache->purge_file(cache_fpath);
913 return false;
914 }
915 else {
916 size_t nele_to_read = 1;
917 for(int i = 0; i<rank;i++)
918 nele_to_read *=cd_count[i];
919
920 if(nele_to_read == (total_read/dtype_size)) {
921 val2buf(buf.data());
922 set_read_p(true);
923 }
924 else { // Need to re-assemble the buffer according to different datatype
925
926 vector<int64_t>cd_start(rank,0);
927 vector<size_t>cd_pos(rank,0);
928 int64_t nelms_to_send = 1;
929 for(int i = 0; i <rank; i++)
930 nelms_to_send = nelms_to_send*cd_count[i];
931
932 switch (dtype) {
933
934 case H5CHAR:
935 {
936#if 0
937 vector<int>total_val;
938 total_val.resize(total_read/dtype_size);
939 memcpy(total_val.data(),(void*)buf.data(),total_read);
940
941 vector<int>final_val;
943 total_val.data(),
944 rank,
945 dimsizes,
946 cd_start.data(),
947 cd_step.data(),
948 cd_count.data(),
949 &final_val,
950 cd_pos,
951 0
952 );
953
954#endif
955
956 if(is_dap4 == false) {
957 vector<short>final_val;
959 buf.data(),
960 rank,
961 dimsizes,
962 cd_start.data(),
963 cd_step.data(),
964 cd_count.data(),
965 &final_val,
966 cd_pos,
967 0
968 );
969 set_value_ll((dods_int16*)final_val.data(),nelms_to_send);
970 }
971 else {
972 vector<char>final_val;
974 buf.data(),
975 rank,
976 dimsizes,
977 cd_start.data(),
978 cd_step.data(),
979 cd_count.data(),
980 &final_val,
981 cd_pos,
982 0
983 );
984 set_value_ll((dods_int8*)final_val.data(),nelms_to_send);
985 }
986
987 }
988
989 break;
990 case H5UCHAR:
991 {
992#if 0
993 vector<unsigned char>total_val;
994 total_val.resize(total_read/dtype_size);
995 memcpy(total_val.data(),(void*)buf.data(),total_read);
996
997 vector<unsigned char>final_val;
999 total_val.data(),
1000 rank,
1001 dimsizes,
1002 cd_start.data(),
1003 cd_step.data(),
1004 cd_count.data(),
1005 &final_val,
1006 cd_pos,
1007 0
1008 );
1009
1010#endif
1011 vector<unsigned char>final_val;
1013 buf.data(),
1014 rank,
1015 dimsizes,
1016 cd_start.data(),
1017 cd_step.data(),
1018 cd_count.data(),
1019 &final_val,
1020 cd_pos,
1021 0
1022 );
1023
1024 set_value_ll (final_val.data(), nelms_to_send);
1025 }
1026 break;
1027
1028 case H5INT16:
1029 {
1030#if 0
1031 vector<short>total_val;
1032 total_val.resize(total_read/dtype_size);
1033 memcpy(total_val.data(),(void*)buf.data(),total_read);
1034
1035 vector<short>final_val;
1037 total_val.data(),
1038 rank,
1039 dimsizes,
1040 cd_start.data(),
1041 cd_step.data(),
1042 cd_count.data(),
1043 &final_val,
1044 cd_pos,
1045 0
1046 );
1047#endif
1048
1049 vector<short>final_val;
1051 buf.data(),
1052 rank,
1053 dimsizes,
1054 cd_start.data(),
1055 cd_step.data(),
1056 cd_count.data(),
1057 &final_val,
1058 cd_pos,
1059 0
1060 );
1061
1062 set_value_ll (final_val.data(), nelms_to_send);
1063 }
1064 break;
1065
1066 case H5UINT16:
1067 {
1068#if 0
1069 vector<unsigned short>total_val;
1070 total_val.resize(total_read/dtype_size);
1071 memcpy(total_val.data(),(void*)buf.data(),total_read);
1072
1073 vector<unsigned short>final_val;
1075 total_val.data(),
1076 rank,
1077 dimsizes,
1078 cd_start.data(),
1079 cd_step.data(),
1080 cd_count.data(),
1081 &final_val,
1082 cd_pos,
1083 0
1084 );
1085#endif
1086
1087 vector<unsigned short>final_val;
1089 buf.data(),
1090 rank,
1091 dimsizes,
1092 cd_start.data(),
1093 cd_step.data(),
1094 cd_count.data(),
1095 &final_val,
1096 cd_pos,
1097 0
1098 );
1099
1100 set_value_ll (final_val.data(), nelms_to_send);
1101 }
1102 break;
1103
1104 case H5INT32:
1105 {
1106#if 0
1107 vector<int>total_val;
1108 total_val.resize(total_read/dtype_size);
1109 memcpy(total_val.data(),(void*)buf.data(),total_read);
1110
1111 vector<int>final_val;
1113 total_val.data(),
1114 rank,
1115 dimsizes,
1116 cd_start.data(),
1117 cd_step.data(),
1118 cd_count.data(),
1119 &final_val,
1120 cd_pos,
1121 0
1122 );
1123
1124#endif
1125
1126 vector<int>final_val;
1128 buf.data(),
1129 rank,
1130 dimsizes,
1131 cd_start.data(),
1132 cd_step.data(),
1133 cd_count.data(),
1134 &final_val,
1135 cd_pos,
1136 0
1137 );
1138
1139
1140 set_value_ll (final_val.data(), nelms_to_send);
1141 }
1142 break;
1143
1144 case H5UINT32:
1145 {
1146#if 0
1147 vector<unsigned int>total_val;
1148 total_val.resize(total_read/dtype_size);
1149 memcpy(total_val.data(),(void*)buf.data(),total_read);
1150
1151 vector<unsigned int>final_val;
1153 total_val.data(),
1154 rank,
1155 dimsizes,
1156 cd_start.data(),
1157 cd_step.data(),
1158 cd_count.data(),
1159 &final_val,
1160 cd_pos,
1161 0
1162 );
1163#endif
1164
1165 vector<unsigned int>final_val;
1167 buf.data(),
1168 rank,
1169 dimsizes,
1170 cd_start.data(),
1171 cd_step.data(),
1172 cd_count.data(),
1173 &final_val,
1174 cd_pos,
1175 0
1176 );
1177
1178 set_value_ll (final_val.data(), nelms_to_send);
1179 }
1180 break;
1181
1182 case H5INT64: // Only for DAP4 CF
1183 {
1184#if 0
1185 vector<unsigned int>total_val;
1186 total_val.resize(total_read/dtype_size);
1187 memcpy(total_val.data(),(void*)buf.data(),total_read);
1188
1189 vector<unsigned int>final_val;
1191 total_val.data(),
1192 rank,
1193 dimsizes,
1194 cd_start.data(),
1195 cd_step.data(),
1196 cd_count.data(),
1197 &final_val,
1198 cd_pos,
1199 0
1200 );
1201#endif
1202
1203 vector<long long >final_val;
1205 buf.data(),
1206 rank,
1207 dimsizes,
1208 cd_start.data(),
1209 cd_step.data(),
1210 cd_count.data(),
1211 &final_val,
1212 cd_pos,
1213 0
1214 );
1215
1216 set_value_ll ((dods_int64*)final_val.data(), nelms_to_send);
1217 }
1218 break;
1219
1220
1221
1222 case H5UINT64: // Only for DAP4 CF
1223 {
1224#if 0
1225 vector<unsigned int>total_val;
1226 total_val.resize(total_read/dtype_size);
1227 memcpy(total_val.data(),(void*)buf.data(),total_read);
1228
1229 vector<unsigned int>final_val;
1231 total_val.data(),
1232 rank,
1233 dimsizes,
1234 cd_start.data(),
1235 cd_step.data(),
1236 cd_count.data(),
1237 &final_val,
1238 cd_pos,
1239 0
1240 );
1241#endif
1242
1243 vector<unsigned long long >final_val;
1245 buf.data(),
1246 rank,
1247 dimsizes,
1248 cd_start.data(),
1249 cd_step.data(),
1250 cd_count.data(),
1251 &final_val,
1252 cd_pos,
1253 0
1254 );
1255
1256 set_value_ll ((dods_uint64*)final_val.data(), nelms_to_send);
1257 }
1258 break;
1259
1260
1261 case H5FLOAT32:
1262 {
1263#if 0
1264 vector<float>total_val;
1265 total_val.resize(total_read/dtype_size);
1266 memcpy(total_val.data(),(void*)buf.data(),total_read);
1267
1268 vector<float>final_val;
1270 total_val.data(),
1271 rank,
1272 dimsizes,
1273 cd_start.data(),
1274 cd_step.data(),
1275 cd_count.data(),
1276 &final_val,
1277 cd_pos,
1278 0
1279 );
1280#endif
1281
1282 vector<float>final_val;
1284 buf.data(),
1285 rank,
1286 dimsizes,
1287 cd_start.data(),
1288 cd_step.data(),
1289 cd_count.data(),
1290 &final_val,
1291 cd_pos,
1292 0
1293 );
1294
1295
1296 set_value_ll (final_val.data(), nelms_to_send);
1297 }
1298 break;
1299 case H5FLOAT64:
1300 {
1301#if 0
1302 vector<double>total_val;
1303 total_val.resize(total_read/dtype_size);
1304 memcpy(total_val.data(),(void*)buf.data(),total_read);
1305
1306 vector<double>final_val;
1308 total_val.data(),
1309 rank,
1310 dimsizes,
1311 cd_start.data(),
1312 cd_step.data(),
1313 cd_count.data(),
1314 &final_val,
1315 cd_pos,
1316 0
1317 );
1318#endif
1319 vector<double>final_val;
1321 buf.data(),
1322 rank,
1323 dimsizes,
1324 cd_start.data(),
1325 cd_step.data(),
1326 cd_count.data(),
1327 &final_val,
1328 cd_pos,
1329 0
1330 );
1331
1332 set_value_ll (final_val.data(), nelms_to_send);
1333 }
1334 break;
1335 default:
1336 throw InternalErr (__FILE__, __LINE__, "unsupported data type.");
1337
1338 }// "end switch(dtype)"
1339 }// "end else (stride is not 1)"
1340 return true;
1341 }// "end else(full_read = true)"
1342}
1343
1344
1345void
1346HDF5CFArray::write_data_to_cache(hid_t dset_id, hid_t /*dspace_id*/, hid_t /*mspace_id*/, hid_t memtype,
1347 const string& cache_fpath, short dtype_size, const vector<char> &buf, int64_t nelms) {
1348
1349 unsigned long long disk_cache_size = HDF5RequestHandler::get_disk_cache_size();
1350 string disk_cache_dir = HDF5RequestHandler::get_disk_cache_dir();
1351 string disk_cache_prefix = HDF5RequestHandler::get_disk_cachefile_prefix();
1352 HDF5DiskCache *disk_cache = HDF5DiskCache::get_instance(disk_cache_size,disk_cache_dir,disk_cache_prefix);
1353 int64_t total_nelem = 1;
1354 for(int i = 0; i <rank; i++)
1355 total_nelem = total_nelem*dimsizes[i];
1356
1357 vector<char>val;
1358
1359 if(H5CHAR == dtype && is_dap4 == false) {
1360
1361 vector<short>newval;
1362 newval.resize(total_nelem);
1363 if(total_nelem == nelms) {
1364 for (int64_t i = 0; i < total_nelem;i++)
1365 newval[i] = (short)buf[i];
1366 disk_cache->write_cached_data2(cache_fpath,sizeof(short)*total_nelem,(const void*)newval.data());
1367 }
1368 else {
1369 vector<char>val2;
1370 val2.resize(total_nelem);
1371 if(H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL,H5P_DEFAULT, val2.data())<0)
1372 throw InternalErr (__FILE__, __LINE__, "Cannot read the whole HDF5 dataset for the disk cache.");
1373 for (int64_t i = 0; i < total_nelem;i++)
1374 newval[i] = (short)val2[i];
1375 disk_cache->write_cached_data2(cache_fpath,sizeof(short)*total_nelem,(const void*)newval.data());
1376 }
1377 }
1378 else {
1379 if(total_nelem == nelms) {
1380 disk_cache->write_cached_data2(cache_fpath,dtype_size*total_nelem,(const void*)buf.data());
1381 }
1382 else {
1383 val.resize(dtype_size*total_nelem);
1384 if(H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL,H5P_DEFAULT, val.data())<0)
1385 throw InternalErr (__FILE__, __LINE__, "Cannot read the whole SDS for cache.");
1386
1387 disk_cache->write_cached_data2(cache_fpath,dtype_size*total_nelem,(const void*)val.data());
1388 }
1389 }
1390}
1391
1392
1393
1394// We don't inherit libdap Array Class's transform_to_dap4 method since CF option is still using it.
1395// This function is used for 64-bit integer mapping to DAP4 for the CF option. largely borrowed from
1396// DAP4 code.
1397BaseType* HDF5CFArray::h5cfdims_transform_to_dap4_int64(D4Group *grp) {
1398
1399 if(grp == nullptr)
1400 return nullptr;
1401 Array *dest = dynamic_cast<HDF5CFArray*>(ptr_duplicate());
1402
1403 // If there is just a size, don't make
1404 // a D4Dimension (In DAP4 you cannot share a dimension unless it has
1405 // a name). jhrg 3/18/14
1406
1407 for (Array::Dim_iter d = dest->dim_begin(), e = dest->dim_end(); d != e; ++d) {
1408 if (false == (*d).name.empty()) {
1409
1410 D4Group *temp_grp = grp;
1411 D4Dimension *d4_dim = nullptr;
1412 while(temp_grp) {
1413
1414 D4Dimensions *temp_dims = temp_grp->dims();
1415
1416 // Check if the dimension is defined in this group
1417 d4_dim = temp_dims->find_dim((*d).name);
1418 if (d4_dim) {
1419 (*d).dim = d4_dim;
1420 break;
1421 }
1422
1423 if (temp_grp->get_parent())
1424 temp_grp = static_cast<D4Group*>(temp_grp->get_parent());
1425 else
1426 temp_grp = nullptr;
1427
1428 }
1429
1430 // Not find this dimension in any of the ancestor groups, add it to this group.
1431 // The following block is fine, but to avoid the complaint from sonarcloud.
1432 // Use a bool.
1433 bool d4_dim_null = ((d4_dim==nullptr)?true:false);
1434 // Not find this dimension in any of the ancestor groups, add it to this group.
1435 if (d4_dim_null == true) {
1436
1437 auto d4_dim_unique = make_unique<D4Dimension>((*d).name, (*d).size);
1438 D4Dimensions * dims = grp->dims();
1439 d4_dim = d4_dim_unique.release();
1440 dims->add_dim_nocopy(d4_dim);
1441 (*d).dim = d4_dim;
1442 }
1443 }
1444 }
1445
1446 dest->set_is_dap4(true);
1447
1448 return dest;
1449
1450}
This class includes the methods to read data array into DAP buffer from an HDF5 dataset for the CF op...
include the entry functions to execute the handlers
virtual void unlock_and_close(const std::string &target)
virtual void purge_file(const std::string &file)
Purge a single file from the cache.
int subset(void *input, int rank, const std::vector< size_t > &dim, int64_t start[], int64_t stride[], int64_t edge[], std::vector< T > *poutput, std::vector< size_t > &pos, int index)
Getting a subset of a variable.
void read_data_NOT_from_mem_cache(bool add_cache, void *buf) override
static HDF5DiskCache * get_instance(const long, const std::string &, const std::string &)
Helper functions for generating DAS attributes and a function to check BES Key.
static ssize_t read_buffer_from_file(int fd, void *buf, size_t)
Getting a subset of a variable.