BES — updated for version 3.21.1. The Backend Server (BES) implements the lower two tiers of the Hyrax data server. This page documents the source file hc2dap.cc.
8// This file is part of the hdf4 data handler for the OPeNDAP data server.
9
10// Copyright (c) The HDF Group
11// Author: Hyo-Kyung Lee <hyoklee@hdfgroup.org>
12//
13// Copyright (c) 2005 OPeNDAP, Inc.
14// Author: James Gallagher <jgallagher@opendap.org>
15//
16// This is free software; you can redistribute it and/or modify it under the
17// terms of the GNU Lesser General Public License as published by the Free
18// Software Foundation; either version 2.1 of the License, or (at your
19// option) any later version.
20//
21// This software is distributed in the hope that it will be useful, but
22// WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
23// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
24// License for more details.
25//
26// You should have received a copy of the GNU Lesser General Public License
27// along with this software; if not, write to the Free Software Foundation,
28// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29//
30// You can contact OPeNDAP, Inc. at PO Box 112, Saunderstown, RI. 02874-0112.
31
33// Copyright 1996, by the California Institute of Technology.
34// ALL RIGHTS RESERVED. United States Government Sponsorship
35// acknowledged. Any commercial use must be negotiated with the
36// Office of Technology Transfer at the California Institute of
37// Technology. This software may be subject to U.S. export control
38// laws and regulations. By accepting this software, the user
39// agrees to comply with all applicable U.S. export laws and
40// regulations. User has the responsibility to obtain export
41// licenses, or other export authority as may be required before
42// exporting such information to foreign countries or providing
43// access to foreign persons.
44
45// Author: Todd Karakashian, NASA/Jet Propulsion Laboratory
46// Todd.K.Karakashian@jpl.nasa.gov
47//
49
50
51#include "config_hdf.h"
52
53// STL includes
54#include <fstream>
55#include <sstream>
56#include <string>
57#include <vector>
58#include <algorithm>
59#include <iostream>
60#include <BESDebug.h>
61#include <libdap/debug.h>
62
63// HDF and HDFClass includes
64// Include this on linux to suppres an annoying warning about multiple
65// definitions of MIN and MAX.
66#ifdef HAVE_SYS_PARAM_H
67#include <sys/param.h>
68#endif
69#include <mfhdf.h>
70#include <hdfclass.h>
71#include <hcstream.h>
72
73// DODS/HDF includes
74#include <libdap/escaping.h>
75#include "HDFInt32.h"
76#include "HDFInt16.h"
77#include "HDFUInt32.h"
78#include "HDFUInt16.h"
79#include "HDFFloat64.h"
80#include "HDFFloat32.h"
81#include "HDFByte.h"
82#include "HDFStr.h"
83#include "HDFArray.h"
84#include "HDFGrid.h"
85#include "HDFSequence.h"
86#include "HDFStructure.h"
87#include "hdfutil.h"
88#include "dhdferr.h"
89#include "hdf-maps.h"
90#include <libdap/debug.h>
91
92using namespace std;
93using namespace libdap;
94
95// Undefine the following to send signed bytes using unsigned bytes. 1/13/98
96// jhrg.
97#define SIGNED_BYTE_TO_INT32 1
98
// Forward declarations for helpers defined later in this file.

// Map an HDF number type to a newly allocated DAP scalar variable
// (caller owns the result; returns 0/nullptr for unsupported types).
BaseType *NewDAPVar(const string &varname,
                    const string &dataset,
                    int32 hdf_type);
// Fill *stru with the data for one row of an hdf_field.
void LoadStructureFromField(HDFStructure * stru, hdf_field & f, int row);
103
104// STL predicate comparing equality of hdf_field objects based on their names
105class fieldeq {
106public:
107 explicit fieldeq(const string & s):_val(s) {
108 }
109
110 bool operator() (const hdf_field & f) const {
111 return (f.name == _val);
112 }
113
114private:
115 string _val;
116};
117
118// Create a DAP HDFSequence from an hdf_vdata.
119HDFSequence *NewSequenceFromVdata(const hdf_vdata &vd, const string &dataset)
120{
121 // check to make sure hdf_vdata object is set up properly
122 // Vdata must have a name
123 if (!vd || vd.fields.empty() || vd.name.empty())
124 return nullptr;
125
126 // construct HDFSequence
127 auto seq = new HDFSequence(vd.name, dataset);
128
129 // step through each field and create a variable in the DAP Sequence
130 for (const auto &vdf:vd.fields) {
131 if (!vdf || vdf.vals.size() < 1 ||
132 vdf.name.empty()) {
133 delete seq; // problem with the field
134 return nullptr;
135 }
136 HDFStructure *st = 0;
137 try {
138 st = new HDFStructure(vdf.name, dataset);
139
140 // for each subfield add the subfield to st
141 if (vdf.vals[0].number_type() == DFNT_CHAR8
142 || vdf.vals[0].number_type() == DFNT_UCHAR8) {
143
144 // collapse char subfields into one string
145 string subname = vdf.name + "__0";
146 BaseType *bt = new HDFStr(subname, dataset);
147 st->add_var(bt); // *st now manages *bt
148 delete bt;
149 }
150 else {
151 // create a DODS variable for each subfield
152 for (int j = 0; j < (int) vdf.vals.size(); ++j) {
153 ostringstream strm;
154 strm << vdf.name << "__" << j;
155 BaseType *bt =
156 NewDAPVar(strm.str(), dataset,
157 vdf.vals[j].number_type());
158 st->add_var(bt); // *st now manages *bt
159 delete bt;
160 }
161 }
162 seq->add_var(st); // *seq now manages *st
163 delete st;
164 }
165 catch (...) {
166 delete seq;
167 delete st;
168 throw;
169 }
170 }
171
172#if 0
173 for (int i = 0; i < (int) vd.fields.size(); ++i) {
174 if (!vd.fields[i] || vd.fields[i].vals.size() < 1 ||
175 vd.fields[i].name.empty()) {
176 delete seq; // problem with the field
177 return nullptr;
178 }
179 HDFStructure *st = 0;
180 try {
181 st = new HDFStructure(vd.fields[i].name, dataset);
182
183 // for each subfield add the subfield to st
184 if (vd.fields[i].vals[0].number_type() == DFNT_CHAR8
185 || vd.fields[i].vals[0].number_type() == DFNT_UCHAR8) {
186
187 // collapse char subfields into one string
188 string subname = vd.fields[i].name + "__0";
189 BaseType *bt = new HDFStr(subname, dataset);
190 st->add_var(bt); // *st now manages *bt
191 delete bt;
192 }
193 else {
194 // create a DODS variable for each subfield
195 for (int j = 0; j < (int) vd.fields[i].vals.size(); ++j) {
196 ostringstream strm;
197 strm << vd.fields[i].name << "__" << j;
198 BaseType *bt =
199 NewDAPVar(strm.str(), dataset,
200 vd.fields[i].vals[j].number_type());
201 st->add_var(bt); // *st now manages *bt
202 delete bt;
203 }
204 }
205 seq->add_var(st); // *seq now manages *st
206 delete st;
207 }
208 catch (...) {
209 delete seq;
210 delete st;
211 throw;
212 }
213 }
214#endif
215
216 return seq;
217}
218
219// Create a DAP HDFStructure from an hdf_vgroup.
220HDFStructure *NewStructureFromVgroup(const hdf_vgroup &vg, vg_map &vgmap,
221 sds_map &sdmap, vd_map &vdmap,
222 gr_map &grmap, const string &dataset)
223{
224 // check to make sure hdf_vgroup object is set up properly
225 if (vg.name.size() == 0) // Vgroup must have a name
226 return nullptr;
227 if (!vg) // Vgroup must have some tagrefs
228 return nullptr;
229
230 // construct HDFStructure
231 HDFStructure *str = new HDFStructure(vg.name, dataset);
232 bool nonempty = false;
233
234 // I think coverity is unreasonable on this one. The code is sound. KY 2016-05-12
235 BaseType *bt = nullptr;
236 try {
237 // step through each tagref and copy its contents to DAP
238 for (int i = 0; i < (int) vg.tags.size(); ++i) {
239 int32 tag = vg.tags[i];
240 int32 ref = vg.refs[i];
241
242 switch (tag) {
243 case DFTAG_VH:
244 bt = NewSequenceFromVdata(vdmap[ref].vdata, dataset);
245 break;
246 case DFTAG_NDG:
247 if (sdmap[ref].sds.has_scale()) {
248 bt = NewGridFromSDS(sdmap[ref].sds, dataset);
249 } else {
250 bt = NewArrayFromSDS(sdmap[ref].sds, dataset);
251 }
252 break;
253 case DFTAG_VG:
254 // GR's are also stored as Vgroups
255 if (grmap.find(ref) != grmap.end()){
256 bt = NewArrayFromGR(grmap[ref].gri, dataset);
257 }
258 else
259 bt = NewStructureFromVgroup(vgmap[ref].vgroup, vgmap,
260 sdmap, vdmap, grmap, dataset);
261 break;
262 default:
263 break;
264 }
265 if (bt) {
266 str->add_var(bt); // *st now manages *bt
267 delete bt;
268 bt = nullptr; // See if coverity scan can pass this.
269 nonempty = true;
270 }
271 }
272 }
273 catch(...) {
274 delete str;
275 delete bt;
276 throw;
277 }
278
279 if (nonempty) {
280 return str;
281 } else {
282 delete str;
283 return nullptr;
284 }
285}
286
287// Create a DAP HDFArray out of the primary array in an hdf_sds
288HDFArray *NewArrayFromSDS(const hdf_sds & sds, const string &dataset)
289{
290 if (sds.name.size() == 0) // SDS must have a name
291 return nullptr;
292 if (sds.dims.size() == 0) // SDS must have rank > 0
293 return nullptr;
294
295 // construct HDFArray, assign data type
296 BaseType *bt = NewDAPVar(sds.name, dataset, sds.data.number_type());
297 if (bt == nullptr) { // something is not right with SDS number type?
298 return nullptr;
299 }
300 try {
301 HDFArray *ar = nullptr;
302 ar = new HDFArray(sds.name,dataset,bt);
303 delete bt;
304
305 // add dimension info to HDFArray
306 for (const auto &sds_dim:sds.dims)
307 ar->append_dim(sds_dim.count, sds_dim.name);
308
309 return ar;
310 }
311 catch (...) {
312 delete bt;
313 throw;
314 }
315}
316
317// Create a DAP HDFArray out of a general raster
318HDFArray *NewArrayFromGR(const hdf_gri & gr, const string &dataset)
319{
320 if (gr.name.size() == 0) // GR must have a name
321 return nullptr;
322
323 // construct HDFArray, assign data type
324 BaseType *bt = NewDAPVar(gr.name, dataset, gr.image.number_type());
325 if (bt == 0) { // something is not right with GR number type?
326 return nullptr;
327 }
328
329 try {
330 HDFArray *ar = nullptr;
331 ar = new HDFArray(gr.name, dataset, bt);
332
333 // Array duplicates the base type passed, so delete here
334 delete bt;
335
336 // add dimension info to HDFArray
337 if (gr.num_comp > 1)
338 ar->append_dim(gr.num_comp, gr.name + "__comps");
339 ar->append_dim(gr.dims[1], gr.name + "__Y");
340 ar->append_dim(gr.dims[0], gr.name + "__X");
341 return ar;
342 }
343 catch (...) {
344 delete bt;
345 throw;
346 }
347}
348
349// Create a DAP HDFGrid out of the primary array and dim scale in an hdf_sds
350HDFGrid *NewGridFromSDS(const hdf_sds & sds, const string &dataset)
351{
352 BESDEBUG("h4", "NewGridFromSDS" << endl);
353 if (!sds.has_scale()) // we need a dim scale to make a Grid
354 return nullptr;
355
356 // Create the HDFGrid and the primary array. Add the primary array to
357 // the HDFGrid.
358 HDFArray *ar = NewArrayFromSDS(sds, dataset);
359 if (ar == nullptr)
360 return nullptr;
361
362 HDFGrid *gr = nullptr;
363 HDFArray *dmar = nullptr;
364 BaseType *dsbt = nullptr;
365 try {
366 gr = new HDFGrid(sds.name, dataset);
367 gr->add_var(ar, libdap::array); // note: gr now manages ar
368 delete ar;
369
370 // create dimension scale HDFArrays (i.e., maps) and
371 // add them to the HDFGrid
372 string mapname;
373 for (const auto & sds_dim:sds.dims) {
374 if (sds_dim.name.size() == 0) { // the dim must be named
375 delete gr;
376 return nullptr;
377 }
378 mapname = sds_dim.name;
379 if ((dsbt = NewDAPVar(mapname, dataset,
380 sds_dim.scale.number_type())) == nullptr) {
381 delete gr; // note: ~HDFGrid() cleans up the attached ar
382 return nullptr;
383 }
384 dmar = new HDFArray(mapname, dataset, dsbt);
385 delete dsbt;
386 dmar->append_dim(sds_dim.count); // set dimension size
387 gr->add_var(dmar, maps); // add dimension map to grid;
388 delete dmar;
389 }
390 return gr;
391 }
392 catch (...) {
393 delete dmar;
394 delete dsbt;
395 delete gr;
396 delete ar;
397 throw;
398 }
399}
400
401// Return a ptr to DAP atomic data object corresponding to an HDF Type, or
402// return 0 if the HDF Type is invalid or not supported.
403BaseType *NewDAPVar(const string &varname,
404 const string &dataset,
405 int32 hdf_type)
406{
407 switch (hdf_type) {
408 case DFNT_FLOAT32:
409 return new HDFFloat32(varname, dataset);
410
411 case DFNT_FLOAT64:
412 return new HDFFloat64(varname, dataset);
413
414 case DFNT_INT16:
415 return new HDFInt16(varname, dataset);
416
417#ifdef SIGNED_BYTE_TO_INT32
418 case DFNT_INT8:
419#endif
420 case DFNT_INT32:
421 return new HDFInt32(varname, dataset);
422
423 case DFNT_UINT16:
424 return new HDFUInt16(varname, dataset);
425
426 case DFNT_UINT32:
427 return new HDFUInt32(varname, dataset);
428
429 // INT8 and UINT8 *should* be grouped under Int32 and UInt32, but
430 // that breaks too many programs. jhrg 12/30/97
431#ifndef SIGNED_BYTE_TO_INT32
432 case DFNT_INT8:
433#endif
434 case DFNT_UINT8:
435 case DFNT_UCHAR8:
436 case DFNT_CHAR8:
437 return new HDFByte(varname, dataset);
438
439 default:
440 return 0;
441 }
442}
443
444// Return the DAP type name that corresponds to an HDF data type
445string DAPTypeName(int32 hdf_type)
446{
447 switch (hdf_type) {
448 case DFNT_FLOAT32:
449 return string("Float32");
450
451 case DFNT_FLOAT64:
452 return string("Float64");
453
454 case DFNT_INT16:
455 return string("Int16");
456
457#ifdef SIGNED_BYTE_TO_INT32
458 case DFNT_INT8:
459#endif
460 case DFNT_INT32:
461 return string("Int32");
462
463 case DFNT_UINT16:
464 return string("UInt16");
465
466 case DFNT_UINT32:
467 return string("UInt32");
468
469 // See the note above about INT8 and UINT8. jhrg 12/30/97.
470#ifndef SIGNED_BYTE_TO_INT32
471 case DFNT_INT8:
472#endif
473 case DFNT_UINT8:
474 return string("Byte");
475
476 case DFNT_CHAR8:
477 case DFNT_UCHAR8:
478 // note: DFNT_CHAR8 is Byte in DDS but String in DAS
479 return string("String");
480
481 default:
482 return string("");
483 }
484}
485
486// load an HDFArray from an SDS
487void LoadArrayFromSDS(HDFArray * ar, const hdf_sds & sds)
488{
489#ifdef SIGNED_BYTE_TO_INT32
490 if (sds.data.number_type() == DFNT_INT8) {
491 char *data = static_cast < char *>(ExportDataForDODS(sds.data));
492 ar->val2buf(data);
493 delete[]data;
494 }
495 else
496 ar->val2buf(const_cast < char *>(sds.data.data()));
497#if 0
498 switch (sds.data.number_type()) {
499 case DFNT_INT8:{
500 char *data = static_cast < char *>(ExportDataForDODS(sds.data));
501 ar->val2buf(data);
502 delete[]data;
503 break;
504 }
505 default:
506 ar->val2buf(const_cast < char *>(sds.data.data()));
507 }
508#endif
509#else
510 ar->val2buf(const_cast < char *>(sds.data.data()));
511#endif
512 return;
513}
514
515// load an HDFArray from a GR image
516void LoadArrayFromGR(HDFArray * ar, const hdf_gri & gr)
517{
518#ifdef SIGNED_BYTE_TO_INT32
519 if (gr.image.number_type() == DFNT_INT8) {
520 char *data = static_cast < char *>(ExportDataForDODS(gr.image));
521 ar->val2buf(data);
522 delete[]data;
523 }
524 else
525 ar->val2buf(const_cast < char *>(gr.image.data()));
526#if 0
527 switch (gr.image.number_type()) {
528 case DFNT_INT8:{
529 char *data = static_cast < char *>(ExportDataForDODS(gr.image));
530 ar->val2buf(data);
531 delete[]data;
532 break;
533 }
534 default:
535 ar->val2buf(const_cast < char *>(gr.image.data()));
536 }
537#endif
538#else
539 ar->val2buf(const_cast < char *>(gr.image.data()));
540#endif
541 return;
542}
543
544// load an HDFGrid from an SDS
545// I modified Todd's code so that only the parts of a Grid that are marked as
546// to be sent will be read. 1/29/2002 jhrg
547void LoadGridFromSDS(HDFGrid * gr, const hdf_sds & sds)
548{
549
550 // load data into primary array
551 auto primary_array = static_cast < HDFArray & >(*gr->array_var());
552 if (primary_array.send_p()) {
553 LoadArrayFromSDS(&primary_array, sds);
554 primary_array.set_read_p(true);
555 }
556 // load data into maps
557 if (primary_array.dimensions() != sds.dims.size())
558 THROW(dhdferr_consist); // # of dims of SDS and HDFGrid should agree!
559
560 Grid::Map_iter p = gr->map_begin();
561 for (unsigned int i = 0;
562 i < sds.dims.size() && p != gr->map_end(); ++i, ++p) {
563 if ((*p)->send_p()) {
564#ifdef SIGNED_BYTE_TO_INT32
565#if 0
566 switch (sds.dims[i].scale.number_type()) {
567 case DFNT_INT8:{
568 char *data = static_cast < char *>(ExportDataForDODS(sds.dims[i].scale));
569 (*p)->val2buf(data);
570 delete[]data;
571 break;
572 }
573 default:
574 (*p)->val2buf(const_cast < char *>
575 (sds.dims[i].scale.data()));
576 }
577#endif
578 if (sds.dims[i].scale.number_type() == DFNT_INT8) {
579 char *data = static_cast < char *>(ExportDataForDODS(sds.dims[i].scale));
580 (*p)->val2buf(data);
581 delete[]data;
582 }
583 else
584 (*p)->val2buf(const_cast<char *>(sds.dims[i].scale.data()));
585
586#else
587 (*p)->val2buf(const_cast < char *>(sds.dims[i].scale.data()));
588#endif
589 (*p)->set_read_p(true);
590 }
591 }
592 return;
593}
594
595// load an HDFSequence from a row of an hdf_vdata
596void LoadSequenceFromVdata(HDFSequence * seq, hdf_vdata & vd, int row)
597{
598 for (Constructor::Vars_iter p = seq->var_begin(); p != seq->var_end(); ++p) {
599 HDFStructure & stru = static_cast < HDFStructure & >(**p);
600
601 // find corresponding field in vd
602 vector < hdf_field >::iterator vf =
603 find_if(vd.fields.begin(), vd.fields.end(),
604 fieldeq(stru.name()));
605 if (vf == vd.fields.end())
606 THROW(dhdferr_consist);
607
608 // for each field component of field, extract the proper data element
609 // for the current row being requested and load into the Structure
610 // variable
611 LoadStructureFromField(&stru, *vf, row);
612 stru.set_read_p(true);
613 }
614}
615
616// Load an HDFStructure with the components of a row of an hdf_field. If the
617// field is made of char8 components, collapse these into one String component
618void LoadStructureFromField(HDFStructure * stru, hdf_field & f, int row)
619{
620
621 if (row < 0 || f.vals.size() <= 0 || row > (int) f.vals[0].size())
622 THROW(dhdferr_conv);
623
624 BaseType *firstp = *stru->var_begin();
625 if (firstp->type() == dods_str_c) {
626 // If the Structure contains a String, then that is all it will
627 // contain. In that case, concatenate the different char8
628 // components of the field and load the DODS String with the value.
629 string str = "";
630#if 0
631 for (unsigned int i = 0; i < f.vals.size(); ++i) {
632 str += f.vals[i].elt_char8(row);
633 }
634#endif
635 for (const auto & fval:f.vals)
636 str += fval.elt_char8(row);
637
638 firstp->val2buf(static_cast < void *>(&str));
639 firstp->set_read_p(true);
640 } else {
641 // for each component of the field, load the corresponding component
642 // of the DODS Structure.
643 int i = 0;
644 for (Constructor::Vars_iter q = stru->var_begin(); q != stru->var_end(); ++q, ++i) {
645 char *val = static_cast <char *>(ExportDataForDODS(f.vals[i], row));
646 (*q)->val2buf(val);
647#if 0
648 delete[] val;
649#endif
650 delete val;
651 (*q)->set_read_p(true);
652 }
653
654 }
655 return;
656}
657
658// Load an HDFStructure with the contents of a vgroup.
659void LoadStructureFromVgroup(HDFStructure * str, const hdf_vgroup & vg,
660 const string & /*hdf_file //unused SBL 2/7/20 */)
661{
662 int i = 0;
663 int err = 0;
664 for (Constructor::Vars_iter q = str->var_begin(); err == 0 && q != str->var_end(); ++q, ++i) {
665 BaseType *p = *q;
666 BESDEBUG("h4", "Reading within LoadStructureFromVgroup: " << p->name()
667 << ", send_p: " << p->send_p() << ", vg.names[" << i << "]: "
668 << vg.vnames[i] << endl);
669 if (p && p->send_p() && p->name() == vg.vnames[i]) {
670 (dynamic_cast < ReadTagRef & >(*p)).read_tagref(vg.tags[i],
671 vg.refs[i],
672 err);
673 }
674 }
675}