floodmodeller-api 0.5.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
Files changed (80)
  1. floodmodeller_api/__init__.py +1 -1
  2. floodmodeller_api/_base.py +26 -16
  3. floodmodeller_api/backup.py +3 -2
  4. floodmodeller_api/dat.py +29 -30
  5. floodmodeller_api/diff.py +3 -3
  6. floodmodeller_api/hydrology_plus/hydrology_plus_export.py +14 -13
  7. floodmodeller_api/ied.py +6 -6
  8. floodmodeller_api/ief.py +27 -25
  9. floodmodeller_api/inp.py +3 -4
  10. floodmodeller_api/logs/lf.py +9 -16
  11. floodmodeller_api/logs/lf_helpers.py +18 -18
  12. floodmodeller_api/mapping.py +2 -0
  13. floodmodeller_api/test/__init__.py +2 -2
  14. floodmodeller_api/test/conftest.py +2 -3
  15. floodmodeller_api/test/test_backup.py +2 -2
  16. floodmodeller_api/test/test_conveyance.py +4 -3
  17. floodmodeller_api/test/test_dat.py +2 -2
  18. floodmodeller_api/test/test_data/structure_logs/EX17_expected.csv +4 -0
  19. floodmodeller_api/test/test_data/structure_logs/EX17_expected.json +69 -0
  20. floodmodeller_api/test/test_data/structure_logs/EX18_expected.csv +20 -0
  21. floodmodeller_api/test/test_data/structure_logs/EX18_expected.json +292 -0
  22. floodmodeller_api/test/test_data/structure_logs/EX6_expected.csv +4 -0
  23. floodmodeller_api/test/test_data/structure_logs/EX6_expected.json +35 -0
  24. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_flow.csv +182 -0
  25. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_fr.csv +182 -0
  26. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_mode.csv +182 -0
  27. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_stage.csv +182 -0
  28. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_state.csv +182 -0
  29. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzn_velocity.csv +182 -0
  30. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_h.csv +182 -0
  31. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_left_fp_mode.csv +182 -0
  32. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_link_inflow.csv +182 -0
  33. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_max.csv +87 -0
  34. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_h.csv +182 -0
  35. floodmodeller_api/test/test_data/tabular_csv_outputs/network_zzx_right_fp_mode.csv +182 -0
  36. floodmodeller_api/test/test_flowtimeprofile.py +2 -2
  37. floodmodeller_api/test/test_hydrology_plus_export.py +4 -2
  38. floodmodeller_api/test/test_ied.py +2 -2
  39. floodmodeller_api/test/test_ief.py +2 -2
  40. floodmodeller_api/test/test_inp.py +2 -2
  41. floodmodeller_api/test/test_json.py +5 -10
  42. floodmodeller_api/test/test_logs_lf.py +6 -6
  43. floodmodeller_api/test/test_read_file.py +1 -0
  44. floodmodeller_api/test/test_river.py +79 -2
  45. floodmodeller_api/test/test_tool.py +8 -5
  46. floodmodeller_api/test/test_toolbox_structure_log.py +149 -158
  47. floodmodeller_api/test/test_xml2d.py +9 -11
  48. floodmodeller_api/test/test_zz.py +143 -0
  49. floodmodeller_api/to_from_json.py +8 -8
  50. floodmodeller_api/tool.py +12 -6
  51. floodmodeller_api/toolbox/example_tool.py +5 -1
  52. floodmodeller_api/toolbox/model_build/add_siltation_definition.py +12 -8
  53. floodmodeller_api/toolbox/model_build/structure_log/structure_log.py +498 -196
  54. floodmodeller_api/toolbox/model_build/structure_log_definition.py +5 -1
  55. floodmodeller_api/units/_base.py +14 -10
  56. floodmodeller_api/units/conveyance.py +10 -8
  57. floodmodeller_api/units/helpers.py +1 -3
  58. floodmodeller_api/units/losses.py +2 -3
  59. floodmodeller_api/units/sections.py +15 -11
  60. floodmodeller_api/units/structures.py +9 -9
  61. floodmodeller_api/units/units.py +2 -0
  62. floodmodeller_api/urban1d/_base.py +6 -9
  63. floodmodeller_api/urban1d/outfalls.py +2 -1
  64. floodmodeller_api/urban1d/raingauges.py +2 -1
  65. floodmodeller_api/urban1d/subsections.py +2 -0
  66. floodmodeller_api/urban1d/xsections.py +3 -2
  67. floodmodeller_api/util.py +16 -2
  68. floodmodeller_api/validation/validation.py +2 -1
  69. floodmodeller_api/version.py +1 -1
  70. floodmodeller_api/xml2d.py +18 -20
  71. floodmodeller_api/zz.py +538 -0
  72. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/METADATA +20 -14
  73. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/RECORD +78 -60
  74. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/WHEEL +1 -1
  75. floodmodeller_api/test/test_zzn.py +0 -36
  76. floodmodeller_api/zzn.py +0 -414
  77. /floodmodeller_api/test/test_data/{network_from_tabularCSV.csv → tabular_csv_outputs/network_zzn_max.csv} +0 -0
  78. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/LICENSE.txt +0 -0
  79. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/entry_points.txt +0 -0
  80. {floodmodeller_api-0.5.0.dist-info → floodmodeller_api-0.5.1.dist-info}/top_level.txt +0 -0
floodmodeller_api/zzn.py DELETED
@@ -1,414 +0,0 @@
- """
- Flood Modeller Python API
- Copyright (C) 2024 Jacobs U.K. Limited
-
- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
-
- If you have any query about this program or this License, please contact us at support@floodmodeller.com or write to the following
- address: Jacobs UK Limited, Flood Modeller, Cottons Centre, Cottons Lane, London, SE1 2QG, United Kingdom.
- """
-
- from __future__ import annotations
-
- import ctypes as ct
- from pathlib import Path
- from typing import Any
-
- import numpy as np
- import pandas as pd
-
- from ._base import FMFile
- from .to_from_json import to_json
- from .util import handle_exception, is_windows
-
-
- class ZZN(FMFile):
-     """Reads and processes Flood Modeller 1D binary results format '.zzn'
-
-     Args:
-         zzn_filepath (str): Full filepath to model zzn file
-
-     Output:
-         Initiates 'ZZN' class object
-     """
-
-     _filetype: str = "ZZN"
-     _suffix: str = ".zzn"
-
-     @handle_exception(when="read")
-     def __init__(  # noqa: PLR0915
-         self,
-         zzn_filepath: str | Path | None = None,
-         from_json: bool = False,
-     ):
-         if from_json:
-             return
-         FMFile.__init__(self, zzn_filepath)
-
-         # Get zzn_dll path
-         lib = "zzn_read.dll" if is_windows() else "libzzn_read.so"
-         zzn_dll = Path(__file__).resolve().parent / "libs" / lib
-
-         # Catch LD_LIBRARY_PATH error for linux
-         try:
-             zzn_read = ct.CDLL(str(zzn_dll))
-         except OSError as e:
-             msg_1 = "libifport.so.5: cannot open shared object file: No such file or directory"
-             if msg_1 in str(e):
-                 msg_2 = "Set LD_LIBRARY_PATH environment variable to be floodmodeller_api/lib"
-                 raise OSError(msg_2) from e
-             raise
-
-         # Get zzl path
-         zzn = self._filepath
-         zzl = zzn.with_suffix(".zzl")
-         if not zzl.exists():
-             raise FileNotFoundError(
-                 "Error: Could not find associated .ZZL file. Ensure that the zzn results have an associated zzl file with matching name.",
-             )
-
-         self.meta: dict[str, Any] = {}  # Dict object to hold all metadata
-         self.data = {}  # Dict object to hold all data
-
-         # PROCESS_ZZL
-         self.meta["zzl_name"] = ct.create_string_buffer(bytes(str(zzl), "utf-8"), 255)
-         self.meta["zzn_name"] = ct.create_string_buffer(bytes(str(zzn), "utf-8"), 255)
-         self.meta["model_title"] = ct.create_string_buffer(b"", 128)
-         self.meta["nnodes"] = ct.c_int(0)
-         self.meta["label_length"] = ct.c_int(0)
-         self.meta["dt"] = ct.c_float(0.0)
-         self.meta["timestep0"] = ct.c_int(0)
-         self.meta["ltimestep"] = ct.c_int(0)
-         self.meta["save_int"] = ct.c_float(0.0)
-         self.meta["is_quality"] = ct.c_bool(False)
-         self.meta["nvars"] = ct.c_int(0)
-         self.meta["tzero"] = (ct.c_int * 5)()
-         self.meta["errstat"] = ct.c_int(0)
-         zzn_read.process_zzl(
-             ct.byref(self.meta["zzl_name"]),
-             ct.byref(self.meta["model_title"]),
-             ct.byref(self.meta["nnodes"]),
-             ct.byref(self.meta["label_length"]),
-             ct.byref(self.meta["dt"]),
-             ct.byref(self.meta["timestep0"]),
-             ct.byref(self.meta["ltimestep"]),
-             ct.byref(self.meta["save_int"]),
-             ct.byref(self.meta["is_quality"]),
-             ct.byref(self.meta["nvars"]),
-             ct.byref(self.meta["tzero"]),
-             ct.byref(self.meta["errstat"]),
-         )
-         # PROCESS_LABELS
-         self.meta["labels"] = (
-             ct.c_char * self.meta["label_length"].value * self.meta["nnodes"].value
-         )()
-         zzn_read.process_labels(
-             ct.byref(self.meta["zzl_name"]),
-             ct.byref(self.meta["nnodes"]),
-             ct.byref(self.meta["label_length"]),
-             ct.byref(self.meta["errstat"]),
-         )
-         for i in range(self.meta["nnodes"].value):
-             zzn_read.get_zz_label(
-                 ct.byref(ct.c_int(i + 1)),
-                 ct.byref(self.meta["labels"][i]),
-                 ct.byref(self.meta["errstat"]),
-             )
-         # PREPROCESS_ZZN
-         last_hr = (
-             (self.meta["ltimestep"].value - self.meta["timestep0"].value)
-             * self.meta["dt"].value
-             / 3600
-         )
-         self.meta["output_hrs"] = (ct.c_float * 2)(0.0, last_hr)
-         self.meta["aitimestep"] = (ct.c_int * 2)(
-             self.meta["timestep0"].value,
-             self.meta["ltimestep"].value,
-         )
-         self.meta["isavint"] = (ct.c_int * 2)()
-         zzn_read.preprocess_zzn(
-             ct.byref(self.meta["output_hrs"]),
-             ct.byref(self.meta["aitimestep"]),
-             ct.byref(self.meta["dt"]),
-             ct.byref(self.meta["timestep0"]),
-             ct.byref(self.meta["ltimestep"]),
-             ct.byref(self.meta["save_int"]),
-             ct.byref(self.meta["isavint"]),
-         )
-         # PROCESS_ZZN
-         self.meta["node_ID"] = ct.c_int(-1)
-         self.meta["savint_skip"] = ct.c_int(1)
-         self.meta["savint_range"] = ct.c_int(
-             int(
-                 (self.meta["isavint"][1] - self.meta["isavint"][0])
-                 / self.meta["savint_skip"].value,
-             ),
-         )
-         nx = self.meta["nnodes"].value
-         ny = self.meta["nvars"].value
-         nz = self.meta["savint_range"].value + 1
-         self.data["all_results"] = (ct.c_float * nx * ny * nz)()
-         self.data["max_results"] = (ct.c_float * nx * ny)()
-         self.data["min_results"] = (ct.c_float * nx * ny)()
-         self.data["max_times"] = (ct.c_int * nx * ny)()
-         self.data["min_times"] = (ct.c_int * nx * ny)()
-         zzn_read.process_zzn(
-             ct.byref(self.meta["zzn_name"]),
-             ct.byref(self.meta["node_ID"]),
-             ct.byref(self.meta["nnodes"]),
-             ct.byref(self.meta["is_quality"]),
-             ct.byref(self.meta["nvars"]),
-             ct.byref(self.meta["savint_range"]),
-             ct.byref(self.meta["savint_skip"]),
-             ct.byref(self.data["all_results"]),
-             ct.byref(self.data["max_results"]),
-             ct.byref(self.data["min_results"]),
-             ct.byref(self.data["max_times"]),
-             ct.byref(self.data["min_times"]),
-             ct.byref(self.meta["errstat"]),
-             ct.byref(self.meta["isavint"]),
-         )
-
-         # Convert useful metadata from C types into python types
-
-         self.meta["dt"] = self.meta["dt"].value
-         self.meta["nnodes"] = self.meta["nnodes"].value
-         self.meta["save_int"] = self.meta["save_int"].value
-         self.meta["nvars"] = self.meta["nvars"].value
-         self.meta["savint_range"] = self.meta["savint_range"].value
-
-         self.meta["zzn_name"] = self.meta["zzn_name"].value.decode()
-         self.meta["labels"] = [label.value.decode().strip() for label in list(self.meta["labels"])]
-         self.meta["model_title"] = self.meta["model_title"].value.decode()
-
-     def to_dataframe(  # noqa: PLR0911
-         self,
-         result_type: str = "all",
-         variable: str = "all",
-         include_time: bool = False,
-         multilevel_header: bool = True,
-     ) -> pd.Series | pd.DataFrame:
-         """Loads zzn results to pandas dataframe object.
-
-         Args:
-             result_type (str, optional): {'all'} | 'max' | 'min'
-                 Define whether to return all timesteps or just max/min results. Defaults to 'all'.
-             variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                 Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
-             include_time (bool, optional):
-                 Whether to include the time of max or min results. Defaults to False.
-             multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
-                 headers with the variable as first level and node label as second header. If False, the column
-                 names will be formatted "{node label}_{variable}". Defaults to True.
-
-         Returns:
-             pandas.DataFrame(): dataframe object of simulation results
-         """
-         nx = self.meta["nnodes"]
-         ny = self.meta["nvars"]
-         nz = self.meta["savint_range"] + 1
-         result_type = result_type.lower()
-
-         if result_type == "all":
-             arr = np.array(self.data["all_results"])
-             time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
-             vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
-             if multilevel_header:
-                 col_names = [vars_list, self.meta["labels"]]
-                 df = pd.DataFrame(
-                     arr.reshape(nz, nx * ny),
-                     index=time_index,
-                     columns=pd.MultiIndex.from_product(col_names),
-                 )
-                 df.index.name = "Time (hr)"
-                 if variable != "all":
-                     return df[variable.capitalize()]
-
-             else:
-                 col_names = [f"{node}_{var}" for var in vars_list for node in self.meta["labels"]]
-                 df = pd.DataFrame(arr.reshape(nz, nx * ny), index=time_index, columns=col_names)
-                 df.index.name = "Time (hr)"
-                 if variable != "all":
-                     use_cols = [col for col in df.columns if col.endswith(variable.capitalize())]
-                     return df[use_cols]
-             return df
-
-         if result_type in ("max", "min"):
-             arr = np.array(self.data[f"{result_type}_results"]).transpose()
-             node_index = self.meta["labels"]
-             col_names = [
-                 result_type.capitalize() + lbl
-                 for lbl in [
-                     " Flow",
-                     " Stage",
-                     " Froude",
-                     " Velocity",
-                     " Mode",
-                     " State",
-                 ]
-             ]
-             df = pd.DataFrame(arr, index=node_index, columns=col_names)
-             df.index.name = "Node Label"
-
-             if include_time:
-                 times = np.array(self.data[f"{result_type}_times"]).transpose()
-                 # transform timestep into hrs
-                 times = ((times - self.meta["timestep0"]) * self.meta["dt"]) / 3600
-                 time_col_names = [name + " Time(hrs)" for name in col_names]
-                 time_df = pd.DataFrame(times, index=node_index, columns=time_col_names)
-                 time_df.index.name = "Node Label"
-                 df = pd.concat([df, time_df], axis=1)
-                 new_col_order = [x for y in list(zip(col_names, time_col_names)) for x in y]
-                 df = df[new_col_order]
-                 if variable != "all":
-                     return df[
-                         [
-                             f"{result_type.capitalize()} {variable.capitalize()}",
-                             f"{result_type.capitalize()} {variable.capitalize()} Time(hrs)",
-                         ]
-                     ]
-                 return df
-
-             if variable != "all":
-                 return df[f"{result_type.capitalize()} {variable.capitalize()}"]
-             return df
-
-         raise ValueError(f'Result type: "{result_type}" not recognised')
-
-     def export_to_csv(
-         self,
-         save_location: str | Path = "default",
-         result_type: str = "all",
-         variable: str = "all",
-         include_time: bool = False,
-     ) -> None:
-         """Exports zzn results to CSV file.
-
-         Args:
-             save_location (str, optional): {default} | folder or file path
-                 Full or relative path to folder or csv file to save output csv, if no argument given or if set to 'default' then CSV will be saved in same location as ZZN file. Defaults to 'default'.
-             result_type (str, optional): {all} | max | min
-                 Define whether to output all timesteps or just max/min results. Defaults to 'all'.
-             variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                 Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
-             include_time (bool, optional):
-                 Whether to include the time of max or min results. Defaults to False.
-
-         Raises:
-             Exception: Raised if result_type set to invalid option
-         """
-         if save_location == "default":
-             save_location = Path(self.meta["zzn_name"]).with_suffix(".csv")
-         else:
-             save_location = Path(save_location)
-             if not save_location.is_absolute():
-                 # for if relative folder path given
-                 save_location = Path(Path(self.meta["zzn_name"]).parent, save_location)
-
-             if save_location.suffix != ".csv":  # Assumed to be pointing to a folder
-                 # Check if the folder exists, if not create it
-                 if not save_location.exists():
-                     Path.mkdir(save_location)
-                 save_location = Path(
-                     save_location,
-                     Path(self.meta["zzn_name"]).with_suffix(".csv").name,
-                 )
-
-             elif not save_location.parent.exists():
-                 Path.mkdir(save_location.parent)
-
-         result_type = result_type.lower()
-
-         if result_type.lower() not in ["all", "max", "min"]:
-             raise Exception(
-                 f" '{result_type}' is not a valid result type. Valid arguments are: 'all', 'max' or 'min' ",
-             )
-
-         df = self.to_dataframe(
-             result_type=result_type,
-             variable=variable,
-             include_time=include_time,
-         )
-         df.to_csv(save_location)
-         print(f"CSV saved to {save_location}")
-
-     def to_dict_of_dataframes(self, variable: str = "all") -> dict:
-         """Loads zzn results to a dictionary of pandas dataframe objects.
-
-         Args:
-             variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                 Specify a single output variable (e.g 'flow' or 'stage') or any combination passed as comma separated
-                 variable names. Defaults to 'all'.
-
-         Returns:
-             dict: dictionary of dataframe object of simulation results, keys corresponding to variables.
-         """
-         nx = self.meta["nnodes"]
-         ny = self.meta["nvars"]
-         nz = self.meta["savint_range"] + 1
-         output = {}
-
-         arr = np.array(self.data["all_results"])
-         time_index = np.linspace(self.meta["output_hrs"][0], self.meta["output_hrs"][1], nz)
-
-         vars_list = ["Flow", "Stage", "Froude", "Velocity", "Mode", "State"]
-
-         col_names = self.meta["labels"]
-         temp_arr = np.reshape(arr, (nz, ny, nx))
-
-         for i, var in enumerate(vars_list):
-             output[var] = pd.DataFrame(temp_arr[:, i, :], index=time_index, columns=col_names)
-             output[var].index.name = "Time (hr)"
-
-         output["Time (hr)"] = time_index
-
-         if variable != "all":
-             input_vars = variable.split(",")
-             for i, var in enumerate(input_vars):
-                 input_vars[i] = var.strip().capitalize()
-                 if input_vars[i] not in vars_list:
-                     raise Exception(
-                         f" '{input_vars[i]}' is not a valid variable name. Valid arguments are: {vars_list} ",
-                     )
-
-             for var in vars_list:
-                 if var not in input_vars:
-                     del output[var]
-         return output
-
-     def to_json(
-         self,
-         result_type: str = "all",
-         variable: str = "all",
-         include_time: bool = False,
-         multilevel_header: bool = True,
-     ) -> str:
-         """Loads zzn results to JSON object.
-
-         Args:
-             result_type (str, optional): {'all'} | 'max' | 'min'
-                 Define whether to return all timesteps or just max/min results. Defaults to 'all'.
-             variable (str, optional): {'all'} | 'Flow' | 'Stage' | 'Froude' | 'Velocity' | 'Mode' | 'State'
-                 Specify a single output variable (e.g 'flow' or 'stage'). Defaults to 'all'.
-             include_time (bool, optional):
-                 Whether to include the time of max or min results. Defaults to False.
-             multilevel_header (bool, optional): If True, the returned dataframe will have multi-level column
-                 headers with the variable as first level and node label as second header. If False, the column
-                 names will be formatted "{node label}_{variable}". Defaults to True.
-
-         Returns:
-             str: A JSON string representing the ZZN results.
-         """
-         df = self.to_dataframe(result_type, variable, include_time, multilevel_header)
-         return to_json(df)
-
-     @classmethod
-     def from_json(cls, json_string: str = ""):
-         # Not possible
-         raise NotImplementedError("It is not possible to build a ZZN class instance from JSON")
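
For context on what was removed, here is a minimal usage sketch of the 0.5.0 ZZN reader, pieced together from the docstrings in the deleted file above. The results path is hypothetical, and the import assumes ZZN was exposed at the package root in 0.5.0; the file list above suggests this functionality is superseded by the new floodmodeller_api/zz.py module in 0.5.1.

    from floodmodeller_api import ZZN  # available in 0.5.0; removed in 0.5.1

    # Hypothetical 1D results file; a matching .zzl file must sit alongside it
    zzn = ZZN("path/to/results.zzn")

    # Full time series with multi-level (variable, node label) column headers
    all_results = zzn.to_dataframe()

    # Peak stage per node, plus the time (hrs) at which the peak occurred
    max_stage = zzn.to_dataframe(result_type="max", variable="stage", include_time=True)

    # Write results to CSV alongside the .zzn file (save_location defaults to "default")
    zzn.export_to_csv()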