honeybee-radiance-postprocess 0.4.555__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. honeybee_radiance_postprocess/__init__.py +1 -0
  2. honeybee_radiance_postprocess/__main__.py +4 -0
  3. honeybee_radiance_postprocess/annual.py +73 -0
  4. honeybee_radiance_postprocess/annualdaylight.py +289 -0
  5. honeybee_radiance_postprocess/annualirradiance.py +35 -0
  6. honeybee_radiance_postprocess/breeam/__init__.py +1 -0
  7. honeybee_radiance_postprocess/breeam/breeam.py +552 -0
  8. honeybee_radiance_postprocess/cli/__init__.py +33 -0
  9. honeybee_radiance_postprocess/cli/abnt.py +392 -0
  10. honeybee_radiance_postprocess/cli/breeam.py +96 -0
  11. honeybee_radiance_postprocess/cli/datacollection.py +133 -0
  12. honeybee_radiance_postprocess/cli/grid.py +295 -0
  13. honeybee_radiance_postprocess/cli/leed.py +143 -0
  14. honeybee_radiance_postprocess/cli/merge.py +161 -0
  15. honeybee_radiance_postprocess/cli/mtxop.py +161 -0
  16. honeybee_radiance_postprocess/cli/postprocess.py +1092 -0
  17. honeybee_radiance_postprocess/cli/schedule.py +103 -0
  18. honeybee_radiance_postprocess/cli/translate.py +216 -0
  19. honeybee_radiance_postprocess/cli/two_phase.py +252 -0
  20. honeybee_radiance_postprocess/cli/util.py +121 -0
  21. honeybee_radiance_postprocess/cli/viewfactor.py +157 -0
  22. honeybee_radiance_postprocess/cli/well.py +110 -0
  23. honeybee_radiance_postprocess/data_type.py +102 -0
  24. honeybee_radiance_postprocess/dynamic.py +273 -0
  25. honeybee_radiance_postprocess/electriclight.py +24 -0
  26. honeybee_radiance_postprocess/en17037.py +304 -0
  27. honeybee_radiance_postprocess/helper.py +266 -0
  28. honeybee_radiance_postprocess/ies/__init__.py +1 -0
  29. honeybee_radiance_postprocess/ies/lm.py +224 -0
  30. honeybee_radiance_postprocess/ies/lm_schedule.py +248 -0
  31. honeybee_radiance_postprocess/leed/__init__.py +1 -0
  32. honeybee_radiance_postprocess/leed/leed.py +801 -0
  33. honeybee_radiance_postprocess/leed/leed_schedule.py +256 -0
  34. honeybee_radiance_postprocess/metrics.py +439 -0
  35. honeybee_radiance_postprocess/reader.py +80 -0
  36. honeybee_radiance_postprocess/results/__init__.py +4 -0
  37. honeybee_radiance_postprocess/results/annual_daylight.py +752 -0
  38. honeybee_radiance_postprocess/results/annual_irradiance.py +196 -0
  39. honeybee_radiance_postprocess/results/results.py +1416 -0
  40. honeybee_radiance_postprocess/type_hints.py +38 -0
  41. honeybee_radiance_postprocess/util.py +211 -0
  42. honeybee_radiance_postprocess/vis_metadata.py +49 -0
  43. honeybee_radiance_postprocess/well/__init__.py +1 -0
  44. honeybee_radiance_postprocess/well/well.py +509 -0
  45. honeybee_radiance_postprocess-0.4.555.dist-info/METADATA +79 -0
  46. honeybee_radiance_postprocess-0.4.555.dist-info/RECORD +50 -0
  47. honeybee_radiance_postprocess-0.4.555.dist-info/WHEEL +5 -0
  48. honeybee_radiance_postprocess-0.4.555.dist-info/entry_points.txt +2 -0
  49. honeybee_radiance_postprocess-0.4.555.dist-info/licenses/LICENSE +661 -0
  50. honeybee_radiance_postprocess-0.4.555.dist-info/top_level.txt +1 -0
@@ -0,0 +1,304 @@
+ """Functions for EN 17037 post-processing."""
+ from typing import Union
+ from pathlib import Path
+ import json
+ try:
+     import cupy as np
+     is_gpu = True
+ except ImportError:
+     is_gpu = False
+     import numpy as np
+
+ from ladybug.color import Colorset
+ from ladybug.datatype.fraction import Fraction
+ from ladybug.legend import LegendParameters
+
+ from .results.annual_daylight import AnnualDaylight
+ from .dynamic import DynamicSchedule
+ from .metrics import da_array2d
+ from .util import filter_array
+
+
+ def en17037_to_files(
+         array: np.ndarray, metrics_folder: Path, grid_info: dict) -> list:
+     """Compute annual EN 17037 metrics for a NumPy array and write the results
+     to a folder.
+
+     This function generates 6 different files for daylight autonomy based on
+     the varying level of recommendation in EN 17037.
+
+     Args:
+         array: A 2D NumPy array.
+         metrics_folder: An output folder where the results will be written to.
+             The folder will be created if it does not exist.
+         grid_info: A grid information dictionary.
+
+     Returns:
+         tuple -- Tuple of lists of paths for da, sda, and compliance folders.
+     """
+     recommendations = {
+         'minimum_illuminance': {
+             'minimum': 100,
+             'medium': 300,
+             'high': 500
+         },
+         'target_illuminance': {
+             'minimum': 300,
+             'medium': 500,
+             'high': 750
+         }
+     }
+     compliance_value = {
+         'minimum': 1,
+         'medium': 2,
+         'high': 3
+     }
+
+     grid_id = grid_info['full_id']
+     grid_count = grid_info['count']
+
+     da_folders = []
+     sda_folders = []
+     compliance_folders = []
+     da_folder = metrics_folder.joinpath('da')
+     sda_folder = metrics_folder.joinpath('sda')
+     compliance_folder = metrics_folder.joinpath('compliance_level')
+
+     for target_type, thresholds in recommendations.items():
+         compliance_level = None
+         for level, threshold in thresholds.items():
+             # da
+             da_level_folder = \
+                 da_folder.joinpath('_'.join([target_type, str(threshold)]))
+             da_file = da_level_folder.joinpath(f'{grid_id}.da')
+             if not da_file.parent.is_dir():
+                 da_file.parent.mkdir(parents=True)
+             da = da_array2d(array, total_occ=4380, threshold=threshold)
+             np.savetxt(da_file, da, fmt='%.2f')
+
+             # sda
+             sda_level_folder = \
+                 sda_folder.joinpath('_'.join([target_type, str(threshold)]))
+             sda_file = sda_level_folder.joinpath(f'{grid_id}.sda')
+             if not sda_file.parent.is_dir():
+                 sda_file.parent.mkdir(parents=True)
+             sda = (da >= 50).mean() * 100
+             with open(sda_file, 'w') as sdaf:
+                 sdaf.write(str(round(sda, 2)))
+
+             space_target = 50 if target_type == 'target_illuminance' else 95
+             if sda >= space_target:
+                 compliance_level = np.full((grid_count), compliance_value[level], dtype=int)
+
+             da_folders.append(da_file.parent)
+             sda_folders.append(sda_file.parent)
+
+         if compliance_level is None:
+             compliance_level = np.zeros(grid_count, dtype=int)
+         compliance_level_folder = compliance_folder.joinpath(target_type)
+         compliance_level_file = compliance_level_folder.joinpath(f'{grid_id}.pf')
+         if not compliance_level_file.parent.is_dir():
+             compliance_level_file.parent.mkdir(parents=True)
+         np.savetxt(compliance_level_file, compliance_level, fmt='%i')
+         compliance_folders.append(compliance_level_file.parent)
+
+     return da_folders, sda_folders, compliance_folders
+
+
+ def en17037_to_folder(
+         results: Union[str, AnnualDaylight], schedule: list,
+         states: DynamicSchedule = None, grids_filter: str = '*',
+         sub_folder: str = 'en17037') -> Path:
+     """Compute annual EN 17037 metrics in a folder and write them in a subfolder.
+
+     The results input is an output folder of the annual daylight recipe.
+
+     Args:
+         results: Results folder.
+         schedule: An annual schedule for 8760 hours of the year as a list of
+             values. This should be a daylight hours schedule.
+         grids_filter: A pattern to filter the grids. By default all the grids
+             will be processed.
+         states: A dictionary of states. Defaults to None.
+         sub_folder: An optional relative path for subfolder to copy results
+             files. Default: en17037.
+
+     Returns:
+         str -- Path to results folder.
+     """
+     if not isinstance(results, AnnualDaylight):
+         results = AnnualDaylight(results, schedule=schedule)
+     else:
+         results.schedule = schedule
+
+     total_occ = results.total_occ
+     occ_mask = results.occ_mask
+
+     grids_info = results._filter_grids(grids_filter=grids_filter)
+
+     sub_folder = Path(sub_folder)
+
+     if total_occ != 4380:
+         raise ValueError(
+             f'There are {total_occ} occupied hours in the schedule. According '
+             'to EN 17037 the schedule must consist of the daylight hours '
+             'which is defined as the half of the year with the largest '
+             'quantity of daylight')
+
+     for grid_info in grids_info:
+         array = results._array_from_states(
+             grid_info, states=states, res_type='total', zero_array=True)
+         if np.any(array):
+             array = np.apply_along_axis(
+                 filter_array, 1, array, occ_mask)
+         da_folders, sda_folders, compliance_folders = en17037_to_files(
+             array, sub_folder, grid_info)
+
+     # copy grids_info.json to all results folders
+     for folder in da_folders + sda_folders + compliance_folders:
+         grids_info_file = Path(folder, 'grids_info.json')
+         with open(grids_info_file, 'w') as outf:
+             json.dump(grids_info, outf, indent=2)
+
+     metric_info_dict = _annual_daylight_en17037_vis_metadata()
+     da_folder = sub_folder.joinpath('da')
+     for metric, data in metric_info_dict.items():
+         file_path = da_folder.joinpath(metric, 'vis_metadata.json')
+         with open(file_path, 'w') as fp:
+             json.dump(data, fp, indent=4)
+
+     return sub_folder
+
+
+
+ def _annual_daylight_en17037_vis_metadata():
+     """Return visualization metadata for annual daylight."""
+     da_lpar = LegendParameters(min=0, max=100, colors=Colorset.annual_comfort())
+
+     metric_info_dict = {
+         'minimum_illuminance_100': {
+             'type': 'VisualizationMetaData',
+             'data_type': Fraction('Daylight Autonomy - minimum 100 lux').to_dict(),
+             'unit': '%',
+             'legend_parameters': da_lpar.to_dict()
+         },
+         'minimum_illuminance_300': {
+             'type': 'VisualizationMetaData',
+             'data_type': Fraction('Daylight Autonomy - minimum 300 lux').to_dict(),
+             'unit': '%',
+             'legend_parameters': da_lpar.to_dict()
+         },
+         'minimum_illuminance_500': {
+             'type': 'VisualizationMetaData',
+             'data_type': Fraction('Daylight Autonomy - minimum 500 lux').to_dict(),
+             'unit': '%',
+             'legend_parameters': da_lpar.to_dict()
+         },
+         'target_illuminance_300': {
+             'type': 'VisualizationMetaData',
+             'data_type': Fraction('Daylight Autonomy - target 300 lux').to_dict(),
+             'unit': '%',
+             'legend_parameters': da_lpar.to_dict()
+         },
+         'target_illuminance_500': {
+             'type': 'VisualizationMetaData',
+             'data_type': Fraction('Daylight Autonomy - target 500 lux').to_dict(),
+             'unit': '%',
+             'legend_parameters': da_lpar.to_dict()
+         },
+         'target_illuminance_750': {
+             'type': 'VisualizationMetaData',
+             'data_type': Fraction('Daylight Autonomy - target 750 lux').to_dict(),
+             'unit': '%',
+             'legend_parameters': da_lpar.to_dict()
+         }
+     }
+
+     return metric_info_dict
+
+
+ def _annual_daylight_en17037_config():
+     """Return vtk-config for annual daylight EN 17037."""
+     cfg = {
+         "data": [
+             {
+                 "identifier": "Daylight Autonomy - target 300 lux",
+                 "object_type": "grid",
+                 "unit": "Percentage",
+                 "path": "target_illuminance/minimum/da",
+                 "hide": False,
+                 "legend_parameters": {
+                     "hide_legend": False,
+                     "min": 0,
+                     "max": 100,
+                     "color_set": "nuanced",
+                 },
+             },
+             {
+                 "identifier": "Daylight Autonomy - target 500 lux",
+                 "object_type": "grid",
+                 "unit": "Percentage",
+                 "path": "target_illuminance/medium/da",
+                 "hide": False,
+                 "legend_parameters": {
+                     "hide_legend": False,
+                     "min": 0,
+                     "max": 100,
+                     "color_set": "nuanced",
+                 },
+             },
+             {
+                 "identifier": "Daylight Autonomy - target 750 lux",
+                 "object_type": "grid",
+                 "unit": "Percentage",
+                 "path": "target_illuminance/high/da",
+                 "hide": False,
+                 "legend_parameters": {
+                     "hide_legend": False,
+                     "min": 0,
+                     "max": 100,
+                     "color_set": "nuanced",
+                 },
+             },
+             {
+                 "identifier": "Daylight Autonomy - minimum 100 lux",
+                 "object_type": "grid",
+                 "unit": "Percentage",
+                 "path": "minimum_illuminance/minimum/da",
+                 "hide": False,
+                 "legend_parameters": {
+                     "hide_legend": False,
+                     "min": 0,
+                     "max": 100,
+                     "color_set": "nuanced",
+                 },
+             },
+             {
+                 "identifier": "Daylight Autonomy - minimum 300 lux",
+                 "object_type": "grid",
+                 "unit": "Percentage",
+                 "path": "minimum_illuminance/medium/da",
+                 "hide": False,
+                 "legend_parameters": {
+                     "hide_legend": False,
+                     "min": 0,
+                     "max": 100,
+                     "color_set": "nuanced",
+                 },
+             },
+             {
+                 "identifier": "Daylight Autonomy - minimum 500 lux",
+                 "object_type": "grid",
+                 "unit": "Percentage",
+                 "path": "minimum_illuminance/high/da",
+                 "hide": False,
+                 "legend_parameters": {
+                     "hide_legend": False,
+                     "min": 0,
+                     "max": 100,
+                     "color_set": "nuanced",
+                 },
+             },
+         ]
+     }
+
+     return cfg
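Editor's note: the sketch below shows one way the en17037_to_folder entry point above might be driven from Python. It is a minimal, hedged example, not part of the package source; the results path and the daylight_hours.csv schedule file are assumptions for illustration. The only hard requirement visible in the source is that the schedule covers the hours of the year and marks exactly 4380 of them as occupied.

# minimal usage sketch (assumed paths; not part of the wheel contents)
from honeybee_radiance_postprocess.en17037 import en17037_to_folder

# hypothetical inputs: an annual-daylight results folder and a 0/1
# daylight-hours schedule with one value per hour of the year
results_folder = 'annual_daylight/results'
with open('daylight_hours.csv') as f:
    schedule = [int(float(line)) for line in f if line.strip()]
assert sum(schedule) == 4380, 'EN 17037 expects exactly 4380 daylight hours'

out_folder = en17037_to_folder(
    results_folder, schedule, grids_filter='*', sub_folder='en17037')
print(f'EN 17037 results written to: {out_folder}')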
@@ -0,0 +1,266 @@
+ """Helper functions."""
+ import json
+ from pathlib import Path
+ try:
+     import cupy as np
+     is_gpu = True
+ except ImportError:
+     is_gpu = False
+     import numpy as np
+
+ from honeybee.model import Model
+
+
+ def model_grid_areas(model, grids_info):
+     if isinstance(model, Model):
+         hb_model = model
+     else:
+         hb_model = Model.from_file(model)
+
+     full_ids = [grid_info['full_id'] for grid_info in grids_info]
+     sensor_grids = hb_model.properties.radiance.sensor_grids
+     grid_areas = []
+     for s_grid in sensor_grids:
+         if s_grid.identifier in full_ids:
+             if s_grid.mesh is not None:
+                 grid_areas.append(s_grid.mesh.face_areas)
+     grid_areas = [np.array(grid) for grid in grid_areas]
+     if not grid_areas:
+         grid_areas = [None] * len(full_ids)
+
+     return grid_areas
+
+
+ def grid_summary(
+     folder: Path, grid_areas: list = None,
+     grids_info: list = None, name: str = 'grid_summary',
+     grid_metrics: list = None, sub_folder: bool = True
+ ):
+     """Calculate a grid summary for a single metric.
+
+     Args:
+         folder: A folder with results.
+         grid_areas: A list of the area of each sensor.
+         grids_info: Grid information as a dictionary.
+         name: Optional filename of the grid summary.
+         grid_metrics: Additional customized metrics to calculate.
+         sub_folder: If set to True it will look for results in all sub-folders
+             in the folder input. Else it will look for results directly in the
+             folder input.
+     """
+     if sub_folder:
+         sub_folders = [sf for sf in folder.iterdir() if sf.is_dir()]
+     else:
+         sub_folders = [folder]
+
+     # set up the default data types
+     dtype_sensor_grid = ('Sensor Grid', 'O')
+     dtype_sensor_grid_id = ('Sensor Grid ID', 'O')
+     dtype_base = [
+         ('Mean', np.float32),
+         ('Minimum', np.float32),
+         ('Maximum', np.float32),
+         ('Uniformity Ratio', np.float32)
+     ]
+     dtype = []
+
+     # set up default format (for first two columns: str)
+     fmt = ['%s', '%s']
+
+     if grids_info is None:
+         for sf in sub_folders:
+             gi_file = sf.joinpath('grids_info.json')
+             if gi_file.exists():
+                 with open(gi_file) as gi:
+                     grids_info = json.load(gi)
+                 break
+         if grids_info is None:
+             # grids_info.json was not found in any folder
+             raise FileNotFoundError(
+                 'The file grids_info.json was not found in any folder.')
+
+     if grid_areas is None:
+         grid_areas = [None] * len(grids_info)
+
+
+     dtype.append(dtype_sensor_grid)
+     dtype.append(dtype_sensor_grid_id)
+     for sf in sub_folders:
+         _dtype = []
+         _fmt = []
+         for dt_b in dtype_base:
+             col_name = dt_b[0]
+             if sub_folder:
+                 col_name = '-'.join([sf.stem.upper(), col_name])
+             _dtype.append((col_name, np.float32))
+             _fmt.append('%.2f')
+         dtype.extend(_dtype)
+         fmt.extend(_fmt)
+
+         if grid_metrics is not None:
+             for grid_metric in grid_metrics:
+                 if len(grid_metric) == 1:
+                     if 'allOf' in grid_metric:
+                         _mname = []
+                         for gr_m in grid_metric['allOf']:
+                             _mname.append(_get_grid_metric_name(gr_m))
+                         mname = ' and '.join(_mname)
+                     elif 'anyOf' in grid_metric:
+                         _mname = []
+                         for gr_m in grid_metric['anyOf']:
+                             _mname.append(_get_grid_metric_name(gr_m))
+                         mname = ' or '.join(_mname)
+                     else:
+                         mname = _get_grid_metric_name(grid_metric)
+                 elif len(grid_metric) == 2:
+                     _mname = []
+                     for k, v in grid_metric.items():
+                         _mname.append(_get_grid_metric_name({k: v}))
+                     mname = ' and '.join(_mname)
+                 col_name = mname
+                 if sub_folder:
+                     col_name = '-'.join([sf.stem.upper(), col_name])
+                 dtype.append((col_name, np.float32))
+                 fmt.append('%.2f')
+
+     arrays = []
+     for grid_info, grid_area in zip(grids_info, grid_areas):
+         full_id = grid_info['full_id']
+         grid_name = grid_info['name']
+         data = [grid_name, full_id]
+         for sf in sub_folders:
+             grid_files = list(sf.glob(f'{full_id}.*'))
+             assert len(grid_files) == 1
+
+             array = np.loadtxt(grid_files[0])
+             _mean = array.mean()
+             _min = array.min()
+             _max = array.max()
+             _uniformity_ratio = _min / _mean * 100
+
+             data.extend([_mean, _min, _max, _uniformity_ratio])
+
+             if grid_metrics is not None:
+                 # get grid metrics
+                 grid_metrics_data = \
+                     _get_grid_metrics(array, grid_metrics, grid_info, grid_area)
+                 data.extend(grid_metrics_data)
+
+         arrays.append(tuple(data))
+
+     # create structured array
+     if is_gpu:
+         struct_array = None
+     else:
+         struct_array = np.array(arrays, dtype=dtype)
+
+     header = [dt[0] for dt in dtype]
+     # write header to file
+     with open(folder.joinpath(f'{name}.csv'), 'w') as grid_summary_file:
+         grid_summary_file.write(','.join(header))
+     # write structured array to grid_summary_file
+     with open(folder.joinpath(f'{name}.csv'), 'a') as grid_summary_file:
+         grid_summary_file.write('\n')
+         if is_gpu:
+             # CuPy doesn't support structured arrays; manually format rows
+             for row in arrays:
+                 row_str = ','.join(fmt_val % val for fmt_val, val in zip(fmt, row))
+                 grid_summary_file.write(row_str + '\n')
+         else:
+             np.savetxt(grid_summary_file, struct_array, delimiter=',', fmt=fmt)
+
+     return grid_summary_file
+
+
+ def _calculate_percentage(gr_metric_bool, grid_info, grid_area=None):
+     """Calculate percentage of floor area where True.
+
+     Args:
+         gr_metric_bool: A NumPy array of booleans.
+         grid_info: Grid information.
+         grid_area: A NumPy array of area for each sensor. (Default: None).
+
+     Returns:
+         The percentage of floor area where gr_metric_bool is True.
+     """
+     if grid_area is not None:
+         gr_metric_pct = \
+             grid_area[gr_metric_bool].sum() / grid_area.sum() * 100
+     else:
+         gr_metric_pct = \
+             gr_metric_bool.sum() / grid_info['count'] * 100
+     return gr_metric_pct
+
+
+ def _logical_operator(keyword):
+     lg = {
+         'minimum': '>',
+         'exclusiveMinimum': '>=',
+         'maximum': '<',
+         'exclusiveMaximum': '<='
+     }
+     return lg[keyword]
+
+
+ def _get_grid_metric_name(grid_metric):
+     if 'minimum' in grid_metric:
+         return f'{_logical_operator("minimum")}{grid_metric["minimum"]}'
+     elif 'exclusiveMinimum' in grid_metric:
+         return f'{_logical_operator("exclusiveMinimum")}{grid_metric["exclusiveMinimum"]}'
+     elif 'maximum' in grid_metric:
+         return f'{_logical_operator("maximum")}{grid_metric["maximum"]}'
+     elif 'exclusiveMaximum' in grid_metric:
+         return f'{_logical_operator("exclusiveMaximum")}{grid_metric["exclusiveMaximum"]}'
+
+
+ def _numeric_type(array, gr_metric):
+     if 'minimum' in gr_metric:
+         gr_metric_bool = array > gr_metric['minimum']
+     elif 'exclusiveMinimum' in gr_metric:
+         gr_metric_bool = array >= gr_metric['exclusiveMinimum']
+     elif 'maximum' in gr_metric:
+         gr_metric_bool = array < gr_metric['maximum']
+     elif 'exclusiveMaximum' in gr_metric:
+         gr_metric_bool = array <= gr_metric['exclusiveMaximum']
+     return gr_metric_bool
+
+
+ def _grid_summary_all_any(array, gr_metric, grid_info, grid_area, keyword):
+     gr_metric_arrays = []
+     for gr_m in gr_metric[keyword]:
+         assert len(gr_m) == 1
+         gr_metric_arrays.append(_numeric_type(array, gr_m))
+     if keyword == 'allOf':
+         gr_metric_bool = np.all(gr_metric_arrays, axis=0)
+     else:
+         gr_metric_bool = np.any(gr_metric_arrays, axis=0)
+     gr_metric_pct = \
+         _calculate_percentage(gr_metric_bool, grid_info, grid_area)
+     return gr_metric_pct
+
+
+ def _get_grid_metrics(array, grid_metrics, grid_info, grid_area):
+     grid_metrics_data = []
+     for gr_metric in grid_metrics:
+         if len(gr_metric) == 1:
+             if 'allOf' in gr_metric:
+                 gr_metric_pct = \
+                     _grid_summary_all_any(
+                         array, gr_metric, grid_info, grid_area, 'allOf')
+             elif 'anyOf' in gr_metric:
+                 gr_metric_pct = \
+                     _grid_summary_all_any(
+                         array, gr_metric, grid_info, grid_area, 'anyOf')
+             else:
+                 gr_metric_bool = _numeric_type(array, gr_metric)
+                 gr_metric_pct = \
+                     _calculate_percentage(gr_metric_bool, grid_info, grid_area)
+         elif len(gr_metric) == 2:
+             gr_metric_arrays = []
+             for k, threshold in gr_metric.items():
+                 gr_metric_arrays.append(_numeric_type(array, {k: threshold}))
+             gr_metric_bool = np.all(gr_metric_arrays, axis=0)
+             gr_metric_pct = \
+                 _calculate_percentage(gr_metric_bool, grid_info, grid_area)
+         grid_metrics_data.append(gr_metric_pct)
+     return grid_metrics_data
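Editor's note: grid_summary above scans each metric sub-folder, computes mean, minimum, maximum and uniformity ratio per sensor grid, and optionally adds percentage-of-area columns for the threshold dictionaries handled by _numeric_type, _grid_summary_all_any and _get_grid_metrics. The hedged sketch below shows one plausible call; the folder layout and thresholds are assumptions for illustration, not part of the package source.

# minimal usage sketch (assumed folder layout and thresholds)
from pathlib import Path
from honeybee_radiance_postprocess.helper import grid_summary

# hypothetical metrics folder with sub-folders (e.g. 'da', 'udi'), each
# holding one result file per grid plus a grids_info.json
metrics_folder = Path('annual_daylight/metrics')

# each dictionary uses the keywords handled by _numeric_type above:
# 'minimum', 'exclusiveMinimum', 'maximum', 'exclusiveMaximum', or an
# 'allOf'/'anyOf' list combining several of them
grid_metrics = [
    {'minimum': 50},                                  # % of sensors above 50
    {'allOf': [{'minimum': 50}, {'maximum': 3000}]},  # % between 50 and 3000
]

# writes grid_summary.csv into metrics_folder, one row per sensor grid;
# pass grid_areas (e.g. from model_grid_areas) to area-weight the percentages
grid_summary(metrics_folder, grid_metrics=grid_metrics, sub_folder=True)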
@@ -0,0 +1 @@
+ """honeybee-radiance-postprocess library."""