epinterface 1.0.5__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
epinterface/analysis/__init__.py ADDED
@@ -0,0 +1 @@
+ """A module for analyzing data from building energy models."""
epinterface/analysis/overheating.py ADDED
@@ -0,0 +1,851 @@
+ """A module for computing and analyzing metrics related to overheating, such as heat index, exceedance hours, etc."""
+
+ from dataclasses import dataclass
+ from typing import cast
+
+ import numpy as np
+ import pandas as pd
+ from archetypal.idfclass.sql import Sql
+ from numpy.typing import NDArray
+
+
+ def calculate_hi_categories(
+     dbt_mat: NDArray[np.float64],
+     rh_mat: NDArray[np.float64],
+     zone_names: list[str] | None = None,
+     zone_weights: NDArray[np.float64] | None = None,
+ ) -> pd.DataFrame:
+     """Computes the heat index per hour for each zone and bins it into 5 categories (counts sum to 8760), then takes the most common and the worst category across zones at each timestep.
+
+     Uses the Rothfusz regression and NOAA categories:
+     - Extreme Danger: ≥130°F
+     - Danger: 105 to 129°F
+     - Extreme Caution: 90 to 104°F
+     - Caution: 80 to 89°F
+     - Normal: <80°F
+
+     Args:
+         dbt_mat (np.ndarray): The dry bulb temperature matrix (zones x timesteps).
+         rh_mat (np.ndarray): The relative humidity matrix (zones x timesteps).
+         zone_names (list[str] | None): The names of the zones. If None, the zones will be named "Zone 001", "Zone 002", etc.
+         zone_weights (NDArray[np.float64] | None): The weights of the zones. If None, the zones will be weighted equally when summing the heat index.
+
+     Returns:
+         cat_counts (pd.DataFrame): A dataframe with the count of timesteps that the building spends in each category, using various aggregation methods.
+
+     """
+     zone_names_ = (
+         [f"Zone {i:03d}" for i in range(dbt_mat.shape[0])]
+         if zone_names is None
+         else zone_names
+     )
+     zone_weights_ = (
+         zone_weights if zone_weights is not None else np.ones(dbt_mat.shape[0])
+     )
+     if len(zone_names_) != len(zone_weights_):
+         msg = f"Zone names and zone weights must have the same length. Got {len(zone_names_)} zone names and {len(zone_weights_)} zone weights."
+         raise ValueError(msg)
+     normalized_zone_weights: NDArray[np.float64] = zone_weights_ / zone_weights_.sum()
+     n_zones = len(zone_weights_)
+     check_timeseries_shape(dbt_mat, expected_zones=n_zones, expected_timesteps=8760)
+     check_timeseries_shape(rh_mat, expected_zones=n_zones, expected_timesteps=8760)
+     # we use an index map to resolve ties and guarantee that the "worst" category is always chosen in the event of a tie.
+     cat_index_map = {
+         "Extreme Danger": 4,
+         "Danger": 3,
+         "Extreme Caution": 2,
+         "Caution": 1,
+         "Normal": 0,
+     }
+
+     def compute_hi(temp_c, rh):
+         # Convert to Fahrenheit
+         temp_f = temp_c * 9 / 5 + 32
+         hi_f = (
+             -42.379
+             + 2.04901523 * temp_f
+             + 10.14333127 * rh
+             - 0.22475541 * temp_f * rh
+             - 6.83783e-3 * temp_f**2
+             - 5.481717e-2 * rh**2
+             + 1.22874e-3 * temp_f**2 * rh
+             + 8.5282e-4 * temp_f * rh**2
+             - 1.99e-6 * temp_f**2 * rh**2
+         )
+         return hi_f
+
+     def compute_category(hi_f):
+         return np.where(
+             hi_f >= 130,
+             cat_index_map["Extreme Danger"],
+             np.where(
+                 hi_f >= 105,
+                 cat_index_map["Danger"],
+                 np.where(
+                     hi_f >= 90,
+                     cat_index_map["Extreme Caution"],
+                     np.where(
+                         hi_f >= 80, cat_index_map["Caution"], cat_index_map["Normal"]
+                     ),
+                 ),
+             ),
+         )
+
+     heat_index_mat = compute_hi(dbt_mat, rh_mat)
+     zone_weighted_heat_index = heat_index_mat * normalized_zone_weights.reshape(-1, 1)
+     aggregated_heat_index = zone_weighted_heat_index.sum(axis=0)
+
+     bins_by_hour_and_zone = compute_category(heat_index_mat)
+     bins_by_hour_building = compute_category(aggregated_heat_index)
+
+     df = pd.DataFrame(
+         bins_by_hour_and_zone,
+         index=zone_names_,
+         columns=pd.RangeIndex(0, 8760),
+     )
+
+     val_counts_by_timestep = [df.loc[zone].value_counts() for zone in zone_names_]
+     cat_counts_by_zone = (
+         pd.concat(val_counts_by_timestep, axis=1, keys=zone_names_, names=["Group"])
+         .fillna(0)
+         .sort_index()
+     )
+     cat_counts_by_zone = cat_counts_by_zone.rename(
+         index={ix: val for val, ix in cat_index_map.items()}
+     )
+     cat_counts_by_zone.index.name = "Heat Index Category"
+
+     modes_by_timestep: pd.DataFrame | pd.Series = cast(
+         pd.DataFrame | pd.Series, df.mode(axis=0)
+     )
+
+     worst_by_timestep = df.max(axis=0)
+
+     # since some timesteps may have multiple modes, we choose the worst of the tied modes.
+     if isinstance(modes_by_timestep, pd.Series):
+         modes = modes_by_timestep
+     else:
+         modes = modes_by_timestep.max(axis=0)
+
+     val_count_modes = modes.value_counts()
+     val_count_modes = val_count_modes.rename(
+         index={ix: val for val, ix in cat_index_map.items()}
+     )
+
+     val_count_worst = worst_by_timestep.value_counts()
+     val_count_worst = val_count_worst.rename(
+         index={ix: val for val, ix in cat_index_map.items()}
+     )
+
+     val_counts_building = pd.Series(
+         bins_by_hour_building, name="Building"
+     ).value_counts()
+     val_counts_building = val_counts_building.rename(
+         index={ix: val for val, ix in cat_index_map.items()}
+     )
+
+     for cat in cat_index_map:
+         if cat not in val_count_modes.index:
+             val_count_modes[cat] = 0
+         if cat not in val_count_worst.index:
+             val_count_worst[cat] = 0
+         if cat not in val_counts_building.index:
+             val_counts_building[cat] = 0
+
+     building_counts = pd.concat(
+         [val_count_modes, val_count_worst, val_counts_building],
+         axis=1,
+         keys=["Modal per Timestep", "Worst per Timestep", "Zone Weighted"],
+     ).loc[list(cat_index_map.keys())]
+     building_counts.index.name = "Heat Index Category"
+     building_counts.columns.name = "Group"
+
+     return pd.concat(
+         [building_counts.T, cat_counts_by_zone.T],
+         axis=0,
+         names=["Aggregation Unit"],
+         keys=["Building", "Zone"],
+     ).rename(columns={c: f"{c} [hr]" for c in cat_index_map})
+
+
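As a sanity check on the Rothfusz regression above, the heat index for a single hot, humid hour can be computed by hand. A minimal sketch (the 32 °C / 60% RH inputs are illustrative, not from the package):

    temp_c, rh = 32.0, 60.0  # illustrative: 32 degC at 60% relative humidity
    temp_f = temp_c * 9 / 5 + 32  # 89.6 degF
    hi_f = (
        -42.379
        + 2.04901523 * temp_f
        + 10.14333127 * rh
        - 0.22475541 * temp_f * rh
        - 6.83783e-3 * temp_f**2
        - 5.481717e-2 * rh**2
        + 1.22874e-3 * temp_f**2 * rh
        + 8.5282e-4 * temp_f * rh**2
        - 1.99e-6 * temp_f**2 * rh**2
    )
    print(round(hi_f, 1))  # ~98.7 degF, which bins as "Extreme Caution" (90 to 104 degF)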
+ def check_timeseries_shape(
+     ts: np.ndarray, expected_zones: int | None = None, expected_timesteps: int = 8760
+ ) -> None:
+     """Checks if the timeseries is a 2D array with shape (zones, timesteps).
+
+     Args:
+         ts (np.ndarray): The timeseries to check.
+         expected_zones (int | None): The expected number of zones. If None, the number of zones will not be checked.
+         expected_timesteps (int): The expected number of timesteps.
+
+     Raises:
+         ValueError: If the timeseries is not a 2D array with shape (zones, timesteps).
+         ValueError: If the timeseries has a different number of zones than the expected number of zones.
+         ValueError: If the timeseries has a different number of timesteps than the expected number of timesteps.
+
+     Returns:
+         None
+     """
+     if ts.ndim != 2:
+         msg = f"Timeseries must be a 2D array with shape (zones, timesteps). Got shape {ts.shape}."
+         raise ValueError(msg)
+     if ts.shape[0] != expected_zones and expected_zones is not None:
+         msg = f"Timeseries must have {expected_zones} zones. Got {ts.shape[0]} zones."
+         raise ValueError(msg)
+     if ts.shape[1] != expected_timesteps:
+         msg = f"Timeseries must have {expected_timesteps} timesteps. Got {ts.shape[1]} timesteps."
+         raise ValueError(msg)
+
+
+ def calculate_edh(
+     dbt_mat: NDArray[np.float64],
+     rh_mat: NDArray[np.float64],
+     mrt_mat: NDArray[np.float64],
+     met: float = 1.1,
+     clo: float = 0.5,
+     v: float = 0.1,
+     comfort_bounds: tuple[float, float] = (22, 27),
+     zone_names: list[str] | None = None,
+     zone_weights: NDArray[np.float64] | None = None,
+ ) -> pd.DataFrame:
+     """Calculates Exceedance Degree Hours (EDH) using fixed comfort bounds and separates hot and cold contributions.
+
+     When aggregating at the building scale, we compute a few variants:
+     - Zone Weighted: The weighted average of the EDHs for all zones.
+     - Worst Zone: The EDH for the zone with the worst EDH.
+
+     Args:
+         dbt_mat (np.ndarray): The dry bulb temperature matrix (zones x timesteps).
+         rh_mat (np.ndarray): The relative humidity matrix (zones x timesteps).
+         mrt_mat (np.ndarray): The mean radiant temperature matrix (zones x timesteps).
+         met (float): The metabolic rate in metabolic equivalents (MET).
+         clo (float): The clothing insulation in clo.
+         v (float): The air speed in meters per second.
+         comfort_bounds (tuple[float, float]): The bounds in degrees Celsius considered comfortable.
+         zone_names (list[str] | None): The names of the zones. If None, the zones will be named "Zone 001", "Zone 002", etc.
+         zone_weights (NDArray[np.float64] | None): The weights of the zones. If None, the zones will be weighted equally when summing the EDHs.
+
+     Returns:
+         edhs (pd.DataFrame): A dataframe with the building-weighted EDHs and the EDHs for each zone.
+     """
+     from pythermalcomfort.models import set_tmp
+
+     _zone_weights = (
+         zone_weights if zone_weights is not None else np.ones(dbt_mat.shape[0])
+     )
+     _zone_names = (
+         [f"Zone {i:03d}" for i in range(dbt_mat.shape[0])]
+         if zone_names is None
+         else zone_names
+     )
+
+     zone_names_len = len(_zone_names)
+     if zone_names_len != len(_zone_weights):
+         msg = f"Zone names and zone weights must have the same length. Got {zone_names_len} zone names and {len(_zone_weights)} zone weights."
+         raise ValueError(msg)
+
+     check_timeseries_shape(
+         dbt_mat, expected_zones=zone_names_len, expected_timesteps=8760
+     )
+     check_timeseries_shape(
+         mrt_mat,
+         expected_zones=zone_names_len,
+         expected_timesteps=8760,
+     )
+     check_timeseries_shape(
+         rh_mat, expected_zones=zone_names_len, expected_timesteps=8760
+     )
+
+     SETs = np.stack(
+         [
+             set_tmp(
+                 tdb=dbt_row,
+                 tr=mrt_row,
+                 rh=relative_humidity_row,
+                 met=met,
+                 clo=clo,
+                 v=v,
+                 limit_inputs=False,  # TODO: remove this, or set it and handle NaN cases appropriately.
+             )["set"]
+             for dbt_row, mrt_row, relative_humidity_row in zip(
+                 dbt_mat,
+                 mrt_mat,
+                 rh_mat,
+                 strict=False,
+             )
+         ],
+         axis=0,
+     )
+     low, high = np.sort(comfort_bounds)
+     hot_edh = np.maximum(0, SETs - high)
+     cold_edh = np.maximum(0, low - SETs)
+     total_edh = hot_edh + cold_edh
+
+     edhs_by_zone: NDArray[np.float64] = np.stack(
+         [total_edh, hot_edh, cold_edh], axis=2
+     ).sum(axis=1)
+     edhs = pd.DataFrame(
+         edhs_by_zone,
+         index=_zone_names,
+         columns=["Total", "Heat Exceedance", "Cold Exceedance"],
+         dtype=np.float64,
+     )
+     edhs.index.name = "Zone"
+     edhs.columns.name = "EDH Type"
+
+     normalized_zone_weights: NDArray[np.float64] = _zone_weights / _zone_weights.sum()
+     # weighted_edhs_by_zone: NDArray[np.float64] = (
+     #     edhs_by_zone * normalized_zone_weights.reshape(-1, 1)
+     # )
+     weighted_edhs_by_zone = edhs.mul(normalized_zone_weights, axis=0)
+
+     aggregated_edhs = weighted_edhs_by_zone.sum(axis=0)
+     worst_edh = edhs.max(axis=0)
+
+     building_edhs = pd.concat(
+         [aggregated_edhs, worst_edh], axis=1, keys=["Zone Weighted", "Worst Zone"]
+     ).T
+     final = pd.concat(
+         [building_edhs, edhs],
+         axis=0,
+         names=["Aggregation Unit", "Group"],
+         keys=["Building", "Zone"],
+     )
+     return final.rename(columns={c: f"{c} [degC-hr]" for c in final.columns})
+
+
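Once the SET series exists, the exceedance arithmetic above is just a pair of clamped differences. A minimal sketch with an illustrative five-hour SET series and the default (22, 27) °C comfort band:

    import numpy as np

    sets = np.array([20.0, 23.5, 28.0, 30.0, 21.0])  # illustrative SET values in degC
    low, high = 22.0, 27.0
    hot_edh = np.maximum(0, sets - high)    # [0, 0, 1, 3, 0] -> 4 degC-hr of heat exceedance
    cold_edh = np.maximum(0, low - sets)    # [2, 0, 0, 0, 1] -> 3 degC-hr of cold exceedance
    total_edh = (hot_edh + cold_edh).sum()  # 7 degC-hr total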
+ def calculate_basic_overheating_stats(
+     dbt_mat: NDArray[np.float64],
+     zone_names: list[str] | None = None,
+     zone_weights: NDArray[np.float64] | None = None,
+     overheating_thresholds: tuple[float, ...] = (26, 30, 35),
+     undercooling_thresholds: tuple[float, ...] = (10, 5),
+ ) -> pd.DataFrame:
+     """Calculates basic overheating hours by zone and for the whole building.
+
+     When aggregating at the building scale, we compute a few variants:
+     - Any Zone: The number of timesteps that the threshold is violated for any zone in the whole building.
+     - Zone Weighted: The number of timesteps that the threshold is violated for the weighted average of all zones.
+     - Worst Zone: The number of timesteps that the threshold is violated for the zone with the worst number of violations.
+
+     Args:
+         dbt_mat (NDArray[np.float64]): The dry bulb temperature matrix (zones x timesteps).
+         zone_names (list[str] | None): The names of the zones. If None, the zones will be named "Zone 001", "Zone 002", etc.
+         zone_weights (NDArray[np.float64] | None): The weights of the zones. If None, the zones will be weighted equally when summing the overheating hours.
+         overheating_thresholds (tuple[float, ...]): The thresholds for overheating.
+         undercooling_thresholds (tuple[float, ...]): The thresholds for undercooling.
+
+     Returns:
+         hours (pd.DataFrame): A dataframe with the overheating and undercooling hours by threshold for the whole building, and by zone and threshold for each zone.
+
+     """
+     zone_names_ = (
+         [f"Zone {i:03d}" for i in range(dbt_mat.shape[0])]
+         if zone_names is None
+         else zone_names
+     )
+     zone_weights_ = (
+         zone_weights if zone_weights is not None else np.ones(dbt_mat.shape[0])
+     )
+     if len(zone_names_) != len(zone_weights_):
+         msg = f"Zone names and zone weights must have the same length. Got {len(zone_names_)} zone names and {len(zone_weights_)} zone weights."
+         raise ValueError(msg)
+     normalized_zone_weights: NDArray[np.float64] = zone_weights_ / zone_weights_.sum()
+     n_zones = len(zone_weights_)
+     check_timeseries_shape(dbt_mat, expected_zones=n_zones, expected_timesteps=8760)
+
+     overheat_thresholds = np.array(overheating_thresholds)
+     undercool_thresholds = np.array(undercooling_thresholds)
+
+     # threshold comparisons have shape (n_thresholds, n_zones, n_timesteps)
+     over_thresh_by_zone = dbt_mat > overheat_thresholds.reshape(-1, 1, 1)
+     under_thresh_by_zone = dbt_mat < undercool_thresholds.reshape(-1, 1, 1)
+     # max returns true if any of the zones are above the threshold
+     # thresh_any has shape (n_thresholds, n_timesteps)
+     over_thresh_any: NDArray[np.bool_] = over_thresh_by_zone.max(
+         axis=1
+     )  # max returns true if threshold is exceeded for any zone
+     under_thresh_any: NDArray[np.bool_] = under_thresh_by_zone.max(axis=1)
+
+     # sum returns the number of timesteps that the threshold is exceeded for each zone
+     # hours_by_zone has shape (n_thresholds, n_zones)
+     over_hours_by_zone = over_thresh_by_zone.sum(axis=-1)
+     under_hours_by_zone = under_thresh_by_zone.sum(axis=-1)
+
+     over_hours_by_zone = pd.DataFrame(
+         over_hours_by_zone,
+         columns=pd.Index(zone_names_, name="Zone"),
+         index=pd.Index(overheat_thresholds, name="Threshold [degC]"),
+         dtype=np.float64,
+     )
+     under_hours_by_zone = pd.DataFrame(
+         under_hours_by_zone,
+         columns=pd.Index(zone_names_, name="Zone"),
+         index=pd.Index(undercool_thresholds, name="Threshold [degC]"),
+         dtype=np.float64,
+     )
+
+     worst_overhours_by_threshold = over_hours_by_zone.max(axis=1)
+     worst_underhours_by_threshold = under_hours_by_zone.max(axis=1)
+
+     weighted_overhours_by_threshold = over_hours_by_zone.mul(
+         normalized_zone_weights
+     ).sum(axis=1)
+     weighted_underhours_by_threshold = under_hours_by_zone.mul(
+         normalized_zone_weights
+     ).sum(axis=1)
+
+     over_hours_by_zone["Any Zone"] = over_thresh_any.sum(axis=1)
+     under_hours_by_zone["Any Zone"] = under_thresh_any.sum(axis=1)
+     over_hours_by_zone["Zone Weighted"] = weighted_overhours_by_threshold
+     under_hours_by_zone["Zone Weighted"] = weighted_underhours_by_threshold
+     over_hours_by_zone["Worst Zone"] = worst_overhours_by_threshold
+     under_hours_by_zone["Worst Zone"] = worst_underhours_by_threshold
+     over_hours_by_zone["Equally Weighted"] = over_hours_by_zone.mean(axis=1)
+     under_hours_by_zone["Equally Weighted"] = under_hours_by_zone.mean(axis=1)
+
+     over_whole_bldg = over_hours_by_zone[
+         ["Any Zone", "Zone Weighted", "Worst Zone", "Equally Weighted"]
+     ]
+     under_whole_bldg = under_hours_by_zone[
+         ["Any Zone", "Zone Weighted", "Worst Zone", "Equally Weighted"]
+     ]
+     over_hours_by_zone = over_hours_by_zone.drop(
+         columns=["Any Zone", "Zone Weighted", "Worst Zone", "Equally Weighted"]
+     )
+     under_hours_by_zone = under_hours_by_zone.drop(
+         columns=["Any Zone", "Zone Weighted", "Worst Zone", "Equally Weighted"]
+     )
+
+     ouh_counts_by_zone_and_threshold = pd.concat(
+         [over_hours_by_zone, under_hours_by_zone],
+         axis=0,
+         keys=["Overheat", "Underheat"],
+         names=["Metric", "Threshold [degC]"],
+     )
+
+     ouh_counts_by_building_and_threshold = pd.concat(
+         [over_whole_bldg, under_whole_bldg],
+         axis=0,
+         keys=["Overheat", "Underheat"],
+         names=["Metric", "Threshold [degC]"],
+     )
+
+     combined = pd.concat(
+         [ouh_counts_by_building_and_threshold, ouh_counts_by_zone_and_threshold],
+         axis=1,
+         names=["Aggregation Unit", "Group"],
+         keys=["Building", "Zone"],
+     ).T
+     combined.columns.names = ["Polarity", "Threshold [degC]"]
+     combined = (
+         cast(pd.Series, cast(pd.DataFrame, combined.stack()).stack())
+         .rename("Total Hours [hr]")
+         .dropna()
+         .to_frame()
+         .reorder_levels(
+             ["Polarity", "Threshold [degC]", "Aggregation Unit", "Group"], axis=0
+         )
+     )
+
+     return combined
+
+
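The shape bookkeeping in the threshold comparisons above is easiest to see on a toy matrix: reshaping the thresholds to (n_thresholds, 1, 1) broadcasts them against the (zones x timesteps) matrix. A minimal sketch with illustrative values:

    import numpy as np

    dbt = np.array([[25.0, 27.0, 31.0, 24.0],
                    [26.5, 29.0, 33.0, 25.0]])   # 2 zones x 4 timesteps
    thresholds = np.array([26.0, 30.0]).reshape(-1, 1, 1)
    over = dbt > thresholds                      # shape (2 thresholds, 2 zones, 4 timesteps)
    print(over.max(axis=1).sum(axis=1))          # "Any Zone" hours per threshold: [3 1]
    print(over.sum(axis=-1))                     # hours per (threshold, zone): [[2 3] [1 1]]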
+ def _consecutive_run_lengths_vectorized(
+     M_diff: NDArray[np.float64],
+ ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
+     """Compute lengths and integrals of consecutive runs where M_diff > 0, along the last axis.
+
+     M_diff has shape (..., n_timesteps). For each (..., :) slice, identifies runs where
+     M_diff > 0 and returns:
+     - lengths: (n_slices, max_runs) count of timesteps per run, NaN-padded.
+     - integrals: (n_slices, max_runs) sum of M_diff over each run (degree-hours), NaN-padded.
+
+     Uses run_id + bincount (and bincount with weights for integrals) so no Python loops
+     over rows or timesteps.
+     """
+     M = M_diff > 0
+     orig_shape = M.shape
+     n_timesteps = orig_shape[-1]
+     n_slices = int(np.prod(orig_shape[:-1]))
+     M_2d = M.reshape(n_slices, n_timesteps)
+     M_diff_2d = M_diff.reshape(n_slices, n_timesteps)
+
+     # Run starts: True where a new run of True begins
+     run_start = np.empty_like(M_2d)
+     run_start[:, 0] = M_2d[:, 0]
+     run_start[:, 1:] = M_2d[:, 1:] & ~M_2d[:, :-1]
+
+     # Run id: 0 where False, 1,2,3,... for each run of True
+     run_id = np.where(M_2d, np.cumsum(run_start, axis=1), 0)
+
+     max_run_id = int(run_id.max())
+     if max_run_id == 0:
+         nan_out = np.full((n_slices, 1), np.nan, dtype=np.float64)
+         return nan_out, nan_out.copy()
+
+     # Linear index for (row, run_id); run_id is 1..max_run_id
+     flat_row = np.repeat(np.arange(n_slices, dtype=np.intp), n_timesteps)
+     flat_run = run_id.ravel()
+     mask = flat_run > 0
+     flat_row = flat_row[mask]
+     flat_run = flat_run[mask]
+     flat_diff = M_diff_2d.ravel()[mask]
+
+     idx = flat_row * max_run_id + (flat_run - 1)
+     counts_flat = np.bincount(idx, minlength=n_slices * max_run_id)
+     counts = counts_flat.reshape(n_slices, max_run_id).astype(np.float64)
+     counts[counts == 0] = np.nan
+
+     integrals_flat = np.bincount(
+         idx, weights=flat_diff, minlength=n_slices * max_run_id
+     )
+     integrals = integrals_flat.reshape(n_slices, max_run_id).astype(np.float64)
+     integrals[np.isnan(counts)] = np.nan
+     return counts, integrals
+
+
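The run-labeling trick above is easiest to follow on a toy series: mark where a run of positive excess starts, turn the cumulative sum of those starts into a run id, then bincount the ids (optionally weighted by the excess) to get per-run lengths and integrals. A minimal single-slice sketch:

    import numpy as np

    m_diff = np.array([-1.0, 0.5, 1.5, -2.0, -0.5, 2.0])  # illustrative excess series
    m = m_diff > 0                                 # [F, T, T, F, F, T]
    run_start = np.r_[m[:1], m[1:] & ~m[:-1]]      # [F, T, F, F, F, T]
    run_id = np.where(m, np.cumsum(run_start), 0)  # [0, 1, 1, 0, 0, 2]
    lengths = np.bincount(run_id)[1:]                    # [2, 1] hours per run
    integrals = np.bincount(run_id, weights=m_diff)[1:]  # [2.0, 2.0] degree-hours per run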
+ def calculate_consecutive_hours_above_threshold(
+     dbt_mat: NDArray[np.float64],
+     overheating_thresholds: list[float] | tuple[float, ...] = (26, 30, 35),
+     underheating_thresholds: list[float] | tuple[float, ...] = (10, 5),
+     zone_names: list[str] | None = None,
+ ) -> pd.DataFrame:
+     """Calculates consecutive hours above (overheating) or below (underheating) thresholds per zone.
+
+     For each overheating threshold and each zone, computes the lengths of every run of
+     consecutive hours above that threshold. For each underheating threshold and each zone,
+     computes the lengths of every run of consecutive hours below that threshold. Uses
+     vectorized operations across thresholds and zones; only the time dimension is
+     processed with run-length logic.
+
+     Args:
+         dbt_mat (NDArray[np.float64]): The dry bulb temperature matrix (zones x timesteps).
+         overheating_thresholds (list[float] | tuple[float, ...]): Thresholds for consecutive hours *above*. Defaults to (26, 30, 35).
+         underheating_thresholds (list[float] | tuple[float, ...]): Thresholds for consecutive hours *below*. Defaults to (10, 5).
+         zone_names (list[str] | None): The names of the zones. If None, zones are named "Zone 001", "Zone 002", etc.
+
+     Returns:
+         pd.DataFrame: MultiIndex (Polarity, Threshold [degC], Zone, Streak Index) with columns "Streak [hr]" (run lengths) and "Integral [deg-hr]" (sum of excess/deficit per run, degree-hours); NaN-padded entries are dropped.
+     """
+     n_zones, _ = dbt_mat.shape
+     zone_names_ = (
+         [f"Zone {i + 1:03d}" for i in range(n_zones)]
+         if zone_names is None
+         else zone_names
+     )
+
+     over_arr = np.asarray(overheating_thresholds, dtype=np.float64)
+     under_arr = np.asarray(underheating_thresholds, dtype=np.float64)
+
+     check_timeseries_shape(dbt_mat, expected_zones=n_zones, expected_timesteps=8760)
+
+     # Excess/deficit for integrals: (n_thresholds, n_zones, n_timesteps)
+     over_diff = dbt_mat - over_arr.reshape(-1, 1, 1)
+     under_diff = under_arr.reshape(-1, 1, 1) - dbt_mat
+
+     # Compute run lengths and integrals for all (threshold, zone) slices in one go per metric
+     if over_arr.size > 0:
+         over_lengths, over_integrals = _consecutive_run_lengths_vectorized(over_diff)
+     else:
+         over_lengths = over_integrals = np.empty((0, 0), dtype=np.float64)
+     if under_arr.size > 0:
+         under_lengths, under_integrals = _consecutive_run_lengths_vectorized(under_diff)
+     else:
+         under_lengths = under_integrals = np.empty((0, 0), dtype=np.float64)
+
+     def build_df(
+         lengths: NDArray[np.float64],
+         integrals: NDArray[np.float64],
+         thresholds: NDArray[np.float64],
+         metric: str,
+     ) -> pd.DataFrame:
+         if lengths.size == 0:
+             return pd.DataFrame()
+         n_runs = lengths.shape[1]
+         index = pd.MultiIndex.from_product(
+             [
+                 [metric],
+                 list(thresholds),
+                 zone_names_,
+             ],
+             names=["Polarity", "Threshold [degC]", "Zone"],
+         )
+         streak_cols = [f"{i:05d}" for i in range(n_runs)]
+         integral_cols = [f"{i:05d}" for i in range(n_runs)]
+         length_df = pd.DataFrame(
+             lengths,
+             index=index,
+             columns=streak_cols,
+             dtype=np.float64,
+         )
+         integral_df = pd.DataFrame(
+             integrals,
+             index=index,
+             columns=integral_cols,
+             dtype=np.float64,
+         )
+         return pd.concat(
+             [length_df, integral_df],
+             axis=1,
+             keys=["Streak [hr]", "Integral [deg-hr]"],
+             names=["Metric", "Streak Index"],
+         )
+
+     over_df = build_df(over_lengths, over_integrals, over_arr, "Overheat")
+     under_df = build_df(under_lengths, under_integrals, under_arr, "Underheat")
+
+     if over_df.size == 0 and under_df.size == 0:
+         return pd.DataFrame(
+             index=pd.MultiIndex.from_tuples(
+                 [], names=["Metric", "Threshold [degC]", "Zone"]
+             ),
+         )
+
+     if over_df.size == 0:
+         return under_df
+     if under_df.size == 0:
+         return over_df
+     return cast(
+         pd.DataFrame,
+         (
+             pd.concat([over_df, under_df], axis=0)
+             .stack(level="Streak Index", future_stack=True)
+             .dropna()
+         ),
+     )
+
+
+ def _consecutive_run_lengths_looped(
+     M_diff: NDArray[np.float64],
+ ) -> tuple[NDArray[np.float64], NDArray[np.float64]]:
+     """Compute lengths of consecutive True runs along the last axis, for all leading dimensions.
+
+     M_diff has shape (..., n_timesteps). For each (..., :) slice, returns run lengths in a padded
+     2D array of shape (n_slices, max_runs), with NaN padding.
+     """
+     # TODO: use this method in a test to validate the vectorized version.
+     M = M_diff > 0
+     orig_shape = M.shape
+     n_timesteps = orig_shape[-1]
+     n_slices = int(np.prod(orig_shape[:-1]))
+
+     M_2d = M.reshape(n_slices, n_timesteps)
+     M_diff_2d = M_diff.reshape(n_slices, n_timesteps)
+
+     slice_streaks = []
+     slice_streak_integrals = []
+     for slice_ix in range(n_slices):
+         slice_data = M_2d[slice_ix, :]
+         is_streaking = False
+         streak_len = 0
+         streak_integral = 0
+         streaks = []
+         streak_integrals = []
+         for i in range(n_timesteps):
+             flag = slice_data[i]
+             diff = M_diff_2d[slice_ix, i]
+             if flag and not is_streaking:
+                 is_streaking = True
+                 streak_len = 1
+                 streak_integral = diff
+             elif flag and is_streaking:
+                 streak_len += 1
+                 streak_integral += diff
+             elif not flag and is_streaking:
+                 streaks.append(streak_len)
+                 streak_integrals.append(streak_integral)
+                 is_streaking = False
+                 streak_len = 0
+                 streak_integral = 0
+             else:
+                 streak_len = 0
+                 streak_integral = 0
+         if is_streaking:
+             streaks.append(streak_len)
+             streak_integrals.append(streak_integral)
+         slice_streaks.append(streaks)
+         slice_streak_integrals.append(streak_integrals)
+     most_streaks = max(len(streaks) for streaks in slice_streaks)
+     most_streak_integrals = max(
+         len(streak_integrals) for streak_integrals in slice_streak_integrals
+     )
+     padded_streaks = np.full((n_slices, most_streaks), np.nan, dtype=np.float64)
+     padded_streak_integrals = np.full(
+         (n_slices, most_streak_integrals), np.nan, dtype=np.float64
+     )
+     for slice_ix in range(n_slices):
+         padded_streaks[slice_ix, : len(slice_streaks[slice_ix])] = slice_streaks[
+             slice_ix
+         ]
+         padded_streak_integrals[slice_ix, : len(slice_streak_integrals[slice_ix])] = (
+             slice_streak_integrals[slice_ix]
+         )
+     reshaped_streaks = padded_streaks.reshape((
+         *orig_shape[:-1],
+         most_streaks,
+     ))
+     reshaped_streak_integrals = padded_streak_integrals.reshape((
+         *orig_shape[:-1],
+         most_streak_integrals,
+     ))
+     return reshaped_streaks, reshaped_streak_integrals
+
+
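Per the TODO above, the looped helper is meant to cross-check the vectorized one; a sketch of that comparison on random data, assuming both helpers are imported from this module (note the looped version keeps the leading dimensions unflattened, so a reshape is needed before comparing):

    import numpy as np

    from epinterface.analysis.overheating import (
        _consecutive_run_lengths_looped,
        _consecutive_run_lengths_vectorized,
    )

    rng = np.random.default_rng(0)
    m_diff = rng.normal(size=(3, 5, 8760))  # (thresholds, zones, timesteps)
    lengths_v, integrals_v = _consecutive_run_lengths_vectorized(m_diff)
    lengths_l, integrals_l = _consecutive_run_lengths_looped(m_diff)
    assert np.allclose(lengths_v, lengths_l.reshape(lengths_v.shape), equal_nan=True)
    assert np.allclose(integrals_v, integrals_l.reshape(integrals_v.shape), equal_nan=True)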
+ def overheating_results_postprocess(
+     sql: Sql,
+     zone_weights: NDArray[np.float64],
+     zone_names: list[str],
+ ):
+     """Postprocess the sql file to get the temperature results.
+
+     Args:
+         sql (Sql): The sql file to postprocess.
+         zone_weights (NDArray[np.float64]): The weights of the zones.
+         zone_names (list[str]): The names of the zones.
+     """
+     # TODO: compare the single request flamegraph to splitting it out as multiple requests
+     hourly = sql.timeseries_by_name(
+         [
+             "Zone Mean Air Temperature",
+             "Zone Air Relative Humidity",
+             "Zone Mean Radiant Temperature",
+         ],
+         "Hourly",
+     )
+     hourly.index.names = ["Timestep"]
+     hourly.columns.names = ["_", "Zone", "Meter"]
+
+     hourly: pd.DataFrame = cast(
+         pd.DataFrame,
+         hourly.droplevel("_", axis=1)
+         .stack(level="Zone", future_stack=True)
+         .unstack(level="Timestep"),
+     )
+
+     rh = hourly.xs("Zone Air Relative Humidity", level="Meter", axis=1)
+     dbt = hourly.xs("Zone Mean Air Temperature", level="Meter", axis=1)
+     radiant = hourly.xs("Zone Mean Radiant Temperature", level="Meter", axis=1)
+
+     zone_names_: list[str] = dbt.index.tolist()
+     zone_names__: list[str] = radiant.index.tolist()
+     zone_names___: list[str] = rh.index.tolist()
+     if (
+         {z.lower() for z in zone_names} != {z.lower() for z in zone_names_}
+         or {z.lower() for z in zone_names} != {z.lower() for z in zone_names__}
+         or {z.lower() for z in zone_names} != {z.lower() for z in zone_names___}
+     ):
+         msg = f"Zone names do not match! Expected: {zone_names}, Found: {zone_names_}, {zone_names__}, {zone_names___}."
+         raise ValueError(msg)
+     if zone_names_ != zone_names__ or zone_names_ != zone_names___:
+         msg = f"Dataframe zone names are not in the same order as each other! Expected: {zone_names_}, but got {zone_names__}, {zone_names___}."
+         raise ValueError(msg)
+
+     # reorder the zone weights to match the zone names.
+     zone_weights_to_use = np.array([
+         zone_weights[[z.lower() for z in zone_names].index(zone.lower())]
+         for zone in zone_names_
+     ])
+     zone_names_to_use = zone_names_
+
+     dbt_mat = dbt.to_numpy()
+     rh_mat = rh.to_numpy()
+     radiant_mat = radiant.to_numpy()
+
+     hi = calculate_hi_categories(
+         dbt_mat=dbt_mat,
+         rh_mat=rh_mat,
+         zone_names=zone_names_to_use,
+         zone_weights=zone_weights_to_use,
+     )
+
+     edh = calculate_edh(
+         dbt_mat=dbt_mat,
+         rh_mat=rh_mat,
+         mrt_mat=radiant_mat,
+         zone_names=zone_names_to_use,
+         zone_weights=zone_weights_to_use,
+     )
+
+     consecutive_e_zone = calculate_consecutive_hours_above_threshold(
+         dbt_mat=dbt_mat,
+         zone_names=zone_names_to_use,
+     )
+
+     basic_oh = calculate_basic_overheating_stats(
+         dbt_mat=dbt_mat,
+         zone_names=zone_names_to_use,
+         zone_weights=zone_weights_to_use,
+     )
+     return OverheatingAnalysisResults(
+         hi=hi, edh=edh, basic_oh=basic_oh, consecutive_e_zone=consecutive_e_zone
+     )
+
+
+ @dataclass
+ class OverheatingAnalysisResults:
+     """The results of an overheating analysis."""
+
+     hi: pd.DataFrame
+     edh: pd.DataFrame
+     basic_oh: pd.DataFrame
+     consecutive_e_zone: pd.DataFrame
+
+
+ if __name__ == "__main__":
+     # Timesteps should be along the columns
+     # Zones should be along the rows
+     _n_timesteps = 8760
+     _n_zones = 10
+     _temperature_matrix = np.random.rand(_n_zones, _n_timesteps) * 30 + 10
+     _relative_humidity_matrix = np.random.rand(_n_zones, _n_timesteps) * 50 + 50
+     # _mean_radiant_temperature_matrix = np.random.rand(_n_zones, _n_timesteps) * 40 - 10
+     _mean_radiant_temperature_matrix = (
+         _temperature_matrix + np.random.randn(_n_zones, _n_timesteps) * 1
+     )
+     r = calculate_hi_categories(_temperature_matrix, _relative_humidity_matrix)
+
+     edh = calculate_edh(
+         _temperature_matrix,
+         _relative_humidity_matrix,
+         _mean_radiant_temperature_matrix,
+     )
+
+     basic_oh_stats = calculate_basic_overheating_stats(_temperature_matrix)
+
+     consecutive_hours = calculate_consecutive_hours_above_threshold(
+         np.array(
+             [
+                 [
+                     np.sin(i / _n_timesteps * 2 * np.pi) * 30 + 5
+                     for i in range(_n_timesteps)
+                 ],
+                 [
+                     np.cos(i / _n_timesteps * 2 * np.pi) * 30 + 5
+                     for i in range(_n_timesteps)
+                 ],
+                 [
+                     np.cos(2 * i / _n_timesteps * 2 * np.pi) * 30 + 5
+                     for i in range(_n_timesteps)
+                 ],
+                 [
+                     np.sin(2 * i / _n_timesteps * 2 * np.pi) * 30 + 5
+                     for i in range(_n_timesteps)
+                 ],
+             ],
+         ),
+     )
+
+     print("---- Heat Index ----")
+     print("\n")
+     print(r)
+     print("--- EDH ----")
+     print("\n")
+     print(edh)
+     print("--- Basic Overheating Stats ----")
+     print("\n")
+     print(basic_oh_stats)
+     print("--- Consecutive Hours ----")
+     print("\n")
+     print(consecutive_hours)
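For context, the postprocessor above is driven from an EnergyPlus SQLite results file; a sketch of standalone use, where the path, zone names, and floor-area weights are all illustrative and Sql is assumed to wrap an eplusout.sql file as in archetypal:

    import numpy as np
    from archetypal.idfclass.sql import Sql

    from epinterface.analysis.overheating import overheating_results_postprocess

    sql = Sql("out/eplusout.sql")           # illustrative path to a simulation's SQLite output
    zone_names = ["CORE", "PERIMETER"]      # illustrative zone names (must match the model)
    zone_weights = np.array([100.0, 60.0])  # e.g. floor areas used as weights
    results = overheating_results_postprocess(
        sql, zone_weights=zone_weights, zone_names=zone_names
    )
    print(results.hi)        # heat index category counts
    print(results.edh)       # exceedance degree hours
    print(results.basic_oh)  # threshold-based overheating/undercooling hours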
epinterface/builder.py CHANGED
@@ -45,7 +45,7 @@ class SimpleResidentialModel(BaseWeather, extra="allow"):
      Infiltration: float = 0.1
      timestep: int = 6
  
-     async def build(
+     def build(
          self, output_dir: Path | str, weather_cache_dir: Path | str | None = None
      ) -> IDF:
          """Build the energy model.
@@ -63,7 +63,7 @@ class SimpleResidentialModel(BaseWeather, extra="allow"):
  
          material_lib = DefaultMaterialLibrary()
          weather_cache_dir = Path(weather_cache_dir) if weather_cache_dir else output_dir
-         epw_path, ddy_path = await self.fetch_weather(weather_cache_dir)
+         epw_path, ddy_path = self.fetch_weather(weather_cache_dir)
          schedules = pd.read_parquet(
              Path(__file__).parent / "data" / "res_schedules.parquet"
          )
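Because fetch_weather (see epinterface/weather.py below) is now synchronous, build no longer needs an event loop; a sketch of the call-site change, where m stands in for a SimpleResidentialModel instance:

    # 1.0.5: idf = asyncio.run(m.build(output_dir))
    # 1.1.0:
    idf = m.build(output_dir)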
epinterface/climate_studio/builder.py CHANGED
@@ -1,6 +1,5 @@
  """A module for building the energy model using the Climate Studio API."""
  
- import asyncio
  import gc
  import shutil
  import tempfile
@@ -162,7 +161,7 @@ class Model(BaseWeather, validate_assignment=True):
          base_filepath = EnergyPlusArtifactDir / "Minimal.idf"
          target_base_filepath = config.output_dir / "Minimal.idf"
          shutil.copy(base_filepath, target_base_filepath)
-         epw_path, ddy_path = asyncio.run(self.fetch_weather(config.weather_dir))
+         epw_path, ddy_path = self.fetch_weather(config.weather_dir)
          idf = IDF(
              target_base_filepath.as_posix(),
              as_version=None,  # pyright: ignore [reportArgumentType]
@@ -594,7 +593,6 @@
  
  # TODO: move to interface?
  if __name__ == "__main__":
-     import asyncio
      import json
  
      # import tempfile
epinterface/sbem/builder.py CHANGED
@@ -1,6 +1,5 @@
  """A module for building the energy model using the SBEM template library approach."""
  
- import asyncio
  import gc
  import logging
  import shutil
@@ -16,8 +15,13 @@ import pandas as pd
  from archetypal.idfclass import IDF
  from archetypal.idfclass.sql import Sql
  from ladybug.epw import EPW
+ from numpy.typing import NDArray
  from pydantic import BaseModel, Field, field_validator, model_validator
  
+ from epinterface.analysis.overheating import (
+     OverheatingAnalysisResults,
+     overheating_results_postprocess,
+ )
  from epinterface.constants import assumed_constants, physical_constants
  from epinterface.data import EnergyPlusArtifactDir
  from epinterface.ddy_injector_bayes import DDYSizingSpec
@@ -54,6 +58,7 @@ AvailableHourlyVariables = Literal[
      "Zone Mean Air Temperature",
      "Zone Air Relative Humidity",
      "Site Outdoor Air Drybulb Temperature",
+     "Zone Mean Radiant Temperature",
  ]
  
  AVAILABLE_HOURLY_VARIABLES = get_args(AvailableHourlyVariables)
@@ -827,7 +832,7 @@ class Model(BaseWeather, validate_assignment=True):
          base_filepath = EnergyPlusArtifactDir / "Minimal.idf"
          target_base_filepath = config.output_dir / "Minimal.idf"
          shutil.copy(base_filepath, target_base_filepath)
-         epw_path, ddy_path = asyncio.run(self.fetch_weather(config.weather_dir))
+         epw_path, ddy_path = self.fetch_weather(config.weather_dir)
          output_meters = (
              [
                  {
@@ -862,6 +867,16 @@
              epw=epw_path.as_posix(),
              output_directory=config.output_dir.as_posix(),
          )
+ 
+         # Remove undesired outputs from the IDF file.
+         # TODO: test the performance benefits, if any
+         for output in idf.idfobjects["OUTPUT:METER"]:
+             if output.Key_Name not in DESIRED_METERS:
+                 idf.removeidfobject(output)
+         for output in idf.idfobjects["OUTPUT:VARIABLE"]:
+             if output.Variable_Name not in AVAILABLE_HOURLY_VARIABLES:
+                 idf.removeidfobject(output)
+ 
          ddy = IDF(
              ddy_path.as_posix(),
              as_version="22.2",
@@ -1296,20 +1311,18 @@
          weather_dir: Path | None = None,
          post_geometry_callback: Callable[[IDF], IDF] | None = None,
          eplus_parent_dir: Path | None = None,
-     ) -> tuple[IDF, pd.Series, str, Sql, Path | None]:
+         calculate_overheating: bool = False,
+     ) -> "ModelRunResults":
          """Build and simulate the idf model.
  
          Args:
              weather_dir (Path): The directory to store the weather files.
              post_geometry_callback (Callable[[IDF],IDF] | None): A callback to run after the geometry is added.
              eplus_parent_dir (Path | None): The parent directory to store the eplus working directory. If None, a temporary directory will be used.
+             calculate_overheating (bool): Whether to calculate the overheating results.
  
          Returns:
-             idf (IDF): The built energy model.
-             results (pd.Series): The postprocessed results including energy, peak, and temperature data.
-             err_text (str): The warning text.
-             sql (Sql): The SQL results file with simulation data.
-             eplus_dir (Path | None): The path to the eplus artifact directory (None if a temporary directory was used).
+             ModelRunResults: The results of the model run.
          """
          with tempfile.TemporaryDirectory() as output_dir_name:
              output_dir = (
@@ -1332,12 +1345,62 @@
                  post_geometry_callback=post_geometry_callback,
              )
              results = self.standard_results_postprocess(sql)
+             zone_weights, zone_names = self.get_zone_weights_and_names(idf)
+ 
+             overheating_results = (
+                 overheating_results_postprocess(
+                     sql, zone_weights=zone_weights, zone_names=zone_names
+                 )
+                 if calculate_overheating
+                 else None
+             )
+ 
              err_text = self.get_warnings(idf)
  
              gc.collect()
              # if eplus_parent_dir is not None, we return the path to the output directory
              output_dir_result = output_dir if eplus_parent_dir is not None else None
-             return idf, results, err_text, sql, output_dir_result
+ 
+             return ModelRunResults(
+                 idf=idf,
+                 sql=sql,
+                 energy_and_peak=results,
+                 err_text=err_text,
+                 output_dir=output_dir_result,
+                 overheating_results=overheating_results,
+             )
+ 
+     @staticmethod
+     def get_zone_weights_and_names(idf: IDF) -> tuple[NDArray[np.float64], list[str]]:
+         """Get the zone weights and names from the idf model.
+ 
+         Args:
+             idf (IDF): The idf model to get the zone weights and names from.
+ 
+         Returns:
+             zone_weights (NDArray[np.float64]): The weights of the zones.
+             zone_names (list[str]): The names of the zones.
+         """
+         zone_weights_: list[float] = []
+         zone_names: list[str] = []
+         for zone in idf.idfobjects["ZONE"]:
+             floor_area = get_zone_floor_area(idf, zone.Name)
+             zone_weights_.append(floor_area)
+             zone_names.append(zone.Name)
+         zone_weights: NDArray[np.float64] = np.array(zone_weights_)
+         return zone_weights, zone_names
+ 
+ 
+ @dataclass
+ class ModelRunResults:
+     """The results of a model run."""
+ 
+     idf: IDF
+     sql: Sql
+     energy_and_peak: pd.Series
+     err_text: str
+     output_dir: Path | None
+     overheating_results: OverheatingAnalysisResults | None = None
  
  
  if __name__ == "__main__":
@@ -1422,9 +1485,16 @@ if __name__ == "__main__":
  
      # post_geometry_callback = lambda x: x.saveas("notebooks/badgeo.idf")
  
-     _idf, results, _err_text, _sql, _ = model.run(
+     r = model.run(
          # post_geometry_callback=post_geometry_callback,
      )
+     _idf, results, _err_text, _sql, _ = (
+         r.idf,
+         r.energy_and_peak,
+         r.err_text,
+         r.sql,
+         r.output_dir,
+     )
  
      # temp_config = TemperatureOutputConfig(mode="hours_above_threshold", threshold=26.0)
      # _idf, results, _err_text, _sql = model.run(temp_config=temp_config)
epinterface/sbem/flat_model.py CHANGED
@@ -2122,13 +2122,15 @@ class FlatModel(BaseModel):
              post_geometry_callback,
          )
  
-     def simulate(self):
+     def simulate(self, calculate_overheating: bool = False):
          """Simulate the model and return the IDF, result, and error."""
          model, cb = self.to_model()
  
-         idf, result, err, _sql, _ = model.run(post_geometry_callback=cb)
+         r = model.run(
+             post_geometry_callback=cb, calculate_overheating=calculate_overheating
+         )
  
-         return idf, result, err
+         return r
  
  
  if __name__ == "__main__":
@@ -2207,4 +2209,4 @@ if __name__ == "__main__":
          ),
      )
  
-     idf, result, err = flat_model.simulate()
+     r = flat_model.simulate(calculate_overheating=True)
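With this change, simulate returns the full ModelRunResults rather than an (idf, result, err) tuple; a sketch of the updated call site, assuming flat_model is constructed as before:

    r = flat_model.simulate(calculate_overheating=True)
    idf, result, err = r.idf, r.energy_and_peak, r.err_text
    if r.overheating_results is not None:
        print(r.overheating_results.edh)  # exceedance degree hours from the run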
epinterface/weather.py CHANGED
@@ -59,7 +59,7 @@ class BaseWeather(BaseModel):
              )
          )
  
-     async def fetch_weather(self, cache_dir: Path | str):  # noqa: C901
+     def fetch_weather(self, cache_dir: Path | str):  # noqa: C901
          """Fetch the weather file from the URL and extract the .epw and .ddy files.
  
          Args:
@@ -104,8 +104,8 @@
              "https",
              "http",
          ]:
-             client = httpx.AsyncClient()
-             response = await client.get(str(self.Weather))
+             client = httpx.Client()
+             response = client.get(str(self.Weather))
              with tempfile.TemporaryFile() as f:
                  f.write(response.content)
                  f.seek(0)
@@ -118,7 +118,7 @@
                  raise FileNotFoundError(msg)
              z.extract(epw_path.name, weather_dir)
              z.extract(ddy_path.name, weather_dir)
-             await client.aclose()
+             client.close()
          elif isinstance(self.Weather, Path):
              with zipfile.ZipFile(self.Weather, "r") as z:
                  if epw_path.name not in z.namelist():
epinterface-1.0.5.dist-info/METADATA → epinterface-1.1.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: epinterface
- Version: 1.0.5
+ Version: 1.1.0
  Summary: This is a repository for dynamically generating energy models within Python, relying on Archetypal and Eppy for most of its functionality.
  Project-URL: Homepage, https://github.com/szvsw/epinterface
  Project-URL: Documentation, https://szvsw.github.io/epinterface
@@ -18,6 +18,7 @@ Requires-Dist: openpyxl~=3.1.5
  Requires-Dist: pandas<2.3,>=2.2
  Requires-Dist: prisma~=0.15.0
  Requires-Dist: pydantic<3,>=2.9
+ Requires-Dist: pythermalcomfort>=3.8.0
  Description-Content-Type: text/markdown
  
  # epinterface
epinterface-1.0.5.dist-info/RECORD → epinterface-1.1.0.dist-info/RECORD RENAMED
@@ -1,15 +1,17 @@
  epinterface/__init__.py,sha256=5b_hXvzX4SJrUI8Ts_siDHadd1tiHqw1nyZ38bwFL3I,49
  epinterface/actions.py,sha256=1SJLBbLB1evXgIAAvu57v-kyrIjwDyJMYh7ALb5w2Nw,12609
- epinterface/builder.py,sha256=o_XXClYgvImt_5-lYhqd2H4xX1-Tb41fOG6CL1cgX20,10845
+ epinterface/builder.py,sha256=rLTPMU-fDZnBw30O-SYp7PEDqHEX7PnVZ4sWshavCME,10833
  epinterface/cli.py,sha256=caxSBsjsKCbjjI_42CnWHd3tPJ8tUTBiQ3vNBYGzq84,9473
  epinterface/ddy_injector_bayes.py,sha256=ihOJiBDK7meGGCpyZqgZtWZqPru3x4lkPg1W5fXo7Xc,11742
  epinterface/ddy_interface_bayes.py,sha256=8uGz6mJumdiWxGDMceUnLlkBpf5zbOChnznypjqJGxI,11735
  epinterface/geometry.py,sha256=uJrRRm5rOM2a2zfGP-yyUtNBOdKAf4zp08XPkncMlXA,27967
  epinterface/interface.py,sha256=JM1HtxIhpZYdEKcDuknhjShfQyMNUtKncuDT3Q9HQYE,34152
  epinterface/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- epinterface/weather.py,sha256=ObUV9eFvPjHVELk7hBTvJwjdd2qQgy_so4X7nECkMBo,5477
+ epinterface/weather.py,sha256=3lohq12ob74L3FjqceBl6jfQXNkeagK2jTSYaRbmMRk,5453
+ epinterface/analysis/__init__.py,sha256=-ebrg6_jwKxFROK-KcnAK6WHcQGYRmiOcvD0KstavGM,63
+ epinterface/analysis/overheating.py,sha256=Du2sEgGXblDOC6hFn-ckGNCSvRdKxYFfWvP4mxMcoPQ,32429
  epinterface/climate_studio/__init__.py,sha256=zZ1KfnHG6hI6iyQylQYQG4DAcUYYR4C_2NtLXT-V-Wo,73
- epinterface/climate_studio/builder.py,sha256=Fy5e2FYVNB-gXolpj-qAnU064rmUeXKKG_xxi7l6JeI,21818
+ epinterface/climate_studio/builder.py,sha256=LfBFhjkUiOB8X-aCdu-5r1aG8roLYOZlbOczC3aupec,21771
  epinterface/climate_studio/interface.py,sha256=Y2-YG7m_R0cQaU_16BF_d__fZ31El6z3W_dObRKqXkQ,71573
  epinterface/constants/__init__.py,sha256=xCta9qzR6LOFB9-C6iiT0bz8hyldwQt_MsMeCA9wp54,2728
  epinterface/data/Minimal.idf,sha256=csTotDwECXRxxkd40bQMrWMWB3aPta2vLk0il6gPTEU,6812
@@ -20,10 +22,10 @@ epinterface/data/__init__.py,sha256=r6Uju05rbG3hVVAvCuqJS5wI_zLwK3fU_M8nKMC5DEA,
  epinterface/data/res_schedules.parquet,sha256=GjMaqb8uywr7FwR3rAByL72BKGfoZiVuSdAH2sAPFvw,4295
  epinterface/sbem/__init__.py,sha256=aOEtaivujVzFDMFyLrLRx_Xmwv7_Y60MYqObBsn5OR8,48
  epinterface/sbem/annotations.py,sha256=qnN0z7Suri5eHHPJNvXWUorYbEMHGajX2DPezCYcCSQ,1302
- epinterface/sbem/builder.py,sha256=hgXHtILjpT921uAaw5Ictv_wChKVYoFsrPljJtDERTg,58198
+ epinterface/sbem/builder.py,sha256=WW9kjmAAeO6-zTkHLdqqgFyRsPVrfBmrHRVez6EOPt4,60307
  epinterface/sbem/common.py,sha256=s7DekSlosfM2mQUZGiIGEbbgiJfF7h6QsTvYbAPP3Ac,1317
  epinterface/sbem/exceptions.py,sha256=4uOXlZgJyvhhRG2cbSua2InxxXHanGSe74h2ioiNEkw,3332
- epinterface/sbem/flat_model.py,sha256=q-G23Rvw3GBDYRm7Oa6PItI8_Xp-FYieLZmLDR4mNyM,84602
+ epinterface/sbem/flat_model.py,sha256=Uk_ck3DlxnJHY4GK4t720aTMp6xzNFogjX_qMq32XNM,84678
  epinterface/sbem/interface.py,sha256=M9p5XK3psKjExBTcOY66IFLK1sRcPiuKXbu4BckAXgU,25417
  epinterface/sbem/model_graph_structure.md,sha256=uCky10vTV-yuosd9LyLoXzKUzH-Vu6_oY7UCk35oBCg,952
  epinterface/sbem/utils.py,sha256=i2BM9iwdW_fYpKfmnmXzOqJ4_APnxTaSlNHinVyWWUo,3581
@@ -50,8 +52,8 @@ epinterface/sbem/prisma/migrations/20250325185158_add_mutually_exclusive_ventila
  epinterface/sbem/prisma/migrations/20250326141941_reduce_naming_complexity_ventilation/migration.sql,sha256=tslwxYOfpbfWCUPqnOadTGk-BzluLcly27MV-mZy2Sc,2289
  epinterface/sbem/prisma/migrations/20250331141910_add_support_for_attic_and_basement_constructions/migration.sql,sha256=fvAzKga8qsMmVM3jLFDAlSbBkWbqRGrHu5Mf2doIEgs,6690
  epinterface/sbem/prisma/migrations/20250919152559_decouple_basement_infiltration/migration.sql,sha256=YrQJxHcU1jrKb6AlBSdfuJKETKkkyxqdKb-X8krBH-8,1876
- epinterface-1.0.5.dist-info/METADATA,sha256=2t5ecDAFk_97jAnOAvlvFwHDarCGPXZIAmxJnkpObHA,3463
- epinterface-1.0.5.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- epinterface-1.0.5.dist-info/entry_points.txt,sha256=bjjYRuHWvWV0d-QUesH6prRqHqcppTJANSuAfg3h9j8,78
- epinterface-1.0.5.dist-info/licenses/LICENSE,sha256=hNp6DmbGMuUcwlnpYS8E-ZHYU7kxfmRUP8pLGQaCnu8,1066
- epinterface-1.0.5.dist-info/RECORD,,
+ epinterface-1.1.0.dist-info/METADATA,sha256=6cQfH4vCL_-N43bWZfu53jHe0Iz1BxMmYN3QpwqAZzE,3502
+ epinterface-1.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ epinterface-1.1.0.dist-info/entry_points.txt,sha256=bjjYRuHWvWV0d-QUesH6prRqHqcppTJANSuAfg3h9j8,78
+ epinterface-1.1.0.dist-info/licenses/LICENSE,sha256=hNp6DmbGMuUcwlnpYS8E-ZHYU7kxfmRUP8pLGQaCnu8,1066
+ epinterface-1.1.0.dist-info/RECORD,,