irie-0.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of irie has been flagged as potentially problematic.

Files changed (145)
  1. irie/__main__.py +24 -0
  2. irie/apps/__init__.py +5 -0
  3. irie/apps/authentication/__init__.py +1 -0
  4. irie/apps/authentication/admin.py +1 -0
  5. irie/apps/authentication/config.py +6 -0
  6. irie/apps/authentication/forms.py +41 -0
  7. irie/apps/authentication/migrations/__init__.py +1 -0
  8. irie/apps/authentication/models.py +1 -0
  9. irie/apps/authentication/tests.py +1 -0
  10. irie/apps/authentication/urls.py +9 -0
  11. irie/apps/authentication/views.py +53 -0
  12. irie/apps/config.py +8 -0
  13. irie/apps/context_processors.py +5 -0
  14. irie/apps/documents/__init__.py +0 -0
  15. irie/apps/documents/apps.py +7 -0
  16. irie/apps/documents/documents.py +61 -0
  17. irie/apps/documents/migrations/__init__.py +0 -0
  18. irie/apps/documents/tests.py +3 -0
  19. irie/apps/documents/urls.py +12 -0
  20. irie/apps/documents/views.py +27 -0
  21. irie/apps/evaluation/__init__.py +0 -0
  22. irie/apps/evaluation/admin.py +43 -0
  23. irie/apps/evaluation/apps.py +18 -0
  24. irie/apps/evaluation/daemon.py +107 -0
  25. irie/apps/evaluation/identification.py +196 -0
  26. irie/apps/evaluation/migrations/0001_initial.py +25 -0
  27. irie/apps/evaluation/migrations/0002_remove_evaluation_cesmd.py +17 -0
  28. irie/apps/evaluation/migrations/0003_evaluation_asset.py +20 -0
  29. irie/apps/evaluation/migrations/__init__.py +0 -0
  30. irie/apps/evaluation/models.py +72 -0
  31. irie/apps/evaluation/urls.py +16 -0
  32. irie/apps/evaluation/views.py +68 -0
  33. irie/apps/events/__init__.py +0 -0
  34. irie/apps/events/admin.py +9 -0
  35. irie/apps/events/apps.py +12 -0
  36. irie/apps/events/migrations/0001_initial.py +27 -0
  37. irie/apps/events/migrations/0002_alter_event_id.py +18 -0
  38. irie/apps/events/migrations/0003_event_cesmd.py +19 -0
  39. irie/apps/events/migrations/0004_event_record_identifier.py +19 -0
  40. irie/apps/events/migrations/0005_event_asset.py +21 -0
  41. irie/apps/events/migrations/0006_alter_event_event_file.py +18 -0
  42. irie/apps/events/migrations/__init__.py +0 -0
  43. irie/apps/events/models.py +70 -0
  44. irie/apps/events/tests.py +1 -0
  45. irie/apps/events/tests_events.py +240 -0
  46. irie/apps/events/urls.py +29 -0
  47. irie/apps/events/views.py +55 -0
  48. irie/apps/events/views_events.py +215 -0
  49. irie/apps/inventory/CESMD.py +81 -0
  50. irie/apps/inventory/__init__.py +5 -0
  51. irie/apps/inventory/admin.py +10 -0
  52. irie/apps/inventory/apps.py +12 -0
  53. irie/apps/inventory/archive/arcGIS.py +1175 -0
  54. irie/apps/inventory/calid.py +65 -0
  55. irie/apps/inventory/fields.py +5 -0
  56. irie/apps/inventory/forms.py +12 -0
  57. irie/apps/inventory/migrations/0001_initial.py +31 -0
  58. irie/apps/inventory/migrations/0002_assetevaluationmodel_cesmd.py +19 -0
  59. irie/apps/inventory/migrations/0003_auto_20230520_2030.py +23 -0
  60. irie/apps/inventory/migrations/0004_asset.py +27 -0
  61. irie/apps/inventory/migrations/0005_auto_20230731_1802.py +23 -0
  62. irie/apps/inventory/migrations/0006_auto_20230731_1816.py +28 -0
  63. irie/apps/inventory/migrations/0007_auto_20230731_1827.py +24 -0
  64. irie/apps/inventory/migrations/0008_asset_is_complete.py +19 -0
  65. irie/apps/inventory/migrations/0009_auto_20230731_1842.py +29 -0
  66. irie/apps/inventory/migrations/0010_auto_20230801_0025.py +23 -0
  67. irie/apps/inventory/migrations/0011_alter_asset_cgs_data.py +18 -0
  68. irie/apps/inventory/migrations/0012_corridor.py +22 -0
  69. irie/apps/inventory/migrations/0013_alter_asset_cesmd.py +18 -0
  70. irie/apps/inventory/migrations/0014_alter_asset_cesmd.py +18 -0
  71. irie/apps/inventory/migrations/__init__.py +0 -0
  72. irie/apps/inventory/models.py +70 -0
  73. irie/apps/inventory/tables.py +584 -0
  74. irie/apps/inventory/traffic.py +175052 -0
  75. irie/apps/inventory/urls.py +25 -0
  76. irie/apps/inventory/views.py +515 -0
  77. irie/apps/management/__init__.py +0 -0
  78. irie/apps/management/commands/__init__.py +0 -0
  79. irie/apps/networks/__init__.py +0 -0
  80. irie/apps/networks/apps.py +5 -0
  81. irie/apps/networks/forms.py +64 -0
  82. irie/apps/networks/migrations/0001_initial.py +26 -0
  83. irie/apps/networks/migrations/__init__.py +0 -0
  84. irie/apps/networks/models.py +14 -0
  85. irie/apps/networks/networks.py +782 -0
  86. irie/apps/networks/tests.py +1 -0
  87. irie/apps/networks/urls.py +18 -0
  88. irie/apps/networks/views.py +89 -0
  89. irie/apps/prediction/__init__.py +0 -0
  90. irie/apps/prediction/admin.py +9 -0
  91. irie/apps/prediction/apps.py +12 -0
  92. irie/apps/prediction/forms.py +20 -0
  93. irie/apps/prediction/metrics.py +61 -0
  94. irie/apps/prediction/migrations/0001_initial.py +32 -0
  95. irie/apps/prediction/migrations/0002_auto_20230731_1801.py +27 -0
  96. irie/apps/prediction/migrations/0003_rename_assetevaluationmodel_evaluation.py +18 -0
  97. irie/apps/prediction/migrations/0004_delete_evaluation.py +16 -0
  98. irie/apps/prediction/migrations/0005_predictormodel_protocol.py +18 -0
  99. irie/apps/prediction/migrations/0006_alter_predictormodel_protocol.py +18 -0
  100. irie/apps/prediction/migrations/0007_predictormodel_active.py +19 -0
  101. irie/apps/prediction/migrations/0008_predictormodel_description.py +18 -0
  102. irie/apps/prediction/migrations/0009_predictormodel_entry_point.py +19 -0
  103. irie/apps/prediction/migrations/0010_alter_predictormodel_entry_point.py +18 -0
  104. irie/apps/prediction/migrations/0011_remove_predictormodel_entry_point.py +17 -0
  105. irie/apps/prediction/migrations/0012_predictormodel_entry_point.py +18 -0
  106. irie/apps/prediction/migrations/0013_predictormodel_metrics.py +18 -0
  107. irie/apps/prediction/migrations/0014_auto_20240930_0004.py +28 -0
  108. irie/apps/prediction/migrations/0015_alter_predictormodel_render_file.py +18 -0
  109. irie/apps/prediction/migrations/__init__.py +0 -0
  110. irie/apps/prediction/models.py +37 -0
  111. irie/apps/prediction/predictor.py +286 -0
  112. irie/apps/prediction/runners/__init__.py +450 -0
  113. irie/apps/prediction/runners/metrics.py +168 -0
  114. irie/apps/prediction/runners/opensees/__init__.py +0 -0
  115. irie/apps/prediction/runners/opensees/schemas/__init__.py +39 -0
  116. irie/apps/prediction/runners/utilities.py +277 -0
  117. irie/apps/prediction/runners/xmlutils.py +232 -0
  118. irie/apps/prediction/runners/zipped.py +27 -0
  119. irie/apps/prediction/templatetags/__init__.py +0 -0
  120. irie/apps/prediction/templatetags/predictor.py +20 -0
  121. irie/apps/prediction/urls.py +19 -0
  122. irie/apps/prediction/views.py +184 -0
  123. irie/apps/prediction/views_api.py +216 -0
  124. irie/apps/site/__init__.py +0 -0
  125. irie/apps/site/admin.py +1 -0
  126. irie/apps/site/config.py +6 -0
  127. irie/apps/site/migrations/__init__.py +1 -0
  128. irie/apps/site/models.py +2 -0
  129. irie/apps/site/templatetags/__init__.py +0 -0
  130. irie/apps/site/templatetags/indexing.py +7 -0
  131. irie/apps/site/tests.py +1 -0
  132. irie/apps/site/urls.py +8 -0
  133. irie/apps/site/view_sdof.py +40 -0
  134. irie/apps/site/view_utils.py +13 -0
  135. irie/apps/site/views.py +88 -0
  136. irie/core/__init__.py +5 -0
  137. irie/core/asgi.py +12 -0
  138. irie/core/settings.py +223 -0
  139. irie/core/urls.py +39 -0
  140. irie/core/wsgi.py +12 -0
  141. irie-0.0.0.dist-info/METADATA +48 -0
  142. irie-0.0.0.dist-info/RECORD +145 -0
  143. irie-0.0.0.dist-info/WHEEL +5 -0
  144. irie-0.0.0.dist-info/entry_points.txt +2 -0
  145. irie-0.0.0.dist-info/top_level.txt +1 -0
irie/apps/prediction/runners/__init__.py
@@ -0,0 +1,450 @@
+ #===----------------------------------------------------------------------===#
+ #
+ # STAIRLab -- STructural Artificial Intelligence Laboratory
+ #
+ #===----------------------------------------------------------------------===#
+ #
+ import os.path
+ import shutil
+ from typing import NewType
+ import sys, json
+ import zipfile
+ from pathlib import Path
+ from abc import abstractmethod
+ import contextlib
+
+ class Event: pass
+
+ RunID = NewType("RunID", int)
+
+ MetricType = NewType("MetricType", str)
+
+ from .utilities import read_model
+ from .metrics import (
+     accel_response_history_plot,
+     column_strain_state_metric,
+     peak_acceleration_metric,
+     peak_drift_metric
+ )
+
+ OPENSEES = [
+     sys.executable, "-m", "opensees",
+ ]
+
+
+ @contextlib.contextmanager
+ def new_cd(x):
+     d = os.getcwd()
+
+     # This could raise an exception, but it's probably
+     # best to let it propagate and let the caller
+     # deal with it, since they requested x
+     os.chdir(x)
+
+     try:
+         yield
+
+     finally:
+         # This could also raise an exception, but you *really*
+         # aren't equipped to figure out what went wrong if the
+         # old working directory can't be restored.
+         os.chdir(d)
+
+ class classproperty(property):
+     def __get__(self, cls, owner):
+         return classmethod(self.fget).__get__(None, owner)()
+
+
+ class Runner:
+     def __init__(self, conf: dict):
+
+         if isinstance(conf, dict):
+             # Create from dict when posted from API; this
+             # is used to create a new PredictorModel
+             self.name: str = conf["name"]
+             self.description = conf.get("description", "")
+             self.conf = conf["config"]
+             self.metrics = conf["metrics"]
+             self.entry_point = conf["entry_point"]
+             self.active = conf.get("active", True)
+         else:
+             # Create from PredictorModel when loaded from database.
+             # This is done when running analysis
+             self.name: str = conf.name
+             self.description = "" # conf.description
+             self.conf = conf.config
+             self.entry_point = conf.entry_point
+             self.metrics = conf.metrics
+             self.active = conf.active
+             try:
+                 self.model_file = Path(conf.config_file.path).resolve()
+             except ValueError:
+                 pass
+
+         self.out_dir = Path(__file__).parents[0]/"Predictions"
+         self.runs = {}
+
+     @abstractmethod
+     def newPrediction(self, event)->RunID: ...
+
+     @abstractmethod
+     def runPrediction(self, run_id)->bool: ...
+
+     def getMetricList(self)->list:
+         return self.metrics
+
+     def activateMetric(self, type, rid=None)->bool:
+         return False
+
+     @abstractmethod
+     def getMetricData(self, run: RunID, metric: MetricType)->dict: ...
+
+
+ class OpenSeesRunner(Runner):
+     @property
+     def platform(self):
+         return self.conf.get("platform", "")
+
+
+     @classmethod
+     def create(cls, asset, request, form):
+         predictor = form.save(commit=False)
+
+         predictor.entry_point = [
+             sys.executable, "-m", "opensees"
+         ]
+         predictor.config = {}
+         predictor.protocol = "BRACE2_CLI_PREDICTOR_T4"
+         predictor.active = True
+         # predictor.metrics = self.getMetricList()
+         return predictor
+
+
+     @classproperty
+     def schema(cls):
+         from .opensees import schemas
+         return {
+             "title": "Structural Model",
+             "options": {"disable_collapse": True},
+             "schema": "http://json-schema.org/draft-04/schema#",
+             "type": "object",
+             "properties": {
+                 "platform": {
+                     "type": "string",
+                     "title": "Platform",
+                     "enum": ["OpenSees","CSiBridge"]
+                 },
+                 "model": schemas.load("hwd_conf.schema.json"),
+                 "analysis": schemas.load("hwd_analysis.schema.json"),
+             }
+         }
+
+     def getMetricList(self):
+         return [
+             "COLUMN_STRAIN_STATES",
+             "PEAK_ACCEL",
+             "PEAK_DRIFT",
+             # "ACC_RESPONSE_HISTORY",
+         ]
+
+     def newPrediction(self, event, output_directory=None):
+         """
+         Create a new prediction run and return the run_id. If output_directory is None,
+         a fresh output directory is created automatically; otherwise the given
+         directory is used as the run's output directory.
+         """
+         event = event.event_file.path
+         if output_directory is not None:
+             # this case will eventually be deleted; it's just for
+             # debugging metric renderers.
+             run_id = "0"
+             self.runs[run_id] = {
+                 "run_output_directory": Path(output_directory)
+             }
+
+         else:
+             # Calculate next output directory and
+             # create directory if it doesn't exist
+             out_dir = self.out_dir
+             if not out_dir.is_dir():
+                 (out_dir/"0").mkdir(parents=True)
+
+             latestDir = list(sorted((f for f in out_dir.iterdir() if f.is_dir()), key=lambda m: int(m.name)))[-1]
+             run_id = int(latestDir.name)+1
+             run_dir = out_dir/str(run_id)
+             run_dir = run_dir.resolve()
+             run_dir.mkdir(parents=True, exist_ok=False)
+
+             # Copy files to run directory
+             shutil.copyfile(event, run_dir/"event.zip")
+             shutil.copyfile(self.model_file.resolve(), run_dir/self.model_file.name)
+
+             if self.model_file.suffix == ".zip":
+                 with zipfile.ZipFile(self.model_file, 'r') as zip_ref:
+                     zip_ref.extractall(run_dir)
+                 model_file = (run_dir/"nonlinear.tcl").resolve()
+
+             elif self.model_file.suffix == ".b2k":
+                 pass
+
+             elif self.model_file.suffix == ".tcl":
+                 model_file = (run_dir/self.model_file.name).resolve()
+
+             self.runs[run_id] = {
+                 "run_output_directory": run_dir,
+                 "event_file_name": Path(event),
+                 "model_file": model_file,
+                 **self.conf
+             }
+
+             with open(out_dir/str(run_id)/"conf.json", "w") as f:
+                 json.dump({k: str(v) for k,v in self.runs[run_id].items()}, f)
+
+         return run_id
+
+
+     def _load_config(self, run_id):
+         run_dir = self.out_dir/str(run_id)
+         with open(run_dir/"conf.json","r") as f:
+             self.runs[run_id] = json.load(f)
+
+         self.model_file = Path(self.runs[run_id]["model_file"])
+
+
+     def runPrediction(self, run_id, scale: float = None):
+         if run_id not in self.runs:
+             self._load_config(run_id)
+
+         event_file_path = os.path.relpath(self.runs[run_id]["event_file_name"],
+                                           self.model_file.parents[0])
+         output_directory = os.path.relpath(self.runs[run_id]["run_output_directory"],
+                                            self.model_file.parents[0])
+
+         event_file_path = self.runs[run_id]["event_file_name"]
+
+         # Create model
+         import opensees.openseespy as ops
+
+         import sys
+         model = ops.Model(echo_file=sys.stdout)
+         model.eval("set argv {}")
+         with new_cd(self.runs[run_id]["run_output_directory"]):
+             model.eval(f"source {self.runs[run_id]['model_file']}")
+
+             model.eval("print -json -file modelDetails.json")
+
+             model.eval(f"set python {sys.executable}")
+
+             model.eval(r"""
+             proc py {args} {
+                 global python
+                 eval "[exec {*}$python {*}$args]"
+             }
+
+             proc pt {args} {
+                 global python
+                 puts "[exec {*}$python {*}$args]"
+             }
+
+             proc write_modes {mode_file nmodes} {
+                 set fid_modes [open $mode_file w+]
+                 for {set m 1} {$m <= $nmodes} {incr m} {
+                     puts $fid_modes "$m:"
+                     foreach n [getNodeTags] {
+                         puts $fid_modes "  $n: \[[join [nodeEigenvector $n $m] {, }]\]";
+                     }
+                 }
+                 close $fid_modes
+             }
+             proc write_displacements {file_name {resp Disp}} {
+                 set fid [open "$file_name" "w+"]
+                 puts $fid "[getTime]:"
+                 foreach n [getNodeTags] {
+                     puts $fid "  $n: \[[join [node${resp} $n] {, }]\]";
+                 }
+                 close $fid;
+             }
+             """)
+
+             #
+             # Run gravity analysis
+             #
+             model.eval("""
+             wipeAnalysis
+             test NormDispIncr 1.0e-8 10 0;
+             algorithm Newton;
+             integrator LoadControl 0.1;
+             numberer Plain;
+             constraints Transformation;
+             system SparseGeneral;
+             analysis Static;
+             analyze 10;
+             # write_displacements "dispsGrav.yaml"
+             """)
+
+             #
+             # DAMPING
+             #
+             model.eval(r"""
+             set nmodes 8;  # Number of modes to analyze for modal analysis
+
+             # set wb [eigen -fullGenLapack $nmodes];
+             # puts "\tFundamental-Period After Gravity Analysis:"
+             # for {set iPd 1} {$iPd <= $nmodes} {incr iPd 1} {
+             #     set wwb [lindex $wb $iPd-1];
+             #     set Tb [expr 2*$pi/sqrt($wwb)];
+             #     puts "\tPeriod$iPd= $Tb"
+             # }
+             # write_modes $output_directory/modesPostG.yaml $nmodes
+             # remove recorders
+
+             set nmodes [tcl::mathfunc::max {*}$damping_modes $nmodes]
+             set lambdaN [eigen -fullGenLapack $nmodes];
+
+             # set lambdaN [eigen $nmodes];
+             if {$damping_type == "rayleigh"} {
+                 set nEigenI [lindex $damping_modes 0];  # first rayleigh damping mode
+                 set nEigenJ [lindex $damping_modes 1];  # second rayleigh damping mode
+                 set iDamp [lindex $damping_ratios 0];   # first rayleigh damping ratio
+                 set jDamp [lindex $damping_ratios 1];   # second rayleigh damping ratio
+                 set lambdaI [lindex $lambdaN [expr $nEigenI-1]];
+                 set lambdaJ [lindex $lambdaN [expr $nEigenJ-1]];
+                 set omegaI [expr $lambdaI**0.5];
+                 set omegaJ [expr $lambdaJ**0.5];
+                 set TI [expr 2.0*$pi/$omegaI];
+                 set TJ [expr 2.0*$pi/$omegaJ];
+                 set alpha0 [expr 2.0*($iDamp/$omegaI-$jDamp/$omegaJ)/(1/$omegaI**2-1/$omegaJ**2)];
+                 set alpha1 [expr 2.0*$iDamp/$omegaI-$alpha0/$omegaI**2];
+                 puts "\tRayleigh damping parameters:"
+                 puts "\tmodes: $nEigenI, $nEigenJ ; ratios: $iDamp, $jDamp"
+                 puts "\tTI = $TI; TJ = $TJ"
+                 puts "\tlambdaI = $lambdaI; lambdaJ = $lambdaJ"
+                 puts "\tomegaI = $omegaI; omegaJ = $omegaJ"
+                 puts "\talpha0 = $alpha0; alpha1 = $alpha1"
+                 rayleigh $alpha0 0.0 0.0 $alpha1;
+
+             } elseif {$damping_type == "modal"} {
+                 # Needs an edit: the ratios are currently applied in order at the first modes, but should be applied at the modes specified in damping_modes.
+                 set nratios [llength $damping_ratios]
+                 puts "\tModal damping parameters:"
+                 puts "\tratios of $damping_ratios at the first $nratios modes"
+                 for {set i 1} {$i <= [expr $nmodes - $nratios]} {incr i} {
+                     lappend damping_ratios 0
+                 }
+                 modalDamping {*}$damping_ratios
+             }
+             """)
+
+
+             #
+             # DYNAMIC RECORDERS
+             #
+
+             ## COLUMN SECTION DEFORMATIONS AT TOP AND BOTTOM FOR STRAIN-BASED DAMAGE STATES
+             column_strains = tuple(k["key"] for k in self.runs[run_id]["columns"] if k["strain"])
+             if len(column_strains) > 0:
+                 model.recorder("Element", "section", 1, "deformation", xml="eleDef1.txt", ele=column_strains)  # section 1 deformation
+                 model.recorder("Element", "section", 4, "deformation", xml="eleDef4.txt", ele=column_strains)  # section 4 deformation
+
+
+
+             #
+             # Run dynamic analysis
+             #
+             model.eval(f"""
+             wipeAnalysis
+             # Uniform Support Excitation
+             # lassign [pt -m CE58658.makePattern {event_file_path} --scale $dynamic_scale_factor --node $input_location] dt steps
+             # lassign [py -m CE58658.makePattern {event_file_path} --scale $dynamic_scale_factor --node $input_location] dt steps
+             set dt 0.1
+             set steps 3
+             """)
+
+             # RESPONSE HISTORY RECORDERS
+
+             model.recorder("Node", "accel", xml="model/AA_all.txt", timeSeries=(1, 2), dof=(1, 2))
+             model.recorder("Node", "accel", xml="model/RD_all.txt", dof=(1, 2))
+
+             column_nodes = tuple(k["node"] for k in self.runs[run_id]["bents"] if k["record"])
+             model.recorder("Node", "accel", file="TopColAccel_X_txt.txt", timeSeries=1, node=column_nodes, dof=1)
+             model.recorder("Node", "accel", file="TopColAccel_Y_txt.txt", timeSeries=2, node=column_nodes, dof=2)
+             model.recorder("Node", "disp", file="TopColDrift_X_txt.txt", node=column_nodes, dof=1)
+             model.recorder("Node", "disp", file="TopColDrift_Y_txt.txt", node=column_nodes, dof=2)
+
+             model.eval("""
+             set dtfact 1;
+             set Tol 1.0e-8;
+             set maxNumIter 100;
+             set printFlag 0;
+             set TestType EnergyIncr;
+             set NewmarkGamma 0.50;
+             set NewmarkBeta 0.25;
+             constraints Transformation;
+             numberer RCM;
+             test $TestType $Tol $maxNumIter $printFlag;
+             set algorithmType "Newton";
+             system BandSPD;
+             integrator Newmark $NewmarkGamma $NewmarkBeta;
+
+             algorithm {*}$algorithmType;
+             analysis Transient;
+
+             set DtAnalysis $dt;
+             set TmaxAnalysis [expr $dt*$steps];
+             set Nsteps $steps;
+             if {$dynamic_truncated != 0} {
+                 set Nsteps $dynamic_timesteps;
+             }
+             puts "\tGround Motion: dt= $DtAnalysis, NumPts= $Nsteps, TmaxAnalysis= $TmaxAnalysis";
+
+             puts "\tRunning dynamic ground motion analysis..."
+             set t3 [clock clicks -millisec];
+             catch {progress create $Nsteps} _
+
+             analyze 2 $DtAnalysis;
+
+             # for {set ik 1} {$ik <= $Nsteps} {incr ik 1} {
+             #     catch {progress update} _
+             #     set ok [analyze 1 $DtAnalysis];
+             # }
+             """)
+
+         model.wipe()
+
+
+     def getMetricData(self, run_id:int, type:str)->dict:
+         import orjson
+         def _clean_json(d):
+             return orjson.loads(orjson.dumps(d, option=orjson.OPT_SERIALIZE_NUMPY))
+
+         if run_id not in self.runs:
+             self._load_config(run_id)
+
+         run_data = self.runs.get(run_id, None)
+         config = run_data
+
+         if run_data is not None:
+             output_dir = Path(run_data["run_output_directory"])
+         else:
+             output_dir = self.out_dir/str(run_id)
+
+         # with open(output_dir/"modelDetails.json", "r") as f:
+         #     model = json.load(f)
+
+         model = read_model(output_dir/"modelDetails.json")
+
+         if type == "COLUMN_STRAIN_STATES":
+             return _clean_json(column_strain_state_metric(model, output_dir, config))
+
+         elif type == "PEAK_ACCEL":
+             return _clean_json(peak_acceleration_metric(output_dir, config))
+
+         elif type == "PEAK_DRIFT":
+             return _clean_json(peak_drift_metric(model, output_dir, config))
+
+         elif type == "ACC_RESPONSE_HISTORY":
+             # config = CONFIG
+             # return accel_response_history_plot(output_dir, config)
+             return {}
+         return {}
+
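The `Runner` API above splits a prediction into three phases: `newPrediction` stages the event and model files into a numbered `Predictions/<run_id>` directory, `runPrediction` replays the staged model through gravity, damping, and dynamic analyses, and `getMetricData` post-processes the recorder output. A minimal usage sketch follows, assuming a stored `PredictorModel`-like record `predictor` (exposing the `name`, `config`, `entry_point`, `metrics`, `active`, and `config_file` attributes that `Runner.__init__` reads) and an `event` object with an `event_file.path` attribute; both objects are hypothetical stand-ins, not defined in this file:

    from irie.apps.prediction.runners import OpenSeesRunner

    runner = OpenSeesRunner(predictor)      # wrap the stored predictor record
    run_id = runner.newPrediction(event)    # stage event.zip and the model file
    runner.runPrediction(run_id)            # gravity + damping + dynamic analysis
    for metric in runner.getMetricList():   # e.g. "PEAK_ACCEL", "PEAK_DRIFT"
        data = runner.getMetricData(run_id, metric)
        print(metric, data.get("summary"))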
irie/apps/prediction/runners/metrics.py
@@ -0,0 +1,168 @@
+ #===----------------------------------------------------------------------===#
+ #
+ # STAIRLab -- STructural Artificial Intelligence Laboratory
+ #
+ #===----------------------------------------------------------------------===#
+ #
+ # Chrystal Chern
+ #
+ import numpy as np
+ from pathlib import Path
+ from .utilities import getPeak, husid, read_sect_xml, get_DS
+
+ def _get_node(model, tag)->dict:
+     for node in model["StructuralAnalysisModel"]["geometry"]["nodes"]:
+         if node["name"] == tag:
+             return node
+     return {}
+
+ def _get_elem(model, tag)->dict:
+     for elem in model["StructuralAnalysisModel"]["geometry"]["elements"]:
+         if elem["name"] == tag:
+             return elem
+     return {}
+
+ def _get_bot_nodes(model, toptags, column_tags)->list:
+     bot_nodes = [[tag for tag in _get_elem(model, elemtag)["nodes"]
+                   if tag not in toptags][0]
+                  for elemtag in column_tags]
+     return bot_nodes
+
+
+ def peak_acceleration_metric(output_directory, config):
+     nodes = [node["node"] for node in config["bents"]]
+     bents = {node["node"]: node["label"] for node in config["bents"]}
+     peaksX, timePeaksX, maxPeakColX = getPeak(output_directory/"TopColAccel_X_txt.txt")
+     peaksY, timePeaksY, maxPeakColY = getPeak(output_directory/"TopColAccel_Y_txt.txt")
+     out = {"column": [bents.get(n, "NA") for n in nodes],
+            "peak_acc_X": peaksX.tolist(),
+            "peak_acc_Y": peaksY.tolist(),
+            "time_peak_X": timePeaksX.tolist(),
+            "time_peak_Y": timePeaksY.tolist(),
+           }
+
+     # BUILD SUMMARY
+     peaks = np.array([ out["peak_acc_X"], out["peak_acc_Y"] ])
+     maxPeaks = np.max(peaks, axis=1)
+     maxPeakins2 = max(maxPeaks)
+     maxPeakg = maxPeakins2*0.00259007918  # convert in/s^2 to g
+     maxPeakdir = np.argmax(maxPeaks)
+     maxPeakLoc = np.argmax(peaks[maxPeakdir])
+     col = out["column"][maxPeakLoc]
+     timesPeaks = np.array([ out["time_peak_X"], out["time_peak_Y"] ])
+     timeMaxPeak = timesPeaks[maxPeakdir][maxPeakLoc]
+     summary = {
+         "peak": str(maxPeakg),
+         "units": 'g',
+         "col": col,
+         "time": timeMaxPeak,
+         "metric_completion": 70
+     }
+     # BUILD DETAILS
+     details = [["column", *[k for k in out if k != "column"]]] + [
+         [c, *[out[k][i] for k in out if k != "column"]] for i, c in enumerate(out["column"])
+     ]
+     return {"summary": summary, "details": details}
+
+ def peak_drift_metric(model, output_directory, config):
+     VERT = 2
+     nodes = [node["node"] for node in config["bents"]]
+     bents = {node["node"]: node["label"] for node in config["bents"]}
+
+     column_tags = [elem["key"] for elem in config["columns"]]
+     output_directory = Path(output_directory)
+
+
+
+     heights = np.array([
+         _get_node(model, top)["crd"][VERT] - _get_node(model, bot)["crd"][VERT]
+         for top, bot in zip(nodes, _get_bot_nodes(model, nodes, column_tags))
+     ])
+     peaksX, timePeaksX, maxPeakColX = getPeak(output_directory/"TopColDrift_X_txt.txt")
+     peaksY, timePeaksY, maxPeakColY = getPeak(output_directory/"TopColDrift_Y_txt.txt")
+     out = {"column": [bents.get(n, "NA") for n in nodes],
+            "peak_drf_X": (100*peaksX/np.append(heights, heights[maxPeakColX])).tolist(),
+            "peak_drf_Y": (100*peaksY/np.append(heights, heights[maxPeakColY])).tolist(),
+            "time_peak_X": timePeaksX.tolist(),
+            "time_peak_Y": timePeaksY.tolist(),
+           }
+
+     # BUILD SUMMARY
+     peaks = np.array([ out["peak_drf_X"], out["peak_drf_Y"] ])
+     maxPeaks = np.max(peaks, axis=1)
+     maxPeak = max(maxPeaks)
+     maxPeakdir = np.argmax(maxPeaks)
+     maxPeakLoc = np.argmax(peaks[maxPeakdir])
+     col = out["column"][maxPeakLoc]
+     timesPeaks = np.array([ out["time_peak_X"], out["time_peak_Y"] ])
+     timeMaxPeak = timesPeaks[maxPeakdir][maxPeakLoc]
+     summary = {
+         "peak": str(maxPeak),
+         "units": '%',
+         "col": col,
+         "time": timeMaxPeak,
+         "metric_completion": 50
+     }
+     # BUILD DETAILS
+     details = [["column", *[k for k in out if k != "column"]]] + [
+         [c, *[out[k][i] for k in out if k != "column"]] for i, c in enumerate(out["column"])
+     ]
+     return {"summary": summary, "details": details}
+
+ def accel_response_history_plot(output_directory, config):
+     nodes = [node["node"] for node in config["bents"]]
+     RH = np.loadtxt(output_directory/"TopColAccel_Y_txt.txt")[:, nodes.index(403)]
+     dt = 0.01
+     window = husid(RH, False, dt, lb=0.005, ub=0.995)
+     RH = RH[window[0]:window[1]]
+     return {"accel_RH": RH}
+
+
+ def column_strain_state_metric(model, output_directory, config):
+     elems = [int(elem["key"]) for elem in config["columns"] if elem["strain"]]
+     columns = {item["key"]: item["label"] for item in config["columns"]}
+
+     output_directory = Path(output_directory)
+
+     strain_data = {
+         file.name: read_sect_xml(file) for file in output_directory.glob("eleDef*.txt")
+     }
+     DSbyEle = get_DS(output_directory, model, elems, strain_data)
+
+     keys = {
+         0: "No damage",
+         1: "Minor damage: flexural cracks",
+         2: "Minor spalling",
+         3: "Extensive cracks and spalling",
+         4: "Visible reinforcing bars",
+         5: "Core edge failure",
+         6: "Bar fracture"
+     }
+
+     out = {
+         "col_ids": list(DSbyEle.keys()),
+         "column": np.array([columns[int(elem)] for elem in DSbyEle]),
+         "ds": np.array([elem["state"] for elem in DSbyEle.values()]),
+         "damage_state": np.array([keys[elem["state"]] for elem in DSbyEle.values()]),
+         "time_of_ds": np.array([elem["time"] for elem in DSbyEle.values()])
+     }
+
+     # BUILD SUMMARY
+     colsMaxDS = list(np.array(out["column"])[out["ds"] == np.max(out["ds"])])
+     MaxDS = list(np.array(out["damage_state"])[out["ds"] == np.max(out["ds"])])[0]
+     DStimes = np.array(out["time_of_ds"])[out["ds"] == np.max(out["ds"])]
+     DStimes = [t for t in DStimes if t > 0.0]
+     summary = {
+         "max_ds": "DS"+str(max(out["ds"])),
+         "col": colsMaxDS,
+         "col_ids": list(np.array(out["col_ids"])[out["ds"] == np.max(out["ds"])]),
+         "no_col": len(colsMaxDS),
+         "ds_descr": MaxDS,
+         "time": min(DStimes, default=0.0),
+         "metric_completion": 15
+     }
+     # BUILD DETAILS
+     details = [["column", "damage_state", "time_of_ds"]] + [
+         [c, *[out[k][i] for k in ["damage_state", "time_of_ds"]]] for i, c in enumerate(out["column"])
+     ]
+     return {"summary": summary, "details": details}
irie/apps/prediction/runners/opensees/__init__.py (file without changes)
irie/apps/prediction/runners/opensees/schemas/__init__.py
@@ -0,0 +1,39 @@
+ from pathlib import Path
+
+ def load(name):
+     import json
+     with open(Path(__file__).parents[0]/name, "r") as f:
+         return json.load(f)
+
+
+ from jsonschema import Draft202012Validator, validators
+
+
+ def _extend_with_default(validator_class):
+     validate_properties = validator_class.VALIDATORS["properties"]
+
+     def set_defaults(validator, properties, instance, schema):
+         for property, subschema in properties.items():
+             if "default" in subschema:
+                 instance.setdefault(property, subschema["default"])
+
+         for error in validate_properties(
+             validator, properties, instance, schema,
+         ):
+             yield error
+
+     return validators.extend(
+         validator_class, {"properties": set_defaults},
+     )
+
+
+ DefaultValidatingValidator = _extend_with_default(Draft202012Validator)
+
+ def default(schema):
+     obj = {}
+     DefaultValidatingValidator(load(schema)).validate(obj)
+     return obj
+
+ if __name__ == "__main__":
+     obj = {}
+     print(default("hwd_conf.schema.json"))
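`default()` follows the jsonschema documentation's recipe for extending a validator class so that validating an instance also fills in `default` values declared in the schema; as validation recurses through `properties`, defaults in nested objects that are present get applied as well. A self-contained demonstration; the inline schema literal is illustrative and is not one of the package's `hwd_*.schema.json` files:

    from irie.apps.prediction.runners.opensees.schemas import DefaultValidatingValidator

    schema = {
        "type": "object",
        "properties": {
            "platform": {"type": "string", "default": "OpenSees"},
            "nmodes":   {"type": "integer", "default": 8},
        },
    }

    obj = {}
    DefaultValidatingValidator(schema).validate(obj)   # mutates obj in place
    assert obj == {"platform": "OpenSees", "nmodes": 8}

Extending the validator, rather than post-processing the instance, keeps default-filling consistent with whatever schema is loaded, since the same `validate` call both checks the instance and populates it.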