irie 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of irie might be problematic. See the registry's release page for more details.

Files changed (61)
  1. irie/apps/config.py +0 -1
  2. irie/apps/evaluation/identification.py +1 -1
  3. irie/apps/evaluation/models.py +3 -3
  4. irie/apps/evaluation/views.py +3 -3
  5. irie/apps/events/admin.py +2 -2
  6. irie/apps/events/migrations/0002_rename_event_eventrecord.py +19 -0
  7. irie/apps/events/migrations/0003_hazardevent.py +21 -0
  8. irie/apps/events/models.py +55 -5
  9. irie/apps/events/views.py +48 -3
  10. irie/apps/events/views_events.py +6 -10
  11. irie/apps/inventory/filters.py +37 -0
  12. irie/apps/inventory/models.py +7 -0
  13. irie/apps/inventory/urls.py +1 -0
  14. irie/apps/inventory/views.py +134 -227
  15. irie/apps/prediction/forms.py +4 -8
  16. irie/apps/prediction/metrics.py +0 -2
  17. irie/apps/prediction/migrations/0002_alter_predictormodel_protocol.py +18 -0
  18. irie/apps/prediction/models.py +4 -4
  19. irie/apps/prediction/predictor.py +18 -12
  20. irie/apps/prediction/runners/__init__.py +3 -398
  21. irie/apps/prediction/runners/hazus.py +579 -0
  22. irie/apps/prediction/runners/opensees/__init__.py +395 -0
  23. irie/apps/prediction/runners/{utilities.py → opensees/utilities.py} +7 -7
  24. irie/apps/prediction/runners/ssid.py +414 -0
  25. irie/apps/prediction/urls.py +1 -1
  26. irie/apps/prediction/views.py +45 -22
  27. irie/apps/site/view_sdof.py +2 -2
  28. irie/apps/templates/admin/base_site.html +3 -1
  29. irie/apps/templates/css/admin-extra.css +7 -0
  30. irie/apps/templates/includes/sidebar.html +17 -14
  31. irie/apps/templates/inventory/asset-event-summary.html +3 -2
  32. irie/apps/templates/inventory/asset-profile.html +126 -38
  33. irie/apps/templates/inventory/asset-table.html +191 -135
  34. irie/apps/templates/inventory/dashboard.html +105 -27
  35. irie/apps/templates/inventory/preamble.tex +131 -0
  36. irie/apps/templates/inventory/report.tex +59 -0
  37. irie/apps/templates/networks/corridor_table.html +2 -2
  38. irie/apps/templates/networks/networks.html +164 -0
  39. irie/apps/templates/prediction/asset-predictors.html +6 -6
  40. irie/apps/templates/prediction/form-submission.html +3 -3
  41. irie/apps/templates/prediction/hazus/event.html +33 -0
  42. irie/apps/templates/prediction/hazus/history.html +1 -0
  43. irie/apps/templates/prediction/hazus/history.js +44 -0
  44. irie/apps/templates/prediction/{new-predictor.html → new-runner.html} +12 -8
  45. irie/apps/templates/site/index.html +29 -47
  46. irie/core/urls.py +7 -2
  47. irie/init/__main__.py +2 -0
  48. irie/init/bridges.py +5 -3
  49. irie/init/management/commands/init_assets.py +24 -45
  50. irie/init/management/commands/init_corridors.py +3 -6
  51. irie/init/management/commands/init_predictors.py +23 -8
  52. irie/post/__main__.py +88 -0
  53. {irie-0.0.5.dist-info → irie-0.0.6.dist-info}/METADATA +5 -3
  54. {irie-0.0.5.dist-info → irie-0.0.6.dist-info}/RECORD +61 -47
  55. /irie/apps/prediction/runners/{metrics.py → opensees/metrics.py} +0 -0
  56. /irie/apps/prediction/runners/{xmlutils.py → opensees/xmlutils.py} +0 -0
  57. /irie/apps/prediction/runners/{zipped.py → opensees/zipped.py} +0 -0
  58. /irie/init/data/{04.tar → nbi/04.tar} +0 -0
  59. {irie-0.0.5.dist-info → irie-0.0.6.dist-info}/WHEEL +0 -0
  60. {irie-0.0.5.dist-info → irie-0.0.6.dist-info}/entry_points.txt +0 -0
  61. {irie-0.0.5.dist-info → irie-0.0.6.dist-info}/top_level.txt +0 -0
@@ -1,55 +1,10 @@
1
- #===----------------------------------------------------------------------===#
2
- #
3
- # STAIRLab -- STructural Artificial Intelligence Laboratory
4
- #
5
- #===----------------------------------------------------------------------===#
6
- #
7
- import os.path
8
- import shutil
9
- from typing import NewType
10
- import sys, json
11
- import zipfile
12
1
  from pathlib import Path
2
+ from typing import NewType
13
3
  from abc import abstractmethod
14
- import contextlib
15
-
16
- class Event: pass
17
-
18
4
  RunID = NewType("RunID", int)
19
5
 
20
6
  MetricType = NewType("MetricType", str)
21
7
 
22
- from .utilities import read_model
23
- from .metrics import (
24
- accel_response_history_plot,
25
- column_strain_state_metric,
26
- peak_acceleration_metric,
27
- peak_drift_metric
28
- )
29
-
30
- OPENSEES = [
31
- sys.executable, "-m", "opensees",
32
- ]
33
-
34
-
35
- @contextlib.contextmanager
36
- def new_cd(x):
37
- d = os.getcwd()
38
-
39
- # This could raise an exception, but it's probably
40
- # best to let it propagate and let the caller
41
- # deal with it, since they requested x
42
- os.chdir(x)
43
-
44
- try:
45
- yield
46
-
47
- finally:
48
- # This could also raise an exception, but you *really*
49
- # aren't equipped to figure out what went wrong if the
50
- # old working directory can't be restored.
51
- os.chdir(d)
52
-
53
8
  class classproperty(property):
54
9
  def __get__(self, cls, owner):
55
10
  return classmethod(self.fget).__get__(None, owner)()
@@ -78,8 +33,8 @@ class Runner:
78
33
  self.active = conf.active
79
34
  try:
80
35
  self.model_file = Path(conf.config_file.path).resolve()
81
- except ValueError:
82
- pass
36
+ except ValueError as e:
37
+ print(e)
83
38
 
84
39
  self.out_dir = Path(__file__).parents[0]/"Predictions"
85
40
  self.runs = {}
@@ -98,353 +53,3 @@ class Runner:
98
53
 
99
54
  @abstractmethod
100
55
  def getMetricData(self, run: RunID, metric: MetricType)->dict: ...
101
-
102
-
103
- class OpenSeesRunner(Runner):
104
- @property
105
- def platform(self):
106
- return self.conf.get("platform", "")
107
-
108
-
109
- @classmethod
110
- def create(cls, asset, request, form):
111
- predictor = form.save(commit=False)
112
-
113
- predictor.entry_point = [
114
- sys.executable, "-m", "opensees"
115
- ]
116
- predictor.config = {}
117
- predictor.protocol = "BRACE2_CLI_PREDICTOR_T4"
118
- predictor.active = True
119
- # predictor.metrics = self.getMetricList()
120
- return predictor
121
-
122
-
123
- @classproperty
124
- def schema(cls):
125
- from .opensees import schemas
126
- return {
127
- "title": "Structural Model",
128
- "options": {"disable_collaps": True},
129
- "schema": "http://json-schema.org/draft-04/schema#",
130
- "type": "object",
131
- "properties": {
132
- "platform": {
133
- "type": "string",
134
- "title": "Platform",
135
- "enum": ["OpenSees","CSiBridge"]
136
- },
137
- "model": schemas.load("hwd_conf.schema.json"),
138
- "analysis": schemas.load("hwd_analysis.schema.json"),
139
- }
140
- }
141
-
142
- def getMetricList(self):
143
- return [
144
- "COLUMN_STRAIN_STATES",
145
- "PEAK_ACCEL",
146
- "PEAK_DRIFT",
147
- # "ACC_RESPONSE_HISTORY",
148
- ]
149
-
150
- def newPrediction(self, event, output_directory = None):
151
- """
152
- Create a new prediction run and return the run_id. If output_directory is None,
153
- the output directory will be created automatically. Otherwise, the output directory
154
- will be copied to the new output directory.
155
- """
156
- event = event.event_file.path
157
- if output_directory is not None:
158
- # this case will eventually be deleted, its just for
159
- # debugging metric renderers.
160
- run_id = "0"
161
- self.runs[run_id] = {
162
- "run_output_directory": Path(output_directory)
163
- }
164
-
165
- else:
166
- # Calculate next output directory and
167
- # create directory if it doesn't exist
168
- out_dir = self.out_dir
169
- if not out_dir.is_dir():
170
- (out_dir/"0").mkdir(parents=True)
171
-
172
- latestDir = list(sorted((f for f in out_dir.iterdir() if f.is_dir()), key=lambda m: int(m.name)))[-1]
173
- run_id = int(latestDir.name)+1
174
- run_dir = out_dir/str(run_id)
175
- run_dir = run_dir.resolve()
176
- run_dir.mkdir(parents=True, exist_ok=False)
177
-
178
- # Copy files to run directory
179
- shutil.copyfile(event, run_dir/"event.zip")
180
- shutil.copyfile(self.model_file.resolve(), run_dir/self.model_file.name)
181
-
182
- if self.model_file.suffix == ".zip":
183
- with zipfile.ZipFile(self.model_file, 'r') as zip_ref:
184
- zip_ref.extractall(run_dir)
185
- model_file = (run_dir/"nonlinear.tcl").resolve()
186
-
187
- elif self.model_file.suffix == ".b2k":
188
- pass
189
-
190
- elif self.model_file.suffix == ".tcl":
191
- model_file = (run_dir/self.model_file.name).resolve()
192
-
193
- self.runs[run_id] = {
194
- "run_output_directory": run_dir,
195
- "event_file_name": Path(event),
196
- "model_file": model_file,
197
- **self.conf
198
- }
199
-
200
- with open(out_dir/str(run_id)/"conf.json", "w") as f:
201
- json.dump({k: str(v) for k,v in self.runs[run_id].items()}, f)
202
-
203
- return run_id
204
-
205
-
206
- def _load_config(self, run_id):
207
- run_dir = self.out_dir/str(run_id)
208
- with open(run_dir/"conf.json","r") as f:
209
- self.runs[run_id] = json.load(f)
210
-
211
- self.model_file = Path(self.runs[run_id]["model_file"])
212
-
213
-
214
- def runPrediction(self, run_id, scale: float = None):
215
- if run_id not in self.runs:
216
- self._load_config(run_id)
217
-
218
- event_file_path = os.path.relpath(self.runs[run_id]["event_file_name"],
219
- self.model_file.parents[0])
220
- output_directory = os.path.relpath(self.runs[run_id]["run_output_directory"],
221
- self.model_file.parents[0])
222
-
223
- event_file_path = self.runs[run_id]["event_file_name"]
224
-
225
- # Create model
226
- import opensees.openseespy as ops
227
-
228
- import sys
229
- model = ops.Model(echo_file=sys.stdout)
230
- model.eval("set argv {}")
231
- with new_cd(self.runs[run_id]["run_output_directory"]):
232
- model.eval(f"source {self.runs[run_id]['model_file']}")
233
-
234
- model.eval(f"print -json -file modelDetails.json")
235
-
236
- model.eval(f"set python {sys.executable}")
237
-
238
- model.eval(r"""
239
- proc py {args} {
240
- global python
241
- eval "[exec {*}$python {*}$args]"
242
- }
243
-
244
- proc pt {args} {
245
- global python
246
- puts "[exec {*}$python {*}$args]"
247
- }
248
-
249
- proc write_modes {mode_file nmodes} {
250
- set fid_modes [open $mode_file w+]
251
- for {set m 1} {$m <= $nmodes} {incr m} {
252
- puts $fid_modes "$m:"
253
- foreach n [getNodeTags] {
254
- puts $fid_modes " $n: \[[join [nodeEigenvector $n $m] {, }]\]";
255
- }
256
- }
257
- close $fid_modes
258
- }
259
- proc write_displacements {file_name {resp Disp}} {
260
- set fid [open "$file_name" "w+"]
261
- puts $fid "[getTime]:"
262
- foreach n [getNodeTags] {
263
- puts $fid " $n: \[[join [node${resp} $n] {, }]\]";
264
- }
265
- close $fid;
266
- }
267
- """)
268
-
269
- #
270
- # Run gravity analysis
271
- #
272
- model.eval("""
273
- wipeAnalysis
274
- test NormDispIncr 1.0e-8 10 0;
275
- algorithm Newton;
276
- integrator LoadControl 0.1;
277
- numberer Plain;
278
- constraints Transformation;
279
- system SparseGeneral;
280
- analysis Static;
281
- analyze 10;
282
- # write_displacements "dispsGrav.yaml"
283
- """)
284
-
285
- #
286
- # DAMPING
287
- #
288
- model.eval(r"""
289
- set nmodes 8; # Number of modes to analyze for modal analysis
290
-
291
- # set wb [eigen -fullGenLapack $nmodes];
292
- # puts "\tFundamental-Period After Gravity Analysis:"
293
- # for {set iPd 1} {$iPd <= $nmodes} {incr iPd 1} {
294
- # set wwb [lindex $wb $iPd-1];
295
- # set Tb [expr 2*$pi/sqrt($wwb)];
296
- # puts "\tPeriod$iPd= $Tb"
297
- # }
298
- # write_modes $output_directory/modesPostG.yaml $nmodes
299
- # remove recorders
300
-
301
- set nmodes [tcl::mathfunc::max {*}$damping_modes $nmodes]
302
- set lambdaN [eigen -fullGenLapack $nmodes];
303
-
304
- # set lambdaN [eigen $nmodes];
305
- if {$damping_type == "rayleigh"} {
306
- set nEigenI [lindex $damping_modes 0]; # first rayleigh damping mode
307
- set nEigenJ [lindex $damping_modes 1]; # second rayleigh damping mode
308
- set iDamp [lindex $damping_ratios 0]; # first rayleigh damping ratio
309
- set jDamp [lindex $damping_ratios 1]; # second rayleigh damping ratio
310
- set lambdaI [lindex $lambdaN [expr $nEigenI-1]];
311
- set lambdaJ [lindex $lambdaN [expr $nEigenJ-1]];
312
- set omegaI [expr $lambdaI**0.5];
313
- set omegaJ [expr $lambdaJ**0.5];
314
- set TI [expr 2.0*$pi/$omegaI];
315
- set TJ [expr 2.0*$pi/$omegaJ];
316
- set alpha0 [expr 2.0*($iDamp/$omegaI-$jDamp/$omegaJ)/(1/$omegaI**2-1/$omegaJ**2)];
317
- set alpha1 [expr 2.0*$iDamp/$omegaI-$alpha0/$omegaI**2];
318
- puts "\tRayleigh damping parameters:"
319
- puts "\tmodes: $nEigenI, $nEigenJ ; ratios: $iDamp, $jDamp"
320
- puts "\tTI = $TI; TJ = $TJ"
321
- puts "\tlambdaI = $lambdaI; lambdaJ = $lambdaJ"
322
- puts "\tomegaI = $omegaI; omegaJ = $omegaJ"
323
- puts "\talpha0 = $alpha0; alpha1 = $alpha1"
324
- rayleigh $alpha0 0.0 0.0 $alpha1;
325
-
326
- } elseif {$damping_type == "modal"} {
327
- # needs a bit of edit. currently assuming that the ratios are applied in order at the first modes. but should be applied at the specified damping_modes modes.
328
- set nratios [llength $damping_ratios]
329
- puts "\tModal damping parameters:"
330
- puts "\tratios of $damping_ratios at the first $nratios modes"
331
- for {set i 1} {$i <= [expr $nmodes - $nratios]} {incr i} {
332
- lappend damping_ratios 0
333
- }
334
- modalDamping {*}$damping_ratios
335
- }
336
- """)
337
-
338
-
339
- #
340
- # DYNAMIC RECORDERS
341
- #
342
-
343
- ## COLUMN SECTION DEFORMATIONS AT TOP AND BOTTOM FOR STRAIN-BASED DAMAGE STATES
344
- column_strains = tuple(k["key"] for k in CONFIG["columns"] if k["strain"])
345
- if len(column_strains) > 0:
346
- model.recorder("Element", "section", 1, "deformation", xml="eleDef1.txt", ele=column_strains) # section 1 deformation]
347
- model.recorder("Element", "section", 4, "deformation", xml="eleDef4.txt", ele=column_strains) # section 4 deformation]
348
-
349
-
350
-
351
- #
352
- # Run dynamic analysis
353
- #
354
- model.eval(f"""
355
- wipeAnalysis
356
- # Uniform Support Excitation
357
- # lassign [pt -m CE58658.makePattern {event_file_path} --scale $dynamic_scale_factor --node $input_location] dt steps
358
- # lassign [py -m CE58658.makePattern {event_file_path} --scale $dynamic_scale_factor --node $input_location] dt steps
359
- set dt 0.1
360
- set steps 3
361
- """)
362
-
363
- # RESPONSE HISTORY RECORDERS
364
-
365
- model.recorder("Node", "accel", xml="model/AA_all.txt", timeSeries=(1, 2), dof=(1, 2))
366
- model.recorder("Node", "accel", xml="model/RD_all.txt", dof=(1, 2))
367
-
368
- column_nodes = tuple(k["node"] for k in CONFIG["bents"] if k["record"])
369
- model.recorder("Node", "accel", file="TopColAccel_X_txt.txt", timeSeries=1 , node=column_nodes, dof=1)
370
- model.recorder("Node", "accel", file="TopColAccel_Y_txt.txt", timeSeries=2 , node=column_nodes, dof=2)
371
- model.recorder("Node", "disp", file="TopColDrift_X_txt.txt", node=column_nodes, dof=1)
372
- model.recorder("Node", "disp", file="TopColDrift_Y_txt.txt", node=column_nodes, dof=2)
373
-
374
- model.eval("""
375
- set dtfact 1;
376
- set Tol 1.0e-8;
377
- set maxNumIter 100;
378
- set printFlag 0;
379
- set TestType EnergyIncr;
380
- set NewmarkGamma 0.50;
381
- set NewmarkBeta 0.25;
382
- constraints Transformation;
383
- numberer RCM;
384
- test $TestType $Tol $maxNumIter $printFlag;
385
- set algorithmType "Newton";
386
- system BandSPD;
387
- integrator Newmark $NewmarkGamma $NewmarkBeta;
388
-
389
- algorithm {*}$algorithmType;
390
- analysis Transient;
391
-
392
- set DtAnalysis $dt;
393
- set TmaxAnalysis [expr $dt*$steps];
394
- set Nsteps $steps;
395
- if {$dynamic_truncated != 0} {
396
- set Nsteps $dynamic_timesteps;
397
- }
398
- puts "\tGround Motion: dt= $DtAnalysis, NumPts= $Nsteps, TmaxAnalysis= $TmaxAnalysis";
399
-
400
- puts "\tRunning dynamic ground motion analysis..."
401
- set t3 [clock clicks -millisec];
402
- catch {progress create $Nsteps} _
403
-
404
- analyze 2 $DtAnalysis;
405
-
406
- # for {set ik 1} {$ik <= $Nsteps} {incr ik 1} {
407
- # catch {progress update} _
408
- # set ok [analyze 1 $DtAnalysis];
409
- # }
410
- """)
411
-
412
- model.wipe()
413
-
414
-
415
- def getMetricData(self, run_id:int, type:str)->dict:
416
- import orjson
417
- def _clean_json(d):
418
- return orjson.loads(orjson.dumps(d,option=orjson.OPT_SERIALIZE_NUMPY))
419
-
420
- if run_id not in self.runs:
421
- self._load_config(run_id)
422
-
423
- run_data = self.runs.get(run_id, None)
424
- config = run_data
425
-
426
- if run_data is not None:
427
- output_dir = Path(run_data["run_output_directory"])
428
- else:
429
- output_dir = self.out_dir/str(run_id)
430
-
431
- # with open(output_dir/"modelDetails.json", "r") as f:
432
- # model = json.load(f)
433
-
434
- model = read_model(output_dir/"modelDetails.json")
435
-
436
- if type == "COLUMN_STRAIN_STATES":
437
- return _clean_json(column_strain_state_metric(model, output_dir, config))
438
-
439
- elif type == "PEAK_ACCEL":
440
- return _clean_json(peak_acceleration_metric(output_dir, config))
441
-
442
- elif type == "PEAK_DRIFT":
443
- return _clean_json(peak_drift_metric(model, output_dir, config))
444
-
445
- elif type == "ACC_RESPONSE_HISTORY":
446
- # config = CONFIG
447
- # return accel_response_history_plot(output_dir, config)
448
- return {}
449
- return {}
450
-