NREL-erad 0.0.0a0__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. erad/__init__.py +1 -0
  2. erad/constants.py +59 -11
  3. erad/default_fragility_curves/__init__.py +15 -0
  4. erad/default_fragility_curves/default_fire_boundary_dist.py +94 -0
  5. erad/default_fragility_curves/default_flood_depth.py +108 -0
  6. erad/default_fragility_curves/default_flood_velocity.py +101 -0
  7. erad/default_fragility_curves/default_fragility_curves.py +23 -0
  8. erad/default_fragility_curves/default_peak_ground_acceleration.py +163 -0
  9. erad/default_fragility_curves/default_peak_ground_velocity.py +94 -0
  10. erad/default_fragility_curves/default_wind_speed.py +94 -0
  11. erad/enums.py +40 -0
  12. erad/gdm_mapping.py +83 -0
  13. erad/models/__init__.py +1 -0
  14. erad/models/asset.py +287 -0
  15. erad/models/asset_mapping.py +20 -0
  16. erad/models/fragility_curve.py +116 -0
  17. erad/models/hazard/__init__.py +5 -0
  18. erad/models/hazard/base_models.py +12 -0
  19. erad/models/hazard/common.py +26 -0
  20. erad/models/hazard/earthquake.py +93 -0
  21. erad/models/hazard/flood.py +83 -0
  22. erad/models/hazard/wild_fire.py +121 -0
  23. erad/models/hazard/wind.py +143 -0
  24. erad/models/probability.py +73 -0
  25. erad/probability_builder.py +35 -0
  26. erad/quantities.py +25 -0
  27. erad/runner.py +122 -0
  28. erad/systems/__init__.py +2 -0
  29. erad/systems/asset_system.py +414 -0
  30. erad/systems/hazard_system.py +122 -0
  31. nrel_erad-0.1.0.dist-info/METADATA +55 -0
  32. nrel_erad-0.1.0.dist-info/RECORD +35 -0
  33. {NREL_erad-0.0.0a0.dist-info → nrel_erad-0.1.0.dist-info}/WHEEL +1 -1
  34. NREL_erad-0.0.0a0.dist-info/METADATA +0 -61
  35. NREL_erad-0.0.0a0.dist-info/RECORD +0 -42
  36. erad/cypher_queries/load_data_v1.cypher +0 -212
  37. erad/data/World_Earthquakes_1960_2016.csv +0 -23410
  38. erad/db/__init__.py +0 -0
  39. erad/db/assets/__init__.py +0 -0
  40. erad/db/assets/critical_infras.py +0 -171
  41. erad/db/assets/distribution_lines.py +0 -101
  42. erad/db/credential_model.py +0 -20
  43. erad/db/disaster_input_model.py +0 -23
  44. erad/db/inject_earthquake.py +0 -52
  45. erad/db/inject_flooding.py +0 -53
  46. erad/db/neo4j_.py +0 -162
  47. erad/db/utils.py +0 -14
  48. erad/exceptions.py +0 -68
  49. erad/metrics/__init__.py +0 -0
  50. erad/metrics/check_microgrid.py +0 -208
  51. erad/metrics/metric.py +0 -178
  52. erad/programs/__init__.py +0 -0
  53. erad/programs/backup.py +0 -62
  54. erad/programs/microgrid.py +0 -45
  55. erad/scenarios/__init__.py +0 -0
  56. erad/scenarios/abstract_scenario.py +0 -103
  57. erad/scenarios/common.py +0 -93
  58. erad/scenarios/earthquake_scenario.py +0 -161
  59. erad/scenarios/fire_scenario.py +0 -160
  60. erad/scenarios/flood_scenario.py +0 -494
  61. erad/scenarios/utilities.py +0 -76
  62. erad/scenarios/wind_scenario.py +0 -89
  63. erad/utils/__init__.py +0 -0
  64. erad/utils/ditto_utils.py +0 -252
  65. erad/utils/hifld_utils.py +0 -147
  66. erad/utils/opendss_utils.py +0 -357
  67. erad/utils/overpass.py +0 -76
  68. erad/utils/util.py +0 -178
  69. erad/visualization/__init__.py +0 -0
  70. erad/visualization/plot_graph.py +0 -218
  71. {NREL_erad-0.0.0a0.dist-info → nrel_erad-0.1.0.dist-info/licenses}/LICENSE.txt +0 -0
  72. {NREL_erad-0.0.0a0.dist-info → nrel_erad-0.1.0.dist-info}/top_level.txt +0 -0
erad/metrics/check_microgrid.py DELETED
@@ -1,208 +0,0 @@
- """ This module contains functions and utilities to check for the
- possibility of microgrid formation.
- """
-
- from typing import List, Dict
- import math
- import json
-
- from neo4j import GraphDatabase
- import networkx as nx
- import matplotlib.pyplot as plt
-
-
- def create_directed_graph(
-     driver: GraphDatabase.driver,
- ):
-     """Creates a directed graph representation of the power network.
-
-     For now we read all the relationships and nodes. We will need to
-     filter this by feeder to avoid running into memory issues for
-     larger graphs in the future.
-
-     Args:
-         driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
-     """
-
-     # Get the buses, customers, PVs, energy storage and line sections
-     power_network_query = """
-         MATCH (sourceNode:Bus)-[relationship:CONNECTS_TO]-(targetNode:Bus)
-         RETURN relationship{.*}, sourceNode{.*}, targetNode{.*}
-     """
-
-     # Getting relations between customers and buses
-     customer_bus_network_query = """
-         MATCH (sourceNode:Bus)-[relationship:CONSUMES_POWER_FROM]-(targetNode:Load)
-         RETURN relationship{.*}, sourceNode{.*}, targetNode{.*}
-     """
-
-     # Getting relations between critical infrastructures and buses
-     critical_infra_bus_network_query = """
-         MATCH (sourceNode:Bus)-[relationship:GETS_POWER_FROM]-(targetNode)
-         RETURN relationship{.*}, sourceNode{.*}, targetNode{.*}
-     """
-
-     # Getting relations between PVs and buses
-     pv_bus_network_query = """
-         MATCH (sourceNode:Bus)-[relationship:INJECTS_ACTIVE_POWER_TO]-(targetNode:Solar)
-         RETURN relationship{.*}, sourceNode{.*}, targetNode{.*}
-     """
-
-     # Getting relations between energy storage and buses
-     es_bus_network_query = """
-         MATCH (sourceNode:Bus)-[relationship:INJECTS_POWER]-(targetNode:EnergyStorage)
-         RETURN relationship{.*}, sourceNode{.*}, targetNode{.*}
-     """
-
-     relations = []
-     for query in [
-         power_network_query,
-         customer_bus_network_query,
-         pv_bus_network_query,
-         es_bus_network_query,
-         critical_infra_bus_network_query,
-     ]:
-         with driver.session() as session:
-             result = session.read_transaction(lambda tx: tx.run(query).data())
-             relations.extend(result)
-
-     graph = nx.Graph()
-     for rel in relations:
-
-         # Unpack the relationship data
-         relationship = rel["relationship"]
-         source_node = rel["sourceNode"]
-         target_node = rel["targetNode"]
-
-         # Add nodes if not already present in the graph
-         for node in [source_node, target_node]:
-             if not graph.has_node(node["name"]):
-                 graph.add_node(node["name"], **node)
-
-         # Add the relationship as an edge
-         graph.add_edge(source_node["name"], target_node["name"], **relationship)
-
-     return graph.to_directed()
-
-
- def node_connected_to_substation(
-     substation_nodes: List[str],
-     driver: GraphDatabase.driver,
- ):
-     """Gives the list of nodes still connected to a substation."""
-     directed_graph = create_directed_graph(driver)
-     edges_to_be_removed = []
-
-     for edge in directed_graph.edges():
-         edge_data = directed_graph.get_edge_data(*edge)
-         if "survive" in edge_data and int(edge_data["survive"]) == 0:
-             edges_to_be_removed.append(edge)
-
-     if edges_to_be_removed:
-         directed_graph.remove_edges_from(edges_to_be_removed)
-         wcc = nx.weakly_connected_components(directed_graph)
-
-         for _, weak_component in enumerate(wcc):
-             wcc_graph = directed_graph.subgraph(weak_component)
-             nodes = wcc_graph.nodes()
-             for sub_node in substation_nodes:
-                 if sub_node in nodes:
-                     return nodes
-     else:
-         nodes = []
-         for edge in directed_graph.edges():
-             nodes.extend(edge)
-         return nodes
-     return []
-
-
- def check_for_microgrid(driver: GraphDatabase.driver, output_json_path: str):
-     """Checks for the possibility of a microgrid in each subgraph.
-
-     Args:
-         driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
-         output_json_path (str): JSON file path for exporting the metric.
-     """
-
-     directed_graph = create_directed_graph(driver)
-     node_data = {item[0]: item[1] for item in directed_graph.nodes(data=True)}
-
-     edges_to_be_removed = []
-     subgraphs = {}
-
-     for edge in directed_graph.edges():
-         edge_data = directed_graph.get_edge_data(*edge)
-         if "survive" in edge_data and int(edge_data["survive"]) == 0:
-             edges_to_be_removed.append(edge)
-
-     if edges_to_be_removed:
-         directed_graph.remove_edges_from(edges_to_be_removed)
-     wcc = nx.weakly_connected_components(directed_graph)
-
-     for id, weak_component in enumerate(wcc):
-
-         # Create a networkx representation to solve a multiple-source,
-         # multiple-sink max-flow problem:
-         # https://faculty.math.illinois.edu/~mlavrov/docs/482-fall-2019/lecture27.pdf
-         source_capacity, sink_capacity = 0, 0
-         wcc_graph = directed_graph.subgraph(weak_component)
-         wcc_graph = nx.DiGraph(wcc_graph)
-
-         for new_node in ["infinity_source", "infinity_sink"]:
-             if not wcc_graph.has_node(new_node):
-                 wcc_graph.add_node(new_node)
-
-         sinks, sources = [], []
-         for node in wcc_graph.nodes():
-             # Connect generation and backup assets to the infinity source,
-             # and loads to the infinity sink
-             if "pv" in node or "es_" in node or node_data.get(node, {}).get("backup", None) == 1:
-                 wcc_graph.add_edge(node, "infinity_source", capacity=1e9)
-                 wcc_graph.add_edge("infinity_source", node, capacity=1e9)
-                 sources.append(node)
-
-                 cap_ = None
-                 if "kw" in node_data[node]:
-                     cap_ = node_data[node]["kw"]
-                 elif "capacity" in node_data[node]:
-                     cap_ = node_data[node]["capacity"]
-                 elif "backup_capacity_kw" in node_data[node]:
-                     cap_ = node_data[node]["backup_capacity_kw"]
-                 else:
-                     raise Exception("Not a valid source!")
-                 source_capacity += cap_
-
-             elif "load" in node or node_data.get(node, {}).get("survive", None) is not None:
-                 wcc_graph.add_edge(node, "infinity_sink", capacity=1e9)
-                 wcc_graph.add_edge("infinity_sink", node, capacity=1e9)
-                 sinks.append(node)
-                 sink_capacity += math.sqrt(
-                     node_data[node].get("kW", 0) ** 2
-                     + node_data[node].get("kvar", 0) ** 2
-                 ) * float(node_data[node].get("critical_load_factor", 0))
-
-         flow_value, _ = nx.maximum_flow(
-             wcc_graph, "infinity_source", "infinity_sink", capacity="kva"
-         )
-
-         subgraphs[f"weak_component_{id}"] = {
-             "length": len(weak_component),
-             "max_flow": flow_value,
-             "sources": sources,
-             "sinks": sinks,
-             "source_capacity": source_capacity,
-             "sink_capacity": sink_capacity,
-         }
-
-     if output_json_path:
-         with open(output_json_path, "w") as fpointer:
-             json.dump(subgraphs, fpointer)
-
-     return subgraphs
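
The deleted `check_for_microgrid` routine reduces islanding analysis to a multiple-source, multiple-sink max-flow problem by tying every generation or backup asset to a synthetic "infinity_source" and every load to an "infinity_sink". A minimal, self-contained sketch of that reduction, with illustrative node names and capacities (not part of the package):

import networkx as nx

g = nx.DiGraph()
g.add_edge("pv_1", "bus_1", capacity=50)    # 50 kW of PV
g.add_edge("es_1", "bus_1", capacity=30)    # 30 kW of storage
g.add_edge("bus_1", "load_1", capacity=40)
g.add_edge("bus_1", "load_2", capacity=40)

# Tie all sources and sinks to synthetic terminals with effectively
# unlimited capacity, mirroring the 1e9 edges used above.
for source in ["pv_1", "es_1"]:
    g.add_edge("infinity_source", source, capacity=1e9)
for sink in ["load_1", "load_2"]:
    g.add_edge(sink, "infinity_sink", capacity=1e9)

flow_value, _ = nx.maximum_flow(g, "infinity_source", "infinity_sink")
print(flow_value)  # 80: all generation can reach the loads

Note that `check_for_microgrid` solves the flow over a `kva` edge attribute (`capacity="kva"`) rather than the default `capacity` attribute used in this sketch.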
erad/metrics/metric.py DELETED
@@ -1,178 +0,0 @@
- """ Module for handling computation of equity and resilience metrics.
-
- The idea is to create a separate function for computing each metric.
- """
-
- from pathlib import Path
- from typing import Callable, Union, Dict, List
- import json
-
- from neo4j import GraphDatabase
- import pandas as pd
- import numpy as np
-
- from erad.utils import util
- from erad import exceptions
-
-
- def validate_export_path(file_path: Union[str, Path], file_type: str):
-     """Function for validating the export file path.
-
-     Args:
-         file_path (Union[str, Path]): Export file path
-         file_type (str): File type to be exported, e.g. .csv
-     """
-
-     output_path = Path(file_path)
-     util.path_validation(output_path.parent)
-     if output_path.suffix != file_type:
-         raise exceptions.InvalidFileTypePassed(output_path, file_type)
-
-
- def is_customer_getting_power(
-     driver: GraphDatabase.driver,
-     output_csv_path: str,
-     load_list: List[str] = None,
- ):
-     """Function for checking whether each customer is still connected
-     to the substation or not.
-
-     Args:
-         driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
-         output_csv_path (str): CSV file path for exporting the metric.
-         load_list (List[str]): Loads to treat as powered regardless of connectivity.
-     """
-     if not load_list:
-         load_list = []
-
-     validate_export_path(output_csv_path, ".csv")
-     cypher_query = """
-         MATCH (b:Bus)
-         MATCH (s:Substation)
-         WHERE b <> s
-         WITH b, shortestPath((b)-[:CONNECTS_TO*]-(s)) as p
-         MATCH (c:Load)-[:CONSUMES_POWER_FROM]-(b)
-         WITH c, p, apoc.coll.min([r in relationships(p) | r.survive]) AS max_p
-         RETURN c.name, max_p
-     """
-     metric_container = {"load_name": [], "metric": []}
-     with driver.session() as session:
-         result = session.read_transaction(
-             lambda tx: tx.run(cypher_query).data()
-         )
-
-         for item in result:
-             metric_container["load_name"].append(item["c.name"])
-             metric_container["metric"].append(
-                 item["max_p"] if item["c.name"] not in load_list else 1
-             )
-
-     df = pd.DataFrame(metric_container)
-     df.to_csv(output_csv_path)
-
-
- def energy_resilience_by_customer(
-     driver: GraphDatabase.driver,
-     output_csv_path: str,
-     critical_infras: List = ["Grocery", "Hospital", "Pharmacy"],
- ):
-     """Function for computing, for each customer, a distance-weighted
-     measure of access to surviving critical services.
-
-     Args:
-         driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
-         output_csv_path (str): CSV file path for exporting the metric.
-         critical_infras (List): Node labels of critical infrastructures.
-     """
-
-     validate_export_path(output_csv_path, ".csv")
-
-     metric_container = {"load_name": [], "metric": [], "critical_service": []}
-     with driver.session() as session:
-
-         for cs in critical_infras:
-             cypher_query = (
-                 f"""
-                 MATCH (lo:Load)
-                 MATCH (cs:{cs})
-                 """
-                 + """
-                 WITH lo, cs,
-                 point.distance(point({longitude: lo.longitude, latitude: lo.latitude}),
-                 point({longitude: cs.longitude, latitude: cs.latitude}))/1000 AS d
-                 RETURN lo.name, sum(toInteger(toBoolean(cs.survive) OR toBoolean(cs.backup))/d) AS gamma
-                 """
-             )
-             result = session.read_transaction(
-                 lambda tx: tx.run(cypher_query).data()
-             )
-
-             for item in result:
-                 metric_container["load_name"].append(item["lo.name"])
-                 metric_container["metric"].append(item["gamma"])
-                 metric_container["critical_service"].append(cs)
-
-     df = pd.DataFrame(metric_container)
-     df.to_csv(output_csv_path)
-
-
- def equity_based_energy_resilience_by_income(
-     driver: GraphDatabase.driver,
-     path_to_energy_resilience_metric: str,
-     output_json_path: str,
-     category: Dict[str, Callable] = {
-         "low": lambda x: x < 90000,
-         "medium": lambda x: (90000 < x < 110000),
-         "high": lambda x: x > 110000,
-     },
- ):
-     """Function to compute the equity-based energy resilience metric.
-
-     Args:
-         driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
-         path_to_energy_resilience_metric (str): Path to the energy resilience metric.
-         output_json_path (str): JSON file path for exporting the metric.
-         category (Dict[str, Callable]): Income categories
-     """
-
-     validate_export_path(output_json_path, ".json")
-     util.path_validation(path_to_energy_resilience_metric)
-
-     metric_container = {}
-
-     with driver.session() as session:
-         cypher_query = """
-             MATCH (lo:Load)
-             RETURN lo.income as income, lo.name as name
-         """
-         result = session.read_transaction(
-             lambda tx: tx.run(cypher_query).data()
-         )
-
-     resilience_metric = pd.read_csv(path_to_energy_resilience_metric)
-     gamma_dict = (
-         resilience_metric.groupby("load_name").sum()["metric"].to_dict()
-     )
-
-     for id, func in category.items():
-
-         metric, income_flag_sum = 0, 0
-         for load in result:
-             load_income_flag = func(load["income"])
-             metric += load_income_flag * gamma_dict[load["name"]]
-             income_flag_sum += load_income_flag
-
-         metric_container[id] = (
-             metric / income_flag_sum if income_flag_sum else None
-         )
-
-     metric_values = [val for _, val in metric_container.items() if val]
-     if metric_values:
-         metric_container["community_resilience_equity_score"] = np.mean(
-             metric_values
-         ) / np.std(metric_values)
-
-     with open(output_json_path, "w") as fpointer:
-         json.dump(metric_container, fpointer)
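
The three metric functions above share a Neo4j driver and write their outputs to disk. A hedged sketch of how they might be chained; the URI, credentials, and file names are placeholders:

from neo4j import GraphDatabase

driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))

# Connectivity first, then the distance-weighted resilience metric,
# then the income-equity rollup that consumes the resilience CSV.
is_customer_getting_power(driver, "connectivity.csv")
energy_resilience_by_customer(driver, "resilience.csv")
equity_based_energy_resilience_by_income(driver, "resilience.csv", "equity.json")
driver.close()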
erad/programs/__init__.py DELETED
File without changes
erad/programs/backup.py DELETED
@@ -1,62 +0,0 @@
- """ This module will update the backup property for
- critical infrastructure. """
-
- # standard imports
- from typing import List
- import random
-
- # third party imports
- from neo4j import GraphDatabase
-
- # internal imports
- from erad.db.utils import _run_read_query
-
-
- def apply_backup_program(
-     driver: GraphDatabase.driver,
-     electricity_backup: any,
-     critical_infras: List[str],
- ):
-     """Function that will update the backup property of critical
-     infrastructures based on a backup percentage.
-
-     Args:
-         driver (GraphDatabase.driver): Neo4j driver instance
-         electricity_backup (float): Backup percentage between 0 and 1, or a
-             list of infrastructures to set as backup
-         critical_infras (List[str]): List of critical infrastructures
-     """
-
-     infra_with_backups = []
-     for cri_infra in critical_infras:
-         cypher_query = f"""
-             MATCH (c:{cri_infra})
-             RETURN c.longitude, c.latitude, c.name, c.backup
-         """
-         infras = _run_read_query(driver, cypher_query)
-
-         for infra in infras:
-
-             cypher_write_query = f"""
-                 MATCH (c:{cri_infra})
-                 WHERE c.name = $cname
-                 SET c.backup = $backup
-             """
-
-             if not isinstance(electricity_backup, list):
-                 backup = 1 if random.random() < electricity_backup else 0
-             else:
-                 backup = 1 if infra["c.name"] in electricity_backup else 0
-
-             if backup == 1:
-                 infra_with_backups.append(infra["c.name"])
-
-             with driver.session() as session:
-                 session.write_transaction(
-                     lambda tx: tx.run(
-                         cypher_write_query,
-                         cname=infra["c.name"],
-                         backup=backup,
-                     )
-                 )
-
-     return infra_with_backups
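
Because `electricity_backup` may be either a probability or an explicit list, both call styles below apply; `driver` and the infrastructure labels are placeholders:

# Give every Hospital and Grocery a 30% chance of having backup power...
with_backup = apply_backup_program(driver, 0.3, ["Hospital", "Grocery"])

# ...or name the exact facilities that should be flagged as having backup.
with_backup = apply_backup_program(driver, ["Hospital 1", "Grocery 4"], ["Hospital", "Grocery"])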
erad/programs/microgrid.py DELETED
@@ -1,45 +0,0 @@
- """
- This module will update the survive property based on whether
- the critical infrastructure is going to be powered from a microgrid or not.
- """
-
-
- from neo4j import GraphDatabase
-
- from erad.metrics.check_microgrid import check_for_microgrid
-
-
- def apply_microgrid_to_critical_infra(
-     driver: GraphDatabase.driver,
-     factor: float = (0.5 * 0.4),
- ):
-     """Function that will update the survive property of
-     critical infrastructure if it can get power from a microgrid."""
-
-     islands_jsons = check_for_microgrid(driver, output_json_path=None)
-
-     # Keep only islands that contain at least one source, and flag the
-     # ones whose source capacity can cover the (scaled) sink capacity.
-     islands_with_sources = [
-         subdict for subdict in islands_jsons.values() if len(subdict["sources"]) > 0
-     ]
-     infra_island = [subdict["sinks"] for subdict in islands_with_sources]
-     infra_microgrid = [
-         1 if subdict["source_capacity"] >= subdict["sink_capacity"] * factor else 0
-         for subdict in islands_with_sources
-     ]
-
-     infra_survives = [
-         el for id, el in enumerate(infra_island) if infra_microgrid[id]
-     ]
-     all_sinks = [x for el in infra_survives for x in el]
-     infra_survives = [x for el in infra_survives for x in el if "load" not in x]
-
-     for infra in infra_survives:
-         cypher_query = """
-             MATCH (c)
-             WHERE c.name = $cname
-             SET c.survive = 1
-             SET c.survival_probability = 1
-         """
-
-         with driver.session() as session:
-             session.write_transaction(
-                 lambda tx: tx.run(
-                     cypher_query,
-                     cname=infra,
-                 )
-             )
-     return all_sinks
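
A hedged usage sketch: after a hazard scenario has set `survive` flags on the graph, the program marks critical infrastructure served by viable islands; `driver` is a placeholder:

# factor scales down the sink capacity the island's sources must cover.
powered_sinks = apply_microgrid_to_critical_infra(driver, factor=0.2)
print(f"{len(powered_sinks)} sinks can be served by microgrids")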
erad/scenarios/__init__.py DELETED
File without changes
erad/scenarios/abstract_scenario.py DELETED
@@ -1,103 +0,0 @@
- """ Module contains the base class definition for all scenarios.
-
- Each scenario type will extend the base class to implement a given scenario.
- """
-
-
- from shapely.geometry import MultiPolygon, Polygon, LineString, Point
- from datetime import datetime
- from typing import *
- import shapely
-
- from erad.scenarios.common import AssetTypes
-
-
- class BaseScenario:
-     """Base class for scenario definition.
-
-     Attributes:
-         geodata (MultiPolygon, Point, LineString): Region of interest
-         probability_model (dict): Dictionary mapping asset types to probability functions
-         timestamp (datetime): Scenario occurrence time
-         kwargs (dict): Additional parameters relevant for a particular scenario type
-     """
-
-     def __init__(
-         self,
-         geodata: Union[MultiPolygon, Point, LineString],
-         probability_model: dict,
-         timestamp: datetime,
-         **kwargs,
-     ) -> None:
-         """Constructor for the BaseScenario class.
-
-         Args:
-             geodata (MultiPolygon, Point, LineString): Region of interest
-             probability_model (dict): Dictionary mapping asset types to probability functions
-             timestamp (datetime): Scenario occurrence time
-             kwargs (dict): Additional parameters relevant for a particular scenario type
-         """
-         if probability_model is None:
-             probability_model = self.fragility_curves
-         self.valitate_user_defined_fragility_curves(probability_model)
-
-         if isinstance(geodata, Polygon):
-             geodata = MultiPolygon([geodata])
-
-         if isinstance(geodata, MultiPolygon):
-             self.multipolygon = geodata
-         elif isinstance(geodata, Point):
-             self.origin = geodata
-         elif isinstance(geodata, LineString):
-             self.front = geodata
-         else:
-             raise Exception(f"Invalid data type {type(geodata)}")
-
-         self.probability_model = probability_model
-         self.to_projection = f"epsg:{self.identify_stateplane_projection}"
-         self.timestamp = timestamp
-
-     @property
-     def area(self) -> float:
-         """Method to calculate the area of the affected region."""
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     @property
-     def polygon(self) -> MultiPolygon:
-         """Method to return the polygon for the affected region."""
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     @property
-     def boundary(self) -> LineString:
-         """Method to return the boundary of the affected region."""
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     @property
-     def centroid(self) -> Point:
-         """Method to return the centroid of the affected region."""
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     def increment_time(self) -> dict:
-         """Method to increment simulation time for time-evolving scenarios."""
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     def calculate_survival_probability(self, assets: dict, timestamp: datetime) -> dict:
-         """Method to calculate the survival probability of asset types.
-
-         Args:
-             assets (dict): Dictionary of all assets and their corresponding asset types
-             timestamp (datetime): Simulation timestamp
-         """
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     def plot(self):
-         """Method to plot the survival probability in the region of interest."""
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     def asset_survial_probability(self, asset_type):
-         raise NotImplementedError("Method needs to be defined in derived classes")
-
-     def valitate_user_defined_fragility_curves(self, distributions):
-         for asset_type in distributions:
-             assert AssetTypes.has_asset(asset_type), (
-                 f"{asset_type} is not a valid asset type. "
-                 f"Valid options are {list(AssetTypes.__members__.keys())}"
-             )
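
A hedged sketch of how a concrete scenario might extend `BaseScenario`; the class name, fragility curve, EPSG code, and constant survival probability are illustrative assumptions, not the package's implementations:

from datetime import datetime
from shapely.geometry import Point


class PointHazardScenario(BaseScenario):
    # Class-level defaults consumed by BaseScenario.__init__ (assumed usage).
    fragility_curves = {"distribution_poles": lambda intensity: 0.9}
    identify_stateplane_projection = 32611  # placeholder EPSG code

    def calculate_survival_probability(self, assets: dict, timestamp: datetime) -> dict:
        # Constant survival probability, purely for illustration.
        return {
            asset_type: {name: 0.9 for name in items}
            for asset_type, items in assets.items()
        }


scenario = PointHazardScenario(Point(-117.33, 41.255), None, datetime.now())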
erad/scenarios/common.py DELETED
@@ -1,93 +0,0 @@
1
- from shapely.geometry import MultiPolygon, Point,Polygon
2
- from random import random,seed
3
- from enum import IntEnum
4
- import numpy as np
5
-
6
- class ScenarioTypes(IntEnum):
7
- flood_m = 0
8
- wind_m_per_s = 1
9
- fire_m = 2
10
- earthquake_pga = 3
11
-
12
- class AssetTypes(IntEnum):
13
- substation = 0
14
- solar_panels = 1
15
- buried_lines = 2
16
- wind_turbines= 3
17
- battery_storage = 4
18
- transmission_poles = 5
19
- distribution_poles = 6
20
- transmission_overhead_lines = 7
21
- distribution_overhead_lines = 8
22
- #substructures
23
- #conduit_burial
24
-
25
- @classmethod
26
- def has_value(cls, value):
27
- return value in cls._value2member_map_
28
-
29
- @classmethod
30
- def has_asset(cls, asset):
31
- print(asset)
32
- return asset in cls.__members__
33
-
34
-
35
- def asset_list(x1=41.255, y1=-117.33, x2=41.255, y2=-117.33, samples=100):
36
-
37
- x = np.linspace(x1, x2, samples)
38
- y = np.linspace(y1, y2, samples)
39
-
40
- seed(3)
41
- asset_probabilities = {
42
- AssetTypes.substation: 1 / 10000.0,
43
- AssetTypes.solar_panels : 1/500,
44
- AssetTypes.buried_lines : 1/10.0,
45
- AssetTypes.wind_turbines : 1/5000,
46
- AssetTypes.battery_storage :1/2000,
47
- AssetTypes.transmission_poles: 1 / 10.0,
48
- AssetTypes.distribution_poles : 1 / 10.0,
49
- AssetTypes.transmission_overhead_lines : 1/10.0,
50
- AssetTypes.distribution_overhead_lines : 1/10.0,
51
- }
52
-
53
- heights_ft = {
54
- AssetTypes.substation.name : 3,
55
- AssetTypes.solar_panels.name : 10,
56
- AssetTypes.buried_lines.name : -3,
57
- AssetTypes.wind_turbines.name : 25,
58
- AssetTypes.battery_storage.name : 4,
59
- AssetTypes.transmission_poles.name : 0,
60
- AssetTypes.distribution_poles.name : 0,
61
- AssetTypes.transmission_overhead_lines.name : 100,
62
- AssetTypes.distribution_overhead_lines.name : 30,
63
- }
64
-
65
- assets = {
66
- AssetTypes.substation.name : {},
67
- AssetTypes.solar_panels.name : {},
68
- AssetTypes.buried_lines.name : {},
69
- AssetTypes.wind_turbines.name : {},
70
- AssetTypes.battery_storage.name :{},
71
- AssetTypes.transmission_poles.name : {},
72
- AssetTypes.distribution_poles.name : {},
73
- AssetTypes.transmission_overhead_lines.name : {},
74
- AssetTypes.distribution_overhead_lines.name : {},
75
- }
76
-
77
- for asset_type, probability in asset_probabilities.items():
78
- asset_id = 0
79
- for x1 in x:
80
- for y1 in y:
81
- if random() < probability:
82
- assets[asset_type.name][f"{asset_type.name} {asset_id}"] = {"coordinates" : (x1, y1), "heights_ft": heights_ft[asset_type.name]}
83
- asset_id += 1
84
-
85
- p1 = Point(x.min(), y.min())
86
- p2 = Point(x.max(), y.min())
87
- p3 = Point(x.max(), y.max())
88
- p4 = Point(x.min(), y.max())
89
- pointList = [p1, p2, p3, p4, p1]
90
- poly = Polygon(pointList)
91
- mypoly = MultiPolygon([poly])
92
-
93
- return assets, mypoly
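
For reference, a small usage sketch of `asset_list` with an explicit bounding box; the coordinates and sample count are illustrative:

assets, region = asset_list(x1=41.0, y1=-117.5, x2=41.5, y2=-117.0, samples=50)
print(len(assets["distribution_poles"]), "distribution poles sampled")
print(region.bounds)  # bounding box of the generated MultiPolygon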