NREL-erad 0.0.0a0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- erad/__init__.py +1 -0
- erad/constants.py +20 -20
- erad/cypher_queries/load_data_v1.cypher +211 -211
- erad/data/World_Earthquakes_1960_2016.csv +23410 -23410
- erad/db/assets/critical_infras.py +170 -170
- erad/db/assets/distribution_lines.py +101 -101
- erad/db/credential_model.py +20 -20
- erad/db/disaster_input_model.py +23 -23
- erad/db/inject_earthquake.py +52 -52
- erad/db/inject_flooding.py +53 -53
- erad/db/neo4j_.py +162 -162
- erad/db/utils.py +13 -13
- erad/exceptions.py +68 -68
- erad/metrics/check_microgrid.py +208 -208
- erad/metrics/metric.py +178 -178
- erad/programs/backup.py +61 -61
- erad/programs/microgrid.py +44 -44
- erad/scenarios/abstract_scenario.py +102 -102
- erad/scenarios/common.py +92 -92
- erad/scenarios/earthquake_scenario.py +161 -161
- erad/scenarios/fire_scenario.py +160 -160
- erad/scenarios/flood_scenario.py +493 -493
- erad/scenarios/flows.csv +671 -0
- erad/scenarios/utilities.py +75 -75
- erad/scenarios/wind_scenario.py +89 -89
- erad/utils/ditto_utils.py +252 -252
- erad/utils/hifld_utils.py +147 -147
- erad/utils/opendss_utils.py +357 -357
- erad/utils/overpass.py +76 -76
- erad/utils/util.py +178 -178
- erad/visualization/plot_graph.py +218 -218
- {NREL_erad-0.0.0a0.dist-info → nrel_erad-1.0.0.dist-info}/METADATA +65 -61
- nrel_erad-1.0.0.dist-info/RECORD +42 -0
- {NREL_erad-0.0.0a0.dist-info → nrel_erad-1.0.0.dist-info}/WHEEL +1 -2
- {NREL_erad-0.0.0a0.dist-info → nrel_erad-1.0.0.dist-info/licenses}/LICENSE.txt +28 -28
- NREL_erad-0.0.0a0.dist-info/RECORD +0 -42
- NREL_erad-0.0.0a0.dist-info/top_level.txt +0 -1
erad/metrics/metric.py
CHANGED
@@ -1,178 +1,178 @@
|
|
1
|
-
""" Module for handling computation of equity and resilience metrics.
|
2
|
-
|
3
|
-
Idea is to create separate function for computing each metric.
|
4
|
-
"""
|
5
|
-
|
6
|
-
from pathlib import Path
|
7
|
-
from typing import Callable, Union, Dict, List
|
8
|
-
import json
|
9
|
-
|
10
|
-
from neo4j import GraphDatabase
|
11
|
-
import pandas as pd
|
12
|
-
import numpy as np
|
13
|
-
|
14
|
-
from erad.utils import util
|
15
|
-
from erad import exceptions
|
16
|
-
|
17
|
-
|
18
|
-
def validate_export_path(file_path: Union[str, Path], file_type: str):
|
19
|
-
"""Function for validating the export file path.
|
20
|
-
|
21
|
-
Args:
|
22
|
-
file_path (Union[str, Path]): Export file path
|
23
|
-
file_type (str): File type to be exported e.g. .csv
|
24
|
-
"""
|
25
|
-
|
26
|
-
output_path = Path(file_path)
|
27
|
-
util.path_validation(output_path.parent)
|
28
|
-
if output_path.suffix != file_type:
|
29
|
-
raise exceptions.InvalidFileTypePassed(output_path, file_type)
|
30
|
-
|
31
|
-
|
32
|
-
def is_customer_getting_power(
|
33
|
-
driver: GraphDatabase.driver, output_csv_path: str,
|
34
|
-
load_list: List[str] = None
|
35
|
-
):
|
36
|
-
|
37
|
-
"""Function for checking whether customer is still connected
|
38
|
-
to substation or not.
|
39
|
-
|
40
|
-
Args:
|
41
|
-
driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
|
42
|
-
instance
|
43
|
-
output_csv_path (str): CSV file path for exporting the metric.
|
44
|
-
"""
|
45
|
-
if not load_list:
|
46
|
-
load_list = []
|
47
|
-
|
48
|
-
validate_export_path(output_csv_path, ".csv")
|
49
|
-
cypher_query = """
|
50
|
-
MATCH (b:Bus)
|
51
|
-
MATCH (s:Substation)
|
52
|
-
WHERE b <> s
|
53
|
-
WITH b, shortestPath((b)-[:CONNECTS_TO*]-(s)) as p
|
54
|
-
MATCH (c:Load)-[:CONSUMES_POWER_FROM]-(b)
|
55
|
-
WITH c, p, apoc.coll.min([r in relationships(p) | r.survive]) AS max_p
|
56
|
-
RETURN c.name, max_p
|
57
|
-
"""
|
58
|
-
metric_container = {"load_name": [], "metric": []}
|
59
|
-
with driver.session() as session:
|
60
|
-
result = session.read_transaction(
|
61
|
-
lambda tx: tx.run(cypher_query).data()
|
62
|
-
)
|
63
|
-
|
64
|
-
for item in result:
|
65
|
-
metric_container["load_name"].append(item["c.name"])
|
66
|
-
metric_container["metric"].append(item["max_p"] \
|
67
|
-
if item["c.name"] not in load_list else 1)
|
68
|
-
|
69
|
-
df = pd.DataFrame(metric_container)
|
70
|
-
df.to_csv(output_csv_path)
|
71
|
-
|
72
|
-
|
73
|
-
def energy_resilience_by_customer(
|
74
|
-
driver: GraphDatabase.driver, output_csv_path: str,
|
75
|
-
critical_infras: List = ["Grocery", "Hospital", "Pharmacy"]
|
76
|
-
):
|
77
|
-
"""Function for checking whether customer is still connected
|
78
|
-
to substation or not.
|
79
|
-
|
80
|
-
Args:
|
81
|
-
driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
|
82
|
-
instance
|
83
|
-
output_csv_path (str): CSV file path for exporting the metric.
|
84
|
-
"""
|
85
|
-
|
86
|
-
validate_export_path(output_csv_path, ".csv")
|
87
|
-
|
88
|
-
metric_container = {"load_name": [], "metric": [], "critical_service": []}
|
89
|
-
with driver.session() as session:
|
90
|
-
|
91
|
-
for cs in critical_infras:
|
92
|
-
cypher_query = (
|
93
|
-
f"""
|
94
|
-
MATCH (lo:Load)
|
95
|
-
MATCH (cs:{cs})
|
96
|
-
"""
|
97
|
-
+ """
|
98
|
-
WITH lo,cs,
|
99
|
-
point.distance(point({longitude: lo.longitude, latitude:lo.latitude}),
|
100
|
-
point({longitude: cs.longitude, latitude:cs.latitude}))/1000 AS d
|
101
|
-
RETURN lo.name, sum(toInteger(toBoolean(cs.survive) OR toBoolean(cs.backup))/d) AS gamma
|
102
|
-
"""
|
103
|
-
# count(d)/sum(d) AS gamma
|
104
|
-
# WHERE cs.survive = 1
|
105
|
-
)
|
106
|
-
result = session.read_transaction(
|
107
|
-
lambda tx: tx.run(cypher_query).data()
|
108
|
-
)
|
109
|
-
|
110
|
-
for item in result:
|
111
|
-
metric_container["load_name"].append(item["lo.name"])
|
112
|
-
metric_container["metric"].append(item["gamma"])
|
113
|
-
metric_container["critical_service"].append(cs)
|
114
|
-
|
115
|
-
df = pd.DataFrame(metric_container)
|
116
|
-
df.to_csv(output_csv_path)
|
117
|
-
|
118
|
-
|
119
|
-
def equity_based_energy_resilience_by_income(
|
120
|
-
driver: GraphDatabase.driver,
|
121
|
-
path_to_energy_resilience_metric: str,
|
122
|
-
output_json_path: str,
|
123
|
-
category: Dict[str, Callable] = {
|
124
|
-
"low": lambda x: x < 90000,
|
125
|
-
"medium": lambda x: (90000 < x < 110000),
|
126
|
-
"high": lambda x: x > 110000,
|
127
|
-
},
|
128
|
-
):
|
129
|
-
"""Function to compute the equity based energy resilience metric.
|
130
|
-
|
131
|
-
Args:
|
132
|
-
driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
|
133
|
-
instance
|
134
|
-
path_to_energy_resilience_metric (str): Path to energy resilience metric.
|
135
|
-
output_json_path (str): JSON file path for exporting the metric.
|
136
|
-
category (Dict[str, Callable]): Income categories
|
137
|
-
"""
|
138
|
-
|
139
|
-
validate_export_path(output_json_path, ".json")
|
140
|
-
util.path_validation(path_to_energy_resilience_metric)
|
141
|
-
|
142
|
-
metric_container = {}
|
143
|
-
|
144
|
-
with driver.session() as session:
|
145
|
-
|
146
|
-
cypher_query = """
|
147
|
-
MATCH (lo:Load)
|
148
|
-
RETURN lo.income as income, lo.name as name
|
149
|
-
"""
|
150
|
-
result = session.read_transaction(
|
151
|
-
lambda tx: tx.run(cypher_query).data()
|
152
|
-
)
|
153
|
-
|
154
|
-
resilience_metric = pd.read_csv(path_to_energy_resilience_metric)
|
155
|
-
gamma_dict = (
|
156
|
-
resilience_metric.groupby("load_name").sum()["metric"].to_dict()
|
157
|
-
)
|
158
|
-
|
159
|
-
for id, func in category.items():
|
160
|
-
|
161
|
-
metric, income_flag_sum = 0, 0
|
162
|
-
for load in result:
|
163
|
-
load_income_flag = func(load["income"])
|
164
|
-
metric += load_income_flag * gamma_dict[load["name"]]
|
165
|
-
income_flag_sum += load_income_flag
|
166
|
-
|
167
|
-
metric_container[id] = (
|
168
|
-
metric / income_flag_sum if income_flag_sum else None
|
169
|
-
)
|
170
|
-
|
171
|
-
metric_values = [val for _, val in metric_container.items() if val]
|
172
|
-
if metric_values:
|
173
|
-
metric_container["community_resilience_equity_score"] = np.mean(
|
174
|
-
metric_values
|
175
|
-
) / np.std(metric_values)
|
176
|
-
|
177
|
-
with open(output_json_path, "w") as fpointer:
|
178
|
-
json.dump(metric_container, fpointer)
|
1
|
+
""" Module for handling computation of equity and resilience metrics.
|
2
|
+
|
3
|
+
Idea is to create separate function for computing each metric.
|
4
|
+
"""
|
5
|
+
|
6
|
+
from pathlib import Path
|
7
|
+
from typing import Callable, Union, Dict, List
|
8
|
+
import json
|
9
|
+
|
10
|
+
from neo4j import GraphDatabase
|
11
|
+
import pandas as pd
|
12
|
+
import numpy as np
|
13
|
+
|
14
|
+
from erad.utils import util
|
15
|
+
from erad import exceptions
|
16
|
+
|
17
|
+
|
18
|
+
def validate_export_path(file_path: Union[str, Path], file_type: str):
    """Validate an export file path before writing to it.

    Args:
        file_path (Union[str, Path]): Export file path
        file_type (str): Expected file extension e.g. .csv

    Raises:
        exceptions.InvalidFileTypePassed: If the path's suffix does not
            match ``file_type``.
    """
    target = Path(file_path)
    # The parent directory must already exist and be valid.
    util.path_validation(target.parent)
    if target.suffix == file_type:
        return
    raise exceptions.InvalidFileTypePassed(target, file_type)
|
30
|
+
|
31
|
+
|
32
|
+
def is_customer_getting_power(
    driver: GraphDatabase.driver, output_csv_path: str,
    load_list: List[str] = None
):
    """Check whether each customer load is still connected to a substation.

    Exports a CSV with one row per load: the load name and a survival
    metric. Loads named in ``load_list`` are forced to a metric of 1.

    Args:
        driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
            instance
        output_csv_path (str): CSV file path for exporting the metric.
        load_list (List[str]): Load names assumed to always have power.
    """
    load_list = load_list or []
    validate_export_path(output_csv_path, ".csv")

    # NOTE: the Cypher alias is "max_p", but the value is the *minimum* of
    # the `survive` flags along the shortest path to a substation — power
    # reaches the load only if every segment on the path survived.
    cypher_query = """
        MATCH (b:Bus)
        MATCH (s:Substation)
        WHERE b <> s
        WITH b, shortestPath((b)-[:CONNECTS_TO*]-(s)) as p
        MATCH (c:Load)-[:CONSUMES_POWER_FROM]-(b)
        WITH c, p, apoc.coll.min([r in relationships(p) | r.survive]) AS max_p
        RETURN c.name, max_p
    """

    with driver.session() as session:
        records = session.read_transaction(
            lambda tx: tx.run(cypher_query).data()
        )

    names, metrics = [], []
    for record in records:
        load_name = record["c.name"]
        names.append(load_name)
        metrics.append(1 if load_name in load_list else record["max_p"])

    pd.DataFrame({"load_name": names, "metric": metrics}).to_csv(
        output_csv_path
    )
|
71
|
+
|
72
|
+
|
73
|
+
def energy_resilience_by_customer(
    driver: GraphDatabase.driver, output_csv_path: str,
    critical_infras: List = None
):
    """Compute an energy-resilience ("gamma") metric for each customer load.

    For every critical-infrastructure label, each load is scored by summing
    1/distance (km) to every facility of that label that survived or has
    backup. The result is exported as a CSV with columns load_name, metric
    and critical_service.

    Args:
        driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
            instance
        output_csv_path (str): CSV file path for exporting the metric.
        critical_infras (List): Critical-infrastructure node labels.
            Defaults to ["Grocery", "Hospital", "Pharmacy"].
    """
    # Fix: avoid a mutable default argument — the previous default list was
    # shared across calls and could be mutated by callers.
    if critical_infras is None:
        critical_infras = ["Grocery", "Hospital", "Pharmacy"]

    validate_export_path(output_csv_path, ".csv")

    metric_container = {"load_name": [], "metric": [], "critical_service": []}
    with driver.session() as session:

        for cs in critical_infras:
            # NOTE: node labels cannot be query parameters in Cypher, so the
            # label is interpolated directly. Only pass trusted label names.
            cypher_query = (
                f"""
                MATCH (lo:Load)
                MATCH (cs:{cs})
                """
                + """
                WITH lo,cs,
                point.distance(point({longitude: lo.longitude, latitude:lo.latitude}),
                point({longitude: cs.longitude, latitude:cs.latitude}))/1000 AS d
                RETURN lo.name, sum(toInteger(toBoolean(cs.survive) OR toBoolean(cs.backup))/d) AS gamma
                """
            )
            result = session.read_transaction(
                lambda tx: tx.run(cypher_query).data()
            )

            for item in result:
                metric_container["load_name"].append(item["lo.name"])
                metric_container["metric"].append(item["gamma"])
                metric_container["critical_service"].append(cs)

    df = pd.DataFrame(metric_container)
    df.to_csv(output_csv_path)
|
117
|
+
|
118
|
+
|
119
|
+
def equity_based_energy_resilience_by_income(
    driver: GraphDatabase.driver,
    path_to_energy_resilience_metric: str,
    output_json_path: str,
    category: Dict[str, Callable] = None,
):
    """Compute the equity based energy resilience metric by income group.

    Loads (with their incomes) are read from the graph; per-load gamma
    values are read from a previously exported resilience-metric CSV.
    For each income category the mean gamma of its loads is computed, and
    a community score (mean/std of the category metrics) is appended.
    The result is written as JSON.

    Args:
        driver (GraphDatabase.driver): Instance of `GraphDatabase.driver`
            instance
        path_to_energy_resilience_metric (str): Path to energy resilience metric.
        output_json_path (str): JSON file path for exporting the metric.
        category (Dict[str, Callable]): Income categories; each callable
            maps an income to a truthy/falsy membership flag.
    """
    # Fix: avoid a mutable default argument (shared dict across calls).
    if category is None:
        # NOTE(review): incomes of exactly 90000 or 110000 fall into no
        # default category — confirm the boundaries are intentional.
        category = {
            "low": lambda x: x < 90000,
            "medium": lambda x: (90000 < x < 110000),
            "high": lambda x: x > 110000,
        }

    validate_export_path(output_json_path, ".json")
    util.path_validation(path_to_energy_resilience_metric)

    metric_container = {}

    with driver.session() as session:

        cypher_query = """
            MATCH (lo:Load)
            RETURN lo.income as income, lo.name as name
        """
        result = session.read_transaction(
            lambda tx: tx.run(cypher_query).data()
        )

    resilience_metric = pd.read_csv(path_to_energy_resilience_metric)
    gamma_dict = (
        resilience_metric.groupby("load_name").sum()["metric"].to_dict()
    )

    # Renamed loop variable: `id` shadowed the builtin.
    for label, func in category.items():

        metric, income_flag_sum = 0, 0
        for load in result:
            load_income_flag = func(load["income"])
            # A KeyError here means a load exists in the graph but is
            # missing from the resilience-metric CSV.
            metric += load_income_flag * gamma_dict[load["name"]]
            income_flag_sum += load_income_flag

        metric_container[label] = (
            metric / income_flag_sum if income_flag_sum else None
        )

    # Falsy filter intentionally drops both None and zero-valued metrics,
    # matching the original behavior.
    metric_values = [val for val in metric_container.values() if val]
    if metric_values:
        metric_container["community_resilience_equity_score"] = np.mean(
            metric_values
        ) / np.std(metric_values)

    with open(output_json_path, "w") as fpointer:
        json.dump(metric_container, fpointer)
|
erad/programs/backup.py
CHANGED
@@ -1,62 +1,62 @@
|
|
1
|
-
""" This module will update the backup property for
|
2
|
-
critical infrastructure. """
|
3
|
-
|
4
|
-
# standard imports
|
5
|
-
from typing import List
|
6
|
-
import random
|
7
|
-
|
8
|
-
# third party imports
|
9
|
-
from neo4j import GraphDatabase
|
10
|
-
|
11
|
-
# internal imports
|
12
|
-
from erad.db.utils import _run_read_query
|
13
|
-
|
14
|
-
def apply_backup_program(
|
15
|
-
driver: GraphDatabase.driver,
|
16
|
-
electricity_backup: any,
|
17
|
-
critical_infras: List[str],
|
18
|
-
):
|
19
|
-
|
20
|
-
""" Function that will update the backup
|
21
|
-
property of critical infras based on backup percentage.
|
22
|
-
|
23
|
-
Args:
|
24
|
-
driver (GraphDatabase.driver): Neo4J Driver instance
|
25
|
-
electricity_backup (float): backup percentage number between 0 and 1 or list of infras to set as backup
|
26
|
-
critical_infras (List[str]): list of critical infrastructure
|
27
|
-
"""
|
28
|
-
|
29
|
-
infra_with_backups = []
|
30
|
-
for cri_infra in critical_infras:
|
31
|
-
cypher_query = f"""
|
32
|
-
MATCH (c:{cri_infra})
|
33
|
-
RETURN c.longitude, c.latitude, c.name, c.backup
|
34
|
-
"""
|
35
|
-
infras = _run_read_query(driver, cypher_query)
|
36
|
-
|
37
|
-
for infra in infras:
|
38
|
-
|
39
|
-
cypher_write_query = f"""
|
40
|
-
MATCH (c:{cri_infra})
|
41
|
-
WHERE c.name = $cname
|
42
|
-
SET c.backup = $backup
|
43
|
-
"""
|
44
|
-
|
45
|
-
if not isinstance(electricity_backup, list):
|
46
|
-
backup = 1 if random.random() < electricity_backup else 0
|
47
|
-
else:
|
48
|
-
backup = 1 if infra['c.name'] in electricity_backup else 0
|
49
|
-
|
50
|
-
if backup == 1:
|
51
|
-
infra_with_backups.append(infra['c.name'])
|
52
|
-
|
53
|
-
with driver.session() as session:
|
54
|
-
session.write_transaction(
|
55
|
-
lambda tx: tx.run(
|
56
|
-
cypher_write_query,
|
57
|
-
cname=infra['c.name'],
|
58
|
-
backup= backup
|
59
|
-
)
|
60
|
-
)
|
61
|
-
|
1
|
+
""" This module will update the backup property for
|
2
|
+
critical infrastructure. """
|
3
|
+
|
4
|
+
# standard imports
|
5
|
+
from typing import List
|
6
|
+
import random
|
7
|
+
|
8
|
+
# third party imports
|
9
|
+
from neo4j import GraphDatabase
|
10
|
+
|
11
|
+
# internal imports
|
12
|
+
from erad.db.utils import _run_read_query
|
13
|
+
|
14
|
+
def apply_backup_program(
    driver: GraphDatabase.driver,
    electricity_backup: "float | List[str]",
    critical_infras: List[str],
):
    """Update the `backup` property of critical-infrastructure nodes.

    For every node of every label in ``critical_infras`` the ``backup``
    property is set to 1 or 0:

    * if ``electricity_backup`` is a number between 0 and 1, each node
      independently receives backup with that probability;
    * if it is a list, exactly the nodes whose names appear in the list
      receive backup.

    Args:
        driver (GraphDatabase.driver): Neo4J Driver instance
        electricity_backup (float | List[str]): backup percentage number
            between 0 and 1 or list of infras to set as backup
        critical_infras (List[str]): list of critical infrastructure

    Returns:
        List[str]: Names of infrastructures that received backup.
    """
    # Fix: the parameter was annotated with the builtin function `any`;
    # a string annotation documents the real union without new imports.

    infra_with_backups = []
    for cri_infra in critical_infras:
        cypher_query = f"""
            MATCH (c:{cri_infra})
            RETURN c.longitude, c.latitude, c.name, c.backup
        """
        infras = _run_read_query(driver, cypher_query)

        # The write query only depends on the label, so build it once per
        # label instead of once per node.
        cypher_write_query = f"""
            MATCH (c:{cri_infra})
            WHERE c.name = $cname
            SET c.backup = $backup
        """

        for infra in infras:

            if not isinstance(electricity_backup, list):
                backup = 1 if random.random() < electricity_backup else 0
            else:
                backup = 1 if infra['c.name'] in electricity_backup else 0

            if backup == 1:
                infra_with_backups.append(infra['c.name'])

            with driver.session() as session:
                session.write_transaction(
                    lambda tx: tx.run(
                        cypher_write_query,
                        cname=infra['c.name'],
                        backup=backup,
                    )
                )

    return infra_with_backups
|
erad/programs/microgrid.py
CHANGED
@@ -1,45 +1,45 @@
|
|
1
|
-
"""
|
2
|
-
This module will update the survive property based on whether
|
3
|
-
the critica infrastructure is going to be powered from microgrid or not.
|
4
|
-
"""
|
5
|
-
|
6
|
-
|
7
|
-
from neo4j import GraphDatabase
|
8
|
-
|
9
|
-
from erad.metrics.check_microgrid import check_for_microgrid
|
10
|
-
|
11
|
-
|
12
|
-
def apply_microgrid_to_critical_infra(
|
13
|
-
driver: GraphDatabase.driver,
|
14
|
-
factor: float= (0.5 * 0.4)
|
15
|
-
):
|
16
|
-
""" Function that will update the survive property of
|
17
|
-
critical infrastructure if it can get power from microgrid."""
|
18
|
-
|
19
|
-
islands_jsons = check_for_microgrid(driver, output_json_path=None)
|
20
|
-
infra_island = [subdict['sinks'] for _, subdict \
|
21
|
-
in islands_jsons.items() if len(subdict['sources'])>0]
|
22
|
-
infra_microgrid = [1 if subdict['source_capacity'] >= subdict['sink_capacity']*factor else 0 for _, subdict \
|
23
|
-
in islands_jsons.items()]
|
24
|
-
|
25
|
-
infra_survives = [el for id, el in enumerate(infra_island) \
|
26
|
-
if infra_microgrid[id]]
|
27
|
-
all_sinks = [x for el in infra_survives for x in el]
|
28
|
-
infra_survives = [x for el in infra_survives for x in el if 'load' not in x]
|
29
|
-
|
30
|
-
for infra in infra_survives:
|
31
|
-
cypher_query = f"""
|
32
|
-
MATCH (c)
|
33
|
-
WHERE c.name = $cname
|
34
|
-
SET c.survive = 1
|
35
|
-
SET c.survival_probability = 1
|
36
|
-
"""
|
37
|
-
|
38
|
-
with driver.session() as session:
|
39
|
-
session.write_transaction(
|
40
|
-
lambda tx: tx.run(
|
41
|
-
cypher_query,
|
42
|
-
cname=infra,
|
43
|
-
)
|
44
|
-
)
|
1
|
+
"""
|
2
|
+
This module will update the survive property based on whether
|
3
|
+
the critica infrastructure is going to be powered from microgrid or not.
|
4
|
+
"""
|
5
|
+
|
6
|
+
|
7
|
+
from neo4j import GraphDatabase
|
8
|
+
|
9
|
+
from erad.metrics.check_microgrid import check_for_microgrid
|
10
|
+
|
11
|
+
|
12
|
+
def apply_microgrid_to_critical_infra(
    driver: GraphDatabase.driver,
    factor: float = (0.5 * 0.4)
):
    """Update the survive property of critical infrastructure that a
    microgrid can power.

    Islands come from ``check_for_microgrid``. An island survives when it
    has at least one source and its source capacity covers ``factor``
    times its sink capacity. Every non-load sink in a surviving island
    gets ``survive`` and ``survival_probability`` set to 1.

    Args:
        driver (GraphDatabase.driver): Neo4J Driver instance
        factor (float): Fraction of the island sink capacity that source
            capacity must cover. Defaults to 0.5 * 0.4.

    Returns:
        List[str]: All sink names (loads included) of surviving islands.
    """
    islands_jsons = check_for_microgrid(driver, output_json_path=None)

    # BUGFIX: previously the sink lists were taken only from islands that
    # have sources while the capacity flags were computed over *all*
    # islands, so the two lists went out of step whenever any island had
    # no sources. Evaluate both conditions per island instead.
    all_sinks = []
    for subdict in islands_jsons.values():
        if len(subdict['sources']) > 0 and (
            subdict['source_capacity'] >= subdict['sink_capacity'] * factor
        ):
            all_sinks.extend(subdict['sinks'])

    # Only non-load infrastructure nodes are updated in the database.
    infra_survives = [name for name in all_sinks if 'load' not in name]

    # The query is parameterized and identical for every node; build once.
    cypher_query = """
        MATCH (c)
        WHERE c.name = $cname
        SET c.survive = 1
        SET c.survival_probability = 1
    """

    for infra in infra_survives:
        with driver.session() as session:
            session.write_transaction(
                lambda tx: tx.run(
                    cypher_query,
                    cname=infra,
                )
            )
    return all_sinks
|