NREL-erad 0.0.0a0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (37)
  1. erad/__init__.py +1 -0
  2. erad/constants.py +20 -20
  3. erad/cypher_queries/load_data_v1.cypher +211 -211
  4. erad/data/World_Earthquakes_1960_2016.csv +23410 -23410
  5. erad/db/assets/critical_infras.py +170 -170
  6. erad/db/assets/distribution_lines.py +101 -101
  7. erad/db/credential_model.py +20 -20
  8. erad/db/disaster_input_model.py +23 -23
  9. erad/db/inject_earthquake.py +52 -52
  10. erad/db/inject_flooding.py +53 -53
  11. erad/db/neo4j_.py +162 -162
  12. erad/db/utils.py +13 -13
  13. erad/exceptions.py +68 -68
  14. erad/metrics/check_microgrid.py +208 -208
  15. erad/metrics/metric.py +178 -178
  16. erad/programs/backup.py +61 -61
  17. erad/programs/microgrid.py +44 -44
  18. erad/scenarios/abstract_scenario.py +102 -102
  19. erad/scenarios/common.py +92 -92
  20. erad/scenarios/earthquake_scenario.py +161 -161
  21. erad/scenarios/fire_scenario.py +160 -160
  22. erad/scenarios/flood_scenario.py +493 -493
  23. erad/scenarios/flows.csv +671 -0
  24. erad/scenarios/utilities.py +75 -75
  25. erad/scenarios/wind_scenario.py +89 -89
  26. erad/utils/ditto_utils.py +252 -252
  27. erad/utils/hifld_utils.py +147 -147
  28. erad/utils/opendss_utils.py +357 -357
  29. erad/utils/overpass.py +76 -76
  30. erad/utils/util.py +178 -178
  31. erad/visualization/plot_graph.py +218 -218
  32. {NREL_erad-0.0.0a0.dist-info → nrel_erad-1.0.0.dist-info}/METADATA +65 -61
  33. nrel_erad-1.0.0.dist-info/RECORD +42 -0
  34. {NREL_erad-0.0.0a0.dist-info → nrel_erad-1.0.0.dist-info}/WHEEL +1 -2
  35. {NREL_erad-0.0.0a0.dist-info → nrel_erad-1.0.0.dist-info/licenses}/LICENSE.txt +28 -28
  36. NREL_erad-0.0.0a0.dist-info/RECORD +0 -42
  37. NREL_erad-0.0.0a0.dist-info/top_level.txt +0 -1
erad/utils/hifld_utils.py CHANGED
@@ -1,147 +1,147 @@
-""" Module for parsing Homeland infrastructure foundation level-data.
-
-Idea is to take the bounding box and find the subset of
-infrastructure in that region.
-"""
-# standard imports
-from pathlib import Path
-import math
-from typing import Union, List
-
-# third-party imports
-import pandas as pd
-import stateplane
-
-# internal imports
-from erad.utils.util import path_validation
-
-
-def get_subset_of_hifld_data(
-    csv_file: str,
-    bounds: List,
-    output_folder: str,
-    logitude_column_name: str = "X",
-    latitude_column_name: str = "Y",
-    columns_to_keep: List[str] = ["X", "Y"],
-    name_of_csv_file: Union[str, None] = None,
-) -> None:
-    """Extracts a subset of HIFLD data set.
-
-    Args:
-        csv_file (str): Path to HIFLD data csv file
-        bounds (List): Bounding box coordinates
-        output_folder (str): Path to output folder
-        logitude_column_name (str): Expects column with name 'X'
-        latitude_column_name (str): Expects column with name 'Y'
-        columns_to_keep (List): List of column names to keep
-            by default keeps all of them
-        name_of_csv_file (Union[str, None]): Name of csv file to export
-            filtered set
-    """
-
-    # Unpacking the bounds data
-    longitude_min, latitude_min, longitude_max, latitude_max = bounds
-
-    # Do a path validation
-    csv_file = Path(csv_file)
-    output_folder = Path(output_folder)
-    path_validation(csv_file, check_for_file=True, check_for_file_type=".csv")
-    path_validation(output_folder)
-
-    # Reading the hifld csv data
-    df = pd.read_csv(csv_file)
-
-    # filtering for bounds
-    df_filtered = df[
-        (df[logitude_column_name] >= longitude_min)
-        & (df[logitude_column_name] <= longitude_max)
-        & (df[latitude_column_name] >= latitude_min)
-        & (df[latitude_column_name] <= latitude_max)
-    ]
-
-    # Keep only the limited columns
-    df_subset = df_filtered[columns_to_keep]
-
-    # export the subset
-    file_name = name_of_csv_file if name_of_csv_file else csv_file.name
-    df_subset.to_csv(output_folder / file_name)
-
-
-def get_relationship_between_hifld_infrastructures(
-    hifld_data_csv: str,
-    unique_id_column: str,
-    load_csv: str,
-    bus_csv: str,
-    output_csv_path: str,
-    distance_threshold: float = 2000.0,
-):
-    """Creates a relationship between consumers and HIFLD infrastructures.
-
-    Args:
-        hifld_data_csv (str): Path to filtered HIFLD data csv file
-        unique_id_column (List): Column name used as identifier
-            for critical infrastructures
-        load_csv (str): Path to load csv file
-        bus_csv (str): Path to bus csv file
-        output_csv_path (str): output csv path for storing relationship csv
-        distance_threshold (float): Distance threshold used for mapping
-            customer to critical infrastructure
-    """
-    hifld_data_csv = Path(hifld_data_csv)
-    bus_csv = Path(bus_csv)
-    load_csv = Path(load_csv)
-    output_csv_path = Path(output_csv_path)
-
-    path_validation(
-        hifld_data_csv, check_for_file=True, check_for_file_type=".csv"
-    )
-    path_validation(bus_csv, check_for_file=True, check_for_file_type=".csv")
-    path_validation(load_csv, check_for_file=True, check_for_file_type=".csv")
-    path_validation(output_csv_path.parents[0])
-
-    hifld_data_df = pd.read_csv(hifld_data_csv)
-    load_df = pd.read_csv(load_csv)
-    bus_df = pd.read_csv(bus_csv)
-
-    merged_data = pd.merge(
-        load_df, bus_df, how="left", left_on="source", right_on="name"
-    ).to_dict(orient="records")
-
-    # Container for storing shelter relationships
-    _relationship = []
-    for _record in hifld_data_df.to_dict(orient="records"):
-        _lon, _lat = _record["LONGITUDE"], _record["LATITUDE"]
-
-        # convert into state plane coordinates
-        _lon_translated, _lat_translated = stateplane.from_lonlat(_lon, _lat)
-
-        # Loop through all the loads
-        for load_record in merged_data:
-
-            load_lon, load_lat = (
-                load_record["longitude"],
-                load_record["latitude"],
-            )
-
-            # convert into state plane coordinates
-            load_lon_translated, load_lat_translated = stateplane.from_lonlat(
-                load_lon, load_lat
-            )
-
-            # computes distance
-            distance = math.sqrt(
-                (_lat_translated - load_lat_translated) ** 2
-                + (_lon_translated - load_lon_translated) ** 2
-            )
-
-            if distance < distance_threshold:
-                _relationship.append(
-                    {
-                        unique_id_column: _record[unique_id_column],
-                        "load_name": load_record["name_x"],
-                        "distance": distance,
-                    }
-                )
-
-    df = pd.DataFrame(_relationship)
-    df.to_csv(output_csv_path)
+""" Module for parsing Homeland infrastructure foundation level-data.
+
+Idea is to take the bounding box and find the subset of
+infrastructure in that region.
+"""
+# standard imports
+from pathlib import Path
+import math
+from typing import Union, List
+
+# third-party imports
+import pandas as pd
+import stateplane
+
+# internal imports
+from erad.utils.util import path_validation
+
+
+def get_subset_of_hifld_data(
+    csv_file: str,
+    bounds: List,
+    output_folder: str,
+    logitude_column_name: str = "X",
+    latitude_column_name: str = "Y",
+    columns_to_keep: List[str] = ["X", "Y"],
+    name_of_csv_file: Union[str, None] = None,
+) -> None:
+    """Extracts a subset of HIFLD data set.
+
+    Args:
+        csv_file (str): Path to HIFLD data csv file
+        bounds (List): Bounding box coordinates
+        output_folder (str): Path to output folder
+        logitude_column_name (str): Expects column with name 'X'
+        latitude_column_name (str): Expects column with name 'Y'
+        columns_to_keep (List): List of column names to keep
+            by default keeps all of them
+        name_of_csv_file (Union[str, None]): Name of csv file to export
+            filtered set
+    """
+
+    # Unpacking the bounds data
+    longitude_min, latitude_min, longitude_max, latitude_max = bounds
+
+    # Do a path validation
+    csv_file = Path(csv_file)
+    output_folder = Path(output_folder)
+    path_validation(csv_file, check_for_file=True, check_for_file_type=".csv")
+    path_validation(output_folder)
+
+    # Reading the hifld csv data
+    df = pd.read_csv(csv_file)
+
+    # filtering for bounds
+    df_filtered = df[
+        (df[logitude_column_name] >= longitude_min)
+        & (df[logitude_column_name] <= longitude_max)
+        & (df[latitude_column_name] >= latitude_min)
+        & (df[latitude_column_name] <= latitude_max)
+    ]
+
+    # Keep only the limited columns
+    df_subset = df_filtered[columns_to_keep]
+
+    # export the subset
+    file_name = name_of_csv_file if name_of_csv_file else csv_file.name
+    df_subset.to_csv(output_folder / file_name)
+
+
+def get_relationship_between_hifld_infrastructures(
+    hifld_data_csv: str,
+    unique_id_column: str,
+    load_csv: str,
+    bus_csv: str,
+    output_csv_path: str,
+    distance_threshold: float = 2000.0,
+):
+    """Creates a relationship between consumers and HIFLD infrastructures.
+
+    Args:
+        hifld_data_csv (str): Path to filtered HIFLD data csv file
+        unique_id_column (List): Column name used as identifier
+            for critical infrastructures
+        load_csv (str): Path to load csv file
+        bus_csv (str): Path to bus csv file
+        output_csv_path (str): output csv path for storing relationship csv
+        distance_threshold (float): Distance threshold used for mapping
+            customer to critical infrastructure
+    """
+    hifld_data_csv = Path(hifld_data_csv)
+    bus_csv = Path(bus_csv)
+    load_csv = Path(load_csv)
+    output_csv_path = Path(output_csv_path)
+
+    path_validation(
+        hifld_data_csv, check_for_file=True, check_for_file_type=".csv"
+    )
+    path_validation(bus_csv, check_for_file=True, check_for_file_type=".csv")
+    path_validation(load_csv, check_for_file=True, check_for_file_type=".csv")
+    path_validation(output_csv_path.parents[0])
+
+    hifld_data_df = pd.read_csv(hifld_data_csv)
+    load_df = pd.read_csv(load_csv)
+    bus_df = pd.read_csv(bus_csv)
+
+    merged_data = pd.merge(
+        load_df, bus_df, how="left", left_on="source", right_on="name"
+    ).to_dict(orient="records")
+
+    # Container for storing shelter relationships
+    _relationship = []
+    for _record in hifld_data_df.to_dict(orient="records"):
+        _lon, _lat = _record["LONGITUDE"], _record["LATITUDE"]
+
+        # convert into state plane coordinates
+        _lon_translated, _lat_translated = stateplane.from_lonlat(_lon, _lat)
+
+        # Loop through all the loads
+        for load_record in merged_data:
+
+            load_lon, load_lat = (
+                load_record["longitude"],
+                load_record["latitude"],
+            )
+
+            # convert into state plane coordinates
+            load_lon_translated, load_lat_translated = stateplane.from_lonlat(
+                load_lon, load_lat
+            )
+
+            # computes distance
+            distance = math.sqrt(
+                (_lat_translated - load_lat_translated) ** 2
+                + (_lon_translated - load_lon_translated) ** 2
+            )
+
+            if distance < distance_threshold:
+                _relationship.append(
+                    {
+                        unique_id_column: _record[unique_id_column],
+                        "load_name": load_record["name_x"],
+                        "distance": distance,
+                    }
+                )
+
+    df = pd.DataFrame(_relationship)
+    df.to_csv(output_csv_path)
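
For orientation, here is a minimal usage sketch of the two helpers in this file. The paths, bounding box, and `ID`/`NAME` columns are hypothetical; what the code above does require is that the HIFLD CSV carry the longitude/latitude columns passed in (default `X`/`Y`) plus `LONGITUDE`/`LATITUDE` for the relationship step, that the load and bus CSVs both carry a `name` column (joined on `load.source == bus.name`), and that the bus side supply `longitude`/`latitude`.

```python
# Hypothetical file and column names; a sketch, not from the package docs.
from erad.utils.hifld_utils import (
    get_subset_of_hifld_data,
    get_relationship_between_hifld_infrastructures,
)

# 1. Clip a national HIFLD extract to a bounding box given as
#    [lon_min, lat_min, lon_max, lat_max].
get_subset_of_hifld_data(
    csv_file="hifld_hospitals.csv",
    bounds=[-105.3, 39.6, -104.6, 40.1],
    output_folder="filtered",
    columns_to_keep=["X", "Y", "ID", "NAME", "LONGITUDE", "LATITUDE"],
    name_of_csv_file="hospitals_subset.csv",
)

# 2. Relate each load to infrastructure within 2000 state-plane units
#    (the default distance_threshold). Loads get their coordinates by
#    joining the load CSV to the bus CSV on load.source == bus.name.
get_relationship_between_hifld_infrastructures(
    hifld_data_csv="filtered/hospitals_subset.csv",
    unique_id_column="ID",
    load_csv="loads.csv",
    bus_csv="buses.csv",
    output_csv_path="hospital_load_relationship.csv",
)
```

Note that both exports call `DataFrame.to_csv` without `index=False`, so each output CSV carries a pandas index column alongside the data.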
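
The proximity test behind the relationship builder is a planar Euclidean distance taken after projecting both points with `stateplane.from_lonlat`, exactly as in the loop above. A self-contained sketch of just that check follows; `within_threshold` is a hypothetical helper and the coordinates are made up, and the threshold's physical units depend on the state-plane zone (the module itself does not document them).

```python
import math

import stateplane


def within_threshold(lon1, lat1, lon2, lat2, threshold=2000.0):
    """Planar distance test in state-plane coordinates, mirroring the module above."""
    # from_lonlat infers a state-plane zone from the point itself, so two
    # points near a zone boundary may project into different zones.
    x1, y1 = stateplane.from_lonlat(lon1, lat1)
    x2, y2 = stateplane.from_lonlat(lon2, lat2)
    return math.hypot(x2 - x1, y2 - y1) < threshold


# Two made-up points in Denver, roughly 1.4 km apart; inside the default
# 2000.0 threshold assuming a metre-based zone.
print(within_threshold(-104.99, 39.74, -104.98, 39.75))
```

Because each point is projected independently, an infrastructure site and a load that straddle a zone boundary could in principle land in different zones; within a single distribution feeder's footprint this is rarely an issue.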