risk-network 0.0.11__py3-none-any.whl → 0.0.12b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. risk/__init__.py +1 -1
  2. risk/risk.py +5 -5
  3. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info}/METADATA +10 -12
  4. risk_network-0.0.12b0.dist-info/RECORD +7 -0
  5. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info}/WHEEL +1 -1
  6. risk/annotations/__init__.py +0 -7
  7. risk/annotations/annotations.py +0 -354
  8. risk/annotations/io.py +0 -240
  9. risk/annotations/nltk_setup.py +0 -85
  10. risk/log/__init__.py +0 -11
  11. risk/log/console.py +0 -141
  12. risk/log/parameters.py +0 -172
  13. risk/neighborhoods/__init__.py +0 -8
  14. risk/neighborhoods/api.py +0 -442
  15. risk/neighborhoods/community.py +0 -412
  16. risk/neighborhoods/domains.py +0 -358
  17. risk/neighborhoods/neighborhoods.py +0 -508
  18. risk/network/__init__.py +0 -6
  19. risk/network/geometry.py +0 -150
  20. risk/network/graph/__init__.py +0 -6
  21. risk/network/graph/api.py +0 -200
  22. risk/network/graph/graph.py +0 -269
  23. risk/network/graph/summary.py +0 -254
  24. risk/network/io.py +0 -550
  25. risk/network/plotter/__init__.py +0 -6
  26. risk/network/plotter/api.py +0 -54
  27. risk/network/plotter/canvas.py +0 -291
  28. risk/network/plotter/contour.py +0 -330
  29. risk/network/plotter/labels.py +0 -924
  30. risk/network/plotter/network.py +0 -294
  31. risk/network/plotter/plotter.py +0 -143
  32. risk/network/plotter/utils/colors.py +0 -416
  33. risk/network/plotter/utils/layout.py +0 -94
  34. risk/stats/__init__.py +0 -15
  35. risk/stats/permutation/__init__.py +0 -6
  36. risk/stats/permutation/permutation.py +0 -237
  37. risk/stats/permutation/test_functions.py +0 -70
  38. risk/stats/significance.py +0 -166
  39. risk/stats/stat_tests.py +0 -267
  40. risk_network-0.0.11.dist-info/RECORD +0 -41
  41. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info/licenses}/LICENSE +0 -0
  42. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info}/top_level.txt +0 -0
risk/annotations/io.py DELETED
@@ -1,240 +0,0 @@
- """
- risk/annotations/io
- ~~~~~~~~~~~~~~~~~~~
- """
-
- import json
- from typing import Any, Dict
-
- import networkx as nx
- import pandas as pd
-
- from risk.annotations.annotations import load_annotations
- from risk.log import params, logger, log_header
-
-
- class AnnotationsIO:
-     """Handles the loading and exporting of annotations in various file formats.
-
-     The AnnotationsIO class provides methods to load annotations from different file types (JSON, CSV, Excel, etc.)
-     and to export parameter data to various formats like JSON, CSV, and text files.
-     """
-
-     def __init__(self):
-         pass
-
-     def load_json_annotation(
-         self, network: nx.Graph, filepath: str, min_nodes_per_term: int = 2
-     ) -> Dict[str, Any]:
-         """Load annotations from a JSON file and convert them to a DataFrame.
-
-         Args:
-             network (NetworkX graph): The network to which the annotations are related.
-             filepath (str): Path to the JSON annotations file.
-             min_nodes_per_term (int, optional): The minimum number of network nodes required for each annotation
-                 term to be included. Defaults to 2.
-
-         Returns:
-             Dict[str, Any]: A dictionary containing ordered nodes, ordered annotations, and the annotations matrix.
-         """
-         filetype = "JSON"
-         # Log the loading of the JSON file
-         params.log_annotations(
-             filetype=filetype, filepath=filepath, min_nodes_per_term=min_nodes_per_term
-         )
-         _log_loading(filetype, filepath=filepath)
-
-         # Load the JSON file into a dictionary
-         with open(filepath, "r", encoding="utf-8") as file:
-             annotations_input = json.load(file)
-
-         return load_annotations(network, annotations_input, min_nodes_per_term)
-
-     def load_excel_annotation(
-         self,
-         network: nx.Graph,
-         filepath: str,
-         label_colname: str = "label",
-         nodes_colname: str = "nodes",
-         sheet_name: str = "Sheet1",
-         nodes_delimiter: str = ";",
-         min_nodes_per_term: int = 2,
-     ) -> Dict[str, Any]:
-         """Load annotations from an Excel file and associate them with the network.
-
-         Args:
-             network (nx.Graph): The NetworkX graph to which the annotations are related.
-             filepath (str): Path to the Excel annotations file.
-             label_colname (str): Name of the column containing the labels (e.g., GO terms).
-             nodes_colname (str): Name of the column containing the nodes associated with each label.
-             sheet_name (str, optional): The name of the Excel sheet to load (default is 'Sheet1').
-             nodes_delimiter (str, optional): Delimiter used to separate multiple nodes within the nodes column (default is ';').
-             min_nodes_per_term (int, optional): The minimum number of network nodes required for each annotation
-                 term to be included. Defaults to 2.
-
-         Returns:
-             Dict[str, Any]: A dictionary where each label is paired with its respective list of nodes,
-                 linked to the provided network.
-         """
-         filetype = "Excel"
-         # Log the loading of the Excel file
-         params.log_annotations(
-             filetype=filetype, filepath=filepath, min_nodes_per_term=min_nodes_per_term
-         )
-         _log_loading(filetype, filepath=filepath)
-
-         # Load the specified sheet from the Excel file
-         annotation = pd.read_excel(filepath, sheet_name=sheet_name)
-         # Split the nodes column by the specified nodes_delimiter
-         annotation[nodes_colname] = annotation[nodes_colname].apply(
-             lambda x: x.split(nodes_delimiter)
-         )
-         # Convert the DataFrame to a dictionary pairing labels with their corresponding nodes
-         annotations_input = annotation.set_index(label_colname)[nodes_colname].to_dict()
-
-         return load_annotations(network, annotations_input, min_nodes_per_term)
-
-     def load_csv_annotation(
-         self,
-         network: nx.Graph,
-         filepath: str,
-         label_colname: str = "label",
-         nodes_colname: str = "nodes",
-         nodes_delimiter: str = ";",
-         min_nodes_per_term: int = 2,
-     ) -> Dict[str, Any]:
-         """Load annotations from a CSV file and associate them with the network.
-
-         Args:
-             network (nx.Graph): The NetworkX graph to which the annotations are related.
-             filepath (str): Path to the CSV annotations file.
-             label_colname (str): Name of the column containing the labels (e.g., GO terms).
-             nodes_colname (str): Name of the column containing the nodes associated with each label.
-             nodes_delimiter (str, optional): Delimiter used to separate multiple nodes within the nodes column (default is ';').
-             min_nodes_per_term (int, optional): The minimum number of network nodes required for each annotation
-                 term to be included. Defaults to 2.
-
-         Returns:
-             Dict[str, Any]: A dictionary where each label is paired with its respective list of nodes,
-                 linked to the provided network.
-         """
-         filetype = "CSV"
-         # Log the loading of the CSV file
-         params.log_annotations(
-             filetype=filetype, filepath=filepath, min_nodes_per_term=min_nodes_per_term
-         )
-         _log_loading(filetype, filepath=filepath)
-
-         # Load the CSV file into a dictionary
-         annotations_input = _load_matrix_file(
-             filepath, label_colname, nodes_colname, delimiter=",", nodes_delimiter=nodes_delimiter
-         )
-
-         return load_annotations(network, annotations_input, min_nodes_per_term)
-
-     def load_tsv_annotation(
-         self,
-         network: nx.Graph,
-         filepath: str,
-         label_colname: str = "label",
-         nodes_colname: str = "nodes",
-         nodes_delimiter: str = ";",
-         min_nodes_per_term: int = 2,
-     ) -> Dict[str, Any]:
-         """Load annotations from a TSV file and associate them with the network.
-
-         Args:
-             network (nx.Graph): The NetworkX graph to which the annotations are related.
-             filepath (str): Path to the TSV annotations file.
-             label_colname (str): Name of the column containing the labels (e.g., GO terms).
-             nodes_colname (str): Name of the column containing the nodes associated with each label.
-             nodes_delimiter (str, optional): Delimiter used to separate multiple nodes within the nodes column (default is ';').
-             min_nodes_per_term (int, optional): The minimum number of network nodes required for each annotation
-                 term to be included. Defaults to 2.
-
-         Returns:
-             Dict[str, Any]: A dictionary where each label is paired with its respective list of nodes,
-                 linked to the provided network.
-         """
-         filetype = "TSV"
-         # Log the loading of the TSV file
-         params.log_annotations(
-             filetype=filetype, filepath=filepath, min_nodes_per_term=min_nodes_per_term
-         )
-         _log_loading(filetype, filepath=filepath)
-
-         # Load the TSV file into a dictionary
-         annotations_input = _load_matrix_file(
-             filepath, label_colname, nodes_colname, delimiter="\t", nodes_delimiter=nodes_delimiter
-         )
-
-         return load_annotations(network, annotations_input, min_nodes_per_term)
-
-     def load_dict_annotation(
-         self, network: nx.Graph, content: Dict[str, Any], min_nodes_per_term: int = 2
-     ) -> Dict[str, Any]:
-         """Load annotations from a provided dictionary and convert them to a dictionary annotation.
-
-         Args:
-             network (NetworkX graph): The network to which the annotations are related.
-             content (Dict[str, Any]): The annotations dictionary to load.
-             min_nodes_per_term (int, optional): The minimum number of network nodes required for each annotation
-                 term to be included. Defaults to 2.
-
-         Returns:
-             Dict[str, Any]: A dictionary containing ordered nodes, ordered annotations, and the annotations matrix.
-         """
-         # Ensure the input content is a dictionary
-         if not isinstance(content, dict):
-             raise TypeError(
-                 f"Expected 'content' to be a dictionary, but got {type(content).__name__} instead."
-             )
-
-         filetype = "Dictionary"
-         # Log the loading of the annotations from the dictionary
-         params.log_annotations(filepath="In-memory dictionary", filetype=filetype)
-         _log_loading(filetype, "In-memory dictionary")
-
-         # Load the annotations as a dictionary from the provided dictionary
-         return load_annotations(network, content, min_nodes_per_term)
-
-
- def _load_matrix_file(
-     filepath: str,
-     label_colname: str,
-     nodes_colname: str,
-     delimiter: str = ",",
-     nodes_delimiter: str = ";",
- ) -> Dict[str, Any]:
-     """Load annotations from a CSV or TSV file and convert them to a dictionary.
-
-     Args:
-         filepath (str): Path to the annotation file.
-         label_colname (str): Name of the column containing the labels (e.g., GO terms).
-         nodes_colname (str): Name of the column containing the nodes associated with each label.
-         delimiter (str, optional): Delimiter used to separate columns in the file (default is ',').
-         nodes_delimiter (str, optional): Delimiter used to separate multiple nodes within the nodes column (default is ';').
-
-     Returns:
-         Dict[str, Any]: A dictionary where each label is paired with its respective list of nodes.
-     """
-     # Load the CSV or TSV file into a DataFrame
-     annotation = pd.read_csv(filepath, delimiter=delimiter)
-     # Split the nodes column by the nodes_delimiter to handle multiple nodes per label
-     annotation[nodes_colname] = annotation[nodes_colname].apply(lambda x: x.split(nodes_delimiter))
-     # Create a dictionary pairing labels with their corresponding list of nodes
-     label_node_dict = annotation.set_index(label_colname)[nodes_colname].to_dict()
-     return label_node_dict
-
-
- def _log_loading(filetype: str, filepath: str = "") -> None:
-     """Log information about the network file being loaded.
-
-     Args:
-         filetype (str): The type of the file being loaded (e.g., 'Cytoscape').
-         filepath (str, optional): The path to the file being loaded.
-     """
-     log_header("Loading annotations")
-     logger.debug(f"Filetype: {filetype}")
-     if filepath:
-         logger.debug(f"Filepath: {filepath}")
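For context on what this deletion removes, here is a minimal usage sketch against the 0.0.11 API shown above. The two-node graph and the "annotations.csv" path are hypothetical stand-ins; the method name and keyword arguments are taken verbatim from the deleted code.

    import networkx as nx
    from risk.annotations.io import AnnotationsIO

    # Hypothetical toy network; any NetworkX graph works here.
    network = nx.Graph()
    network.add_edge("YAL001C", "YBR009C")

    io = AnnotationsIO()
    # Returns a dict of ordered nodes, ordered annotations, and the
    # annotations matrix, per the docstrings above.
    annotations = io.load_csv_annotation(
        network,
        filepath="annotations.csv",  # hypothetical file with label,nodes columns
        label_colname="label",
        nodes_colname="nodes",
        nodes_delimiter=";",
        min_nodes_per_term=2,
    )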
risk/annotations/nltk_setup.py DELETED
@@ -1,85 +0,0 @@
- """
- risk/annotations/nltk_setup
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
- """
-
- import os
- import zipfile
- from typing import List, Tuple
-
- import nltk
- from nltk.data import find, path as nltk_data_path
-
- from risk.log import logger
-
-
- def setup_nltk_resources(required_resources: List[Tuple[str, str]] = None) -> None:
-     """Ensures all required NLTK resources are available and properly extracted.
-     Uses NLTK's default paths and mechanisms.
-
-     Args:
-         required_resources (List[Tuple[str, str]], optional): List of required resources
-             to download and extract. Each tuple should contain the resource path within
-             NLTK data and the package name. Defaults to None.
-     """
-     if required_resources is None:
-         required_resources = [
-             ("tokenizers/punkt", "punkt"),
-             ("tokenizers/punkt_tab", "punkt_tab"),
-             ("corpora/stopwords", "stopwords"),
-             ("corpora/wordnet", "wordnet"),
-         ]
-
-     # Process each resource
-     for resource_path, package_name in required_resources:
-         try:
-             # First try to find the resource - this is how NLTK checks if it's available
-             find(resource_path)
-         except LookupError:
-             # Resource not found, download it
-             logger.info(f"Downloading missing NLTK resource: {package_name}")
-             nltk.download(package_name, quiet=True)
-
-         # Even if find() succeeded, the resource might be a zip that failed to extract
-         # Check if we need to manually extract zips
-         verify_and_extract_if_needed(resource_path, package_name)
-
-
- def verify_and_extract_if_needed(resource_path: str, package_name: str) -> None:
-     """Verifies if the resource is properly extracted and extracts if needed. Respects
-     NLTK's directory structure where the extracted content should be in the same directory
-     as the zip file.
-
-     Args:
-         resource_path (str): Path to the resource within NLTK data.
-         package_name (str): Name of the NLTK package.
-     """
-     # Get the directory and base name from the resource path
-     path_parts = resource_path.split("/")
-     resource_type = path_parts[0]  # 'corpora', 'tokenizers', etc.
-     resource_name = path_parts[-1]  # 'wordnet', 'punkt', etc.
-
-     # Check all NLTK data directories
-     for base in nltk_data_path:
-         # For resource paths like 'corpora/wordnet', the zip file is at '~/nltk_data/corpora/wordnet.zip'
-         # and the extracted directory should be at '~/nltk_data/corpora/wordnet'
-         resource_dir = os.path.join(base, resource_type)
-         zip_path = os.path.join(resource_dir, f"{resource_name}.zip")
-         folder_path = os.path.join(resource_dir, resource_name)
-
-         # If zip exists but folder doesn't, extraction is needed
-         if os.path.exists(zip_path) and not os.path.exists(folder_path):
-             logger.info(f"Found unextracted zip for {package_name}, extracting...")
-             try:
-                 with zipfile.ZipFile(zip_path, "r") as zf:
-                     # Extract files to the same directory where the zip file is located
-                     zf.extractall(path=resource_dir)
-
-                 if os.path.exists(folder_path):
-                     logger.info(f"Successfully extracted {package_name}")
-                 else:
-                     logger.warning(
-                         f"Extraction completed but resource directory not found for {package_name}"
-                     )
-             except Exception as e:
-                 logger.error(f"Failed to extract {package_name}: {e}")
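The removed setup routine above was self-contained and could be driven directly. A minimal sketch, assuming NLTK is installed; the custom resource tuple in the second call is a hypothetical example, not one of the module's defaults.

    from risk.annotations.nltk_setup import setup_nltk_resources

    # With no argument, this checked for punkt, punkt_tab, stopwords, and
    # wordnet, downloading and force-extracting any that were missing.
    setup_nltk_resources()

    # Custom resources were passed as (resource_path, package_name) tuples.
    setup_nltk_resources([("corpora/omw-1.4", "omw-1.4")])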
risk/log/__init__.py DELETED
@@ -1,11 +0,0 @@
- """
- risk/log
- ~~~~~~~~
- """
-
- from risk.log.console import logger, log_header, set_global_verbosity
- from risk.log.parameters import Params
-
- # Initialize the global parameters logger
- params = Params()
- params.initialize()
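Note the design choice here: importing risk.log created a single module-level Params instance, so every loader and API class wrote into the same parameter store (the explicit params.initialize() call is redundant, since Params.__init__ already invokes it). A sketch of how callers shared that singleton; the kwargs are hypothetical placeholders, since the log_* methods accept arbitrary keyword arguments.

    from risk.log import log_header, logger, params

    log_header("Session start")
    params.log_network(source="network.gpickle")  # hypothetical kwargs
    logger.info("Network parameters recorded")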
risk/log/console.py DELETED
@@ -1,141 +0,0 @@
- """
- risk/log/console
- ~~~~~~~~~~~~~~~~
- """
-
- import logging
-
-
- def in_jupyter():
-     """Check if the code is running in a Jupyter notebook environment.
-
-     Returns:
-         bool: True if running in a Jupyter notebook or QtConsole, False otherwise.
-     """
-     try:
-         shell = get_ipython().__class__.__name__
-         if shell == "ZMQInteractiveShell":  # Jupyter Notebook or QtConsole
-             return True
-         if shell == "TerminalInteractiveShell":  # Terminal running IPython
-             return False
-
-         return False  # Other type (?)
-     except NameError:
-         return False  # Not in Jupyter
-
-
- # Define the MockLogger class to replicate logging behavior with print statements in Jupyter
- class MockLogger:
-     """MockLogger: A lightweight logger replacement using print statements in Jupyter.
-
-     The MockLogger class replicates the behavior of a standard logger using print statements
-     to display messages. This is primarily used in a Jupyter environment to show outputs
-     directly in the notebook. The class supports logging levels such as `info`, `debug`,
-     `warning`, and `error`, while the `verbose` attribute controls whether to display non-error messages.
-     """
-
-     def __init__(self, verbose: bool = True):
-         """Initialize the MockLogger with verbosity settings.
-
-         Args:
-             verbose (bool): If True, display all log messages (info, debug, warning).
-                 If False, only display error messages. Defaults to True.
-         """
-         self.verbose = verbose
-
-     def info(self, message: str) -> None:
-         """Display an informational message.
-
-         Args:
-             message (str): The informational message to be printed.
-         """
-         if self.verbose:
-             print(message)
-
-     def debug(self, message: str) -> None:
-         """Display a debug message.
-
-         Args:
-             message (str): The debug message to be printed.
-         """
-         if self.verbose:
-             print(message)
-
-     def warning(self, message: str) -> None:
-         """Display a warning message.
-
-         Args:
-             message (str): The warning message to be printed.
-         """
-         print(message)
-
-     def error(self, message: str) -> None:
-         """Display an error message.
-
-         Args:
-             message (str): The error message to be printed.
-         """
-         print(message)
-
-     def setLevel(self, level: int) -> None:
-         """Adjust verbosity based on the logging level.
-
-         Args:
-             level (int): Logging level to control message display.
-                 - logging.DEBUG sets verbose to True (show all messages).
-                 - logging.WARNING sets verbose to False (show only warning, error, and critical messages).
-         """
-         if level == logging.DEBUG:
-             self.verbose = True  # Show all messages
-         elif level == logging.WARNING:
-             self.verbose = False  # Suppress all except warning, error, and critical messages
-
-
- # Set up logger based on environment
- if not in_jupyter():
-     # Set up logger normally for .py files or terminal environments
-     logger = logging.getLogger("risk_logger")
-     logger.setLevel(logging.DEBUG)
-     console_handler = logging.StreamHandler()
-     console_handler.setLevel(logging.DEBUG)
-     console_handler.setFormatter(logging.Formatter("%(message)s"))
-
-     if not logger.hasHandlers():
-         logger.addHandler(console_handler)
- else:
-     # If in Jupyter, use the MockLogger
-     logger = MockLogger()
-
-
- def set_global_verbosity(verbose):
-     """Set the global verbosity level for the logger.
-
-     Args:
-         verbose (bool): Whether to display all log messages (True) or only error messages (False).
-
-     Returns:
-         None
-     """
-     if not isinstance(logger, MockLogger):
-         # For the regular logger, adjust logging levels
-         if verbose:
-             logger.setLevel(logging.DEBUG)  # Show all messages
-             console_handler.setLevel(logging.DEBUG)
-         else:
-             logger.setLevel(logging.WARNING)  # Show only warning, error, and critical messages
-             console_handler.setLevel(logging.WARNING)
-     else:
-         # For the MockLogger, set verbosity directly
-         logger.setLevel(logging.DEBUG if verbose else logging.WARNING)
-
-
- def log_header(input_string: str) -> None:
-     """Log the input string as a header with a line of dashes above and below it.
-
-     Args:
-         input_string (str): The string to be printed as a header.
-     """
-     border = "-" * len(input_string)
-     logger.info(border)
-     logger.info(input_string)
-     logger.info(border)
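At import time this module bound `logger` to either a real logging.Logger (terminal) or the print-based MockLogger (Jupyter), so set_global_verbosity toggled both variants through one call. A brief sketch of the deleted API:

    from risk.log.console import log_header, logger, set_global_verbosity

    set_global_verbosity(False)    # keep only warnings and errors
    logger.info("suppressed")      # hidden in both logger variants
    logger.warning("still shown")  # emitted in both logger variants

    set_global_verbosity(True)     # restore full output
    log_header("Loading annotations")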
risk/log/parameters.py DELETED
@@ -1,172 +0,0 @@
- """
- risk/log/parameters
- ~~~~~~~~~~~~~~~~~~~
- """
-
- import csv
- import json
- import warnings
- from datetime import datetime
- from typing import Any, Dict
-
- import numpy as np
-
- from risk.log.console import logger, log_header
-
- # Suppress all warnings - this is to resolve warnings from multiprocessing
- warnings.filterwarnings("ignore")
-
-
- class Params:
-     """Handles the storage and logging of various parameters for network analysis.
-
-     The Params class provides methods to log parameters related to different components of the analysis,
-     such as the network, annotations, neighborhoods, graph, and plotter settings. It also stores
-     the current datetime when the parameters were initialized.
-     """
-
-     def __init__(self):
-         """Initialize the Params object with default settings and current datetime."""
-         self.initialize()
-         self.datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-     def initialize(self) -> None:
-         """Initialize the parameter dictionaries for different components."""
-         self.network = {}
-         self.annotations = {}
-         self.neighborhoods = {}
-         self.graph = {}
-         self.plotter = {}
-
-     def log_network(self, **kwargs) -> None:
-         """Log network-related parameters.
-
-         Args:
-             **kwargs: Network parameters to log.
-         """
-         self.network = {**self.network, **kwargs}
-
-     def log_annotations(self, **kwargs) -> None:
-         """Log annotation-related parameters.
-
-         Args:
-             **kwargs: Annotation parameters to log.
-         """
-         self.annotations = {**self.annotations, **kwargs}
-
-     def log_neighborhoods(self, **kwargs) -> None:
-         """Log neighborhood-related parameters.
-
-         Args:
-             **kwargs: Neighborhood parameters to log.
-         """
-         self.neighborhoods = {**self.neighborhoods, **kwargs}
-
-     def log_graph(self, **kwargs) -> None:
-         """Log graph-related parameters.
-
-         Args:
-             **kwargs: Graph parameters to log.
-         """
-         self.graph = {**self.graph, **kwargs}
-
-     def log_plotter(self, **kwargs) -> None:
-         """Log plotter-related parameters.
-
-         Args:
-             **kwargs: Plotter parameters to log.
-         """
-         self.plotter = {**self.plotter, **kwargs}
-
-     def to_csv(self, filepath: str) -> None:
-         """Export the parameters to a CSV file.
-
-         Args:
-             filepath (str): The path where the CSV file will be saved.
-         """
-         # Load the parameter dictionary
-         params = self.load()
-         # Open the file in write mode
-         with open(filepath, "w", encoding="utf-8", newline="") as csv_file:
-             writer = csv.writer(csv_file)
-             # Write the header
-             writer.writerow(["parent_key", "child_key", "value"])
-             # Write the rows
-             for parent_key, parent_value in params.items():
-                 if isinstance(parent_value, dict):
-                     for child_key, child_value in parent_value.items():
-                         writer.writerow([parent_key, child_key, child_value])
-                 else:
-                     writer.writerow([parent_key, "", parent_value])
-
-         logger.info(f"Parameters exported to CSV file: {filepath}")
-
-     def to_json(self, filepath: str) -> None:
-         """Export the parameters to a JSON file.
-
-         Args:
-             filepath (str): The path where the JSON file will be saved.
-         """
-         with open(filepath, "w", encoding="utf-8") as json_file:
-             json.dump(self.load(), json_file, indent=4)
-
-         logger.info(f"Parameters exported to JSON file: {filepath}")
-
-     def to_txt(self, filepath: str) -> None:
-         """Export the parameters to a text file.
-
-         Args:
-             filepath (str): The path where the text file will be saved.
-         """
-         # Load the parameter dictionary
-         params = self.load()
-         # Open the file in write mode
-         with open(filepath, "w", encoding="utf-8") as txt_file:
-             for key, value in params.items():
-                 # Write the key and its corresponding value
-                 txt_file.write(f"{key}: {value}\n")
-                 # Add a blank line after each entry
-                 txt_file.write("\n")
-
-         logger.info(f"Parameters exported to text file: {filepath}")
-
-     def load(self) -> Dict[str, Any]:
-         """Load and process various parameters, converting any np.ndarray values to lists.
-
-         Returns:
-             Dict[str, Any]: A dictionary containing the processed parameters.
-         """
-         log_header("Loading parameters")
-         return _convert_ndarray_to_list(
-             {
-                 "annotations": self.annotations,
-                 "datetime": self.datetime,
-                 "graph": self.graph,
-                 "neighborhoods": self.neighborhoods,
-                 "network": self.network,
-                 "plotter": self.plotter,
-             }
-         )
-
-
- def _convert_ndarray_to_list(d: Dict[str, Any]) -> Dict[str, Any]:
-     """Recursively convert all np.ndarray values in the dictionary to lists.
-
-     Args:
-         d (Dict[str, Any]): The dictionary to process.
-
-     Returns:
-         Dict[str, Any]: The processed dictionary with np.ndarray values converted to lists.
-     """
-     if isinstance(d, dict):
-         # Recursively process each value in the dictionary
-         return {k: _convert_ndarray_to_list(v) for k, v in d.items()}
-     if isinstance(d, list):
-         # Recursively process each item in the list
-         return [_convert_ndarray_to_list(v) for v in d]
-     if isinstance(d, np.ndarray):
-         # Convert numpy arrays to lists
-         return d.tolist()
-
-     # Return the value unchanged if it's not a dict, List, or ndarray
-     return d
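Params was a plain collector: each log_* call merged its kwargs into a per-component dict, and load() snapshotted everything (converting NumPy arrays to lists) before export. A round-trip sketch with hypothetical kwargs and output path:

    import numpy as np

    from risk.log.parameters import Params

    params = Params()
    params.log_network(num_nodes=42)                       # hypothetical kwargs
    params.log_neighborhoods(scores=np.array([0.1, 0.9]))  # ndarray -> list on export
    params.to_json("parameters.json")                      # also: to_csv(), to_txt()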
risk/neighborhoods/__init__.py DELETED
@@ -1,8 +0,0 @@
- """
- risk/neighborhoods
- ~~~~~~~~~~~~~~~~~~
- """
-
- from risk.neighborhoods.domains import define_domains, trim_domains
- from risk.neighborhoods.api import NeighborhoodsAPI
- from risk.neighborhoods.neighborhoods import process_neighborhoods