pyconvexity 0.4.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pyconvexity might be problematic. Click here for more details.
- pyconvexity/__init__.py +241 -0
- pyconvexity/_version.py +1 -0
- pyconvexity/core/__init__.py +60 -0
- pyconvexity/core/database.py +485 -0
- pyconvexity/core/errors.py +106 -0
- pyconvexity/core/types.py +400 -0
- pyconvexity/dashboard.py +265 -0
- pyconvexity/data/README.md +101 -0
- pyconvexity/data/__init__.py +17 -0
- pyconvexity/data/loaders/__init__.py +3 -0
- pyconvexity/data/loaders/cache.py +213 -0
- pyconvexity/data/schema/01_core_schema.sql +420 -0
- pyconvexity/data/schema/02_data_metadata.sql +120 -0
- pyconvexity/data/schema/03_validation_data.sql +507 -0
- pyconvexity/data/sources/__init__.py +5 -0
- pyconvexity/data/sources/gem.py +442 -0
- pyconvexity/io/__init__.py +26 -0
- pyconvexity/io/excel_exporter.py +1226 -0
- pyconvexity/io/excel_importer.py +1381 -0
- pyconvexity/io/netcdf_exporter.py +191 -0
- pyconvexity/io/netcdf_importer.py +1802 -0
- pyconvexity/models/__init__.py +195 -0
- pyconvexity/models/attributes.py +730 -0
- pyconvexity/models/carriers.py +159 -0
- pyconvexity/models/components.py +611 -0
- pyconvexity/models/network.py +503 -0
- pyconvexity/models/results.py +148 -0
- pyconvexity/models/scenarios.py +234 -0
- pyconvexity/solvers/__init__.py +29 -0
- pyconvexity/solvers/pypsa/__init__.py +30 -0
- pyconvexity/solvers/pypsa/api.py +446 -0
- pyconvexity/solvers/pypsa/batch_loader.py +296 -0
- pyconvexity/solvers/pypsa/builder.py +655 -0
- pyconvexity/solvers/pypsa/clearing_price.py +678 -0
- pyconvexity/solvers/pypsa/constraints.py +405 -0
- pyconvexity/solvers/pypsa/solver.py +1442 -0
- pyconvexity/solvers/pypsa/storage.py +2096 -0
- pyconvexity/timeseries.py +330 -0
- pyconvexity/validation/__init__.py +25 -0
- pyconvexity/validation/rules.py +312 -0
- pyconvexity-0.4.8.dist-info/METADATA +148 -0
- pyconvexity-0.4.8.dist-info/RECORD +44 -0
- pyconvexity-0.4.8.dist-info/WHEEL +5 -0
- pyconvexity-0.4.8.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
"""
|
|
2
|
+
PyPSA Batch Data Loader
|
|
3
|
+
Simplified to always create MultiIndex timeseries for consistent multi-period optimization.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import pandas as pd
|
|
7
|
+
import json
|
|
8
|
+
from typing import Dict, Any, List, Optional
|
|
9
|
+
|
|
10
|
+
from pyconvexity.models.attributes import get_timeseries
|
|
11
|
+
from pyconvexity.models import get_network_time_periods
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class PyPSABatchLoader:
    """
    Simplified batch data loader for PyPSA network construction.

    Loads static attributes, bus/carrier connections, and timeseries for many
    components at once to avoid N+1 query patterns. Always creates MultiIndex
    (period, timestep) timeseries for consistent multi-period optimization.
    Assumes a single network per database.
    """

    def __init__(self):
        # Stateless loader; all state lives in the connection passed per call.
        pass

    @staticmethod
    def _build_attribute_query(
        select_columns: str,
        storage_type: str,
        component_ids: List[int],
        attribute_names: List[str],
        scenario_id: Optional[int],
    ):
        """Build the scenario-fallback attribute query and its parameters.

        Shared by the static and timeseries loaders. When ``scenario_id`` is
        given, both scenario-specific and base (scenario_id IS NULL) rows are
        selected, ordered so the scenario row sorts first; callers take the
        first row per (component, attribute). Otherwise only base rows match.

        Returns:
            Tuple of (sql_text, flat_parameter_list).
        """
        placeholders = ",".join("?" for _ in component_ids)
        attr_placeholders = ",".join("?" for _ in attribute_names)

        if scenario_id is not None:
            # Scenario fallback: scenario row preferred, base row as backup.
            query = f"""
                SELECT {select_columns}
                FROM component_attributes
                WHERE component_id IN ({placeholders})
                AND attribute_name IN ({attr_placeholders})
                AND (scenario_id = ? OR scenario_id IS NULL)
                AND storage_type = '{storage_type}'
                ORDER BY component_id, attribute_name,
                         CASE WHEN scenario_id = ? THEN 0 ELSE 1 END
            """
            params = list(component_ids) + list(attribute_names) + [scenario_id, scenario_id]
        else:
            # Base network only (scenario_id IS NULL).
            query = f"""
                SELECT {select_columns}
                FROM component_attributes
                WHERE component_id IN ({placeholders})
                AND attribute_name IN ({attr_placeholders})
                AND scenario_id IS NULL
                AND storage_type = '{storage_type}'
                ORDER BY component_id, attribute_name
            """
            params = list(component_ids) + list(attribute_names)
        return query, params

    @staticmethod
    def _convert_static_value(json_value: Any, data_type: str) -> Any:
        """Coerce a JSON-decoded static value to its declared data type.

        Unknown data types pass through unchanged. A type mismatch yields the
        type's zero value (0.0 / 0 / False / "") rather than raising, so one
        bad row cannot abort a whole batch load.
        """
        if data_type == "float":
            # NOTE: bool is an int subclass, so a JSON true becomes 1.0 here.
            return float(json_value) if isinstance(json_value, (int, float)) else 0.0
        if data_type == "int":
            return int(json_value) if isinstance(json_value, (int, float)) else 0
        if data_type == "boolean":
            # Only a genuine JSON boolean is honored; 0/1 integers become False.
            return bool(json_value) if isinstance(json_value, bool) else False
        if data_type == "string":
            return str(json_value) if isinstance(json_value, str) else ""
        return json_value

    def batch_load_component_attributes(
        self, conn, component_ids: List[int], scenario_id: Optional[int]
    ) -> Dict[int, Dict[str, Any]]:
        """Batch load all static attributes for multiple components to avoid N+1 queries (single network per database).

        Args:
            conn: DB-API connection with ``component_attributes`` available.
            component_ids: Components to load; an empty list returns ``{}``.
            scenario_id: Scenario whose values take precedence over base
                (NULL-scenario) values, or None for base-only.

        Returns:
            Mapping of component_id -> {attribute_name: typed value}. Every
            requested component_id is present, possibly with an empty dict.
        """
        if not component_ids:
            return {}

        placeholders = ",".join("?" for _ in component_ids)

        # One query to discover every static attribute name in scope.
        cursor = conn.execute(
            f"""
            SELECT DISTINCT attribute_name
            FROM component_attributes
            WHERE component_id IN ({placeholders}) AND storage_type = 'static'
            """,
            component_ids,
        )
        all_attribute_names = [row[0] for row in cursor.fetchall()]

        if not all_attribute_names:
            return {comp_id: {} for comp_id in component_ids}

        query, query_params = self._build_attribute_query(
            "component_id, attribute_name, static_value, data_type, scenario_id",
            "static",
            component_ids,
            all_attribute_names,
            scenario_id,
        )
        cursor = conn.execute(query, query_params)

        # Rows arrive ordered so preferred (scenario) rows come first per
        # (component, attribute); first-wins makes scenario override base.
        component_attributes: Dict[int, Dict[str, Any]] = {
            comp_id: {} for comp_id in component_ids
        }
        for comp_id, attr_name, static_value_json, data_type, _row_scenario_id in cursor.fetchall():
            attrs = component_attributes.get(comp_id)
            if attrs is None:
                # Safety: ignore rows for components we did not request.
                continue
            if attr_name in attrs:
                # Already filled from the preferred scenario row.
                continue
            attrs[attr_name] = self._convert_static_value(
                json.loads(static_value_json), data_type
            )

        return component_attributes

    def batch_load_component_connections(self, conn) -> Dict[str, Dict[str, str]]:
        """Batch load bus and carrier connections to avoid individual lookups (single network per database).

        Returns:
            Dict with two id->name maps: ``bus_id_to_name`` (components of
            type 'BUS') and ``carrier_id_to_name`` (rows of ``carriers``).
        """
        # All bus names in one query.
        cursor = conn.execute(
            """
            SELECT id, name FROM components
            WHERE component_type = 'BUS'
            """
        )
        bus_id_to_name = {row[0]: row[1] for row in cursor.fetchall()}

        # All carrier names in one query.
        cursor = conn.execute(
            """
            SELECT id, name FROM carriers
            """
        )
        carrier_id_to_name = {row[0]: row[1] for row in cursor.fetchall()}

        return {
            "bus_id_to_name": bus_id_to_name,
            "carrier_id_to_name": carrier_id_to_name,
        }

    def batch_load_component_timeseries(
        self, conn, component_ids: List[int], scenario_id: Optional[int]
    ) -> Dict[int, Dict[str, pd.Series]]:
        """Batch load all timeseries attributes - always create MultiIndex for consistency (single network per database).

        Builds, per attribute, a pandas Series indexed by a two-level
        (period=year, timestep=timestamp) MultiIndex following the PyPSA
        multi-investment tutorial format. Timestamps come from the network's
        time periods; a series shorter than the network horizon is aligned to
        its leading timestamps.

        Args:
            conn: DB-API connection.
            component_ids: Components to load; an empty list returns ``{}``.
            scenario_id: Scenario preferred over base values, or None.

        Returns:
            Mapping of component_id -> {attribute_name: pd.Series}. Every
            requested component_id is present, possibly with an empty dict.
        """
        if not component_ids:
            return {}

        # Network time periods drive timestamp alignment.
        network_time_periods = get_network_time_periods(conn)
        if not network_time_periods:
            return {comp_id: {} for comp_id in component_ids}

        timestamps = [pd.Timestamp(tp.formatted_time) for tp in network_time_periods]

        placeholders = ",".join("?" for _ in component_ids)

        # One query to discover every timeseries attribute name in scope.
        cursor = conn.execute(
            f"""
            SELECT DISTINCT attribute_name
            FROM component_attributes
            WHERE component_id IN ({placeholders}) AND storage_type = 'timeseries'
            """,
            component_ids,
        )
        all_attribute_names = [row[0] for row in cursor.fetchall()]

        if not all_attribute_names:
            return {comp_id: {} for comp_id in component_ids}

        query, query_params = self._build_attribute_query(
            "component_id, attribute_name, timeseries_data, scenario_id",
            "timeseries",
            component_ids,
            all_attribute_names,
            scenario_id,
        )
        cursor = conn.execute(query, query_params)

        component_timeseries: Dict[int, Dict[str, pd.Series]] = {
            comp_id: {} for comp_id in component_ids
        }

        # First-wins per (component, attribute): scenario rows sort first.
        for comp_id, attr_name, _timeseries_blob, row_scenario_id in cursor.fetchall():
            if comp_id not in component_timeseries:
                # Safety: ignore rows for components we did not request.
                continue
            if attr_name in component_timeseries[comp_id]:
                continue

            # NOTE(review): the raw timeseries_data blob is fetched above but
            # deserialization is delegated to get_timeseries (one extra query
            # per attribute) to stay compatible with its storage format.
            try:
                timeseries = get_timeseries(conn, comp_id, attr_name, row_scenario_id)
                if timeseries and timeseries.values:
                    values = timeseries.values

                    # MultiIndex per PyPSA multi-investment tutorial:
                    # level 0 = investment period (year), level 1 = timestep.
                    multi_snapshots = [
                        (ts.year, ts) for ts in timestamps[: len(values)]
                    ]
                    if multi_snapshots:
                        multi_index = pd.MultiIndex.from_tuples(
                            multi_snapshots, names=["period", "timestep"]
                        )
                        component_timeseries[comp_id][attr_name] = pd.Series(
                            values, index=multi_index
                        )
            except Exception:
                # Best-effort batch load: a malformed series is skipped rather
                # than aborting the whole network build.
                continue

        return component_timeseries

    def batch_load_all_component_timeseries_by_type(
        self, conn, component_type: str, scenario_id: Optional[int]
    ) -> Dict[str, pd.DataFrame]:
        """
        Load all timeseries attributes for a component type and organize by attribute name (single network per database).
        This is a compatibility method for the existing _load_all_component_timeseries interface.

        Returns:
            Mapping of attribute_name -> DataFrame whose columns are component
            names and whose index is the (period, timestep) MultiIndex.
        """
        # Local import mirrors the original module's lazy project import.
        from pyconvexity.models import list_components_by_type

        components = list_components_by_type(conn, component_type)
        component_ids = [comp.id for comp in components]

        # Reuse the batched per-component loader.
        component_timeseries = self.batch_load_component_timeseries(
            conn, component_ids, scenario_id
        )

        # Reorganize by attribute name (matching original interface),
        # collecting into dicts first to avoid DataFrame fragmentation.
        timeseries_by_attr: Dict[str, Any] = {}

        for component in components:
            comp_timeseries = component_timeseries.get(component.id, {})
            for attr_name, series in comp_timeseries.items():
                timeseries_by_attr.setdefault(attr_name, {})[component.name] = series

        # Convert each attribute's dict of Series to a DataFrame in one shot.
        for attr_name in timeseries_by_attr:
            if timeseries_by_attr[attr_name]:
                timeseries_by_attr[attr_name] = pd.DataFrame(
                    timeseries_by_attr[attr_name]
                )
            else:
                timeseries_by_attr[attr_name] = pd.DataFrame()

        return timeseries_by_attr