pyconvexity 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pyconvexity might be problematic. Click here for more details.
- pyconvexity/__init__.py +57 -8
- pyconvexity/_version.py +1 -2
- pyconvexity/core/__init__.py +0 -2
- pyconvexity/core/database.py +158 -0
- pyconvexity/core/types.py +105 -18
- pyconvexity/data/README.md +101 -0
- pyconvexity/data/__init__.py +18 -0
- pyconvexity/data/__pycache__/__init__.cpython-313.pyc +0 -0
- pyconvexity/data/loaders/__init__.py +3 -0
- pyconvexity/data/loaders/__pycache__/__init__.cpython-313.pyc +0 -0
- pyconvexity/data/loaders/__pycache__/cache.cpython-313.pyc +0 -0
- pyconvexity/data/loaders/cache.py +212 -0
- pyconvexity/data/schema/01_core_schema.sql +12 -12
- pyconvexity/data/schema/02_data_metadata.sql +17 -321
- pyconvexity/data/sources/__init__.py +5 -0
- pyconvexity/data/sources/__pycache__/__init__.cpython-313.pyc +0 -0
- pyconvexity/data/sources/__pycache__/gem.cpython-313.pyc +0 -0
- pyconvexity/data/sources/gem.py +412 -0
- pyconvexity/io/__init__.py +32 -0
- pyconvexity/io/excel_exporter.py +1012 -0
- pyconvexity/io/excel_importer.py +1109 -0
- pyconvexity/io/netcdf_exporter.py +192 -0
- pyconvexity/io/netcdf_importer.py +1602 -0
- pyconvexity/models/__init__.py +7 -0
- pyconvexity/models/attributes.py +209 -72
- pyconvexity/models/components.py +3 -0
- pyconvexity/models/network.py +17 -15
- pyconvexity/models/scenarios.py +177 -0
- pyconvexity/solvers/__init__.py +29 -0
- pyconvexity/solvers/pypsa/__init__.py +24 -0
- pyconvexity/solvers/pypsa/api.py +421 -0
- pyconvexity/solvers/pypsa/batch_loader.py +304 -0
- pyconvexity/solvers/pypsa/builder.py +566 -0
- pyconvexity/solvers/pypsa/constraints.py +321 -0
- pyconvexity/solvers/pypsa/solver.py +1106 -0
- pyconvexity/solvers/pypsa/storage.py +1574 -0
- pyconvexity/timeseries.py +327 -0
- pyconvexity/validation/rules.py +2 -2
- {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.4.dist-info}/METADATA +5 -2
- pyconvexity-0.1.4.dist-info/RECORD +46 -0
- pyconvexity-0.1.2.dist-info/RECORD +0 -20
- {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.4.dist-info}/WHEEL +0 -0
- {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
"""
|
|
2
|
+
High-level timeseries API for PyConvexity.
|
|
3
|
+
|
|
4
|
+
This module provides the main interface for working with timeseries data,
|
|
5
|
+
matching the efficient patterns used in the Rust implementation.
|
|
6
|
+
|
|
7
|
+
Key Features:
|
|
8
|
+
- Ultra-fast binary serialization (matches Rust exactly)
|
|
9
|
+
- Array-based data structures for maximum performance
|
|
10
|
+
- Unified API for getting/setting timeseries data
|
|
11
|
+
- Backward compatibility with legacy point-based format
|
|
12
|
+
- Efficient sampling and filtering operations
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import sqlite3
|
|
16
|
+
from typing import List, Optional, Union
|
|
17
|
+
import numpy as np
|
|
18
|
+
|
|
19
|
+
from pyconvexity.core.database import database_context
|
|
20
|
+
from pyconvexity.core.types import Timeseries, TimeseriesMetadata
|
|
21
|
+
from pyconvexity.models.attributes import (
|
|
22
|
+
get_timeseries as _get_timeseries,
|
|
23
|
+
get_timeseries_metadata as _get_timeseries_metadata,
|
|
24
|
+
set_timeseries_attribute,
|
|
25
|
+
serialize_values_to_binary,
|
|
26
|
+
deserialize_values_from_binary,
|
|
27
|
+
get_timeseries_length_from_binary
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# ============================================================================
|
|
32
|
+
# HIGH-LEVEL TIMESERIES API
|
|
33
|
+
# ============================================================================
|
|
34
|
+
|
|
35
|
+
def get_timeseries(
    db_path: str,
    component_id: int,
    attribute_name: str,
    scenario_id: Optional[int] = None,
    start_index: Optional[int] = None,
    end_index: Optional[int] = None,
    max_points: Optional[int] = None
) -> Timeseries:
    """
    Load one component attribute as an array-based Timeseries.

    Opens the database read-only for the duration of the call and delegates
    to the low-level loader in ``pyconvexity.models.attributes``, returning
    its result unchanged.

    Args:
        db_path: Path to the database file.
        component_id: Component ID to read from.
        attribute_name: Attribute name (e.g. 'p', 'p_set', 'marginal_cost').
        scenario_id: Scenario ID; the master scenario is used when None.
        start_index: Optional first index for range queries.
        end_index: Optional last index for range queries.
        max_points: Optional cap on the number of points (sampling).

    Returns:
        Timeseries object with efficient array-based data.

    Example:
        >>> ts = get_timeseries("model.db", component_id=123, attribute_name="p")
        >>> ts_subset = get_timeseries("model.db", 123, "p", start_index=100, end_index=200)
        >>> ts_sampled = get_timeseries("model.db", 123, "p", max_points=1000)
    """
    with database_context(db_path, read_only=True) as conn:
        return _get_timeseries(
            conn,
            component_id,
            attribute_name,
            scenario_id,
            start_index,
            end_index,
            max_points,
        )
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def get_timeseries_metadata(
    db_path: str,
    component_id: int,
    attribute_name: str,
    scenario_id: Optional[int] = None
) -> TimeseriesMetadata:
    """
    Fetch metadata for a timeseries without loading its values.

    Useful to inspect size and type information before deciding whether to
    load the full data. Opens the database read-only and delegates to the
    low-level metadata reader.

    Args:
        db_path: Path to the database file.
        component_id: Component ID to read from.
        attribute_name: Attribute name.
        scenario_id: Scenario ID; the master scenario is used when None.

    Returns:
        TimeseriesMetadata with length and type information.

    Example:
        >>> meta = get_timeseries_metadata("model.db", 123, "p")
        >>> print(f"Length: {meta.length}, Type: {meta.data_type}, Unit: {meta.unit}")
    """
    with database_context(db_path, read_only=True) as conn:
        return _get_timeseries_metadata(
            conn, component_id, attribute_name, scenario_id
        )
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def set_timeseries(
    db_path: str,
    component_id: int,
    attribute_name: str,
    values: Union[List[float], np.ndarray, Timeseries],
    scenario_id: Optional[int] = None
) -> None:
    """
    Store timeseries data using the efficient array-based format.

    Accepts a plain list, a 1-D numpy array, or an existing Timeseries
    object. Every input path is normalized to a flat ``List[float]``
    before writing, so integer lists/arrays and Timeseries values are
    stored consistently with float lists.

    Args:
        db_path: Path to the database file.
        component_id: Component ID to write to.
        attribute_name: Attribute name.
        values: Timeseries values as list, numpy array, or Timeseries object.
        scenario_id: Scenario ID; the master scenario is used when None.

    Raises:
        ValueError: If values is not a supported type, if a numpy array is
            not 1-D, or if an entry cannot be coerced to float.

    Example:
        >>> set_timeseries("model.db", 123, "p_set", [100.5, 95.2, 87.3])
        >>> import numpy as np
        >>> set_timeseries("model.db", 123, "p_max_pu", np.random.normal(100, 10, 8760))
    """
    if isinstance(values, Timeseries):
        raw = values.values
    elif isinstance(values, np.ndarray):
        # Nested lists from a multi-dimensional array would only fail
        # obscurely inside serialization — reject them up front.
        if values.ndim != 1:
            raise ValueError(f"values array must be 1-D, got shape {values.shape}")
        raw = values.tolist()
    elif isinstance(values, list):
        raw = values
    else:
        raise ValueError("values must be List[float], numpy.ndarray, or Timeseries")

    # Coerce every branch to float: previously only the plain-list branch
    # was coerced, so integer arrays / Timeseries values could reach the
    # serializer un-normalized.
    values_list = [float(v) for v in raw]

    with database_context(db_path) as conn:
        set_timeseries_attribute(
            conn, component_id, attribute_name, values_list, scenario_id
        )
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def get_multiple_timeseries(
    db_path: str,
    requests: List[dict],
    max_points: Optional[int] = None
) -> List[Timeseries]:
    """
    Load several timeseries over a single database connection.

    More efficient than repeated ``get_timeseries`` calls when many series
    come from the same database, since the connection is opened only once.

    Args:
        db_path: Path to the database file.
        requests: List of dicts with keys ``component_id``,
            ``attribute_name``, and optionally ``scenario_id``.
        max_points: Maximum number of points for sampling (applied to all).

    Returns:
        List of Timeseries objects, one per request, in request order.

    Example:
        >>> requests = [
        ...     {"component_id": 123, "attribute_name": "p"},
        ...     {"component_id": 124, "attribute_name": "p"},
        ...     {"component_id": 125, "attribute_name": "p", "scenario_id": 2}
        ... ]
        >>> timeseries_list = get_multiple_timeseries("model.db", requests)
    """
    with database_context(db_path, read_only=True) as conn:
        # Missing required keys raise KeyError, exactly as direct indexing did.
        return [
            _get_timeseries(
                conn,
                req["component_id"],
                req["attribute_name"],
                req.get("scenario_id"),
                None,
                None,
                max_points,
            )
            for req in requests
        ]
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
# ============================================================================
|
|
207
|
+
# UTILITY FUNCTIONS
|
|
208
|
+
# ============================================================================
|
|
209
|
+
|
|
210
|
+
def timeseries_to_numpy(timeseries: Timeseries) -> np.ndarray:
    """
    Convert a Timeseries into a numpy array for scientific computing.

    The values are copied into a new float32 array (float32 keeps the
    memory footprint low for long series).

    Args:
        timeseries: Timeseries object to convert.

    Returns:
        numpy array of the values with float32 dtype.

    Example:
        >>> ts = get_timeseries("model.db", 123, "p")
        >>> arr = timeseries_to_numpy(ts)
        >>> print(f"Mean: {arr.mean():.2f}, Std: {arr.std():.2f}")
    """
    return np.asarray(timeseries.values, dtype=np.float32)
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def numpy_to_timeseries(
    array: np.ndarray,
    data_type: str = "float",
    unit: Optional[str] = None,
    is_input: bool = True
) -> Timeseries:
    """
    Build a Timeseries object from a numpy array (or any iterable).

    Args:
        array: numpy array of values; any iterable of numbers also works.
        data_type: Data type string (default: "float").
        unit: Unit string (optional).
        is_input: Whether this is input data (default: True).

    Returns:
        Timeseries object starting at index 0.

    Example:
        >>> import numpy as np
        >>> ts = numpy_to_timeseries(np.random.normal(100, 10, 8760), unit="MW")
        >>> print(f"Created timeseries with {ts.length} points")
    """
    # tolist() when available (numpy fast path), plain list() otherwise.
    raw = array.tolist() if hasattr(array, 'tolist') else list(array)
    float_values = [float(v) for v in raw]
    return Timeseries(
        values=float_values,
        length=len(float_values),
        start_index=0,
        data_type=data_type,
        unit=unit,
        is_input=is_input,
    )
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
def validate_timeseries_alignment(
    db_path: str,
    network_id: int,
    values: Union[List[float], np.ndarray, Timeseries]
) -> dict:
    """
    Check that timeseries data matches the network's time periods.

    Best-effort: failures while reading the network's periods are reported
    in the result dict rather than raised.

    Args:
        db_path: Path to the database file.
        network_id: Network ID whose time periods define the expected length.
        values: Timeseries values to validate (list, numpy array, or
            Timeseries).

    Returns:
        Dict with keys ``is_valid``, ``expected_length``, ``actual_length``,
        and ``issues`` (list of human-readable problem descriptions).

    Raises:
        ValueError: If values is not a supported type.

    Example:
        >>> result = validate_timeseries_alignment("model.db", 1, [100.0] * 8760)
        >>> if not result["is_valid"]:
        ...     print(f"Alignment issues: {result['issues']}")
    """
    # Normalize input to a flat list so its length can be measured.
    if isinstance(values, Timeseries):
        normalized = values.values
    elif isinstance(values, np.ndarray):
        normalized = values.tolist()
    elif isinstance(values, list):
        normalized = [float(v) for v in values]
    else:
        raise ValueError("values must be List[float], numpy.ndarray, or Timeseries")

    actual = len(normalized)

    with database_context(db_path, read_only=True) as conn:
        # Imported here (as in the original) to avoid a module-level cycle
        # with pyconvexity.models.network.
        from pyconvexity.models.network import get_network_time_periods

        try:
            expected = len(get_network_time_periods(conn, network_id))
        except Exception as e:
            # Deliberate best-effort: surface the failure in the result
            # instead of propagating it to the caller.
            return {
                "is_valid": False,
                "expected_length": 0,
                "actual_length": actual,
                "issues": [f"Failed to get network time periods: {e}"]
            }

        problems = []
        if actual < expected:
            problems.append(f"Missing {expected - actual} time periods")
        elif actual > expected:
            problems.append(f"Extra {actual - expected} time periods")

        return {
            "is_valid": actual == expected,
            "expected_length": expected,
            "actual_length": actual,
            "issues": problems
        }
|
|
327
|
+
|
pyconvexity/validation/rules.py
CHANGED
|
@@ -10,7 +10,7 @@ import logging
|
|
|
10
10
|
from typing import Dict, Any, Optional, List
|
|
11
11
|
|
|
12
12
|
from pyconvexity.core.types import (
|
|
13
|
-
ValidationRule, StaticValue,
|
|
13
|
+
ValidationRule, StaticValue, TimePeriod, TimeseriesValidationResult
|
|
14
14
|
)
|
|
15
15
|
from pyconvexity.core.errors import (
|
|
16
16
|
ValidationError, InvalidDataType
|
|
@@ -194,7 +194,7 @@ def validate_static_value(value: StaticValue, rule: ValidationRule) -> None:
|
|
|
194
194
|
def validate_timeseries_alignment(
|
|
195
195
|
conn: sqlite3.Connection,
|
|
196
196
|
network_id: int,
|
|
197
|
-
timeseries: List[
|
|
197
|
+
timeseries: List[float]
|
|
198
198
|
) -> TimeseriesValidationResult:
|
|
199
199
|
"""
|
|
200
200
|
Validate timeseries alignment with network periods.
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: pyconvexity
|
|
3
|
-
Version: 0.1.2
|
|
3
|
+
Version: 0.1.4
|
|
4
4
|
Summary: Python library for energy system modeling and optimization with PyPSA
|
|
5
5
|
Author-email: Convexity Team <info@convexity.com>
|
|
6
6
|
License: MIT
|
|
@@ -32,6 +32,9 @@ Requires-Dist: xlsxwriter>=3.0.0; extra == "excel"
|
|
|
32
32
|
Provides-Extra: netcdf
|
|
33
33
|
Requires-Dist: netcdf4>=1.6.0; extra == "netcdf"
|
|
34
34
|
Requires-Dist: xarray>=2022.3.0; extra == "netcdf"
|
|
35
|
+
Provides-Extra: data
|
|
36
|
+
Requires-Dist: country-converter>=1.0.0; extra == "data"
|
|
37
|
+
Requires-Dist: pyyaml>=6.0.0; extra == "data"
|
|
35
38
|
Provides-Extra: dev
|
|
36
39
|
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
37
40
|
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
|
|
@@ -40,7 +43,7 @@ Requires-Dist: isort>=5.10.0; extra == "dev"
|
|
|
40
43
|
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
41
44
|
Requires-Dist: pre-commit>=2.20.0; extra == "dev"
|
|
42
45
|
Provides-Extra: all
|
|
43
|
-
Requires-Dist: pyconvexity[excel,netcdf,pypsa]; extra == "all"
|
|
46
|
+
Requires-Dist: pyconvexity[data,excel,netcdf,pypsa]; extra == "all"
|
|
44
47
|
|
|
45
48
|
# PyConvexity
|
|
46
49
|
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
pyconvexity/__init__.py,sha256=eiAFroO4n-z8F0jTLpJgBIO7vtSxu9ovu3G2N-qqpUo,4783
|
|
2
|
+
pyconvexity/_version.py,sha256=Wzf5T3NBDfhQoTnhnRNHSlAsE0XMqbclXG-M81Vas70,22
|
|
3
|
+
pyconvexity/timeseries.py,sha256=4p1Tdpa1otqDvCq2zppA4tw660sF_XWb8Xobib-cCms,11340
|
|
4
|
+
pyconvexity/core/__init__.py,sha256=MgVa5rrRWIi2w1UI1P4leiBntvHeeOPv0Thm0DEXBHo,1209
|
|
5
|
+
pyconvexity/core/database.py,sha256=M02q4UkJqAPeTXuwng9I7kHm16reJ7eq7wccWxnhE5I,15227
|
|
6
|
+
pyconvexity/core/errors.py,sha256=HhrrOOEBJrzyB56_pmqh3NWvX6uHqWWNkdE5XM16rYI,2881
|
|
7
|
+
pyconvexity/core/types.py,sha256=LkB49qklA-9ObI0br0IazfVxvWvYzQrYMdwzrqqAMYQ,11335
|
|
8
|
+
pyconvexity/data/README.md,sha256=-tyDHVjqzfWbVvgM4yYYx8cysmgvFXI6plVQNxSHBmo,3156
|
|
9
|
+
pyconvexity/data/__init__.py,sha256=BrgC3LGSlReLbC0EpM6UxywwUHxRvts379CvZFE0qWU,514
|
|
10
|
+
pyconvexity/data/__pycache__/__init__.cpython-313.pyc,sha256=F2XXlQZlN5aoFmi7Ar1B3RW-DZjIUqp81fr3zNsPxpY,617
|
|
11
|
+
pyconvexity/data/loaders/__init__.py,sha256=6xPtOmH2n1mNby7ZjA-2Mk9F48Q246RNsyMnCnJ6gwA,60
|
|
12
|
+
pyconvexity/data/loaders/cache.py,sha256=nnz8bV3slSehOT0alexFga9tM1XoJqWHBGqaXvz132U,7299
|
|
13
|
+
pyconvexity/data/loaders/__pycache__/__init__.cpython-313.pyc,sha256=AuT3aXy3v5gssxdD1_CBaKqNAVmDt6GBwFSyAe3jHow,265
|
|
14
|
+
pyconvexity/data/loaders/__pycache__/cache.cpython-313.pyc,sha256=9_xMQN6AciMzbzhCmWAzvEKRXfRINmfRsO8Dyg0_CUQ,9804
|
|
15
|
+
pyconvexity/data/schema/01_core_schema.sql,sha256=Ww3eD71JGIBNw-t_eVJ6TVGju-sEDzpLqyRGqGDje54,18871
|
|
16
|
+
pyconvexity/data/schema/02_data_metadata.sql,sha256=oOfwa3PLY2_8rxKDD4cpDeqP5I_PdahcF8m6cSKStJM,10732
|
|
17
|
+
pyconvexity/data/schema/03_validation_data.sql,sha256=1rKFi9y6jQ2OnfH32jnIKnZ5WtB8eG43hz0OVJhwn3w,58325
|
|
18
|
+
pyconvexity/data/schema/04_scenario_schema.sql,sha256=sL4PySJNHIthXsnoJ2T5pdXUbpAi94ld0XGuU8LwNuQ,4641
|
|
19
|
+
pyconvexity/data/sources/__init__.py,sha256=Dn6_oS7wB-vLjMj2YeXlmIl6hNjACbicimSabKxIWnc,108
|
|
20
|
+
pyconvexity/data/sources/gem.py,sha256=Ft2pAYsWe1V9poRge2Q4xdNt15XkG-USSR0XR9KFmsY,14935
|
|
21
|
+
pyconvexity/data/sources/__pycache__/__init__.cpython-313.pyc,sha256=9x5FyLxmTE5ZRaEFNSF375KBd_rDLY6pGHGSWPpcxxA,313
|
|
22
|
+
pyconvexity/data/sources/__pycache__/gem.cpython-313.pyc,sha256=CFyCxtqSg-_M9PiGgWcXl4A3OGDrfYi6NHFpDXW8__Q,14485
|
|
23
|
+
pyconvexity/io/__init__.py,sha256=glGzXBRGHROrTS8xe-RTGyRQUgsaksafxXVt9GpW63E,803
|
|
24
|
+
pyconvexity/io/excel_exporter.py,sha256=pjgvTs5vq9K61mNOVutEzaH5Zx4FgrDG4Xc_YmXhE5o,48483
|
|
25
|
+
pyconvexity/io/excel_importer.py,sha256=M7YcBqKUVzOMoR5HN-v8M2UnZgHRfhqgXBMUVD10-IQ,56898
|
|
26
|
+
pyconvexity/io/netcdf_exporter.py,sha256=AMM-uXBj8sh86n5m57aZ6S7LulAyIx_HM-eM-26BrWQ,7428
|
|
27
|
+
pyconvexity/io/netcdf_importer.py,sha256=nv4CYYqnbCBeznwCU_JGBMTbg-BGNpXKlsqbu2R8fTU,72152
|
|
28
|
+
pyconvexity/models/__init__.py,sha256=-CEdfjwOp-6XvR4vVyV1Z6umF1axs82zzvv7VRZNcys,1690
|
|
29
|
+
pyconvexity/models/attributes.py,sha256=LTvYF0hl56HeLjS8ZVocZWLhbLRTNhmZ5gUKxf93eSE,18254
|
|
30
|
+
pyconvexity/models/components.py,sha256=yccDW9ROtjsk5eIO38Tr420VUj9KeV03IVLrfmZgj3c,14942
|
|
31
|
+
pyconvexity/models/network.py,sha256=ePydR3l60-AaOBbrA4uld3hu3X9sB7GOSyBYMh3_rBA,13117
|
|
32
|
+
pyconvexity/models/scenarios.py,sha256=6-devNWZccnFeQr3IsP19GkO6Ixp914RKD-6lIduN-A,5164
|
|
33
|
+
pyconvexity/solvers/__init__.py,sha256=zoVf6T2Tmyj2XOeiVbEvaIMOX584orqCz1q9t1oXy0M,674
|
|
34
|
+
pyconvexity/solvers/pypsa/__init__.py,sha256=KZqYDo7CvwB-5Kp784xxxtdn5kRcmn3gGSRlaQdDA4c,554
|
|
35
|
+
pyconvexity/solvers/pypsa/api.py,sha256=u1MTSZkCxXMukSUQ83oDz_pWf4kRXKV_Bk7b0yd7lV4,16762
|
|
36
|
+
pyconvexity/solvers/pypsa/batch_loader.py,sha256=eQb8B11akQYtH3aK93WAOoXEI-ktk4imATw9gaYDNR4,13547
|
|
37
|
+
pyconvexity/solvers/pypsa/builder.py,sha256=ikcLPzcrTE0e3Fv6zZPIUTTEL0zMDMYI0s8te6sA0j4,24174
|
|
38
|
+
pyconvexity/solvers/pypsa/constraints.py,sha256=uX0X_EDFak7YtnjgNxj3CSv1nxDQI-fT4eg9lyySAv4,13678
|
|
39
|
+
pyconvexity/solvers/pypsa/solver.py,sha256=NFZ4_8inW-xHkv7pTrfq09vkKFINDwdTuJAQTn8lMfg,54555
|
|
40
|
+
pyconvexity/solvers/pypsa/storage.py,sha256=T-0qEryiEy_8G4KiscPoiiWvTPd_OGqpLczW0_Xm85E,87331
|
|
41
|
+
pyconvexity/validation/__init__.py,sha256=_6SVqXkaDFqmagub_O064Zm_QIdBrOra-Gvvbo9vM4I,549
|
|
42
|
+
pyconvexity/validation/rules.py,sha256=6Kak12BVfUpjmgB5B7Wre55eGc5e1dvIdFca-vN-IFI,9296
|
|
43
|
+
pyconvexity-0.1.4.dist-info/METADATA,sha256=fN4rhmba5CvT3EWLdpadovCMQUvZSZC7pcuV-Q1eD44,4880
|
|
44
|
+
pyconvexity-0.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
45
|
+
pyconvexity-0.1.4.dist-info/top_level.txt,sha256=wFPEDXVaebR3JO5Tt3HNse-ws5aROCcxEco15d6j64s,12
|
|
46
|
+
pyconvexity-0.1.4.dist-info/RECORD,,
|
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
pyconvexity/__init__.py,sha256=mEtafBwQ1A9s-hEv3o3XeXlJ7iFtbdS691wVUUw5miY,3303
|
|
2
|
-
pyconvexity/_version.py,sha256=Cu2pGbVzHvik6YUUC3R9lxs9dHnXZq1Uj0YfLLblLUs,133
|
|
3
|
-
pyconvexity/core/__init__.py,sha256=4SYAE4zqzGIRFSP4IoT7EzK-LCTB1HLe9EWhfi2aUmU,1253
|
|
4
|
-
pyconvexity/core/database.py,sha256=4OW1sgwDT4ltxdW6b250ytTlsC9sk26Rz0VrveXSL4A,9605
|
|
5
|
-
pyconvexity/core/errors.py,sha256=HhrrOOEBJrzyB56_pmqh3NWvX6uHqWWNkdE5XM16rYI,2881
|
|
6
|
-
pyconvexity/core/types.py,sha256=eoVOAcDJWzjJKO9lYN7O17Us0XbahBpVBwv6uxdldh0,8508
|
|
7
|
-
pyconvexity/data/schema/01_core_schema.sql,sha256=Vt6vASZEASPPQYuhtsARLj3PxP7VdZtfxmGcs-Id-xc,18785
|
|
8
|
-
pyconvexity/data/schema/02_data_metadata.sql,sha256=9E7tnBBu0wc4w3YijyPf_vG_8KVFyu7cu59TyAP5F94,21818
|
|
9
|
-
pyconvexity/data/schema/03_validation_data.sql,sha256=1rKFi9y6jQ2OnfH32jnIKnZ5WtB8eG43hz0OVJhwn3w,58325
|
|
10
|
-
pyconvexity/data/schema/04_scenario_schema.sql,sha256=sL4PySJNHIthXsnoJ2T5pdXUbpAi94ld0XGuU8LwNuQ,4641
|
|
11
|
-
pyconvexity/models/__init__.py,sha256=eVwf0ZTTEq1nM9M3NSMvj2yLPUOPNKMXv2A5GLT34-c,1470
|
|
12
|
-
pyconvexity/models/attributes.py,sha256=gh02j_hpSeyCNg0Jt20i7Y7wOntajnuzI926W5wzA3Q,14130
|
|
13
|
-
pyconvexity/models/components.py,sha256=K7QWelMVU_D18skvBZbap9dxP2AMS2116fcpmemkE6U,14629
|
|
14
|
-
pyconvexity/models/network.py,sha256=-itmot8StUdXogDpZUhGVIUC5uAEucYQ1LiTN1vPdA4,12923
|
|
15
|
-
pyconvexity/validation/__init__.py,sha256=_6SVqXkaDFqmagub_O064Zm_QIdBrOra-Gvvbo9vM4I,549
|
|
16
|
-
pyconvexity/validation/rules.py,sha256=bshO2Ibw8tBurg708Dmf79rIBoGV32t-jNHltjap9Pw,9323
|
|
17
|
-
pyconvexity-0.1.2.dist-info/METADATA,sha256=Lmk43_bNLN4jVbnqynhutZq9sK29vywiwbKBFjAWL7M,4751
|
|
18
|
-
pyconvexity-0.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
19
|
-
pyconvexity-0.1.2.dist-info/top_level.txt,sha256=wFPEDXVaebR3JO5Tt3HNse-ws5aROCcxEco15d6j64s,12
|
|
20
|
-
pyconvexity-0.1.2.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|