pyconvexity-0.4.3-py3-none-any.whl
This diff shows the content of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the package exactly as it appears in that registry.
- pyconvexity/__init__.py +226 -0
- pyconvexity/_version.py +1 -0
- pyconvexity/core/__init__.py +60 -0
- pyconvexity/core/database.py +485 -0
- pyconvexity/core/errors.py +106 -0
- pyconvexity/core/types.py +400 -0
- pyconvexity/data/README.md +101 -0
- pyconvexity/data/__init__.py +17 -0
- pyconvexity/data/loaders/__init__.py +3 -0
- pyconvexity/data/loaders/cache.py +213 -0
- pyconvexity/data/schema/01_core_schema.sql +420 -0
- pyconvexity/data/schema/02_data_metadata.sql +120 -0
- pyconvexity/data/schema/03_validation_data.sql +506 -0
- pyconvexity/data/sources/__init__.py +5 -0
- pyconvexity/data/sources/gem.py +442 -0
- pyconvexity/io/__init__.py +26 -0
- pyconvexity/io/excel_exporter.py +1226 -0
- pyconvexity/io/excel_importer.py +1381 -0
- pyconvexity/io/netcdf_exporter.py +197 -0
- pyconvexity/io/netcdf_importer.py +1833 -0
- pyconvexity/models/__init__.py +195 -0
- pyconvexity/models/attributes.py +730 -0
- pyconvexity/models/carriers.py +159 -0
- pyconvexity/models/components.py +611 -0
- pyconvexity/models/network.py +503 -0
- pyconvexity/models/results.py +148 -0
- pyconvexity/models/scenarios.py +234 -0
- pyconvexity/solvers/__init__.py +29 -0
- pyconvexity/solvers/pypsa/__init__.py +24 -0
- pyconvexity/solvers/pypsa/api.py +460 -0
- pyconvexity/solvers/pypsa/batch_loader.py +307 -0
- pyconvexity/solvers/pypsa/builder.py +675 -0
- pyconvexity/solvers/pypsa/constraints.py +405 -0
- pyconvexity/solvers/pypsa/solver.py +1509 -0
- pyconvexity/solvers/pypsa/storage.py +2048 -0
- pyconvexity/timeseries.py +330 -0
- pyconvexity/validation/__init__.py +25 -0
- pyconvexity/validation/rules.py +312 -0
- pyconvexity-0.4.3.dist-info/METADATA +47 -0
- pyconvexity-0.4.3.dist-info/RECORD +42 -0
- pyconvexity-0.4.3.dist-info/WHEEL +5 -0
- pyconvexity-0.4.3.dist-info/top_level.txt +1 -0
pyconvexity/data/loaders/cache.py
@@ -0,0 +1,213 @@
+"""
+Caching functionality for PyConvexity data operations.
+
+This module handles caching of processed datasets to improve performance.
+"""
+
+import pandas as pd
+import hashlib
+import json
+from pathlib import Path
+from typing import Dict, Any, Optional
+import logging
+from datetime import datetime, timedelta
+
+logger = logging.getLogger(__name__)
+
+
+class DataCache:
+    """Manages caching of processed datasets."""
+
+    def __init__(self, cache_dir: Optional[str] = None):
+        """
+        Initialize the cache manager.
+
+        Args:
+            cache_dir: Directory to store cache files. Defaults to 'data/cache'
+        """
+        if cache_dir is None:
+            cache_dir = "data/cache"
+
+        self.cache_dir = Path(cache_dir)
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
+
+        # Cache metadata file
+        self.metadata_file = self.cache_dir / "cache_metadata.json"
+        self._load_metadata()
+
+    def _load_metadata(self):
+        """Load cache metadata from file."""
+        if self.metadata_file.exists():
+            try:
+                with open(self.metadata_file, "r") as f:
+                    self.metadata = json.load(f)
+            except (json.JSONDecodeError, FileNotFoundError):
+                self.metadata = {}
+        else:
+            self.metadata = {}
+
+    def _save_metadata(self):
+        """Save cache metadata to file."""
+        with open(self.metadata_file, "w") as f:
+            json.dump(self.metadata, f, indent=2)
+
+    def _get_cache_key(self, dataset_name: str, filters: Dict[str, Any]) -> str:
+        """Generate a unique cache key for a dataset and filters combination."""
+        # Create a hash of the filters
+        filters_str = json.dumps(filters, sort_keys=True)
+        filters_hash = hashlib.md5(filters_str.encode()).hexdigest()
+
+        return f"{dataset_name}_{filters_hash}"
+
+    def _get_cache_file_path(self, cache_key: str) -> Path:
+        """Get the file path for a cache key."""
+        return self.cache_dir / f"{cache_key}.parquet"
+
+    def get_cached_data(
+        self, dataset_name: str, filters: Dict[str, Any]
+    ) -> Optional[pd.DataFrame]:
+        """
+        Retrieve cached data if available and not expired.
+
+        Args:
+            dataset_name: Name of the dataset
+            filters: Filters applied to the dataset
+
+        Returns:
+            pandas.DataFrame or None: Cached data if available and valid
+        """
+        cache_key = self._get_cache_key(dataset_name, filters)
+        cache_file = self._get_cache_file_path(cache_key)
+
+        # Check if cache file exists
+        if not cache_file.exists():
+            return None
+
+        # Check if cache entry exists in metadata
+        if cache_key not in self.metadata:
+            # Clean up orphaned cache file
+            cache_file.unlink(missing_ok=True)
+            return None
+
+        # Check if cache is expired (default: 7 days)
+        cache_info = self.metadata[cache_key]
+        created_time = datetime.fromisoformat(cache_info["created"])
+        max_age = timedelta(days=cache_info.get("max_age_days", 7))
+
+        if datetime.now() - created_time > max_age:
+            logger.info(f"Cache expired for '{dataset_name}', removing...")
+            self._remove_cache_entry(cache_key)
+            return None
+
+        # Load cached data
+        try:
+            cached_data = pd.read_parquet(cache_file)
+            logger.info(
+                f"Loaded cached data for '{dataset_name}' ({len(cached_data)} rows)"
+            )
+            return cached_data
+        except Exception as e:
+            logger.warning(f"Failed to load cached data for '{dataset_name}': {e}")
+            self._remove_cache_entry(cache_key)
+            return None
+
+    def cache_data(
+        self,
+        dataset_name: str,
+        data: pd.DataFrame,
+        filters: Dict[str, Any],
+        max_age_days: int = 7,
+    ):
+        """
+        Cache processed data.
+
+        Args:
+            dataset_name: Name of the dataset
+            data: Processed pandas DataFrame
+            filters: Filters applied to the dataset
+            max_age_days: Maximum age of cache in days
+        """
+        cache_key = self._get_cache_key(dataset_name, filters)
+        cache_file = self._get_cache_file_path(cache_key)
+
+        # Save data to parquet file
+        data.to_parquet(cache_file, index=False)
+
+        # Update metadata
+        self.metadata[cache_key] = {
+            "dataset_name": dataset_name,
+            "filters": filters,
+            "created": datetime.now().isoformat(),
+            "max_age_days": max_age_days,
+            "rows": len(data),
+            "columns": list(data.columns),
+        }
+
+        self._save_metadata()
+        logger.info(f"Cached data for '{dataset_name}' ({len(data)} rows)")
+
+    def _remove_cache_entry(self, cache_key: str):
+        """Remove a cache entry and its file."""
+        cache_file = self._get_cache_file_path(cache_key)
+        cache_file.unlink(missing_ok=True)
+
+        if cache_key in self.metadata:
+            del self.metadata[cache_key]
+            self._save_metadata()
+
+    def clear_cache(self, dataset_name: Optional[str] = None):
+        """
+        Clear cache entries.
+
+        Args:
+            dataset_name: If provided, only clear cache for this dataset
+        """
+        keys_to_remove = []
+
+        for cache_key, info in self.metadata.items():
+            if dataset_name is None or info["dataset_name"] == dataset_name:
+                keys_to_remove.append(cache_key)
+
+        for key in keys_to_remove:
+            self._remove_cache_entry(key)
+
+        logger.info(f"Cleared {len(keys_to_remove)} cache entries")
+
+    def get_cache_info(self) -> Dict[str, Any]:
+        """Get information about the cache."""
+        total_size = 0
+        dataset_counts = {}
+
+        for cache_key, info in self.metadata.items():
+            dataset_name = info["dataset_name"]
+            dataset_counts[dataset_name] = dataset_counts.get(dataset_name, 0) + 1
+
+            cache_file = self._get_cache_file_path(cache_key)
+            if cache_file.exists():
+                total_size += cache_file.stat().st_size
+
+        return {
+            "total_entries": len(self.metadata),
+            "total_size_mb": round(total_size / (1024 * 1024), 2),
+            "dataset_counts": dataset_counts,
+            "cache_dir": str(self.cache_dir),
+        }
+
+    def cleanup_expired_cache(self):
+        """Remove expired cache entries."""
+        expired_keys = []
+
+        for cache_key, info in self.metadata.items():
+            created_time = datetime.fromisoformat(info["created"])
+            max_age = timedelta(days=info.get("max_age_days", 7))
+
+            if datetime.now() - created_time > max_age:
+                expired_keys.append(cache_key)
+
+        for key in expired_keys:
+            self._remove_cache_entry(key)
+
+        if expired_keys:
+            logger.info(f"Cleaned up {len(expired_keys)} expired cache entries")
+        else:
+            logger.info("No expired cache entries found")
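A minimal usage sketch of the DataCache class above (illustrative only: the dataset name and filters are invented, and the parquet round-trip requires a parquet engine such as pyarrow to be installed):

    import pandas as pd

    from pyconvexity.data.loaders.cache import DataCache

    cache = DataCache()  # cache files land in ./data/cache by default

    filters = {"country": "DE", "status": "operating"}  # hypothetical filter set
    df = cache.get_cached_data("gem_units", filters)
    if df is None:
        # Cache miss: build the frame however the application normally would,
        # then store it for up to three days.
        df = pd.DataFrame({"unit": ["a", "b"], "capacity_mw": [100.0, 250.0]})
        cache.cache_data("gem_units", df, filters, max_age_days=3)

    print(cache.get_cache_info())

Note that cache keys are an MD5 of the JSON-serialized filters, so any filter value json.dumps cannot serialize will raise before the cache is even consulted.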
pyconvexity/data/schema/01_core_schema.sql
@@ -0,0 +1,420 @@
+-- ============================================================================
+-- CORE ENERGY NETWORK SCHEMA (SIMPLIFIED)
+-- Single-network-per-file design for desktop SQLite
+-- Optimized for fast timeseries access and simple Rust/Python API
+-- Version 3.1.0 - Single network + Sparse scenarios + Raw timeseries
+-- ============================================================================
+
+-- ============================================================================
+-- NETWORK METADATA
+-- ============================================================================
+
+-- Network metadata - single row per database file
+CREATE TABLE network_metadata (
+    name TEXT NOT NULL,
+    description TEXT,
+
+    -- Time axis definition (single source of truth)
+    time_start DATETIME NOT NULL,
+    time_end DATETIME NOT NULL,
+    time_interval TEXT NOT NULL, -- ISO 8601 duration (PT1H, PT30M, PT2H, etc.)
+
+    -- Network-level flags
+    locked BOOLEAN DEFAULT 0, -- Prevent accidental edits to base network
+
+    -- Metadata
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+
+    CONSTRAINT valid_time_range CHECK (time_end > time_start)
+);
+
+-- Network time periods - optimized storage using computed timestamps
+CREATE TABLE network_time_periods (
+    period_count INTEGER NOT NULL, -- Total number of periods (e.g., 8760 for hourly year)
+    start_timestamp INTEGER NOT NULL, -- Unix timestamp of first period
+    interval_seconds INTEGER NOT NULL, -- Seconds between periods (3600 for hourly)
+
+    CONSTRAINT valid_period_count CHECK (period_count > 0),
+    CONSTRAINT valid_interval CHECK (interval_seconds > 0)
+);
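The three columns above fully determine the time axis; no per-period rows are stored. A sketch of the reconstruction (my own helper, not part of the package):

    from datetime import datetime, timedelta, timezone

    def expand_time_axis(period_count: int, start_timestamp: int,
                         interval_seconds: int) -> list:
        """Recompute every period timestamp from the three stored fields."""
        start = datetime.fromtimestamp(start_timestamp, tz=timezone.utc)
        step = timedelta(seconds=interval_seconds)
        return [start + i * step for i in range(period_count)]

    # A full hourly year starting 2030-01-01 00:00 UTC: 8760 periods.
    axis = expand_time_axis(8760, 1893456000, 3600)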
+
+-- ============================================================================
+-- CARRIERS - ENERGY TYPES
+-- ============================================================================
+
+-- Carriers table - energy carriers (electricity, gas, heat, etc.)
+CREATE TABLE carriers (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    name TEXT NOT NULL UNIQUE,
+
+    -- Carrier properties from PyPSA reference
+    co2_emissions REAL DEFAULT 0.0, -- tonnes/MWh
+    color TEXT, -- Plotting color
+    nice_name TEXT, -- Display name
+    max_growth REAL DEFAULT NULL, -- MW - can be infinite
+    max_relative_growth REAL DEFAULT 0.0, -- MW
+    curtailable BOOLEAN DEFAULT FALSE, -- Whether the carrier can be curtailed
+
+    -- Metadata
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
+);
+
+-- ============================================================================
+-- UNIFIED COMPONENT SYSTEM
+-- ============================================================================
+
+-- Components table - unified table for all network components
+CREATE TABLE components (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    component_type TEXT NOT NULL, -- 'BUS', 'GENERATOR', 'LOAD', 'LINE', 'LINK', 'STORAGE_UNIT', 'STORE', 'UNMET_LOAD', 'CONSTRAINT', 'TRANSFORMER', 'SHUNT_IMPEDANCE'
+    name TEXT NOT NULL UNIQUE,
+
+    -- Geographic location (optional)
+    latitude REAL,
+    longitude REAL,
+    geometry TEXT, -- GeoJSON geometry (Point, LineString, Polygon, etc.)
+
+    -- Energy carrier reference
+    carrier_id INTEGER,
+
+    -- Bus connections
+    bus_id INTEGER, -- Single bus connection
+    bus0_id INTEGER, -- First bus for lines/links
+    bus1_id INTEGER, -- Second bus for lines/links
+
+    -- Metadata
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+
+    CONSTRAINT fk_components_carrier
+        FOREIGN KEY (carrier_id) REFERENCES carriers(id),
+    CONSTRAINT fk_components_bus
+        FOREIGN KEY (bus_id) REFERENCES components(id),
+    CONSTRAINT fk_components_bus0
+        FOREIGN KEY (bus0_id) REFERENCES components(id),
+    CONSTRAINT fk_components_bus1
+        FOREIGN KEY (bus1_id) REFERENCES components(id),
+    CONSTRAINT valid_component_type
+        CHECK (component_type IN ('BUS', 'GENERATOR', 'LOAD', 'LINE', 'LINK', 'STORAGE_UNIT', 'STORE', 'UNMET_LOAD', 'CONSTRAINT', 'TRANSFORMER', 'SHUNT_IMPEDANCE'))
+);
+
+-- Essential indexes only
+CREATE INDEX idx_components_type ON components(component_type);
+CREATE INDEX idx_components_name ON components(name);
+CREATE INDEX idx_components_bus ON components(bus_id);
+CREATE INDEX idx_components_bus0 ON components(bus0_id);
+CREATE INDEX idx_components_bus1 ON components(bus1_id);
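To make the unified design concrete, here is an illustrative sqlite3 session registering a carrier, a bus, and a generator (the database path and all names are invented):

    import sqlite3

    conn = sqlite3.connect("network.db")  # hypothetical file initialized with this schema
    conn.execute("PRAGMA foreign_keys = ON")

    carrier_id = conn.execute(
        "INSERT INTO carriers (name, co2_emissions, nice_name) VALUES (?, ?, ?)",
        ("electricity", 0.0, "Electricity"),
    ).lastrowid

    bus_id = conn.execute(
        "INSERT INTO components (component_type, name, carrier_id) VALUES (?, ?, ?)",
        ("BUS", "bus_north", carrier_id),
    ).lastrowid

    # A generator attaches to its bus via bus_id; a LINE or LINK would set
    # bus0_id/bus1_id instead, and a BUS row leaves all three NULL.
    conn.execute(
        "INSERT INTO components (component_type, name, carrier_id, bus_id) VALUES (?, ?, ?, ?)",
        ("GENERATOR", "wind_north", carrier_id, bus_id),
    )
    conn.commit()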
+
+-- ============================================================================
+-- ATTRIBUTE VALIDATION SYSTEM
+-- ============================================================================
+
+-- Attribute validation rules table
+CREATE TABLE attribute_validation_rules (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    component_type TEXT NOT NULL,
+    attribute_name TEXT NOT NULL,
+    display_name TEXT,
+
+    -- Validation rules
+    data_type TEXT NOT NULL, -- 'float', 'boolean', 'string', 'int'
+    unit TEXT,
+    default_value TEXT,
+    allowed_storage_types TEXT NOT NULL, -- 'static', 'timeseries', 'static_or_timeseries'
+    is_required BOOLEAN DEFAULT FALSE,
+    is_input BOOLEAN DEFAULT TRUE,
+    description TEXT,
+
+    -- Constraints
+    min_value REAL,
+    max_value REAL,
+    allowed_values TEXT, -- JSON array
+
+    -- Grouping
+    group_name TEXT DEFAULT 'other',
+    to_save BOOLEAN DEFAULT TRUE,
+
+    CONSTRAINT uq_validation_rule
+        UNIQUE (component_type, attribute_name),
+    CONSTRAINT valid_component_type_validation
+        CHECK (component_type IN ('BUS', 'GENERATOR', 'LOAD', 'LINE', 'LINK', 'STORAGE_UNIT', 'STORE', 'UNMET_LOAD', 'CONSTRAINT', 'TRANSFORMER', 'SHUNT_IMPEDANCE')),
+    CONSTRAINT valid_data_type
+        CHECK (data_type IN ('float', 'boolean', 'string', 'int')),
+    CONSTRAINT valid_allowed_storage_types
+        CHECK (allowed_storage_types IN ('static', 'timeseries', 'static_or_timeseries')),
+    CONSTRAINT valid_group_name
+        CHECK (group_name IN ('other', 'basic', 'capacity', 'power_limits', 'energy', 'unit_commitment', 'ramping', 'costs', 'electrical')) -- 'other' is the column DEFAULT, so it must be allowed here
+);
+
+-- Essential indexes only
+CREATE INDEX idx_validation_component_type ON attribute_validation_rules(component_type);
+CREATE INDEX idx_validation_lookup ON attribute_validation_rules(component_type, attribute_name);
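Rules are keyed by (component_type, attribute_name). Continuing the sqlite3 session above, a sketch registering one hypothetical rule; the attribute name p_nom follows PyPSA convention, and the rule set the package actually ships lives in a later schema file (presumably 03_validation_data.sql in the listing above), not in this hunk:

    conn.execute(
        """
        INSERT INTO attribute_validation_rules
            (component_type, attribute_name, display_name, data_type, unit,
             default_value, allowed_storage_types, is_required, min_value, group_name)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """,
        ("GENERATOR", "p_nom", "Nominal Power", "float", "MW",
         "0.0", "static_or_timeseries", False, 0.0, "capacity"),
    )
    conn.commit()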
+
+-- ============================================================================
+-- SCENARIOS - SPARSE OVERRIDE APPROACH
+-- ============================================================================
+
+-- Scenarios table - represents alternative scenarios
+-- Base network has NO scenario (scenario_id = NULL in attributes)
+-- Supports both deterministic what-if scenarios (probability = NULL) and stochastic scenarios (probability set)
+-- System scenarios (is_system_scenario = TRUE) are reserved for special purposes like "Actual" values
+CREATE TABLE scenarios (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    name TEXT NOT NULL UNIQUE,
+    description TEXT,
+    probability REAL DEFAULT NULL, -- For stochastic optimization (NULL = deterministic what-if)
+
+    -- System scenario flags
+    is_system_scenario BOOLEAN DEFAULT FALSE, -- TRUE = system-reserved, cannot delete, excluded from solves
+    system_purpose TEXT DEFAULT NULL, -- 'actual' for actual/measured values, NULL for user scenarios
+
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+
+    CONSTRAINT valid_probability
+        CHECK (probability IS NULL OR (probability >= 0 AND probability <= 1)),
+    CONSTRAINT valid_system_purpose
+        CHECK (system_purpose IS NULL OR system_purpose IN ('actual'))
+);
+
+-- ============================================================================
+-- SYSTEM SCENARIO MANAGEMENT
+-- ============================================================================
+
+-- Trigger to auto-create the "Actual" system scenario when network_metadata is created
+CREATE TRIGGER create_actual_scenario_on_network_create
+AFTER INSERT ON network_metadata
+FOR EACH ROW
+WHEN NOT EXISTS (SELECT 1 FROM scenarios WHERE system_purpose = 'actual')
+BEGIN
+    INSERT INTO scenarios (name, description, is_system_scenario, system_purpose)
+    VALUES ('Actual', 'Actual/measured values for validation and comparison', TRUE, 'actual');
+END;
+
+-- Trigger to prevent deletion of system scenarios
+CREATE TRIGGER prevent_system_scenario_deletion
+BEFORE DELETE ON scenarios
+FOR EACH ROW
+WHEN OLD.is_system_scenario = TRUE
+BEGIN
+    SELECT RAISE(ABORT, 'Cannot delete system scenarios');
+END;
+
+-- Trigger to prevent modification of system scenario flags
+CREATE TRIGGER prevent_system_scenario_modification
+BEFORE UPDATE ON scenarios
+FOR EACH ROW
+WHEN OLD.is_system_scenario = TRUE AND (
+    NEW.is_system_scenario IS NOT OLD.is_system_scenario OR -- IS NOT rather than !=, so a change to NULL is still caught
+    NEW.system_purpose IS NOT OLD.system_purpose
+)
+BEGIN
+    SELECT RAISE(ABORT, 'Cannot modify system scenario flags');
+END;
+
+-- ============================================================================
+-- UNIFIED COMPONENT ATTRIBUTES - SPARSE SCENARIOS + RAW TIMESERIES
+-- ============================================================================
+
+-- Component attributes - sparse scenario overrides
+-- scenario_id = NULL → Base network (editable)
+-- scenario_id = 1    → Scenario 1 (overrides base, read-only)
+CREATE TABLE component_attributes (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    component_id INTEGER NOT NULL,
+    attribute_name TEXT NOT NULL,
+
+    -- Scenario support - NULL = base network, non-NULL = scenario override
+    scenario_id INTEGER, -- NULLABLE!
+
+    -- Storage type
+    storage_type TEXT NOT NULL CHECK (storage_type IN ('static', 'timeseries')),
+
+    -- Value storage
+    static_value TEXT, -- JSON-encoded static value (all data types)
+    timeseries_data BLOB, -- Raw f32 array (NOT Parquet!)
+
+    -- Cached metadata
+    data_type TEXT NOT NULL,
+    unit TEXT,
+    is_input BOOLEAN DEFAULT TRUE,
+
+    -- Metadata
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+
+    CONSTRAINT fk_attributes_component
+        FOREIGN KEY (component_id) REFERENCES components(id) ON DELETE CASCADE,
+    CONSTRAINT fk_attributes_scenario
+        FOREIGN KEY (scenario_id) REFERENCES scenarios(id) ON DELETE CASCADE,
+
+    -- Storage validation
+    CONSTRAINT check_exactly_one_storage_type CHECK (
+        (storage_type = 'static' AND static_value IS NOT NULL AND timeseries_data IS NULL) OR
+        (storage_type = 'timeseries' AND static_value IS NULL AND timeseries_data IS NOT NULL)
+    ),
+
+    -- Unique per component/attribute/scenario (NULL scenario counts as unique value)
+    CONSTRAINT uq_component_attribute_scenario
+        UNIQUE (component_id, attribute_name, scenario_id)
+);
+
+-- Essential indexes only
+CREATE INDEX idx_attributes_lookup ON component_attributes(
+    component_id, attribute_name, scenario_id
+);
+CREATE INDEX idx_attributes_scenario ON component_attributes(scenario_id);
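Reading an attribute under a scenario therefore needs a fallback: take the override row when it exists, else the base (NULL-scenario) row. A sketch of that lookup plus a decoder for the timeseries blob; the little-endian byte order is my assumption, since the schema only says "raw f32 array":

    import sqlite3

    import numpy as np

    def read_static_attribute(conn, component_id, attribute_name, scenario_id):
        """Return the scenario override if present, else the base-network value."""
        row = conn.execute(
            """
            SELECT static_value FROM component_attributes
            WHERE component_id = ? AND attribute_name = ?
              AND (scenario_id = ? OR scenario_id IS NULL)
            ORDER BY scenario_id IS NULL  -- override rows (non-NULL) sort first
            LIMIT 1
            """,
            (component_id, attribute_name, scenario_id),
        ).fetchone()
        return row[0] if row else None

    def decode_timeseries(blob):
        # One f32 per network time period, in period order (byte order assumed).
        return np.frombuffer(blob, dtype="<f4")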
+
+-- ============================================================================
+-- SCENARIO CACHE - MATERIALIZED VIEW FOR FAST SCENARIO COUNTING
+-- ============================================================================
+
+-- Scenario cache - stores precomputed scenario counts and details
+-- This is a materialized view that's kept in sync via application code
+-- Updated transactionally whenever attribute values change
+CREATE TABLE IF NOT EXISTS attribute_scenario_cache (
+    component_id INTEGER NOT NULL,
+    attribute_name TEXT NOT NULL,
+
+    -- Cached computed values
+    scenario_count INTEGER NOT NULL DEFAULT 1, -- Display count (includes synthetic base)
+    has_base_value BOOLEAN NOT NULL DEFAULT FALSE, -- TRUE if base network has value
+    has_scenario_values BOOLEAN NOT NULL DEFAULT FALSE, -- TRUE if any scenarios have values
+
+    -- Scenario details for dropdown (JSON array)
+    -- Format: [{scenario_id: 0, scenario_name: "Base", value: "123", has_value: true}, ...]
+    scenario_details TEXT,
+
+    -- Metadata
+    last_updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+
+    PRIMARY KEY (component_id, attribute_name),
+    CONSTRAINT fk_scenario_cache_component
+        FOREIGN KEY (component_id) REFERENCES components(id) ON DELETE CASCADE
+);
+
+-- Index for fast component-based lookups
+CREATE INDEX IF NOT EXISTS idx_scenario_cache_component
+    ON attribute_scenario_cache(component_id);
+
+-- Index for bulk table loads
+CREATE INDEX IF NOT EXISTS idx_scenario_cache_component_type
+    ON attribute_scenario_cache(component_id, attribute_name);
+
+-- NOTES:
+-- This cache is maintained by application code (Rust) in the same transaction
+-- as attribute writes. This ensures ACID guarantees - the cache can never be
+-- stale or inconsistent with the actual data.
+--
+-- Cache logic:
+-- - If has_scenario_values: scenario_count = num_scenarios + 1 (always include base)
+-- - If only has_base_value: scenario_count = 1 (no indicator shown)
+-- - If neither: scenario_count = 0 (no values at all)
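The cache logic in the notes reduces to one aggregate over component_attributes. A sketch of a full rebuild (the package maintains the cache incrementally from Rust per the notes above; this merely restates the counting rules as a query):

    REBUILD_SCENARIO_CACHE = """
    INSERT OR REPLACE INTO attribute_scenario_cache
        (component_id, attribute_name, scenario_count, has_base_value, has_scenario_values)
    SELECT
        component_id,
        attribute_name,
        CASE WHEN COUNT(scenario_id) > 0
             THEN COUNT(DISTINCT scenario_id) + 1  -- overrides exist: scenarios + base
             ELSE 1                                -- only the base row carries a value
        END,
        MAX(scenario_id IS NULL),                  -- any base (NULL-scenario) row?
        COUNT(scenario_id) > 0                     -- any scenario override rows?
    FROM component_attributes
    GROUP BY component_id, attribute_name
    """

    conn.execute(REBUILD_SCENARIO_CACHE)
    conn.commit()

The scenario_count = 0 case needs no row at all: a (component, attribute) pair with no values never enters the GROUP BY output.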
+
+-- ============================================================================
+-- VALIDATION TRIGGERS
+-- ============================================================================
+
+-- Trigger to validate attributes against rules on insert
+CREATE TRIGGER validate_component_attribute_insert
+BEFORE INSERT ON component_attributes
+FOR EACH ROW
+WHEN NOT EXISTS (
+    SELECT 1 FROM components c
+    JOIN attribute_validation_rules avr ON c.component_type = avr.component_type
+    WHERE c.id = NEW.component_id
+    AND avr.attribute_name = NEW.attribute_name
+)
+BEGIN
+    SELECT RAISE(ABORT, 'Attribute is not defined for this component type');
+END;
+
+-- Trigger to validate storage type on insert
+CREATE TRIGGER validate_storage_type_insert
+BEFORE INSERT ON component_attributes
+FOR EACH ROW
+WHEN EXISTS (
+    SELECT 1 FROM components c
+    JOIN attribute_validation_rules avr ON c.component_type = avr.component_type
+    WHERE c.id = NEW.component_id
+    AND avr.attribute_name = NEW.attribute_name
+    AND avr.allowed_storage_types != 'static_or_timeseries'
+    AND avr.allowed_storage_types != NEW.storage_type
+)
+BEGIN
+    SELECT RAISE(ABORT, 'Storage type not allowed for this attribute');
+END;
+
+-- Trigger to update timestamps
+CREATE TRIGGER update_component_attributes_timestamp
+BEFORE UPDATE ON component_attributes
+FOR EACH ROW
+BEGIN
+    UPDATE component_attributes
+    SET updated_at = CURRENT_TIMESTAMP
+    WHERE id = NEW.id;
+END;
+
+-- Trigger to update component timestamps when attributes change
+CREATE TRIGGER update_component_timestamp_on_attribute_change
+AFTER INSERT ON component_attributes
+FOR EACH ROW
+BEGIN
+    UPDATE components
+    SET updated_at = CURRENT_TIMESTAMP
+    WHERE id = NEW.component_id;
+END;
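Net effect of the two insert triggers: a row naming an unknown attribute, or using a storage type the matching rule forbids, aborts with the RAISE message. Continuing the earlier sqlite3 session:

    try:
        conn.execute(
            "INSERT INTO component_attributes "
            "(component_id, attribute_name, storage_type, static_value, data_type) "
            "VALUES (?, ?, ?, ?, ?)",
            (bus_id, "no_such_attribute", "static", "1.0", "float"),
        )
    except sqlite3.IntegrityError as exc:
        print(exc)  # -> Attribute is not defined for this component type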
+
+-- ============================================================================
+-- NETWORK CONFIGURATION
+-- ============================================================================
+
+-- Network configuration parameters with scenario support
+CREATE TABLE network_config (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    scenario_id INTEGER, -- NULL for network defaults
+
+    param_name TEXT NOT NULL,
+    param_type TEXT NOT NULL,
+    param_value TEXT NOT NULL,
+    param_description TEXT,
+
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+
+    CONSTRAINT fk_network_config_scenario
+        FOREIGN KEY (scenario_id) REFERENCES scenarios(id) ON DELETE CASCADE,
+    CONSTRAINT uq_network_config_param
+        UNIQUE (scenario_id, param_name),
+    CONSTRAINT valid_param_type
+        CHECK (param_type IN ('boolean', 'real', 'integer', 'string', 'json'))
+);
+
+CREATE INDEX idx_network_config_lookup ON network_config(scenario_id, param_name);
+
+-- ============================================================================
+-- SYSTEM METADATA
+-- ============================================================================
+
+-- System metadata table for schema version tracking and system-level settings
+CREATE TABLE system_metadata (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    key TEXT NOT NULL UNIQUE,
+    value TEXT NOT NULL,
+    description TEXT,
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE INDEX idx_system_metadata_key ON system_metadata(key);
+
+-- Initialize system metadata with schema version
+INSERT INTO system_metadata (key, value, description)
+VALUES ('schema_version', '3.1.0', 'Database schema version');
+
+-- ============================================================================
+-- SCHEMA VERSION
+-- ============================================================================
+
+PRAGMA user_version = 31; -- Schema version 3.1