pyconvexity 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff compares the content of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of pyconvexity might be problematic.

@@ -1,8 +1,8 @@
  -- ============================================================================
- -- DATA STORAGE AND METADATA SCHEMA
- -- Auxiliary tables for data storage, notes, audit logging, and analysis caching
- -- Optimized for atomic operations and data management
- -- Version 2.1.0
+ -- DATA STORAGE AND METADATA SCHEMA (OPTIMIZED)
+ -- Essential tables for data storage and solve results only
+ -- Removed unused audit logging and analysis caching for efficiency
+ -- Version 2.2.0 - Optimized
  -- ============================================================================

  -- ============================================================================
@@ -34,12 +34,9 @@ CREATE TABLE network_data_store (
      CHECK (data_format IN ('json', 'parquet', 'csv', 'binary', 'text', 'yaml', 'toml'))
  );

- -- Optimized indexes for data retrieval
+ -- Minimal indexes for data retrieval
  CREATE INDEX idx_datastore_network ON network_data_store(network_id);
  CREATE INDEX idx_datastore_category ON network_data_store(network_id, category);
- CREATE INDEX idx_datastore_name ON network_data_store(network_id, category, name);
- CREATE INDEX idx_datastore_created_at ON network_data_store(created_at);
- CREATE INDEX idx_datastore_format ON network_data_store(data_format);

  -- ============================================================================
  -- DOCUMENTATION AND NOTES
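
Dropping the name, created_at, and data_format indexes still leaves name lookups a usable access path: the surviving (network_id, category) composite index narrows the scan to a single category. A minimal sketch of how to confirm this, assuming a local SQLite database file; the table and index names come from the schema above, the query values are placeholders:

    import sqlite3

    conn = sqlite3.connect("network.db")  # hypothetical database file
    plan = conn.execute(
        "EXPLAIN QUERY PLAN "
        "SELECT * FROM network_data_store "
        "WHERE network_id = ? AND category = ? AND name = ?",
        (1, "inputs", "load_profile"),  # placeholder values
    ).fetchall()
    for row in plan:
        print(row)  # expect a SEARCH using idx_datastore_category
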
@@ -66,11 +63,8 @@ CREATE TABLE network_notes (
      CHECK (note_type IN ('note', 'todo', 'warning', 'info', 'doc'))
  );

+ -- Minimal indexes for notes
  CREATE INDEX idx_notes_network ON network_notes(network_id);
- CREATE INDEX idx_notes_title ON network_notes(network_id, title);
- CREATE INDEX idx_notes_type ON network_notes(note_type);
- CREATE INDEX idx_notes_priority ON network_notes(priority);
- CREATE INDEX idx_notes_created_at ON network_notes(created_at);

  -- Component-specific notes and documentation
  CREATE TABLE component_notes (
@@ -93,139 +87,13 @@ CREATE TABLE component_notes (
      CHECK (note_type IN ('note', 'todo', 'warning', 'info', 'doc'))
  );

+ -- Minimal indexes for component notes
  CREATE INDEX idx_component_notes_component ON component_notes(component_id);
- CREATE INDEX idx_component_notes_title ON component_notes(component_id, title);
- CREATE INDEX idx_component_notes_type ON component_notes(note_type);
- CREATE INDEX idx_component_notes_priority ON component_notes(priority);
- CREATE INDEX idx_component_notes_created_at ON component_notes(created_at);
-
- -- ============================================================================
- -- AUDIT AND CHANGE TRACKING
- -- ============================================================================
-
- -- Comprehensive audit log for tracking all database changes
- CREATE TABLE audit_log (
-     id INTEGER PRIMARY KEY AUTOINCREMENT,
-     network_id INTEGER,
-     table_name TEXT NOT NULL,
-     record_id INTEGER,
-     operation TEXT NOT NULL CHECK (operation IN ('INSERT', 'UPDATE', 'DELETE')),
-
-     -- Change data
-     old_values TEXT,       -- JSON of old values for UPDATE/DELETE
-     new_values TEXT,       -- JSON of new values for INSERT/UPDATE
-     change_summary TEXT,   -- Human-readable summary of changes
-     affected_fields TEXT,  -- JSON array of changed field names
-
-     -- Context and metadata
-     user_id TEXT,
-     session_id TEXT,
-     client_info TEXT,      -- Application version, client type, etc.
-     transaction_id TEXT,   -- For grouping related changes
-     timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
-
-     -- Performance metrics
-     execution_time_ms INTEGER,  -- Time taken for the operation
-
-     CONSTRAINT fk_audit_network
-         FOREIGN KEY (network_id) REFERENCES networks(id) ON DELETE CASCADE
- );
-
- -- Optimized indexes for audit queries
- CREATE INDEX idx_audit_network ON audit_log(network_id);
- CREATE INDEX idx_audit_table ON audit_log(table_name);
- CREATE INDEX idx_audit_timestamp ON audit_log(timestamp);
- CREATE INDEX idx_audit_operation ON audit_log(operation);
- CREATE INDEX idx_audit_record ON audit_log(table_name, record_id);
- CREATE INDEX idx_audit_user ON audit_log(user_id);
- CREATE INDEX idx_audit_session ON audit_log(session_id);
- CREATE INDEX idx_audit_transaction ON audit_log(transaction_id);
-
- -- Change summary view for recent activity
- CREATE VIEW recent_changes AS
- SELECT
-     al.id,
-     al.network_id,
-     n.name as network_name,
-     al.table_name,
-     al.record_id,
-     al.operation,
-     al.change_summary,
-     al.user_id,
-     al.timestamp,
-     al.execution_time_ms
- FROM audit_log al
- LEFT JOIN networks n ON al.network_id = n.id
- ORDER BY al.timestamp DESC
- LIMIT 1000;
-
- -- ============================================================================
- -- NETWORK ANALYSIS AND RESULTS CACHING
- -- ============================================================================
-
- -- Cache for storing analysis results and computed data
- -- This improves performance by avoiding recomputation of expensive operations
- CREATE TABLE network_analysis_cache (
-     id INTEGER PRIMARY KEY AUTOINCREMENT,
-     network_id INTEGER NOT NULL,
-     analysis_type TEXT NOT NULL,        -- 'optimization', 'statistics', 'validation', 'powerflow', etc.
-     analysis_key TEXT NOT NULL,         -- Unique key for this analysis (hash of inputs)
-     analysis_version TEXT,              -- Version of analysis algorithm
-
-     -- Input tracking
-     input_hash TEXT,                    -- Hash of inputs that generated this result
-     input_summary TEXT,                 -- Human-readable summary of inputs
-     dependencies TEXT,                  -- JSON array of dependent data (components, attributes, etc.)
-
-     -- Results storage
-     result_data BLOB NOT NULL,          -- Serialized results (typically JSON or Parquet)
-     result_format TEXT DEFAULT 'json',  -- 'json', 'parquet', 'csv', 'binary'
-     result_summary TEXT,                -- Human-readable summary of results
-     result_size_bytes INTEGER,          -- Size of result data for management
-
-     -- Analysis metadata
-     analysis_time_ms INTEGER,           -- Time taken for analysis
-     status TEXT DEFAULT 'completed',    -- 'completed', 'failed', 'in_progress', 'stale'
-     error_message TEXT,                 -- If status is 'failed'
-     warnings TEXT,                      -- JSON array of warnings
-
-     -- Cache management
-     hit_count INTEGER DEFAULT 0,        -- Number of times this cache entry was used
-     last_accessed DATETIME DEFAULT CURRENT_TIMESTAMP,
-     expires_at DATETIME,                -- Optional expiration for cache cleanup
-
-     -- Metadata
-     created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
-     updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
-     created_by TEXT,
-
-     CONSTRAINT fk_analysis_cache_network
-         FOREIGN KEY (network_id) REFERENCES networks(id) ON DELETE CASCADE,
-     CONSTRAINT uq_analysis_cache_key
-         UNIQUE (network_id, analysis_type, analysis_key),
-     CONSTRAINT valid_analysis_status
-         CHECK (status IN ('completed', 'failed', 'in_progress', 'stale')),
-     CONSTRAINT valid_result_format
-         CHECK (result_format IN ('json', 'parquet', 'csv', 'binary', 'text'))
- );
-
- -- Optimized indexes for cache operations
- CREATE INDEX idx_analysis_cache_network ON network_analysis_cache(network_id);
- CREATE INDEX idx_analysis_cache_type ON network_analysis_cache(network_id, analysis_type);
- CREATE INDEX idx_analysis_cache_key ON network_analysis_cache(analysis_key);
- CREATE INDEX idx_analysis_cache_status ON network_analysis_cache(status);
- CREATE INDEX idx_analysis_cache_expires ON network_analysis_cache(expires_at);
- CREATE INDEX idx_analysis_cache_accessed ON network_analysis_cache(last_accessed);
- CREATE INDEX idx_analysis_cache_created ON network_analysis_cache(created_at);
- CREATE INDEX idx_analysis_cache_size ON network_analysis_cache(result_size_bytes);

  -- ============================================================================
  -- SOLVE RESULTS AND STATISTICS
  -- ============================================================================

- -- Drop the old optimization_results table
- DROP TABLE IF EXISTS optimization_results;
-
  -- Network solve results - stores solver outputs and statistics
  -- This is where PyPSA solve results are stored after successful solves
  CREATE TABLE network_solve_results (
@@ -250,11 +118,9 @@ CREATE TABLE network_solve_results (
          FOREIGN KEY (scenario_id) REFERENCES scenarios(id) ON DELETE CASCADE
  );

- -- Indexes for performance
+ -- Minimal indexes for performance
  CREATE INDEX idx_solve_results_network ON network_solve_results(network_id);
  CREATE INDEX idx_solve_results_scenario ON network_solve_results(scenario_id);
- CREATE INDEX idx_solve_results_status ON network_solve_results(solve_status);
- CREATE INDEX idx_solve_results_solved_at ON network_solve_results(solved_at);

  -- ============================================================================
  -- YEAR-BASED SOLVE RESULTS
@@ -284,12 +150,9 @@ CREATE TABLE network_solve_results_by_year (
      CONSTRAINT valid_year CHECK (year >= 1900 AND year <= 2100)
  );

- -- Indexes for performance
+ -- Minimal indexes for performance
  CREATE INDEX idx_solve_results_year_network ON network_solve_results_by_year(network_id);
  CREATE INDEX idx_solve_results_year_scenario ON network_solve_results_by_year(scenario_id);
- CREATE INDEX idx_solve_results_year_year ON network_solve_results_by_year(year);
- CREATE INDEX idx_solve_results_year_network_scenario ON network_solve_results_by_year(network_id, scenario_id);
- CREATE INDEX idx_solve_results_year_created_at ON network_solve_results_by_year(created_at);

  -- Optional: Registry of solve type schemas for frontend introspection
  CREATE TABLE solve_type_schemas (
@@ -300,178 +163,11 @@ CREATE TABLE solve_type_schemas (
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
  );

- /*
- SUGGESTED JSON STRUCTURE FOR results_json:
-
- For PyPSA optimization (solve_type = 'pypsa_optimization'):
- {
-   "core_summary": {
-     "total_generation_mwh": 12500.0,
-     "total_demand_mwh": 12000.0,
-     "total_cost": 1500000.0,
-     "load_factor": 0.96,
-     "unserved_energy_mwh": 0.0
-   },
-
-   "pypsa_statistics": {
-     "energy_balance": {
-       "gas": 5000.0,
-       "wind": 7500.0,
-       "solar": 0.0
-     },
-     "supply_by_carrier": {
-       "gas": {"total_mwh": 5000.0, "capacity_factor": 0.85},
-       "wind": {"total_mwh": 7500.0, "capacity_factor": 0.42}
-     },
-     "demand_by_carrier": {
-       "electricity": 12000.0
-     },
-     "capacity_factors": {
-       "generator_gas_001": 0.85,
-       "generator_wind_001": 0.42
-     },
-     "curtailment": {
-       "wind": 250.0,
-       "solar": 0.0
-     },
-     "transmission_utilization": {
-       "line_001": 0.75,
-       "line_002": 0.32
-     }
-   },
-
-   "custom_statistics": {
-     "emissions_by_carrier": {
-       "gas": 2500.0,
-       "wind": 0.0,
-       "solar": 0.0
-     },
-     "total_emissions_tons_co2": 2500.0,
-     "average_price_per_mwh": 125.0,
-     "peak_demand_mw": 2500.0,
-     "renewable_fraction": 0.6
-   },
-
-   "runtime_info": {
-     "build_time_seconds": 5.2,
-     "solve_time_seconds": 45.1,
-     "result_processing_seconds": 2.3,
-     "component_count": 150,
-     "variable_count": 8760,
-     "constraint_count": 12500,
-     "memory_usage_mb": 256.5
-   },
-
-   "solver_info": {
-     "solver_name": "highs",
-     "solver_version": "1.6.0",
-     "solver_options": {"presolve": "on", "parallel": "on"},
-     "termination_condition": "optimal",
-     "iterations": 1247,
-     "barrier_iterations": null
-   }
- }
-
- For Monte Carlo sampling (solve_type = 'monte_carlo'):
- {
-   "core_summary": {
-     "scenario_count": 1000,
-     "convergence_achieved": true,
-     "confidence_level": 0.95
-   },
-
-   "probability_distributions": {
-     "total_cost": {
-       "mean": 1500000.0,
-       "std": 150000.0,
-       "p05": 1250000.0,
-       "p50": 1500000.0,
-       "p95": 1750000.0
-     },
-     "unserved_energy": {
-       "mean": 12.5,
-       "std": 25.2,
-       "p05": 0.0,
-       "p50": 0.0,
-       "p95": 75.0
-     }
-   },
-
-   "sensitivity_analysis": {
-     "most_influential_parameters": [
-       {"parameter": "wind_capacity", "sensitivity": 0.85},
-       {"parameter": "fuel_price", "sensitivity": 0.72}
-     ]
-   },
-
-   "runtime_info": {
-     "total_runtime_seconds": 3600.0,
-     "scenarios_per_second": 0.28
-   }
- }
-
- For sensitivity analysis (solve_type = 'sensitivity'):
- {
-   "core_summary": {
-     "parameters_analyzed": 15,
-     "base_case_objective": 1500000.0
-   },
-
-   "parameter_sensitivities": {
-     "fuel_cost_gas": {
-       "sensitivity_coefficient": 0.85,
-       "objective_range": [1200000.0, 1800000.0],
-       "parameter_range": [50.0, 150.0]
-     },
-     "wind_capacity": {
-       "sensitivity_coefficient": -0.72,
-       "objective_range": [1300000.0, 1700000.0],
-       "parameter_range": [1000.0, 3000.0]
-     }
-   },
-
-   "tornado_chart_data": [
-     {"parameter": "fuel_cost_gas", "low": -300000.0, "high": 300000.0},
-     {"parameter": "wind_capacity", "low": -200000.0, "high": 200000.0}
-   ]
- }
- */
-
  -- ============================================================================
- -- DATA MANAGEMENT TRIGGERS
+ -- UTILITY VIEWS (SIMPLIFIED)
  -- ============================================================================

- -- Note: Access tracking for analysis cache would need to be handled in application code
- -- SQLite doesn't support AFTER SELECT triggers
-
- -- Note: Result size calculation should be handled in application code
- -- Cannot modify NEW values in SQLite BEFORE INSERT triggers
-
- -- Note: Timestamp updates should be handled in application code or with DEFAULT CURRENT_TIMESTAMP
- -- SQLite triggers cannot update the same record being modified without recursion issues
-
- -- ============================================================================
- -- UTILITY VIEWS
- -- ============================================================================
-
- -- View for network analysis summary
- CREATE VIEW network_analysis_summary AS
- SELECT
-     n.id as network_id,
-     n.name as network_name,
-     COUNT(DISTINCT nac.analysis_type) as analysis_types_count,
-     COUNT(nac.id) as total_cache_entries,
-     SUM(nac.hit_count) as total_cache_hits,
-     SUM(nac.result_size_bytes) as total_cache_size_bytes,
-     MAX(nac.last_accessed) as last_analysis_accessed,
-     COUNT(or1.id) as optimization_runs_count,
-     MAX(or1.created_at) as last_optimization_run
- FROM networks n
- LEFT JOIN network_analysis_cache nac ON n.id = nac.network_id
- LEFT JOIN optimization_results or1 ON n.id = or1.network_id
- GROUP BY n.id, n.name;
-
- -- View for recent network activity
+ -- View for recent network activity (simplified without audit/cache tables)
  CREATE VIEW network_activity_summary AS
  SELECT
      n.id as network_id,
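
With the suggested-structure comment removed from the schema, results_json remains a single JSON payload per solve row. A hedged sketch of reading the latest result for a network, assuming a plain sqlite3 connection; the results_json and solved_at columns are referenced elsewhere in this schema, everything else is a placeholder:

    import json
    import sqlite3

    conn = sqlite3.connect("network.db")  # hypothetical database file
    row = conn.execute(
        "SELECT results_json FROM network_solve_results "
        "WHERE network_id = ? ORDER BY solved_at DESC LIMIT 1",
        (1,),
    ).fetchone()
    if row:
        results = json.loads(row[0])
        # top-level keys follow the suggested structure removed above
        print(results.get("core_summary", {}).get("total_cost"))
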
@@ -501,7 +197,7 @@ SELECT
      c.*,
      b.name as bus_name
  FROM components c
- LEFT JOIN components b ON json_extract(c.connectivity, '$.bus_id') = b.id AND b.component_type = 'BUS'
+ LEFT JOIN components b ON c.bus_id = b.id AND b.component_type = 'BUS'
  WHERE c.component_type IN ('GENERATOR', 'LOAD', 'STORAGE_UNIT', 'STORE');

  -- View for components with dual bus connections (lines, links)
@@ -511,8 +207,8 @@ SELECT
      b0.name as bus0_name,
      b1.name as bus1_name
  FROM components c
- LEFT JOIN components b0 ON json_extract(c.connectivity, '$.bus0_id') = b0.id AND b0.component_type = 'BUS'
- LEFT JOIN components b1 ON json_extract(c.connectivity, '$.bus1_id') = b1.id AND b1.component_type = 'BUS'
+ LEFT JOIN components b0 ON c.bus0_id = b0.id AND b0.component_type = 'BUS'
+ LEFT JOIN components b1 ON c.bus1_id = b1.id AND b1.component_type = 'BUS'
  WHERE c.component_type IN ('LINE', 'LINK');

  -- Unified view for all components with resolved bus connections
@@ -522,17 +218,17 @@ SELECT
      CASE
          WHEN c.component_type = 'BUS' THEN NULL
          WHEN c.component_type IN ('GENERATOR', 'LOAD', 'STORAGE_UNIT', 'STORE') THEN
-             (SELECT b.name FROM components b WHERE b.id = json_extract(c.connectivity, '$.bus_id') AND b.component_type = 'BUS')
+             (SELECT b.name FROM components b WHERE b.id = c.bus_id AND b.component_type = 'BUS')
          ELSE NULL
      END as bus_name,
      CASE
          WHEN c.component_type IN ('LINE', 'LINK') THEN
-             (SELECT b.name FROM components b WHERE b.id = json_extract(c.connectivity, '$.bus0_id') AND b.component_type = 'BUS')
+             (SELECT b.name FROM components b WHERE b.id = c.bus0_id AND b.component_type = 'BUS')
          ELSE NULL
      END as bus0_name,
      CASE
          WHEN c.component_type IN ('LINE', 'LINK') THEN
-             (SELECT b.name FROM components b WHERE b.id = json_extract(c.connectivity, '$.bus1_id') AND b.component_type = 'BUS')
+             (SELECT b.name FROM components b WHERE b.id = c.bus1_id AND b.component_type = 'BUS')
          ELSE NULL
      END as bus1_name
  FROM components c;
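
All three component views switch from json_extract() over the old connectivity JSON blob to direct bus_id/bus0_id/bus1_id columns, which SQLite can join and index without parsing JSON on every row. A self-contained before/after sketch; the table layout here is illustrative, not the package's real components schema:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE components ("
        "id INTEGER PRIMARY KEY, component_type TEXT, connectivity TEXT, bus_id INTEGER)"
    )
    conn.execute("INSERT INTO components VALUES (1, 'BUS', NULL, NULL)")
    conn.execute("""INSERT INTO components VALUES (2, 'GENERATOR', '{"bus_id": 1}', 1)""")

    # 0.1.3 style: parse the JSON blob on every row
    old = conn.execute(
        "SELECT json_extract(connectivity, '$.bus_id') FROM components WHERE id = 2"
    ).fetchone()
    # 0.1.4 style: plain, indexable column access
    new = conn.execute("SELECT bus_id FROM components WHERE id = 2").fetchone()
    assert old[0] == new[0] == 1
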
@@ -30,11 +30,11 @@ def _get_gem_data_path() -> Path:
      if DEFAULT_GEM_DATA_PATH:
          return Path(DEFAULT_GEM_DATA_PATH)

-     # Try to find the datatoolbox data
+     # Try to find the examples data
      possible_paths = [
-         Path(__file__).parent.parent.parent.parent.parent / "datatoolbox" / "data" / "raw" / "global-energy-monitor" / "Global-Integrated-Power-August-2025.xlsx",
+         Path(__file__).parent.parent.parent.parent.parent / "examples" / "data" / "raw" / "global-energy-monitor" / "Global-Integrated-Power-August-2025.xlsx",
          Path("data/raw/global-energy-monitor/Global-Integrated-Power-August-2025.xlsx"),
-         Path("../datatoolbox/data/raw/global-energy-monitor/Global-Integrated-Power-August-2025.xlsx"),
+         Path("../examples/data/raw/global-energy-monitor/Global-Integrated-Power-August-2025.xlsx"),
      ]

      for path in possible_paths:
@@ -55,9 +55,9 @@ def _load_gem_mapping() -> Dict[str, Any]:
      """Load the GEM to carriers mapping configuration."""
      # Try to find the mapping file
      possible_paths = [
-         Path(__file__).parent.parent.parent.parent.parent / "datatoolbox" / "schema" / "gem_mapping.yaml",
+         Path(__file__).parent.parent.parent.parent.parent / "examples" / "schema" / "gem_mapping.yaml",
          Path("schema/gem_mapping.yaml"),
-         Path("../datatoolbox/schema/gem_mapping.yaml"),
+         Path("../examples/schema/gem_mapping.yaml"),
      ]

      for mapping_file in possible_paths:
@@ -20,6 +20,7 @@ from pyconvexity.models import (
      get_network_config
  )
  from pyconvexity.validation import list_validation_rules
+ from pyconvexity.models.attributes import get_timeseries as get_timeseries_conn

  logger = logging.getLogger(__name__)

@@ -130,8 +131,14 @@ class ExcelModelExporter:
                      timeseries_data[comp_type] = {}
                  if attr_name not in timeseries_data[comp_type]:
                      timeseries_data[comp_type][attr_name] = {}
-                 # Store the timeseries points
-                 timeseries_data[comp_type][attr_name][component.name] = attr_data['points']
+
+                 # Handle both new efficient format and legacy format
+                 if 'values' in attr_data:
+                     # New efficient format - store values directly
+                     timeseries_data[comp_type][attr_name][component.name] = attr_data['values']
+                 elif 'points' in attr_data:
+                     # Legacy format - store the timeseries points
+                     timeseries_data[comp_type][attr_name][component.name] = attr_data['points']

              if progress_callback:
                  progress_callback(50, "Creating Excel workbook...")
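
The export path now accepts two shapes for attr_data: the new format carries raw numbers under 'values', the legacy format carries point objects under 'points'. A standalone sketch of the normalization, with SimpleNamespace standing in for the library's TimeseriesPoint type:

    from types import SimpleNamespace

    def extract_values(attr_data):
        """Normalize either timeseries format to a plain list of values."""
        if 'values' in attr_data:  # new efficient format: raw values
            return list(attr_data['values'])
        if 'points' in attr_data:  # legacy format: objects exposing .value
            return [p.value for p in attr_data['points']]
        return []

    new_style = {'Timeseries': True, 'values': [1.0, 2.0, 3.0]}
    legacy_style = {'Timeseries': True,
                    'points': [SimpleNamespace(value=v) for v in (1.0, 2.0, 3.0)]}
    assert extract_values(new_style) == extract_values(legacy_style)
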
@@ -232,11 +239,27 @@ class ExcelModelExporter:
                          attributes[attr_name] = static_value.data

                  elif attr_value.variant == "Timeseries":
-                     # Store timeseries points for later processing
-                     attributes[attr_name] = {
-                         'Timeseries': True,
-                         'points': attr_value.timeseries_value
-                     }
+                     # Use new efficient timeseries access
+                     try:
+                         timeseries = get_timeseries_conn(conn, component_id, attr_name, scenario_id)
+                         if timeseries and timeseries.values:
+                             attributes[attr_name] = {
+                                 'Timeseries': True,
+                                 'values': timeseries.values
+                             }
+                         else:
+                             # Fallback to legacy method if new method fails
+                             attributes[attr_name] = {
+                                 'Timeseries': True,
+                                 'points': attr_value.timeseries_value
+                             }
+                     except Exception as ts_e:
+                         self.logger.warning(f"Failed to load timeseries {attr_name} for component {component_id}: {ts_e}")
+                         # Fallback to legacy method
+                         attributes[attr_name] = {
+                             'Timeseries': True,
+                             'points': attr_value.timeseries_value
+                         }

              except AttributeNotFound:
                  # Attribute not set - always use empty string for blank Excel cell
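
Usage of the new accessor, for reference: the diff calls it positionally as get_timeseries(conn, component_id, attr_name, scenario_id) and reads a values attribute off the result. In this sketch the connection type, identifiers, and attribute name are all assumptions:

    import sqlite3
    from pyconvexity.models.attributes import get_timeseries

    conn = sqlite3.connect("network.db")  # hypothetical database file
    ts = get_timeseries(conn, 42, "p_set", None)  # placeholder component/attr/scenario
    if ts and ts.values:
        print(f"loaded {len(ts.values)} periods")
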
@@ -545,13 +568,11 @@ class ExcelModelExporter:

          # Add component columns for each attribute
          for attr_name, component_data in timeseries_data.items():
-             for component_name, timeseries_points in component_data.items():
-                 if isinstance(timeseries_points, list):
-                     # Remove repetitive logging
-                     # self.logger.info(f"Processing {component_name}_{attr_name} with {len(timeseries_points)} points")
+             for component_name, timeseries_data_item in component_data.items():
+                 if isinstance(timeseries_data_item, list):
+                     # Handle efficient format (list of values)
+                     values = timeseries_data_item

-                     # Extract values from TimeseriesPoint objects
-                     values = [point.value for point in timeseries_points]
                      # Pad or truncate to match time periods
                      while len(values) < len(timestamps):
                          values.append(0)
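
The while loop above only pads; any truncation of overlong series presumably happens just below this hunk. For comparison, a one-line pad-and-truncate with the same zero-fill semantics (a sketch, assuming the target length is len(timestamps)):

    values = (values + [0] * (len(timestamps) - len(values)))[:len(timestamps)]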