site-calc-investment 1.2.1-py3-none-any.whl → 1.2.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- site_calc_investment/__init__.py +1 -1
- site_calc_investment/mcp/__init__.py +15 -0
- site_calc_investment/mcp/config.py +40 -0
- site_calc_investment/mcp/data_loaders.py +241 -0
- site_calc_investment/mcp/scenario.py +515 -0
- site_calc_investment/mcp/server.py +758 -0
- {site_calc_investment-1.2.1.dist-info → site_calc_investment-1.2.3.dist-info}/METADATA +58 -1
- {site_calc_investment-1.2.1.dist-info → site_calc_investment-1.2.3.dist-info}/RECORD +11 -5
- site_calc_investment-1.2.3.dist-info/entry_points.txt +2 -0
- {site_calc_investment-1.2.1.dist-info → site_calc_investment-1.2.3.dist-info}/WHEEL +0 -0
- {site_calc_investment-1.2.1.dist-info → site_calc_investment-1.2.3.dist-info}/licenses/LICENSE +0 -0
site_calc_investment/__init__.py
CHANGED

site_calc_investment/mcp/__init__.py
ADDED
@@ -0,0 +1,15 @@
+"""MCP server for Site-Calc investment planning.
+
+Exposes investment optimization tools to LLM agents via FastMCP.
+Install with: pip install site-calc-investment[mcp]
+"""
+
+from site_calc_investment.mcp.server import mcp
+
+
+def main() -> None:
+    """Entry point for the MCP server."""
+    mcp.run()
+
+
+__all__ = ["main", "mcp"]
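
For reference, a minimal sketch of launching the new server in-process. This is assumed usage inferred from the file above, not part of the diff; the console-script name registered in entry_points.txt is not shown here.

# Assumed usage sketch (not from the diff): run the MCP server in-process.
# FastMCP's run() typically defaults to the stdio transport, so this call blocks
# and serves tool requests until the connected MCP client disconnects.
from site_calc_investment.mcp import main

if __name__ == "__main__":
    main()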

site_calc_investment/mcp/config.py
ADDED
@@ -0,0 +1,40 @@
+"""Configuration for the MCP server, loaded from environment variables."""
+
+import os
+from dataclasses import dataclass
+from typing import Optional
+
+
+def get_data_dir() -> Optional[str]:
+    """Get the configured data directory from INVESTMENT_DATA_DIR, or None."""
+    return os.environ.get("INVESTMENT_DATA_DIR") or None
+
+
+@dataclass(frozen=True)
+class Config:
+    """MCP server configuration from environment variables."""
+
+    api_url: str
+    api_key: str
+
+    @classmethod
+    def from_env(cls) -> "Config":
+        """Load configuration from environment variables.
+
+        :raises ValueError: If required environment variables are missing.
+        """
+        api_url = os.environ.get("INVESTMENT_API_URL", "")
+        api_key = os.environ.get("INVESTMENT_API_KEY", "")
+
+        if not api_url:
+            raise ValueError(
+                "INVESTMENT_API_URL environment variable is required. "
+                "Set it to the Site-Calc API URL (e.g., http://site-calc-prod-alb-xxx.elb.amazonaws.com)"
+            )
+        if not api_key:
+            raise ValueError(
+                "INVESTMENT_API_KEY environment variable is required. "
+                "Set it to your investment API key (starts with 'inv_')"
+            )
+
+        return cls(api_url=api_url, api_key=api_key)
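
A short usage sketch for the configuration module. This is assumed usage, not part of the diff; the URL, key, and directory below are placeholders, not real values.

# Assumed usage sketch (not from the diff); the environment values here are placeholders.
import os

from site_calc_investment.mcp.config import Config, get_data_dir

os.environ["INVESTMENT_API_URL"] = "http://localhost:8080"
os.environ["INVESTMENT_API_KEY"] = "inv_example_key"
os.environ["INVESTMENT_DATA_DIR"] = "/tmp/investment-data"

config = Config.from_env()   # raises ValueError if either required variable is missing
print(config.api_url)        # -> http://localhost:8080
print(get_data_dir())        # -> /tmp/investment-data (or None when unset)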

site_calc_investment/mcp/data_loaders.py
ADDED
@@ -0,0 +1,241 @@
+"""Data loading utilities for resolving price/profile shorthand to arrays."""
+
+import csv
+import json
+import os
+from typing import Any, Optional, Union
+
+
+def resolve_price_or_profile(
+    value: Union[float, int, list[float], dict[str, Any]],
+    expected_length: Optional[int],
+) -> list[float]:
+    """Resolve a price or profile value to a flat list of floats.
+
+    Accepts:
+    - float/int: expanded to constant array of expected_length
+    - list[float]: validated length (if expected_length set), returned as-is
+    - {"file": "path.csv"}: loaded from CSV (first numeric column)
+    - {"file": "path.csv", "column": "price_eur"}: specific column from CSV
+    - {"file": "path.json"}: loaded from JSON (flat array)
+
+    :param value: The value to resolve.
+    :param expected_length: Expected array length (from timespan). None skips length validation.
+    :returns: List of floats.
+    :raises ValueError: If the value cannot be resolved or has wrong length.
+    :raises FileNotFoundError: If a referenced file does not exist.
+    """
+    if isinstance(value, (int, float)):
+        if expected_length is None:
+            raise ValueError(
+                "Cannot expand scalar value without a timespan. Set the timespan first, or provide an explicit array."
+            )
+        return [float(value)] * expected_length
+
+    if isinstance(value, list):
+        result = [float(v) for v in value]
+        if expected_length is not None and len(result) != expected_length:
+            raise ValueError(
+                f"Array length {len(result)} does not match expected length {expected_length} "
+                f"(from timespan). Provide exactly {expected_length} values."
+            )
+        return result
+
+    if isinstance(value, dict):
+        return _load_from_file(value, expected_length)
+
+    raise ValueError(
+        f"Unsupported value type: {type(value).__name__}. "
+        "Expected a number (flat value), list of numbers, or "
+        '{"file": "path.csv"} for file loading.'
+    )
+
+
+def _load_from_file(spec: dict[str, Any], expected_length: Optional[int]) -> list[float]:
+    """Load data from a file reference.
+
+    :param spec: Dict with "file" key and optional "column" key.
+    :param expected_length: Expected array length.
+    :returns: List of floats loaded from file.
+    """
+    file_path = spec.get("file")
+    if not file_path:
+        raise ValueError('File reference must include a "file" key with the path.')
+
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(
+            f"Data file not found: {file_path}. Provide an absolute path to a CSV or JSON file on the local filesystem."
+        )
+
+    ext = os.path.splitext(file_path)[1].lower()
+    column = spec.get("column")
+
+    if ext == ".json":
+        result = _load_json(file_path)
+    elif ext in (".csv", ".tsv", ".txt"):
+        result = _load_csv(file_path, column)
+    else:
+        raise ValueError(f"Unsupported file format: '{ext}'. Supported formats: .csv, .tsv, .json")
+
+    if expected_length is not None and len(result) != expected_length:
+        raise ValueError(
+            f"File '{file_path}' has {len(result)} values, but expected {expected_length} "
+            f"(from timespan). The file must contain exactly {expected_length} values."
+        )
+
+    return result
+
+
+def _load_json(file_path: str) -> list[float]:
+    """Load a flat array from a JSON file."""
+    with open(file_path, encoding="utf-8") as f:
+        data = json.load(f)
+
+    if not isinstance(data, list):
+        raise ValueError(
+            f"JSON file '{file_path}' must contain a flat array of numbers, but got {type(data).__name__}."
+        )
+
+    try:
+        return [float(v) for v in data]
+    except (TypeError, ValueError) as e:
+        raise ValueError(f"JSON file '{file_path}' contains non-numeric values: {e}") from e
+
+
+def _load_csv(file_path: str, column: Optional[str] = None) -> list[float]:
+    """Load numeric data from a CSV file.
+
+    If column is specified, reads that column by header name.
+    Otherwise, reads the first numeric column.
+    """
+    with open(file_path, encoding="utf-8", newline="") as f:
+        sample = f.read(8192)
+        f.seek(0)
+
+        try:
+            dialect = csv.Sniffer().sniff(sample)
+        except csv.Error:
+            dialect = csv.excel  # type: ignore[assignment]
+
+        has_header = csv.Sniffer().has_header(sample)
+        f.seek(0)
+
+        reader = csv.reader(f, dialect)
+
+        if has_header:
+            headers = next(reader)
+            if column:
+                try:
+                    col_idx = headers.index(column)
+                except ValueError:
+                    raise ValueError(
+                        f"Column '{column}' not found in '{file_path}'. Available columns: {', '.join(headers)}"
+                    )
+            else:
+                col_idx = _find_first_numeric_column(headers, file_path)
+        else:
+            if column:
+                raise ValueError(f"Cannot use column='{column}' with '{file_path}': the file has no header row.")
+            col_idx = 0
+
+        values: list[float] = []
+        for row_num, row in enumerate(reader, start=2 if has_header else 1):
+            if not row or all(cell.strip() == "" for cell in row):
+                continue
+            if col_idx >= len(row):
+                raise ValueError(
+                    f"Row {row_num} in '{file_path}' has only {len(row)} columns, "
+                    f"but column index {col_idx} was expected."
+                )
+            try:
+                values.append(float(row[col_idx]))
+            except ValueError:
+                raise ValueError(
+                    f"Non-numeric value '{row[col_idx]}' at row {row_num}, column {col_idx} in '{file_path}'."
+                )
+
+    if not values:
+        raise ValueError(f"No data found in '{file_path}'.")
+
+    return values
+
+
+def _find_first_numeric_column(headers: list[str], file_path: str) -> int:
+    """Find the first column that looks numeric based on the header name."""
+    numeric_hints = ["price", "value", "cost", "demand", "power", "mw", "mwh", "eur", "profile"]
+    for i, h in enumerate(headers):
+        h_lower = h.lower().strip()
+        for hint in numeric_hints:
+            if hint in h_lower:
+                return i
+    return 0
+
+
+def _resolve_save_path(file_path: str, data_dir: Optional[str] = None) -> str:
+    """Resolve a file path for saving, applying data_dir for relative paths.
+
+    :param file_path: Filename or path (relative or absolute).
+    :param data_dir: Base directory for relative paths (or None for cwd).
+    :returns: Absolute path string.
+    :raises ValueError: If the extension is present but not '.csv'.
+    """
+    _, ext = os.path.splitext(file_path)
+    if ext and ext.lower() != ".csv":
+        raise ValueError(f"Only .csv files are supported, got '{ext}'. Use a .csv extension or omit the extension.")
+    if not ext:
+        file_path = file_path + ".csv"
+
+    if os.path.isabs(file_path):
+        return file_path
+
+    base = data_dir if data_dir else os.getcwd()
+    return os.path.abspath(os.path.join(base, file_path))
+
+
+def save_csv(
+    file_path: str,
+    columns: dict[str, list[float]],
+    data_dir: Optional[str] = None,
+    overwrite: bool = False,
+) -> str:
+    """Save column data as a CSV file.
+
+    :param file_path: Filename or path. Relative paths resolve against data_dir (or cwd).
+        Extension '.csv' is appended if missing.
+    :param columns: Named columns of numeric data. All must have the same length.
+    :param data_dir: Base directory for relative paths.
+    :param overwrite: Allow overwriting an existing file (default: False).
+    :returns: Absolute path to the saved file.
+    :raises ValueError: If columns are empty, have no rows, or have mismatched lengths.
+    :raises FileExistsError: If file exists and overwrite is False.
+    """
+    if not columns:
+        raise ValueError("columns must not be empty -- provide at least one named column.")
+
+    lengths = {name: len(vals) for name, vals in columns.items()}
+    unique_lengths = set(lengths.values())
+
+    if unique_lengths == {0}:
+        raise ValueError("All columns have 0 rows -- provide at least one row of data.")
+    if len(unique_lengths) > 1:
+        raise ValueError(f"All columns must have the same length, got: {lengths}")
+
+    resolved = _resolve_save_path(file_path, data_dir)
+
+    if not overwrite and os.path.exists(resolved):
+        raise FileExistsError(f"File already exists: {resolved}. Set overwrite=True to replace it.")
+
+    parent = os.path.dirname(resolved)
+    if parent:
+        os.makedirs(parent, exist_ok=True)
+
+    col_names = list(columns.keys())
+    row_count = len(next(iter(columns.values())))
+
+    with open(resolved, "w", encoding="utf-8", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(col_names)
+        for i in range(row_count):
+            writer.writerow([columns[name][i] for name in col_names])
+
+    return resolved
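
To make the shorthand forms concrete, a small self-contained sketch of the loaders and writer above. This is assumed usage, not part of the diff; the paths, column names, and numbers are made up for illustration.

# Assumed usage sketch (not from the diff); paths and values are illustrative only.
from site_calc_investment.mcp.data_loaders import resolve_price_or_profile, save_csv

hours = 24  # expected_length, e.g. derived from a one-day hourly timespan

# Scalar shorthand: expanded to a constant 24-element array.
flat = resolve_price_or_profile(65.0, hours)

# Explicit list: length-checked against expected_length and returned as floats.
profile = resolve_price_or_profile([60.0 + i for i in range(hours)], hours)

# Write both series; a relative name resolves against data_dir (or the cwd) and gets a .csv extension.
path = save_csv(
    "prices_example",
    {"flat_eur": flat, "profile_eur": profile},
    data_dir="/tmp/investment-data",
    overwrite=True,
)

# File shorthand: read a named column back out of the CSV that was just written.
reloaded = resolve_price_or_profile({"file": path, "column": "profile_eur"}, hours)
assert reloaded == profile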