iflow-mcp_xrds76354_sumo-mcp 0.1.0__py3-none-any.whl

utils/timeout.py ADDED
@@ -0,0 +1,364 @@
+ """
+ Smart timeout controller.
+
+ Provides a three-layer timeout strategy:
+ 1. Static timeouts - for fast, predictable operations
+ 2. Parameter-adaptive timeouts - estimate a reasonable timeout from the input parameters
+ 3. Heartbeat + exponential backoff - for long-running operations (e.g. RL training)
+ """
+
+ import os
+ import logging
+ import inspect
+ import subprocess
+ import sys
+ import threading
+ import time
+ from dataclasses import dataclass
+ from typing import Any, Callable, Optional, TypeVar
+
+ logger = logging.getLogger(__name__)
+
+ T = TypeVar("T")
+
+
+ @dataclass
+ class TimeoutConfig:
+     """Timeout configuration."""
+     base_timeout: float = 60.0        # base timeout (seconds)
+     max_timeout: float = 3600.0       # maximum timeout (seconds)
+     backoff_factor: float = 2.0       # exponential backoff factor
+     heartbeat_interval: float = 10.0  # heartbeat check interval (seconds)
+
+
+ # Predefined timeout configurations
+ TIMEOUT_CONFIGS = {
+     # Layer 1: static timeouts
+     "netconvert": TimeoutConfig(base_timeout=300, max_timeout=600),
+     "netgenerate": TimeoutConfig(base_timeout=120, max_timeout=300),
+     "osmGet": TimeoutConfig(base_timeout=120, max_timeout=300),
+
+     # Layer 2: parameter-adaptive (base values; adjusted per call from the parameters)
+     "randomTrips": TimeoutConfig(base_timeout=300, max_timeout=900),
+     "duarouter": TimeoutConfig(base_timeout=120, max_timeout=1800),
+     "simulation": TimeoutConfig(base_timeout=60, max_timeout=1800),
+     "tlsCycleAdaptation": TimeoutConfig(base_timeout=300, max_timeout=1800),
+     "tlsCoordinator": TimeoutConfig(base_timeout=300, max_timeout=1800),
+
+     # Layer 3: heartbeat + exponential backoff
+     "rl_training": TimeoutConfig(
+         base_timeout=300,         # first attempt: 5 minutes
+         max_timeout=7200,         # hard cap: 2 hours
+         backoff_factor=1.5,       # widen the window 1.5x after each timeout
+         heartbeat_interval=30.0   # check process liveness every 30 seconds
+     ),
+ }
+
+
+ def calculate_adaptive_timeout(
+     operation: str,
+     params: Optional[dict] = None
+ ) -> float:
+     """
+     Calculate an adaptive timeout from the operation type and its parameters.
+
+     Args:
+         operation: Operation name (e.g. "randomTrips", "simulation", "rl_training")
+         params: Operation parameters, used to estimate the runtime
+
+     Returns:
+         Estimated timeout in seconds
+     """
+     config = TIMEOUT_CONFIGS.get(operation, TimeoutConfig())
+     params = params or {}
+
+     timeout = config.base_timeout
+
+     if operation == "randomTrips":
+         # Scale with end_time: every 1000 s of simulated time adds 10 s of timeout.
+         end_time = params.get("end_time", 3600)
+         timeout += end_time / 100
+
+     elif operation == "duarouter":
+         # Scale with the estimated number of routes; if the trips file size is
+         # known, this could be estimated more precisely.
+         timeout += params.get("estimated_routes", 1000) * 0.05
+
+     elif operation == "simulation":
+         # Scale with the number of simulation steps.
+         steps = params.get("steps", 1000)
+         timeout += steps * 0.01
+
+     elif operation in {"tlsCycleAdaptation", "tlsCoordinator"}:
+         # TLS tools are pure file-processing scripts: the main predictor for runtime
+         # is route file size (vehicle count) and, to a lesser extent, net size.
+         route_files_bytes = params.get("route_files_bytes", 0) or 0
+         net_file_bytes = params.get("net_file_bytes", 0) or 0
+
+         try:
+             route_files_bytes = float(route_files_bytes)
+         except (TypeError, ValueError):
+             route_files_bytes = 0
+         try:
+             net_file_bytes = float(net_file_bytes)
+         except (TypeError, ValueError):
+             net_file_bytes = 0
+
+         # Heuristic: each additional 100KB of routes adds ~1s budget.
+         timeout += route_files_bytes / 100_000
+         # Net XML tends to be smaller; use a gentler slope.
+         timeout += net_file_bytes / 500_000
+
+     elif operation == "rl_training":
+         # RL training: estimate from episodes x steps.
+         episodes = params.get("episodes", 1)
+         steps_per_episode = params.get("steps_per_episode", 1000)
+         # Rough estimate: each episode takes about steps/50 seconds.
+         estimated_time = episodes * (steps_per_episode / 50)
+         timeout = max(config.base_timeout, estimated_time * 1.5)  # 1.5x safety margin
+
+     return min(timeout, config.max_timeout)
+
+
+ class HeartbeatTimeoutExecutor:
+     """
+     Heartbeat + exponential-backoff timeout executor.
+
+     Intended for long-running operations such as RL training.
+     Features:
+     1. Periodically checks that the process/operation is alive (heartbeat)
+     2. After the first timeout, widens the timeout window with exponential backoff
+     3. Supports progress callbacks to avoid falsely flagging a run as "stuck"
+     """
+
+     def __init__(self, config: TimeoutConfig):
+         self.config = config
+         self.current_timeout = config.base_timeout
+         self.retry_count = 0
+         self._last_heartbeat = time.time()
+         self._is_alive = True
+         self._lock = threading.Lock()
+
+     def heartbeat(self) -> None:
+         """Record a heartbeat, indicating the operation is still making progress."""
+         with self._lock:
+             self._last_heartbeat = time.time()
+
+     def check_alive(self) -> bool:
+         """Check whether there was any activity within the heartbeat window."""
+         with self._lock:
+             elapsed = time.time() - self._last_heartbeat
+             return elapsed < self.config.heartbeat_interval * 3  # 3x tolerance
+
+     def expand_timeout(self) -> float:
+         """Widen the timeout window (exponential backoff)."""
+         self.retry_count += 1
+         self.current_timeout = min(
+             self.current_timeout * self.config.backoff_factor,
+             self.config.max_timeout
+         )
+         logger.info(
+             "Timeout expanded: retry=%d, new_timeout=%.1fs",
+             self.retry_count, self.current_timeout
+         )
+         return self.current_timeout
+
+     def get_current_timeout(self) -> float:
+         """Return the current timeout."""
+         return self.current_timeout
+
+
+ def run_with_adaptive_timeout(
+     func: Callable[..., T],
+     operation: str,
+     params: Optional[dict] = None,
+     on_progress: Optional[Callable[[str], None]] = None,
+ ) -> T:
+     """
+     Execute a function under an adaptive timeout.
+
+     Long-running operations such as RL training use the heartbeat mechanism
+     rather than a plain timeout.
+
+     Args:
+         func: Function to execute
+         operation: Operation name
+         params: Operation parameters (used to estimate the timeout)
+         on_progress: Progress callback
+
+     Returns:
+         The function's result
+
+     Raises:
+         TimeoutError: If the operation times out and cannot recover
+     """
+     timeout = calculate_adaptive_timeout(operation, params)
+
+     if operation == "rl_training":
+         # Use the heartbeat mechanism.
+         config = TIMEOUT_CONFIGS[operation]
+         executor = HeartbeatTimeoutExecutor(config)
+         executor.current_timeout = timeout
+
+         cancel_event = threading.Event()
+         cancel_lock = threading.Lock()
+         cancel_callback: dict[str, Optional[Callable[[], None]]] = {"cb": None}
+
+         def register_cancel_callback(cb: Callable[[], None]) -> None:
+             with cancel_lock:
+                 cancel_callback["cb"] = cb
+
+         def request_cancel() -> None:
+             cancel_event.set()
+             with cancel_lock:
+                 cb = cancel_callback["cb"]
+             if cb is not None:
+                 try:
+                     cb()
+                 except Exception:
+                     logger.debug("Cancel callback failed", exc_info=True)
+
+         # Run in a background thread; the main thread monitors the heartbeat.
+         result_container: dict = {"result": None, "error": None, "done": False}
+
+         heartbeat = executor.heartbeat
+
+         def _call_func() -> T:
+             try:
+                 sig = inspect.signature(func)
+             except (TypeError, ValueError):
+                 return func()
+
+             kwargs: dict[str, Any] = {}
+             if "cancel_event" in sig.parameters:
+                 kwargs["cancel_event"] = cancel_event
+             if "register_cancel_callback" in sig.parameters:
+                 kwargs["register_cancel_callback"] = register_cancel_callback
+
+             sig_params = list(sig.parameters.values())  # renamed to avoid shadowing `params`
+             if not sig_params:
+                 return func(**kwargs) if kwargs else func()
+
+             first = sig_params[0]
+             if first.kind in (
+                 inspect.Parameter.POSITIONAL_ONLY,
+                 inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                 inspect.Parameter.VAR_POSITIONAL,
+             ):
+                 return func(heartbeat, **kwargs)
+
+             return func(**kwargs) if kwargs else func()
+
+         def worker():
+             try:
+                 result_container["result"] = _call_func()
+             except Exception as e:
+                 result_container["error"] = e
+             finally:
+                 result_container["done"] = True
+
+         thread = threading.Thread(target=worker, daemon=True)
+         thread.start()
+
+         start_time = time.time()
+         poll_interval = min(1.0, max(0.1, config.heartbeat_interval / 10))
+         while not result_container["done"]:
+             elapsed = time.time() - start_time
+
+             if elapsed > executor.get_current_timeout():
+                 if executor.check_alive():
+                     # Heartbeat activity seen: widen the timeout window.
+                     new_timeout = executor.expand_timeout()
+                     if on_progress:
+                         on_progress(f"Operation still running, extended timeout to {new_timeout:.0f}s")
+                 else:
+                     # No heartbeat: treat the operation as stuck.
+                     request_cancel()
+                     raise TimeoutError(
+                         f"Operation '{operation}' timed out after {elapsed:.0f}s with no activity"
+                     )
+
+             time.sleep(poll_interval)
+
+         if result_container["error"]:
+             raise result_container["error"]
+         return result_container["result"]
+
+     else:
+         # Plain timeout.
+         result_container: dict = {"result": None, "error": None, "done": False}
+
+         def worker():
+             try:
+                 result_container["result"] = func()
+             except Exception as e:
+                 result_container["error"] = e
+             finally:
+                 result_container["done"] = True
+
+         thread = threading.Thread(target=worker, daemon=True)
+         thread.start()
+         thread.join(timeout=timeout)
+
+         if not result_container["done"]:
+             raise TimeoutError(f"Operation '{operation}' timed out after {timeout:.0f}s")
+
+         if result_container["error"]:
+             raise result_container["error"]
+         return result_container["result"]
+
+
+ def subprocess_run_with_timeout(
+     cmd: list,
+     operation: str,
+     params: Optional[dict] = None,
+     **kwargs
+ ) -> subprocess.CompletedProcess:
+     """
+     Run subprocess.run with an adaptive timeout.
+
+     Args:
+         cmd: Command list
+         operation: Operation name
+         params: Operation parameters
+         **kwargs: Additional arguments forwarded to subprocess.run
+
+     Returns:
+         subprocess.CompletedProcess
+     """
+     timeout = calculate_adaptive_timeout(operation, params)
+
+     # Capture output so child stdout cannot pollute the MCP stdio stream.
+     kwargs.setdefault("capture_output", True)
+     kwargs.setdefault("text", True)
+     # Avoid child processes accidentally reading MCP JSON-RPC from stdin.
+     kwargs.setdefault("stdin", subprocess.DEVNULL)
+
+     # Ensure tool subprocesses don't inherit "server stdio" behaviors that are only
+     # needed for the MCP transport (e.g., PYTHONUNBUFFERED for JSON-RPC flushing).
+     env = kwargs.get("env")
+     if env is None:
+         env = os.environ.copy()
+     else:
+         env = dict(env)
+     env.pop("PYTHONUNBUFFERED", None)
+     kwargs["env"] = env
+
+     # Windows: prevent leaking inheritable handles into nested subprocesses and
+     # avoid spawning a console window (can be surprisingly slow under piped stdio).
+     if sys.platform == "win32":
+         kwargs.setdefault("close_fds", True)
+         if hasattr(subprocess, "CREATE_NO_WINDOW"):
+             kwargs.setdefault("creationflags", subprocess.CREATE_NO_WINDOW)
+
+     try:
+         return subprocess.run(cmd, timeout=timeout, **kwargs)
+     except subprocess.TimeoutExpired as e:
+         logger.warning(
+             "Command timed out after %.1fs: %s",
+             timeout, " ".join(cmd[:3]) + "..."
+         )
+         raise TimeoutError(
+             f"Operation '{operation}' timed out after {timeout:.0f}s. "
+             f"This may indicate a very large input or a hanging process. "
+             f"Consider breaking down the operation or increasing timeout limits."
+         ) from e
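
A minimal usage sketch for the two entry points above; the `train` function, its parameters, and the file names are illustrative, not part of the package. `run_with_adaptive_timeout` passes the executor's heartbeat callback as the first positional argument and injects `cancel_event` only when the callable declares it:

    import threading
    from utils.timeout import run_with_adaptive_timeout, subprocess_run_with_timeout

    # Hypothetical training loop: the first positional parameter receives the
    # heartbeat callback; declaring cancel_event opts in to cooperative cancel.
    def train(heartbeat, cancel_event: threading.Event):
        for episode in range(10):
            if cancel_event.is_set():
                return "cancelled"
            ...            # one episode of work
            heartbeat()    # tell the monitor we are still making progress
        return "done"

    result = run_with_adaptive_timeout(
        train,
        operation="rl_training",
        params={"episodes": 10, "steps_per_episode": 2000},
        on_progress=print,  # surfaces "extended timeout to ...s" messages
    )

    # Subprocess variant: the timeout budget is derived from `params`
    # (here 120s base + 5000 * 0.05s = 370s; file names are placeholders).
    proc = subprocess_run_with_timeout(
        ["duarouter", "-n", "net.net.xml", "-r", "trips.rou.xml", "-o", "routes.rou.xml"],
        operation="duarouter",
        params={"estimated_routes": 5000},
    )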
utils/traci.py ADDED
@@ -0,0 +1,82 @@
+ from __future__ import annotations
+
+ import inspect
+ import subprocess
+ import threading
+ from typing import Any, Callable, Optional
+
+
+ def ensure_traci_start_stdout_suppressed() -> None:
+     """
+     Ensure `traci.start()` defaults to `stdout=subprocess.DEVNULL`.
+
+     Why:
+         MCP uses JSON-RPC over stdio; any SUMO/TraCI stdout output can corrupt the
+         protocol stream and cause clients to hang or show `undefined`.
+
+     This wrapper is:
+     - idempotent (won't double-wrap)
+     - non-invasive (doesn't override an explicit `stdout=...`)
+     - best-effort (no-op if `traci` isn't available or doesn't support `stdout`)
+     """
+     try:
+         import traci  # type: ignore
+     except Exception:
+         return
+
+     start: Optional[Callable[..., Any]] = getattr(traci, "start", None)
+     if start is None:
+         return
+
+     if getattr(start, "_mcp_stdout_suppressed", False):
+         return
+
+     try:
+         sig = inspect.signature(start)
+     except (TypeError, ValueError):
+         # Can't introspect; be conservative.
+         return
+
+     supports_stdout = "stdout" in sig.parameters or any(
+         p.kind is inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
+     )
+     if not supports_stdout:
+         return
+
+     original_start = start
+
+     def _start(cmd, *args: Any, **kwargs: Any):
+         kwargs.setdefault("stdout", subprocess.DEVNULL)
+         return original_start(cmd, *args, **kwargs)
+
+     setattr(_start, "_mcp_stdout_suppressed", True)
+     setattr(_start, "_mcp_original_start", original_start)
+
+     traci.start = _start  # type: ignore[attr-defined]
+
+
+ def traci_close_best_effort(timeout_s: float = 5.0) -> bool:
+     """
+     Best-effort close TraCI without risking an indefinite hang.
+
+     Returns:
+         True if `traci.close()` finished within timeout_s, else False.
+     """
+     try:
+         import traci  # type: ignore
+     except Exception:
+         return True
+
+     done = threading.Event()
+
+     def _close() -> None:
+         try:
+             traci.close()
+         except Exception:
+             pass
+         finally:
+             done.set()
+
+     thread = threading.Thread(target=_close, daemon=True, name="sumo-mcp:traci.close")
+     thread.start()
+     return done.wait(timeout_s)
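
A sketch of the intended call pattern around a TraCI session, assuming the wheel's `utils` package is importable and SUMO is on PATH (the `.sumocfg` path is a placeholder):

    from utils.traci import ensure_traci_start_stdout_suppressed, traci_close_best_effort

    ensure_traci_start_stdout_suppressed()  # patch traci.start once, before any session

    import traci

    traci.start(["sumo", "-c", "scenario.sumocfg"])  # stdout now defaults to DEVNULL
    try:
        while traci.simulation.getMinExpectedNumber() > 0:
            traci.simulationStep()
    finally:
        if not traci_close_best_effort(timeout_s=5.0):
            # close() did not return in time; the daemon thread is abandoned
            # instead of hanging the MCP server.
            pass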
workflows/__init__.py ADDED
File without changes
workflows/py.typed ADDED
File without changes
workflows/rl_train.py ADDED
@@ -0,0 +1,34 @@
+ from mcp_tools.rl import find_sumo_rl_scenario_files, list_rl_scenarios, run_rl_training
+
+ def rl_train_workflow(
+     scenario_name: str,
+     output_dir: str,
+     episodes: int = 5,
+     steps: int = 1000
+ ) -> str:
+     """
+     Workflow to train an RL agent on a built-in sumo-rl scenario.
+     1. Locate scenario files
+     2. Run training
+     3. Return summary
+     """
+     if not scenario_name:
+         return (
+             "Error: rl_train workflow requires scenario_name.\n"
+             "Hint: Use manage_rl_task(list_scenarios) to list built-in scenarios, "
+             "or use manage_rl_task(train_custom) for custom net/route files."
+         )
+
+     net_file, route_file, err = find_sumo_rl_scenario_files(scenario_name)
+     if err:
+         available = list_rl_scenarios()
+         return f"{err}\nAvailable: {available}"
+
+     return run_rl_training(
+         net_file=net_file,
+         route_file=route_file,
+         out_dir=output_dir,
+         episodes=episodes,
+         steps_per_episode=steps,
+         algorithm="ql"
+     )
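
From an MCP tool handler the workflow reduces to a single call; a sketch (the scenario name is illustrative — list the real ones via manage_rl_task(list_scenarios)):

    from workflows.rl_train import rl_train_workflow

    summary = rl_train_workflow(
        scenario_name="single-intersection",  # illustrative scenario name
        output_dir="./rl_out",
        episodes=5,
        steps=1000,
    )
    print(summary)  # training summary, or an error listing available scenarios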
@@ -0,0 +1,210 @@
+ import os
+ import shutil
+ import warnings
+ import logging
+ from filecmp import cmp
+ from typing import List, Optional
+
+ from mcp_tools.simulation import run_simple_simulation
+ from mcp_tools.signal import tls_cycle_adaptation, tls_coordinator
+ from mcp_tools.analysis import analyze_fcd
+
+ logger = logging.getLogger(__name__)
+
+
+ def _copy_to_dir(src_file: str, dst_dir: str) -> str:
+     """
+     Copy src_file into dst_dir (if needed) and return the local path.
+
+     This is used to ensure generated SUMO config files can reference inputs via
+     relative paths, even on Windows when source and destination are on different drives.
+     """
+     dst_file = os.path.join(dst_dir, os.path.basename(src_file))
+     if os.path.abspath(src_file) == os.path.abspath(dst_file):
+         return dst_file
+
+     if os.path.exists(dst_file):
+         try:
+             if cmp(src_file, dst_file, shallow=False):
+                 return dst_file
+         except OSError:
+             pass
+
+     shutil.copy2(src_file, dst_file)
+     return dst_file
+
+
+ def signal_opt_workflow(
+     net_file: str,
+     route_file: str,
+     output_dir: str,
+     steps: int = 3600,
+     use_coordinator: bool = False
+ ) -> str:
+     """
+     Signal Optimization Workflow.
+     1. Run Baseline Simulation
+     2. Optimize Signals (Cycle Adaptation or Coordinator)
+     3. Run Optimized Simulation
+     4. Compare Results
+
+     Note:
+         To keep generated `.sumocfg` files portable (especially on Windows across drives),
+         `net_file` and `route_file` will be copied into `output_dir` when needed.
+     """
+     if not os.path.exists(output_dir):
+         os.makedirs(output_dir)
+
+     local_net_file = _copy_to_dir(net_file, output_dir)
+     local_route_file = _copy_to_dir(route_file, output_dir)
+
+     # Baseline paths
+     baseline_cfg = os.path.join(output_dir, "baseline.sumocfg")
+     baseline_fcd = os.path.join(output_dir, "baseline_fcd.xml")
+
+     # Optimized paths
+     opt_net_file = os.path.join(output_dir, "optimized.net.xml")
+     opt_cfg = os.path.join(output_dir, "optimized.sumocfg")
+     opt_fcd = os.path.join(output_dir, "optimized_fcd.xml")
+
+     # 1. Run Baseline
+     _create_config(baseline_cfg, local_net_file, local_route_file, baseline_fcd, steps)
+     res_baseline = run_simple_simulation(baseline_cfg, steps)
+     if "error" in res_baseline.lower():
+         return f"Baseline Simulation Failed: {res_baseline}"
+
+     analysis_baseline = analyze_fcd(baseline_fcd)
+
+     # 2. Optimize
+     def _is_failure(result: str) -> bool:
+         lowered = result.lower()
+         return "failed" in lowered or "error" in lowered
+
+     optimization_notes: list[str] = []
+     optimized_net_input = opt_net_file
+
+     primary_method = "tlsCoordinator" if use_coordinator else "tlsCycleAdaptation"
+     fallback_method = "tlsCycleAdaptation" if use_coordinator else "tlsCoordinator"
+
+     if use_coordinator:
+         res_opt_primary = tls_coordinator(local_net_file, local_route_file, opt_net_file)
+     else:
+         res_opt_primary = tls_cycle_adaptation(local_net_file, local_route_file, opt_net_file)
+
+     res_opt = res_opt_primary
+
+     if _is_failure(res_opt_primary):
+         optimization_notes.append(f"Primary method failed: {primary_method}\n{res_opt_primary}")
+
+         if use_coordinator:
+             res_opt_fallback = tls_cycle_adaptation(local_net_file, local_route_file, opt_net_file)
+         else:
+             res_opt_fallback = tls_coordinator(local_net_file, local_route_file, opt_net_file)
+
+         if not _is_failure(res_opt_fallback):
+             optimization_notes.append(f"Fell back to: {fallback_method}")
+             res_opt = "\n\n".join(optimization_notes + [res_opt_fallback])
+         else:
+             optimization_notes.append(f"Fallback method failed: {fallback_method}\n{res_opt_fallback}")
+             optimization_notes.append(
+                 "Optimization was skipped; optimized simulation will reuse the baseline network."
+             )
+             res_opt = "\n\n".join(optimization_notes)
+             optimized_net_input = local_net_file
+
+     # Check whether the optimizer produced a full net file or an additional file.
+     is_additional = False
+     if optimized_net_input != local_net_file:
+         is_additional = _is_additional_file(opt_net_file)
+
+     # 3. Run Optimized
+     if is_additional:
+         # Use original net + additional file
+         _create_config(
+             opt_cfg,
+             local_net_file,
+             local_route_file,
+             opt_fcd,
+             steps,
+             additional_files=[opt_net_file],
+         )
+     else:
+         # Use new net file (or baseline net if optimization was skipped)
+         _create_config(opt_cfg, optimized_net_input, local_route_file, opt_fcd, steps)
+
+     res_optimized = run_simple_simulation(opt_cfg, steps)
+     if "error" in res_optimized.lower():
+         return f"Optimized Simulation Failed: {res_optimized}"
+
+     analysis_optimized = analyze_fcd(opt_fcd)
+
+     return (f"Signal Optimization Workflow Completed.\n\n"
+             f"--- Baseline Results ---\n{res_baseline}\n{analysis_baseline}\n\n"
+             f"--- Optimization Step ---\n{res_opt}\n\n"
+             f"--- Optimized Results ---\n{res_optimized}\n{analysis_optimized}")
+
+
+ def _is_additional_file(file_path: str) -> bool:
+     if not os.path.exists(file_path):
+         return False
+     try:
+         with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
+             head = f.read(1000)
+         return '<additional' in head
+     except Exception as e:
+         logger.debug("Failed to inspect additional file %s: %s", file_path, e)
+         return False
+
+
+ def _create_config(cfg_path: str, net_file: str, route_file: str, fcd_file: str, steps: int, additional_files: Optional[List[str]] = None) -> None:
+     cfg_dir = os.path.dirname(os.path.abspath(cfg_path))
+
+     def _as_cfg_path(file_path: str) -> str:
+         abs_path = os.path.abspath(file_path)
+         try:
+             rel_path = os.path.relpath(abs_path, cfg_dir)
+         except ValueError:
+             basename = os.path.basename(abs_path)
+             warnings.warn(
+                 f"Cannot compute relative path from config dir '{cfg_dir}' to '{abs_path}'. "
+                 f"Using basename '{basename}' for portability; ensure the file exists in '{cfg_dir}'.",
+                 RuntimeWarning,
+                 stacklevel=2,
+             )
+             return basename
+
+         if rel_path.startswith(".."):
+             basename = os.path.basename(abs_path)
+             warnings.warn(
+                 f"Path '{abs_path}' is outside config dir '{cfg_dir}'. "
+                 f"Using basename '{basename}' for portability; ensure the file exists in '{cfg_dir}'.",
+                 RuntimeWarning,
+                 stacklevel=2,
+             )
+             return basename
+
+         return rel_path
+
+     additional_str = ""
+     if additional_files:
+         val = ",".join([_as_cfg_path(f) for f in additional_files])
+         additional_str = f'<additional-files value="{val}"/>'
+
+     net_value = _as_cfg_path(net_file)
+     route_value = _as_cfg_path(route_file)
+     fcd_value = _as_cfg_path(fcd_file)
+
+     with open(cfg_path, "w", encoding="utf-8") as f:
+         f.write(f"""<configuration>
+     <input>
+         <net-file value="{net_value}"/>
+         <route-files value="{route_value}"/>
+         {additional_str}
+     </input>
+     <time>
+         <begin value="0"/>
+         <end value="{steps}"/>
+     </time>
+     <output>
+         <fcd-output value="{fcd_value}"/>
+     </output>
+ </configuration>""")