matlab-mcp-python 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
matlab_mcp/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ """MATLAB MCP Server — Expose MATLAB capabilities to AI agents via MCP."""
2
+ __version__ = "0.1.0"
matlab_mcp/config.py ADDED
@@ -0,0 +1,212 @@
1
+ """Configuration system for MATLAB MCP Server.
2
+
3
+ Loads YAML config, applies environment variable overrides (MATLAB_MCP_* prefix),
4
+ and validates settings with Pydantic models.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import logging
9
+ import os
10
+ import platform
11
+ import warnings
12
+ from pathlib import Path
13
+ from typing import List, Literal, Optional
14
+
15
+ import yaml
16
+ from pydantic import BaseModel, Field, model_validator
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
class ServerConfig(BaseModel):
    """Top-level server settings: transport, logging, and result storage."""

    # Server name advertised to MCP clients.
    name: str = "matlab-mcp-server"
    # Transport protocol; "stdio" is the default, "sse" serves over HTTP.
    transport: Literal["stdio", "sse"] = "stdio"
    # Bind address/port — presumably only used for the "sse" transport; confirm at call site.
    host: str = "0.0.0.0"
    port: int = 8765
    log_level: Literal["debug", "info", "warning", "error"] = "info"
    # Relative paths here are made absolute by AppConfig.resolve_paths().
    log_file: str = "./logs/server.log"
    result_dir: str = "./results"
    # Name suggests seconds to wait for in-flight work at shutdown — confirm at call site.
    drain_timeout_seconds: int = 300
30
+
31
+
32
class PoolConfig(BaseModel):
    """MATLAB engine pool sizing, scaling, and health settings."""

    # Pool bounds; AppConfig.validate_pool enforces min_engines <= max_engines
    # and warns when max_engines > 4 on macOS.
    min_engines: int = 2
    max_engines: int = 10
    scale_down_idle_timeout: int = 900
    engine_start_timeout: int = 120
    health_check_interval: int = 60
    # Fractional utilization threshold (0..1, by the 0.8 default) — confirm semantics at call site.
    proactive_warmup_threshold: float = 0.8
    queue_max_size: int = 50
    # Optional explicit MATLAB installation root; None means auto-detect (presumably).
    matlab_root: Optional[str] = None
41
+
42
+
43
class ExecutionConfig(BaseModel):
    """Code-execution behavior and limits."""

    # Seconds a request waits inline before being promoted to an async job
    # (used as JobExecutor's sync wait window).
    sync_timeout: int = 30
    # Hard ceiling for any single execution (86400 = 24h).
    max_execution_time: int = 86400
    workspace_isolation: bool = True
    engine_affinity: bool = False
    # Resolved to an absolute path by AppConfig.resolve_paths().
    temp_dir: str = "./temp"
    temp_cleanup_on_disconnect: bool = True
50
+
51
+
52
class WorkspaceConfig(BaseModel):
    """Initial MATLAB workspace setup."""

    # Paths added to the MATLAB path on engine start (presumably; confirm at call site).
    default_paths: List[str] = Field(default_factory=list)
    # MATLAB commands run once per engine at startup.
    startup_commands: List[str] = Field(default_factory=lambda: ["format long"])
55
+
56
+
57
class ToolboxesConfig(BaseModel):
    """Toolbox access policy."""

    # How `list` below is interpreted: allow-only, deny-only, or allow everything.
    mode: Literal["whitelist", "blacklist", "all"] = "whitelist"
    # NOTE(review): field name shadows the `list` builtin within this class body;
    # kept as-is because it is the external YAML key.
    list: List[str] = Field(default_factory=list)
60
+
61
+
62
class CustomToolsConfig(BaseModel):
    """Location of the user-defined custom tools definition file."""

    # Resolved to an absolute path by AppConfig.resolve_paths().
    config_file: str = "./custom_tools.yaml"
64
+
65
+
66
class SecurityConfig(BaseModel):
    """Security policy for submitted MATLAB code and uploads."""

    # Presumably gates rejection of code using `blocked_functions`;
    # enforcement lives outside this module — confirm.
    blocked_functions_enabled: bool = True
    # Dangerous MATLAB entry points: shell escapes, dynamic evaluation,
    # cross-workspace assignment, and external interpreters.
    blocked_functions: List[str] = Field(
        default_factory=lambda: [
            "system", "unix", "dos", "!",
            "eval", "feval", "evalc", "evalin", "assignin",
            "perl", "python",
        ]
    )
    max_upload_size_mb: int = 100
    require_proxy_auth: bool = False
77
+
78
+
79
class CodeCheckerConfig(BaseModel):
    """Static code-checking settings."""

    enabled: bool = True
    # When True, code is checked before every execution (presumably; confirm at call site).
    auto_check_before_execute: bool = False
    # Severity levels that are reported.
    severity_levels: List[str] = Field(default_factory=lambda: ["error", "warning"])
83
+
84
+
85
class OutputConfig(BaseModel):
    """Result formatting: figures, thumbnails, and text-size limits."""

    # When True, JobExecutor converts MATLAB figures to Plotly JSON.
    plotly_conversion: bool = True
    static_image_format: Literal["png", "jpg", "svg"] = "png"
    static_image_dpi: int = 150
    thumbnail_enabled: bool = True
    thumbnail_max_width: int = 400
    # Thresholds presumably in characters/bytes — confirm at the consumer.
    large_result_threshold: int = 10000
    max_inline_text_length: int = 50000
93
+
94
+
95
class SessionsConfig(BaseModel):
    """Session and job-retention limits."""

    max_sessions: int = 50
    # Timeouts/retention presumably in seconds (3600 = 1h, 86400 = 24h).
    session_timeout: int = 3600
    job_retention_seconds: int = 86400
99
+
100
+
101
class MonitoringConfig(BaseModel):
    """Metrics collection and dashboard settings."""

    enabled: bool = True
    # Sampling cadence and retention — units presumably seconds/days per the names.
    sample_interval: int = 10
    retention_days: int = 7
    # Resolved to an absolute path by AppConfig.resolve_paths().
    db_path: str = "./monitoring/metrics.db"
    dashboard_enabled: bool = True
    # Dashboard HTTP port (distinct from server.port's default 8765).
    http_port: int = 8766
108
+
109
+
110
class AppConfig(BaseModel):
    """Aggregated application configuration for the MATLAB MCP server.

    One sub-model per configuration-file section; every section has full
    defaults, so an empty or missing config file still validates.
    """

    server: ServerConfig = Field(default_factory=ServerConfig)
    pool: PoolConfig = Field(default_factory=PoolConfig)
    execution: ExecutionConfig = Field(default_factory=ExecutionConfig)
    workspace: WorkspaceConfig = Field(default_factory=WorkspaceConfig)
    toolboxes: ToolboxesConfig = Field(default_factory=ToolboxesConfig)
    custom_tools: CustomToolsConfig = Field(default_factory=CustomToolsConfig)
    security: SecurityConfig = Field(default_factory=SecurityConfig)
    code_checker: CodeCheckerConfig = Field(default_factory=CodeCheckerConfig)
    output: OutputConfig = Field(default_factory=OutputConfig)
    sessions: SessionsConfig = Field(default_factory=SessionsConfig)
    monitoring: MonitoringConfig = Field(default_factory=MonitoringConfig)

    # Internal: directory the config was loaded from. Set by resolve_paths()
    # so later consumers can re-resolve paths against the same base.
    # (Leading underscore makes this a pydantic private attribute.)
    _config_dir: Optional[Path] = None

    @model_validator(mode="after")
    def validate_pool(self) -> "AppConfig":
        """Cross-field validation of pool settings.

        Raises
        ------
        ValueError
            If ``pool.min_engines`` exceeds ``pool.max_engines``.
        """
        if self.pool.min_engines > self.pool.max_engines:
            raise ValueError(
                f"pool.min_engines ({self.pool.min_engines}) must not exceed "
                f"pool.max_engines ({self.pool.max_engines})"
            )
        # Non-fatal: macOS has known stability issues with many engines in one
        # process, so warn rather than reject.
        if platform.system() == "Darwin" and self.pool.max_engines > 4:
            warnings.warn(
                f"pool.max_engines is {self.pool.max_engines} on macOS. "
                "Running more than 4 matlab.engine instances in a single Python process "
                "on macOS has known stability issues. Consider setting max_engines <= 4.",
                stacklevel=2,
            )
        return self

    def resolve_paths(self, base_dir: Path) -> None:
        """Resolve all relative paths to absolute paths relative to base_dir."""
        # Fix: actually store the base directory. _config_dir was documented as
        # "stored after resolution" but was never assigned anywhere.
        self._config_dir = Path(base_dir)

        def _resolve(p: str) -> str:
            # Already-absolute paths are passed through untouched.
            path = Path(p)
            if not path.is_absolute():
                return str((base_dir / path).resolve())
            return p

        self.server.result_dir = _resolve(self.server.result_dir)
        self.server.log_file = _resolve(self.server.log_file)
        self.execution.temp_dir = _resolve(self.execution.temp_dir)
        self.custom_tools.config_file = _resolve(self.custom_tools.config_file)
        self.monitoring.db_path = _resolve(self.monitoring.db_path)
156
+
157
+
158
def _apply_env_overrides(data: dict) -> dict:
    """Apply MATLAB_MCP_* environment variable overrides.

    Convention: MATLAB_MCP_SECTION_KEY maps to data[section][key].
    E.g. MATLAB_MCP_POOL_MAX_ENGINES=20 → data["pool"]["max_engines"] = 20.

    Note: the split is on the FIRST underscore after the prefix, so only
    single-word sections (``pool``, ``server``, …) can be targeted; sections
    such as ``custom_tools`` are not addressable via this mechanism.

    Returns the same ``data`` dict, mutated in place.
    """
    prefix = "MATLAB_MCP_"
    for env_key, env_val in os.environ.items():
        if not env_key.startswith(prefix):
            continue
        remainder = env_key[len(prefix):]  # e.g. "POOL_MAX_ENGINES"
        parts = remainder.lower().split("_", 1)
        if len(parts) != 2:
            continue
        section, key = parts  # e.g. ("pool", "max_engines")
        # Fix: a YAML file may declare an empty section ("pool:" loads as
        # None) or a scalar; replace any non-dict value so the key
        # assignment below cannot raise TypeError.
        if not isinstance(data.get(section), dict):
            data[section] = {}

        # Attempt type coercion: int → float → bool → str.
        # Pydantic re-validates downstream, so a best-effort guess is enough.
        coerced: object = env_val
        try:
            coerced = int(env_val)
        except ValueError:
            try:
                coerced = float(env_val)
            except ValueError:
                if env_val.lower() in ("true", "false"):
                    coerced = env_val.lower() == "true"

        data[section][key] = coerced
    return data
189
+
190
+
191
def load_config(path: Optional[Path] = None) -> AppConfig:
    """Load application config from a YAML file with env var overrides.

    If *path* is None or the file does not exist, default values are used.
    """
    raw: dict = {}
    base_dir = Path.cwd()

    if path is not None:
        cfg_path = Path(path)
        if cfg_path.exists():
            # Missing top-level mapping (empty file) loads as None → use {}.
            with open(cfg_path, "r", encoding="utf-8") as fh:
                raw = yaml.safe_load(fh) or {}
            base_dir = cfg_path.parent
        else:
            logger.warning("Config file not found: %s — using defaults", cfg_path)

    # Environment variables beat the file; pydantic validates the merge.
    merged = _apply_env_overrides(raw)
    cfg = AppConfig.model_validate(merged)
    cfg.resolve_paths(base_dir)
    return cfg
File without changes
@@ -0,0 +1,366 @@
1
+ """Job executor for MATLAB MCP Server.
2
+
3
+ Orchestrates the full lifecycle of a MATLAB code execution request:
4
+ 1. Create a job in the tracker
5
+ 2. Acquire an engine from the pool
6
+ 3. Inject job context into the MATLAB workspace
7
+ 4. Execute code (sync or promoted to async)
8
+ 5. Build and return a structured result dict
9
+ """
10
+ from __future__ import annotations
11
+
12
+ import asyncio
13
+ import concurrent.futures
14
+ import io
15
+ import logging
16
+ import os
17
+ from pathlib import Path
18
+ from typing import Any, Optional
19
+
20
+ from matlab_mcp.jobs.models import Job
21
+ from matlab_mcp.jobs.tracker import JobTracker
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
class JobExecutor:
    """Executes MATLAB code jobs using a pool of engines.

    Parameters
    ----------
    pool:
        An :class:`~matlab_mcp.pool.manager.EnginePoolManager` instance
        (or any object with async ``acquire()`` / ``release()`` methods).
    tracker:
        A :class:`JobTracker` instance for storing job state.
    config:
        The full :class:`~matlab_mcp.config.AppConfig` instance.
    collector:
        Optional metrics collector exposing ``record_event(name, payload)``.
    """

    def __init__(self, pool: Any, tracker: JobTracker, config: Any, collector: Any = None) -> None:
        self._pool = pool
        self._tracker = tracker
        self._config = config
        self._collector = collector
        # Fix: strong references to promoted background tasks. The event loop
        # keeps only weak references to tasks, so a watcher task created with
        # asyncio.create_task() and never awaited could be garbage-collected
        # mid-flight, silently dropping job completion handling.
        self._bg_tasks: set = set()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    async def execute(
        self,
        session_id: str,
        code: str,
        temp_dir: Optional[str] = None,
    ) -> dict:
        """Execute MATLAB code for a session.

        Hybrid sync/async execution:
        - Creates a job and acquires an engine.
        - Injects job context into the MATLAB workspace.
        - Starts background execution via ``engine.execute(code, background=True)``.
        - Waits up to ``sync_timeout`` seconds.
        - If the future completes: returns result inline (status="completed").
        - If it times out: promotes to async background task (status="pending").
        - On sync execution error: marks job failed and returns error result.

        Returns a dict with at minimum ``status`` and ``job_id`` keys.
        """
        sync_timeout = self._config.execution.sync_timeout

        # 1. Create job
        job = self._tracker.create_job(session_id, code)
        logger.info("[job %s] Created for session=%s code=%s",
                    job.job_id[:8], session_id[:8], repr(code[:120]))

        # 2. Acquire engine
        engine = await self._pool.acquire()
        job.mark_running(engine.engine_id)
        logger.info("[job %s] Acquired engine %s — executing", job.job_id[:8], engine.engine_id)

        # 3. Inject job context
        self._inject_job_context(engine, job, temp_dir)

        # 4. Start background execution with stdout/stderr capture.
        # Buffers are stashed on the job so _build_result can read them later.
        job._stdout = io.StringIO()
        job._stderr = io.StringIO()
        try:
            future = engine.execute(code, background=True,
                                    stdout=job._stdout, stderr=job._stderr)
            job.future = future
        except Exception as exc:
            logger.error("[job %s] Failed to start execution: %s: %s",
                         job.job_id[:8], type(exc).__name__, exc)
            job.mark_failed(
                error_type=type(exc).__name__,
                message=str(exc),
            )
            await self._pool.release(engine)
            if self._collector:
                self._collector.record_event("job_failed", {
                    "job_id": job.job_id,
                    "code": code[:500],
                    "error": str(exc)[:500],
                })
            return self._error_result(job)

        # 5. Wait for sync_timeout
        if sync_timeout > 0:
            try:
                loop = asyncio.get_running_loop()
                # future.result blocks, so run it in a worker thread; the outer
                # wait_for (+1s) is a safety net around the inner timeout.
                raw_result = await asyncio.wait_for(
                    loop.run_in_executor(None, lambda: future.result(timeout=sync_timeout)),
                    timeout=sync_timeout + 1,
                )
                # Completed within timeout
                result = self._build_result(engine, raw_result, job, temp_dir)
                job.mark_completed(result)
                elapsed_ms = (job.completed_at - job.started_at) * 1000 if job.started_at and job.completed_at else 0
                output_preview = (result.get("text") or "")[:200]
                logger.info("[job %s] Completed in %.1fms output=%s",
                            job.job_id[:8], elapsed_ms, repr(output_preview))
                await self._pool.release(engine)
                if self._collector:
                    self._collector.record_event("job_completed", {
                        "job_id": job.job_id,
                        "execution_ms": elapsed_ms,
                        "code": code[:500],
                        "output": (result.get("text") or "")[:2000],
                    })
                return {"status": "completed", "job_id": job.job_id, **result}
            except (TimeoutError, concurrent.futures.TimeoutError, asyncio.TimeoutError):
                # Promote to async; the engine is released by the watcher task.
                logger.info("[job %s] Sync timeout (%ds) — promoting to async background job",
                            job.job_id[:8], sync_timeout)
                self._spawn_watcher(job, engine, future, temp_dir)
                return {"status": "pending", "job_id": job.job_id}
            except Exception as exc:
                logger.error("[job %s] Execution failed: %s: %s",
                             job.job_id[:8], type(exc).__name__, exc)
                job.mark_failed(
                    error_type=type(exc).__name__,
                    message=str(exc),
                )
                await self._pool.release(engine)
                if self._collector:
                    self._collector.record_event("job_failed", {
                        "job_id": job.job_id,
                        "code": code[:500],
                        "error": str(exc)[:500],
                    })
                return self._error_result(job)
        else:
            # sync_timeout == 0: immediately promote to async
            self._spawn_watcher(job, engine, future, temp_dir)
            return {"status": "pending", "job_id": job.job_id}

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _spawn_watcher(
        self,
        job: Job,
        engine: Any,
        future: Any,
        temp_dir: Optional[str],
    ) -> None:
        """Start _wait_for_completion as a task, keeping a strong reference.

        The reference is held in ``self._bg_tasks`` until the task finishes
        (a done-callback discards it) so the loop's weak reference to the
        task cannot be the only one.
        """
        task = asyncio.create_task(
            self._wait_for_completion(job, engine, future, temp_dir)
        )
        self._bg_tasks.add(task)
        task.add_done_callback(self._bg_tasks.discard)

    def _inject_job_context(
        self,
        engine: Any,
        job: Job,
        temp_dir: Optional[str],
    ) -> None:
        """Inject job metadata into the MATLAB workspace.

        Best-effort: failures are logged at debug level and never abort
        the job.
        """
        try:
            engine._engine.workspace["__mcp_job_id__"] = job.job_id
        except Exception:
            logger.debug("Could not inject __mcp_job_id__ into workspace")

        if temp_dir is not None:
            try:
                engine._engine.workspace["__mcp_temp_dir__"] = str(temp_dir)
            except Exception:
                logger.debug("Could not inject __mcp_temp_dir__ into workspace")

    async def _wait_for_completion(
        self,
        job: Job,
        engine: Any,
        future: Any,
        temp_dir: Optional[str],
    ) -> None:
        """Background task that waits for an async job to complete.

        Always releases the engine back to the pool, whatever the outcome.
        """
        max_time = self._config.execution.max_execution_time
        loop = asyncio.get_running_loop()
        try:
            raw_result = await asyncio.wait_for(
                loop.run_in_executor(None, lambda: future.result(timeout=max_time)),
                timeout=max_time + 1,
            )
            result = self._build_result(engine, raw_result, job, temp_dir)
            job.mark_completed(result)
            elapsed_ms = (job.completed_at - job.started_at) * 1000 if job.started_at and job.completed_at else 0
            logger.info("[job %s] Async job completed in %.1fms", job.job_id[:8], elapsed_ms)
            if self._collector:
                self._collector.record_event("job_completed", {
                    "job_id": job.job_id,
                    "execution_ms": elapsed_ms,
                    "code": job.code[:500] if job.code else "",
                    "output": (result.get("text") or "")[:2000],
                })
        except asyncio.CancelledError:
            logger.warning("[job %s] Async job cancelled", job.job_id[:8])
            job.mark_cancelled()
        except Exception as exc:
            logger.error("[job %s] Async job failed: %s: %s",
                         job.job_id[:8], type(exc).__name__, exc)
            job.mark_failed(
                error_type=type(exc).__name__,
                message=str(exc),
            )
            if self._collector:
                self._collector.record_event("job_failed", {
                    "job_id": job.job_id,
                    "code": job.code[:500] if job.code else "",
                    "error": str(exc)[:500],
                })
        finally:
            try:
                await self._pool.release(engine)
            except Exception:
                logger.warning("Failed to release engine after async job %s", job.job_id)

    def _build_result(
        self,
        engine: Any,
        raw_result: Any,
        job: Job,
        temp_dir: Optional[str],
    ) -> dict:
        """Build a structured result dict from the engine's output.

        Collects:
        - text: captured stdout from the engine
        - variables: key/value pairs from the workspace (excluding internal vars)
        - figures: Plotly-converted figures if configured
        - files: any files written to temp_dir
        - warnings / errors: empty lists by default (extended by real engine)
        """
        # Capture text output from StringIO buffers
        text = ""
        try:
            stdout_buf = getattr(job, "_stdout", None)
            if stdout_buf is not None:
                text = stdout_buf.getvalue()
            stderr_buf = getattr(job, "_stderr", None)
            if stderr_buf is not None:
                err_text = stderr_buf.getvalue()
                if err_text:
                    text = text + "\n[stderr]\n" + err_text if text else err_text
        except Exception:
            pass

        # Capture workspace variables (excluding internal MCP variables)
        variables: dict = {}
        try:
            for k, v in engine._engine.workspace.items():
                if not k.startswith("__mcp_"):
                    variables[k] = self._safe_serialize(v)
        except Exception:
            pass

        # Figures — extract properties and convert to Plotly
        figures: list = []
        if self._config.output.plotly_conversion and temp_dir is not None:
            try:
                import glob as glob_mod
                from matlab_mcp.output.plotly_convert import load_plotly_json
                from matlab_mcp.output.plotly_style_mapper import convert_figure

                # Run MATLAB-side figure extraction
                # Note: MATLAB eval() rejects identifiers starting with __
                escaped_dir = str(temp_dir).replace("\\", "\\\\").replace("'", "''")
                extract_code = (
                    f"mcpFigs_ = findobj(0, 'Type', 'figure'); "
                    f"for mcpIdx_ = 1:length(mcpFigs_), "
                    f"mcp_extract_props(mcpFigs_(mcpIdx_), "
                    f"fullfile('{escaped_dir}', sprintf('{job.job_id}_fig%d.json', mcpIdx_))); "
                    f"close(mcpFigs_(mcpIdx_)); "
                    f"end; "
                    f"clear mcpFigs_ mcpIdx_;"
                )
                logger.debug("Figure extraction code: %r", extract_code)
                try:
                    engine.execute(extract_code, background=False)
                except Exception as exc:
                    logger.warning("Figure extraction failed: %s", exc)
                    logger.debug("Extraction code was: %r", extract_code)

                # Load and convert each figure JSON
                fig_pattern = os.path.join(temp_dir, f"{job.job_id}_fig*.json")
                for fig_file in sorted(glob_mod.glob(fig_pattern)):
                    matlab_data = load_plotly_json(fig_file)
                    if matlab_data:
                        plotly_fig = convert_figure(matlab_data)
                        figures.append(plotly_fig)
                    try:
                        os.remove(fig_file)
                    except OSError:
                        pass
            except Exception as exc:
                logger.warning("Figure conversion pipeline failed: %s", exc)

        # Files in temp_dir
        files: list = []
        if temp_dir is not None:
            try:
                td = Path(temp_dir)
                if td.exists():
                    files = [str(p) for p in td.iterdir() if p.is_file()]
            except Exception:
                pass

        return {
            "text": text,
            "variables": variables,
            "figures": figures,
            "files": files,
            "warnings": [],
            "errors": [],
        }

    @staticmethod
    def _safe_serialize(value: Any) -> Any:
        """Convert a MATLAB workspace value to a JSON-serializable Python type."""
        if value is None or isinstance(value, (bool, int, float, str)):
            return value
        if isinstance(value, (list, tuple)):
            return [JobExecutor._safe_serialize(v) for v in value]
        if isinstance(value, dict):
            return {k: JobExecutor._safe_serialize(v) for k, v in value.items()}
        # numpy arrays
        try:
            import numpy as np
            if isinstance(value, np.ndarray):
                return value.tolist()
            if isinstance(value, (np.integer, np.floating)):
                return value.item()
        except ImportError:
            pass
        # MATLAB arrays / matrices
        try:
            if hasattr(value, '_data'):
                return list(value._data)
            if hasattr(value, 'tolist'):
                return value.tolist()
        except Exception:
            pass
        # Fallback: repr
        return repr(value)

    @staticmethod
    def _error_result(job: Job) -> dict:
        """Return a failure result dict from a failed job."""
        return {
            "status": "failed",
            "job_id": job.job_id,
            "error": job.error,
        }
@@ -0,0 +1,95 @@
1
+ """Job models for MATLAB MCP Server.
2
+
3
+ Defines the Job data model and JobStatus enum used to track the lifecycle
4
+ of MATLAB code execution requests.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import time
9
+ import uuid
10
+ from dataclasses import dataclass, field
11
+ from enum import Enum
12
+ from typing import Any, Optional
13
+
14
+
15
class JobStatus(Enum):
    """Lifecycle states a Job can be in."""

    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"


@dataclass
class Job:
    """A single MATLAB code execution job and its lifecycle state.

    Parameters
    ----------
    session_id:
        ID of the session that owns this job.
    code:
        MATLAB code to execute.
    """

    session_id: str
    code: str
    job_id: str = field(default_factory=lambda: f"j-{uuid.uuid4()}")
    status: JobStatus = field(default=JobStatus.PENDING)
    engine_id: Optional[str] = field(default=None)
    result: Optional[Any] = field(default=None)
    error: Optional[dict] = field(default=None)
    created_at: float = field(default_factory=time.time)
    started_at: Optional[float] = field(default=None)
    completed_at: Optional[float] = field(default=None)
    future: Optional[Any] = field(default=None)

    # ------------------------------------------------------------------
    # State transitions
    # ------------------------------------------------------------------

    def mark_running(self, engine_id: str) -> None:
        """Record that execution has begun on the given engine."""
        self.started_at = time.time()
        self.engine_id = engine_id
        self.status = JobStatus.RUNNING

    def mark_completed(self, result: Any) -> None:
        """Record successful completion together with its result payload."""
        self.completed_at = time.time()
        self.result = result
        self.status = JobStatus.COMPLETED

    def mark_failed(
        self,
        error_type: str,
        message: str,
        matlab_id: Optional[str] = None,
        stack_trace: Optional[str] = None,
    ) -> None:
        """Record failure, capturing structured error details."""
        details = {
            "type": error_type,
            "message": message,
            "matlab_id": matlab_id,
            "stack_trace": stack_trace,
        }
        self.completed_at = time.time()
        self.error = details
        self.status = JobStatus.FAILED

    def mark_cancelled(self) -> None:
        """Record that the job was cancelled before finishing."""
        self.completed_at = time.time()
        self.status = JobStatus.CANCELLED

    # ------------------------------------------------------------------
    # Properties
    # ------------------------------------------------------------------

    @property
    def elapsed_seconds(self) -> Optional[float]:
        """Elapsed time in seconds since job started, or None if not started."""
        if self.started_at is None:
            return None
        # A still-running job measures against the current clock.
        finish = time.time() if self.completed_at is None else self.completed_at
        return finish - self.started_at