loki-mode 6.59.0 → 6.60.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/SKILL.md +2 -2
- package/VERSION +1 -1
- package/autonomy/loki +150 -0
- package/autonomy/mirofish-adapter.py +1530 -0
- package/autonomy/run.sh +162 -4
- package/dashboard/__init__.py +1 -1
- package/docs/INSTALLATION.md +1 -1
- package/mcp/__init__.py +1 -1
- package/package.json +1 -1
- package/skills/00-index.md +8 -0
- package/skills/mirofish-integration.md +100 -0
|
@@ -0,0 +1,1530 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""MiroFish Market Validation Adapter for Loki Mode
|
|
3
|
+
|
|
4
|
+
Orchestrates the MiroFish swarm intelligence API to provide pre-build
|
|
5
|
+
market validation. Runs MiroFish's 4-stage async pipeline and normalizes
|
|
6
|
+
results into Loki Mode's .loki/ directory format.
|
|
7
|
+
|
|
8
|
+
Stdlib only - no pip dependencies required. Python 3.9+.
|
|
9
|
+
|
|
10
|
+
Usage:
|
|
11
|
+
python3 mirofish-adapter.py <prd-path> --output-dir .loki/ [--validate] [--json] [--url URL] [--background] [--max-rounds N]
|
|
12
|
+
python3 mirofish-adapter.py --status [--output-dir .loki/]
|
|
13
|
+
python3 mirofish-adapter.py --resume --output-dir .loki/ --url URL
|
|
14
|
+
python3 mirofish-adapter.py --docker-start --docker-image IMAGE [--port N]
|
|
15
|
+
python3 mirofish-adapter.py --docker-stop
|
|
16
|
+
python3 mirofish-adapter.py --health --url URL
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
import argparse
|
|
20
|
+
import json
|
|
21
|
+
import os
|
|
22
|
+
import re
|
|
23
|
+
import sys
|
|
24
|
+
import time
|
|
25
|
+
import tempfile
|
|
26
|
+
import hashlib
|
|
27
|
+
import signal
|
|
28
|
+
import subprocess
|
|
29
|
+
import urllib.request
|
|
30
|
+
import urllib.parse
|
|
31
|
+
import urllib.error
|
|
32
|
+
from pathlib import Path
|
|
33
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
34
|
+
from datetime import datetime, timezone
|
|
35
|
+
|
|
36
|
+
# Maximum artifact file size (10 MB)
|
|
37
|
+
MAX_ARTIFACT_SIZE = 10 * 1024 * 1024
|
|
38
|
+
|
|
39
|
+
# Container and connection defaults
|
|
40
|
+
CONTAINER_NAME = "loki-mirofish"
|
|
41
|
+
DEFAULT_URL = "http://localhost:5001"
|
|
42
|
+
DEFAULT_PORT = 5001
|
|
43
|
+
DEFAULT_MAX_ROUNDS = 100
|
|
44
|
+
DEFAULT_TIMEOUT = 3600 # 1 hour total pipeline timeout
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# ---------------------------------------------------------------------------
|
|
48
|
+
# Utility Functions
|
|
49
|
+
# ---------------------------------------------------------------------------
|
|
50
|
+
|
|
51
|
+
def _safe_read(path: Path) -> str:
    """Return the text content of *path*, enforcing the artifact size cap.

    Files larger than MAX_ARTIFACT_SIZE raise ValueError; undecodable
    bytes are replaced so callers always receive a usable string.
    """
    byte_count = path.stat().st_size
    if byte_count > MAX_ARTIFACT_SIZE:
        raise ValueError(
            f"Artifact too large ({byte_count} bytes, max {MAX_ARTIFACT_SIZE}): {path.name}"
        )
    return path.read_text(encoding="utf-8", errors="replace")
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _write_atomic(path: Path, content: str) -> None:
|
|
62
|
+
"""Write content to file atomically using temp file + rename."""
|
|
63
|
+
path.parent.mkdir(parents=True, exist_ok=True)
|
|
64
|
+
fd, tmp_path = tempfile.mkstemp(dir=path.parent, suffix=".tmp")
|
|
65
|
+
try:
|
|
66
|
+
with os.fdopen(fd, "w", encoding="utf-8") as f:
|
|
67
|
+
f.write(content)
|
|
68
|
+
os.replace(tmp_path, str(path))
|
|
69
|
+
except Exception:
|
|
70
|
+
try:
|
|
71
|
+
os.unlink(tmp_path)
|
|
72
|
+
except OSError:
|
|
73
|
+
pass
|
|
74
|
+
raise
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _write_json(path: Path, data: Any) -> None:
    """Serialize *data* as pretty-printed JSON and write it atomically."""
    serialized = json.dumps(data, indent=2)
    _write_atomic(path, serialized + "\n")
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def _now_iso() -> str:
|
|
83
|
+
"""Return current UTC timestamp in ISO-8601 format."""
|
|
84
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
# ---------------------------------------------------------------------------
|
|
88
|
+
# MiroFish HTTP Client
|
|
89
|
+
# ---------------------------------------------------------------------------
|
|
90
|
+
|
|
91
|
+
class MiroFishClient:
    """HTTP client for the MiroFish swarm intelligence API.

    Uses stdlib urllib only -- no third-party dependencies.
    All methods return parsed JSON dicts. Raises RuntimeError on errors.
    """

    def __init__(self, base_url: str = DEFAULT_URL, timeout: int = 30):
        # Strip trailing slash so path concatenation never double-slashes.
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout

    def _request(
        self,
        method: str,
        path: str,
        json_data: Optional[dict] = None,
        form_data: Optional[bytes] = None,
        content_type: Optional[str] = None,
    ) -> dict:
        """Make HTTP request. Returns parsed JSON response.

        Raises RuntimeError on non-2xx status or connection error.
        """
        url = f"{self.base_url}{path}"
        headers: Dict[str, str] = {"Accept": "application/json"}
        body: Optional[bytes] = None

        if json_data is not None:
            body = json.dumps(json_data).encode("utf-8")
            headers["Content-Type"] = "application/json"
        elif form_data is not None:
            body = form_data
            if content_type:
                headers["Content-Type"] = content_type

        req = urllib.request.Request(url, data=body, headers=headers, method=method)

        try:
            with urllib.request.urlopen(req, timeout=self.timeout) as resp:
                resp_body = resp.read().decode("utf-8")
                if not resp_body.strip():
                    return {}
                return json.loads(resp_body)
        except urllib.error.HTTPError as exc:
            # Best-effort capture of the error response body for diagnostics.
            resp_text = ""
            try:
                resp_text = exc.read().decode("utf-8", errors="replace")
            except Exception:
                pass
            raise RuntimeError(
                f"MiroFish API error: {exc.code} {exc.reason} "
                f"on {method} {path}: {resp_text}"
            ) from exc
        except urllib.error.URLError as exc:
            raise RuntimeError(
                f"MiroFish connection error on {method} {path}: {exc.reason}"
            ) from exc
        except Exception as exc:
            raise RuntimeError(
                f"MiroFish request failed on {method} {path}: {exc}"
            ) from exc

    def health_check(self) -> bool:
        """GET /health - returns True if MiroFish is healthy."""
        try:
            resp = self._request("GET", "/health")
            return resp.get("status") == "ok" or bool(resp)
        except RuntimeError:
            return False

    # -- Stage 1: Graph Construction ------------------------------------------

    def generate_ontology(
        self,
        prd_path: str,
        simulation_requirement: str,
        project_name: str = "",
    ) -> dict:
        """POST /api/graph/ontology/generate - multipart/form-data upload.

        Uploads the PRD file and simulation_requirement text.
        Returns: {project_id, ontology: {entity_types, edge_types, analysis_summary}}
        """
        # Boundary only needs to be unique per request, not cryptographic.
        boundary = hashlib.md5(
            f"{time.time()}-{prd_path}".encode()
        ).hexdigest()

        parts: List[bytes] = []

        # File field. BUG FIX: the computed filename was previously unused
        # and a hard-coded placeholder was sent in Content-Disposition;
        # send the real PRD filename so the server sees the correct name.
        filename = Path(prd_path).name
        file_content = Path(prd_path).read_bytes()
        parts.append(
            f"--{boundary}\r\n"
            f'Content-Disposition: form-data; name="prd_file"; filename="{filename}"\r\n'
            f"Content-Type: application/octet-stream\r\n\r\n".encode("utf-8")
            + file_content
            + b"\r\n"
        )

        # Text fields
        for field_name, field_value in [
            ("simulation_requirement", simulation_requirement),
            ("project_name", project_name),
        ]:
            parts.append(
                f"--{boundary}\r\n"
                f'Content-Disposition: form-data; name="{field_name}"\r\n\r\n'
                f"{field_value}\r\n".encode("utf-8")
            )

        parts.append(f"--{boundary}--\r\n".encode("utf-8"))
        form_body = b"".join(parts)
        ct = f"multipart/form-data; boundary={boundary}"

        return self._request(
            "POST",
            "/api/graph/ontology/generate",
            form_data=form_body,
            content_type=ct,
        )

    def build_graph(self, project_id: str) -> dict:
        """POST /api/graph/build - start async graph building.

        Returns: {project_id, graph_id, task_id, status}
        """
        return self._request(
            "POST", "/api/graph/build", json_data={"project_id": project_id}
        )

    def poll_graph_build(
        self, task_id: str, max_wait: int = 600, interval: int = 5
    ) -> dict:
        """POST /api/graph/build/status - poll until completed or timeout.

        Returns final status dict with graph_id.
        """
        deadline = time.time() + max_wait
        while time.time() < deadline:
            resp = self._request(
                "POST",
                "/api/graph/build/status",
                json_data={"task_id": task_id},
            )
            # Some endpoints wrap payloads in a "data" envelope; unwrap if so.
            data = resp.get("data", resp)
            status = data.get("status", "")
            if status == "completed":
                return data
            if status in ("failed", "error"):
                raise RuntimeError(
                    f"Graph build failed: {data.get('error', 'unknown')}"
                )
            time.sleep(interval)
        raise RuntimeError(
            f"Graph build timed out after {max_wait}s (task_id={task_id})"
        )

    # -- Stage 2: Simulation Setup --------------------------------------------

    def create_simulation(
        self,
        project_id: str,
        graph_id: str,
        enable_twitter: bool = True,
        enable_reddit: bool = True,
    ) -> dict:
        """POST /api/simulation/create

        Returns: {simulation_id, status}
        """
        return self._request(
            "POST",
            "/api/simulation/create",
            json_data={
                "project_id": project_id,
                "graph_id": graph_id,
                "enable_twitter": enable_twitter,
                "enable_reddit": enable_reddit,
            },
        )

    def prepare_simulation(self, simulation_id: str) -> dict:
        """POST /api/simulation/prepare - async profile generation.

        Returns: {simulation_id, task_id, status, expected_entities_count}
        """
        return self._request(
            "POST",
            "/api/simulation/prepare",
            json_data={"simulation_id": simulation_id},
        )

    def poll_prepare(
        self, task_id: str, max_wait: int = 900, interval: int = 10
    ) -> dict:
        """POST /api/simulation/prepare/status - poll until ready."""
        deadline = time.time() + max_wait
        while time.time() < deadline:
            resp = self._request(
                "POST",
                "/api/simulation/prepare/status",
                json_data={"task_id": task_id},
            )
            data = resp.get("data", resp)
            status = data.get("status", "")
            if status == "completed":
                return data
            if status in ("failed", "error"):
                raise RuntimeError(
                    f"Simulation prepare failed: {data.get('error', 'unknown')}"
                )
            time.sleep(interval)
        raise RuntimeError(
            f"Simulation prepare timed out after {max_wait}s (task_id={task_id})"
        )

    # -- Stage 3: Simulation Execution ----------------------------------------

    def start_simulation(
        self,
        simulation_id: str,
        platform: str = "parallel",
        max_rounds: int = 100,
    ) -> dict:
        """POST /api/simulation/start

        Returns: {simulation_id, runner_status, process_pid}
        """
        return self._request(
            "POST",
            "/api/simulation/start",
            json_data={
                "simulation_id": simulation_id,
                "platform": platform,
                "max_rounds": max_rounds,
            },
        )

    def poll_run_status(
        self, simulation_id: str, max_wait: int = 1800, interval: int = 15
    ) -> dict:
        """GET /api/simulation/{simulation_id}/run-status - poll until completed.

        Returns: {runner_status, current_round, total_rounds, progress}
        """
        deadline = time.time() + max_wait
        while time.time() < deadline:
            resp = self._request(
                "GET", f"/api/simulation/{simulation_id}/run-status"
            )
            data = resp.get("data", resp)
            status = data.get("runner_status", "")
            if status == "completed":
                return data
            # "stopped" is also terminal: a stopped run never completes.
            if status in ("failed", "error", "stopped"):
                raise RuntimeError(
                    f"Simulation run failed: status={status}, "
                    f"detail={data.get('error', 'unknown')}"
                )
            time.sleep(interval)
        raise RuntimeError(
            f"Simulation run timed out after {max_wait}s "
            f"(simulation_id={simulation_id})"
        )

    def stop_simulation(self, simulation_id: str) -> dict:
        """POST /api/simulation/stop"""
        return self._request(
            "POST",
            "/api/simulation/stop",
            json_data={"simulation_id": simulation_id},
        )

    # -- Stage 4: Report ------------------------------------------------------

    def generate_report(self, simulation_id: str) -> dict:
        """POST /api/report/generate - async report generation.

        Returns: {report_id, task_id, status}
        """
        return self._request(
            "POST",
            "/api/report/generate",
            json_data={"simulation_id": simulation_id},
        )

    def poll_report(
        self, task_id: str, max_wait: int = 600, interval: int = 5
    ) -> dict:
        """POST /api/report/generate/status - poll until completed."""
        deadline = time.time() + max_wait
        while time.time() < deadline:
            resp = self._request(
                "POST",
                "/api/report/generate/status",
                json_data={"task_id": task_id},
            )
            data = resp.get("data", resp)
            status = data.get("status", "")
            if status == "completed":
                return data
            if status in ("failed", "error"):
                raise RuntimeError(
                    f"Report generation failed: {data.get('error', 'unknown')}"
                )
            time.sleep(interval)
        raise RuntimeError(
            f"Report generation timed out after {max_wait}s (task_id={task_id})"
        )

    def get_report(self, report_id: str) -> dict:
        """GET /api/report/{report_id}

        Returns: {report_id, simulation_id, status, sections: [{index, title, content}]}
        """
        return self._request("GET", f"/api/report/{report_id}")
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
# ---------------------------------------------------------------------------
|
|
411
|
+
# PRD Translation
|
|
412
|
+
# ---------------------------------------------------------------------------
|
|
413
|
+
|
|
414
|
+
def extract_simulation_context(prd_path: Path) -> dict:
    """Parse PRD markdown to extract simulation seed material.

    Looks for (case-insensitive):
    - Project name: from first H1 heading, or filename
    - Simulation requirement: from ## Problem Statement, ## Value Proposition,
      ## Overview, ## Summary sections
    - Target audience: from ## Target Audience, ## Users, ## User Personas sections

    Fallback: first 2000 chars of PRD as simulation_requirement.

    Returns: {
        "project_name": str,
        "simulation_requirement": str,
        "target_audience": str,
        "prd_summary": str
    }
    """
    text = _safe_read(prd_path)

    result: Dict[str, str] = {
        "project_name": "",
        "simulation_requirement": "",
        "target_audience": "",
        "prd_summary": "",
    }

    # Project name: first H1 heading wins, otherwise a title-cased filename.
    h1 = re.search(r"^#\s+(.+)$", text, re.MULTILINE)
    if h1:
        result["project_name"] = h1.group(1).strip()
    else:
        result["project_name"] = (
            prd_path.stem.replace("-", " ").replace("_", " ").title()
        )

    # Section keywords, in priority order.
    req_keywords = (
        "problem statement",
        "value proposition",
        "overview",
        "summary",
    )
    audience_keywords = (
        "target audience",
        "users",
        "user personas",
    )

    # First matching section wins for each field.
    for heading, body in _split_sections(text).items():
        lowered = heading.lower().strip()
        if not result["simulation_requirement"] and any(
            kw in lowered for kw in req_keywords
        ):
            result["simulation_requirement"] = body.strip()
        if not result["target_audience"] and any(
            kw in lowered for kw in audience_keywords
        ):
            result["target_audience"] = body.strip()

    # Fallback: lead of the PRD stands in for a missing requirement section.
    if not result["simulation_requirement"]:
        result["simulation_requirement"] = text[:2000].strip()

    # Short excerpt kept for logging purposes.
    result["prd_summary"] = text[:200].strip()

    return result
|
|
490
|
+
|
|
491
|
+
|
|
492
|
+
def _split_sections(text: str, level: int = 2) -> Dict[str, str]:
|
|
493
|
+
"""Split markdown text into sections by heading level.
|
|
494
|
+
|
|
495
|
+
Returns {heading_text: body_text} preserving order.
|
|
496
|
+
"""
|
|
497
|
+
prefix = "#" * level
|
|
498
|
+
pattern = re.compile(rf"^{prefix}\s+(.+)$", re.MULTILINE)
|
|
499
|
+
matches = list(pattern.finditer(text))
|
|
500
|
+
sections: Dict[str, str] = {}
|
|
501
|
+
for i, m in enumerate(matches):
|
|
502
|
+
heading = m.group(1).strip()
|
|
503
|
+
start = m.end()
|
|
504
|
+
end = matches[i + 1].start() if i + 1 < len(matches) else len(text)
|
|
505
|
+
sections[heading] = text[start:end].strip()
|
|
506
|
+
return sections
|
|
507
|
+
|
|
508
|
+
|
|
509
|
+
# ---------------------------------------------------------------------------
|
|
510
|
+
# Pipeline State Management
|
|
511
|
+
# ---------------------------------------------------------------------------
|
|
512
|
+
|
|
513
|
+
def _update_pipeline_state(output_dir: Path, state: dict) -> None:
    """Persist *state* to mirofish/pipeline-state.json, refreshing its timestamp.

    Mutates state["updated_at"] in place before the atomic write.
    """
    state["updated_at"] = _now_iso()
    _write_json(output_dir / "mirofish" / "pipeline-state.json", state)
|
|
518
|
+
|
|
519
|
+
|
|
520
|
+
def _load_pipeline_state(output_dir: Path) -> Optional[dict]:
    """Return the persisted pipeline state dict, or None if missing/unreadable."""
    state_path = output_dir / "mirofish" / "pipeline-state.json"
    if state_path.exists():
        try:
            return json.loads(_safe_read(state_path))
        except (json.JSONDecodeError, ValueError, OSError):
            # Corrupt or oversized state file is treated the same as absent.
            return None
    return None
|
|
530
|
+
|
|
531
|
+
|
|
532
|
+
# ---------------------------------------------------------------------------
|
|
533
|
+
# Report Normalization
|
|
534
|
+
# ---------------------------------------------------------------------------
|
|
535
|
+
|
|
536
|
+
# Sentiment keyword sets for simple heuristic analysis
|
|
537
|
+
_POSITIVE_KEYWORDS = [
|
|
538
|
+
"positive", "strong", "adoption", "interest", "favorable",
|
|
539
|
+
"loved", "benefit", "opportunity", "success", "growth",
|
|
540
|
+
"promising", "enthusiastic", "excited", "valuable",
|
|
541
|
+
]
|
|
542
|
+
_NEGATIVE_KEYWORDS = [
|
|
543
|
+
"concern", "risk", "negative", "reluctance", "resistance",
|
|
544
|
+
"problem", "issue", "challenge", "difficult", "unfavorable",
|
|
545
|
+
"reject", "opposed", "cautious", "wary", "worried",
|
|
546
|
+
]
|
|
547
|
+
|
|
548
|
+
|
|
549
|
+
def normalize_report(
    report_data: dict, simulation_data: Optional[dict] = None
) -> dict:
    """Convert MiroFish report into Loki Mode advisory format.

    Analyzes report sections to extract:
    - overall_sentiment: positive/negative/mixed
    - confidence: low/medium/high
    - sentiment_score: 0.0-1.0 (keyword-based heuristic)
    - key_concerns: list of risk/concern strings
    - feature_rankings: list of {feature, reception_score}
    - notable_quotes: interesting agent reactions (max 5)
    - recommendation: proceed/review_concerns/reconsider

    The sentiment analysis uses simple keyword matching since we cannot
    import NLP libraries.

    Returns the full mirofish-context.json structure.
    """
    # Responses may arrive wrapped in a {"data": ...} envelope; unwrap if so.
    data = report_data.get("data", report_data)
    sections = data.get("sections", [])

    # Combine all section content for keyword analysis (lowercased once).
    all_content = " ".join(s.get("content", "") for s in sections).lower()

    # --- Sentiment score (keyword heuristic) ---
    # Each keyword contributes at most 1 regardless of how often it occurs.
    pos_count = sum(1 for kw in _POSITIVE_KEYWORDS if kw in all_content)
    neg_count = sum(1 for kw in _NEGATIVE_KEYWORDS if kw in all_content)
    total_kw = pos_count + neg_count
    if total_kw > 0:
        sentiment_score = round(pos_count / total_kw, 2)
    else:
        sentiment_score = 0.5  # neutral default

    # Overall sentiment classification (thresholds mirror the
    # recommendation thresholds below).
    if sentiment_score >= 0.65:
        overall_sentiment = "positive"
    elif sentiment_score <= 0.35:
        overall_sentiment = "negative"
    else:
        overall_sentiment = "mixed"

    # --- Confidence level ---
    # Based on available data richness: more sections plus either a large
    # agent population or a long run raises confidence.
    section_count = len(sections)
    entity_count = 0
    round_count = 0
    if simulation_data:
        sim_inner = simulation_data.get("data", simulation_data)
        entity_count = sim_inner.get("expected_entities_count", 0)
        round_count = sim_inner.get("total_rounds", 0)

    if section_count >= 5 and (entity_count >= 50 or round_count >= 80):
        confidence = "high"
    elif section_count >= 3:
        confidence = "medium"
    else:
        confidence = "low"

    # --- Key concerns ---
    # Pulled only from sections whose title mentions concern/risk.
    key_concerns: List[str] = []
    for section in sections:
        title = section.get("title", "").lower()
        content = section.get("content", "")
        if "concern" in title or "risk" in title:
            # Extract numbered items ("1. <concern>[: detail]"); the optional
            # second group (detail after a colon) is matched but not kept.
            for m in re.finditer(
                r"\d+\.\s+([^.]+?)(?::\s*(.+?))?(?=\d+\.\s|\Z)",
                content,
                re.DOTALL,
            ):
                concern = m.group(1).strip()
                if concern:
                    key_concerns.append(concern)

    # --- Feature rankings ---
    feature_rankings: List[Dict[str, Any]] = []
    for section in sections:
        title = section.get("title", "").lower()
        content = section.get("content", "")
        if "feature" in title or "ranking" in title or "reception" in title:
            # Extract lines like: N. Feature name (reception: 0.XX)
            for m in re.finditer(
                r"\d+\.\s+(.+?)\s*\(reception:\s*([\d.]+)\)",
                content,
            ):
                feature_rankings.append({
                    "feature": m.group(1).strip(),
                    "reception_score": float(m.group(2)),
                })

    # --- Notable quotes ---
    # Single-quoted runs of at least 10 chars, capped at 5 quotes overall.
    notable_quotes: List[str] = []
    for section in sections:
        title = section.get("title", "").lower()
        content = section.get("content", "")
        if "notable" in title or "reaction" in title or "quote" in title:
            for m in re.finditer(r"'([^']{10,})'", content):
                notable_quotes.append(m.group(1).strip())
                if len(notable_quotes) >= 5:
                    break
        if len(notable_quotes) >= 5:
            break

    # --- Recommendation ---
    # Positive sentiment with few concerns => proceed; clearly negative =>
    # reconsider; everything in between => review_concerns.
    if sentiment_score >= 0.65 and len(key_concerns) <= 2:
        recommendation = "proceed"
    elif sentiment_score <= 0.35:
        recommendation = "reconsider"
    else:
        recommendation = "review_concerns"

    return {
        "source": "mirofish",
        "version": "1.0",
        "generated_at": _now_iso(),
        "report_id": data.get("report_id", ""),
        "simulation_id": data.get("simulation_id", ""),
        "analysis": {
            "overall_sentiment": overall_sentiment,
            "sentiment_score": sentiment_score,
            "confidence": confidence,
            "recommendation": recommendation,
            "key_concerns": key_concerns,
            "feature_rankings": feature_rankings,
            "notable_quotes": notable_quotes,
        },
        "section_count": section_count,
        "raw_sections": [
            # Fall back to list position when a section carries no index.
            {"title": s.get("title", ""), "index": s.get("index", i)}
            for i, s in enumerate(sections)
        ],
    }
|
|
682
|
+
|
|
683
|
+
|
|
684
|
+
def build_mirofish_tasks(report_data: dict) -> list:
    """Extract actionable items from report sections.

    Looks for:
    - Sections with "risk" or "concern" in title -> high priority tasks
    - Sections with "recommendation" in title -> medium priority tasks
    - Bullet points starting with "Should", "Must", "Consider" -> tasks

    Returns list of {id, title, description, priority, source, category}.
    """
    payload = report_data.get("data", report_data)
    # Sentences opening with an action keyword, anchored at line start,
    # after ". ", or after a numbered-list marker like "3. ".
    action_re = re.compile(
        r"(?:^|(?<=\.\s)|(?<=\d\.\s))"
        r"((?:Should|Must|Consider|Recommendation:)\s+[^.]+\.?)",
        re.MULTILINE,
    )

    tasks: List[Dict[str, Any]] = []
    for section in payload.get("sections", []):
        heading = section.get("title", "").lower()
        body = section.get("content", "")

        # Priority/category derive from the section the sentence came from.
        if "risk" in heading or "concern" in heading:
            priority, category = "high", "risk_mitigation"
        elif "recommendation" in heading:
            priority, category = "medium", "recommendation"
        else:
            priority, category = "low", "insight"

        for match in action_re.finditer(body):
            sentence = match.group(1).strip().rstrip(".")
            tasks.append({
                "id": f"mirofish-{len(tasks) + 1:03d}",
                "title": sentence[:120],
                "description": sentence,
                "priority": priority,
                "source": "mirofish",
                "category": category,
            })

    return tasks
|
|
734
|
+
|
|
735
|
+
|
|
736
|
+
# ---------------------------------------------------------------------------
|
|
737
|
+
# Pipeline Summary Generation
|
|
738
|
+
# ---------------------------------------------------------------------------
|
|
739
|
+
|
|
740
|
+
def _build_summary_markdown(context: dict, tasks: list) -> str:
|
|
741
|
+
"""Build a human-readable summary markdown from normalized data."""
|
|
742
|
+
lines: List[str] = []
|
|
743
|
+
analysis = context.get("analysis", {})
|
|
744
|
+
|
|
745
|
+
lines.append("# MiroFish Market Validation Summary")
|
|
746
|
+
lines.append("")
|
|
747
|
+
lines.append(f"Generated: {context.get('generated_at', 'unknown')}")
|
|
748
|
+
lines.append(f"Report ID: {context.get('report_id', 'unknown')}")
|
|
749
|
+
lines.append(f"Simulation ID: {context.get('simulation_id', 'unknown')}")
|
|
750
|
+
lines.append("")
|
|
751
|
+
|
|
752
|
+
lines.append("## Overall Assessment")
|
|
753
|
+
lines.append("")
|
|
754
|
+
lines.append(f"- Sentiment: {analysis.get('overall_sentiment', 'unknown')}")
|
|
755
|
+
lines.append(f"- Sentiment Score: {analysis.get('sentiment_score', 'N/A')}")
|
|
756
|
+
lines.append(f"- Confidence: {analysis.get('confidence', 'unknown')}")
|
|
757
|
+
lines.append(f"- Recommendation: {analysis.get('recommendation', 'unknown')}")
|
|
758
|
+
lines.append("")
|
|
759
|
+
|
|
760
|
+
concerns = analysis.get("key_concerns", [])
|
|
761
|
+
if concerns:
|
|
762
|
+
lines.append("## Key Concerns")
|
|
763
|
+
lines.append("")
|
|
764
|
+
for concern in concerns:
|
|
765
|
+
lines.append(f"- {concern}")
|
|
766
|
+
lines.append("")
|
|
767
|
+
|
|
768
|
+
rankings = analysis.get("feature_rankings", [])
|
|
769
|
+
if rankings:
|
|
770
|
+
lines.append("## Feature Rankings")
|
|
771
|
+
lines.append("")
|
|
772
|
+
for rank in rankings:
|
|
773
|
+
lines.append(
|
|
774
|
+
f"- {rank.get('feature', 'unknown')}: "
|
|
775
|
+
f"{rank.get('reception_score', 'N/A')}"
|
|
776
|
+
)
|
|
777
|
+
lines.append("")
|
|
778
|
+
|
|
779
|
+
quotes = analysis.get("notable_quotes", [])
|
|
780
|
+
if quotes:
|
|
781
|
+
lines.append("## Notable Agent Reactions")
|
|
782
|
+
lines.append("")
|
|
783
|
+
for quote in quotes:
|
|
784
|
+
lines.append(f"> {quote}")
|
|
785
|
+
lines.append("")
|
|
786
|
+
|
|
787
|
+
if tasks:
|
|
788
|
+
lines.append("## Action Items")
|
|
789
|
+
lines.append("")
|
|
790
|
+
for task in tasks:
|
|
791
|
+
prio = task.get("priority", "medium")
|
|
792
|
+
lines.append(f"- [{prio.upper()}] {task.get('title', '')}")
|
|
793
|
+
lines.append("")
|
|
794
|
+
|
|
795
|
+
return "\n".join(lines)
|
|
796
|
+
|
|
797
|
+
|
|
798
|
+
# ---------------------------------------------------------------------------
|
|
799
|
+
# Pipeline Orchestration
|
|
800
|
+
# ---------------------------------------------------------------------------
|
|
801
|
+
|
|
802
|
+
def run_pipeline(
    client: MiroFishClient,
    prd_path: Path,
    output_dir: Path,
    max_rounds: int = DEFAULT_MAX_ROUNDS,
) -> int:
    """Execute the 4-stage MiroFish pipeline.

    Stage 1: Generate ontology from PRD (upload file + requirement)
    Stage 2: Build knowledge graph (async, poll)
    Stage 3: Create + prepare + run simulation (async, poll)
    Stage 4: Generate + retrieve report (async, poll)

    After each stage:
    - Write stage output to output_dir/mirofish/
    - Update pipeline-state.json

    On completion:
    - Write mirofish-context.json (normalized advisory)
    - Write mirofish-tasks.json (queue-ready tasks)
    - Write mirofish-summary.md (human-readable)

    Args:
        client: Configured MiroFish API client used for all stage calls.
        prd_path: Path to the PRD file driving the simulation.
        output_dir: Root output directory (artifacts go under output_dir/mirofish).
        max_rounds: Maximum simulation rounds passed to start_simulation.

    Returns 0 on success, 1 on failure.
    """
    mf_dir = output_dir / "mirofish"
    mf_dir.mkdir(parents=True, exist_ok=True)

    # Compute PRD hash for caching / dedup
    prd_content = _safe_read(prd_path)
    prd_hash = hashlib.sha256(prd_content.encode("utf-8")).hexdigest()[:16]

    # Initialize pipeline state. The state dict is persisted after every
    # mutation so --status and --resume can observe progress from another
    # process even if this one dies mid-stage.
    state: Dict[str, Any] = {
        "version": "1.0",
        "prd_path": str(prd_path.resolve()),
        "prd_hash": prd_hash,
        "base_url": client.base_url,
        "started_at": _now_iso(),
        "updated_at": _now_iso(),
        "status": "running",
        "current_stage": 1,
        "pid": os.getpid(),
        "stages": {
            "1_ontology": {"status": "pending"},
            "2_graph": {"status": "pending"},
            "3_simulation": {"status": "pending"},
            "4_report": {"status": "pending"},
        },
        "error": None,
    }
    _update_pipeline_state(output_dir, state)

    try:
        # Extract simulation context from PRD
        context = extract_simulation_context(prd_path)
        print(
            f"MiroFish: project={context['project_name']!r} "
            f"prd_hash={prd_hash}"
        )

        # -- Stage 1: Ontology ------------------------------------------------
        print("MiroFish Stage 1/4: Generating ontology...")
        onto_resp = client.generate_ontology(
            prd_path=str(prd_path),
            simulation_requirement=context["simulation_requirement"],
            project_name=context["project_name"],
        )
        # API responses may or may not be wrapped in a "data" envelope;
        # unwrap defensively. A missing required id is handled by the
        # KeyError branch at the bottom.
        onto_data = onto_resp.get("data", onto_resp)
        project_id = onto_data["project_id"]

        state["stages"]["1_ontology"] = {
            "status": "completed",
            "project_id": project_id,
            "completed_at": _now_iso(),
        }
        state["current_stage"] = 2
        _update_pipeline_state(output_dir, state)
        _write_json(mf_dir / "ontology.json", onto_resp)
        print(f"MiroFish Stage 1/4: Ontology complete (project={project_id})")

        # -- Stage 2: Graph Build ---------------------------------------------
        print("MiroFish Stage 2/4: Building knowledge graph...")
        build_resp = client.build_graph(project_id)
        build_data = build_resp.get("data", build_resp)
        task_id = build_data["task_id"]

        # Record the async task id before polling so an interrupted run
        # leaves a breadcrumb in pipeline-state.json.
        state["stages"]["2_graph"]["status"] = "running"
        state["stages"]["2_graph"]["task_id"] = task_id
        _update_pipeline_state(output_dir, state)

        # Blocks until the graph build finishes (or the client raises).
        graph_result = client.poll_graph_build(task_id)
        graph_id = graph_result.get("graph_id", "")

        state["stages"]["2_graph"] = {
            "status": "completed",
            "graph_id": graph_id,
            "completed_at": _now_iso(),
        }
        state["current_stage"] = 3
        _update_pipeline_state(output_dir, state)
        _write_json(mf_dir / "graph.json", graph_result)
        print(f"MiroFish Stage 2/4: Graph complete (graph_id={graph_id})")

        # -- Stage 3: Simulation ----------------------------------------------
        print("MiroFish Stage 3/4: Running simulation...")

        # Create simulation
        sim_resp = client.create_simulation(project_id, graph_id)
        sim_data = sim_resp.get("data", sim_resp)
        simulation_id = sim_data["simulation_id"]

        state["stages"]["3_simulation"]["status"] = "running"
        state["stages"]["3_simulation"]["simulation_id"] = simulation_id
        _update_pipeline_state(output_dir, state)

        # Prepare simulation (async profile generation)
        prep_resp = client.prepare_simulation(simulation_id)
        prep_data = prep_resp.get("data", prep_resp)
        prep_task_id = prep_data["task_id"]

        # prep_result is reused below as simulation_data for normalize_report.
        prep_result = client.poll_prepare(prep_task_id)

        # Start simulation run
        start_resp = client.start_simulation(
            simulation_id, max_rounds=max_rounds
        )
        _write_json(mf_dir / "simulation-start.json", start_resp)

        # Poll until simulation completes
        sim_result = client.poll_run_status(simulation_id)

        state["stages"]["3_simulation"] = {
            "status": "completed",
            "simulation_id": simulation_id,
            "completed_at": _now_iso(),
        }
        state["current_stage"] = 4
        _update_pipeline_state(output_dir, state)
        _write_json(mf_dir / "simulation-result.json", sim_result)
        print(
            f"MiroFish Stage 3/4: Simulation complete "
            f"(simulation_id={simulation_id})"
        )

        # -- Stage 4: Report --------------------------------------------------
        print("MiroFish Stage 4/4: Generating report...")
        report_resp = client.generate_report(simulation_id)
        report_data = report_resp.get("data", report_resp)
        report_task_id = report_data["task_id"]

        state["stages"]["4_report"]["status"] = "running"
        state["stages"]["4_report"]["task_id"] = report_task_id
        _update_pipeline_state(output_dir, state)

        report_result = client.poll_report(report_task_id)
        report_id = report_result.get("report_id", "")

        # Fetch full report
        full_report = client.get_report(report_id)
        _write_json(mf_dir / "report.json", full_report)

        state["stages"]["4_report"] = {
            "status": "completed",
            "report_id": report_id,
            "completed_at": _now_iso(),
        }
        _update_pipeline_state(output_dir, state)
        print(f"MiroFish Stage 4/4: Report complete (report_id={report_id})")

        # -- Normalize and write final outputs --------------------------------
        normalized = normalize_report(full_report, simulation_data=prep_result)
        tasks = build_mirofish_tasks(full_report)
        summary_md = _build_summary_markdown(normalized, tasks)

        _write_json(output_dir / "mirofish-context.json", normalized)
        _write_json(output_dir / "mirofish-tasks.json", tasks)
        _write_atomic(output_dir / "mirofish-summary.md", summary_md)

        # Mark pipeline complete
        state["status"] = "completed"
        state["completed_at"] = _now_iso()
        _update_pipeline_state(output_dir, state)

        print(
            f"MiroFish: Pipeline complete. "
            f"Sentiment={normalized['analysis']['overall_sentiment']} "
            f"Recommendation={normalized['analysis']['recommendation']} "
            f"Tasks={len(tasks)}"
        )
        return 0

    # Failure handlers: persist the error into pipeline-state.json so
    # --status can report it, then return nonzero.
    except RuntimeError as exc:
        # Raised by the client on API/polling failures.
        state["status"] = "failed"
        state["error"] = str(exc)
        _update_pipeline_state(output_dir, state)
        print(f"ERROR: MiroFish pipeline failed: {exc}", file=sys.stderr)
        return 1
    except KeyError as exc:
        # A required id was absent from an API response payload.
        state["status"] = "failed"
        state["error"] = f"Missing key in API response: {exc}"
        _update_pipeline_state(output_dir, state)
        print(
            f"ERROR: MiroFish pipeline failed (missing key): {exc}",
            file=sys.stderr,
        )
        return 1
    except Exception as exc:
        # Top-level boundary: anything else still leaves state consistent.
        state["status"] = "failed"
        state["error"] = str(exc)
        _update_pipeline_state(output_dir, state)
        print(
            f"ERROR: MiroFish pipeline failed unexpectedly: {exc}",
            file=sys.stderr,
        )
        return 1
|
|
1017
|
+
|
|
1018
|
+
|
|
1019
|
+
def run_background(
    prd_path: str,
    output_dir: str,
    base_url: str,
    max_rounds: int = DEFAULT_MAX_ROUNDS,
) -> int:
    """Fork child process to run pipeline. Parent returns 0 immediately.

    Uses os.fork() (POSIX only). Child process:
    1. Detaches from terminal (setsid)
    2. Redirects stdout/stderr to output_dir/mirofish/pipeline.log
    3. Writes PID to pipeline-state.json
    4. Runs run_pipeline()
    5. Updates state to completed/failed

    Parent returns 0 immediately.
    """
    # Ensure output directory exists before fork
    mf_dir = Path(output_dir) / "mirofish"
    mf_dir.mkdir(parents=True, exist_ok=True)

    pid = os.fork()
    if pid > 0:
        # Parent process
        print(f"MiroFish: Background pipeline started (pid={pid})")
        return 0

    # Child process
    try:
        # New session: detach from the controlling terminal so the child
        # survives the parent's shell exiting.
        os.setsid()

        # Redirect stdout/stderr to log file
        log_path = mf_dir / "pipeline.log"
        log_fd = os.open(
            str(log_path),
            os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
            0o644,
        )
        os.dup2(log_fd, 1)  # stdout
        os.dup2(log_fd, 2)  # stderr
        os.close(log_fd)

        # Close stdin
        devnull = os.open(os.devnull, os.O_RDONLY)
        os.dup2(devnull, 0)
        os.close(devnull)

        # Set up signal handler so graceful termination updates state
        def _handle_term(signum: int, frame: Any) -> None:
            # Record the cancellation in the shared state file before dying,
            # so --status does not report a stale "running".
            state = _load_pipeline_state(Path(output_dir))
            if state:
                state["status"] = "cancelled"
                state["updated_at"] = _now_iso()
                state["error"] = f"Cancelled by signal {signum}"
                _update_pipeline_state(Path(output_dir), state)
            # 130 follows the 128+SIGINT shell convention for interrupted jobs.
            os._exit(130)

        signal.signal(signal.SIGTERM, _handle_term)
        signal.signal(signal.SIGINT, _handle_term)

        client = MiroFishClient(base_url=base_url)
        exit_code = run_pipeline(
            client=client,
            prd_path=Path(prd_path),
            output_dir=Path(output_dir),
            max_rounds=max_rounds,
        )
        os._exit(exit_code)  # noqa: use _exit in forked child
    except Exception:
        # Last-resort exit for any child-setup failure. Intentionally silent:
        # stderr may already be redirected or closed at this point, and the
        # forked child must never fall back into the parent's code path.
        os._exit(1)
|
|
1089
|
+
|
|
1090
|
+
|
|
1091
|
+
# ---------------------------------------------------------------------------
|
|
1092
|
+
# Docker Management
|
|
1093
|
+
# ---------------------------------------------------------------------------
|
|
1094
|
+
|
|
1095
|
+
def _run_docker(
    args: List[str], check: bool = False, capture: bool = True
) -> subprocess.CompletedProcess:
    """Invoke the docker CLI with *args* and return the CompletedProcess.

    Output is captured as text by default; a 120s timeout guards against
    a hung Docker daemon. When *check* is True a nonzero exit raises
    subprocess.CalledProcessError.
    """
    return subprocess.run(
        ["docker", *args],
        capture_output=capture,
        text=True,
        timeout=120,
        check=check,
    )
|
|
1107
|
+
|
|
1108
|
+
|
|
1109
|
+
def check_container() -> str:
    """Report the MiroFish container state.

    Returns: 'running', 'stopped', or 'not_found'.
    """
    probe = _run_docker(
        ["inspect", "--format", "{{.State.Status}}", CONTAINER_NAME]
    )
    # `docker inspect` exits nonzero when no such container exists.
    if probe.returncode != 0:
        return "not_found"
    # Any non-running state (created, exited, paused, ...) counts as stopped.
    return "running" if probe.stdout.strip().lower() == "running" else "stopped"
|
|
1123
|
+
|
|
1124
|
+
|
|
1125
|
+
def start_container(
    image: str,
    port: int = DEFAULT_PORT,
    env_vars: Optional[Dict[str, str]] = None,
) -> bool:
    """Ensure the MiroFish Docker container is up and healthy.

    Check-before-create: reuse a running container, restart a stopped one,
    or create a fresh one from *image* (published on {port}:5001).
    LLM_API_KEY and ZEP_API_KEY are forwarded from os.environ when set.
    Finally polls /health (up to 60s) before declaring success.
    """
    status = check_container()

    if status == "running":
        print(f"MiroFish: Container {CONTAINER_NAME} already running")
        return True

    if status == "stopped":
        print(f"MiroFish: Starting stopped container {CONTAINER_NAME}")
        result = _run_docker(["start", CONTAINER_NAME])
        if result.returncode != 0:
            print(
                f"ERROR: Failed to start container: {result.stderr}",
                file=sys.stderr,
            )
            return False
    else:
        # not_found -- create new container
        print(f"MiroFish: Creating container {CONTAINER_NAME} from {image}")
        run_args = [
            "run", "-d",
            "--name", CONTAINER_NAME,
            "-p", f"{port}:5001",
        ]

        # Merge caller-supplied env with the pass-through API keys; the
        # caller's values are overridden by os.environ when both exist.
        merged_env = dict(env_vars or {})
        for key in ("LLM_API_KEY", "ZEP_API_KEY"):
            val = os.environ.get(key)
            if val:
                merged_env[key] = val
        for key, val in merged_env.items():
            run_args += ["-e", f"{key}={val}"]

        run_args.append(image)
        result = _run_docker(run_args)
        if result.returncode != 0:
            print(
                f"ERROR: Failed to create container: {result.stderr}",
                file=sys.stderr,
            )
            return False

    # Wait for healthy
    base_url = f"http://localhost:{port}"
    if not wait_for_healthy(base_url):
        print(
            f"ERROR: Container started but health check failed at {base_url}",
            file=sys.stderr,
        )
        return False
    print(f"MiroFish: Container healthy at {base_url}")
    return True
|
|
1193
|
+
|
|
1194
|
+
|
|
1195
|
+
def stop_container() -> bool:
    """Stop and remove the MiroFish container.

    Graceful `docker stop --time 10` followed by `docker rm`. A failed
    removal is only a warning since the container is already stopped.
    """
    status = check_container()
    if status == "not_found":
        print(f"MiroFish: Container {CONTAINER_NAME} not found")
        return True

    if status == "running":
        print(f"MiroFish: Stopping container {CONTAINER_NAME}")
        stop_res = _run_docker(["stop", "--time", "10", CONTAINER_NAME])
        if stop_res.returncode != 0:
            print(
                f"ERROR: Failed to stop container: {stop_res.stderr}",
                file=sys.stderr,
            )
            return False

    # Remove the (now) stopped container.
    rm_res = _run_docker(["rm", CONTAINER_NAME])
    if rm_res.returncode == 0:
        print(f"MiroFish: Container {CONTAINER_NAME} removed")
    else:
        print(
            f"WARNING: Failed to remove container: {rm_res.stderr}",
            file=sys.stderr,
        )
        # Not fatal -- the container is stopped either way.

    return True
|
|
1227
|
+
|
|
1228
|
+
|
|
1229
|
+
def wait_for_healthy(base_url: str, max_wait: int = 60) -> bool:
    """Poll GET {base_url}/health every 2s; True once healthy, False on timeout."""
    probe = MiroFishClient(base_url=base_url, timeout=5)
    stop_at = time.time() + max_wait
    while time.time() < stop_at:
        if probe.health_check():
            return True
        time.sleep(2)
    return False
|
|
1238
|
+
|
|
1239
|
+
|
|
1240
|
+
# ---------------------------------------------------------------------------
|
|
1241
|
+
# CLI Entry Points
|
|
1242
|
+
# ---------------------------------------------------------------------------
|
|
1243
|
+
|
|
1244
|
+
def validate_prd(prd_path: Path, base_url: str = DEFAULT_URL) -> int:
    """Check that a PRD file exists and can be parsed for MiroFish.

    Returns 0 when the PRD yields a usable simulation requirement, 1
    otherwise. (*base_url* is accepted for CLI symmetry but not used --
    validation is purely local.)
    """
    # Guard clauses: the file must exist and be non-empty.
    error = None
    if not prd_path.exists():
        error = f"ERROR: PRD file not found: {prd_path}"
    elif prd_path.stat().st_size == 0:
        error = f"ERROR: PRD file is empty: {prd_path}"
    if error is not None:
        print(error, file=sys.stderr)
        return 1

    try:
        context = extract_simulation_context(prd_path)
    except Exception as exc:
        print(f"ERROR: Failed to parse PRD: {exc}", file=sys.stderr)
        return 1

    if not context.get("simulation_requirement"):
        print(
            "ERROR: Could not extract simulation requirement from PRD",
            file=sys.stderr,
        )
        return 1

    audience = "found" if context.get("target_audience") else "not found"
    print(
        f"MiroFish PRD validation: OK\n"
        f" Project: {context['project_name']}\n"
        f" Requirement length: {len(context['simulation_requirement'])} chars\n"
        f" Target audience: {audience}"
    )
    return 0
|
|
1274
|
+
|
|
1275
|
+
|
|
1276
|
+
def show_status(output_dir: Path) -> int:
    """Print a human-readable pipeline status from pipeline-state.json.

    Always returns 0: a missing state file means "no pipeline has run",
    which is a valid answer rather than an error.
    """
    state = _load_pipeline_state(output_dir)
    if state is None:
        print("MiroFish: No pipeline state found. No pipeline has been run.")
        return 0

    print("MiroFish Pipeline Status")
    print(f" Status: {state.get('status', 'unknown')}")
    print(f" Started: {state.get('started_at', 'unknown')}")
    print(f" Updated: {state.get('updated_at', 'unknown')}")
    print(f" Current stage: {state.get('current_stage', 'unknown')}")
    print(f" PRD: {state.get('prd_path', 'unknown')}")

    if state.get("completed_at"):
        print(f" Completed: {state['completed_at']}")

    if state.get("error"):
        print(f" Error: {state['error']}")

    stages = state.get("stages", {})
    for stage_name, stage_data in sorted(stages.items()):
        stage_status = stage_data.get("status", "unknown")
        # Show the first identifier this stage produced, if any.
        extra = ""
        for id_key, label in (
            ("project_id", "project"),
            ("graph_id", "graph"),
            ("simulation_id", "simulation"),
            ("report_id", "report"),
        ):
            if stage_data.get(id_key):
                extra = f" ({label}={stage_data[id_key]})"
                break
        print(f" Stage {stage_name}: {stage_status}{extra}")

    # Liveness check for a background pipeline. Signal 0 probes existence
    # without delivering a signal. BUGFIX: the previous version caught bare
    # OSError, so PermissionError (process exists but is owned by another
    # user) was misreported as "not running".
    pid = state.get("pid")
    if pid and state.get("status") == "running":
        try:
            os.kill(pid, 0)
        except ProcessLookupError:
            print(f" Process: not running (pid={pid} -- may have crashed)")
        except PermissionError:
            # EPERM still proves the process exists.
            print(f" Process: running (pid={pid})")
        else:
            print(f" Process: running (pid={pid})")

    return 0
|
|
1320
|
+
|
|
1321
|
+
|
|
1322
|
+
def resume_pipeline(output_dir: Path, base_url: str, max_rounds: int) -> int:
    """Resume an interrupted pipeline recorded in pipeline-state.json.

    Returns 1 when there is no state or the original PRD is gone, 0 when
    the pipeline had already finished, otherwise the run_pipeline() result.
    """
    state = _load_pipeline_state(output_dir)
    if state is None:
        print(
            "ERROR: No pipeline state found. Cannot resume.",
            file=sys.stderr,
        )
        return 1

    if state.get("status") == "completed":
        print("MiroFish: Pipeline already completed. Nothing to resume.")
        return 0

    prd_path = Path(state.get("prd_path", ""))
    if not prd_path.exists():
        print(
            f"ERROR: Original PRD not found at {prd_path}",
            file=sys.stderr,
        )
        return 1

    # NOTE: resume currently re-runs the whole pipeline rather than
    # skipping completed stages; stage skipping would require persisting
    # every intermediate ID.
    print("MiroFish: Resuming pipeline (re-running from start)...")
    return run_pipeline(
        client=MiroFishClient(base_url=base_url),
        prd_path=prd_path,
        output_dir=output_dir,
        max_rounds=max_rounds,
    )
|
|
1355
|
+
|
|
1356
|
+
|
|
1357
|
+
def main() -> None:
    """CLI entry point: parse arguments and dispatch to the chosen mode."""
    parser = argparse.ArgumentParser(
        description="MiroFish Market Validation Adapter for Loki Mode",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=(
            "Examples:\n"
            " python3 mirofish-adapter.py ./prd.md --output-dir .loki/\n"
            " python3 mirofish-adapter.py ./prd.md --validate\n"
            " python3 mirofish-adapter.py ./prd.md --json --url http://mf:5001\n"
            " python3 mirofish-adapter.py --status --output-dir .loki/\n"
            " python3 mirofish-adapter.py --health --url http://localhost:5001\n"
            " python3 mirofish-adapter.py --docker-start --docker-image mirofish/app\n"
            " python3 mirofish-adapter.py --docker-stop\n"
        ),
    )

    # Positional
    parser.add_argument("prd_path", nargs="?", help="Path to PRD file")

    # Modes
    parser.add_argument(
        "--validate", action="store_true", help="Validate PRD only"
    )
    parser.add_argument(
        "--json",
        action="store_true",
        dest="as_json",
        help="Output as JSON to stdout",
    )
    parser.add_argument(
        "--status", action="store_true", help="Show pipeline status"
    )
    parser.add_argument(
        "--resume",
        action="store_true",
        help="Resume interrupted pipeline",
    )
    parser.add_argument(
        "--health", action="store_true", help="Health check only"
    )
    parser.add_argument(
        "--docker-start",
        action="store_true",
        help="Start MiroFish container",
    )
    parser.add_argument(
        "--docker-stop",
        action="store_true",
        help="Stop MiroFish container",
    )

    # Options
    parser.add_argument(
        "--output-dir", default=".loki", help="Output directory"
    )
    parser.add_argument(
        "--url", default=DEFAULT_URL, help="MiroFish API URL"
    )
    parser.add_argument(
        "--background",
        action="store_true",
        help="Run in background",
    )
    parser.add_argument(
        "--max-rounds",
        type=int,
        default=DEFAULT_MAX_ROUNDS,
        help="Maximum simulation rounds",
    )
    parser.add_argument(
        "--docker-image", help="Docker image for --docker-start"
    )
    parser.add_argument(
        "--port",
        type=int,
        default=DEFAULT_PORT,
        help="Port for Docker container",
    )
    # NOTE(review): --timeout is parsed (and honors LOKI_MIROFISH_TIMEOUT)
    # but args.timeout is never consumed below -- confirm whether it should
    # be threaded into the client/pipeline or removed.
    parser.add_argument(
        "--timeout",
        type=int,
        default=int(os.environ.get("LOKI_MIROFISH_TIMEOUT", str(DEFAULT_TIMEOUT))),
        help="Total pipeline timeout in seconds (default: 3600)",
    )

    args = parser.parse_args()

    # -- Route to appropriate handler --
    # Mode flags are checked in fixed priority order; the first match wins
    # and exits the process.

    if args.docker_stop:
        ok = stop_container()
        sys.exit(0 if ok else 1)

    if args.docker_start:
        if not args.docker_image:
            print(
                "ERROR: --docker-image is required with --docker-start",
                file=sys.stderr,
            )
            sys.exit(1)
        ok = start_container(args.docker_image, port=args.port)
        sys.exit(0 if ok else 1)

    if args.health:
        client = MiroFishClient(base_url=args.url)
        ok = client.health_check()
        if ok:
            print(f"MiroFish: Healthy at {args.url}")
        else:
            print(f"MiroFish: NOT healthy at {args.url}", file=sys.stderr)
        sys.exit(0 if ok else 1)

    if args.status:
        output_dir = Path(args.output_dir)
        sys.exit(show_status(output_dir))

    if args.resume:
        output_dir = Path(args.output_dir)
        sys.exit(resume_pipeline(output_dir, args.url, args.max_rounds))

    # Remaining modes require prd_path
    if not args.prd_path:
        parser.error("prd_path is required for this mode")

    prd_path = Path(args.prd_path)

    if args.validate:
        sys.exit(validate_prd(prd_path, base_url=args.url))

    # Main pipeline run
    if not prd_path.exists():
        print(f"ERROR: PRD file not found: {prd_path}", file=sys.stderr)
        sys.exit(1)

    output_dir = Path(args.output_dir)

    if args.as_json:
        # JSON mode: extract context and output to stdout (no pipeline)
        context = extract_simulation_context(prd_path)
        output = {
            "source": "mirofish",
            "prd_path": str(prd_path),
            "project_name": context["project_name"],
            "simulation_requirement": context["simulation_requirement"],
            "target_audience": context["target_audience"],
            "prd_summary": context["prd_summary"],
        }
        print(json.dumps(output, indent=2))
        sys.exit(0)

    if args.background:
        # Detached run: fork a child and return immediately (POSIX only).
        sys.exit(
            run_background(
                prd_path=str(prd_path),
                output_dir=str(output_dir),
                base_url=args.url,
                max_rounds=args.max_rounds,
            )
        )

    # Foreground pipeline run
    client = MiroFishClient(base_url=args.url)
    sys.exit(
        run_pipeline(
            client=client,
            prd_path=prd_path,
            output_dir=output_dir,
            max_rounds=args.max_rounds,
        )
    )
|
|
1527
|
+
|
|
1528
|
+
|
|
1529
|
+
if __name__ == "__main__":
    # Script entry point: delegate to the CLI dispatcher.
    main()
|