m8flow 1.1.1 → 1.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundled/backend/api/routes/flows.py +43 -3
- package/bundled/backend/api/routes/nodes.py +3 -3
- package/bundled/backend/config.py +4 -2
- package/bundled/backend/core/code_validator.py +1 -1
- package/bundled/backend/core/executor.py +67 -0
- package/bundled/backend/core/runtime.py +36 -1
- package/bundled/backend/main.py +21 -0
- package/bundled/backend/services/llm_service.py +611 -67
- package/bundled/backend/services/pipeline_executor.py +41 -0
- package/bundled/backend/templates.py +8 -13
- package/bundled/frontend-dist/assets/index-BI1hb_gi.js +45 -0
- package/bundled/frontend-dist/assets/index-D9h1Krrv.css +1 -0
- package/bundled/frontend-dist/index.html +2 -2
- package/package.json +1 -1
- package/bundled/frontend-dist/assets/index-CKUZ27n8.css +0 -1
- package/bundled/frontend-dist/assets/index-DNaB6zf0.js +0 -46

package/bundled/backend/api/routes/flows.py
CHANGED

@@ -1,3 +1,4 @@
+import json
 from fastapi import APIRouter, HTTPException, Request
 from pydantic import BaseModel
 from domain.models import FlowSchema

@@ -86,9 +87,20 @@ def _to_canvas_node(node: dict) -> dict:
 
 
 def _inject_api_key(http_request: Request) -> None:
-    """Read
-
-    llm_service.
+    """Read provider key + per-agent model headers and set per-request ContextVars."""
+    llm_service._request_api_key.set(http_request.headers.get("X-OpenRouter-Key") or None)
+    llm_service._request_gemini_key.set(http_request.headers.get("X-Gemini-Key") or None)
+    llm_service._request_mistral_key.set(http_request.headers.get("X-Mistral-Key") or None)
+
+    # Parse per-agent model matrix sent as JSON: {"architect":"gemini-2.5-flash", ...}
+    raw_agents = http_request.headers.get("X-Agent-Models")
+    if raw_agents:
+        try:
+            llm_service._request_agent_models.set(json.loads(raw_agents))
+        except Exception:
+            llm_service._request_agent_models.set(None)
+    else:
+        llm_service._request_agent_models.set(None)
 
 
 @router.post("/generate")
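
The `llm_service._request_*` setters above assume per-request ContextVars defined in `services/llm_service.py`. A minimal sketch of that pattern, with the fallback-to-config behavior assumed rather than taken from this diff:

# Sketch only: ContextVar names mirror the diff; the resolver and its fallback are assumptions.
from contextvars import ContextVar
from typing import Optional

_request_api_key: ContextVar[Optional[str]] = ContextVar("_request_api_key", default=None)
_request_gemini_key: ContextVar[Optional[str]] = ContextVar("_request_gemini_key", default=None)
_request_mistral_key: ContextVar[Optional[str]] = ContextVar("_request_mistral_key", default=None)
_request_agent_models: ContextVar[Optional[dict]] = ContextVar("_request_agent_models", default=None)

def _resolve_openrouter_key(config_default: str = "") -> str:
    # Prefer the key sent with this request; fall back to the server-side config value.
    return _request_api_key.get() or config_default

Because ContextVars are scoped per request/task, concurrent requests carrying different keys do not leak into each other.
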

@@ -257,6 +269,34 @@ async def ask_flow(http_request: Request, req: AskRequest):
     }
 
 
+class InterviewRequest(BaseModel):
+    """Trigger Phase 1 analysis when a CSV is first uploaded."""
+    context: str  # dataset summary string from the upload response
+
+
+@router.post("/interview")
+async def interview_flow(http_request: Request, req: InterviewRequest):
+    """
+    Phase 1 — Interactive interview entry point.
+
+    Called immediately after a CSV upload. Returns a conversational
+    analysis text (with [PLANNING], [ANALYSIS], [DEDUCTION],
+    [AWAITING CONFIRMATION] status labels) WITHOUT generating any nodes.
+    The frontend displays this as an assistant message and waits for user
+    confirmation before proceeding to pipeline generation.
+    """
+    _inject_api_key(http_request)
+    if not req.context.strip():
+        raise HTTPException(status_code=422, detail="Context cannot be empty")
+    try:
+        text = await llm_service.interview_dataset(req.context)
+        return {"result_type": "interview", "message": text}
+    except RuntimeError as exc:
+        raise HTTPException(status_code=503, detail=str(exc))
+    except Exception as exc:
+        raise HTTPException(status_code=500, detail=f"{type(exc).__name__}: {exc}")
+
+
 @router.post("/execute")
 def execute_flow(flow: FlowSchema):
     try:
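
A client-side sketch of the new two-phase interaction. The base URL, the "/api/flows" prefix, and the header values are assumptions; only the "/interview" route and the request/response fields come from the diff above:

# Hypothetical client for the Phase 1 interview endpoint.
import json
import requests

BASE = "http://localhost:8000"  # assumed local dev server
headers = {
    "X-Gemini-Key": "AIza-example",                                    # placeholder provider key
    "X-Agent-Models": json.dumps({"architect": "gemini-2.5-flash"}),   # optional per-agent model matrix
}

resp = requests.post(
    f"{BASE}/api/flows/interview",  # router prefix assumed; the "/interview" suffix is from the diff
    json={"context": "1,000 rows; columns: age, income, churn; target: churn"},
    headers=headers,
)
body = resp.json()
print(body["result_type"], body["message"])  # conversational analysis ending in [AWAITING CONFIRMATION]

After the user confirms, the frontend continues to the existing pipeline-generation endpoint.
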

package/bundled/backend/api/routes/nodes.py
CHANGED

@@ -77,9 +77,9 @@ async def generate_node_code_route(http_request: Request, req: GenerateCodeReque
     """Generate M8Flow-compatible Python node code from a plain-English description."""
     from services import llm_service
 
-
-
-    llm_service.
+    llm_service._request_api_key.set(http_request.headers.get("X-OpenRouter-Key") or None)
+    llm_service._request_gemini_key.set(http_request.headers.get("X-Gemini-Key") or None)
+    llm_service._request_mistral_key.set(http_request.headers.get("X-Mistral-Key") or None)
 
     if not req.description.strip():
         raise HTTPException(status_code=422, detail="Description cannot be empty")
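
The same per-request header convention applies to node code generation; a hypothetical call (the "/api/nodes" prefix is from main.py, the route suffix is assumed):

# Hypothetical request to the node code generator with a per-request provider key.
import requests

resp = requests.post(
    "http://localhost:8000/api/nodes/generate",  # suffix assumed; not shown in this diff
    json={"description": "Drop rows with missing values, then z-score normalize numeric columns"},
    headers={"X-OpenRouter-Key": "or-example"},  # placeholder key
)
print(resp.status_code)
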

package/bundled/backend/config.py
CHANGED

@@ -10,9 +10,11 @@ if _global_env.exists():
     load_dotenv(_global_env, override=False)  # don't override already-set vars
 
 class Config:
-    MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY",
-    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY",
+    MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY", "")
+    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
     OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY", "")
+    GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")    # Google AI Studio key
+    MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY", "")  # Mistral La Plateforme key
     STORAGE_DIR = os.getenv("M8FLOW_UPLOAD_DIR", os.getenv("STORAGE_DIR", "./storage"))
 
 config = Config()
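
A small sketch of how the new settings resolve at runtime; the values are placeholders, and setting them before the import matters because of the override=False note above:

# Illustrative only: already-set environment variables win over the bundled "" defaults.
import os
os.environ.setdefault("GEMINI_API_KEY", "AIza-example")   # placeholder
os.environ.setdefault("MISTRAL_API_KEY", "mst-example")   # placeholder

from config import config  # the module shown in this diff
print(bool(config.GEMINI_API_KEY), bool(config.MISTRAL_API_KEY))
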

package/bundled/backend/core/code_validator.py
CHANGED

@@ -41,7 +41,7 @@ ALLOWED_IMPORTS = frozenset({
     "numpy", "pandas", "scipy", "sklearn", "xgboost", "lightgbm",
     "statsmodels", "imblearn",
     # Plotting
-    "matplotlib", "seaborn", "plotly",
+    "matplotlib", "seaborn", "plotly", "mpl_toolkits",
     # Standard safe libs
     "math", "statistics", "itertools", "functools", "collections",
     "json", "re", "datetime", "typing",
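
The frozenset is the validator's import whitelist; adding "mpl_toolkits" lets generated nodes use 3-D plotting helpers such as mpl_toolkits.mplot3d. A generic sketch of how an AST-based whitelist check typically works (not the package's actual implementation):

# Generic sketch of an import-whitelist check; code_validator.py's real logic may differ.
import ast

ALLOWED_IMPORTS = frozenset({"numpy", "pandas", "matplotlib", "mpl_toolkits", "plotly"})

def imports_are_allowed(code: str) -> bool:
    for node in ast.walk(ast.parse(code)):
        if isinstance(node, ast.Import):
            roots = [alias.name.split(".")[0] for alias in node.names]
        elif isinstance(node, ast.ImportFrom):
            roots = [(node.module or "").split(".")[0]]
        else:
            continue
        if any(root not in ALLOWED_IMPORTS for root in roots):
            return False
    return True

print(imports_are_allowed("from mpl_toolkits.mplot3d import Axes3D"))  # True with the new entry
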

package/bundled/backend/core/executor.py
CHANGED

@@ -62,6 +62,49 @@ def _serialize_value(val: Any) -> Any:
             "shape": list(val.shape),
             "dtype": str(val.dtype),
         }
+
+    # ── sklearn displays ─────────────────────────────────────────────────────
+    is_display = hasattr(val, "figure_") and hasattr(val.figure_, "savefig")
+    if is_display:
+        try:
+            import io
+            import base64
+            buf = io.BytesIO()
+            val.figure_.savefig(buf, format="png", bbox_inches="tight")
+            b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
+            return {
+                "image_base64": b64,
+                "title": getattr(val, "estimator_name", type(val).__name__)
+            }
+        except Exception:
+            pass
+
+    # ── matplotlib / seaborn ─────────────────────────────────────────────────
+    is_figure = hasattr(val, "savefig") and type(val).__module__.startswith("matplotlib")
+    if is_figure:
+        try:
+            import io
+            import base64
+            buf = io.BytesIO()
+            val.savefig(buf, format="png", bbox_inches="tight")
+            b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
+            return {
+                "image_base64": b64,
+                "title": "Matplotlib / Seaborn Plot"
+            }
+        except Exception:
+            pass
+
+    # ── plotly ───────────────────────────────────────────────────────────────
+    if hasattr(val, "to_html") and type(val).__module__.startswith("plotly"):
+        try:
+            return {
+                "plotly_html": val.to_html(full_html=False, include_plotlyjs="cdn"),
+                "title": "Plotly Visualization"
+            }
+        except Exception:
+            pass
+
     # sklearn / any estimator
     if hasattr(val, "predict") and hasattr(val, "fit"):
         return {
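
As a concrete illustration, a node output holding a matplotlib Figure now serializes to a base64 PNG payload; this sketch reuses the same calls the new branch makes:

# Sketch of what the matplotlib branch of _serialize_value() produces for a Figure.
import base64
import io
import matplotlib
matplotlib.use("Agg")  # headless backend, as forced elsewhere in this diff
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [2, 4, 8])

buf = io.BytesIO()
fig.savefig(buf, format="png", bbox_inches="tight")
payload = {
    "image_base64": base64.b64encode(buf.getvalue()).decode("utf-8"),
    "title": "Matplotlib / Seaborn Plot",
}
print(payload["title"], len(payload["image_base64"]))
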

@@ -75,9 +118,33 @@ def _serialize_value(val: Any) -> Any:
     if isinstance(val, (np.bool_,)):
         return bool(val)
     if isinstance(val, dict):
+        if "data" in val and isinstance(val.get("data"), list) and "layout" in val:
+            try:
+                import json
+                import plotly.io as pio
+                val_json = json.dumps(val)
+                fig = pio.from_json(val_json)
+                return {
+                    "plotly_html": fig.to_html(full_html=False, include_plotlyjs="cdn"),
+                    "title": "Plotly Visualization"
+                }
+            except Exception:
+                pass
         return {k: _serialize_value(v) for k, v in val.items()}
     if isinstance(val, (list, tuple)):
         return [_serialize_value(v) for v in val]
+    if isinstance(val, str):
+        if val.startswith('{') and '"data":' in val and '"layout":' in val:
+            try:
+                import plotly.io as pio
+                fig = pio.from_json(val)
+                return {
+                    "plotly_html": fig.to_html(full_html=False, include_plotlyjs="cdn"),
+                    "title": "Plotly Visualization"
+                }
+            except Exception:
+                pass
+        return val
     return val
 
 
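
These dict and string branches cover nodes that return a Plotly figure in its JSON form rather than as a Figure object; for example (sketch):

# Sketch of the return shapes the new dict/str branches can now render.
import plotly.graph_objects as go

fig = go.Figure(data=[go.Bar(x=["a", "b"], y=[3, 5])])
as_dict = fig.to_plotly_json()  # {"data": [...], "layout": {...}} -> handled by the dict branch
as_json = fig.to_json()         # '{"data": ..., "layout": ...}'   -> handled by the str branch
print(sorted(as_dict), as_json[:40])
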

package/bundled/backend/core/runtime.py
CHANGED

@@ -3,6 +3,13 @@ import inspect
 import functools
 from typing import Any
 
+# Ensure matplotlib uses a non-interactive backend
+try:
+    import matplotlib
+    matplotlib.use('Agg')
+except ImportError:
+    pass
+
 
 @functools.lru_cache(maxsize=128)
 def _compile(code: str):

@@ -45,5 +52,33 @@ def execute_node_code(code: str, inputs: dict[str, Any]) -> dict[str, Any]:
 
     result = run_fn(**filtered)
     if not isinstance(result, dict):
-
+        result = {"output": result}
+
+    # ── Automatically capture unreturned matplotlib figures ──────────────────
+    import sys
+    if "matplotlib.pyplot" in sys.modules:
+        import matplotlib.pyplot as plt
+        figs = plt.get_fignums()
+        if figs:
+            # Identify figures already returned explicitly to avoid duplicates
+            returned_fig_ids = {id(v) for v in result.values() if hasattr(v, "savefig")}
+
+            import io, base64
+            for i, num in enumerate(figs):
+                fig = plt.figure(num)
+                if id(fig) in returned_fig_ids:
+                    continue
+
+                buf = io.BytesIO()
+                fig.savefig(buf, format="png", bbox_inches="tight")
+                b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
+                # Avoid overwriting explicit returns
+                key = f"plot_{i}" if i > 0 else "plot"
+                if key not in result:
+                    result[key] = {
+                        "image_base64": b64,
+                        "title": f"Figure {num}"
+                    }
+            plt.close("all")
+
     return result
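
With this auto-capture step, a node that draws with pyplot but never returns the figure still surfaces its plot; a minimal node sketch (hypothetical user code, not part of the package):

# A run() that creates an implicit pyplot figure and never returns it;
# execute_node_code() would now attach it to the result under the "plot" key.
def run(df):
    import matplotlib.pyplot as plt
    plt.figure()
    df["income"].plot(kind="hist")  # assumes a pandas DataFrame input named df
    return {"rows": len(df)}        # no figure returned; auto-capture adds result["plot"]
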
package/bundled/backend/main.py
CHANGED

@@ -1,6 +1,16 @@
 from fastapi import FastAPI, Request
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse
+
+# Force matplotlib to use a non-interactive backend (Agg)
+# This prevents "main thread is not in main loop" errors when running
+# inside background threads (like FastAPI/uvicorn).
+try:
+    import matplotlib
+    matplotlib.use('Agg')
+except ImportError:
+    pass
+
 from api.routes import flows, nodes, appstate
 import os
 import time

@@ -60,6 +70,17 @@ app.include_router(nodes.router, prefix="/api/nodes", tags=["Nodes"])
 app.include_router(appstate.router, prefix="/api/app/state", tags=["AppState"])
 
 
+@app.get("/v1/models")
+def openai_compat_models():
+    """
+    OpenAI-compatible /v1/models stub.
+    External tools (VS Code extensions, Cursor, Continue.dev …) probe this
+    endpoint to check if the server speaks the OpenAI protocol.
+    Return a minimal valid response so they get a 200 instead of a noisy 404.
+    """
+    return {"object": "list", "data": []}
+
+
 @app.get("/api/health")
 def health_check():
     from config import config
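
A quick probe of the new stub (host and port assumed):

# Hypothetical probe of the OpenAI-compat stub added above.
import requests

resp = requests.get("http://localhost:8000/v1/models")
print(resp.status_code, resp.json())  # expected: 200 {"object": "list", "data": []}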