m8flow 1.0.2 → 1.1.1
This diff shows the changes between publicly available package versions as published to their public registries. It is provided for informational purposes only.
- package/bundled/backend/Dockerfile +41 -0
- package/bundled/backend/add_nodes.py +416 -0
- package/bundled/backend/api/routes/appstate.py +102 -0
- package/bundled/backend/api/routes/flows.py +64 -5
- package/bundled/backend/api/routes/nodes.py +25 -1
- package/bundled/backend/core/code_validator.py +2 -0
- package/bundled/backend/core/executor.py +19 -3
- package/bundled/backend/main.py +16 -4
- package/bundled/backend/requirements.txt +27 -6
- package/bundled/backend/services/llm_service.py +984 -108
- package/bundled/backend/services/self_healer.py +1 -1
- package/bundled/backend/temp.json +0 -0
- package/bundled/backend/templates.json +0 -0
- package/bundled/backend/templates.py +2907 -745
- package/bundled/backend/warmup.py +65 -0
- package/bundled/frontend-dist/assets/index-CKUZ27n8.css +1 -0
- package/bundled/frontend-dist/assets/index-DNaB6zf0.js +46 -0
- package/bundled/frontend-dist/index.html +2 -2
- package/lib/backend.js +184 -35
- package/lib/ports.js +42 -0
- package/lib/run.js +42 -15
- package/lib/setup.js +143 -59
- package/package.json +5 -4
- package/scripts/check-docker.js +35 -0
- package/bundled/frontend-dist/assets/index-BAQ3lKsy.css +0 -1
- package/bundled/frontend-dist/assets/index-CZCCzeUC.js +0 -41

package/bundled/backend/Dockerfile
@@ -0,0 +1,41 @@
+# ── M8Flow Backend ────────────────────────────────────────────────────────────
+# Multi-stage build: all heavy ML packages are baked into the image during
+# `docker build`. The result is a warm container that starts in ~2 seconds
+# with zero pip-install delay at runtime.
+
+FROM python:3.11-slim AS base
+
+WORKDIR /app
+
+# ── System dependencies (needed to compile numpy/scipy/xgboost C extensions) ──
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    gcc g++ libgomp1 curl && \
+    rm -rf /var/lib/apt/lists/*
+
+# ── Python dependencies (separate layer — cached unless requirements.txt changes)
+COPY requirements.txt .
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir -r requirements.txt
+
+# ── Pre-import warmup — compiles .pyc bytecode during build so the first
+# real request doesn't pay any import overhead.
+COPY warmup.py .
+RUN python warmup.py
+
+# ── Application code (last layer — changes here don't bust the dep cache) ────
+COPY . .
+
+# ── Runtime config ────────────────────────────────────────────────────────────
+ENV PYTHONUNBUFFERED=1
+ENV M8FLOW_ENV=docker
+
+EXPOSE 8000
+
+HEALTHCHECK --interval=10s --timeout=5s --start-period=5s --retries=3 \
+    CMD curl -f http://localhost:8000/api/health || exit 1
+
+CMD ["uvicorn", "main:app", \
+     "--host", "0.0.0.0", \
+     "--port", "8000", \
+     "--log-level", "warning", \
+     "--workers", "1"]
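The warmup step above references warmup.py (+65 lines in this release), whose contents are not shown in this hunk. As a rough illustration of the technique the Dockerfile comments describe (importing heavy packages at build time so their compiled bytecode lands in the image layer), a minimal sketch might look like the following; the module list and structure are assumptions, not the shipped file:

# warmup sketch (hypothetical; the real warmup.py is not shown in this diff)
import compileall
import importlib

# Importing each heavy package at build time ensures its .pyc files exist in
# the image layer, so the first real request pays no import/compile cost.
for name in ["numpy", "pandas", "sklearn", "scipy", "statsmodels", "xgboost", "lightgbm"]:
    try:
        importlib.import_module(name)
    except ImportError:
        pass  # tolerate an optional dependency missing from requirements.txt

# Byte-compile the application's own source tree as well.
compileall.compile_dir(".", quiet=1)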
package/bundled/backend/add_nodes.py
@@ -0,0 +1,416 @@
+import re
+
+with open("templates.py", "r") as f:
+    content = f.read()
+
+# The string constants
+NEW_CODE = """
+BINARY_ENCODER = '''import pandas as pd
+try:
+    import category_encoders as ce
+except ImportError:
+    pass
+
+def run(data, columns: str = "") -> dict:
+    df = data.copy()
+    cols = [c.strip() for c in columns.split(",") if c.strip()] if columns.strip() else \
+        df.select_dtypes(include=["object", "category"]).columns.tolist()
+
+    if not cols:
+        return {"data": df, "summary": "No columns to encode"}
+
+    try:
+        encoder = ce.BinaryEncoder(cols=cols)
+        df = encoder.fit_transform(df)
+        return {"data": df, "summary": f"Binary encoded {len(cols)} columns"}
+    except NameError:
+        return {"data": df, "summary": "category_encoders not installed. Skipping."}
+'''
+
+FREQUENCY_ENCODER = '''import pandas as pd
+
+def run(data, columns: str = "") -> dict:
+    df = data.copy()
+    cols = [c.strip() for c in columns.split(",") if c.strip()] if columns.strip() else \
+        df.select_dtypes(include=["object", "category"]).columns.tolist()
+
+    if not cols:
+        return {"data": df, "summary": "No columns to encode"}
+
+    for col in cols:
+        if col in df.columns:
+            freq = df[col].value_counts(normalize=True)
+            df[col] = df[col].map(freq)
+
+    return {"data": df, "summary": f"Frequency encoded {len(cols)} columns"}
+'''
+
+ORDINAL_ENCODER = '''import pandas as pd
+from sklearn.preprocessing import OrdinalEncoder
+
+def run(data, columns: str = "") -> dict:
+    df = data.copy()
+    cols = [c.strip() for c in columns.split(",") if c.strip()] if columns.strip() else \
+        df.select_dtypes(include=["object", "category"]).columns.tolist()
+
+    if not cols:
+        return {"data": df, "summary": "No columns to encode"}
+
+    encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
+    df[cols] = encoder.fit_transform(df[cols].astype(str))
+
+    return {"data": df, "summary": f"Ordinal encoded {len(cols)} columns"}
+'''
+
+VIF_FEATURE_SELECTION = '''import pandas as pd
+import numpy as np
+from statsmodels.stats.outliers_influence import variance_inflation_factor
+
+def run(data, threshold: float = 5.0) -> dict:
+    df = data.copy()
+    num_cols = df.select_dtypes(include=[np.number]).columns.tolist()
+
+    if len(num_cols) < 2:
+        return {"data": df, "summary": "Not enough numeric columns for VIF"}
+
+    X = df[num_cols].dropna()
+    dropped = []
+
+    while True:
+        vif_data = pd.DataFrame()
+        vif_data["feature"] = X.columns
+        vif_data["VIF"] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
+
+        max_vif = vif_data["VIF"].max()
+        if max_vif > threshold:
+            max_feat = vif_data.sort_values("VIF", ascending=False).iloc[0]["feature"]
+            X = X.drop(columns=[max_feat])
+            dropped.append(max_feat)
+        else:
+            break
+
+    df = df.drop(columns=dropped)
+    return {"data": df, "summary": f"Dropped {len(dropped)} features due to high VIF: {dropped}"}
+'''
+
+PCA_WHITENING = '''import pandas as pd
+import numpy as np
+from sklearn.decomposition import PCA
+
+def run(data, n_components: int = 0, whiten: bool = True) -> dict:
+    df = data.copy()
+    num_cols = df.select_dtypes(include=[np.number]).columns.tolist()
+
+    if not num_cols:
+        return {"data": df, "summary": "No numeric columns for PCA"}
+
+    n = n_components if n_components > 0 else None
+    pca = PCA(n_components=n, whiten=whiten)
+
+    pca_features = pca.fit_transform(df[num_cols].fillna(0))
+
+    feature_names = [f"pca_{i}" for i in range(pca_features.shape[1])]
+    pca_df = pd.DataFrame(pca_features, columns=feature_names, index=df.index)
+
+    df = df.drop(columns=num_cols)
+    df = pd.concat([df, pca_df], axis=1)
+
+    return {"data": df, "summary": f"Applied PCA whitening, created {len(feature_names)} components"}
+'''
+
+K_MEANS_CLUSTERING_FEATURES = '''import pandas as pd
+import numpy as np
+from sklearn.cluster import KMeans
+
+def run(data, n_clusters: int = 3, random_state: int = 42) -> dict:
+    df = data.copy()
+    num_cols = df.select_dtypes(include=[np.number]).columns.tolist()
+
+    if not num_cols:
+        return {"data": df, "summary": "No numeric columns for KMeans"}
+
+    kmeans = KMeans(n_clusters=n_clusters, random_state=random_state, n_init='auto')
+    df['ClusterID'] = kmeans.fit_predict(df[num_cols].fillna(0))
+
+    return {"data": df, "summary": f"Added ClusterID with {n_clusters} clusters"}
+'''
+
+XGBOOST_NODE = '''import pandas as pd
+import numpy as np
+
+def run(X_train, X_test, y_train, task_type: str = "classifier", n_estimators: int = 100, learning_rate: float = 0.1, max_depth: int = 3) -> dict:
+    if task_type.lower() == "classifier":
+        from xgboost import XGBClassifier
+        model = XGBClassifier(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth, use_label_encoder=False, eval_metric='logloss')
+    else:
+        from xgboost import XGBRegressor
+        model = XGBRegressor(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth)
+
+    model.fit(X_train, y_train)
+    y_pred = model.predict(X_test)
+
+    return {"model": model, "y_pred": y_pred}
+'''
+
+LIGHTGBM_NODE = '''import pandas as pd
+
+def run(X_train, X_test, y_train, task_type: str = "classifier", n_estimators: int = 100, learning_rate: float = 0.1, max_depth: int = -1) -> dict:
+    if task_type.lower() == "classifier":
+        from lightgbm import LGBMClassifier
+        model = LGBMClassifier(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth)
+    else:
+        from lightgbm import LGBMRegressor
+        model = LGBMRegressor(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth)
+
+    model.fit(X_train, y_train)
+    y_pred = model.predict(X_test)
+
+    return {"model": model, "y_pred": y_pred}
+'''
+
+ADABOOST_NODE = '''import pandas as pd
+
+def run(X_train, X_test, y_train, task_type: str = "classifier", n_estimators: int = 50, learning_rate: float = 1.0) -> dict:
+    if task_type.lower() == "classifier":
+        from sklearn.ensemble import AdaBoostClassifier
+        model = AdaBoostClassifier(n_estimators=n_estimators, learning_rate=learning_rate)
+    else:
+        from sklearn.ensemble import AdaBoostRegressor
+        model = AdaBoostRegressor(n_estimators=n_estimators, learning_rate=learning_rate)
+
+    model.fit(X_train, y_train)
+    y_pred = model.predict(X_test)
+
+    return {"model": model, "y_pred": y_pred}
+'''
+
+VOTING_ENSEMBLE = '''import pandas as pd
+import numpy as np
+
+def run(model1, model2, model3, X_train, X_test, y_train, task_type: str = "classifier", voting: str = "hard") -> dict:
+    estimators = [('m1', model1), ('m2', model2), ('m3', model3)]
+
+    if task_type.lower() == "classifier":
+        from sklearn.ensemble import VotingClassifier
+        model = VotingClassifier(estimators=estimators, voting=voting)
+    else:
+        from sklearn.ensemble import VotingRegressor
+        model = VotingRegressor(estimators=estimators)
+
+    model.fit(X_train, y_train)
+    y_pred = model.predict(X_test)
+
+    return {"model": model, "y_pred": y_pred}
+'''
+
+LAG_FEATURE_GENERATOR = '''import pandas as pd
+from typing import Annotated
+
+def run(data, column: Annotated[str, "column"], lags: int = 3) -> dict:
+    df = data.copy()
+    if column not in df.columns:
+        raise ValueError(f"Column '{column}' not found.")
+
+    for i in range(1, lags + 1):
+        df[f"{column}_lag_{i}"] = df[column].shift(i)
+
+    return {"data": df, "summary": f"Created {lags} lag features for {column}"}
+'''
+
+ROLLING_WINDOW_STATS = '''import pandas as pd
+from typing import Annotated
+
+def run(data, column: Annotated[str, "column"], window: int = 7) -> dict:
+    df = data.copy()
+    if column not in df.columns:
+        raise ValueError(f"Column '{column}' not found.")
+
+    df[f"{column}_roll_mean_{window}"] = df[column].rolling(window=window).mean()
+    df[f"{column}_roll_std_{window}"] = df[column].rolling(window=window).std()
+
+    return {"data": df, "summary": f"Created rolling window ({window}) stats for {column}"}
+'''
+
+PERMUTATION_IMPORTANCE = '''import numpy as np
+from sklearn.inspection import permutation_importance
+
+def run(model, X_test, y_test, scoring: str = "accuracy", n_repeats: int = 5, random_state: int = 42) -> dict:
+    result = permutation_importance(model, X_test, y_test, scoring=scoring, n_repeats=n_repeats, random_state=random_state)
+
+    importances = result.importances_mean
+
+    if hasattr(X_test, 'columns'):
+        names = X_test.columns
+    else:
+        names = [f"Feature {i}" for i in range(len(importances))]
+
+    indices = np.argsort(importances)[::-1]
+
+    top_importances = importances[indices].tolist()
+    top_names = [str(names[i]) for i in indices]
+
+    return {
+        "features": top_names,
+        "importances": top_importances,
+        "summary": "Permutation Importance"
+    }
+'''
+
+LEARNING_CURVE_DATA = '''import numpy as np
+from sklearn.model_selection import learning_curve
+
+def run(model, X_train, y_train, cv_folds: int = 5, scoring: str = "accuracy") -> dict:
+    train_sizes, train_scores, test_scores = learning_curve(
+        model, X_train, y_train, cv=cv_folds, scoring=scoring,
+        train_sizes=np.linspace(0.1, 1.0, 10), random_state=42
+    )
+
+    train_mean = np.mean(train_scores, axis=1).tolist()
+    test_mean = np.mean(test_scores, axis=1).tolist()
+    sizes = train_sizes.tolist()
+
+    return {
+        "train_sizes": sizes,
+        "train_scores": train_mean,
+        "val_scores": test_mean,
+        "scoring": scoring,
+        "summary": "Learning Curve Data"
+    }
+'''
+
+LIFT_GAIN_CHARTS = '''import numpy as np
+
+def run(model, X_test, y_test) -> dict:
+    if not hasattr(model, "predict_proba"):
+        raise ValueError("Model does not support predict_proba required for lift/gain charts.")
+
+    classes = model.classes_
+    if len(classes) != 2:
+        raise ValueError("Lift/Gain charts require binary classification.")
+
+    y_scores = model.predict_proba(X_test)[:, 1]
+    pos_label = classes[1]
+
+    y_true_bin = (np.array(y_test) == pos_label).astype(int)
+
+    indices = np.argsort(y_scores)[::-1]
+    y_true_sorted = y_true_bin[indices]
+
+    total_positives = y_true_bin.sum()
+    total_samples = len(y_true_bin)
+
+    cum_positives = np.cumsum(y_true_sorted)
+
+    gain = cum_positives / max(total_positives, 1)
+
+    count = np.arange(1, total_samples + 1)
+    lift = (cum_positives / count) / (total_positives / total_samples)
+
+    deciles = np.linspace(0, 1, 11)[1:]
+    gain_deciles = [gain[min(int(d * total_samples) - 1, total_samples - 1)] for d in deciles]
+    lift_deciles = [lift[min(int(d * total_samples) - 1, total_samples - 1)] for d in deciles]
+
+    return {
+        "deciles": deciles.tolist(),
+        "gain": gain_deciles,
+        "lift": lift_deciles,
+        "summary": "Lift and Gain Data"
+    }
+'''
+
+TOMEK_LINKS = '''import pandas as pd
+import numpy as np
+from typing import Annotated
+
+def run(data, target_column: Annotated[str, "column"] = "target") -> dict:
+    df = data.copy()
+    if target_column not in df.columns:
+        raise ValueError(f"Target column '{target_column}' not found.")
+
+    X = df.drop(columns=[target_column])
+    y = df[target_column]
+
+    try:
+        from imblearn.under_sampling import TomekLinks
+        tl = TomekLinks()
+        X_res, y_res = tl.fit_resample(X, y)
+        df_out = X_res.copy()
+        df_out[target_column] = y_res
+
+        return {"data": df_out, "summary": f"Tomek Links removed {len(df) - len(df_out)} noisy samples."}
+    except ImportError:
+        return {"data": df, "summary": "imbalanced-learn not installed. Skipping Tomek Links."}
+'''
+
+RANDOM_UNDER_SAMPLER = '''import pandas as pd
+from typing import Annotated
+
+def run(data, target_column: Annotated[str, "column"] = "target", random_state: int = 42) -> dict:
+    df = data.copy()
+    if target_column not in df.columns:
+        raise ValueError(f"Target column '{target_column}' not found.")
+
+    try:
+        from imblearn.under_sampling import RandomUnderSampler
+        X = df.drop(columns=[target_column])
+        y = df[target_column]
+        rus = RandomUnderSampler(random_state=random_state)
+        X_res, y_res = rus.fit_resample(X, y)
+        df_out = X_res.copy()
+        df_out[target_column] = y_res
+
+        return {"data": df_out, "summary": f"Undersampled from {len(df)} to {len(df_out)} rows."}
+    except ImportError:
+        min_class_size = df[target_column].value_counts().min()
+        df_out = df.groupby(target_column).sample(n=min_class_size, random_state=random_state)
+        return {"data": df_out, "summary": f"Manual undersampled from {len(df)} to {len(df_out)} rows."}
+'''
+"""
+
+# Find where TEMPLATES = [ starts
+parts = content.split("TEMPLATES: list[dict] = [")
+before_templates = parts[0]
+templates_list = parts[1]
+
+# Insert the new code before TEMPLATES
+new_content = before_templates + NEW_CODE + "\nTEMPLATES: list[dict] = ["
+
+# PREPROCESSING (find # Preprocessing)
+templates_list = templates_list.replace(
+    '    {"id": "datetime_extractor", "label": "Datetime Extractor", "category": "Preprocessing", "code": DATETIME_EXTRACTOR},',
+    '''    {"id": "datetime_extractor", "label": "Datetime Extractor", "category": "Preprocessing", "code": DATETIME_EXTRACTOR},
+    {"id": "binary_encoder", "label": "Binary Encoder", "category": "Preprocessing", "code": BINARY_ENCODER},
+    {"id": "frequency_encoder", "label": "Frequency Encoder", "category": "Preprocessing", "code": FREQUENCY_ENCODER},
+    {"id": "ordinal_encoder", "label": "Ordinal Encoder", "category": "Preprocessing", "code": ORDINAL_ENCODER},
+    {"id": "vif_feature_selection", "label": "VIF Feature Selection", "category": "Preprocessing", "code": VIF_FEATURE_SELECTION},
+    {"id": "pca_whitening", "label": "PCA Whitening", "category": "Preprocessing", "code": PCA_WHITENING},
+    {"id": "k_means_clustering_features", "label": "K-Means Features", "category": "Preprocessing", "code": K_MEANS_CLUSTERING_FEATURES},
+    {"id": "lag_feature_generator", "label": "Lag Features", "category": "Preprocessing", "code": LAG_FEATURE_GENERATOR},
+    {"id": "rolling_window_stats", "label": "Rolling Window Stats", "category": "Preprocessing", "code": ROLLING_WINDOW_STATS},
+    {"id": "tomek_links", "label": "Tomek Links", "category": "Preprocessing", "code": TOMEK_LINKS},
+    {"id": "random_under_sampler", "label": "Random Under Sampler", "category": "Preprocessing", "code": RANDOM_UNDER_SAMPLER},'''
+)
+
+# CLASSIFIERS (find # Classifiers)
+templates_list = templates_list.replace(
+    '    {"id": "knn_classifier", "label": "KNN Classifier", "category": "Classifiers", "code": KNN_CLASSIFIER},',
+    '''    {"id": "knn_classifier", "label": "KNN Classifier", "category": "Classifiers", "code": KNN_CLASSIFIER},
+    {"id": "xgboost_node", "label": "XGBoost", "category": "Classifiers", "code": XGBOOST_NODE},
+    {"id": "lightgbm_node", "label": "LightGBM", "category": "Classifiers", "code": LIGHTGBM_NODE},
+    {"id": "adaboost_node", "label": "AdaBoost", "category": "Classifiers", "code": ADABOOST_NODE},
+    {"id": "voting_ensemble", "label": "Voting Ensemble", "category": "Classifiers", "code": VOTING_ENSEMBLE},'''
+)
+
+# EVALUATION
+templates_list = templates_list.replace(
+    '    {"id": "auto_ml", "label": "AutoML", "category": "Evaluation", "code": AUTO_ML},',
+    '''    {"id": "permutation_importance", "label": "Permutation Importance", "category": "Evaluation", "code": PERMUTATION_IMPORTANCE},
+    {"id": "learning_curve_data", "label": "Learning Curve", "category": "Evaluation", "code": LEARNING_CURVE_DATA},
+    {"id": "lift_gain_charts", "label": "Lift & Gain Charts", "category": "Evaluation", "code": LIFT_GAIN_CHARTS},
+    {"id": "auto_ml", "label": "AutoML", "category": "Evaluation", "code": AUTO_ML},'''
+)
+
+with open("templates.py", "w") as f:
+    f.write(new_content + templates_list)
+
+print("success")
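Note that add_nodes.py is a one-shot patch script: it splices the new template constants into templates.py by exact string matching, splitting on the literal anchor `TEMPLATES: list[dict] = [` and replacing literal entry lines. If templates.py's formatting drifts, `parts[1]` raises a bare IndexError and each `str.replace()` becomes a silent no-op. A hardened variant might verify each anchor up front; a sketch of that idea, not part of the package:

# Hypothetical hardening sketch for add_nodes.py-style patching (not shipped code).
ANCHOR = "TEMPLATES: list[dict] = ["

with open("templates.py", "r") as f:
    content = f.read()

# split() on a missing anchor makes parts[1] raise IndexError, and a
# str.replace() whose needle doesn't match is a silent no-op, so check first.
if content.count(ANCHOR) != 1:
    raise SystemExit(f"expected exactly one occurrence of {ANCHOR!r} in templates.py")

before_templates, templates_list = content.split(ANCHOR)

needle = '    {"id": "knn_classifier", "label": "KNN Classifier", "category": "Classifiers", "code": KNN_CLASSIFIER},'
if needle not in templates_list:
    raise SystemExit("classifier anchor line not found; templates.py layout changed")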
package/bundled/backend/api/routes/appstate.py
@@ -0,0 +1,102 @@
+"""
+App-state persistence — saves and loads the full frontend state
+(flows, projects, myFiles) to/from a single JSON file on the server.
+
+This makes the app work consistently across any browser on the same machine,
+because all browsers hit the same localhost backend which reads the same file.
+"""
+import json
+import logging
+import os
+from pathlib import Path
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+
+logger = logging.getLogger(__name__)
+router = APIRouter()
+
+
+def _state_file() -> Path:
+    """Return the path to app_state.json, creating parent dirs if needed."""
+    base = Path(
+        os.environ.get("M8FLOW_PIPELINE_DIR")
+        or (Path.home() / ".m8flow" / "pipelines")
+    )
+    base.mkdir(parents=True, exist_ok=True)
+    return base.parent / "app_state.json"
+
+
+@router.get("")
+def get_state():
+    """Load the full app state from disk. Returns empty defaults if no file yet."""
+    f = _state_file()
+    if f.exists():
+        try:
+            return json.loads(f.read_text(encoding="utf-8"))
+        except Exception as exc:
+            logger.warning("app_state.json unreadable: %s — returning defaults", exc)
+    return {"flows": [], "projects": [], "myFiles": [], "openRouterKey": None}
+
+
+class StatePayload(BaseModel):
+    flows: list = []
+    projects: list = []
+    myFiles: list = []
+    openRouterKey: str | None = None
+
+
+@router.post("")
+def save_state(payload: StatePayload):
+    """Persist the full app state to disk atomically."""
+    f = _state_file()
+    try:
+        tmp = f.with_suffix(".tmp")
+        tmp.write_text(
+            json.dumps(payload.model_dump(), ensure_ascii=False, indent=2),
+            encoding="utf-8",
+        )
+        tmp.replace(f)
+    except Exception as exc:
+        logger.error("Failed to save app_state: %s", exc)
+        raise HTTPException(status_code=500, detail=f"State save failed: {exc}")
+    return {"ok": True}
+
+
+# ── Dedicated key endpoints (used by settings UI) ─────────────────────────────
+
+class KeyPayload(BaseModel):
+    key: str | None = None
+
+
+@router.get("/api-key")
+def get_api_key():
+    """Return the stored OpenRouter key (masked for security in logs, full for UI)."""
+    f = _state_file()
+    if f.exists():
+        try:
+            data = json.loads(f.read_text(encoding="utf-8"))
+            return {"key": data.get("openRouterKey") or None}
+        except Exception:
+            pass
+    return {"key": None}
+
+
+@router.post("/api-key")
+def save_api_key(payload: KeyPayload):
+    """Save or clear the OpenRouter key server-side."""
+    f = _state_file()
+    data: dict = {"flows": [], "projects": [], "myFiles": [], "openRouterKey": None}
+    if f.exists():
+        try:
+            data = json.loads(f.read_text(encoding="utf-8"))
+        except Exception:
+            pass
+    data["openRouterKey"] = payload.key or None
+    try:
+        tmp = f.with_suffix(".tmp")
+        tmp.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
+        tmp.replace(f)
+    except Exception as exc:
+        raise HTTPException(status_code=500, detail=f"Key save failed: {exc}")
+    return {"ok": True}