mcli-framework 7.5.1-py3-none-any.whl → 7.6.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic.
- mcli/app/commands_cmd.py +51 -39
- mcli/app/completion_helpers.py +4 -13
- mcli/app/main.py +21 -25
- mcli/app/model_cmd.py +119 -9
- mcli/lib/custom_commands.py +16 -11
- mcli/ml/api/app.py +1 -5
- mcli/ml/dashboard/app.py +2 -2
- mcli/ml/dashboard/app_integrated.py +168 -116
- mcli/ml/dashboard/app_supabase.py +7 -3
- mcli/ml/dashboard/app_training.py +3 -6
- mcli/ml/dashboard/components/charts.py +74 -115
- mcli/ml/dashboard/components/metrics.py +24 -44
- mcli/ml/dashboard/components/tables.py +32 -40
- mcli/ml/dashboard/overview.py +102 -78
- mcli/ml/dashboard/pages/cicd.py +103 -56
- mcli/ml/dashboard/pages/debug_dependencies.py +35 -28
- mcli/ml/dashboard/pages/gravity_viz.py +374 -313
- mcli/ml/dashboard/pages/monte_carlo_predictions.py +50 -48
- mcli/ml/dashboard/pages/predictions_enhanced.py +396 -248
- mcli/ml/dashboard/pages/scrapers_and_logs.py +299 -273
- mcli/ml/dashboard/pages/test_portfolio.py +153 -121
- mcli/ml/dashboard/pages/trading.py +238 -169
- mcli/ml/dashboard/pages/workflows.py +129 -84
- mcli/ml/dashboard/streamlit_extras_utils.py +70 -79
- mcli/ml/dashboard/utils.py +24 -21
- mcli/ml/dashboard/warning_suppression.py +6 -4
- mcli/ml/database/session.py +16 -5
- mcli/ml/mlops/pipeline_orchestrator.py +1 -3
- mcli/ml/predictions/monte_carlo.py +6 -18
- mcli/ml/trading/alpaca_client.py +95 -96
- mcli/ml/trading/migrations.py +76 -40
- mcli/ml/trading/models.py +78 -60
- mcli/ml/trading/paper_trading.py +92 -74
- mcli/ml/trading/risk_management.py +106 -85
- mcli/ml/trading/trading_service.py +155 -110
- mcli/ml/training/train_model.py +1 -3
- mcli/{app → self}/completion_cmd.py +6 -6
- mcli/self/self_cmd.py +100 -57
- mcli/test/test_cmd.py +30 -0
- mcli/workflow/daemon/daemon.py +2 -0
- mcli/workflow/model_service/openai_adapter.py +347 -0
- mcli/workflow/politician_trading/models.py +6 -2
- mcli/workflow/politician_trading/scrapers_corporate_registry.py +39 -88
- mcli/workflow/politician_trading/scrapers_free_sources.py +32 -39
- mcli/workflow/politician_trading/scrapers_third_party.py +21 -39
- mcli/workflow/politician_trading/seed_database.py +70 -89
- {mcli_framework-7.5.1.dist-info → mcli_framework-7.6.1.dist-info}/METADATA +1 -1
- {mcli_framework-7.5.1.dist-info → mcli_framework-7.6.1.dist-info}/RECORD +56 -54
- /mcli/{app → self}/logs_cmd.py +0 -0
- /mcli/{app → self}/redis_cmd.py +0 -0
- /mcli/{app → self}/visual_cmd.py +0 -0
- /mcli/{app → test}/cron_test_cmd.py +0 -0
- {mcli_framework-7.5.1.dist-info → mcli_framework-7.6.1.dist-info}/WHEEL +0 -0
- {mcli_framework-7.5.1.dist-info → mcli_framework-7.6.1.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.5.1.dist-info → mcli_framework-7.6.1.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.5.1.dist-info → mcli_framework-7.6.1.dist-info}/top_level.txt +0 -0
mcli/ml/dashboard/pages/workflows.py

@@ -1,23 +1,34 @@
 """Workflow Management Dashboard"""
 
-import streamlit as st
-import pandas as pd
-import requests
-import os
 import json
+import os
 from datetime import datetime, timedelta
+from typing import Any, Dict, Optional
+
+import pandas as pd
 import plotly.express as px
-
+import requests
+import streamlit as st
 
 # Import components
 try:
-    from ..components.
-
+    from ..components.charts import (
+        create_gantt_chart,
+        create_status_pie_chart,
+        create_timeline_chart,
+        render_chart,
+    )
+    from ..components.metrics import display_health_indicator, display_kpi_row, display_status_badge
     from ..components.tables import display_filterable_dataframe, export_dataframe
 except ImportError:
     # Fallback for when imported outside package context
-    from components.
-
+    from components.charts import (
+        create_gantt_chart,
+        create_status_pie_chart,
+        create_timeline_chart,
+        render_chart,
+    )
+    from components.metrics import display_health_indicator, display_kpi_row, display_status_badge
     from components.tables import display_filterable_dataframe, export_dataframe
 
 
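The reorganized imports above keep the package-relative/top-level fallback used across the dashboard pages. A minimal standalone sketch of that pattern, where the `helpers` module and `render` function are hypothetical stand-ins rather than part of this package:

```python
# Import-fallback pattern: prefer the package-relative import, and fall
# back to a top-level import when the file is run outside the package.
# "helpers" and "render" are hypothetical stand-ins for illustration.
try:
    from .helpers import render  # imported as part of a package
except ImportError:
    from helpers import render  # run as a loose script

render()
```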
@@ -67,7 +78,7 @@ def create_mock_workflow_data() -> pd.DataFrame:
             "next_run": (datetime.now() + timedelta(hours=3)).isoformat(),
             "success_rate": 0.95,
             "avg_duration_min": 12,
-            "total_runs": 150
+            "total_runs": 150,
         },
         {
             "id": "wf-2",
@@ -79,7 +90,7 @@ def create_mock_workflow_data() -> pd.DataFrame:
             "next_run": (datetime.now() + timedelta(hours=22)).isoformat(),
             "success_rate": 0.88,
             "avg_duration_min": 45,
-            "total_runs": 30
+            "total_runs": 30,
         },
         {
             "id": "wf-3",
@@ -91,7 +102,7 @@ def create_mock_workflow_data() -> pd.DataFrame:
             "next_run": (datetime.now() + timedelta(minutes=30)).isoformat(),
             "success_rate": 1.0,
             "avg_duration_min": 5,
-            "total_runs": 500
+            "total_runs": 500,
         },
         {
             "id": "wf-4",
@@ -103,8 +114,8 @@ def create_mock_workflow_data() -> pd.DataFrame:
             "next_run": None,
             "success_rate": 0.92,
             "avg_duration_min": 20,
-            "total_runs": 75
-        }
+            "total_runs": 75,
+        },
     ]
 
     return pd.DataFrame(workflows)
@@ -143,22 +154,37 @@ def create_mock_execution_data(workflow_id: Optional[str] = None) -> pd.DataFram
 
     executions = []
     for i in range(50):
-        start_time = datetime.now() - timedelta(
+        start_time = datetime.now() - timedelta(
+            days=random.randint(0, 30), hours=random.randint(0, 23)
+        )
         duration = random.randint(300, 3600)  # 5-60 minutes in seconds
         status = random.choices(["completed", "failed", "running"], weights=[80, 15, 5])[0]
 
-        executions.append(
-
-
-
-
-
-
-
-
-
-
+        executions.append(
+            {
+                "id": f"exec-{i+1}",
+                "workflow_id": workflow_id or f"wf-{random.randint(1,4)}",
+                "workflow_name": random.choice(
+                    [
+                        "Data Ingestion Pipeline",
+                        "ML Model Training",
+                        "Data Validation",
+                        "Prediction Generation",
+                    ]
+                ),
+                "status": status,
+                "started_at": start_time.isoformat(),
+                "completed_at": (
+                    (start_time + timedelta(seconds=duration)).isoformat()
+                    if status != "running"
+                    else None
+                ),
+                "duration_sec": duration if status != "running" else None,
+                "triggered_by": random.choice(["schedule", "manual", "api"]),
+                "steps_completed": random.randint(3, 8),
+                "steps_total": 8,
+            }
+        )
 
     return pd.DataFrame(executions)
 
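The rewritten `create_mock_execution_data` draws each status with `random.choices` and 80/15/5 weights. A small sketch of how that sampling behaves, assuming only the standard library:

```python
import random

random.seed(0)  # fixed seed so the sample is reproducible

# Mirror the weighted draw used by create_mock_execution_data above:
# roughly 80% completed, 15% failed, 5% running over many samples.
statuses = [
    random.choices(["completed", "failed", "running"], weights=[80, 15, 5])[0]
    for _ in range(1_000)
]
print({s: statuses.count(s) for s in ("completed", "failed", "running")})
# counts cluster near 800 / 150 / 50
```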
@@ -188,7 +214,7 @@ def show_workflows_dashboard():
     # Convert timestamps
     for col in ["last_run", "next_run"]:
         if col in workflows_df.columns:
-            workflows_df[col] = pd.to_datetime(workflows_df[col], errors=
+            workflows_df[col] = pd.to_datetime(workflows_df[col], errors="coerce")
 
     # === KPIs ===
     st.subheader("📊 Workflow Metrics")
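The completed line above passes `errors="coerce"`, pandas' behavior of mapping unparseable timestamps to `NaT` rather than raising. A quick illustration:

```python
import pandas as pd

s = pd.Series(["2024-01-01T10:00:00", "not-a-date", None])

# errors="coerce" maps anything unparseable to NaT instead of raising,
# so downstream datetime operations keep working on mixed-quality data.
print(pd.to_datetime(s, errors="coerce"))
# row 0 parses; rows 1 and 2 become NaT
```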
@@ -196,7 +222,9 @@ def show_workflows_dashboard():
     total_workflows = len(workflows_df)
     active_workflows = len(workflows_df[workflows_df["status"] == "active"])
     paused_workflows = len(workflows_df[workflows_df["status"] == "paused"])
-    avg_success_rate =
+    avg_success_rate = (
+        workflows_df["success_rate"].mean() * 100 if "success_rate" in workflows_df.columns else 0
+    )
 
     metrics = {
         "Total Workflows": {"value": total_workflows, "icon": "⚙️"},
@@ -210,7 +238,9 @@ def show_workflows_dashboard():
     st.divider()
 
     # === Tabs ===
-    tab1, tab2, tab3, tab4 = st.tabs(
+    tab1, tab2, tab3, tab4 = st.tabs(
+        ["📋 Workflows", "📈 Executions", "➕ Create Workflow", "📚 Templates"]
+    )
 
     with tab1:
         show_workflow_list(workflows_df)
@@ -236,14 +266,14 @@ def show_workflow_list(workflows_df: pd.DataFrame):
     }
 
     filtered_df = display_filterable_dataframe(
-        workflows_df,
-        filter_columns=filter_config,
-        key_prefix="workflow_filter"
+        workflows_df, filter_columns=filter_config, key_prefix="workflow_filter"
     )
 
     # Workflow details
     for _, workflow in filtered_df.iterrows():
-        with st.expander(
+        with st.expander(
+            f"{workflow['name']} - {display_status_badge(workflow['status'], 'small')}"
+        ):
             col1, col2 = st.columns(2)
 
             with col1:
@@ -264,7 +294,7 @@ def show_workflow_list(workflows_df: pd.DataFrame):
                     st.success(f"Workflow '{workflow['name']}' triggered!")
 
             with col_b:
-                if workflow[
+                if workflow["status"] == "active":
                     if st.button("⏸️ Pause", key=f"pause_{workflow['id']}"):
                         st.info(f"Workflow '{workflow['name']}' paused")
                 else:
@@ -273,25 +303,25 @@ def show_workflow_list(workflows_df: pd.DataFrame):
 
             with col_c:
                 if st.button("✏️ Edit", key=f"edit_{workflow['id']}"):
-                    st.session_state[
+                    st.session_state["edit_workflow_id"] = workflow["id"]
                     st.info("Edit mode activated")
 
             with col_d:
                 if st.button("📊 View Executions", key=f"view_exec_{workflow['id']}"):
-                    st.session_state[
+                    st.session_state["selected_workflow"] = workflow["id"]
 
             # Show workflow definition
             if st.checkbox("Show Workflow Definition", key=f"def_{workflow['id']}"):
                 workflow_def = {
-                    "name": workflow[
-                    "description": workflow[
-                    "schedule": workflow[
+                    "name": workflow["name"],
+                    "description": workflow["description"],
+                    "schedule": workflow["schedule"],
                     "steps": [
                         {"name": "Fetch Data", "action": "api_call", "params": {}},
                         {"name": "Transform Data", "action": "python_script", "params": {}},
                         {"name": "Validate Data", "action": "validation", "params": {}},
-                        {"name": "Store Results", "action": "database_write", "params": {}}
-                    ]
+                        {"name": "Store Results", "action": "database_write", "params": {}},
+                    ],
                 }
                 st.json(workflow_def)
 
@@ -311,19 +341,17 @@ def show_workflow_executions():
     # Convert timestamps
     for col in ["started_at", "completed_at"]:
         if col in executions_df.columns:
-            executions_df[col] = pd.to_datetime(executions_df[col], errors=
+            executions_df[col] = pd.to_datetime(executions_df[col], errors="coerce")
 
     # Filter
     filter_config = {
         "workflow_name": "multiselect",
         "status": "multiselect",
-        "triggered_by": "multiselect"
+        "triggered_by": "multiselect",
     }
 
     filtered_df = display_filterable_dataframe(
-        executions_df,
-        filter_columns=filter_config,
-        key_prefix="exec_filter"
+        executions_df, filter_columns=filter_config, key_prefix="exec_filter"
     )
 
     # Status distribution
@@ -340,8 +368,8 @@ def show_workflow_executions():
         fig = px.bar(
             x=workflow_counts.values,
             y=workflow_counts.index,
-            orientation=
-            title="Executions by Workflow"
+            orientation="h",
+            title="Executions by Workflow",
         )
         render_chart(fig)
 
@@ -349,7 +377,9 @@ def show_workflow_executions():
     st.markdown("#### Recent Executions")
 
     for _, execution in filtered_df.head(20).iterrows():
-        with st.expander(
+        with st.expander(
+            f"{execution.get('workflow_name')} - {execution.get('started_at')} - {display_status_badge(execution.get('status'), 'small')}"
+        ):
             col1, col2 = st.columns(2)
 
             with col1:
@@ -361,52 +391,57 @@ def show_workflow_executions():
                 st.markdown(f"**Status:** {display_status_badge(execution.get('status'), 'small')}")
                 st.markdown(f"**Triggered By:** {execution.get('triggered_by')}")
 
-            if pd.notna(execution.get(
+            if pd.notna(execution.get("duration_sec")):
                 st.markdown(f"**Duration:** {execution['duration_sec']/60:.1f} min")
 
-            if execution.get(
-                progress = execution.get(
+            if execution.get("steps_total"):
+                progress = execution.get("steps_completed", 0) / execution["steps_total"]
                 st.progress(progress)
-                st.caption(
+                st.caption(
+                    f"Steps: {execution.get('steps_completed')}/{execution['steps_total']}"
+                )
 
             # Action buttons
             col_btn1, col_btn2, col_btn3 = st.columns(3)
 
             with col_btn1:
                 if st.button("📋 View Logs", key=f"logs_{execution.get('id')}"):
-                    st.code(
+                    st.code(
+                        f"""
 [INFO] Workflow execution started: {execution.get('id')}
 [INFO] Step 1/8: Fetching data from sources...
 [INFO] Step 2/8: Transforming data...
 [INFO] Step 3/8: Validating data quality...
 [INFO] Execution {'completed' if execution.get('status') == 'completed' else execution.get('status')}
-                    """,
+                        """,
+                        language="log",
+                    )
 
             with col_btn2:
                 # Download results as JSON
                 result_data = {
-                    "execution_id": execution.get(
-                    "workflow_name": execution.get(
-                    "status": execution.get(
-                    "started_at": str(execution.get(
-                    "completed_at": str(execution.get(
-                    "duration_seconds": execution.get(
-                    "triggered_by": execution.get(
-                    "steps_completed": execution.get(
-                    "steps_total": execution.get(
+                    "execution_id": execution.get("id"),
+                    "workflow_name": execution.get("workflow_name"),
+                    "status": execution.get("status"),
+                    "started_at": str(execution.get("started_at")),
+                    "completed_at": str(execution.get("completed_at")),
+                    "duration_seconds": execution.get("duration_sec"),
+                    "triggered_by": execution.get("triggered_by"),
+                    "steps_completed": execution.get("steps_completed"),
+                    "steps_total": execution.get("steps_total"),
                     "results": {
                         "records_processed": 1250,
                         "errors": 0,
                         "warnings": 3,
-                        "output_location": f"/data/workflows/{execution.get('id')}/output.parquet"
-                    }
+                        "output_location": f"/data/workflows/{execution.get('id')}/output.parquet",
+                    },
                 }
                 st.download_button(
                     label="💾 Download Results",
                     data=json.dumps(result_data, indent=2),
                     file_name=f"workflow_result_{execution.get('id')}.json",
                     mime="application/json",
-                    key=f"download_{execution.get('id')}"
+                    key=f"download_{execution.get('id')}",
                 )
 
             with col_btn3:
@@ -432,10 +467,14 @@ def show_workflow_builder():
         col1, col2 = st.columns(2)
 
         with col1:
-            schedule_type = st.selectbox(
+            schedule_type = st.selectbox(
+                "Schedule Type", ["Cron Expression", "Interval", "Manual Only"]
+            )
 
             if schedule_type == "Cron Expression":
-                schedule = st.text_input(
+                schedule = st.text_input(
+                    "Cron Schedule", placeholder="0 0 * * *", help="Cron expression for scheduling"
+                )
             elif schedule_type == "Interval":
                 interval_value = st.number_input("Every", min_value=1, value=1)
                 interval_unit = st.selectbox("Unit", ["minutes", "hours", "days"])
@@ -456,15 +495,21 @@ def show_workflow_builder():
         steps = []
         for i in range(num_steps):
             with st.expander(f"Step {i+1}"):
-                step_name = st.text_input(
-
-
+                step_name = st.text_input(
+                    f"Step Name", key=f"step_name_{i}", placeholder=f"Step {i+1}"
+                )
+                step_type = st.selectbox(
+                    f"Step Type",
+                    ["API Call", "Python Script", "Database Query", "Data Transform", "Validation"],
+                    key=f"step_type_{i}",
+                )
+                step_config = st.text_area(
+                    f"Configuration (JSON)",
+                    key=f"step_config_{i}",
+                    placeholder='{"param": "value"}',
+                )
 
-                steps.append({
-                    "name": step_name,
-                    "type": step_type,
-                    "config": step_config
-                })
+                steps.append({"name": step_name, "type": step_type, "config": step_config})
 
         submitted = st.form_submit_button("Create Workflow")
 
@@ -477,7 +522,7 @@ def show_workflow_builder():
                 "enabled": enabled,
                 "retry_on_failure": retry_on_failure,
                 "max_retries": max_retries,
-                "steps": steps
+                "steps": steps,
             }
 
             st.success(f"✅ Workflow '{name}' created successfully!")
@@ -496,26 +541,26 @@ def show_workflow_templates():
             "name": "Data Ingestion Pipeline",
             "description": "Fetch data from external APIs and store in database",
             "category": "Data Engineering",
-            "steps": 4
+            "steps": 4,
         },
         {
             "name": "ML Training Pipeline",
             "description": "Train and evaluate ML models on schedule",
             "category": "Machine Learning",
-            "steps": 6
+            "steps": 6,
         },
         {
             "name": "Data Quality Check",
             "description": "Validate data integrity and quality metrics",
             "category": "Data Quality",
-            "steps": 3
+            "steps": 3,
         },
         {
             "name": "Report Generation",
             "description": "Generate and distribute periodic reports",
             "category": "Reporting",
-            "steps": 5
-        }
+            "steps": 5,
+        },
     ]
 
     for template in templates:
mcli/ml/dashboard/streamlit_extras_utils.py

@@ -5,13 +5,13 @@ import streamlit as st
 # Try to import streamlit-extras components
 HAS_EXTRAS = True
 try:
-    from streamlit_extras.
+    from streamlit_extras.add_vertical_space import add_vertical_space
     from streamlit_extras.badges import badge
-    from streamlit_extras.colored_header import colored_header
     from streamlit_extras.card import card
-    from streamlit_extras.
+    from streamlit_extras.colored_header import colored_header
     from streamlit_extras.grid import grid
-    from streamlit_extras.
+    from streamlit_extras.metric_cards import style_metric_cards
+    from streamlit_extras.stoggle import stoggle
     from streamlit_extras.stylable_container import stylable_container
 except ImportError:
     HAS_EXTRAS = False
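The fixed imports above sit behind the module's `HAS_EXTRAS` flag, a standard optional-dependency guard: attempt the imports once at module load, record the outcome, and branch on the flag at call time. A minimal sketch of the same pattern; the `show_badge` wrapper is hypothetical, while `badge` is the streamlit-extras helper imported above:

```python
HAS_EXTRAS = True
try:
    from streamlit_extras.badges import badge
except ImportError:
    HAS_EXTRAS = False


def show_badge() -> None:
    # Branch on the flag instead of re-attempting the import per call.
    if not HAS_EXTRAS:
        print("streamlit-extras not installed; skipping badge")
        return
    badge(type="pypi", name="mcli-framework")
```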
@@ -44,22 +44,14 @@ def enhanced_metrics(metrics_data: list, use_container_width: bool = True):
         cols = st.columns(len(metrics_data))
         for i, metric in enumerate(metrics_data):
             with cols[i]:
-                st.metric(
-                    label=metric["label"],
-                    value=metric["value"],
-                    delta=metric.get("delta")
-                )
+                st.metric(label=metric["label"], value=metric["value"], delta=metric.get("delta"))
         return
 
     # Use streamlit-extras styled metrics
     cols = st.columns(len(metrics_data))
     for i, metric in enumerate(metrics_data):
         with cols[i]:
-            st.metric(
-                label=metric["label"],
-                value=metric["value"],
-                delta=metric.get("delta")
-            )
+            st.metric(label=metric["label"], value=metric["value"], delta=metric.get("delta"))
     style_metric_cards()
 
 
@@ -94,15 +86,17 @@ def section_header(label: str, description: str = None, divider: str = "rainbow"
         st.divider()
         return
 
-    colored_header(
-        label=label,
-        description=description or "",
-        color_name=divider
-    )
+    colored_header(label=label, description=description or "", color_name=divider)
 
 
-def info_card(
-
+def info_card(
+    title: str,
+    text: str,
+    image: str = None,
+    url: str = None,
+    has_button: bool = False,
+    button_text: str = "Learn More",
+):
     """
     Display an information card
 
@@ -199,14 +193,12 @@ def styled_container(key: str, css_styles: str):
     if not HAS_EXTRAS:
         return st.container()
 
-    return stylable_container(
-        key=key,
-        css_styles=css_styles
-    )
+    return stylable_container(key=key, css_styles=css_styles)
 
 
-def trading_status_card(
-
+def trading_status_card(
+    status: str, portfolio_value: float, daily_pnl: float, positions: int, cash: float
+):
     """
     Display a trading status summary card
 
@@ -224,28 +216,29 @@ def trading_status_card(status: str, portfolio_value: float, daily_pnl: float,
     section_header(
         f"{status_color} Trading Status: {status}",
         f"Real-time portfolio monitoring and execution",
-        divider="blue"
+        divider="blue",
+    )
+
+    enhanced_metrics(
+        [
+            {
+                "label": "Portfolio Value",
+                "value": f"${portfolio_value:,.2f}",
+                "delta": f"{pnl_sign}${daily_pnl:,.2f}",
+            },
+            {
+                "label": "Open Positions",
+                "value": str(positions),
+            },
+            {
+                "label": "Available Cash",
+                "value": f"${cash:,.2f}",
+            },
+        ]
     )
 
-
-
-        "label": "Portfolio Value",
-        "value": f"${portfolio_value:,.2f}",
-        "delta": f"{pnl_sign}${daily_pnl:,.2f}"
-    },
-    {
-        "label": "Open Positions",
-        "value": str(positions),
-    },
-    {
-        "label": "Available Cash",
-        "value": f"${cash:,.2f}",
-    },
-])
-
-
-def data_quality_indicators(total_records: int, clean_records: int,
-    errors: int, last_update: str):
+
+def data_quality_indicators(total_records: int, clean_records: int, errors: int, last_update: str):
     """
     Display data quality indicators
 
@@ -257,41 +250,39 @@ def data_quality_indicators(total_records: int, clean_records: int,
     """
     quality_pct = (clean_records / total_records * 100) if total_records > 0 else 0
 
-    section_header(
-
-
-
+    section_header("📊 Data Quality Metrics", f"Last updated: {last_update}", divider="green")
+
+    enhanced_metrics(
+        [
+            {
+                "label": "Total Records",
+                "value": f"{total_records:,}",
+            },
+            {
+                "label": "Data Quality",
+                "value": f"{quality_pct:.1f}%",
+                "delta": f"{clean_records:,} clean",
+            },
+            {
+                "label": "Errors",
+                "value": str(errors),
+                "delta": f"{(errors/total_records*100):.2f}%" if total_records > 0 else "0%",
+            },
+        ]
     )
 
-    enhanced_metrics([
-        {
-            "label": "Total Records",
-            "value": f"{total_records:,}",
-        },
-        {
-            "label": "Data Quality",
-            "value": f"{quality_pct:.1f}%",
-            "delta": f"{clean_records:,} clean"
-        },
-        {
-            "label": "Errors",
-            "value": str(errors),
-            "delta": f"{(errors/total_records*100):.2f}%" if total_records > 0 else "0%"
-        },
-    ])
-
 
 # Export available components
 __all__ = [
-
-
-
-
-
-
-
-
-
-
-
+    "HAS_EXTRAS",
+    "enhanced_metrics",
+    "status_badge",
+    "section_header",
+    "info_card",
+    "collapsible_section",
+    "dashboard_grid",
+    "vertical_space",
+    "styled_container",
+    "trading_status_card",
+    "data_quality_indicators",
 ]