ds-agent-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/ds-agent.js +451 -0
- package/ds_agent/__init__.py +8 -0
- package/package.json +28 -0
- package/requirements.txt +126 -0
- package/setup.py +35 -0
- package/src/__init__.py +7 -0
- package/src/_compress_tool_result.py +118 -0
- package/src/api/__init__.py +4 -0
- package/src/api/app.py +1626 -0
- package/src/cache/__init__.py +5 -0
- package/src/cache/cache_manager.py +561 -0
- package/src/cli.py +2886 -0
- package/src/dynamic_prompts.py +281 -0
- package/src/orchestrator.py +4799 -0
- package/src/progress_manager.py +139 -0
- package/src/reasoning/__init__.py +332 -0
- package/src/reasoning/business_summary.py +431 -0
- package/src/reasoning/data_understanding.py +356 -0
- package/src/reasoning/model_explanation.py +383 -0
- package/src/reasoning/reasoning_trace.py +239 -0
- package/src/registry/__init__.py +3 -0
- package/src/registry/tools_registry.py +3 -0
- package/src/session_memory.py +448 -0
- package/src/session_store.py +370 -0
- package/src/storage/__init__.py +19 -0
- package/src/storage/artifact_store.py +620 -0
- package/src/storage/helpers.py +116 -0
- package/src/storage/huggingface_storage.py +694 -0
- package/src/storage/r2_storage.py +0 -0
- package/src/storage/user_files_service.py +288 -0
- package/src/tools/__init__.py +335 -0
- package/src/tools/advanced_analysis.py +823 -0
- package/src/tools/advanced_feature_engineering.py +708 -0
- package/src/tools/advanced_insights.py +578 -0
- package/src/tools/advanced_preprocessing.py +549 -0
- package/src/tools/advanced_training.py +906 -0
- package/src/tools/agent_tool_mapping.py +326 -0
- package/src/tools/auto_pipeline.py +420 -0
- package/src/tools/autogluon_training.py +1480 -0
- package/src/tools/business_intelligence.py +860 -0
- package/src/tools/cloud_data_sources.py +581 -0
- package/src/tools/code_interpreter.py +390 -0
- package/src/tools/computer_vision.py +614 -0
- package/src/tools/data_cleaning.py +614 -0
- package/src/tools/data_profiling.py +593 -0
- package/src/tools/data_type_conversion.py +268 -0
- package/src/tools/data_wrangling.py +433 -0
- package/src/tools/eda_reports.py +284 -0
- package/src/tools/enhanced_feature_engineering.py +241 -0
- package/src/tools/feature_engineering.py +302 -0
- package/src/tools/matplotlib_visualizations.py +1327 -0
- package/src/tools/model_training.py +520 -0
- package/src/tools/nlp_text_analytics.py +761 -0
- package/src/tools/plotly_visualizations.py +497 -0
- package/src/tools/production_mlops.py +852 -0
- package/src/tools/time_series.py +507 -0
- package/src/tools/tools_registry.py +2133 -0
- package/src/tools/visualization_engine.py +559 -0
- package/src/utils/__init__.py +42 -0
- package/src/utils/error_recovery.py +313 -0
- package/src/utils/parallel_executor.py +402 -0
- package/src/utils/polars_helpers.py +248 -0
- package/src/utils/schema_extraction.py +132 -0
- package/src/utils/semantic_layer.py +392 -0
- package/src/utils/token_budget.py +411 -0
- package/src/utils/validation.py +377 -0
- package/src/workflow_state.py +154 -0
|
@@ -0,0 +1,377 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Validation utilities for data science operations.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import polars as pl
|
|
6
|
+
from typing import List, Dict, Any, Optional
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ValidationError(Exception):
    """Raised when a data-science validation check fails."""
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def validate_file_exists(file_path: str) -> None:
    """Ensure that *file_path* refers to an existing filesystem entry.

    Args:
        file_path: Path to file

    Raises:
        ValidationError: If file doesn't exist
    """
    # Guard clause: nothing to do when the path is present.
    if Path(file_path).exists():
        return
    raise ValidationError(f"File not found: {file_path}")
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def validate_file_format(file_path: str, allowed_formats: Optional[List[str]] = None) -> None:
    """
    Validate file format.

    Args:
        file_path: Path to file
        allowed_formats: List of allowed extensions (default: ['.csv', '.parquet'])

    Raises:
        ValidationError: If file format is not supported
    """
    # Default to the tabular formats the rest of the toolchain reads.
    if allowed_formats is None:
        allowed_formats = ['.csv', '.parquet']

    # Lower-case the extension so 'DATA.CSV' matches '.csv'.
    file_ext = Path(file_path).suffix.lower()
    if file_ext not in allowed_formats:
        raise ValidationError(
            f"Unsupported file format: {file_ext}. Allowed: {', '.join(allowed_formats)}"
        )
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def validate_dataframe(df: pl.DataFrame) -> None:
    """Ensure *df* is a usable, non-empty DataFrame.

    Args:
        df: Polars DataFrame

    Raises:
        ValidationError: If dataframe is invalid or empty
    """
    # Order matters: a None frame supports neither len() nor .columns.
    if df is None:
        raise ValidationError("DataFrame is None")
    if len(df) == 0:
        raise ValidationError("DataFrame is empty (0 rows)")
    if len(df.columns) == 0:
        raise ValidationError("DataFrame has no columns")
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def validate_column_exists(df: pl.DataFrame, column: str) -> None:
    """Assert that a single column is present in *df*.

    Args:
        df: Polars DataFrame
        column: Column name

    Raises:
        ValidationError: If column doesn't exist
    """
    # Guard clause: nothing to report when the column is present.
    if column in df.columns:
        return
    raise ValidationError(
        f"Column '{column}' not found. Available columns: {', '.join(df.columns)}"
    )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def validate_columns_exist(df: pl.DataFrame, columns: List[str]) -> None:
    """Assert that every name in *columns* is a column of *df*.

    Args:
        df: Polars DataFrame
        columns: List of column names

    Raises:
        ValidationError: If any column doesn't exist
    """
    missing = [name for name in columns if name not in df.columns]
    if not missing:
        return
    raise ValidationError(
        f"Columns not found: {', '.join(missing)}. "
        f"Available: {', '.join(df.columns)}"
    )
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def validate_numeric_column(df: pl.DataFrame, column: str) -> None:
    """Assert that *column* exists in *df* and carries a numeric dtype.

    Args:
        df: Polars DataFrame
        column: Column name

    Raises:
        ValidationError: If column is not numeric
    """
    # Existence first, so the caller gets the clearer "not found" error.
    validate_column_exists(df, column)
    if df[column].dtype in pl.NUMERIC_DTYPES:
        return
    raise ValidationError(
        f"Column '{column}' is not numeric (dtype: {df[column].dtype})"
    )
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def validate_categorical_column(df: pl.DataFrame, column: str) -> None:
    """Assert that *column* exists in *df* and is string/categorical typed.

    Args:
        df: Polars DataFrame
        column: Column name

    Raises:
        ValidationError: If column is not categorical
    """
    # Existence first, so the caller gets the clearer "not found" error.
    validate_column_exists(df, column)
    if df[column].dtype in (pl.Utf8, pl.Categorical):
        return
    raise ValidationError(
        f"Column '{column}' is not categorical (dtype: {df[column].dtype})"
    )
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def validate_datetime_column(df: pl.DataFrame, column: str) -> None:
    """Assert that *column* exists in *df* and is date/datetime typed.

    Args:
        df: Polars DataFrame
        column: Column name

    Raises:
        ValidationError: If column is not datetime
    """
    # Existence first, so the caller gets the clearer "not found" error.
    validate_column_exists(df, column)
    if df[column].dtype in (pl.Date, pl.Datetime):
        return
    raise ValidationError(
        f"Column '{column}' is not datetime (dtype: {df[column].dtype})"
    )
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def validate_target_column(df: pl.DataFrame, target_col: str,
                           task_type: Optional[str] = None) -> str:
    """Validate the target column, inferring the task type when absent.

    Args:
        df: Polars DataFrame
        target_col: Target column name
        task_type: Optional task type ('classification' or 'regression')

    Returns:
        Inferred or validated task type

    Raises:
        ValidationError: If target column is invalid
    """
    validate_column_exists(df, target_col)

    target = df[target_col]
    n_unique = target.n_unique()

    # Heuristic inference: many-valued numeric targets look like regression.
    if task_type is None:
        is_continuous = target.dtype in pl.NUMERIC_DTYPES and n_unique > 10
        task_type = "regression" if is_continuous else "classification"

    if task_type not in ("classification", "regression"):
        raise ValidationError(
            f"Invalid task_type: {task_type}. Must be 'classification' or 'regression'"
        )

    # Cross-check the target against the (possibly caller-supplied) task type.
    if task_type == "classification" and n_unique > 100:
        raise ValidationError(
            f"Classification target has too many unique values ({n_unique}). "
            f"Consider regression or check if this is the correct target."
        )

    if task_type == "regression" and target.dtype not in pl.NUMERIC_DTYPES:
        raise ValidationError(
            f"Regression target must be numeric (dtype: {target.dtype})"
        )

    return task_type
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def validate_train_test_split(X_train: Any, X_test: Any,
                              y_train: Any, y_test: Any) -> None:
    """Sanity-check the four pieces of a train/test split.

    Args:
        X_train: Training features
        X_test: Test features
        y_train: Training target
        y_test: Test target

    Raises:
        ValidationError: If split data is invalid
    """
    # Emptiness checks, in the same order a caller would expect to see them.
    empties = (
        (X_train, "X_train is empty"),
        (X_test, "X_test is empty"),
        (y_train, "y_train is empty"),
        (y_test, "y_test is empty"),
    )
    for part, message in empties:
        if len(part) == 0:
            raise ValidationError(message)

    # Features and targets must pair up row-for-row.
    if len(X_train) != len(y_train):
        raise ValidationError(
            f"X_train ({len(X_train)}) and y_train ({len(y_train)}) have different lengths"
        )

    if len(X_test) != len(y_test):
        raise ValidationError(
            f"X_test ({len(X_test)}) and y_test ({len(y_test)}) have different lengths"
        )
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def validate_strategy_config(strategy: Dict[str, Any],
                             required_keys: List[str]) -> None:
    """Check that a strategy config is a dict containing all required keys.

    Args:
        strategy: Strategy configuration
        required_keys: List of required keys

    Raises:
        ValidationError: If configuration is invalid
    """
    if not isinstance(strategy, dict):
        raise ValidationError(f"Strategy must be a dictionary, got {type(strategy)}")

    absent = [key for key in required_keys if key not in strategy]
    if not absent:
        return
    raise ValidationError(
        f"Missing required strategy keys: {', '.join(absent)}"
    )
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
def validate_schema_pandera(
    df: pl.DataFrame,
    schema_config: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Validate a DataFrame against a pandera schema.

    Schema config format:
    {
        "columns": {
            "age": {"dtype": "int", "nullable": False, "checks": {"ge": 0, "le": 150}},
            "name": {"dtype": "str", "nullable": False},
            "salary": {"dtype": "float", "nullable": True, "checks": {"ge": 0}}
        },
        "coerce": True
    }

    Args:
        df: Polars DataFrame to validate
        schema_config: Dictionary defining the expected schema

    Returns:
        Dictionary with validation results and any errors found
    """
    # pandera is an optional dependency: report a friendly error dict instead
    # of raising ImportError. `pd` is unused below directly; presumably it is
    # imported to fail fast when pandas (needed by df.to_pandas()) is absent
    # — TODO(review) confirm.
    try:
        import pandera as pa
        import pandas as pd
    except ImportError:
        return {
            'status': 'error',
            'message': 'pandera not installed. Install with: pip install pandera>=0.18'
        }

    # Missing "columns" means nothing to validate; coercion defaults to on.
    columns_config = schema_config.get("columns", {})
    coerce = schema_config.get("coerce", True)

    # Build pandera schema from config
    schema_columns = {}
    # String dtype names from the config mapped to pandera dtype classes.
    # Unknown names fall through to None (pandera then skips dtype checking).
    dtype_map = {
        "int": pa.Int,
        "float": pa.Float,
        "str": pa.String,
        "bool": pa.Bool,
        "datetime": pa.DateTime,
    }

    # Config check names mapped to pandera Check constructors. Check names
    # not listed here are silently ignored in the loop below.
    check_map = {
        "ge": lambda v: pa.Check.ge(v),
        "le": lambda v: pa.Check.le(v),
        "gt": lambda v: pa.Check.gt(v),
        "lt": lambda v: pa.Check.lt(v),
        "in_range": lambda v: pa.Check.in_range(v[0], v[1]),
        "isin": lambda v: pa.Check.isin(v),
        "str_matches": lambda v: pa.Check.str_matches(v),
        "str_length": lambda v: pa.Check.str_length(max_value=v),
    }

    # Translate each per-column config entry into a pa.Column.
    for col_name, col_config in columns_config.items():
        col_dtype = dtype_map.get(col_config.get("dtype", ""), None)
        nullable = col_config.get("nullable", True)
        checks_config = col_config.get("checks", {})

        checks = []
        for check_name, check_val in checks_config.items():
            if check_name in check_map:
                checks.append(check_map[check_name](check_val))

        schema_columns[col_name] = pa.Column(
            dtype=col_dtype,
            nullable=nullable,
            checks=checks if checks else None,
            coerce=coerce
        )

    schema = pa.DataFrameSchema(columns=schema_columns, coerce=coerce)

    # Convert Polars to Pandas for pandera validation
    df_pd = df.to_pandas()

    # lazy=True collects all failures into one SchemaErrors instead of
    # raising on the first one.
    try:
        schema.validate(df_pd, lazy=True)
        return {
            'status': 'success',
            'valid': True,
            'message': 'DataFrame passed all schema validations',
            'columns_validated': list(columns_config.keys())
        }
    except pa.errors.SchemaErrors as err:
        # Flatten pandera's failure_cases frame into plain dicts.
        errors = []
        for _, row in err.failure_cases.iterrows():
            # NOTE(review): for frame-level failures 'index' can be NaN, and
            # int(NaN) raises ValueError rather than hitting the None branch
            # — confirm against pandera's failure_cases schema.
            errors.append({
                'column': str(row.get('column', '')),
                'check': str(row.get('check', '')),
                'failure_case': str(row.get('failure_case', '')),
                'index': int(row.get('index', -1)) if row.get('index') is not None else None
            })

        # Validation *ran* successfully even though the data is invalid,
        # hence status 'success' with valid=False.
        return {
            'status': 'success',
            'valid': False,
            'message': f'Schema validation failed with {len(errors)} errors',
            'errors': errors[:50],  # Limit to 50 errors
            'total_errors': len(errors),
            'columns_validated': list(columns_config.keys())
        }
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow State Management
|
|
3
|
+
Stores intermediate results and metadata between steps to minimize LLM context.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
from typing import Dict, Any, List, Optional
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from datetime import datetime, timezone
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class WorkflowState:
    """
    Structured state object that holds workflow context.
    Replaces storing everything in LLM conversation history.
    """

    def __init__(self):
        # Per-stage summaries; each stays None until its stage has run.
        self.dataset_info: Optional[Dict[str, Any]] = None
        self.profiling_summary: Optional[Dict[str, Any]] = None
        self.quality_issues: Optional[Dict[str, Any]] = None
        self.cleaning_results: Optional[Dict[str, Any]] = None
        self.feature_engineering: Optional[Dict[str, Any]] = None
        self.modeling_results: Optional[Dict[str, Any]] = None
        self.visualization_paths: List[str] = []
        # Path of the most recent data artifact; advanced as stages emit files.
        self.current_file: Optional[str] = None
        self.target_column: Optional[str] = None
        self.task_type: Optional[str] = None  # 'classification', 'regression', etc.
        # Ordered log of completed stage markers (a re-run stage appears twice).
        self.steps_completed: List[str] = []
        # Timezone-aware UTC timestamp: datetime.utcnow() is deprecated
        # (Python 3.12) and returns a naive datetime.
        self.created_at = datetime.now(timezone.utc).isoformat()

    def update_dataset_info(self, info: Dict[str, Any]):
        """Store basic dataset metadata (schema, shape, etc.)"""
        self.dataset_info = info
        self.current_file = info.get('file_path')
        self.steps_completed.append('dataset_loaded')

    def update_profiling(self, summary: Dict[str, Any]):
        """Store profiling results summary"""
        self.profiling_summary = summary
        self.steps_completed.append('profiling_complete')

    def update_quality(self, issues: Dict[str, Any]):
        """Store data quality assessment"""
        self.quality_issues = issues
        self.steps_completed.append('quality_checked')

    def update_cleaning(self, results: Dict[str, Any]):
        """Store cleaning/preprocessing results"""
        self.cleaning_results = results
        # A cleaned output file becomes the working artifact for later stages.
        if results.get('output_file'):
            self.current_file = results['output_file']
        self.steps_completed.append('data_cleaned')

    def update_features(self, results: Dict[str, Any]):
        """Store feature engineering results"""
        self.feature_engineering = results
        if results.get('output_file'):
            self.current_file = results['output_file']
        self.steps_completed.append('features_engineered')

    def update_modeling(self, results: Dict[str, Any]):
        """Store model training results"""
        self.modeling_results = results
        self.steps_completed.append('model_trained')

    def add_visualization(self, path: str):
        """Track generated visualization"""
        self.visualization_paths.append(path)

    def get_context_for_step(self, step_name: str) -> Dict[str, Any]:
        """
        Get minimal context needed for a specific step.
        This replaces sending full conversation history to LLM.

        Args:
            step_name: One of 'profiling', 'quality_check', 'cleaning',
                'feature_engineering', 'modeling', 'visualization'.
                Unknown names return just the base context.

        Returns:
            Base context (file, target, task type, completed steps) plus the
            slices of prior-stage output the named step needs.
        """
        context = {
            'current_file': self.current_file,
            'target_column': self.target_column,
            'task_type': self.task_type,
            'steps_completed': self.steps_completed
        }

        # Step-specific context slicing
        if step_name == 'profiling':
            context['dataset_info'] = self.dataset_info

        elif step_name == 'quality_check':
            context['dataset_info'] = self.dataset_info
            context['profiling'] = self.profiling_summary

        elif step_name == 'cleaning':
            context['quality_issues'] = self.quality_issues
            context['profiling'] = self.profiling_summary

        elif step_name == 'feature_engineering':
            context['cleaning_results'] = self.cleaning_results
            context['dataset_info'] = self.dataset_info

        elif step_name == 'modeling':
            context['feature_engineering'] = self.feature_engineering
            context['cleaning_results'] = self.cleaning_results
            context['target_column'] = self.target_column
            context['task_type'] = self.task_type

        elif step_name == 'visualization':
            context['modeling_results'] = self.modeling_results
            context['dataset_info'] = self.dataset_info

        return context

    def to_dict(self) -> Dict[str, Any]:
        """Serialize state for storage/debugging"""
        return {
            'dataset_info': self.dataset_info,
            'profiling_summary': self.profiling_summary,
            'quality_issues': self.quality_issues,
            'cleaning_results': self.cleaning_results,
            'feature_engineering': self.feature_engineering,
            'modeling_results': self.modeling_results,
            'visualization_paths': self.visualization_paths,
            'current_file': self.current_file,
            'target_column': self.target_column,
            'task_type': self.task_type,
            'steps_completed': self.steps_completed,
            'created_at': self.created_at
        }

    def save_to_file(self, path: str):
        """Save state to JSON file, creating parent directories as needed."""
        Path(path).parent.mkdir(parents=True, exist_ok=True)
        with open(path, 'w') as f:
            json.dump(self.to_dict(), f, indent=2)

    @classmethod
    def load_from_file(cls, path: str) -> 'WorkflowState':
        """Load state from a JSON file previously written by save_to_file.

        Missing keys degrade gracefully to None / empty defaults so older
        state files remain loadable.
        """
        with open(path, 'r') as f:
            data = json.load(f)

        state = cls()
        state.dataset_info = data.get('dataset_info')
        state.profiling_summary = data.get('profiling_summary')
        state.quality_issues = data.get('quality_issues')
        state.cleaning_results = data.get('cleaning_results')
        state.feature_engineering = data.get('feature_engineering')
        state.modeling_results = data.get('modeling_results')
        state.visualization_paths = data.get('visualization_paths', [])
        state.current_file = data.get('current_file')
        state.target_column = data.get('target_column')
        state.task_type = data.get('task_type')
        state.steps_completed = data.get('steps_completed', [])
        state.created_at = data.get('created_at')

        return state
|