ds-agent-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/ds-agent.js +451 -0
- package/ds_agent/__init__.py +8 -0
- package/package.json +28 -0
- package/requirements.txt +126 -0
- package/setup.py +35 -0
- package/src/__init__.py +7 -0
- package/src/_compress_tool_result.py +118 -0
- package/src/api/__init__.py +4 -0
- package/src/api/app.py +1626 -0
- package/src/cache/__init__.py +5 -0
- package/src/cache/cache_manager.py +561 -0
- package/src/cli.py +2886 -0
- package/src/dynamic_prompts.py +281 -0
- package/src/orchestrator.py +4799 -0
- package/src/progress_manager.py +139 -0
- package/src/reasoning/__init__.py +332 -0
- package/src/reasoning/business_summary.py +431 -0
- package/src/reasoning/data_understanding.py +356 -0
- package/src/reasoning/model_explanation.py +383 -0
- package/src/reasoning/reasoning_trace.py +239 -0
- package/src/registry/__init__.py +3 -0
- package/src/registry/tools_registry.py +3 -0
- package/src/session_memory.py +448 -0
- package/src/session_store.py +370 -0
- package/src/storage/__init__.py +19 -0
- package/src/storage/artifact_store.py +620 -0
- package/src/storage/helpers.py +116 -0
- package/src/storage/huggingface_storage.py +694 -0
- package/src/storage/r2_storage.py +0 -0
- package/src/storage/user_files_service.py +288 -0
- package/src/tools/__init__.py +335 -0
- package/src/tools/advanced_analysis.py +823 -0
- package/src/tools/advanced_feature_engineering.py +708 -0
- package/src/tools/advanced_insights.py +578 -0
- package/src/tools/advanced_preprocessing.py +549 -0
- package/src/tools/advanced_training.py +906 -0
- package/src/tools/agent_tool_mapping.py +326 -0
- package/src/tools/auto_pipeline.py +420 -0
- package/src/tools/autogluon_training.py +1480 -0
- package/src/tools/business_intelligence.py +860 -0
- package/src/tools/cloud_data_sources.py +581 -0
- package/src/tools/code_interpreter.py +390 -0
- package/src/tools/computer_vision.py +614 -0
- package/src/tools/data_cleaning.py +614 -0
- package/src/tools/data_profiling.py +593 -0
- package/src/tools/data_type_conversion.py +268 -0
- package/src/tools/data_wrangling.py +433 -0
- package/src/tools/eda_reports.py +284 -0
- package/src/tools/enhanced_feature_engineering.py +241 -0
- package/src/tools/feature_engineering.py +302 -0
- package/src/tools/matplotlib_visualizations.py +1327 -0
- package/src/tools/model_training.py +520 -0
- package/src/tools/nlp_text_analytics.py +761 -0
- package/src/tools/plotly_visualizations.py +497 -0
- package/src/tools/production_mlops.py +852 -0
- package/src/tools/time_series.py +507 -0
- package/src/tools/tools_registry.py +2133 -0
- package/src/tools/visualization_engine.py +559 -0
- package/src/utils/__init__.py +42 -0
- package/src/utils/error_recovery.py +313 -0
- package/src/utils/parallel_executor.py +402 -0
- package/src/utils/polars_helpers.py +248 -0
- package/src/utils/schema_extraction.py +132 -0
- package/src/utils/semantic_layer.py +392 -0
- package/src/utils/token_budget.py +411 -0
- package/src/utils/validation.py +377 -0
- package/src/workflow_state.py +154 -0
|
@@ -0,0 +1,694 @@
|
|
|
1
|
+
"""
|
|
2
|
+
HuggingFace Storage Service
|
|
3
|
+
|
|
4
|
+
Stores user artifacts (datasets, models, plots, reports) directly to the user's
|
|
5
|
+
HuggingFace account, enabling:
|
|
6
|
+
1. Persistent storage at no cost
|
|
7
|
+
2. Easy model deployment
|
|
8
|
+
3. User ownership of data
|
|
9
|
+
4. Version control via Git
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import gzip
import json
import logging
import os
import shutil
import tempfile
from datetime import datetime
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Union
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
# Optional: huggingface_hub for HF operations
|
|
24
|
+
try:
|
|
25
|
+
from huggingface_hub import HfApi, upload_folder
|
|
26
|
+
from huggingface_hub.utils import RepositoryNotFoundError
|
|
27
|
+
HF_AVAILABLE = True
|
|
28
|
+
except ImportError:
|
|
29
|
+
HF_AVAILABLE = False
|
|
30
|
+
logger.warning("huggingface_hub not installed. Install with: pip install huggingface_hub")
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class HuggingFaceStorage:
    """
    Stores user artifacts (datasets, models, plots, reports) in the user's
    own HuggingFace account, giving them persistent free storage, easy model
    deployment, ownership of their data, and Git-based version control.

    Repository layout on the Hub:
      - Datasets repo  {username}/ds-agent-data
          datasets/{session_id}/cleaned_data.csv.gz
          datasets/{session_id}/encoded_data.csv.gz
      - Models repo    {username}/ds-agent-models
          models/{session_id}/{model_name}.pkl
          models/{session_id}/model_config.json
      - Outputs repo   {username}/ds-agent-outputs
          plots/{session_id}/correlation_heatmap.json
          reports/{session_id}/eda_report.html.gz
    """

    def __init__(self, hf_token: Optional[str] = None):
        """
        Initialize HuggingFace storage.

        Args:
            hf_token: HuggingFace API token with write permissions.
                Falls back to the HF_TOKEN environment variable.

        Raises:
            ImportError: when huggingface_hub is not installed.
            ValueError: when no token can be resolved.
        """
        if not HF_AVAILABLE:
            raise ImportError("huggingface_hub is required. Install with: pip install huggingface_hub")

        resolved = hf_token or os.environ.get("HF_TOKEN")
        if not resolved:
            raise ValueError("HuggingFace token is required")
        self.token = resolved

        self.api = HfApi(token=self.token)
        # Resolved lazily via whoami() the first time `username` is read.
        self._username: Optional[str] = None

        # Per-category repo-name suffixes, combined with the username
        # by _get_repo_id().
        self.DATA_REPO_SUFFIX = "ds-agent-data"
        self.MODELS_REPO_SUFFIX = "ds-agent-models"
        self.OUTPUTS_REPO_SUFFIX = "ds-agent-outputs"
@property
def username(self) -> str:
    """Return the authenticated account name, fetched once via whoami() and cached."""
    cached = self._username
    if cached is None:
        cached = self.api.whoami()["name"]
        self._username = cached
    return cached
def _get_repo_id(self, repo_type: str) -> str:
|
|
82
|
+
"""Get the full repo ID for a given type."""
|
|
83
|
+
suffix_map = {
|
|
84
|
+
"data": self.DATA_REPO_SUFFIX,
|
|
85
|
+
"models": self.MODELS_REPO_SUFFIX,
|
|
86
|
+
"outputs": self.OUTPUTS_REPO_SUFFIX
|
|
87
|
+
}
|
|
88
|
+
suffix = suffix_map.get(repo_type, self.OUTPUTS_REPO_SUFFIX)
|
|
89
|
+
return f"{self.username}/{suffix}"
|
|
90
|
+
|
|
91
|
+
def _ensure_repo_exists(self, repo_type: str, repo_kind: str = "dataset") -> str:
    """
    Make sure the target repository exists on the Hub, creating it if missing.

    Args:
        repo_type: "data", "models", or "outputs"
        repo_kind: "dataset", "model", or "space"

    Returns:
        The repo ID
    """
    repo_id = self._get_repo_id(repo_type)

    try:
        self.api.repo_info(repo_id=repo_id, repo_type=repo_kind)
    except RepositoryNotFoundError:
        logger.info(f"Creating repo {repo_id}")
        self.api.create_repo(
            repo_id=repo_id,
            repo_type=repo_kind,
            private=True,   # user artifacts default to private
            exist_ok=True,  # tolerate creation races
        )
    else:
        logger.info(f"Repo {repo_id} exists")

    return repo_id
def upload_dataset(
    self,
    file_path: str,
    session_id: str,
    file_name: Optional[str] = None,
    compress: bool = True,
    metadata: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Upload a dataset (CSV, Parquet) to the user's HuggingFace data repo.

    Args:
        file_path: Local path to the file
        session_id: Session ID for organizing files
        file_name: Optional custom filename
        compress: Whether to gzip compress the file
        metadata: Optional metadata stored as a .meta.json sidecar

    Returns:
        Dict with upload info (url, path, size, etc.) on success,
        or {"success": False, "error": ...} on failure.
    """
    repo_id = self._ensure_repo_exists("data", "dataset")

    original_path = Path(file_path)
    file_name = file_name or original_path.name

    # Gzip into a temp file unless the source already looks compressed.
    if compress and not file_name.endswith('.gz'):
        with tempfile.NamedTemporaryFile(suffix='.gz', delete=False) as tmp:
            upload_path = tmp.name
        # Stream instead of reading the whole file into memory.
        with open(file_path, 'rb') as f_in, gzip.open(upload_path, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        file_name = f"{file_name}.gz"
    else:
        upload_path = file_path

    path_in_repo = f"datasets/{session_id}/{file_name}"

    try:
        self.api.upload_file(
            path_or_fileobj=upload_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type="dataset",
            commit_message=f"Add dataset: {file_name}"
        )

        # Optionally upload a metadata sidecar next to the dataset.
        if metadata:
            metadata_path = f"datasets/{session_id}/{file_name}.meta.json"
            meta_tmp = None
            try:
                with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp:
                    meta_tmp = tmp.name
                    json.dump({
                        **metadata,
                        "uploaded_at": datetime.now().isoformat(),
                        "original_name": original_path.name,
                        "compressed": compress
                    }, tmp)

                self.api.upload_file(
                    path_or_fileobj=meta_tmp,
                    path_in_repo=metadata_path,
                    repo_id=repo_id,
                    repo_type="dataset",
                    commit_message=f"Add metadata for {file_name}"
                )
            finally:
                # BUG FIX: the metadata temp file was previously leaked.
                if meta_tmp is not None:
                    try:
                        os.unlink(meta_tmp)
                    except OSError:
                        pass

        file_size = os.path.getsize(upload_path)

        return {
            "success": True,
            "repo_id": repo_id,
            "path": path_in_repo,
            "url": f"https://huggingface.co/datasets/{repo_id}/blob/main/{path_in_repo}",
            "download_url": f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path_in_repo}",
            "size_bytes": file_size,
            "compressed": compress
        }

    except Exception as e:
        logger.error(f"Failed to upload dataset: {e}")
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        # Remove the compressed temp copy if one was created.
        if upload_path != file_path:
            try:
                os.unlink(upload_path)
            except OSError:  # narrowed from bare except
                pass
def upload_model(
    self,
    model_path: str,
    session_id: str,
    model_name: str,
    model_type: str = "sklearn",
    metrics: Optional[Dict[str, float]] = None,
    feature_names: Optional[List[str]] = None,
    target_column: Optional[str] = None
) -> Dict[str, Any]:
    """
    Upload a trained model, a generated model card, and a config.json to
    the user's HuggingFace models repo.

    Args:
        model_path: Local path to the model file (.pkl, .joblib, .pt, etc.)
        session_id: Session ID
        model_name: Name for the model
        model_type: Type of model (sklearn, xgboost, pytorch, etc.)
        metrics: Model performance metrics
        feature_names: List of feature names the model expects
        target_column: Target column name

    Returns:
        Dict with upload info on success, or {"success": False, "error": ...}.
    """
    repo_id = self._ensure_repo_exists("models", "model")

    path_in_repo = f"models/{session_id}/{model_name}"
    model_file_name = Path(model_path).name
    tmp_paths: List[str] = []  # temp files to clean up afterwards

    try:
        # 1) The serialized model itself.
        self.api.upload_file(
            path_or_fileobj=model_path,
            path_in_repo=f"{path_in_repo}/{model_file_name}",
            repo_id=repo_id,
            repo_type="model",
            commit_message=f"Add model: {model_name}"
        )

        # 2) A human-readable model card.
        model_card = self._generate_model_card(
            model_name=model_name,
            model_type=model_type,
            metrics=metrics,
            feature_names=feature_names,
            target_column=target_column
        )
        with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False) as tmp:
            tmp_paths.append(tmp.name)
            tmp.write(model_card)
        self.api.upload_file(
            path_or_fileobj=tmp_paths[-1],
            path_in_repo=f"{path_in_repo}/README.md",
            repo_id=repo_id,
            repo_type="model",
            commit_message=f"Add model card for {model_name}"
        )

        # 3) Machine-readable config for later reloading.
        config = {
            "model_name": model_name,
            "model_type": model_type,
            "model_file": model_file_name,
            "metrics": metrics or {},
            "feature_names": feature_names or [],
            "target_column": target_column,
            "created_at": datetime.now().isoformat(),
            "session_id": session_id
        }
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp:
            tmp_paths.append(tmp.name)
            json.dump(config, tmp, indent=2)
        self.api.upload_file(
            path_or_fileobj=tmp_paths[-1],
            path_in_repo=f"{path_in_repo}/config.json",
            repo_id=repo_id,
            repo_type="model",
            commit_message=f"Add config for {model_name}"
        )

        return {
            "success": True,
            "repo_id": repo_id,
            "path": path_in_repo,
            "url": f"https://huggingface.co/{repo_id}/tree/main/{path_in_repo}",
            "model_type": model_type,
            "metrics": metrics
        }

    except Exception as e:
        logger.error(f"Failed to upload model: {e}")
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        # BUG FIX: the README/config temp files were previously never removed.
        for p in tmp_paths:
            try:
                os.unlink(p)
            except OSError:
                pass
def upload_plot(
    self,
    plot_data: Union[str, Dict],
    session_id: str,
    plot_name: str,
    plot_type: str = "plotly"
) -> Dict[str, Any]:
    """
    Upload plot data (as JSON) to the user's HuggingFace outputs repo.

    For Plotly charts the JSON spec is stored and rendered client-side,
    which is much smaller than storing full HTML.

    Args:
        plot_data: Either JSON string or dict of plot data
        session_id: Session ID
        plot_name: Name for the plot
        plot_type: Type of plot (plotly, matplotlib, etc.)

    Returns:
        Dict with upload info on success, or {"success": False, "error": ...}.
    """
    repo_id = self._ensure_repo_exists("outputs", "dataset")

    # Normalize to a JSON string.
    plot_json = json.dumps(plot_data) if isinstance(plot_data, dict) else plot_data

    path_in_repo = f"plots/{session_id}/{plot_name}.json"
    tmp_path: Optional[str] = None

    try:
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp:
            tmp_path = tmp.name
            tmp.write(plot_json)

        self.api.upload_file(
            path_or_fileobj=tmp_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type="dataset",
            commit_message=f"Add plot: {plot_name}"
        )

        return {
            "success": True,
            "repo_id": repo_id,
            "path": path_in_repo,
            "url": f"https://huggingface.co/datasets/{repo_id}/blob/main/{path_in_repo}",
            "download_url": f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path_in_repo}",
            "plot_type": plot_type,
            "size_bytes": len(plot_json.encode())
        }

    except Exception as e:
        logger.error(f"Failed to upload plot: {e}")
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        # BUG FIX: the temp JSON file was previously never deleted.
        if tmp_path is not None:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
def upload_report(
    self,
    report_path: str,
    session_id: str,
    report_name: str,
    compress: bool = True
) -> Dict[str, Any]:
    """
    Upload an HTML report to the user's HuggingFace outputs repo.

    Args:
        report_path: Local path to the HTML report
        session_id: Session ID
        report_name: Name for the report
        compress: Whether to gzip compress

    Returns:
        Dict with upload info on success, or {"success": False, "error": ...}.
    """
    repo_id = self._ensure_repo_exists("outputs", "dataset")

    file_name = f"{report_name}.html"

    # Gzip into a temp file when requested; otherwise upload the original.
    if compress:
        with tempfile.NamedTemporaryFile(suffix='.html.gz', delete=False) as tmp:
            upload_path = tmp.name
        # Stream instead of reading the whole report into memory.
        with open(report_path, 'rb') as f_in, gzip.open(upload_path, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        file_name = f"{file_name}.gz"
    else:
        upload_path = report_path

    path_in_repo = f"reports/{session_id}/{file_name}"

    try:
        self.api.upload_file(
            path_or_fileobj=upload_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type="dataset",
            commit_message=f"Add report: {report_name}"
        )

        file_size = os.path.getsize(upload_path)

        return {
            "success": True,
            "repo_id": repo_id,
            "path": path_in_repo,
            "url": f"https://huggingface.co/datasets/{repo_id}/blob/main/{path_in_repo}",
            "download_url": f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path_in_repo}",
            "size_bytes": file_size,
            "compressed": compress
        }

    except Exception as e:
        logger.error(f"Failed to upload report: {e}")
        return {
            "success": False,
            "error": str(e)
        }
    finally:
        if upload_path != report_path:
            try:
                os.unlink(upload_path)
            except OSError:  # narrowed from bare except
                pass
def upload_generic_file(
    self,
    file_path: str,
    session_id: str,
    subfolder: str = "files"
) -> Dict[str, Any]:
    """
    Push an arbitrary local file into the user's HuggingFace outputs repo.

    Args:
        file_path: Local path to the file
        session_id: Session ID
        subfolder: Subfolder within outputs (e.g., "plots", "images", "files")

    Returns:
        Dict with upload info
    """
    repo_id = self._ensure_repo_exists("outputs", "dataset")

    file_name = Path(file_path).name
    path_in_repo = f"{subfolder}/{session_id}/{file_name}"

    try:
        self.api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type="dataset",
            commit_message=f"Add {subfolder}: {file_name}"
        )
        size_bytes = os.path.getsize(file_path)
    except Exception as e:
        logger.error(f"Failed to upload file: {e}")
        return {
            "success": False,
            "error": str(e)
        }

    base = f"https://huggingface.co/datasets/{repo_id}"
    return {
        "success": True,
        "repo_id": repo_id,
        "path": path_in_repo,
        "url": f"{base}/blob/main/{path_in_repo}",
        "download_url": f"{base}/resolve/main/{path_in_repo}",
        "size_bytes": size_bytes
    }
def list_user_files(
    self,
    session_id: Optional[str] = None,
    file_type: Optional[str] = None
) -> Dict[str, List[Dict[str, Any]]]:
    """
    List all files for the user, optionally filtered by session or type.

    Listing is best-effort: a repo that is missing or unreadable simply
    contributes no entries.

    Args:
        session_id: Optional session ID to filter by
        file_type: Optional type ("datasets", "models", "plots", "reports")

    Returns:
        Dict with lists of files by type
    """
    result: Dict[str, List[Dict[str, Any]]] = {
        "datasets": [],
        "models": [],
        "plots": [],
        "reports": []
    }

    def _matches_session(path: str) -> bool:
        # No filter means everything matches.
        return session_id is None or f"/{session_id}/" in path

    def _session_of(path: str) -> Optional[str]:
        parts = path.split("/")
        return parts[1] if len(parts) > 1 else None

    try:
        # Datasets live in the "data" repo under datasets/{session}/...
        if file_type is None or file_type == "datasets":
            repo_id = self._get_repo_id("data")
            try:
                for f in self.api.list_repo_files(repo_id=repo_id, repo_type="dataset"):
                    if f.startswith("datasets/") and not f.endswith(".meta.json") and _matches_session(f):
                        result["datasets"].append({
                            "path": f,
                            "name": Path(f).name,
                            "session_id": _session_of(f),
                            "download_url": f"https://huggingface.co/datasets/{repo_id}/resolve/main/{f}"
                        })
            except Exception:  # narrowed from bare except; repo may not exist yet
                logger.debug("Could not list dataset repo %s", repo_id)

        # Models are identified by their config.json marker file.
        if file_type is None or file_type == "models":
            repo_id = self._get_repo_id("models")
            try:
                for f in self.api.list_repo_files(repo_id=repo_id, repo_type="model"):
                    if f.startswith("models/") and f.endswith("config.json") and _matches_session(f):
                        parts = f.split("/")
                        model_path = "/".join(parts[:-1])
                        result["models"].append({
                            "path": model_path,
                            "name": parts[-2] if len(parts) > 2 else None,
                            "session_id": _session_of(f),
                            "url": f"https://huggingface.co/{repo_id}/tree/main/{model_path}"
                        })
            except Exception:  # narrowed from bare except
                logger.debug("Could not list models repo %s", repo_id)

        # Plots and reports share the "outputs" repo.
        # BUG FIX: previously asking for file_type="plots" also returned
        # reports (and vice versa) because both branches always ran.
        want_plots = file_type is None or file_type == "plots"
        want_reports = file_type is None or file_type == "reports"
        if want_plots or want_reports:
            repo_id = self._get_repo_id("outputs")
            try:
                for f in self.api.list_repo_files(repo_id=repo_id, repo_type="dataset"):
                    if not _matches_session(f):
                        continue
                    if want_plots and f.startswith("plots/"):
                        result["plots"].append({
                            "path": f,
                            "name": Path(f).stem,
                            "session_id": _session_of(f),
                            "download_url": f"https://huggingface.co/datasets/{repo_id}/resolve/main/{f}"
                        })
                    elif want_reports and f.startswith("reports/"):
                        result["reports"].append({
                            "path": f,
                            "name": Path(f).stem.replace(".html", ""),
                            "session_id": _session_of(f),
                            "download_url": f"https://huggingface.co/datasets/{repo_id}/resolve/main/{f}"
                        })
            except Exception:  # narrowed from bare except
                logger.debug("Could not list outputs repo %s", repo_id)

    except Exception as e:
        logger.error(f"Failed to list files: {e}")

    return result
def _generate_model_card(
|
|
584
|
+
self,
|
|
585
|
+
model_name: str,
|
|
586
|
+
model_type: str,
|
|
587
|
+
metrics: Optional[Dict[str, float]] = None,
|
|
588
|
+
feature_names: Optional[List[str]] = None,
|
|
589
|
+
target_column: Optional[str] = None
|
|
590
|
+
) -> str:
|
|
591
|
+
"""Generate a HuggingFace model card."""
|
|
592
|
+
|
|
593
|
+
metrics_str = ""
|
|
594
|
+
if metrics:
|
|
595
|
+
metrics_str = "\n".join([f"- **{k}**: {v:.4f}" for k, v in metrics.items()])
|
|
596
|
+
|
|
597
|
+
features_str = ""
|
|
598
|
+
if feature_names:
|
|
599
|
+
features_str = ", ".join(f"`{f}`" for f in feature_names[:20])
|
|
600
|
+
if len(feature_names) > 20:
|
|
601
|
+
features_str += f" ... and {len(feature_names) - 20} more"
|
|
602
|
+
|
|
603
|
+
return f"""---
|
|
604
|
+
license: apache-2.0
|
|
605
|
+
tags:
|
|
606
|
+
- tabular
|
|
607
|
+
- {model_type}
|
|
608
|
+
- ds-agent
|
|
609
|
+
---
|
|
610
|
+
|
|
611
|
+
# {model_name}
|
|
612
|
+
|
|
613
|
+
This model was trained using [DS Agent](https://huggingface.co/spaces/Pulastya0/Data-Science-Agent),
|
|
614
|
+
an AI-powered data science assistant.
|
|
615
|
+
|
|
616
|
+
## Model Details
|
|
617
|
+
|
|
618
|
+
- **Model Type**: {model_type}
|
|
619
|
+
- **Target Column**: {target_column or "Not specified"}
|
|
620
|
+
- **Created**: {datetime.now().strftime("%Y-%m-%d %H:%M")}
|
|
621
|
+
|
|
622
|
+
## Performance Metrics
|
|
623
|
+
|
|
624
|
+
{metrics_str or "No metrics recorded"}
|
|
625
|
+
|
|
626
|
+
## Features
|
|
627
|
+
|
|
628
|
+
{features_str or "Feature names not recorded"}
|
|
629
|
+
|
|
630
|
+
## Usage
|
|
631
|
+
|
|
632
|
+
```python
|
|
633
|
+
import joblib
|
|
634
|
+
|
|
635
|
+
# Load the model
|
|
636
|
+
model = joblib.load("model.pkl")
|
|
637
|
+
|
|
638
|
+
# Make predictions
|
|
639
|
+
predictions = model.predict(X_new)
|
|
640
|
+
```
|
|
641
|
+
|
|
642
|
+
## Training
|
|
643
|
+
|
|
644
|
+
This model was automatically trained using DS Agent's ML pipeline which includes:
|
|
645
|
+
- Automated data cleaning
|
|
646
|
+
- Feature engineering
|
|
647
|
+
- Hyperparameter optimization with Optuna
|
|
648
|
+
- Cross-validation
|
|
649
|
+
|
|
650
|
+
---
|
|
651
|
+
|
|
652
|
+
*Generated by DS Agent*
|
|
653
|
+
"""
|
|
654
|
+
|
|
655
|
+
def get_user_storage_stats(self) -> Dict[str, Any]:
    """Summarize how many artifacts of each kind the user has stored."""
    files = self.list_user_files()
    stats: Dict[str, Any] = {
        "datasets_count": len(files["datasets"]),
        "models_count": len(files["models"]),
        "plots_count": len(files["plots"]),
        "reports_count": len(files["reports"]),
    }
    # Grand total across all four categories.
    stats["total_files"] = sum(stats.values())
    return stats
# Convenience function for creating storage instance
|
|
676
|
+
def get_hf_storage(token: str) -> Optional[HuggingFaceStorage]:
    """
    Build a HuggingFaceStorage instance for the given API token.

    Args:
        token: HuggingFace API token

    Returns:
        HuggingFaceStorage instance, or None when the hub client is
        unavailable or initialization fails.
    """
    if not HF_AVAILABLE:
        logger.error("huggingface_hub not installed")
        return None

    try:
        storage = HuggingFaceStorage(hf_token=token)
    except Exception as e:
        logger.error(f"Failed to create HF storage: {e}")
        return None
    return storage
|
|
File without changes
|