vesper-wizard 2.0.4 → 2.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205)
  1. package/LICENSE +21 -0
  2. package/README.md +300 -37
  3. package/build/cache/cdn.js +34 -0
  4. package/build/cache/service.js +63 -0
  5. package/build/cleaning/cleaner.js +81 -0
  6. package/build/cleaning/evaluator.js +89 -0
  7. package/build/cleaning/executor.js +62 -0
  8. package/build/cleaning/exporter.js +87 -0
  9. package/build/cleaning/planner.js +127 -0
  10. package/build/cleaning/rules.js +57 -0
  11. package/build/cleaning/types.js +1 -0
  12. package/build/cloud/adapters/local.js +37 -0
  13. package/build/cloud/adapters/s3.js +24 -0
  14. package/build/cloud/adapters/supabase.js +49 -0
  15. package/build/cloud/storage-manager.js +26 -0
  16. package/build/cloud/types.js +1 -0
  17. package/build/compliance/service.js +73 -0
  18. package/build/compliance/store.js +80 -0
  19. package/build/compliance/types.js +1 -0
  20. package/build/config/config-manager.js +221 -0
  21. package/build/config/secure-keys.js +51 -0
  22. package/build/config/user-config.js +48 -0
  23. package/build/data/processing-worker.js +23 -0
  24. package/build/data/streaming.js +38 -0
  25. package/build/data/worker-pool.js +39 -0
  26. package/build/export/exporter.js +69 -0
  27. package/build/export/packager.js +100 -0
  28. package/build/export/types.js +1 -0
  29. package/build/fusion/aligner.js +56 -0
  30. package/build/fusion/deduplicator.js +69 -0
  31. package/build/fusion/engine.js +69 -0
  32. package/build/fusion/harmonizer.js +39 -0
  33. package/build/fusion/orchestrator.js +86 -0
  34. package/build/fusion/types.js +1 -0
  35. package/build/gateway/unified-dataset-gateway.js +409 -0
  36. package/build/index.js +2704 -0
  37. package/build/ingestion/hf-downloader.js +171 -0
  38. package/build/ingestion/ingestor.js +271 -0
  39. package/build/ingestion/kaggle-downloader.js +102 -0
  40. package/build/install/install-service.js +41 -0
  41. package/build/jobs/manager.js +136 -0
  42. package/build/jobs/queue.js +59 -0
  43. package/build/jobs/types.js +1 -0
  44. package/build/lib/supabase.js +3 -0
  45. package/build/metadata/dataworld-source.js +89 -0
  46. package/build/metadata/domain.js +147 -0
  47. package/build/metadata/github-scraper.js +47 -0
  48. package/build/metadata/institutional-scrapers.js +49 -0
  49. package/build/metadata/kaggle-scraper.js +182 -0
  50. package/build/metadata/kaggle-source.js +70 -0
  51. package/build/metadata/license.js +68 -0
  52. package/build/metadata/monitoring-service.js +107 -0
  53. package/build/metadata/monitoring-store.js +78 -0
  54. package/build/metadata/monitoring-types.js +1 -0
  55. package/build/metadata/openml-source.js +87 -0
  56. package/build/metadata/quality.js +48 -0
  57. package/build/metadata/rate-limiter.js +128 -0
  58. package/build/metadata/scraper.js +377 -0
  59. package/build/metadata/store.js +340 -0
  60. package/build/metadata/types.js +1 -0
  61. package/build/metadata/uci-scraper.js +49 -0
  62. package/build/monitoring/observability.js +76 -0
  63. package/build/preparation/target-detector.js +75 -0
  64. package/build/python/__pycache__/config.cpython-312.pyc +0 -0
  65. package/build/python/asset_downloader_engine.py +92 -0
  66. package/build/python/cleaner.py +226 -0
  67. package/build/python/config.py +263 -0
  68. package/build/python/dataworld_engine.py +208 -0
  69. package/build/python/export_engine.py +243 -0
  70. package/build/python/framework_adapters.py +100 -0
  71. package/build/python/fusion_engine.py +368 -0
  72. package/build/python/github_adapter.py +106 -0
  73. package/build/python/hf_fallback.py +298 -0
  74. package/build/python/image_engine.py +86 -0
  75. package/build/python/kaggle_engine.py +295 -0
  76. package/build/python/media_engine.py +133 -0
  77. package/build/python/nasa_adapter.py +82 -0
  78. package/build/python/openml_engine.py +146 -0
  79. package/build/python/quality_engine.py +267 -0
  80. package/build/python/row_count.py +54 -0
  81. package/build/python/splitter_engine.py +283 -0
  82. package/build/python/target_engine.py +154 -0
  83. package/build/python/test_framework_adapters.py +61 -0
  84. package/build/python/test_fusion_engine.py +89 -0
  85. package/build/python/uci_adapter.py +94 -0
  86. package/build/python/vesper/__init__.py +1 -0
  87. package/build/python/vesper/__pycache__/__init__.cpython-312.pyc +0 -0
  88. package/build/python/vesper/core/__init__.py +1 -0
  89. package/build/python/vesper/core/__pycache__/__init__.cpython-312.pyc +0 -0
  90. package/build/python/vesper/core/__pycache__/asset_downloader.cpython-312.pyc +0 -0
  91. package/build/python/vesper/core/__pycache__/download_recipe.cpython-312.pyc +0 -0
  92. package/build/python/vesper/core/asset_downloader.py +675 -0
  93. package/build/python/vesper/core/download_recipe.py +104 -0
  94. package/build/python/worldbank_adapter.py +99 -0
  95. package/build/quality/analyzer.js +93 -0
  96. package/build/quality/image-analyzer.js +114 -0
  97. package/build/quality/media-analyzer.js +115 -0
  98. package/build/quality/quality-orchestrator.js +162 -0
  99. package/build/quality/types.js +1 -0
  100. package/build/scripts/build-index.js +54 -0
  101. package/build/scripts/check-db.js +73 -0
  102. package/build/scripts/check-jobs.js +24 -0
  103. package/build/scripts/check-naruto.js +17 -0
  104. package/build/scripts/cleanup-kaggle.js +41 -0
  105. package/build/scripts/demo-full-pipeline.js +62 -0
  106. package/build/scripts/demo-ui.js +58 -0
  107. package/build/scripts/e2e-demo.js +72 -0
  108. package/build/scripts/massive-scrape.js +103 -0
  109. package/build/scripts/ops-dashboard.js +33 -0
  110. package/build/scripts/repro-bug.js +37 -0
  111. package/build/scripts/repro-export-bug.js +56 -0
  112. package/build/scripts/scrape-metadata.js +100 -0
  113. package/build/scripts/search-cli.js +26 -0
  114. package/build/scripts/test-bias.js +45 -0
  115. package/build/scripts/test-caching.js +51 -0
  116. package/build/scripts/test-cleaning.js +76 -0
  117. package/build/scripts/test-cloud-storage.js +48 -0
  118. package/build/scripts/test-compliance.js +58 -0
  119. package/build/scripts/test-conversion.js +64 -0
  120. package/build/scripts/test-custom-rules.js +58 -0
  121. package/build/scripts/test-db-opt.js +63 -0
  122. package/build/scripts/test-export-custom.js +33 -0
  123. package/build/scripts/test-exporter.js +53 -0
  124. package/build/scripts/test-fusion.js +61 -0
  125. package/build/scripts/test-github.js +27 -0
  126. package/build/scripts/test-group-split.js +52 -0
  127. package/build/scripts/test-hf-download.js +29 -0
  128. package/build/scripts/test-holdout-manager.js +61 -0
  129. package/build/scripts/test-hybrid-search.js +41 -0
  130. package/build/scripts/test-image-analysis.js +50 -0
  131. package/build/scripts/test-ingestion-infra.js +39 -0
  132. package/build/scripts/test-install.js +40 -0
  133. package/build/scripts/test-institutional.js +26 -0
  134. package/build/scripts/test-integrity.js +41 -0
  135. package/build/scripts/test-jit.js +42 -0
  136. package/build/scripts/test-job-queue.js +62 -0
  137. package/build/scripts/test-kaggle-download.js +34 -0
  138. package/build/scripts/test-large-data.js +50 -0
  139. package/build/scripts/test-mcp-v5.js +74 -0
  140. package/build/scripts/test-media-analysis.js +61 -0
  141. package/build/scripts/test-monitoring.js +91 -0
  142. package/build/scripts/test-observability.js +106 -0
  143. package/build/scripts/test-packager.js +55 -0
  144. package/build/scripts/test-pipeline.js +50 -0
  145. package/build/scripts/test-planning.js +64 -0
  146. package/build/scripts/test-privacy.js +38 -0
  147. package/build/scripts/test-production-sync.js +36 -0
  148. package/build/scripts/test-quality.js +43 -0
  149. package/build/scripts/test-robust-ingestion.js +41 -0
  150. package/build/scripts/test-schema.js +45 -0
  151. package/build/scripts/test-split-validation.js +40 -0
  152. package/build/scripts/test-splitter.js +93 -0
  153. package/build/scripts/test-target-detector.js +29 -0
  154. package/build/scripts/test-uci.js +27 -0
  155. package/build/scripts/test-unified-quality.js +86 -0
  156. package/build/scripts/test-write.js +14 -0
  157. package/build/scripts/verify-integration.js +57 -0
  158. package/build/scripts/verify-priority.js +33 -0
  159. package/build/search/embedder.js +34 -0
  160. package/build/search/engine.js +152 -0
  161. package/build/search/jit-orchestrator.js +258 -0
  162. package/build/search/vector-store.js +123 -0
  163. package/build/splitting/splitter.js +82 -0
  164. package/build/splitting/types.js +1 -0
  165. package/build/tools/formatter.js +251 -0
  166. package/build/utils/downloader.js +52 -0
  167. package/build/utils/selector.js +69 -0
  168. package/mcp-config-template.json +18 -0
  169. package/package.json +101 -29
  170. package/scripts/postinstall.cjs +114 -0
  171. package/scripts/preindex_registry.cjs +157 -0
  172. package/scripts/refresh-index.cjs +87 -0
  173. package/{wizard.js → scripts/wizard.js} +148 -32
  174. package/src/python/__pycache__/config.cpython-312.pyc +0 -0
  175. package/src/python/__pycache__/export_engine.cpython-312.pyc +0 -0
  176. package/src/python/__pycache__/framework_adapters.cpython-312.pyc +0 -0
  177. package/src/python/__pycache__/fusion_engine.cpython-312.pyc +0 -0
  178. package/src/python/__pycache__/kaggle_engine.cpython-312.pyc +0 -0
  179. package/src/python/asset_downloader_engine.py +92 -0
  180. package/src/python/cleaner.py +226 -0
  181. package/src/python/config.py +263 -0
  182. package/src/python/dataworld_engine.py +208 -0
  183. package/src/python/export_engine.py +243 -0
  184. package/src/python/framework_adapters.py +100 -0
  185. package/src/python/fusion_engine.py +368 -0
  186. package/src/python/github_adapter.py +106 -0
  187. package/src/python/hf_fallback.py +298 -0
  188. package/src/python/image_engine.py +86 -0
  189. package/src/python/kaggle_engine.py +295 -0
  190. package/src/python/media_engine.py +133 -0
  191. package/src/python/nasa_adapter.py +82 -0
  192. package/src/python/openml_engine.py +146 -0
  193. package/src/python/quality_engine.py +267 -0
  194. package/src/python/row_count.py +54 -0
  195. package/src/python/splitter_engine.py +283 -0
  196. package/src/python/target_engine.py +154 -0
  197. package/src/python/test_framework_adapters.py +61 -0
  198. package/src/python/test_fusion_engine.py +89 -0
  199. package/src/python/uci_adapter.py +94 -0
  200. package/src/python/vesper/__init__.py +1 -0
  201. package/src/python/vesper/core/__init__.py +1 -0
  202. package/src/python/vesper/core/asset_downloader.py +675 -0
  203. package/src/python/vesper/core/download_recipe.py +104 -0
  204. package/src/python/worldbank_adapter.py +99 -0
  205. package/vesper-mcp-config.json +0 -6
@@ -0,0 +1,226 @@
1
+ import sys
2
+ import json
3
+ import polars as pl
4
+ import numpy as np
5
+
6
+ # --- Operations Library ---
7
+
8
def op_remove_duplicates(df, params):
    """Drop duplicate rows, optionally keyed on a subset of columns.

    Returns the deduplicated frame and a stats dict reporting how many
    rows were removed.
    """
    subset = params.get("subset", None)  # list of column names, or None for all
    row_count_before = len(df)
    df = df.unique(subset=subset) if subset else df.unique()
    return df, {"rows_removed": row_count_before - len(df)}
16
+
17
def op_drop_columns(df, params):
    """Drop the requested columns, silently skipping any that do not exist."""
    requested = params.get("columns", [])
    # Intersect with the frame's schema first so a missing name never raises.
    present = [name for name in requested if name in df.columns]
    return df.drop(present), {"columns_dropped": len(present)}
24
+
25
def op_fill_missing(df, params):
    """Impute null values in a single column.

    params:
        column: target column name (required)
        method: "mean" (default), "median", or "constant"
        value:  fill value used when method == "constant"

    Returns (df, stats). On failure the frame is returned unchanged with an
    "error" entry in the stats — previously an unknown method (the old
    comment even advertised "mode") was a silent no-op that still reported
    rows_imputed.
    """
    col = params["column"]
    method = params.get("method", "mean")
    value = params.get("value", None)

    if col not in df.columns:
        return df, {"error": f"Column {col} not found"}

    # Count nulls up front so the log reflects how many cells get filled.
    affected = df[col].null_count()

    if method == "constant":
        df = df.with_columns(pl.col(col).fill_null(value))
    elif method == "mean":
        df = df.with_columns(pl.col(col).fill_null(df[col].mean()))
    elif method == "median":
        df = df.with_columns(pl.col(col).fill_null(df[col].median()))
    else:
        # Surface unsupported methods instead of silently doing nothing.
        return df, {"error": f"Unsupported fill method: {method}"}

    return df, {"rows_imputed": affected}
45
+
46
def op_fix_types(df, params):
    """Cast a column to a requested type: "int", "float", "string", or "date".

    Numeric casts are non-strict (unparseable cells become null). An
    unrecognized type leaves the frame untouched but still reports success,
    matching the original contract.
    """
    col = params["column"]
    target_type = params["type"]

    if col not in df.columns:
        return df, {"error": f"Column {col} not found"}

    try:
        if target_type == "int":
            cast_expr = pl.col(col).cast(pl.Int64, strict=False)
        elif target_type == "float":
            cast_expr = pl.col(col).cast(pl.Float64, strict=False)
        elif target_type == "string":
            cast_expr = pl.col(col).cast(pl.Utf8)
        elif target_type == "date":
            cast_expr = pl.col(col).str.to_date(strict=False)
        else:
            cast_expr = None

        if cast_expr is not None:
            df = df.with_columns(cast_expr)
        return df, {"status": "Converted"}
    except Exception as e:
        return df, {"error": str(e)}
66
+
67
def op_remove_outliers(df, params):
    """Remove rows whose value in `column` lies outside the IQR fences.

    Only the "iqr" method is implemented; any other method name leaves the
    frame unchanged and reports zero rows removed.
    """
    col = params["column"]
    method = params.get("method", "iqr")
    threshold = params.get("threshold", 1.5)

    if col not in df.columns:
        return df, {"error": f"Column {col} not found"}

    initial_rows = len(df)

    if method == "iqr":
        q1 = df[col].quantile(0.25)
        q3 = df[col].quantile(0.75)
        spread = q3 - q1
        low_fence = q1 - threshold * spread
        high_fence = q3 + threshold * spread
        # Keep only values inside [low_fence, high_fence].
        df = df.filter((pl.col(col) >= low_fence) & (pl.col(col) <= high_fence))

    return df, {"rows_removed": initial_rows - len(df)}
87
+
88
def op_encode_categories(df, params):
    """Encode a categorical column via dense-rank labels or one-hot dummies.

    "label" adds a new "<col>_encoded" column; "onehot" appends dummy
    columns horizontally. The source column is kept in both cases.
    """
    col = params["column"]
    method = params.get("method", "label")

    if col not in df.columns:
        return df, {"error": f"Column {col} not found"}

    if method == "label":
        # Dense rank yields consecutive integer codes per distinct value,
        # which behaves like classic label encoding.
        df = df.with_columns(pl.col(col).rank("dense").alias(f"{col}_encoded"))
    elif method == "onehot":
        dummy_frame = df[col].to_dummies()
        df = pl.concat([df, dummy_frame], how="horizontal")

    return df, {"status": f"Encoded using {method}"}
103
+
104
+ # --- Registry ---
105
+
106
# Maps the operation name used in the JSON pipeline spec to its handler.
# (RenameTarget is special-cased in main and intentionally absent here.)
OPERATIONS = dict(
    RemoveDuplicates=op_remove_duplicates,
    DropColumns=op_drop_columns,
    FillMissing=op_fill_missing,
    FixTypes=op_fix_types,
    RemoveOutliers=op_remove_outliers,
    EncodeCategories=op_encode_categories,
)
114
+
115
def _load_frame(file_path):
    """Read the dataset at file_path into a Polars DataFrame.

    Supports .csv, .parquet, .jsonl/.ndjson and ambiguous .json (tries the
    document parser first, then NDJSON). Raises ValueError for anything else.
    """
    lower = file_path.lower()
    if lower.endswith(".csv"):
        return pl.read_csv(file_path, ignore_errors=True)
    if lower.endswith(".parquet"):
        return pl.read_parquet(file_path)
    if lower.endswith((".jsonl", ".ndjson")):
        return pl.read_ndjson(file_path)
    if lower.endswith(".json"):
        try:
            return pl.read_json(file_path)
        except Exception:
            try:
                return pl.read_ndjson(file_path)
            except Exception as e:
                raise ValueError(f"Failed to read JSON: {str(e)}")
    raise ValueError(f"Unsupported format: {file_path}")


def _apply_operations(df, operations):
    """Run the cleaning pipeline; returns (df, logs, total_rows_affected)."""
    logs = []
    total_rows_affected = 0

    for op in operations:
        op_type = op["type"]
        params = op.get("params", {})

        if op_type == "RenameTarget":
            old_name = params.get("old_name")
            new_name = params.get("new_name", "target")
            if old_name and old_name in df.columns:
                df = df.rename({old_name: new_name})
                logs.append(f"Renamed column '{old_name}' to '{new_name}'")
            else:
                logs.append(f"Failed RenameTarget: Column '{old_name}' not found or not specified.")
        elif op_type in OPERATIONS:
            try:
                df, stats = OPERATIONS[op_type](df, params)
                logs.append(f"Executed {op_type}: {stats}")
                total_rows_affected += stats.get("rows_removed", 0)
            except Exception as e:
                logs.append(f"Failed {op_type}: {str(e)}")
        else:
            logs.append(f"Unknown operation: {op_type}")

    return df, logs, total_rows_affected


def _safe_serialize(val):
    """Best-effort JSON stringification for nested/complex cell values.

    Hoisted to module level: the original redefined this closure once per
    column inside the write loop, and used a bare `except:`.
    """
    try:
        # Polars nested values expose to_list/to_dict; convert before dumping.
        if hasattr(val, "to_list"):
            return json.dumps(val.to_list())
        if hasattr(val, "to_dict"):
            return json.dumps(val.to_dict())
        return json.dumps(val)
    except Exception:
        return str(val)


def _write_output(df, file_path, output_format):
    """Persist df next to file_path as <base>_cleaned.<ext>; returns the path."""
    base_name = file_path.rsplit(".", 1)[0]
    if output_format == "csv":
        output_path = f"{base_name}_cleaned.csv"
        # CSV cannot hold nested values: stringify any non-scalar column
        # (List, Struct, Object, ...) before writing.
        for col in df.columns:
            dtype = df.schema[col]
            is_simple = (
                dtype.is_numeric()
                or dtype.is_temporal()
                or str(dtype).lower() in ["string", "utf8", "boolean", "bool"]
            )
            if not is_simple:
                df = df.with_columns(
                    pl.col(col).map_elements(_safe_serialize, return_dtype=pl.Utf8)
                )
        df.write_csv(output_path)
    else:
        output_path = f"{base_name}_cleaned.parquet"
        df.write_parquet(output_path)
    return output_path


def main():
    """CLI entry point: cleaner.py <file_path> <operations_json> [output_format].

    Prints a single JSON result object on stdout; exits 1 on usage or
    processing errors (error details are reported as JSON on stdout too).
    """
    if len(sys.argv) < 3:
        print(json.dumps({"error": "Usage: cleaner.py <file_path> <operations_json>"}), file=sys.stderr)
        sys.exit(1)

    file_path = sys.argv[1]
    ops_json = sys.argv[2]

    try:
        operations = json.loads(ops_json)
        df = _load_frame(file_path)
        df, logs, total_rows_affected = _apply_operations(df, operations)

        # Explicit format argument wins; otherwise preserve CSV inputs and
        # default everything else to parquet (legacy behavior).
        output_format = sys.argv[3] if len(sys.argv) > 3 else None
        if not output_format:
            output_format = "csv" if file_path.lower().endswith(".csv") else "parquet"

        output_path = _write_output(df, file_path, output_format)

        print(json.dumps({
            "success": True,
            "output_path": output_path,
            "rows_affected": total_rows_affected,
            "logs": logs
        }, default=str))

    except Exception as e:
        print(json.dumps({"success": False, "error": str(e)}, default=str))
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -0,0 +1,263 @@
1
+ import os
2
+ import sys
3
+ import json
4
+ import base64
5
+ import hashlib
6
+ import secrets
7
+ from pathlib import Path
8
+ from typing import Dict, Optional
9
+
10
+ SERVICE_NAME = "vesper"
11
+
12
+ KEY_ALIASES = {
13
+ "hf_token": ["HF_TOKEN", "HUGGINGFACE_TOKEN"],
14
+ "kaggle_username": ["KAGGLE_USERNAME"],
15
+ "kaggle_key": ["KAGGLE_KEY"],
16
+ "dataworld_token": ["DW_AUTH_TOKEN"],
17
+ }
18
+
19
+ try:
20
+ import keyring # type: ignore
21
+ HAS_KEYRING = True
22
+ except Exception:
23
+ HAS_KEYRING = False
24
+
25
+ try:
26
+ from cryptography.fernet import Fernet, InvalidToken # type: ignore
27
+ HAS_FERNET = True
28
+ except Exception:
29
+ HAS_FERNET = False
30
+
31
+
32
+ def _config_path() -> Path:
33
+ return Path.home() / ".vesper" / "config.toml"
34
+
35
+
36
+ def _secret_path() -> Path:
37
+ return Path.home() / ".vesper" / ".config_key"
38
+
39
+
40
+ def _ensure_parent(path: Path) -> None:
41
+ path.parent.mkdir(parents=True, exist_ok=True)
42
+
43
+
44
def _read_fallback_toml() -> Dict[str, str]:
    """Parse the minimal TOML-ish fallback file into a flat dict.

    Understands only `[section]` headers, `key = "value"` pairs inside
    the [keys] section, and a `method = ...` line (exposed under the
    reserved "__method__" key). Returns {} when the file is absent.
    """
    path = _config_path()
    if not path.exists():
        return {}

    values: Dict[str, str] = {}
    inside_keys_section = False
    method = ""

    for raw_line in path.read_text(encoding="utf-8").splitlines():
        line = raw_line.strip()
        if not line or line.startswith("#"):
            continue
        if line.startswith("[") and line.endswith("]"):
            inside_keys_section = line == "[keys]"
            continue
        if line.startswith("method") and "=" in line:
            method = line.split("=", 1)[1].strip().strip('"').strip("'")
            continue
        if not inside_keys_section or "=" not in line:
            continue

        key, _, raw_val = line.partition("=")
        values[key.strip()] = raw_val.strip().strip('"').strip("'")

    if method:
        values["__method__"] = method

    return values
75
+
76
+
77
def _get_or_create_local_secret() -> str:
    """Return the local obfuscation secret, creating it on first use.

    The secret (32 random bytes, urlsafe-base64) is persisted under
    ~/.vesper/.config_key and chmod'ed to 0600 on a best-effort basis.
    """
    secret_file = _secret_path()
    _ensure_parent(secret_file)

    if secret_file.exists():
        return secret_file.read_text(encoding="utf-8").strip()

    new_secret = base64.urlsafe_b64encode(secrets.token_bytes(32)).decode("utf-8")
    secret_file.write_text(new_secret, encoding="utf-8")
    try:
        os.chmod(secret_file, 0o600)
    except Exception:
        # chmod can fail (e.g. on Windows); the secret is still usable.
        pass
    return new_secret
91
+
92
+
93
+ def _xor_encrypt(plain: str, secret: str) -> str:
94
+ key = hashlib.sha256(secret.encode("utf-8")).digest()
95
+ data = plain.encode("utf-8")
96
+ out = bytes([data[i] ^ key[i % len(key)] for i in range(len(data))])
97
+ return base64.urlsafe_b64encode(out).decode("utf-8")
98
+
99
+
100
+ def _xor_decrypt(cipher_text: str, secret: str) -> str:
101
+ key = hashlib.sha256(secret.encode("utf-8")).digest()
102
+ data = base64.urlsafe_b64decode(cipher_text.encode("utf-8"))
103
+ out = bytes([data[i] ^ key[i % len(key)] for i in range(len(data))])
104
+ return out.decode("utf-8")
105
+
106
+
107
def _encrypt_value(value: str, secret: str) -> Dict[str, str]:
    """Encrypt *value*, preferring Fernet and falling back to XOR obfuscation.

    Returns {"method": "fernet"|"xor", "value": <ciphertext>} so the reader
    knows which scheme to reverse.
    """
    if HAS_FERNET:
        token = Fernet(secret.encode("utf-8")).encrypt(value.encode("utf-8"))
        return {"method": "fernet", "value": token.decode("utf-8")}
    # Weaker than Fernet, but at least not plaintext on disk.
    return {"method": "xor", "value": _xor_encrypt(value, secret)}
113
+
114
+
115
def _decrypt_value(value: str, method: str, secret: str) -> Optional[str]:
    """Decrypt a stored value; returns None on any failure or unknown method.

    Fix: the original had `except InvalidToken`, but InvalidToken is only
    bound when the cryptography import at module top succeeded. When the
    package is missing (HAS_FERNET False) and the xor path raised, the
    except clause itself raised NameError instead of returning None.
    Catching Exception covers InvalidToken (it is an Exception subclass)
    and decode errors alike, preserving the intended "None on failure".
    """
    try:
        if method == "fernet" and HAS_FERNET:
            return Fernet(secret.encode("utf-8")).decrypt(value.encode("utf-8")).decode("utf-8")
        if method == "xor":
            return _xor_decrypt(value, secret)
        return None
    except Exception:
        # Bad/garbled ciphertext (InvalidToken) and decode errors land here.
        return None
126
+
127
+
128
def _write_fallback_toml(values: Dict[str, str]) -> None:
    """Serialize *values* to the fallback config file.

    Keys prefixed with "__" are metadata and are not written into [keys];
    the encryption method is recorded under [meta]. Keys are sorted for a
    stable on-disk layout.
    """
    path = _config_path()
    _ensure_parent(path)

    method = values.get("__method__", "fernet" if HAS_FERNET else "xor")
    out_lines = [
        "# Vesper optional API keys fallback storage",
        "# Encrypted fallback (keyring is preferred)",
        "[meta]",
        f'method = "{method}"',
        "[keys]",
    ]
    for key in sorted(values):
        if key.startswith("__"):
            continue
        escaped = str(values[key]).replace('"', '\\"')
        out_lines.append(f'{key} = "{escaped}"')

    path.write_text("\n".join(out_lines) + "\n", encoding="utf-8")
147
+
148
+
149
def _get_from_env(name: str) -> Optional[str]:
    """Look up *name* via its known environment-variable aliases.

    Returns the first non-empty alias value, or None when no alias is set.
    """
    aliases = KEY_ALIASES.get(name, [])
    return next((os.getenv(a) for a in aliases if os.getenv(a)), None)
155
+
156
+
157
def get_key(name: str) -> Optional[str]:
    """Resolve a stored credential.

    Lookup order: OS keyring, then the encrypted fallback file, then
    environment-variable aliases. Returns None when nothing is found.
    """
    # 1) keyring (secure, preferred)
    if HAS_KEYRING:
        try:
            stored = keyring.get_password(SERVICE_NAME, name)
        except Exception:
            stored = None
        if stored:
            return stored

    # 2) encrypted fallback config.toml
    fallback = _read_fallback_toml()
    cipher_text = fallback.get(name)
    if cipher_text:
        local_secret = _get_or_create_local_secret()
        method = fallback.get("__method__", "fernet" if HAS_FERNET else "xor")
        plain = _decrypt_value(cipher_text, method, local_secret)
        if plain:
            return plain

    # 3) environment variables (last resort; returns None if unset)
    return _get_from_env(name)
182
+
183
+
184
def set_key(name: str, value: str) -> Dict[str, str]:
    """Store a credential, preferring the OS keyring.

    Falls back to the encrypted TOML file when keyring is unavailable or
    errors out. Returns a status dict with stringly-typed "ok" and the
    storage method used.
    """
    if not value:
        return {"ok": "false", "method": "none", "error": "Empty value"}

    if HAS_KEYRING:
        try:
            keyring.set_password(SERVICE_NAME, name, value)
        except Exception:
            pass  # fall through to the file-based store
        else:
            return {"ok": "true", "method": "keyring"}

    stored = _read_fallback_toml()
    local_secret = _get_or_create_local_secret()
    encrypted = _encrypt_value(value, local_secret)
    stored["__method__"] = encrypted["method"]
    stored[name] = encrypted["value"]
    _write_fallback_toml(stored)
    return {"ok": "true", "method": f'toml:{encrypted["method"]}'}
202
+
203
+
204
def has_key(name: str) -> bool:
    """True when a non-empty value for *name* can be resolved."""
    return get_key(name) is not None
206
+
207
+
208
def get_all() -> Dict[str, Optional[str]]:
    """Snapshot of every known credential (values may be None)."""
    key_names = ("hf_token", "kaggle_username", "kaggle_key", "dataworld_token")
    return {key_name: get_key(key_name) for key_name in key_names}
215
+
216
+
217
+ def _print_json(data):
218
+ print(json.dumps(data))
219
+
220
+
221
def main() -> None:
    """Tiny CLI: config.py <get|set|has|all> [name] [value].

    Every response is one JSON object on stdout; exits 1 on usage errors
    and unknown commands.
    """
    if len(sys.argv) < 2:
        _print_json({
            "ok": False,
            "error": "Usage: config.py <get|set|has|all> [name] [value]",
        })
        sys.exit(1)

    command = sys.argv[1].lower()

    if command == "all":
        _print_json({"ok": True, "data": get_all()})
        return

    if len(sys.argv) < 3:
        _print_json({"ok": False, "error": "Missing key name"})
        sys.exit(1)

    key_name = sys.argv[2]

    if command == "get":
        _print_json({"ok": True, "name": key_name, "value": get_key(key_name)})
    elif command == "has":
        _print_json({"ok": True, "name": key_name, "value": has_key(key_name)})
    elif command == "set":
        if len(sys.argv) < 4:
            _print_json({"ok": False, "error": "Missing value for set"})
            sys.exit(1)
        outcome = set_key(key_name, sys.argv[3])
        _print_json({
            "ok": outcome.get("ok") == "true",
            "name": key_name,
            "method": outcome.get("method"),
            "error": outcome.get("error"),
        })
    else:
        _print_json({"ok": False, "error": f"Unknown command: {command}"})
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -0,0 +1,208 @@
1
+ import sys
2
+ import json
3
+ import argparse
4
+ import tempfile
5
+ import os
6
+ import urllib.request
7
+ import urllib.error
8
+ import urllib.parse
9
+ from typing import Dict, Any, List
10
+
11
+ def _get_token() -> str:
12
+ token = os.environ.get("DW_AUTH_TOKEN")
13
+ if not token:
14
+ raise ValueError("DW_AUTH_TOKEN environment variable is required for data.world")
15
+ return token
16
+
17
+ def _dataset_to_dict(ds: Dict[str, Any]) -> Dict[str, Any]:
18
+ owner_field = ds.get("owner", "")
19
+ if isinstance(owner_field, dict):
20
+ owner = owner_field.get("id") or owner_field.get("name") or ""
21
+ else:
22
+ owner = owner_field or ""
23
+
24
+ id_str = ds.get("id", "")
25
+ title = ds.get("title", "")
26
+
27
+ if (not owner or not id_str) and isinstance(ds.get("resourceLink"), str):
28
+ # Expected format includes /<owner>/<dataset-id>
29
+ parts = ds["resourceLink"].strip("/").split("/")
30
+ if len(parts) >= 2:
31
+ owner = owner or parts[-2]
32
+ id_str = id_str or parts[-1]
33
+
34
+ if isinstance(id_str, str) and "/" in id_str and not owner:
35
+ split_ref = id_str.split("/", 1)
36
+ owner = split_ref[0]
37
+ id_str = split_ref[1]
38
+
39
+ if not owner and not id_str:
40
+ owner = "unknown"
41
+ id_str = "unknown"
42
+
43
+ if not title:
44
+ title = f"{owner}/{id_str}"
45
+
46
+ return {
47
+ "id": f"dataworld:{owner}/{id_str}",
48
+ "name": title,
49
+ "source": "dataworld",
50
+ "description": ds.get("description", f"data.world dataset {title}"),
51
+ "author": owner,
52
+ "license": {
53
+ "id": "Unknown",
54
+ "category": "unknown",
55
+ "commercial_use": None,
56
+ "warnings": []
57
+ },
58
+ "tags": ds.get("tags", []) + ["dataworld"],
59
+ "downloads": 0,
60
+ "likes": 0,
61
+ "created_at": ds.get("created", ""),
62
+ "updated_at": ds.get("updated", ""),
63
+ "size_bytes": 0,
64
+ "quality_score": 0.8,
65
+ "domain": "general",
66
+ "is_gated": False,
67
+ "is_nsfw": False,
68
+ "description_length": len(ds.get("description", "")),
69
+ "has_readme": False,
70
+ "download_url": f"https://data.world/{owner}/{id_str}",
71
+ }
72
+
73
def discover(query: str, limit: int = 20) -> Dict[str, Any]:
    """Search data.world for datasets matching *query*.

    Tries the simple search endpoint first and retries the advanced one
    when no records come back. Errors are reported in-band via
    {"ok": False, "error": ...} rather than raised.
    """
    try:
        token = _get_token()

        headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
            "Accept": "application/json"
        }

        # Simple search: datasets plus community results for better recall.
        search_url = f"https://api.data.world/v0/search/resources?size={limit}"
        payload = {
            "query": query,
            "category": ["dataset"],
            "includeCommunityResults": True,
        }
        request = urllib.request.Request(
            search_url,
            data=json.dumps(payload).encode('utf-8'),
            headers=headers,
            method="POST",
        )
        with urllib.request.urlopen(request) as response:
            records = json.loads(response.read().decode('utf-8')).get("records", [])

        if not records:
            # Advanced endpoint as a fallback when simple search is empty.
            fallback_payload = {"query": query, "category": ["dataset"]}
            fallback_request = urllib.request.Request(
                f"https://api.data.world/v0/search?size={limit}",
                data=json.dumps(fallback_payload).encode("utf-8"),
                headers=headers,
                method="POST",
            )
            with urllib.request.urlopen(fallback_request) as response:
                records = json.loads(response.read().decode("utf-8")).get("records", [])

        results = [_dataset_to_dict(record) for record in records]
        return {"ok": True, "results": results, "count": len(results)}
    except Exception as e:
        return {"ok": False, "error": f"data.world discover failed: {str(e)}"}
122
+
123
def download(dataset_ref: str, target_dir: str) -> Dict[str, Any]:
    """Download the most useful file of a data.world dataset.

    dataset_ref may be "dataworld:owner/id" or plain "owner/id". When
    target_dir is falsy, a fresh temp directory is created. Returns
    {"ok": True, "local_path": ..., "target_dir": ...} or an error dict.

    Fix: the filename returned by the API is untrusted; joining it
    directly into target_dir allowed path traversal (e.g. "../../x").
    The local path now uses its basename only — the download URL still
    uses the full API filename.
    """
    try:
        token = _get_token()

        # Accept both the prefixed and the bare reference form.
        if dataset_ref.startswith("dataworld:"):
            ref = dataset_ref.split(":", 1)[1]
        else:
            ref = dataset_ref

        parts = ref.split("/")
        if len(parts) != 2:
            return {"ok": False, "error": f"Invalid data.world dataset ID format. Expected owner/id, got {ref}"}
        owner, dataset_id = parts

        if not target_dir:
            target_dir = tempfile.mkdtemp(prefix="vesper_dataworld_")
        os.makedirs(target_dir, exist_ok=True)

        headers = {
            "Authorization": f"Bearer {token}",
            "Accept": "application/json"
        }

        # Fetch dataset metadata to enumerate its files.
        meta_url = f"https://api.data.world/v0/datasets/{owner}/{dataset_id}"
        meta_req = urllib.request.Request(meta_url, headers=headers)
        with urllib.request.urlopen(meta_req) as response:
            dataset_meta = json.loads(response.read().decode('utf-8'))

        files = dataset_meta.get("files", [])
        if not files:
            return {"ok": False, "error": "No files found in this dataset"}

        # Prefer structured formats, best first: parquet, csv, jsonl, json.
        best_file = None
        for ext in [".parquet", ".csv", ".jsonl", ".json"]:
            best_file = next(
                (f for f in files if f.get("name", "").lower().endswith(ext)),
                None,
            )
            if best_file:
                break
        if not best_file:
            best_file = files[0]  # no preferred format: take the first file

        filename = best_file.get("name")

        download_url = f"https://api.data.world/v0/file_download/{owner}/{dataset_id}/{urllib.parse.quote(filename)}"

        # basename() guards against traversal via a hostile API filename.
        file_path = os.path.join(target_dir, os.path.basename(filename))

        download_req = urllib.request.Request(download_url, headers=headers)
        with urllib.request.urlopen(download_req) as response, open(file_path, 'wb') as out_file:
            out_file.write(response.read())

        return {
            "ok": True,
            "local_path": file_path,
            "target_dir": target_dir
        }
    except Exception as e:
        return {"ok": False, "error": f"data.world download failed: {str(e)}"}
190
+
191
def main():
    """CLI dispatcher: dataworld_engine.py <discover|download> <arg1> [arg2].

    arg1 is the search query (discover) or dataset ref (download); arg2 is
    the result limit (discover) or target directory (download). The result
    dict is printed as a single JSON line.
    """
    parser = argparse.ArgumentParser(description="Vesper data.world Engine")
    parser.add_argument("action", choices=["discover", "download"])
    parser.add_argument("arg1", help="Query for discover, Dataset ID for download")
    parser.add_argument("arg2", nargs="?", help="Limit for discover, Target Dir for download")
    args = parser.parse_args()

    if args.action == "discover":
        result_limit = int(args.arg2) if args.arg2 else 20
        outcome = discover(args.arg1, result_limit)
    else:  # argparse choices guarantee this is "download"
        outcome = download(args.arg1, args.arg2)
    print(json.dumps(outcome))


if __name__ == "__main__":
    main()