@vespermcp/mcp-server 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/LICENSE +21 -0
  2. package/README.md +259 -0
  3. package/build/cache/cdn.js +34 -0
  4. package/build/cache/service.js +63 -0
  5. package/build/cleaning/cleaner.js +50 -0
  6. package/build/cleaning/evaluator.js +89 -0
  7. package/build/cleaning/executor.js +60 -0
  8. package/build/cleaning/exporter.js +87 -0
  9. package/build/cleaning/planner.js +111 -0
  10. package/build/cleaning/rules.js +57 -0
  11. package/build/cleaning/types.js +1 -0
  12. package/build/cloud/adapters/local.js +37 -0
  13. package/build/cloud/adapters/s3.js +24 -0
  14. package/build/cloud/storage-manager.js +20 -0
  15. package/build/cloud/types.js +1 -0
  16. package/build/compliance/service.js +73 -0
  17. package/build/compliance/store.js +80 -0
  18. package/build/compliance/types.js +1 -0
  19. package/build/data/processing-worker.js +23 -0
  20. package/build/data/streaming.js +38 -0
  21. package/build/data/worker-pool.js +39 -0
  22. package/build/export/exporter.js +45 -0
  23. package/build/export/packager.js +100 -0
  24. package/build/export/types.js +1 -0
  25. package/build/fusion/aligner.js +56 -0
  26. package/build/fusion/deduplicator.js +69 -0
  27. package/build/fusion/harmonizer.js +39 -0
  28. package/build/fusion/orchestrator.js +86 -0
  29. package/build/fusion/types.js +1 -0
  30. package/build/index.js +632 -0
  31. package/build/ingestion/hf-downloader.js +64 -0
  32. package/build/ingestion/ingestor.js +96 -0
  33. package/build/ingestion/kaggle-downloader.js +79 -0
  34. package/build/install/install-service.js +41 -0
  35. package/build/jobs/manager.js +129 -0
  36. package/build/jobs/queue.js +59 -0
  37. package/build/jobs/types.js +1 -0
  38. package/build/metadata/domain.js +147 -0
  39. package/build/metadata/github-scraper.js +47 -0
  40. package/build/metadata/institutional-scrapers.js +49 -0
  41. package/build/metadata/kaggle-scraper.js +182 -0
  42. package/build/metadata/license.js +68 -0
  43. package/build/metadata/monitoring-service.js +107 -0
  44. package/build/metadata/monitoring-store.js +78 -0
  45. package/build/metadata/monitoring-types.js +1 -0
  46. package/build/metadata/quality.js +48 -0
  47. package/build/metadata/rate-limiter.js +128 -0
  48. package/build/metadata/scraper.js +353 -0
  49. package/build/metadata/store.js +325 -0
  50. package/build/metadata/types.js +1 -0
  51. package/build/metadata/uci-scraper.js +49 -0
  52. package/build/monitoring/observability.js +76 -0
  53. package/build/quality/analyzer.js +57 -0
  54. package/build/quality/image-analyzer.js +46 -0
  55. package/build/quality/media-analyzer.js +46 -0
  56. package/build/quality/quality-orchestrator.js +162 -0
  57. package/build/quality/types.js +1 -0
  58. package/build/scripts/build-index.js +54 -0
  59. package/build/scripts/check-db.js +73 -0
  60. package/build/scripts/check-jobs.js +24 -0
  61. package/build/scripts/check-naruto.js +17 -0
  62. package/build/scripts/demo-full-pipeline.js +62 -0
  63. package/build/scripts/demo-ui.js +58 -0
  64. package/build/scripts/e2e-demo.js +72 -0
  65. package/build/scripts/massive-scrape.js +103 -0
  66. package/build/scripts/ops-dashboard.js +33 -0
  67. package/build/scripts/scrape-metadata.js +100 -0
  68. package/build/scripts/search-cli.js +26 -0
  69. package/build/scripts/test-bias.js +45 -0
  70. package/build/scripts/test-caching.js +51 -0
  71. package/build/scripts/test-cleaning.js +76 -0
  72. package/build/scripts/test-cloud-storage.js +48 -0
  73. package/build/scripts/test-compliance.js +58 -0
  74. package/build/scripts/test-conversion.js +64 -0
  75. package/build/scripts/test-custom-rules.js +58 -0
  76. package/build/scripts/test-db-opt.js +63 -0
  77. package/build/scripts/test-export-custom.js +33 -0
  78. package/build/scripts/test-exporter.js +53 -0
  79. package/build/scripts/test-fusion.js +61 -0
  80. package/build/scripts/test-github.js +27 -0
  81. package/build/scripts/test-group-split.js +52 -0
  82. package/build/scripts/test-hf-download.js +29 -0
  83. package/build/scripts/test-holdout-manager.js +61 -0
  84. package/build/scripts/test-hybrid-search.js +41 -0
  85. package/build/scripts/test-image-analysis.js +50 -0
  86. package/build/scripts/test-ingestion-infra.js +39 -0
  87. package/build/scripts/test-install.js +40 -0
  88. package/build/scripts/test-institutional.js +26 -0
  89. package/build/scripts/test-integrity.js +41 -0
  90. package/build/scripts/test-jit.js +42 -0
  91. package/build/scripts/test-job-queue.js +62 -0
  92. package/build/scripts/test-kaggle-download.js +34 -0
  93. package/build/scripts/test-large-data.js +50 -0
  94. package/build/scripts/test-mcp-v5.js +73 -0
  95. package/build/scripts/test-media-analysis.js +61 -0
  96. package/build/scripts/test-monitoring.js +91 -0
  97. package/build/scripts/test-observability.js +106 -0
  98. package/build/scripts/test-packager.js +55 -0
  99. package/build/scripts/test-pipeline.js +50 -0
  100. package/build/scripts/test-planning.js +64 -0
  101. package/build/scripts/test-privacy.js +38 -0
  102. package/build/scripts/test-quality.js +43 -0
  103. package/build/scripts/test-robust-ingestion.js +41 -0
  104. package/build/scripts/test-schema.js +45 -0
  105. package/build/scripts/test-split-validation.js +40 -0
  106. package/build/scripts/test-splitter.js +93 -0
  107. package/build/scripts/test-uci.js +27 -0
  108. package/build/scripts/test-unified-quality.js +86 -0
  109. package/build/search/embedder.js +34 -0
  110. package/build/search/engine.js +129 -0
  111. package/build/search/jit-orchestrator.js +232 -0
  112. package/build/search/vector-store.js +105 -0
  113. package/build/splitting/splitter.js +57 -0
  114. package/build/splitting/types.js +1 -0
  115. package/build/tools/formatter.js +227 -0
  116. package/build/utils/downloader.js +52 -0
  117. package/mcp-config-template.json +15 -0
  118. package/package.json +84 -0
  119. package/src/python/__pycache__/framework_adapters.cpython-312.pyc +0 -0
  120. package/src/python/cleaner.py +196 -0
  121. package/src/python/export_engine.py +112 -0
  122. package/src/python/framework_adapters.py +100 -0
  123. package/src/python/github_adapter.py +106 -0
  124. package/src/python/image_engine.py +86 -0
  125. package/src/python/media_engine.py +133 -0
  126. package/src/python/nasa_adapter.py +82 -0
  127. package/src/python/quality_engine.py +243 -0
  128. package/src/python/splitter_engine.py +283 -0
  129. package/src/python/test_framework_adapters.py +61 -0
  130. package/src/python/uci_adapter.py +94 -0
  131. package/src/python/worldbank_adapter.py +99 -0
package/src/python/quality_engine.py
@@ -0,0 +1,243 @@
+ import sys
+ import json
+ import polars as pl
+ import numpy as np
+
+ def analyze_column(df, col_name, dtype):
+     stats = {
+         "name": col_name,
+         "type": str(dtype),
+         "inferred_type": str(dtype),  # Default to actual
+         "missing_count": 0,
+         "missing_percentage": 0.0,
+         "unique_count": 0,
+         "is_constant": False,
+         "is_mixed_type": False
+     }
+
+     try:
+         col = df[col_name]
+         null_count = col.null_count()
+         row_count = len(col)
+
+         stats["missing_count"] = null_count
+         stats["missing_percentage"] = (null_count / row_count) * 100 if row_count > 0 else 0
+         stats["unique_count"] = col.n_unique()
+         stats["is_constant"] = stats["unique_count"] <= 1 and row_count > 0
+
+         # Schema Inference & Validation
+         is_string = dtype == pl.Utf8 or dtype == pl.Object
+
+         if is_string and row_count > 0:
+             # Try inferring Numeric
+             # Check if majority can be cast to float
+             try:
+                 # Use strict=False to turn non-numbers into nulls
+                 numeric_cast = col.str.strip_chars().cast(pl.Float64, strict=False)
+                 numeric_nulls = numeric_cast.null_count()
+
+                 # If valid numbers are significantly more than original nulls, it might be numeric
+                 valid_numbers = row_count - numeric_nulls
+                 original_valid = row_count - null_count
+
+                 if valid_numbers > 0 and (valid_numbers / original_valid) > 0.9:
+                     stats["inferred_type"] = "Numeric (Stored as String)"
+
+                 # Mixed type check: If valid numbers exist but plenty of strings too
+                 elif valid_numbers > 0 and (valid_numbers / original_valid) < 0.9:
+                     stats["is_mixed_type"] = True
+             except:
+                 pass
+
+         # Numeric Analysis
+         if dtype in [pl.Int64, pl.Int32, pl.Float64, pl.Float32] or stats["inferred_type"].startswith("Numeric"):
+             clean_col = col
+             if is_string:
+                 # Cast for analysis if it was inferred
+                 clean_col = col.str.strip_chars().cast(pl.Float64, strict=False)
+
+             clean_col = clean_col.drop_nulls()
+
+             if len(clean_col) > 0:
+                 stats["distribution"] = {
+                     "min": float(clean_col.min()),
+                     "max": float(clean_col.max()),
+                     "mean": float(clean_col.mean()),
+                     "std": float(clean_col.std()) if len(clean_col) > 1 else 0,
+                     "p25": float(clean_col.quantile(0.25)),
+                     "p50": float(clean_col.median()),
+                     "p75": float(clean_col.quantile(0.75))
+                 }
+
+         # Categorical Analysis
+         if dtype == pl.Utf8 or dtype == pl.Categorical:
+             value_counts = col.value_counts(sort=True).head(5)
+             # Handle different polars versions return structure for value_counts
+             try:
+                 # Format: struct with name/counts or columns
+                 rows = value_counts.rows()
+                 top_values = {}
+                 for row in rows:
+                     val = str(row[0]) if row[0] is not None else "null"
+                     count = int(row[1])
+                     top_values[val] = count
+                 stats["top_values"] = top_values
+             except:
+                 pass
+
+     except Exception as e:
+         stats["error"] = str(e)
+
+     return stats
+
+ def main():
+     if len(sys.argv) < 2:
+         print(json.dumps({"error": "No file path provided"}))
+         sys.exit(1)
+
+     file_path = sys.argv[1]
+
+     try:
+         # Robust file reading with extension detection
+         file_path_lower = file_path.lower()
+         if file_path_lower.endswith(".csv"):
+             df = pl.read_csv(file_path, ignore_errors=True, n_rows=10000)
+         elif file_path_lower.endswith(".parquet"):
+             try:
+                 # Try scanning first (faster for large files)
+                 df = pl.scan_parquet(file_path).limit(10000).collect()
+             except:
+                 df = pl.read_parquet(file_path)
+                 if len(df) > 10000: df = df.head(10000)
+         elif file_path_lower.endswith(".jsonl") or file_path_lower.endswith(".ndjson"):
+             # Explicit NDJSON
+             df = pl.scan_ndjson(file_path).limit(10000).collect()
+         elif file_path_lower.endswith(".json"):
+             # Ambiguous .json: Try standard JSON first, then NDJSON fallback
+             try:
+                 # read_json reads standard JSON array [{}, {}]
+                 df = pl.read_json(file_path)
+                 if len(df) > 10000: df = df.head(10000)
+             except Exception:
+                 try:
+                     # Fallback to NDJSON (common for large datasets mislabeled as .json)
+                     df = pl.scan_ndjson(file_path).limit(10000).collect()
+                 except Exception as e:
+                     print(json.dumps({"error": f"Failed to read JSON: {str(e)}"}))
+                     sys.exit(1)
+         else:
+             print(json.dumps({"error": f"Unsupported file extension: {file_path}"}))
+             sys.exit(1)
+
+         row_count = len(df)
+         column_count = len(df.columns)
+
+         # Duplicate detection (exact)
+         try:
+             duplicate_count = df.is_duplicated().sum()
+         except Exception:
+             # Duplicate check might fail on complex nested types (List, Struct)
+             duplicate_count = 0
+
+         columns_stats = []
+         text_cols = []
+         for col in df.columns:
+             stats = analyze_column(df, col, df.schema[col])
+             columns_stats.append(stats)
+             # Check for String type (Polars can return 'String' or 'Utf8' depending on version)
+             dtype_str = stats["type"]
+             if ("String" in dtype_str or "Utf8" in dtype_str) and stats["unique_count"] > 1:
+                 text_cols.append(col)
+
+         report = {
+             "row_count": row_count,
+             "column_count": column_count,
+             "duplicate_rows": int(duplicate_count),
+             "duplicate_percentage": (duplicate_count / row_count * 100) if row_count > 0 else 0,
+             "columns": columns_stats,
+             "warnings": [],
+             "schema_warnings": [],
+             "overall_score": 100
+         }
+
+         # Integrity Check 1: Text Duplicates (Fuzzyish Proxy)
+         # If duplicated rows are 0, check if main text content is duplicated
+         if duplicate_count == 0 and len(text_cols) > 0:
+             # Pick longest text column as likely "content"
+             # In real impl, we'd use heuristics. For now, first text col.
+             target_col = text_cols[0]
+             text_dupes = df.select(pl.col(target_col)).is_duplicated().sum()
+             if text_dupes > 0:
+                 report["text_duplicates"] = int(text_dupes)
+                 if text_dupes > (row_count * 0.2):
+                     report["warnings"].append(f"High text duplication in '{target_col}' ({text_dupes} rows)")
+
+         # Integrity Check 2: Contamination / Leakage (Basic)
+         # (Skipping correlation for now)
+
+         report["class_imbalance_warnings"] = []
+         report["pii_warnings"] = []
+
+         # PII Patterns (Regex)
+         import re
+         pii_patterns = {
+             "Email": r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
+             "Phone": r'\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}',  # Basic US-ish pattern
+             "SSN": r'\d{3}-\d{2}-\d{4}',
+             "IPv4": r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
+         }
+
+         # Bias & PII Analysis
+         for col_name, stats in zip(df.columns, columns_stats):
+             # Class Imbalance
+             if stats["unique_count"] > 1 and stats["unique_count"] < 50:
+                 try:
+                     col = df[col_name]
+                     top_val_count = col.value_counts().sort("count", descending=True).row(0)[1]
+                     total = len(col)
+                     if total > 0:
+                         ratio = top_val_count / total
+                         if ratio > 0.9:
+                             report["class_imbalance_warnings"].append(f"Severe imbalance in '{col_name}': Top class is {(ratio*100):.1f}% of data")
+                 except:
+                     pass
+
+             # PII Detection (on Text Columns only)
+             if ("String" in stats["type"] or "Utf8" in stats["type"]):
+                 try:
+                     # Sample for performance (check first 1000 non-null values)
+                     sample_text = df[col_name].drop_nulls().head(1000).to_list()
+                     # Join a subset to regex against (faster than row-by-row for simple checks)
+                     combined_text = " ".join([str(x) for x in sample_text])
+
+                     for pii_type, pattern in pii_patterns.items():
+                         if re.search(pattern, combined_text):
+                             # Ensure we don't flag column names like "email_address" but actual content
+                             # Double check with a strict count if trigger found
+                             matches = len(re.findall(pattern, combined_text))
+                             if matches > 0:
+                                 report["pii_warnings"].append(f"Potential {pii_type} detected in column '{col_name}' ({matches} matches in sample)")
+                 except:
+                     pass
+
+         # Basic warnings
+         if report["duplicate_percentage"] > 10:
+             report["warnings"].append("High duplication rate (>10%)")
+         if row_count < 50:
+             report["warnings"].append("Dataset is very small (<50 rows)")
+
+         # Schema warnings
+         for col in columns_stats:
+             if "Numeric" in col.get("inferred_type", "") and "Utf8" in col.get("type", ""):
+                 report["schema_warnings"].append(f"Column '{col['name']}' looks Numeric but is stored as String")
+             if col.get("is_mixed_type"):
+                 report["schema_warnings"].append(f"Column '{col['name']}' likely contains mixed types (numbers and strings)")
+
+         print(json.dumps(report))
+
+     except Exception as e:
+         print(json.dumps({"error": f"Analysis failed: {str(e)}"}))
+         sys.exit(1)
+
+ if __name__ == "__main__":
+     main()
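
For reference, quality_engine.py above takes a single dataset path as its only argument and prints one JSON report to stdout. A minimal sketch of driving it from Python, assuming the script sits at src/python/quality_engine.py inside the installed package and that data/train.csv is a dataset you supply (both paths are illustrative, not part of the package):

import json
import subprocess
import sys

# Run the quality engine on one file and parse the JSON report it prints to stdout.
result = subprocess.run(
    [sys.executable, "src/python/quality_engine.py", "data/train.csv"],  # illustrative paths
    capture_output=True, text=True,
)
report = json.loads(result.stdout)
# On failure the script prints {"error": ...} instead of a full report, so use .get().
print(report.get("row_count"), report.get("warnings"), report.get("pii_warnings"))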
package/src/python/splitter_engine.py
@@ -0,0 +1,283 @@
+ import sys
+ import json
+ import polars as pl
+ import numpy as np
+ from sklearn.model_selection import train_test_split
+
+ def execute_split(file_path, config):
+     # Load Data
+     if file_path.endswith(".csv"):
+         df = pl.read_csv(file_path, ignore_errors=True)
+     elif file_path.endswith(".parquet"):
+         df = pl.read_parquet(file_path)
+     else:
+         raise ValueError("Unsupported format")
+
+     train_ratio = config["ratios"]["train"]
+     val_ratio = config["ratios"]["val"]
+     test_ratio = config["ratios"]["test"]
+     holdout_ratio = config["ratios"].get("holdout", 0)
+     seed = config.get("random_seed", 42)
+     shuffle = config.get("shuffle", True)
+
+     # Strategy
+     strategy = config["type"]
+     target_col = config.get("target_column", None)
+     time_col = config.get("time_column", None)
+
+     train_df, val_df, test_df, holdout_df = None, None, None, None
+
+     # --- 1. RANDOM / STRATIFIED SPLIT ---
+     if strategy in ["random", "stratified"]:
+         if strategy == "random":
+             if shuffle:
+                 df = df.sample(fraction=1.0, seed=seed, shuffle=True)
+
+             n = len(df)
+             n_train = int(n * train_ratio)
+             n_val = int(n * val_ratio)
+             n_test = int(n * test_ratio)
+
+             train_df = df.slice(0, n_train)
+             val_df = df.slice(n_train, n_val)
+             test_df = df.slice(n_train + n_val, n_test)
+             holdout_df = df.slice(n_train + n_val + n_test, n - (n_train + n_val + n_test))
+
+         elif strategy == "stratified":
+             if not target_col or target_col not in df.columns:
+                 return {"error": f"Target column '{target_col}' not found (required for stratification)"}
+
+             y = df[target_col].to_list()
+             indices = np.arange(len(df))
+
+             # Split 1: Train vs Others
+             others_ratio = val_ratio + test_ratio + holdout_ratio
+             if others_ratio == 0:
+                 train_idx, others_idx = indices, []
+             else:
+                 train_idx, others_idx = train_test_split(indices, test_size=others_ratio, stratify=y, random_state=seed, shuffle=True)
+
+             train_df = df[train_idx]
+
+             if len(others_idx) > 0:
+                 y_others = [y[i] for i in others_idx]
+
+                 # Split 2: Val vs (Test + Holdout)
+                 test_holdout_ratio = (test_ratio + holdout_ratio) / others_ratio
+                 if test_holdout_ratio > 0 and test_holdout_ratio < 1:
+                     val_idx, test_holdout_idx = train_test_split(others_idx, test_size=test_holdout_ratio, stratify=y_others, random_state=seed, shuffle=True)
+                     val_df = df[val_idx]
+
+                     if len(test_holdout_idx) > 0:
+                         y_th = [y[i] for i in test_holdout_idx]
+                         relative_holdout_ratio = holdout_ratio / (test_ratio + holdout_ratio)
+
+                         if relative_holdout_ratio > 0 and relative_holdout_ratio < 1:
+                             test_idx, holdout_idx = train_test_split(test_holdout_idx, test_size=relative_holdout_ratio, stratify=y_th, random_state=seed, shuffle=True)
+                             test_df = df[test_idx]
+                             holdout_df = df[holdout_idx]
+                         elif relative_holdout_ratio >= 1:
+                             test_df = df.slice(0, 0)
+                             holdout_df = df[test_holdout_idx]
+                         else:
+                             test_df = df[test_holdout_idx]
+                             holdout_df = df.slice(0, 0)
+                 elif test_holdout_ratio >= 1:
+                     val_df = df.slice(0, 0)
+                     # Chained split for Test/Holdout
+                     y_th = y_others
+                     relative_holdout_ratio = holdout_ratio / (test_ratio + holdout_ratio)
+                     if relative_holdout_ratio > 0 and relative_holdout_ratio < 1:
+                         test_idx, holdout_idx = train_test_split(others_idx, test_size=relative_holdout_ratio, stratify=y_th, random_state=seed, shuffle=True)
+                         test_df = df[test_idx]
+                         holdout_df = df[holdout_idx]
+                     else:
+                         test_df = df[others_idx]
+                         holdout_df = df.slice(0, 0)
+                 else:
+                     val_df = df[others_idx]
+                     test_df = df.slice(0, 0)
+                     holdout_df = df.slice(0, 0)
+
+     # --- 2. TIME-BASED SPLIT ---
+     elif strategy == "time":
+         if not time_col or time_col not in df.columns:
+             return {"error": f"Time column '{time_col}' not found"}
+
+         df = df.sort(time_col)
+
+         n = len(df)
+         n_train = int(n * train_ratio)
+         n_val = int(n * val_ratio)
+         n_test = int(n * test_ratio)
+
+         train_df = df.slice(0, n_train)
+         val_df = df.slice(n_train, n_val)
+         test_df = df.slice(n_train + n_val, n_test)
+         holdout_df = df.slice(n_train + n_val + n_test, n - (n_train + n_val + n_test))
+
+     # --- 3. GROUP-BASED SPLIT ---
+     elif strategy == "group":
+         if not config.get("group_column") or config["group_column"] not in df.columns:
+             return {"error": f"Group column '{config.get('group_column')}' not found"}
+
+         group_col = config["group_column"]
+         groups = df[group_col].unique().to_list()
+
+         # Split groups first to ensure zero leakage
+         n_grps = len(groups)
+         n_train = int(n_grps * train_ratio)
+         n_val = int(n_grps * val_ratio)
+         n_test = int(n_grps * test_ratio)
+
+         if shuffle:
+             np.random.seed(seed)
+             np.random.shuffle(groups)
+
+         train_grps = set(groups[:n_train])
+         val_grps = set(groups[n_train:n_train+n_val])
+         test_grps = set(groups[n_train+n_val:n_train+n_val+n_test])
+         holdout_grps = set(groups[n_train+n_val+n_test:])
+
+         train_df = df.filter(pl.col(group_col).is_in(train_grps))
+         val_df = df.filter(pl.col(group_col).is_in(val_grps))
+         test_df = df.filter(pl.col(group_col).is_in(test_grps))
+         holdout_df = df.filter(pl.col(group_col).is_in(holdout_grps))
+
+     else:
+         return {"error": f"Strategy {strategy} not implemented yet"}
+
+     # Save outputs
+     base_name = file_path.replace(".csv", "").replace(".parquet", "")
+     train_path = f"{base_name}_train.csv"
+     val_path = f"{base_name}_val.csv"
+     test_path = f"{base_name}_test.csv"
+     holdout_path = f"{base_name}_holdout.csv"
+
+     train_df.write_csv(train_path)
+     val_df.write_csv(val_path)
+     test_df.write_csv(test_path)
+     holdout_df.write_csv(holdout_path)
+
+     return {
+         "success": True,
+         "paths": { "train": train_path, "val": val_path, "test": test_path, "holdout": holdout_path },
+         "stats": {
+             "train_rows": len(train_df),
+             "val_rows": len(val_df),
+             "test_rows": len(test_df),
+             "holdout_rows": len(holdout_df)
+         }
+     }
+
+ def validate_split(config):
+     # Config contains paths to check and optional ID column
+     train_path = config["paths"]["train"]
+     val_path = config["paths"]["val"]
+     test_path = config["paths"]["test"]
+     holdout_path = config["paths"].get("holdout")
+     id_col = config.get("id_column", "id")  # Default to 'id' if exists
+     target_col = config.get("target_column", None)
+
+     # Load dfs
+     try:
+         train_df = pl.read_csv(train_path) if train_path.endswith(".csv") else pl.read_parquet(train_path)
+         val_df = pl.read_csv(val_path) if val_path.endswith(".csv") else pl.read_parquet(val_path)
+         test_df = pl.read_csv(test_path) if test_path.endswith(".csv") else pl.read_parquet(test_path)
+         holdout_df = None
+         if holdout_path:
+             holdout_df = pl.read_csv(holdout_path) if holdout_path.endswith(".csv") else pl.read_parquet(holdout_path)
+     except:
+         return {"error": "Failed to load split files for validation"}
+
+     report = {
+         "leakage_detected": False,
+         "leakage_count": 0,
+         "distribution_mismatch": False,
+         "warnings": []
+     }
+
+     # 1. Leakage Check (ID intersection)
+     if id_col in train_df.columns:
+         train_ids = set(train_df[id_col].to_list())
+         val_ids = set(val_df[id_col].to_list())
+         test_ids = set(test_df[id_col].to_list())
+         holdout_ids = set(holdout_df[id_col].to_list()) if holdout_df is not None else set()
+
+         leakage_tv = len(train_ids.intersection(val_ids))
+         leakage_tt = len(train_ids.intersection(test_ids))
+         leakage_th = len(train_ids.intersection(holdout_ids))
+         leakage_vt = len(val_ids.intersection(test_ids))
+         leakage_vh = len(val_ids.intersection(holdout_ids))
+         leakage_th_val = len(test_ids.intersection(holdout_ids))
+
+         total_leakage = leakage_tv + leakage_tt + leakage_th + leakage_vt + leakage_vh + leakage_th_val
+
+         if total_leakage > 0:
+             report["leakage_detected"] = True
+             report["leakage_count"] = total_leakage
+             report["warnings"].append(f"Found {total_leakage} overlapping IDs between splits.")
+     else:
+         report["warnings"].append(f"ID column '{id_col}' not found. Skipping exact leakage check.")
+
+     # 2. Distribution Check (Target Distribution)
+     if target_col and target_col in train_df.columns:
+         try:
+             def get_ratios(df, col):
+                 counts = df[col].value_counts()
+                 total = len(df)
+                 ratios = {}
+                 for row in counts.rows():
+                     ratios[str(row[0])] = row[1] / total
+                 return ratios
+
+             train_metrics = get_ratios(train_df, target_col)
+             val_metrics = get_ratios(val_df, target_col)
+             # test_metrics = get_ratios(test_df, target_col)  # Optional: could check all
+
+             for cls in train_metrics:
+                 train_r = train_metrics[cls]
+                 val_r = val_metrics.get(cls, 0)
+                 diff = abs(train_r - val_r)
+                 if diff > 0.1:  # 10% drift
+                     report["distribution_mismatch"] = True
+                     report["warnings"].append(f"Class '{cls}' drift: Train={train_r:.2f}, Val={val_r:.2f}")
+         except:
+             pass
+
+     return report
+
+ def main():
+     # Usage:
+     # split: python splitter_engine.py split <file_path> <config_json>
+     # validate: python splitter_engine.py validate <config_json> (dummy file arg ignored)
+
+     if len(sys.argv) < 3:
+         print(json.dumps({"error": "Usage: splitter_engine.py <action> <arg1> [arg2]"}), file=sys.stderr)
+         sys.exit(1)
+
+     action = sys.argv[1]
+
+     try:
+         if action == "split":
+             file_path = sys.argv[2]
+             config = json.loads(sys.argv[3])
+             result = execute_split(file_path, config)
+             print(json.dumps(result))
+
+         elif action == "validate":
+             config = json.loads(sys.argv[2])
+             result = validate_split(config)
+             print(json.dumps(result))
+
+         else:
+             # Fallback for old calls (implicit split) - if users used old signature
+             # But since we control the caller, we can just update the caller (DataSplitter.ts).
+             raise ValueError(f"Unknown action: {action}")
+
+     except Exception as e:
+         print(json.dumps({"success": False, "error": str(e)}))
+         sys.exit(1)
+
+ if __name__ == "__main__":
+     main()
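
splitter_engine.py above is driven entirely by a JSON config passed on the command line: the "ratios", "type", "random_seed", "shuffle", and strategy-specific column keys are exactly what execute_split() reads, and the splits are written next to the input file as _train/_val/_test/_holdout CSVs. A minimal sketch of a stratified split invocation, assuming an illustrative dataset at data/train.csv and a script path of src/python/splitter_engine.py (neither path is prescribed by the package):

import json
import subprocess
import sys

# Config keys mirror what execute_split() expects; the values here are examples only.
config = {
    "type": "stratified",
    "target_column": "label",
    "ratios": {"train": 0.7, "val": 0.15, "test": 0.15, "holdout": 0},
    "random_seed": 42,
    "shuffle": True,
}
result = subprocess.run(
    [sys.executable, "src/python/splitter_engine.py", "split", "data/train.csv", json.dumps(config)],
    capture_output=True, text=True,
)
# Prints {"success": True, "paths": {...}, "stats": {...}} on success, or an error object.
print(json.loads(result.stdout))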
package/src/python/test_framework_adapters.py
@@ -0,0 +1,61 @@
+
+ import sys
+ import os
+ import polars as pl
+ import numpy as np
+
+ # Mock data creation
+ def create_mock_data():
+     df = pl.DataFrame({
+         "feature1": np.random.rand(100),
+         "feature2": np.random.rand(100),
+         "label": np.random.randint(0, 2, 100)
+     })
+     os.makedirs("test_adapters", exist_ok=True)
+     df.write_parquet("test_adapters/data.parquet")
+     df.write_csv("test_adapters/data.csv")
+     print("Created mock data in test_adapters/")
+
+ def test_pytorch():
+     print("\n--- Testing PyTorch Adapter ---")
+     try:
+         from framework_adapters import VesperPyTorchDataset
+         import torch
+         from torch.utils.data import DataLoader
+
+         dataset = VesperPyTorchDataset("test_adapters/data.parquet", target_col="label")
+         loader = DataLoader(dataset, batch_size=10, shuffle=True)
+
+         batch = next(iter(loader))
+         print(f"Loaded batch: {batch}")
+         print("PASS: PyTorch DataLoader works")
+
+     except ImportError:
+         print("SKIP: PyTorch not installed")
+     except Exception as e:
+         print(f"FAIL: PyTorch test failed: {e}")
+
+ def test_huggingface():
+     print("\n--- Testing HuggingFace Adapter ---")
+     try:
+         from framework_adapters import load_vesper_dataset
+         ds = load_vesper_dataset("test_adapters/data.csv")
+         print(f"Loaded dataset: {ds}")
+         print("PASS: HuggingFace Dataset works")
+
+     except ImportError:
+         print("SKIP: HuggingFace datasets not installed")
+     except Exception as e:
+         print(f"FAIL: HuggingFace test failed: {e}")
+
+ if __name__ == "__main__":
+     create_mock_data()
+     # Add src/python to path to import adapters
+     sys.path.append(os.path.join(os.getcwd(), "src", "python"))
+
+     test_pytorch()
+     test_huggingface()
+
+     # Cleanup
+     import shutil
+     shutil.rmtree("test_adapters")
package/src/python/uci_adapter.py
@@ -0,0 +1,94 @@
+ import sys
+ import json
+ import argparse
+ import urllib.request
+ import urllib.parse
+ from datetime import datetime
+
+ # API Endpoint found in network inspection of UCI website
+ UCI_API_URL = "https://archive.ics.uci.edu/api/datasets/list"
+
+ def search_uci(query: str, limit: int = 10):
+     """
+     Search UCI datasets using their internal API.
+     """
+     try:
+         # Fetch data dictionary from API
+         # Only fetching first 100 to filter locally
+         params = {
+             "skip": 0,
+             "take": 100,
+             "sort": "desc",
+             "orderBy": "NumHits",
+             "search": query
+         }
+
+         query_string = urllib.parse.urlencode(params)
+         url = f"{UCI_API_URL}?{query_string}"
+
+         req = urllib.request.Request(url)
+         with urllib.request.urlopen(req) as response:
+             data = json.load(response)
+
+         datasets = data.get('data', [])
+         if not datasets:
+             datasets = []
+
+         results = []
+         count = 0
+
+         # We trust the API search mostly, but can do extra filtering if needed
+         # The API "search" param is supported
+
+         for ds in datasets:
+             # Normalize to Vesper schema
+             # API fields: id, name, abstract, numHits, area, task, dateDonated
+
+             metadata = {
+                 "id": f"uci:{ds.get('id')}",
+                 "source": "uci",
+                 "name": ds.get('name'),
+                 "description": ds.get('abstract') or "No description available.",
+                 "downloads": ds.get('numHits') or 0,
+                 "likes": 0,
+                 "last_updated": ds.get('dateDonated') or datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"),
+                 "quality_score": 80,
+                 "license": {
+                     "id": "other",
+                     "category": "open",
+                     "usage_restrictions": [],
+                     "warnings": []
+                 },
+                 "tags": [t for t in [ds.get('area'), ds.get('task')] if t],
+                 "total_examples": ds.get('numInstances'),
+                 "is_safe_source": True,
+                 "is_structured": True,
+                 "metadata_url": f"https://archive.ics.uci.edu/dataset/{ds.get('id')}/{ds.get('name').replace(' ', '+')}"
+             }
+
+             results.append(metadata)
+             count += 1
+             if count >= limit:
+                 break
+
+         return results
+
+     except Exception as e:
+         # Fallback empty or specific error
+         return {"error": str(e)}
+
+ def main():
+     parser = argparse.ArgumentParser(description="UCI Adapter")
+     parser.add_argument("--action", required=True, choices=["search"])
+     parser.add_argument("--query", required=True)
+     parser.add_argument("--limit", type=int, default=10)
+
+     args = parser.parse_args()
+
+     if args.action == "search":
+         results = search_uci(args.query, args.limit)
+         # JSON dump print for stdout capture
+         print(json.dumps(results))
+
+ if __name__ == "__main__":
+     main()
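
uci_adapter.py above exposes a small argparse CLI (--action search --query ... --limit ...) and prints the normalized result list as JSON on stdout. A minimal sketch of calling it, with an arbitrary example query and an assumed script path of src/python/uci_adapter.py:

import json
import subprocess
import sys

out = subprocess.run(
    [sys.executable, "src/python/uci_adapter.py", "--action", "search", "--query", "heart disease", "--limit", "5"],
    capture_output=True, text=True,
)
results = json.loads(out.stdout)
# On failure the adapter returns {"error": ...} instead of a list.
if isinstance(results, list):
    for item in results:
        print(item["name"], item["metadata_url"])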