ml-dash 0.0.11__py3-none-any.whl → 0.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. ml_dash/__init__.py +59 -1
  2. ml_dash/auto_start.py +42 -0
  3. ml_dash/cli.py +67 -0
  4. ml_dash/cli_commands/__init__.py +1 -0
  5. ml_dash/cli_commands/download.py +797 -0
  6. ml_dash/cli_commands/list.py +343 -0
  7. ml_dash/cli_commands/upload.py +1298 -0
  8. ml_dash/client.py +955 -0
  9. ml_dash/config.py +114 -11
  10. ml_dash/experiment.py +1020 -0
  11. ml_dash/files.py +688 -0
  12. ml_dash/log.py +181 -0
  13. ml_dash/metric.py +292 -0
  14. ml_dash/params.py +188 -0
  15. ml_dash/storage.py +1115 -0
  16. ml_dash-0.5.9.dist-info/METADATA +244 -0
  17. ml_dash-0.5.9.dist-info/RECORD +20 -0
  18. ml_dash-0.5.9.dist-info/WHEEL +4 -0
  19. ml_dash-0.5.9.dist-info/entry_points.txt +3 -0
  20. ml_dash/app.py +0 -33
  21. ml_dash/file_events.py +0 -71
  22. ml_dash/file_handlers.py +0 -141
  23. ml_dash/file_utils.py +0 -5
  24. ml_dash/file_watcher.py +0 -30
  25. ml_dash/main.py +0 -60
  26. ml_dash/mime_types.py +0 -20
  27. ml_dash/schema/__init__.py +0 -110
  28. ml_dash/schema/archive.py +0 -165
  29. ml_dash/schema/directories.py +0 -59
  30. ml_dash/schema/experiments.py +0 -65
  31. ml_dash/schema/files/__init__.py +0 -204
  32. ml_dash/schema/files/file_helpers.py +0 -79
  33. ml_dash/schema/files/images.py +0 -27
  34. ml_dash/schema/files/metrics.py +0 -64
  35. ml_dash/schema/files/parameters.py +0 -50
  36. ml_dash/schema/files/series.py +0 -235
  37. ml_dash/schema/files/videos.py +0 -27
  38. ml_dash/schema/helpers.py +0 -66
  39. ml_dash/schema/projects.py +0 -65
  40. ml_dash/schema/schema_helpers.py +0 -19
  41. ml_dash/schema/users.py +0 -33
  42. ml_dash/sse.py +0 -18
  43. ml_dash-0.0.11.dist-info/METADATA +0 -67
  44. ml_dash-0.0.11.dist-info/RECORD +0 -30
  45. ml_dash-0.0.11.dist-info/WHEEL +0 -5
  46. ml_dash-0.0.11.dist-info/top_level.txt +0 -1
  47. /ml_dash/{example.py → py.typed} +0 -0
ml_dash/client.py ADDED
@@ -0,0 +1,955 @@
1
+ """
2
+ Remote API client for ML-Dash server.
3
+ """
4
+
5
+ from typing import Optional, Dict, Any, List
6
+ import httpx
7
+
8
+
9
class RemoteClient:
    """HTTP client wrapper for talking to an ML-Dash server."""

    def __init__(self, base_url: str, api_key: str):
        """
        Set up the underlying HTTP session.

        Args:
            base_url: Server root URL, e.g. "http://localhost:3000".
            api_key: JWT bearer token used for authentication.
        """
        self.base_url = base_url.rstrip("/")
        self.api_key = api_key
        # Content-Type is deliberately not set as a default header;
        # each request chooses its own (JSON vs. multipart).
        auth_headers = {"Authorization": f"Bearer {api_key}"}
        self._client = httpx.Client(
            base_url=self.base_url,
            headers=auth_headers,
            timeout=30.0,
        )
31
+
32
+ def create_or_update_experiment(
33
+ self,
34
+ project: str,
35
+ name: str,
36
+ description: Optional[str] = None,
37
+ tags: Optional[List[str]] = None,
38
+ bindrs: Optional[List[str]] = None,
39
+ folder: Optional[str] = None,
40
+ write_protected: bool = False,
41
+ metadata: Optional[Dict[str, Any]] = None,
42
+ ) -> Dict[str, Any]:
43
+ """
44
+ Create or update an experiment.
45
+
46
+ Args:
47
+ project: Project name
48
+ name: Experiment name
49
+ description: Optional description
50
+ tags: Optional list of tags
51
+ bindrs: Optional list of bindrs
52
+ folder: Optional folder path
53
+ write_protected: If True, experiment becomes immutable
54
+ metadata: Optional metadata dict
55
+
56
+ Returns:
57
+ Response dict with experiment, project, folder, and namespace data
58
+
59
+ Raises:
60
+ httpx.HTTPStatusError: If request fails
61
+ """
62
+ payload = {
63
+ "name": name,
64
+ }
65
+
66
+ if description is not None:
67
+ payload["description"] = description
68
+ if tags is not None:
69
+ payload["tags"] = tags
70
+ if bindrs is not None:
71
+ payload["bindrs"] = bindrs
72
+ if folder is not None:
73
+ payload["folder"] = folder
74
+ if write_protected:
75
+ payload["writeProtected"] = write_protected
76
+ if metadata is not None:
77
+ payload["metadata"] = metadata
78
+
79
+ response = self._client.post(
80
+ f"/projects/{project}/experiments",
81
+ json=payload,
82
+ )
83
+ response.raise_for_status()
84
+ return response.json()
85
+
86
+ def update_experiment_status(
87
+ self,
88
+ experiment_id: str,
89
+ status: str,
90
+ ) -> Dict[str, Any]:
91
+ """
92
+ Update experiment status.
93
+
94
+ Args:
95
+ experiment_id: Experiment ID
96
+ status: Status value - "RUNNING" | "COMPLETED" | "FAILED" | "CANCELLED"
97
+
98
+ Returns:
99
+ Response dict with updated experiment data
100
+
101
+ Raises:
102
+ httpx.HTTPStatusError: If request fails
103
+ """
104
+ payload = {
105
+ "status": status,
106
+ }
107
+
108
+ response = self._client.patch(
109
+ f"/experiments/{experiment_id}/status",
110
+ json=payload,
111
+ )
112
+ response.raise_for_status()
113
+ return response.json()
114
+
115
+ def create_log_entries(
116
+ self,
117
+ experiment_id: str,
118
+ logs: List[Dict[str, Any]]
119
+ ) -> Dict[str, Any]:
120
+ """
121
+ Create log entries in batch.
122
+
123
+ Supports both single log and multiple logs via array.
124
+
125
+ Args:
126
+ experiment_id: Experiment ID (Snowflake ID)
127
+ logs: List of log entries, each with fields:
128
+ - timestamp: ISO 8601 string
129
+ - level: "info"|"warn"|"error"|"debug"|"fatal"
130
+ - message: Log message string
131
+ - metadata: Optional dict
132
+
133
+ Returns:
134
+ Response dict:
135
+ {
136
+ "created": 1,
137
+ "startSequence": 42,
138
+ "endSequence": 42,
139
+ "experimentId": "123456789"
140
+ }
141
+
142
+ Raises:
143
+ httpx.HTTPStatusError: If request fails
144
+ """
145
+ response = self._client.post(
146
+ f"/experiments/{experiment_id}/logs",
147
+ json={"logs": logs}
148
+ )
149
+ response.raise_for_status()
150
+ return response.json()
151
+
152
+ def set_parameters(
153
+ self,
154
+ experiment_id: str,
155
+ data: Dict[str, Any]
156
+ ) -> Dict[str, Any]:
157
+ """
158
+ Set/merge parameters for an experiment.
159
+
160
+ Always merges with existing parameters (upsert behavior).
161
+
162
+ Args:
163
+ experiment_id: Experiment ID (Snowflake ID)
164
+ data: Flattened parameter dict with dot notation
165
+ Example: {"model.lr": 0.001, "model.batch_size": 32}
166
+
167
+ Returns:
168
+ Response dict:
169
+ {
170
+ "id": "snowflake_id",
171
+ "experimentId": "experiment_id",
172
+ "data": {...},
173
+ "version": 2,
174
+ "createdAt": "...",
175
+ "updatedAt": "..."
176
+ }
177
+
178
+ Raises:
179
+ httpx.HTTPStatusError: If request fails
180
+ """
181
+ response = self._client.post(
182
+ f"/experiments/{experiment_id}/parameters",
183
+ json={"data": data}
184
+ )
185
+ response.raise_for_status()
186
+ return response.json()
187
+
188
+ def get_parameters(self, experiment_id: str) -> Dict[str, Any]:
189
+ """
190
+ Get parameters for an experiment.
191
+
192
+ Args:
193
+ experiment_id: Experiment ID (Snowflake ID)
194
+
195
+ Returns:
196
+ Flattened parameter dict with dot notation
197
+ Example: {"model.lr": 0.001, "model.batch_size": 32}
198
+
199
+ Raises:
200
+ httpx.HTTPStatusError: If request fails or parameters don't exist
201
+ """
202
+ response = self._client.get(f"/experiments/{experiment_id}/parameters")
203
+ response.raise_for_status()
204
+ result = response.json()
205
+ return result.get("data", {})
206
+
207
+ def upload_file(
208
+ self,
209
+ experiment_id: str,
210
+ file_path: str,
211
+ prefix: str,
212
+ filename: str,
213
+ description: Optional[str],
214
+ tags: Optional[List[str]],
215
+ metadata: Optional[Dict[str, Any]],
216
+ checksum: str,
217
+ content_type: str,
218
+ size_bytes: int
219
+ ) -> Dict[str, Any]:
220
+ """
221
+ Upload a file to an experiment.
222
+
223
+ Args:
224
+ experiment_id: Experiment ID (Snowflake ID)
225
+ file_path: Local file path
226
+ prefix: Logical path prefix
227
+ filename: Original filename
228
+ description: Optional description
229
+ tags: Optional tags
230
+ metadata: Optional metadata
231
+ checksum: SHA256 checksum
232
+ content_type: MIME type
233
+ size_bytes: File size in bytes
234
+
235
+ Returns:
236
+ File metadata dict
237
+
238
+ Raises:
239
+ httpx.HTTPStatusError: If request fails
240
+ """
241
+ # Prepare multipart form data
242
+ # Read file content first (httpx needs content, not file handle)
243
+ with open(file_path, "rb") as f:
244
+ file_content = f.read()
245
+
246
+ files = {"file": (filename, file_content, content_type)}
247
+ data = {
248
+ "prefix": prefix,
249
+ "checksum": checksum,
250
+ "sizeBytes": str(size_bytes),
251
+ }
252
+ if description:
253
+ data["description"] = description
254
+ if tags:
255
+ data["tags"] = ",".join(tags)
256
+ if metadata:
257
+ import json
258
+ data["metadata"] = json.dumps(metadata)
259
+
260
+ # httpx will automatically set multipart/form-data content-type
261
+ response = self._client.post(
262
+ f"/experiments/{experiment_id}/files",
263
+ files=files,
264
+ data=data
265
+ )
266
+
267
+ response.raise_for_status()
268
+ return response.json()
269
+
270
+ def list_files(
271
+ self,
272
+ experiment_id: str,
273
+ prefix: Optional[str] = None,
274
+ tags: Optional[List[str]] = None
275
+ ) -> List[Dict[str, Any]]:
276
+ """
277
+ List files in an experiment.
278
+
279
+ Args:
280
+ experiment_id: Experiment ID (Snowflake ID)
281
+ prefix: Optional prefix filter
282
+ tags: Optional tags filter
283
+
284
+ Returns:
285
+ List of file metadata dicts
286
+
287
+ Raises:
288
+ httpx.HTTPStatusError: If request fails
289
+ """
290
+ params = {}
291
+ if prefix:
292
+ params["prefix"] = prefix
293
+ if tags:
294
+ params["tags"] = ",".join(tags)
295
+
296
+ response = self._client.get(
297
+ f"/experiments/{experiment_id}/files",
298
+ params=params
299
+ )
300
+ response.raise_for_status()
301
+ result = response.json()
302
+ return result.get("files", [])
303
+
304
+ def get_file(self, experiment_id: str, file_id: str) -> Dict[str, Any]:
305
+ """
306
+ Get file metadata.
307
+
308
+ Args:
309
+ experiment_id: Experiment ID (Snowflake ID)
310
+ file_id: File ID (Snowflake ID)
311
+
312
+ Returns:
313
+ File metadata dict
314
+
315
+ Raises:
316
+ httpx.HTTPStatusError: If request fails
317
+ """
318
+ response = self._client.get(f"/experiments/{experiment_id}/files/{file_id}")
319
+ response.raise_for_status()
320
+ return response.json()
321
+
322
+ def download_file(
323
+ self,
324
+ experiment_id: str,
325
+ file_id: str,
326
+ dest_path: Optional[str] = None
327
+ ) -> str:
328
+ """
329
+ Download a file from a experiment.
330
+
331
+ Args:
332
+ experiment_id: Experiment ID (Snowflake ID)
333
+ file_id: File ID (Snowflake ID)
334
+ dest_path: Optional destination path (defaults to original filename)
335
+
336
+ Returns:
337
+ Path to downloaded file
338
+
339
+ Raises:
340
+ httpx.HTTPStatusError: If request fails
341
+ ValueError: If checksum verification fails
342
+ """
343
+ # Get file metadata first to get filename and checksum
344
+ file_metadata = self.get_file(experiment_id, file_id)
345
+ filename = file_metadata["filename"]
346
+ expected_checksum = file_metadata["checksum"]
347
+
348
+ # Determine destination path
349
+ if dest_path is None:
350
+ dest_path = filename
351
+
352
+ # Download file
353
+ response = self._client.get(
354
+ f"/experiments/{experiment_id}/files/{file_id}/download"
355
+ )
356
+ response.raise_for_status()
357
+
358
+ # Write to file
359
+ with open(dest_path, "wb") as f:
360
+ f.write(response.content)
361
+
362
+ # Verify checksum
363
+ from .files import verify_checksum
364
+ if not verify_checksum(dest_path, expected_checksum):
365
+ # Delete corrupted file
366
+ import os
367
+ os.remove(dest_path)
368
+ raise ValueError(f"Checksum verification failed for file {file_id}")
369
+
370
+ return dest_path
371
+
372
+ def delete_file(self, experiment_id: str, file_id: str) -> Dict[str, Any]:
373
+ """
374
+ Delete a file (soft delete).
375
+
376
+ Args:
377
+ experiment_id: Experiment ID (Snowflake ID)
378
+ file_id: File ID (Snowflake ID)
379
+
380
+ Returns:
381
+ Dict with id and deletedAt
382
+
383
+ Raises:
384
+ httpx.HTTPStatusError: If request fails
385
+ """
386
+ response = self._client.delete(f"/experiments/{experiment_id}/files/{file_id}")
387
+ response.raise_for_status()
388
+ return response.json()
389
+
390
+ def update_file(
391
+ self,
392
+ experiment_id: str,
393
+ file_id: str,
394
+ description: Optional[str] = None,
395
+ tags: Optional[List[str]] = None,
396
+ metadata: Optional[Dict[str, Any]] = None
397
+ ) -> Dict[str, Any]:
398
+ """
399
+ Update file metadata.
400
+
401
+ Args:
402
+ experiment_id: Experiment ID (Snowflake ID)
403
+ file_id: File ID (Snowflake ID)
404
+ description: Optional description
405
+ tags: Optional tags
406
+ metadata: Optional metadata
407
+
408
+ Returns:
409
+ Updated file metadata dict
410
+
411
+ Raises:
412
+ httpx.HTTPStatusError: If request fails
413
+ """
414
+ payload = {}
415
+ if description is not None:
416
+ payload["description"] = description
417
+ if tags is not None:
418
+ payload["tags"] = tags
419
+ if metadata is not None:
420
+ payload["metadata"] = metadata
421
+
422
+ response = self._client.patch(
423
+ f"/experiments/{experiment_id}/files/{file_id}",
424
+ json=payload
425
+ )
426
+ response.raise_for_status()
427
+ return response.json()
428
+
429
+ def append_to_metric(
430
+ self,
431
+ experiment_id: str,
432
+ metric_name: str,
433
+ data: Dict[str, Any],
434
+ description: Optional[str] = None,
435
+ tags: Optional[List[str]] = None,
436
+ metadata: Optional[Dict[str, Any]] = None
437
+ ) -> Dict[str, Any]:
438
+ """
439
+ Append a single data point to a metric.
440
+
441
+ Args:
442
+ experiment_id: Experiment ID (Snowflake ID)
443
+ metric_name: Metric name (unique within experiment)
444
+ data: Data point (flexible schema)
445
+ description: Optional metric description
446
+ tags: Optional tags
447
+ metadata: Optional metadata
448
+
449
+ Returns:
450
+ Dict with metricId, index, bufferedDataPoints, chunkSize
451
+
452
+ Raises:
453
+ httpx.HTTPStatusError: If request fails
454
+ """
455
+ payload = {"data": data}
456
+ if description:
457
+ payload["description"] = description
458
+ if tags:
459
+ payload["tags"] = tags
460
+ if metadata:
461
+ payload["metadata"] = metadata
462
+
463
+ response = self._client.post(
464
+ f"/experiments/{experiment_id}/metrics/{metric_name}/append",
465
+ json=payload
466
+ )
467
+ response.raise_for_status()
468
+ return response.json()
469
+
470
+ def append_batch_to_metric(
471
+ self,
472
+ experiment_id: str,
473
+ metric_name: str,
474
+ data_points: List[Dict[str, Any]],
475
+ description: Optional[str] = None,
476
+ tags: Optional[List[str]] = None,
477
+ metadata: Optional[Dict[str, Any]] = None
478
+ ) -> Dict[str, Any]:
479
+ """
480
+ Append multiple data points to a metric in batch.
481
+
482
+ Args:
483
+ experiment_id: Experiment ID (Snowflake ID)
484
+ metric_name: Metric name (unique within experiment)
485
+ data_points: List of data points
486
+ description: Optional metric description
487
+ tags: Optional tags
488
+ metadata: Optional metadata
489
+
490
+ Returns:
491
+ Dict with metricId, startIndex, endIndex, count, bufferedDataPoints, chunkSize
492
+
493
+ Raises:
494
+ httpx.HTTPStatusError: If request fails
495
+ """
496
+ payload = {"dataPoints": data_points}
497
+ if description:
498
+ payload["description"] = description
499
+ if tags:
500
+ payload["tags"] = tags
501
+ if metadata:
502
+ payload["metadata"] = metadata
503
+
504
+ response = self._client.post(
505
+ f"/experiments/{experiment_id}/metrics/{metric_name}/append-batch",
506
+ json=payload
507
+ )
508
+ response.raise_for_status()
509
+ return response.json()
510
+
511
+ def read_metric_data(
512
+ self,
513
+ experiment_id: str,
514
+ metric_name: str,
515
+ start_index: int = 0,
516
+ limit: int = 1000
517
+ ) -> Dict[str, Any]:
518
+ """
519
+ Read data points from a metric.
520
+
521
+ Args:
522
+ experiment_id: Experiment ID (Snowflake ID)
523
+ metric_name: Metric name
524
+ start_index: Starting index (default 0)
525
+ limit: Max points to read (default 1000, max 10000)
526
+
527
+ Returns:
528
+ Dict with data, startIndex, endIndex, total, hasMore
529
+
530
+ Raises:
531
+ httpx.HTTPStatusError: If request fails
532
+ """
533
+ response = self._client.get(
534
+ f"/experiments/{experiment_id}/metrics/{metric_name}/data",
535
+ params={"startIndex": start_index, "limit": limit}
536
+ )
537
+ response.raise_for_status()
538
+ return response.json()
539
+
540
+ def get_metric_stats(
541
+ self,
542
+ experiment_id: str,
543
+ metric_name: str
544
+ ) -> Dict[str, Any]:
545
+ """
546
+ Get metric statistics and metadata.
547
+
548
+ Args:
549
+ experiment_id: Experiment ID (Snowflake ID)
550
+ metric_name: Metric name
551
+
552
+ Returns:
553
+ Dict with metric stats (totalDataPoints, bufferedDataPoints, etc.)
554
+
555
+ Raises:
556
+ httpx.HTTPStatusError: If request fails
557
+ """
558
+ response = self._client.get(
559
+ f"/experiments/{experiment_id}/metrics/{metric_name}/stats"
560
+ )
561
+ response.raise_for_status()
562
+ return response.json()
563
+
564
+ def list_metrics(
565
+ self,
566
+ experiment_id: str
567
+ ) -> List[Dict[str, Any]]:
568
+ """
569
+ List all metrics in an experiment.
570
+
571
+ Args:
572
+ experiment_id: Experiment ID (Snowflake ID)
573
+
574
+ Returns:
575
+ List of metric summaries
576
+
577
+ Raises:
578
+ httpx.HTTPStatusError: If request fails
579
+ """
580
+ response = self._client.get(f"/experiments/{experiment_id}/metrics")
581
+ response.raise_for_status()
582
+ return response.json()["metrics"]
583
+
584
+ def graphql_query(self, query: str, variables: Optional[Dict] = None) -> Dict[str, Any]:
585
+ """
586
+ Execute a GraphQL query.
587
+
588
+ Args:
589
+ query: GraphQL query string
590
+ variables: Optional variables for the query
591
+
592
+ Returns:
593
+ Query result data
594
+
595
+ Raises:
596
+ httpx.HTTPStatusError: If request fails
597
+ Exception: If GraphQL returns errors
598
+ """
599
+ response = self._client.post(
600
+ "/graphql",
601
+ json={"query": query, "variables": variables or {}}
602
+ )
603
+ response.raise_for_status()
604
+ result = response.json()
605
+
606
+ if "errors" in result:
607
+ raise Exception(f"GraphQL errors: {result['errors']}")
608
+
609
+ return result.get("data", {})
610
+
611
+ def list_projects_graphql(self, namespace_slug: str) -> List[Dict[str, Any]]:
612
+ """
613
+ List all projects in a namespace via GraphQL.
614
+
615
+ Args:
616
+ namespace_slug: Namespace slug
617
+
618
+ Returns:
619
+ List of project dicts with experimentCount
620
+
621
+ Raises:
622
+ httpx.HTTPStatusError: If request fails
623
+ """
624
+ query = """
625
+ query Projects($namespaceSlug: String!) {
626
+ projects(namespaceSlug: $namespaceSlug) {
627
+ id
628
+ name
629
+ slug
630
+ description
631
+ tags
632
+ }
633
+ }
634
+ """
635
+ result = self.graphql_query(query, {"namespaceSlug": namespace_slug})
636
+ projects = result.get("projects", [])
637
+
638
+ # For each project, count experiments
639
+ for project in projects:
640
+ exp_query = """
641
+ query ExperimentsCount($namespaceSlug: String!, $projectSlug: String!) {
642
+ experiments(namespaceSlug: $namespaceSlug, projectSlug: $projectSlug) {
643
+ id
644
+ }
645
+ }
646
+ """
647
+ exp_result = self.graphql_query(exp_query, {
648
+ "namespaceSlug": namespace_slug,
649
+ "projectSlug": project['slug']
650
+ })
651
+ experiments = exp_result.get("experiments", [])
652
+ project['experimentCount'] = len(experiments)
653
+
654
+ return projects
655
+
656
+ def list_experiments_graphql(
657
+ self, namespace_slug: str, project_slug: str, status: Optional[str] = None
658
+ ) -> List[Dict[str, Any]]:
659
+ """
660
+ List experiments in a project via GraphQL.
661
+
662
+ Args:
663
+ namespace_slug: Namespace slug
664
+ project_slug: Project slug
665
+ status: Optional experiment status filter (RUNNING, COMPLETED, FAILED, CANCELLED)
666
+
667
+ Returns:
668
+ List of experiment dicts with metadata
669
+
670
+ Raises:
671
+ httpx.HTTPStatusError: If request fails
672
+ """
673
+ query = """
674
+ query Experiments($namespaceSlug: String!, $projectSlug: String!, $status: ExperimentStatus) {
675
+ experiments(namespaceSlug: $namespaceSlug, projectSlug: $projectSlug, status: $status) {
676
+ id
677
+ name
678
+ description
679
+ tags
680
+ status
681
+ startedAt
682
+ endedAt
683
+ metadata
684
+ project {
685
+ slug
686
+ }
687
+ logMetadata {
688
+ totalLogs
689
+ }
690
+ metrics {
691
+ name
692
+ metricMetadata {
693
+ totalDataPoints
694
+ }
695
+ }
696
+ files {
697
+ id
698
+ filename
699
+ path
700
+ contentType
701
+ sizeBytes
702
+ checksum
703
+ description
704
+ tags
705
+ metadata
706
+ }
707
+ parameters {
708
+ id
709
+ data
710
+ }
711
+ }
712
+ }
713
+ """
714
+ result = self.graphql_query(query, {
715
+ "namespaceSlug": namespace_slug,
716
+ "projectSlug": project_slug,
717
+ "status": status
718
+ })
719
+ return result.get("experiments", [])
720
+
721
+ def get_experiment_graphql(
722
+ self, namespace_slug: str, project_slug: str, experiment_name: str
723
+ ) -> Optional[Dict[str, Any]]:
724
+ """
725
+ Get a single experiment via GraphQL.
726
+
727
+ Args:
728
+ namespace_slug: Namespace slug
729
+ project_slug: Project slug
730
+ experiment_name: Experiment name
731
+
732
+ Returns:
733
+ Experiment dict with metadata, or None if not found
734
+
735
+ Raises:
736
+ httpx.HTTPStatusError: If request fails
737
+ """
738
+ query = """
739
+ query Experiment($namespaceSlug: String!, $projectSlug: String!, $experimentName: String!) {
740
+ experiment(namespaceSlug: $namespaceSlug, projectSlug: $projectSlug, experimentName: $experimentName) {
741
+ id
742
+ name
743
+ description
744
+ tags
745
+ status
746
+ metadata
747
+ project {
748
+ slug
749
+ }
750
+ logMetadata {
751
+ totalLogs
752
+ }
753
+ metrics {
754
+ name
755
+ metricMetadata {
756
+ totalDataPoints
757
+ }
758
+ }
759
+ files {
760
+ id
761
+ filename
762
+ path
763
+ contentType
764
+ sizeBytes
765
+ checksum
766
+ description
767
+ tags
768
+ metadata
769
+ }
770
+ parameters {
771
+ id
772
+ data
773
+ }
774
+ }
775
+ }
776
+ """
777
+ result = self.graphql_query(query, {
778
+ "namespaceSlug": namespace_slug,
779
+ "projectSlug": project_slug,
780
+ "experimentName": experiment_name
781
+ })
782
+ return result.get("experiment")
783
+
784
+ def download_file_streaming(
785
+ self, experiment_id: str, file_id: str, dest_path: str
786
+ ) -> str:
787
+ """
788
+ Download a file with streaming for large files.
789
+
790
+ Args:
791
+ experiment_id: Experiment ID (Snowflake ID)
792
+ file_id: File ID (Snowflake ID)
793
+ dest_path: Destination path to save file
794
+
795
+ Returns:
796
+ Path to downloaded file
797
+
798
+ Raises:
799
+ httpx.HTTPStatusError: If request fails
800
+ ValueError: If checksum verification fails
801
+ """
802
+ # Get metadata first for checksum
803
+ file_metadata = self.get_file(experiment_id, file_id)
804
+ expected_checksum = file_metadata["checksum"]
805
+
806
+ # Stream download
807
+ with self._client.stream("GET", f"/experiments/{experiment_id}/files/{file_id}/download") as response:
808
+ response.raise_for_status()
809
+
810
+ with open(dest_path, "wb") as f:
811
+ for chunk in response.iter_bytes(chunk_size=8192):
812
+ f.write(chunk)
813
+
814
+ # Verify checksum
815
+ from .files import verify_checksum
816
+ if not verify_checksum(dest_path, expected_checksum):
817
+ import os
818
+ os.remove(dest_path)
819
+ raise ValueError(f"Checksum verification failed for file {file_id}")
820
+
821
+ return dest_path
822
+
823
+ def query_logs(
824
+ self,
825
+ experiment_id: str,
826
+ limit: Optional[int] = None,
827
+ offset: Optional[int] = None,
828
+ order_by: Optional[str] = None,
829
+ order: Optional[str] = None,
830
+ level: Optional[List[str]] = None,
831
+ start_time: Optional[str] = None,
832
+ end_time: Optional[str] = None,
833
+ search: Optional[str] = None,
834
+ ) -> Dict[str, Any]:
835
+ """
836
+ Query logs for an experiment.
837
+
838
+ Args:
839
+ experiment_id: Experiment ID
840
+ limit: Maximum number of logs to return
841
+ offset: Number of logs to skip
842
+ order_by: Field to order by (timestamp or sequenceNumber)
843
+ order: Sort order (asc or desc)
844
+ level: List of log levels to filter by
845
+ start_time: Filter logs after this timestamp
846
+ end_time: Filter logs before this timestamp
847
+ search: Search query for log messages
848
+
849
+ Returns:
850
+ Dict with logs array and pagination info
851
+
852
+ Raises:
853
+ httpx.HTTPStatusError: If request fails
854
+ """
855
+ params: Dict[str, str] = {}
856
+
857
+ if limit is not None:
858
+ params["limit"] = str(limit)
859
+ if offset is not None:
860
+ params["offset"] = str(offset)
861
+ if order_by is not None:
862
+ params["orderBy"] = order_by
863
+ if order is not None:
864
+ params["order"] = order
865
+ if level is not None:
866
+ params["level"] = ",".join(level)
867
+ if start_time is not None:
868
+ params["startTime"] = start_time
869
+ if end_time is not None:
870
+ params["endTime"] = end_time
871
+ if search is not None:
872
+ params["search"] = search
873
+
874
+ response = self._client.get(f"/experiments/{experiment_id}/logs", params=params)
875
+ response.raise_for_status()
876
+ return response.json()
877
+
878
+ def get_metric_data(
879
+ self,
880
+ experiment_id: str,
881
+ metric_name: str,
882
+ start_index: Optional[int] = None,
883
+ limit: Optional[int] = None,
884
+ buffer_only: bool = False,
885
+ ) -> Dict[str, Any]:
886
+ """
887
+ Get data points for a metric.
888
+
889
+ Args:
890
+ experiment_id: Experiment ID
891
+ metric_name: Name of the metric
892
+ start_index: Starting index for pagination
893
+ limit: Maximum number of data points to return
894
+ buffer_only: If True, only fetch buffer data (skip chunks)
895
+
896
+ Returns:
897
+ Dict with dataPoints array and pagination info
898
+
899
+ Raises:
900
+ httpx.HTTPStatusError: If request fails
901
+ """
902
+ params: Dict[str, str] = {}
903
+
904
+ if start_index is not None:
905
+ params["startIndex"] = str(start_index)
906
+ if limit is not None:
907
+ params["limit"] = str(limit)
908
+ if buffer_only:
909
+ params["bufferOnly"] = "true"
910
+
911
+ response = self._client.get(
912
+ f"/experiments/{experiment_id}/metrics/{metric_name}/data",
913
+ params=params
914
+ )
915
+ response.raise_for_status()
916
+ return response.json()
917
+
918
+ def download_metric_chunk(
919
+ self,
920
+ experiment_id: str,
921
+ metric_name: str,
922
+ chunk_number: int,
923
+ ) -> Dict[str, Any]:
924
+ """
925
+ Download a specific chunk by chunk number.
926
+
927
+ Args:
928
+ experiment_id: Experiment ID
929
+ metric_name: Name of the metric
930
+ chunk_number: Chunk number to download
931
+
932
+ Returns:
933
+ Dict with chunk data including chunkNumber, startIndex, endIndex, dataCount, and data array
934
+
935
+ Raises:
936
+ httpx.HTTPStatusError: If request fails
937
+ """
938
+ response = self._client.get(
939
+ f"/experiments/{experiment_id}/metrics/{metric_name}/chunks/{chunk_number}"
940
+ )
941
+ response.raise_for_status()
942
+ return response.json()
943
+
944
+ def close(self):
945
+ """Close the HTTP client."""
946
+ self._client.close()
947
+
948
+ def __enter__(self):
949
+ """Context manager entry."""
950
+ return self
951
+
952
+ def __exit__(self, exc_type, exc_val, exc_tb):
953
+ """Context manager exit."""
954
+ self.close()
955
+ return False