ml-dash 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ml_dash/client.py ADDED
@@ -0,0 +1,1003 @@
1
+ """
2
+ Remote API client for ML-Dash server.
3
+ """
4
+
5
+ from typing import Optional, Dict, Any, List
6
+ import httpx
7
+
8
+
9
+ class RemoteClient:
10
+ """Client for communicating with ML-Dash server."""
11
+
12
+ def __init__(self, base_url: str, api_key: Optional[str] = None):
13
+ """
14
+ Initialize remote client.
15
+
16
+ Args:
17
+ base_url: Base URL of ML-Dash server (e.g., "http://localhost:3000")
18
+ api_key: JWT token for authentication (optional - auto-loads from storage if not provided)
19
+
20
+ Note:
21
+ If no api_key is provided, the token is loaded from local storage when the client is constructed.
22
+ If no stored token is found either, AuthenticationError is raised on the first API call.
23
+ """
24
+ # Store original base URL for GraphQL (no /api prefix)
25
+ self.graphql_base_url = base_url.rstrip("/")
26
+
27
+ # Add /api prefix to base URL for REST API calls
28
+ self.base_url = base_url.rstrip("/") + "/api"
29
+
30
+ # If no api_key provided, try to load from storage
31
+ if not api_key:
32
+ from .auth.token_storage import get_token_storage
33
+
34
+ storage = get_token_storage()
35
+ api_key = storage.load("ml-dash-token")
36
+
37
+ self.api_key = api_key
38
+ self._rest_client = None
39
+ self._gql_client = None
40
+
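A minimal construction sketch (not shown in the package itself), assuming a server at a placeholder URL and importing from the module path in this diff; the token value is a placeholder:

    from ml_dash.client import RemoteClient

    # Explicit token: no storage lookup is attempted.
    client = RemoteClient("http://localhost:3000", api_key="eyJ...placeholder")

    # No token: __init__ tries the local token storage ("ml-dash-token");
    # if nothing is stored, the first API call raises AuthenticationError.
    client = RemoteClient("http://localhost:3000")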
41
+ def _ensure_authenticated(self):
42
+ """Check if authenticated, raise error if not."""
43
+ if not self.api_key:
44
+ from .auth.exceptions import AuthenticationError
45
+ raise AuthenticationError(
46
+ "Not authenticated. Run 'ml-dash login' to authenticate, "
47
+ "or provide an explicit api_key parameter."
48
+ )
49
+
50
+ @property
51
+ def _client(self):
52
+ """Lazy REST API client (with /api prefix)."""
53
+ if self._rest_client is None:
54
+ self._ensure_authenticated()
55
+ self._rest_client = httpx.Client(
56
+ base_url=self.base_url,
57
+ headers={
58
+ "Authorization": f"Bearer {self.api_key}",
59
+ # Note: Don't set Content-Type here as default
60
+ # It will be set per-request (json or multipart)
61
+ },
62
+ timeout=30.0,
63
+ )
64
+ return self._rest_client
65
+
66
+ @property
67
+ def _graphql_client(self):
68
+ """Lazy GraphQL client (without /api prefix)."""
69
+ if self._gql_client is None:
70
+ self._ensure_authenticated()
71
+ self._gql_client = httpx.Client(
72
+ base_url=self.graphql_base_url,
73
+ headers={
74
+ "Authorization": f"Bearer {self.api_key}",
75
+ },
76
+ timeout=30.0,
77
+ )
78
+ return self._gql_client
79
+
80
+ def create_or_update_experiment(
81
+ self,
82
+ project: str,
83
+ name: str,
84
+ description: Optional[str] = None,
85
+ tags: Optional[List[str]] = None,
86
+ bindrs: Optional[List[str]] = None,
87
+ folder: Optional[str] = None,
88
+ write_protected: bool = False,
89
+ metadata: Optional[Dict[str, Any]] = None,
90
+ ) -> Dict[str, Any]:
91
+ """
92
+ Create or update an experiment.
93
+
94
+ Args:
95
+ project: Project name
96
+ name: Experiment name
97
+ description: Optional description
98
+ tags: Optional list of tags
99
+ bindrs: Optional list of bindrs
100
+ folder: Optional folder path
101
+ write_protected: If True, experiment becomes immutable
102
+ metadata: Optional metadata dict
103
+
104
+ Returns:
105
+ Response dict with experiment, project, folder, and namespace data
106
+
107
+ Raises:
108
+ httpx.HTTPStatusError: If request fails
109
+ """
110
+ payload = {
111
+ "name": name,
112
+ }
113
+
114
+ if description is not None:
115
+ payload["description"] = description
116
+ if tags is not None:
117
+ payload["tags"] = tags
118
+ if bindrs is not None:
119
+ payload["bindrs"] = bindrs
120
+ if folder is not None:
121
+ payload["folder"] = folder
122
+ if write_protected:
123
+ payload["writeProtected"] = write_protected
124
+ if metadata is not None:
125
+ payload["metadata"] = metadata
126
+
127
+ response = self._client.post(
128
+ f"/projects/{project}/experiments",
129
+ json=payload,
130
+ )
131
+ response.raise_for_status()
132
+ return response.json()
133
+
134
+ def update_experiment_status(
135
+ self,
136
+ experiment_id: str,
137
+ status: str,
138
+ ) -> Dict[str, Any]:
139
+ """
140
+ Update experiment status.
141
+
142
+ Args:
143
+ experiment_id: Experiment ID
144
+ status: Status value - "RUNNING" | "COMPLETED" | "FAILED" | "CANCELLED"
145
+
146
+ Returns:
147
+ Response dict with updated experiment data
148
+
149
+ Raises:
150
+ httpx.HTTPStatusError: If request fails
151
+ """
152
+ payload = {
153
+ "status": status,
154
+ }
155
+
156
+ response = self._client.patch(
157
+ f"/experiments/{experiment_id}/status",
158
+ json=payload,
159
+ )
160
+ response.raise_for_status()
161
+ return response.json()
162
+
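A hedged lifecycle sketch using the two calls above; project and experiment names are placeholders, and reading the ID out of the response assumes the experiment record sits under an "experiment" key (the docstring only says the response carries experiment, project, folder, and namespace data):

    resp = client.create_or_update_experiment(
        project="mnist",
        name="baseline-run",
        description="Baseline CNN",
        tags=["cnn", "baseline"],
    )
    experiment_id = resp["experiment"]["id"]  # assumed response shape

    client.update_experiment_status(experiment_id, status="RUNNING")
    # ... training happens here ...
    client.update_experiment_status(experiment_id, status="COMPLETED")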
163
+ def create_log_entries(
164
+ self,
165
+ experiment_id: str,
166
+ logs: List[Dict[str, Any]]
167
+ ) -> Dict[str, Any]:
168
+ """
169
+ Create log entries in batch.
170
+
171
+ Supports both single log and multiple logs via array.
172
+
173
+ Args:
174
+ experiment_id: Experiment ID (Snowflake ID)
175
+ logs: List of log entries, each with fields:
176
+ - timestamp: ISO 8601 string
177
+ - level: "info"|"warn"|"error"|"debug"|"fatal"
178
+ - message: Log message string
179
+ - metadata: Optional dict
180
+
181
+ Returns:
182
+ Response dict:
183
+ {
184
+ "created": 1,
185
+ "startSequence": 42,
186
+ "endSequence": 42,
187
+ "experimentId": "123456789"
188
+ }
189
+
190
+ Raises:
191
+ httpx.HTTPStatusError: If request fails
192
+ """
193
+ response = self._client.post(
194
+ f"/experiments/{experiment_id}/logs",
195
+ json={"logs": logs}
196
+ )
197
+ response.raise_for_status()
198
+ return response.json()
199
+
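Continuing the sketch above, a batched log write might look like this (timestamps and messages are illustrative):

    from datetime import datetime, timezone

    now = datetime.now(timezone.utc).isoformat()  # ISO 8601
    client.create_log_entries(experiment_id, logs=[
        {"timestamp": now, "level": "info", "message": "epoch 1 finished"},
        {"timestamp": now, "level": "warn", "message": "validation loss plateau",
         "metadata": {"epoch": 1}},
    ])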
200
+ def set_parameters(
201
+ self,
202
+ experiment_id: str,
203
+ data: Dict[str, Any]
204
+ ) -> Dict[str, Any]:
205
+ """
206
+ Set/merge parameters for an experiment.
207
+
208
+ Always merges with existing parameters (upsert behavior).
209
+
210
+ Args:
211
+ experiment_id: Experiment ID (Snowflake ID)
212
+ data: Flattened parameter dict with dot notation
213
+ Example: {"model.lr": 0.001, "model.batch_size": 32}
214
+
215
+ Returns:
216
+ Response dict:
217
+ {
218
+ "id": "snowflake_id",
219
+ "experimentId": "experiment_id",
220
+ "data": {...},
221
+ "version": 2,
222
+ "createdAt": "...",
223
+ "updatedAt": "..."
224
+ }
225
+
226
+ Raises:
227
+ httpx.HTTPStatusError: If request fails
228
+ """
229
+ response = self._client.post(
230
+ f"/experiments/{experiment_id}/parameters",
231
+ json={"data": data}
232
+ )
233
+ response.raise_for_status()
234
+ return response.json()
235
+
236
+ def get_parameters(self, experiment_id: str) -> Dict[str, Any]:
237
+ """
238
+ Get parameters for an experiment.
239
+
240
+ Args:
241
+ experiment_id: Experiment ID (Snowflake ID)
242
+
243
+ Returns:
244
+ Flattened parameter dict with dot notation
245
+ Example: {"model.lr": 0.001, "model.batch_size": 32}
246
+
247
+ Raises:
248
+ httpx.HTTPStatusError: If request fails or parameters don't exist
249
+ """
250
+ response = self._client.get(f"/experiments/{experiment_id}/parameters")
251
+ response.raise_for_status()
252
+ result = response.json()
253
+ return result.get("data", {})
254
+
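A short sketch of the upsert-then-read round trip, with placeholder hyperparameters in the dot notation the docstrings describe:

    client.set_parameters(experiment_id, {
        "model.lr": 0.001,
        "model.batch_size": 32,
    })
    # Later calls merge into the same parameter set (upsert), e.g. adding a key:
    client.set_parameters(experiment_id, {"model.dropout": 0.1})

    params = client.get_parameters(experiment_id)
    # params is the flattened dict, e.g. {"model.lr": 0.001, "model.batch_size": 32, ...}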
255
+ def upload_file(
256
+ self,
257
+ experiment_id: str,
258
+ file_path: str,
259
+ prefix: str,
260
+ filename: str,
261
+ description: Optional[str],
262
+ tags: Optional[List[str]],
263
+ metadata: Optional[Dict[str, Any]],
264
+ checksum: str,
265
+ content_type: str,
266
+ size_bytes: int
267
+ ) -> Dict[str, Any]:
268
+ """
269
+ Upload a file to an experiment.
270
+
271
+ Args:
272
+ experiment_id: Experiment ID (Snowflake ID)
273
+ file_path: Local file path
274
+ prefix: Logical path prefix
275
+ filename: Original filename
276
+ description: Optional description
277
+ tags: Optional tags
278
+ metadata: Optional metadata
279
+ checksum: SHA256 checksum
280
+ content_type: MIME type
281
+ size_bytes: File size in bytes
282
+
283
+ Returns:
284
+ File metadata dict
285
+
286
+ Raises:
287
+ httpx.HTTPStatusError: If request fails
288
+ """
289
+ # Prepare multipart form data
290
+ # Read the file content into memory up front before building the multipart body
291
+ with open(file_path, "rb") as f:
292
+ file_content = f.read()
293
+
294
+ files = {"file": (filename, file_content, content_type)}
295
+ data = {
296
+ "prefix": prefix,
297
+ "checksum": checksum,
298
+ "sizeBytes": str(size_bytes),
299
+ }
300
+ if description:
301
+ data["description"] = description
302
+ if tags:
303
+ data["tags"] = ",".join(tags)
304
+ if metadata:
305
+ import json
306
+ data["metadata"] = json.dumps(metadata)
307
+
308
+ # httpx will automatically set multipart/form-data content-type
309
+ response = self._client.post(
310
+ f"/experiments/{experiment_id}/files",
311
+ files=files,
312
+ data=data
313
+ )
314
+
315
+ response.raise_for_status()
316
+ return response.json()
317
+
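An upload sketch, assuming the server expects a hex-encoded SHA-256 digest for `checksum` (the docstring only says "SHA256 checksum") and guessing the MIME type locally; the path and metadata are placeholders:

    import hashlib
    import mimetypes
    import os

    path = "outputs/model.pt"
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()  # assumed hex format

    client.upload_file(
        experiment_id,
        file_path=path,
        prefix="checkpoints",
        filename=os.path.basename(path),
        description="final checkpoint",
        tags=["checkpoint"],
        metadata={"epoch": 10},
        checksum=digest,
        content_type=mimetypes.guess_type(path)[0] or "application/octet-stream",
        size_bytes=os.path.getsize(path),
    )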
318
+ def list_files(
319
+ self,
320
+ experiment_id: str,
321
+ prefix: Optional[str] = None,
322
+ tags: Optional[List[str]] = None
323
+ ) -> List[Dict[str, Any]]:
324
+ """
325
+ List files in an experiment.
326
+
327
+ Args:
328
+ experiment_id: Experiment ID (Snowflake ID)
329
+ prefix: Optional prefix filter
330
+ tags: Optional tags filter
331
+
332
+ Returns:
333
+ List of file metadata dicts
334
+
335
+ Raises:
336
+ httpx.HTTPStatusError: If request fails
337
+ """
338
+ params = {}
339
+ if prefix:
340
+ params["prefix"] = prefix
341
+ if tags:
342
+ params["tags"] = ",".join(tags)
343
+
344
+ response = self._client.get(
345
+ f"/experiments/{experiment_id}/files",
346
+ params=params
347
+ )
348
+ response.raise_for_status()
349
+ result = response.json()
350
+ return result.get("files", [])
351
+
352
+ def get_file(self, experiment_id: str, file_id: str) -> Dict[str, Any]:
353
+ """
354
+ Get file metadata.
355
+
356
+ Args:
357
+ experiment_id: Experiment ID (Snowflake ID)
358
+ file_id: File ID (Snowflake ID)
359
+
360
+ Returns:
361
+ File metadata dict
362
+
363
+ Raises:
364
+ httpx.HTTPStatusError: If request fails
365
+ """
366
+ response = self._client.get(f"/experiments/{experiment_id}/files/{file_id}")
367
+ response.raise_for_status()
368
+ return response.json()
369
+
370
+ def download_file(
371
+ self,
372
+ experiment_id: str,
373
+ file_id: str,
374
+ dest_path: Optional[str] = None
375
+ ) -> str:
376
+ """
377
+ Download a file from an experiment.
378
+
379
+ Args:
380
+ experiment_id: Experiment ID (Snowflake ID)
381
+ file_id: File ID (Snowflake ID)
382
+ dest_path: Optional destination path (defaults to original filename)
383
+
384
+ Returns:
385
+ Path to downloaded file
386
+
387
+ Raises:
388
+ httpx.HTTPStatusError: If request fails
389
+ ValueError: If checksum verification fails
390
+ """
391
+ # Get file metadata first to get filename and checksum
392
+ file_metadata = self.get_file(experiment_id, file_id)
393
+ filename = file_metadata["filename"]
394
+ expected_checksum = file_metadata["checksum"]
395
+
396
+ # Determine destination path
397
+ if dest_path is None:
398
+ dest_path = filename
399
+
400
+ # Download file
401
+ response = self._client.get(
402
+ f"/experiments/{experiment_id}/files/{file_id}/download"
403
+ )
404
+ response.raise_for_status()
405
+
406
+ # Write to file
407
+ with open(dest_path, "wb") as f:
408
+ f.write(response.content)
409
+
410
+ # Verify checksum
411
+ from .files import verify_checksum
412
+ if not verify_checksum(dest_path, expected_checksum):
413
+ # Delete corrupted file
414
+ import os
415
+ os.remove(dest_path)
416
+ raise ValueError(f"Checksum verification failed for file {file_id}")
417
+
418
+ return dest_path
419
+
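Listing files and downloading them back, as a sketch; the "id" and "filename" keys on each listed file are an assumption based on the file fields selected by the GraphQL queries later in this module:

    files = client.list_files(experiment_id, prefix="checkpoints")
    for f in files:
        # "id" / "filename" keys assumed; the REST docstring only promises metadata dicts
        local_path = client.download_file(experiment_id, f["id"], dest_path=f["filename"])
        print("downloaded", local_path)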
420
+ def delete_file(self, experiment_id: str, file_id: str) -> Dict[str, Any]:
421
+ """
422
+ Delete a file (soft delete).
423
+
424
+ Args:
425
+ experiment_id: Experiment ID (Snowflake ID)
426
+ file_id: File ID (Snowflake ID)
427
+
428
+ Returns:
429
+ Dict with id and deletedAt
430
+
431
+ Raises:
432
+ httpx.HTTPStatusError: If request fails
433
+ """
434
+ response = self._client.delete(f"/experiments/{experiment_id}/files/{file_id}")
435
+ response.raise_for_status()
436
+ return response.json()
437
+
438
+ def update_file(
439
+ self,
440
+ experiment_id: str,
441
+ file_id: str,
442
+ description: Optional[str] = None,
443
+ tags: Optional[List[str]] = None,
444
+ metadata: Optional[Dict[str, Any]] = None
445
+ ) -> Dict[str, Any]:
446
+ """
447
+ Update file metadata.
448
+
449
+ Args:
450
+ experiment_id: Experiment ID (Snowflake ID)
451
+ file_id: File ID (Snowflake ID)
452
+ description: Optional description
453
+ tags: Optional tags
454
+ metadata: Optional metadata
455
+
456
+ Returns:
457
+ Updated file metadata dict
458
+
459
+ Raises:
460
+ httpx.HTTPStatusError: If request fails
461
+ """
462
+ payload = {}
463
+ if description is not None:
464
+ payload["description"] = description
465
+ if tags is not None:
466
+ payload["tags"] = tags
467
+ if metadata is not None:
468
+ payload["metadata"] = metadata
469
+
470
+ response = self._client.patch(
471
+ f"/experiments/{experiment_id}/files/{file_id}",
472
+ json=payload
473
+ )
474
+ response.raise_for_status()
475
+ return response.json()
476
+
477
+ def append_to_metric(
478
+ self,
479
+ experiment_id: str,
480
+ metric_name: str,
481
+ data: Dict[str, Any],
482
+ description: Optional[str] = None,
483
+ tags: Optional[List[str]] = None,
484
+ metadata: Optional[Dict[str, Any]] = None
485
+ ) -> Dict[str, Any]:
486
+ """
487
+ Append a single data point to a metric.
488
+
489
+ Args:
490
+ experiment_id: Experiment ID (Snowflake ID)
491
+ metric_name: Metric name (unique within experiment)
492
+ data: Data point (flexible schema)
493
+ description: Optional metric description
494
+ tags: Optional tags
495
+ metadata: Optional metadata
496
+
497
+ Returns:
498
+ Dict with metricId, index, bufferedDataPoints, chunkSize
499
+
500
+ Raises:
501
+ httpx.HTTPStatusError: If request fails
502
+ """
503
+ payload = {"data": data}
504
+ if description:
505
+ payload["description"] = description
506
+ if tags:
507
+ payload["tags"] = tags
508
+ if metadata:
509
+ payload["metadata"] = metadata
510
+
511
+ response = self._client.post(
512
+ f"/experiments/{experiment_id}/metrics/{metric_name}/append",
513
+ json=payload
514
+ )
515
+ response.raise_for_status()
516
+ return response.json()
517
+
518
+ def append_batch_to_metric(
519
+ self,
520
+ experiment_id: str,
521
+ metric_name: str,
522
+ data_points: List[Dict[str, Any]],
523
+ description: Optional[str] = None,
524
+ tags: Optional[List[str]] = None,
525
+ metadata: Optional[Dict[str, Any]] = None
526
+ ) -> Dict[str, Any]:
527
+ """
528
+ Append multiple data points to a metric in batch.
529
+
530
+ Args:
531
+ experiment_id: Experiment ID (Snowflake ID)
532
+ metric_name: Metric name (unique within experiment)
533
+ data_points: List of data points
534
+ description: Optional metric description
535
+ tags: Optional tags
536
+ metadata: Optional metadata
537
+
538
+ Returns:
539
+ Dict with metricId, startIndex, endIndex, count, bufferedDataPoints, chunkSize
540
+
541
+ Raises:
542
+ httpx.HTTPStatusError: If request fails
543
+ """
544
+ payload = {"dataPoints": data_points}
545
+ if description:
546
+ payload["description"] = description
547
+ if tags:
548
+ payload["tags"] = tags
549
+ if metadata:
550
+ payload["metadata"] = metadata
551
+
552
+ response = self._client.post(
553
+ f"/experiments/{experiment_id}/metrics/{metric_name}/append-batch",
554
+ json=payload
555
+ )
556
+ response.raise_for_status()
557
+ return response.json()
558
+
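A metric-logging sketch; the data schema is flexible, so the "step"/"value" fields here are just one possible choice:

    # One point at a time
    client.append_to_metric(experiment_id, "train_loss",
                            data={"step": 1, "value": 0.93})

    # Or buffered by the caller and flushed as a batch
    points = [{"step": s, "value": 1.0 / (s + 1)} for s in range(100)]
    client.append_batch_to_metric(experiment_id, "train_loss",
                                  data_points=points,
                                  description="training loss per step")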
559
+ def read_metric_data(
560
+ self,
561
+ experiment_id: str,
562
+ metric_name: str,
563
+ start_index: int = 0,
564
+ limit: int = 1000
565
+ ) -> Dict[str, Any]:
566
+ """
567
+ Read data points from a metric.
568
+
569
+ Args:
570
+ experiment_id: Experiment ID (Snowflake ID)
571
+ metric_name: Metric name
572
+ start_index: Starting index (default 0)
573
+ limit: Max points to read (default 1000, max 10000)
574
+
575
+ Returns:
576
+ Dict with data, startIndex, endIndex, total, hasMore
577
+
578
+ Raises:
579
+ httpx.HTTPStatusError: If request fails
580
+ """
581
+ response = self._client.get(
582
+ f"/experiments/{experiment_id}/metrics/{metric_name}/data",
583
+ params={"startIndex": start_index, "limit": limit}
584
+ )
585
+ response.raise_for_status()
586
+ return response.json()
587
+
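Reading a metric back can follow the hasMore flag, as in this sketch; the "data", "endIndex", and "hasMore" key names come from the docstring above, and treating endIndex as the last returned index is an assumption:

    all_points, start = [], 0
    while True:
        page = client.read_metric_data(experiment_id, "train_loss",
                                       start_index=start, limit=1000)
        all_points.extend(page["data"])
        if not page.get("hasMore"):
            break
        start = page["endIndex"] + 1  # assumed: endIndex is the last returned index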
588
+ def get_metric_stats(
589
+ self,
590
+ experiment_id: str,
591
+ metric_name: str
592
+ ) -> Dict[str, Any]:
593
+ """
594
+ Get metric statistics and metadata.
595
+
596
+ Args:
597
+ experiment_id: Experiment ID (Snowflake ID)
598
+ metric_name: Metric name
599
+
600
+ Returns:
601
+ Dict with metric stats (totalDataPoints, bufferedDataPoints, etc.)
602
+
603
+ Raises:
604
+ httpx.HTTPStatusError: If request fails
605
+ """
606
+ response = self._client.get(
607
+ f"/experiments/{experiment_id}/metrics/{metric_name}/stats"
608
+ )
609
+ response.raise_for_status()
610
+ return response.json()
611
+
612
+ def list_metrics(
613
+ self,
614
+ experiment_id: str
615
+ ) -> List[Dict[str, Any]]:
616
+ """
617
+ List all metrics in an experiment.
618
+
619
+ Args:
620
+ experiment_id: Experiment ID (Snowflake ID)
621
+
622
+ Returns:
623
+ List of metric summaries
624
+
625
+ Raises:
626
+ httpx.HTTPStatusError: If request fails
627
+ """
628
+ response = self._client.get(f"/experiments/{experiment_id}/metrics")
629
+ response.raise_for_status()
630
+ return response.json()["metrics"]
631
+
632
+ def graphql_query(self, query: str, variables: Optional[Dict] = None) -> Dict[str, Any]:
633
+ """
634
+ Execute a GraphQL query.
635
+
636
+ Args:
637
+ query: GraphQL query string
638
+ variables: Optional variables for the query
639
+
640
+ Returns:
641
+ Query result data
642
+
643
+ Raises:
644
+ httpx.HTTPStatusError: If request fails
645
+ Exception: If GraphQL returns errors
646
+ """
647
+ response = self._graphql_client.post(
648
+ "/graphql",
649
+ json={"query": query, "variables": variables or {}}
650
+ )
651
+ response.raise_for_status()
652
+ result = response.json()
653
+
654
+ if "errors" in result:
655
+ raise Exception(f"GraphQL errors: {result['errors']}")
656
+
657
+ return result.get("data", {})
658
+
659
+ def list_projects_graphql(self) -> List[Dict[str, Any]]:
660
+ """
661
+ List all projects via GraphQL.
662
+
663
+ Namespace is automatically inferred from JWT token on the server.
664
+
665
+ Returns:
666
+ List of project dicts with experimentCount
667
+
668
+ Raises:
669
+ httpx.HTTPStatusError: If request fails
670
+ """
671
+ query = """
672
+ query Projects {
673
+ projects {
674
+ id
675
+ name
676
+ slug
677
+ description
678
+ tags
679
+ }
680
+ }
681
+ """
682
+ result = self.graphql_query(query, {})
683
+ projects = result.get("projects", [])
684
+
685
+ # For each project, count experiments
686
+ for project in projects:
687
+ exp_query = """
688
+ query ExperimentsCount($projectSlug: String!) {
689
+ experiments(projectSlug: $projectSlug) {
690
+ id
691
+ }
692
+ }
693
+ """
694
+ exp_result = self.graphql_query(exp_query, {"projectSlug": project['slug']})
695
+ experiments = exp_result.get("experiments", [])
696
+ project['experimentCount'] = len(experiments)
697
+
698
+ return projects
699
+
700
+ def list_experiments_graphql(
701
+ self, project_slug: str, status: Optional[str] = None
702
+ ) -> List[Dict[str, Any]]:
703
+ """
704
+ List experiments in a project via GraphQL.
705
+
706
+ Namespace is automatically inferred from JWT token on the server.
707
+
708
+ Args:
709
+ project_slug: Project slug
710
+ status: Optional experiment status filter (RUNNING, COMPLETED, FAILED, CANCELLED)
711
+
712
+ Returns:
713
+ List of experiment dicts with metadata
714
+
715
+ Raises:
716
+ httpx.HTTPStatusError: If request fails
717
+ """
718
+ query = """
719
+ query Experiments($projectSlug: String!, $status: ExperimentStatus) {
720
+ experiments(projectSlug: $projectSlug, status: $status) {
721
+ id
722
+ name
723
+ description
724
+ tags
725
+ status
726
+ startedAt
727
+ endedAt
728
+ metadata
729
+ project {
730
+ slug
731
+ }
732
+ logMetadata {
733
+ totalLogs
734
+ }
735
+ metrics {
736
+ name
737
+ metricMetadata {
738
+ totalDataPoints
739
+ }
740
+ }
741
+ files {
742
+ id
743
+ filename
744
+ path
745
+ contentType
746
+ sizeBytes
747
+ checksum
748
+ description
749
+ tags
750
+ metadata
751
+ }
752
+ parameters {
753
+ id
754
+ data
755
+ }
756
+ }
757
+ }
758
+ """
759
+ variables = {"projectSlug": project_slug}
760
+ if status is not None:
761
+ variables["status"] = status
762
+
763
+ result = self.graphql_query(query, variables)
764
+ return result.get("experiments", [])
765
+
766
+ def get_experiment_graphql(
767
+ self, project_slug: str, experiment_name: str
768
+ ) -> Optional[Dict[str, Any]]:
769
+ """
770
+ Get a single experiment via GraphQL.
771
+
772
+ Namespace is automatically inferred from JWT token on the server.
773
+
774
+ Args:
775
+ project_slug: Project slug
776
+ experiment_name: Experiment name
777
+
778
+ Returns:
779
+ Experiment dict with metadata, or None if not found
780
+
781
+ Raises:
782
+ httpx.HTTPStatusError: If request fails
783
+ """
784
+ query = """
785
+ query Experiment($projectSlug: String!, $experimentName: String!) {
786
+ experiment(projectSlug: $projectSlug, experimentName: $experimentName) {
787
+ id
788
+ name
789
+ description
790
+ tags
791
+ status
792
+ metadata
793
+ project {
794
+ slug
795
+ }
796
+ logMetadata {
797
+ totalLogs
798
+ }
799
+ metrics {
800
+ name
801
+ metricMetadata {
802
+ totalDataPoints
803
+ }
804
+ }
805
+ files {
806
+ id
807
+ filename
808
+ path
809
+ contentType
810
+ sizeBytes
811
+ checksum
812
+ description
813
+ tags
814
+ metadata
815
+ }
816
+ parameters {
817
+ id
818
+ data
819
+ }
820
+ }
821
+ }
822
+ """
823
+ variables = {
824
+ "projectSlug": project_slug,
825
+ "experimentName": experiment_name
826
+ }
827
+
828
+ result = self.graphql_query(query, variables)
829
+ return result.get("experiment")
830
+
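A read-side sketch via GraphQL, with placeholder slug and experiment name; the selected fields match the query above:

    exp = client.get_experiment_graphql("mnist", "baseline-run")
    if exp is None:
        print("experiment not found")
    else:
        print(exp["status"], [m["name"] for m in exp["metrics"]])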
831
+ def download_file_streaming(
832
+ self, experiment_id: str, file_id: str, dest_path: str
833
+ ) -> str:
834
+ """
835
+ Download a file with streaming for large files.
836
+
837
+ Args:
838
+ experiment_id: Experiment ID (Snowflake ID)
839
+ file_id: File ID (Snowflake ID)
840
+ dest_path: Destination path to save file
841
+
842
+ Returns:
843
+ Path to downloaded file
844
+
845
+ Raises:
846
+ httpx.HTTPStatusError: If request fails
847
+ ValueError: If checksum verification fails
848
+ """
849
+ # Get metadata first for checksum
850
+ file_metadata = self.get_file(experiment_id, file_id)
851
+ expected_checksum = file_metadata["checksum"]
852
+
853
+ # Stream download
854
+ with self._client.stream("GET", f"/experiments/{experiment_id}/files/{file_id}/download") as response:
855
+ response.raise_for_status()
856
+
857
+ with open(dest_path, "wb") as f:
858
+ for chunk in response.iter_bytes(chunk_size=8192):
859
+ f.write(chunk)
860
+
861
+ # Verify checksum
862
+ from .files import verify_checksum
863
+ if not verify_checksum(dest_path, expected_checksum):
864
+ import os
865
+ os.remove(dest_path)
866
+ raise ValueError(f"Checksum verification failed for file {file_id}")
867
+
868
+ return dest_path
869
+
870
+ def query_logs(
871
+ self,
872
+ experiment_id: str,
873
+ limit: Optional[int] = None,
874
+ offset: Optional[int] = None,
875
+ order_by: Optional[str] = None,
876
+ order: Optional[str] = None,
877
+ level: Optional[List[str]] = None,
878
+ start_time: Optional[str] = None,
879
+ end_time: Optional[str] = None,
880
+ search: Optional[str] = None,
881
+ ) -> Dict[str, Any]:
882
+ """
883
+ Query logs for an experiment.
884
+
885
+ Args:
886
+ experiment_id: Experiment ID
887
+ limit: Maximum number of logs to return
888
+ offset: Number of logs to skip
889
+ order_by: Field to order by (timestamp or sequenceNumber)
890
+ order: Sort order (asc or desc)
891
+ level: List of log levels to filter by
892
+ start_time: Filter logs after this timestamp
893
+ end_time: Filter logs before this timestamp
894
+ search: Search query for log messages
895
+
896
+ Returns:
897
+ Dict with logs array and pagination info
898
+
899
+ Raises:
900
+ httpx.HTTPStatusError: If request fails
901
+ """
902
+ params: Dict[str, str] = {}
903
+
904
+ if limit is not None:
905
+ params["limit"] = str(limit)
906
+ if offset is not None:
907
+ params["offset"] = str(offset)
908
+ if order_by is not None:
909
+ params["orderBy"] = order_by
910
+ if order is not None:
911
+ params["order"] = order
912
+ if level is not None:
913
+ params["level"] = ",".join(level)
914
+ if start_time is not None:
915
+ params["startTime"] = start_time
916
+ if end_time is not None:
917
+ params["endTime"] = end_time
918
+ if search is not None:
919
+ params["search"] = search
920
+
921
+ response = self._client.get(f"/experiments/{experiment_id}/logs", params=params)
922
+ response.raise_for_status()
923
+ return response.json()
924
+
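A filtered log query, as a sketch; the timestamps are ISO 8601 placeholders and the "logs" key follows the docstring above:

    result = client.query_logs(
        experiment_id,
        level=["error", "fatal"],
        start_time="2024-01-01T00:00:00Z",
        order_by="timestamp",
        order="desc",
        limit=50,
    )
    for entry in result["logs"]:
        print(entry)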
925
+ def get_metric_data(
926
+ self,
927
+ experiment_id: str,
928
+ metric_name: str,
929
+ start_index: Optional[int] = None,
930
+ limit: Optional[int] = None,
931
+ buffer_only: bool = False,
932
+ ) -> Dict[str, Any]:
933
+ """
934
+ Get data points for a metric.
935
+
936
+ Args:
937
+ experiment_id: Experiment ID
938
+ metric_name: Name of the metric
939
+ start_index: Starting index for pagination
940
+ limit: Maximum number of data points to return
941
+ buffer_only: If True, only fetch buffer data (skip chunks)
942
+
943
+ Returns:
944
+ Dict with dataPoints array and pagination info
945
+
946
+ Raises:
947
+ httpx.HTTPStatusError: If request fails
948
+ """
949
+ params: Dict[str, str] = {}
950
+
951
+ if start_index is not None:
952
+ params["startIndex"] = str(start_index)
953
+ if limit is not None:
954
+ params["limit"] = str(limit)
955
+ if buffer_only:
956
+ params["bufferOnly"] = "true"
957
+
958
+ response = self._client.get(
959
+ f"/experiments/{experiment_id}/metrics/{metric_name}/data",
960
+ params=params
961
+ )
962
+ response.raise_for_status()
963
+ return response.json()
964
+
965
+ def download_metric_chunk(
966
+ self,
967
+ experiment_id: str,
968
+ metric_name: str,
969
+ chunk_number: int,
970
+ ) -> Dict[str, Any]:
971
+ """
972
+ Download a specific chunk by chunk number.
973
+
974
+ Args:
975
+ experiment_id: Experiment ID
976
+ metric_name: Name of the metric
977
+ chunk_number: Chunk number to download
978
+
979
+ Returns:
980
+ Dict with chunk data including chunkNumber, startIndex, endIndex, dataCount, and data array
981
+
982
+ Raises:
983
+ httpx.HTTPStatusError: If request fails
984
+ """
985
+ response = self._client.get(
986
+ f"/experiments/{experiment_id}/metrics/{metric_name}/chunks/{chunk_number}"
987
+ )
988
+ response.raise_for_status()
989
+ return response.json()
990
+
991
+ def close(self):
992
+ """Close the HTTP clients."""
993
+ if self._rest_client is not None:
+     self._rest_client.close()
994
+ if self._gql_client is not None:
+     self._gql_client.close()
995
+
996
+ def __enter__(self):
997
+ """Context manager entry."""
998
+ return self
999
+
1000
+ def __exit__(self, exc_type, exc_val, exc_tb):
1001
+ """Context manager exit."""
1002
+ self.close()
1003
+ return False
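
Finally, since the class implements the context-manager protocol, a sketch of scoped usage with a placeholder URL and experiment ID:

    from ml_dash.client import RemoteClient

    with RemoteClient("http://localhost:3000") as client:
        for metric in client.list_metrics("123456789"):  # placeholder experiment ID
            print(metric)
    # close() is called automatically on exit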