ml-dash 0.5.7__py3-none-any.whl → 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ml_dash/client.py CHANGED
@@ -581,6 +581,366 @@ class RemoteClient:
         response.raise_for_status()
         return response.json()["metrics"]
 
+    def graphql_query(self, query: str, variables: Optional[Dict] = None) -> Dict[str, Any]:
+        """
+        Execute a GraphQL query.
+
+        Args:
+            query: GraphQL query string
+            variables: Optional variables for the query
+
+        Returns:
+            Query result data
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+            Exception: If GraphQL returns errors
+        """
+        response = self._client.post(
+            "/graphql",
+            json={"query": query, "variables": variables or {}}
+        )
+        response.raise_for_status()
+        result = response.json()
+
+        if "errors" in result:
+            raise Exception(f"GraphQL errors: {result['errors']}")
+
+        return result.get("data", {})
+
+    def list_projects_graphql(self, namespace_slug: str) -> List[Dict[str, Any]]:
+        """
+        List all projects in a namespace via GraphQL.
+
+        Args:
+            namespace_slug: Namespace slug
+
+        Returns:
+            List of project dicts with experimentCount
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+        """
+        query = """
+            query Projects($namespaceSlug: String!) {
+                projects(namespaceSlug: $namespaceSlug) {
+                    id
+                    name
+                    slug
+                    description
+                    tags
+                }
+            }
+        """
+        result = self.graphql_query(query, {"namespaceSlug": namespace_slug})
+        projects = result.get("projects", [])
+
+        # For each project, count experiments
+        for project in projects:
+            exp_query = """
+                query ExperimentsCount($namespaceSlug: String!, $projectSlug: String!) {
+                    experiments(namespaceSlug: $namespaceSlug, projectSlug: $projectSlug) {
+                        id
+                    }
+                }
+            """
+            exp_result = self.graphql_query(exp_query, {
+                "namespaceSlug": namespace_slug,
+                "projectSlug": project['slug']
+            })
+            experiments = exp_result.get("experiments", [])
+            project['experimentCount'] = len(experiments)
+
+        return projects
+
+    def list_experiments_graphql(
+        self, namespace_slug: str, project_slug: str, status: Optional[str] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        List experiments in a project via GraphQL.
+
+        Args:
+            namespace_slug: Namespace slug
+            project_slug: Project slug
+            status: Optional experiment status filter (RUNNING, COMPLETED, FAILED, CANCELLED)
+
+        Returns:
+            List of experiment dicts with metadata
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+        """
+        query = """
+            query Experiments($namespaceSlug: String!, $projectSlug: String!, $status: ExperimentStatus) {
+                experiments(namespaceSlug: $namespaceSlug, projectSlug: $projectSlug, status: $status) {
+                    id
+                    name
+                    description
+                    tags
+                    status
+                    startedAt
+                    endedAt
+                    metadata
+                    project {
+                        slug
+                    }
+                    logMetadata {
+                        totalLogs
+                    }
+                    metrics {
+                        name
+                        metricMetadata {
+                            totalDataPoints
+                        }
+                    }
+                    files {
+                        id
+                        filename
+                        path
+                        contentType
+                        sizeBytes
+                        checksum
+                        description
+                        tags
+                        metadata
+                    }
+                    parameters {
+                        id
+                        data
+                    }
+                }
+            }
+        """
+        result = self.graphql_query(query, {
+            "namespaceSlug": namespace_slug,
+            "projectSlug": project_slug,
+            "status": status
+        })
+        return result.get("experiments", [])
+
+    def get_experiment_graphql(
+        self, namespace_slug: str, project_slug: str, experiment_name: str
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Get a single experiment via GraphQL.
+
+        Args:
+            namespace_slug: Namespace slug
+            project_slug: Project slug
+            experiment_name: Experiment name
+
+        Returns:
+            Experiment dict with metadata, or None if not found
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+        """
+        query = """
+            query Experiment($namespaceSlug: String!, $projectSlug: String!, $experimentName: String!) {
+                experiment(namespaceSlug: $namespaceSlug, projectSlug: $projectSlug, experimentName: $experimentName) {
+                    id
+                    name
+                    description
+                    tags
+                    status
+                    metadata
+                    project {
+                        slug
+                    }
+                    logMetadata {
+                        totalLogs
+                    }
+                    metrics {
+                        name
+                        metricMetadata {
+                            totalDataPoints
+                        }
+                    }
+                    files {
+                        id
+                        filename
+                        path
+                        contentType
+                        sizeBytes
+                        checksum
+                        description
+                        tags
+                        metadata
+                    }
+                    parameters {
+                        id
+                        data
+                    }
+                }
+            }
+        """
+        result = self.graphql_query(query, {
+            "namespaceSlug": namespace_slug,
+            "projectSlug": project_slug,
+            "experimentName": experiment_name
+        })
+        return result.get("experiment")
+
+    def download_file_streaming(
+        self, experiment_id: str, file_id: str, dest_path: str
+    ) -> str:
+        """
+        Download a file with streaming for large files.
+
+        Args:
+            experiment_id: Experiment ID (Snowflake ID)
+            file_id: File ID (Snowflake ID)
+            dest_path: Destination path to save file
+
+        Returns:
+            Path to downloaded file
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+            ValueError: If checksum verification fails
+        """
+        # Get metadata first for checksum
+        file_metadata = self.get_file(experiment_id, file_id)
+        expected_checksum = file_metadata["checksum"]
+
+        # Stream download
+        with self._client.stream("GET", f"/experiments/{experiment_id}/files/{file_id}/download") as response:
+            response.raise_for_status()
+
+            with open(dest_path, "wb") as f:
+                for chunk in response.iter_bytes(chunk_size=8192):
+                    f.write(chunk)
+
+        # Verify checksum
+        from .files import verify_checksum
+        if not verify_checksum(dest_path, expected_checksum):
+            import os
+            os.remove(dest_path)
+            raise ValueError(f"Checksum verification failed for file {file_id}")
+
+        return dest_path
+
+    def query_logs(
+        self,
+        experiment_id: str,
+        limit: Optional[int] = None,
+        offset: Optional[int] = None,
+        order_by: Optional[str] = None,
+        order: Optional[str] = None,
+        level: Optional[List[str]] = None,
+        start_time: Optional[str] = None,
+        end_time: Optional[str] = None,
+        search: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """
+        Query logs for an experiment.
+
+        Args:
+            experiment_id: Experiment ID
+            limit: Maximum number of logs to return
+            offset: Number of logs to skip
+            order_by: Field to order by (timestamp or sequenceNumber)
+            order: Sort order (asc or desc)
+            level: List of log levels to filter by
+            start_time: Filter logs after this timestamp
+            end_time: Filter logs before this timestamp
+            search: Search query for log messages
+
+        Returns:
+            Dict with logs array and pagination info
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+        """
+        params: Dict[str, str] = {}
+
+        if limit is not None:
+            params["limit"] = str(limit)
+        if offset is not None:
+            params["offset"] = str(offset)
+        if order_by is not None:
+            params["orderBy"] = order_by
+        if order is not None:
+            params["order"] = order
+        if level is not None:
+            params["level"] = ",".join(level)
+        if start_time is not None:
+            params["startTime"] = start_time
+        if end_time is not None:
+            params["endTime"] = end_time
+        if search is not None:
+            params["search"] = search
+
+        response = self._client.get(f"/experiments/{experiment_id}/logs", params=params)
+        response.raise_for_status()
+        return response.json()
+
+    def get_metric_data(
+        self,
+        experiment_id: str,
+        metric_name: str,
+        start_index: Optional[int] = None,
+        limit: Optional[int] = None,
+        buffer_only: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Get data points for a metric.
+
+        Args:
+            experiment_id: Experiment ID
+            metric_name: Name of the metric
+            start_index: Starting index for pagination
+            limit: Maximum number of data points to return
+            buffer_only: If True, only fetch buffer data (skip chunks)
+
+        Returns:
+            Dict with dataPoints array and pagination info
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+        """
+        params: Dict[str, str] = {}
+
+        if start_index is not None:
+            params["startIndex"] = str(start_index)
+        if limit is not None:
+            params["limit"] = str(limit)
+        if buffer_only:
+            params["bufferOnly"] = "true"
+
+        response = self._client.get(
+            f"/experiments/{experiment_id}/metrics/{metric_name}/data",
+            params=params
+        )
+        response.raise_for_status()
+        return response.json()
+
+    def download_metric_chunk(
+        self,
+        experiment_id: str,
+        metric_name: str,
+        chunk_number: int,
+    ) -> Dict[str, Any]:
+        """
+        Download a specific chunk by chunk number.
+
+        Args:
+            experiment_id: Experiment ID
+            metric_name: Name of the metric
+            chunk_number: Chunk number to download
+
+        Returns:
+            Dict with chunk data including chunkNumber, startIndex, endIndex, dataCount, and data array
+
+        Raises:
+            httpx.HTTPStatusError: If request fails
+        """
+        response = self._client.get(
+            f"/experiments/{experiment_id}/metrics/{metric_name}/chunks/{chunk_number}"
+        )
+        response.raise_for_status()
+        return response.json()
+
     def close(self):
         """Close the HTTP client."""
         self._client.close()
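
Taken together, 0.5.8 adds read-side GraphQL queries plus REST helpers for logs and metric data to RemoteClient. A minimal usage sketch follows; it assumes an already-constructed client instance, since the RemoteClient constructor and authentication setup are not part of this diff, and the namespace, project, and experiment names are placeholders.

# Sketch only: `client` is assumed to be an existing ml_dash.client.RemoteClient;
# how it is constructed and authenticated is outside this diff.
projects = client.list_projects_graphql("my-namespace")
experiments = client.list_experiments_graphql("my-namespace", "my-project", status="COMPLETED")
experiment = client.get_experiment_graphql("my-namespace", "my-project", "run-001")

if experiment is not None:
    exp_id = experiment["id"]
    # New REST helpers for logs and metric data
    logs = client.query_logs(exp_id, limit=50, level=["ERROR"], order="desc")
    points = client.get_metric_data(exp_id, "loss", start_index=0, limit=1000)
    chunk = client.download_metric_chunk(exp_id, "loss", chunk_number=0)

client.close()
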
ml_dash/config.py ADDED
@@ -0,0 +1,119 @@
+"""Configuration file management for ML-Dash CLI."""
+
+from pathlib import Path
+import json
+from typing import Optional, Dict, Any
+
+
+class Config:
+    """
+    Manages ML-Dash CLI configuration file.
+
+    Configuration is stored in ~/.ml-dash/config.json with structure:
+    {
+        "remote_url": "https://api.dash.ml",
+        "api_key": "token",
+        "default_batch_size": 100
+    }
+    """
+
+    DEFAULT_CONFIG_DIR = Path.home() / ".ml-dash"
+    CONFIG_FILE = "config.json"
+
+    def __init__(self, config_dir: Optional[Path] = None):
+        """
+        Initialize config manager.
+
+        Args:
+            config_dir: Config directory path (defaults to ~/.ml-dash)
+        """
+        self.config_dir = config_dir or self.DEFAULT_CONFIG_DIR
+        self.config_path = self.config_dir / self.CONFIG_FILE
+        self._data = self._load()
+
+    def _load(self) -> Dict[str, Any]:
+        """Load config from file."""
+        if self.config_path.exists():
+            try:
+                with open(self.config_path, "r") as f:
+                    return json.load(f)
+            except (json.JSONDecodeError, IOError):
+                # If config is corrupted, return empty dict
+                return {}
+        return {}
+
+    def save(self):
+        """Save config to file."""
+        self.config_dir.mkdir(parents=True, exist_ok=True)
+        with open(self.config_path, "w") as f:
+            json.dump(self._data, f, indent=2)
+
+    def get(self, key: str, default: Any = None) -> Any:
+        """
+        Get config value.
+
+        Args:
+            key: Config key
+            default: Default value if key not found
+
+        Returns:
+            Config value or default
+        """
+        return self._data.get(key, default)
+
+    def set(self, key: str, value: Any):
+        """
+        Set config value and save.
+
+        Args:
+            key: Config key
+            value: Config value
+        """
+        self._data[key] = value
+        self.save()
+
+    def delete(self, key: str):
+        """
+        Delete config key and save.
+
+        Args:
+            key: Config key to delete
+        """
+        if key in self._data:
+            del self._data[key]
+            self.save()
+
+    def clear(self):
+        """Clear all config and save."""
+        self._data = {}
+        self.save()
+
+    @property
+    def remote_url(self) -> Optional[str]:
+        """Get default remote URL."""
+        return self.get("remote_url")
+
+    @remote_url.setter
+    def remote_url(self, url: str):
+        """Set default remote URL."""
+        self.set("remote_url", url)
+
+    @property
+    def api_key(self) -> Optional[str]:
+        """Get default API key."""
+        return self.get("api_key")
+
+    @api_key.setter
+    def api_key(self, key: str):
+        """Set default API key."""
+        self.set("api_key", key)
+
+    @property
+    def batch_size(self) -> int:
+        """Get default batch size for uploads."""
+        return self.get("default_batch_size", 100)
+
+    @batch_size.setter
+    def batch_size(self, size: int):
+        """Set default batch size."""
+        self.set("default_batch_size", size)
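
The new Config class backs the CLI's persistent settings. A short usage sketch, based only on the class shown above; the URL and token values are placeholders, and every setter persists to ~/.ml-dash/config.json immediately.

from ml_dash.config import Config

cfg = Config()                          # loads ~/.ml-dash/config.json if present
cfg.remote_url = "https://api.dash.ml"  # placeholder URL; written via save()
cfg.api_key = "my-token"                # placeholder token
print(cfg.batch_size)                   # 100 unless "default_batch_size" was set

cfg.delete("api_key")                   # drop one key and re-save
cfg.clear()                             # reset the config file to {}
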
ml_dash/storage.py CHANGED
@@ -62,8 +62,16 @@ class LocalStorage:
         Returns:
             Path to experiment directory
         """
+        # Determine base path - include folder in hierarchy if specified
+        if folder is not None:
+            # Strip leading / to make it relative, then use as base path
+            folder_path = folder.lstrip('/')
+            base_path = self.root_path / folder_path
+        else:
+            base_path = self.root_path
+
         # Create project directory
-        project_dir = self.root_path / project
+        project_dir = base_path / project
         project_dir.mkdir(parents=True, exist_ok=True)
 
         # Create experiment directory
@@ -138,7 +146,7 @@ class LocalStorage:
             timestamp: ISO timestamp string
             metadata: Optional metadata
         """
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         logs_dir = experiment_dir / "logs"
         logs_file = logs_dir / "logs.jsonl"
         seq_file = logs_dir / ".log_sequence"
@@ -184,7 +192,7 @@ class LocalStorage:
             metric_name: Metric name
             data: Data point
         """
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         metric_file = experiment_dir / "metrics" / f"{metric_name}.jsonl"
 
         data_point = {
@@ -216,7 +224,7 @@ class LocalStorage:
             experiment: Experiment name
             data: Flattened parameter dict with dot notation (already flattened)
         """
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         params_file = experiment_dir / "parameters.json"
 
         # Read existing if present
@@ -263,7 +271,7 @@ class LocalStorage:
         Returns:
             Flattened parameter dict, or None if file doesn't exist
         """
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         params_file = experiment_dir / "parameters.json"
 
         if not params_file.exists():
@@ -315,7 +323,7 @@ class LocalStorage:
         import shutil
         from .files import generate_snowflake_id
 
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         files_dir = experiment_dir / "files"
         metadata_file = files_dir / ".files_metadata.json"
 
@@ -411,7 +419,7 @@ class LocalStorage:
         Returns:
             List of file metadata dicts (only non-deleted files)
         """
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         metadata_file = experiment_dir / "files" / ".files_metadata.json"
 
         if not metadata_file.exists():
@@ -464,7 +472,7 @@ class LocalStorage:
         import shutil
         from .files import verify_checksum
 
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         files_dir = experiment_dir / "files"
         metadata_file = files_dir / ".files_metadata.json"
 
@@ -529,7 +537,7 @@ class LocalStorage:
         Raises:
             FileNotFoundError: If file not found
         """
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         metadata_file = experiment_dir / "files" / ".files_metadata.json"
 
         if not metadata_file.exists():
@@ -588,7 +596,7 @@ class LocalStorage:
         Raises:
             FileNotFoundError: If file not found
         """
-        experiment_dir = self.root_path / project / experiment
+        experiment_dir = self._get_experiment_dir(project, experiment)
         metadata_file = experiment_dir / "files" / ".files_metadata.json"
 
         if not metadata_file.exists():
@@ -628,9 +636,52 @@ class LocalStorage:
 
         return updated_file
 
-    def _get_experiment_dir(self, project: str, experiment: str) -> Path:
-        """Get experiment directory path."""
-        return self.root_path / project / experiment
+    def _get_experiment_dir(self, project: str, experiment: str, folder: Optional[str] = None) -> Path:
+        """
+        Get experiment directory path.
+
+        If folder is not provided, tries to read it from experiment.json metadata.
+        Falls back to root_path/project/experiment if not found.
+        """
+        # If folder explicitly provided, use it
+        if folder is not None:
+            folder_path = folder.lstrip('/')
+            return self.root_path / folder_path / project / experiment
+
+        # Try to read folder from experiment metadata
+        # Check common locations where experiment might exist
+        possible_paths = []
+
+        # First, try without folder (most common case)
+        default_path = self.root_path / project / experiment
+        possible_paths.append(default_path)
+
+        # Then scan for experiment.json in subdirectories (for folder-based experiments)
+        try:
+            for item in self.root_path.rglob(f"*/{project}/{experiment}/experiment.json"):
+                exp_dir = item.parent
+                if exp_dir not in [p for p in possible_paths]:
+                    possible_paths.insert(0, exp_dir)  # Prioritize found paths
+        except:
+            pass
+
+        # Check each possible path for experiment.json with folder metadata
+        for path in possible_paths:
+            exp_json = path / "experiment.json"
+            if exp_json.exists():
+                try:
+                    with open(exp_json, 'r') as f:
+                        metadata = json.load(f)
+                        if metadata.get('folder'):
+                            folder_path = metadata['folder'].lstrip('/')
+                            return self.root_path / folder_path / project / experiment
+                except:
+                    pass
+                # Found experiment.json, use this path even if no folder metadata
+                return path
+
+        # Fallback to default path
+        return default_path
 
     def append_to_metric(
         self,
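
The storage changes thread an optional folder through directory resolution: when a folder is set (passed explicitly or recorded in experiment.json), it is inserted between the storage root and the project. A standalone sketch of the resulting layout, using placeholder paths and a hypothetical helper name rather than the LocalStorage class itself:

from pathlib import Path
from typing import Optional

def resolve_experiment_dir(root: Path, project: str, experiment: str,
                           folder: Optional[str] = None) -> Path:
    # Mirrors the folder-aware layout above: a leading "/" is stripped so the
    # folder stays relative to the storage root.
    if folder is not None:
        return root / folder.lstrip("/") / project / experiment
    return root / project / experiment

print(resolve_experiment_dir(Path("runs"), "mnist", "baseline"))
# -> runs/mnist/baseline
print(resolve_experiment_dir(Path("runs"), "mnist", "baseline", folder="/team/vision"))
# -> runs/team/vision/mnist/baseline
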
ml_dash-0.5.7.dist-info/METADATA → ml_dash-0.5.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: ml-dash
-Version: 0.5.7
+Version: 0.5.8
 Summary: ML experiment tracking and data storage
 Keywords: machine-learning,experiment-tracking,mlops,data-storage
 Author: Ge Yang, Tom Tao
@@ -41,6 +41,7 @@ Requires-Dist: pyjwt>=2.8.0
 Requires-Dist: imageio>=2.31.0
 Requires-Dist: imageio-ffmpeg>=0.4.9
 Requires-Dist: scikit-image>=0.21.0
+Requires-Dist: rich>=13.0.0
 Requires-Dist: pytest>=8.0.0 ; extra == 'dev'
 Requires-Dist: pytest-asyncio>=0.23.0 ; extra == 'dev'
 Requires-Dist: sphinx>=7.2.0 ; extra == 'dev'
ml_dash-0.5.8.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+ml_dash/__init__.py,sha256=o_LrWVJBY_VkUGhSBs5wdb_NqEsHD1AK9HGsjZGxHxQ,1414
+ml_dash/auto_start.py,sha256=c3XcXFpZdvjtWauEoK5043Gw9k0L_5IDq4fdiB2ha88,959
+ml_dash/cli.py,sha256=lyWVVhmsflSXQt2UCDb8IqC-mSRQwwlB2l1qEIYBUb8,1743
+ml_dash/cli_commands/__init__.py,sha256=bjAmV7MsW-bhtW_4SnLJ0Cfkt9h82vMDC8ebW1Ke8KE,38
+ml_dash/cli_commands/download.py,sha256=TomyUFwelqfQHfh60K7rCyCwEZVp1CkMToogprgC64Q,29614
+ml_dash/cli_commands/list.py,sha256=Cx9yWsTV5HPaevYpQ6BugCEr5z_4bhxQ0T51OXExuTU,10900
+ml_dash/cli_commands/upload.py,sha256=jo6FVdbuokTz64rjvOEWWhLBzlh2gM0Ru4TRNv9hX60,47943
+ml_dash/client.py,sha256=31C2Kb3KULwhrb3UlpCFY7HDA3-kvj3XVmWUvXEvQHY,27993
+ml_dash/config.py,sha256=iQbHCu4lM_Sg8YadyEXSJ6Ht9yKIJHN26L7L-rMH4gE,3112
+ml_dash/experiment.py,sha256=K36HkHJb_O2-vdaPPOCq74_2nZtfiLaS0o7qhTntD8Q,30646
+ml_dash/files.py,sha256=JptjoxGJiXJ-nkj6C7vDhw-cgJRCB0cHt_SIUJG665o,23024
+ml_dash/log.py,sha256=0yXaNnFwYeBI3tRLHX3kkqWRpg0MbSGwmgjnOfsElCk,5350
+ml_dash/metric.py,sha256=c0Zl0wEufmQuVfwIMvrORLwqe92Iaf0PfKRgmlgQWzQ,10343
+ml_dash/params.py,sha256=xaByDSVar4D1pZqxTANkMPeZTL5-V7ewJe5TXfPLhMQ,5980
+ml_dash/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ml_dash/storage.py,sha256=eldh_gMivwTlVhSosu-KYrHrG9mmEMspc7MfY5L7Wzc,32991
+ml_dash-0.5.8.dist-info/WHEEL,sha256=z-mOpxbJHqy3cq6SvUThBZdaLGFZzdZPtgWLcP2NKjQ,79
+ml_dash-0.5.8.dist-info/entry_points.txt,sha256=dYs2EHX1uRNO7AQGNnVaJJpgiy0Z9q7tiy4fHSyaf3Q,46
+ml_dash-0.5.8.dist-info/METADATA,sha256=HywjX8kVHUXB5OD3bFeCTDUDwCM_FsCULTp_WJ_Z0eI,6175
+ml_dash-0.5.8.dist-info/RECORD,,
ml_dash-0.5.8.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+[console_scripts]
+ml-dash = ml_dash.cli:main
+
ml_dash-0.5.7.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
-ml_dash/__init__.py,sha256=o_LrWVJBY_VkUGhSBs5wdb_NqEsHD1AK9HGsjZGxHxQ,1414
-ml_dash/auto_start.py,sha256=c3XcXFpZdvjtWauEoK5043Gw9k0L_5IDq4fdiB2ha88,959
-ml_dash/client.py,sha256=vhWcS5o2n3o4apEjVeLmu7flCEzxBbBOoLSQNcAx_ew,17267
-ml_dash/experiment.py,sha256=K36HkHJb_O2-vdaPPOCq74_2nZtfiLaS0o7qhTntD8Q,30646
-ml_dash/files.py,sha256=JptjoxGJiXJ-nkj6C7vDhw-cgJRCB0cHt_SIUJG665o,23024
-ml_dash/log.py,sha256=0yXaNnFwYeBI3tRLHX3kkqWRpg0MbSGwmgjnOfsElCk,5350
-ml_dash/metric.py,sha256=c0Zl0wEufmQuVfwIMvrORLwqe92Iaf0PfKRgmlgQWzQ,10343
-ml_dash/params.py,sha256=xaByDSVar4D1pZqxTANkMPeZTL5-V7ewJe5TXfPLhMQ,5980
-ml_dash/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ml_dash/storage.py,sha256=8lyT5ZdvhS2nEyrEgMnFAT0LzV5ne1v8tkI3w1PUHJ4,30793
-ml_dash-0.5.7.dist-info/WHEEL,sha256=z-mOpxbJHqy3cq6SvUThBZdaLGFZzdZPtgWLcP2NKjQ,79
-ml_dash-0.5.7.dist-info/METADATA,sha256=zgcqykoN7fKnJuQi94m8bjww66baNgEdf4lUs_kba6k,6147
-ml_dash-0.5.7.dist-info/RECORD,,