wandb 0.22.1__py3-none-macosx_12_0_arm64.whl → 0.22.2__py3-none-macosx_12_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. wandb/__init__.py +1 -1
  2. wandb/__init__.pyi +6 -3
  3. wandb/bin/gpu_stats +0 -0
  4. wandb/bin/wandb-core +0 -0
  5. wandb/cli/beta.py +16 -2
  6. wandb/cli/beta_leet.py +74 -0
  7. wandb/cli/cli.py +34 -7
  8. wandb/proto/v3/wandb_api_pb2.py +86 -0
  9. wandb/proto/v3/wandb_internal_pb2.py +352 -351
  10. wandb/proto/v3/wandb_settings_pb2.py +2 -2
  11. wandb/proto/v4/wandb_api_pb2.py +37 -0
  12. wandb/proto/v4/wandb_internal_pb2.py +352 -351
  13. wandb/proto/v4/wandb_settings_pb2.py +2 -2
  14. wandb/proto/v5/wandb_api_pb2.py +38 -0
  15. wandb/proto/v5/wandb_internal_pb2.py +352 -351
  16. wandb/proto/v5/wandb_settings_pb2.py +2 -2
  17. wandb/proto/v6/wandb_api_pb2.py +48 -0
  18. wandb/proto/v6/wandb_internal_pb2.py +352 -351
  19. wandb/proto/v6/wandb_settings_pb2.py +2 -2
  20. wandb/proto/wandb_api_pb2.py +18 -0
  21. wandb/proto/wandb_generate_proto.py +1 -0
  22. wandb/sdk/artifacts/artifact.py +30 -30
  23. wandb/sdk/artifacts/artifact_manifest_entry.py +6 -12
  24. wandb/sdk/artifacts/storage_handler.py +18 -12
  25. wandb/sdk/artifacts/storage_handlers/azure_handler.py +11 -6
  26. wandb/sdk/artifacts/storage_handlers/gcs_handler.py +9 -6
  27. wandb/sdk/artifacts/storage_handlers/http_handler.py +9 -4
  28. wandb/sdk/artifacts/storage_handlers/local_file_handler.py +10 -6
  29. wandb/sdk/artifacts/storage_handlers/multi_handler.py +5 -4
  30. wandb/sdk/artifacts/storage_handlers/s3_handler.py +10 -8
  31. wandb/sdk/artifacts/storage_handlers/tracking_handler.py +6 -4
  32. wandb/sdk/artifacts/storage_handlers/wb_artifact_handler.py +24 -21
  33. wandb/sdk/artifacts/storage_handlers/wb_local_artifact_handler.py +4 -2
  34. wandb/sdk/artifacts/storage_policies/_multipart.py +187 -0
  35. wandb/sdk/artifacts/storage_policies/wandb_storage_policy.py +61 -242
  36. wandb/sdk/artifacts/storage_policy.py +25 -12
  37. wandb/sdk/data_types/object_3d.py +67 -2
  38. wandb/sdk/internal/job_builder.py +27 -10
  39. wandb/sdk/internal/sender.py +4 -1
  40. wandb/sdk/launch/create_job.py +2 -1
  41. wandb/sdk/lib/progress.py +1 -70
  42. wandb/sdk/wandb_init.py +1 -1
  43. wandb/sdk/wandb_run.py +5 -2
  44. wandb/sdk/wandb_settings.py +13 -12
  45. {wandb-0.22.1.dist-info → wandb-0.22.2.dist-info}/METADATA +1 -1
  46. {wandb-0.22.1.dist-info → wandb-0.22.2.dist-info}/RECORD +49 -42
  47. {wandb-0.22.1.dist-info → wandb-0.22.2.dist-info}/WHEEL +0 -0
  48. {wandb-0.22.1.dist-info → wandb-0.22.2.dist-info}/entry_points.txt +0 -0
  49. {wandb-0.22.1.dist-info → wandb-0.22.2.dist-info}/licenses/LICENSE +0 -0
@@ -2,7 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import TYPE_CHECKING, Sequence
5
+ from typing import TYPE_CHECKING
6
6
  from urllib.parse import urlparse
7
7
 
8
8
  from wandb.errors.term import termwarn
@@ -17,7 +17,9 @@ if TYPE_CHECKING:
17
17
 
18
18
 
19
19
  class TrackingHandler(StorageHandler):
20
- def __init__(self, scheme: str | None = None) -> None:
20
+ _scheme: str
21
+
22
+ def __init__(self, scheme: str = "") -> None:
21
23
  """Track paths with no modification or special processing.
22
24
 
23
25
  Useful when paths being tracked are on file systems mounted at a standardized
@@ -26,7 +28,7 @@ class TrackingHandler(StorageHandler):
26
28
  For example, if the data to track is located on an NFS share mounted on
27
29
  `/data`, then it is sufficient to just track the paths.
28
30
  """
29
- self._scheme = scheme or ""
31
+ self._scheme = scheme
30
32
 
31
33
  def can_handle(self, parsed_url: ParseResult) -> bool:
32
34
  return parsed_url.scheme == self._scheme
@@ -55,7 +57,7 @@ class TrackingHandler(StorageHandler):
55
57
  name: StrPath | None = None,
56
58
  checksum: bool = True,
57
59
  max_objects: int | None = None,
58
- ) -> Sequence[ArtifactManifestEntry]:
60
+ ) -> list[ArtifactManifestEntry]:
59
61
  url = urlparse(path)
60
62
  if name is None:
61
63
  raise ValueError(
@@ -3,27 +3,29 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import os
6
- from typing import TYPE_CHECKING, Sequence
6
+ from typing import TYPE_CHECKING, Literal
7
7
  from urllib.parse import urlparse
8
8
 
9
- import wandb
10
- from wandb import util
9
+ from wandb._strutils import removeprefix
11
10
  from wandb.apis import PublicApi
12
11
  from wandb.sdk.artifacts.artifact_file_cache import get_artifact_file_cache
13
12
  from wandb.sdk.artifacts.artifact_manifest_entry import ArtifactManifestEntry
14
13
  from wandb.sdk.artifacts.storage_handler import StorageHandler
15
- from wandb.sdk.lib.hashutil import B64MD5, b64_to_hex_id, hex_to_b64_id
14
+ from wandb.sdk.lib.hashutil import b64_to_hex_id, hex_to_b64_id
16
15
  from wandb.sdk.lib.paths import FilePathStr, StrPath, URIStr
17
16
 
18
17
  if TYPE_CHECKING:
19
18
  from urllib.parse import ParseResult
20
19
 
21
20
  from wandb.sdk.artifacts.artifact import Artifact
21
+ from wandb.sdk.artifacts.artifact_file_cache import ArtifactFileCache
22
22
 
23
23
 
24
24
  class WBArtifactHandler(StorageHandler):
25
25
  """Handles loading and storing Artifact reference-type files."""
26
26
 
27
+ _scheme: Literal["wandb-artifact"]
28
+ _cache: ArtifactFileCache
27
29
  _client: PublicApi | None
28
30
 
29
31
  def __init__(self) -> None:
@@ -55,6 +57,8 @@ class WBArtifactHandler(StorageHandler):
55
57
  Returns:
56
58
  (os.PathLike): A path to the file represented by `index_entry`
57
59
  """
60
+ from wandb.sdk.artifacts.artifact import Artifact # avoids circular import
61
+
58
62
  # We don't check for cache hits here. Since we have 0 for size (since this
59
63
  # is a cross-artifact reference which and we've made the choice to store 0
60
64
  # in the size field), we can't confirm if the file is complete. So we just
@@ -62,19 +66,17 @@ class WBArtifactHandler(StorageHandler):
62
66
  # check.
63
67
 
64
68
  # Parse the reference path and download the artifact if needed
65
- artifact_id = util.host_from_path(manifest_entry.ref)
66
- artifact_file_path = util.uri_from_path(manifest_entry.ref)
69
+ parsed = urlparse(manifest_entry.ref)
70
+ artifact_id = hex_to_b64_id(parsed.netloc)
71
+ artifact_file_path = removeprefix(str(parsed.path), "/")
67
72
 
68
- dep_artifact = wandb.Artifact._from_id(
69
- hex_to_b64_id(artifact_id), self.client.client
70
- )
73
+ dep_artifact = Artifact._from_id(artifact_id, self.client.client)
71
74
  assert dep_artifact is not None
72
75
  link_target_path: URIStr | FilePathStr
73
76
  if local:
74
77
  link_target_path = dep_artifact.get_entry(artifact_file_path).download()
75
78
  else:
76
79
  link_target_path = dep_artifact.get_entry(artifact_file_path).ref_target()
77
-
78
80
  return link_target_path
79
81
 
80
82
  def store_path(
@@ -84,7 +86,7 @@ class WBArtifactHandler(StorageHandler):
84
86
  name: StrPath | None = None,
85
87
  checksum: bool = True,
86
88
  max_objects: int | None = None,
87
- ) -> Sequence[ArtifactManifestEntry]:
89
+ ) -> list[ArtifactManifestEntry]:
88
90
  """Store the file or directory at the given path into the specified artifact.
89
91
 
90
92
  Recursively resolves the reference until the result is a concrete asset.
@@ -97,26 +99,27 @@ class WBArtifactHandler(StorageHandler):
97
99
  (list[ArtifactManifestEntry]): A list of manifest entries to store within
98
100
  the artifact
99
101
  """
102
+ from wandb.sdk.artifacts.artifact import Artifact # avoids circular import
103
+
100
104
  # Recursively resolve the reference until a concrete asset is found
101
105
  # TODO: Consider resolving server-side for performance improvements.
102
- iter_path: URIStr | FilePathStr | None = path
103
- while iter_path is not None and urlparse(iter_path).scheme == self._scheme:
104
- artifact_id = util.host_from_path(iter_path)
105
- artifact_file_path = util.uri_from_path(iter_path)
106
- target_artifact = wandb.Artifact._from_id(
107
- hex_to_b64_id(artifact_id), self.client.client
108
- )
106
+ curr_path: URIStr | FilePathStr | None = path
107
+ while curr_path and (parsed := urlparse(curr_path)).scheme == self._scheme:
108
+ artifact_id = hex_to_b64_id(parsed.netloc)
109
+ artifact_file_path = removeprefix(parsed.path, "/")
110
+
111
+ target_artifact = Artifact._from_id(artifact_id, self.client.client)
109
112
  assert target_artifact is not None
110
113
 
111
114
  entry = target_artifact.manifest.get_entry_by_path(artifact_file_path)
112
115
  assert entry is not None
113
- iter_path = entry.ref
116
+ curr_path = entry.ref
114
117
 
115
118
  # Create the path reference
116
119
  assert target_artifact is not None
117
120
  assert target_artifact.id is not None
118
- path = URIStr(
119
- f"{self._scheme}://{b64_to_hex_id(B64MD5(target_artifact.id))}/{artifact_file_path}"
121
+ path = (
122
+ f"{self._scheme}://{b64_to_hex_id(target_artifact.id)}/{artifact_file_path}"
120
123
  )
121
124
 
122
125
  # Return the new entry
@@ -3,7 +3,7 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import os
6
- from typing import TYPE_CHECKING, Sequence
6
+ from typing import TYPE_CHECKING, Literal
7
7
 
8
8
  import wandb
9
9
  from wandb import util
@@ -21,6 +21,8 @@ if TYPE_CHECKING:
21
21
  class WBLocalArtifactHandler(StorageHandler):
22
22
  """Handles loading and storing Artifact reference-type files."""
23
23
 
24
+ _scheme: Literal["wandb-client-artifact"]
25
+
24
26
  def __init__(self) -> None:
25
27
  self._scheme = "wandb-client-artifact"
26
28
 
@@ -43,7 +45,7 @@ class WBLocalArtifactHandler(StorageHandler):
43
45
  name: StrPath | None = None,
44
46
  checksum: bool = True,
45
47
  max_objects: int | None = None,
46
- ) -> Sequence[ArtifactManifestEntry]:
48
+ ) -> list[ArtifactManifestEntry]:
47
49
  """Store the file or directory at the given path within the specified artifact.
48
50
 
49
51
  Args:
@@ -0,0 +1,187 @@
1
+ """Helpers and constants for multipart upload and download."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+ import math
7
+ import threading
8
+ from concurrent.futures import FIRST_EXCEPTION, Executor, wait
9
+ from dataclasses import dataclass, field
10
+ from queue import Queue
11
+ from typing import Any, Final, Iterator, Union
12
+
13
+ from requests import Session
14
+ from typing_extensions import TypeAlias, TypeIs, final
15
+
16
+ from wandb import env
17
+ from wandb.sdk.artifacts.artifact_file_cache import Opener
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+ KiB: Final[int] = 1024
22
+ MiB: Final[int] = 1024**2
23
+ GiB: Final[int] = 1024**3
24
+ TiB: Final[int] = 1024**4
25
+
26
+ # AWS S3 max upload parts without having to make additional requests for extra parts
27
+ MAX_PARTS = 1_000
28
+ MIN_MULTI_UPLOAD_SIZE = 2 * GiB
29
+ MAX_MULTI_UPLOAD_SIZE = 5 * TiB
30
+
31
+ # Minimum size to switch to multipart download, same threshold as upload.
32
+ MIN_MULTI_DOWNLOAD_SIZE = MIN_MULTI_UPLOAD_SIZE
33
+
34
+ # Multipart download part size is same as multpart upload size, which is hard coded to 100MB.
35
+ # https://github.com/wandb/wandb/blob/7b2a13cb8efcd553317167b823c8e52d8c3f7c4e/core/pkg/artifacts/saver.go#L496
36
+ # https://docs.aws.amazon.com/AmazonS3/latest/userguide/optimizing-performance-guidelines.html#optimizing-performance-guidelines-get-range
37
+ MULTI_DEFAULT_PART_SIZE = 100 * MiB
38
+
39
+ # Chunk size for reading http response and writing to disk.
40
+ RSP_CHUNK_SIZE = 1 * MiB
41
+
42
+
43
+ @final
44
+ class _ChunkSentinel:
45
+ """Signals the end of the multipart chunk queue.
46
+
47
+ Queue consumer(s) (file writer) should terminate on receiving an item of this type from the queue.
48
+ Do not instantiate this class directly, use the `END_CHUNK` constant as a pseudo-singleton instead.
49
+
50
+ NOTE: As implemented, this should only be used in multi-threaded (not multi-process) contexts, as
51
+ it's not currently guaranteed to be process-safe.
52
+ """
53
+
54
+ def __repr__(self) -> str:
55
+ return "ChunkSentinel"
56
+
57
+
58
+ END_CHUNK: Final[_ChunkSentinel] = _ChunkSentinel()
59
+
60
+
61
+ def is_end_chunk(obj: Any) -> TypeIs[_ChunkSentinel]:
62
+ """Returns True if the object is the terminal queue item for multipart downloads."""
63
+ # Needed for type checking, since _ChunkSentinel isn't formally a singleton.
64
+ return obj is END_CHUNK
65
+
66
+
67
+ @dataclass(frozen=True)
68
+ class ChunkContent:
69
+ __slots__ = ("offset", "data") # slots=True only introduced in Python 3.10
70
+ offset: int
71
+ data: bytes
72
+
73
+
74
+ QueuedChunk: TypeAlias = Union[ChunkContent, _ChunkSentinel]
75
+
76
+
77
+ def should_multipart_download(size: int | None, override: bool | None = None) -> bool:
78
+ return ((size or 0) >= MIN_MULTI_DOWNLOAD_SIZE) if (override is None) else override
79
+
80
+
81
+ def calc_part_size(file_size: int, min_part_size: int = MULTI_DEFAULT_PART_SIZE) -> int:
82
+ # Default to a chunk size of 100MiB. S3 has a cap of 10,000 upload parts.
83
+ return max(math.ceil(file_size / MAX_PARTS), min_part_size)
84
+
85
+
86
+ def scan_chunks(path: str, chunk_size: int) -> Iterator[bytes]:
87
+ with open(path, "rb") as f:
88
+ while data := f.read(chunk_size):
89
+ yield data
90
+
91
+
92
+ @dataclass
93
+ class MultipartDownloadContext:
94
+ q: Queue[QueuedChunk]
95
+ cancel: threading.Event = field(default_factory=threading.Event)
96
+
97
+
98
+ def multipart_download(
99
+ executor: Executor,
100
+ session: Session,
101
+ url: str,
102
+ size: int,
103
+ cached_open: Opener,
104
+ part_size: int = MULTI_DEFAULT_PART_SIZE,
105
+ ):
106
+ """Download file as multiple parts in parallel.
107
+
108
+ Only one thread for writing to file. Each part run one http request in one thread.
109
+ HTTP response chunk of a file part is sent to the writer thread via a queue.
110
+ """
111
+ # ------------------------------------------------------------------------------
112
+ # Shared between threads
113
+ ctx = MultipartDownloadContext(q=Queue(maxsize=500))
114
+
115
+ # Put cache_open at top so we remove the tmp file when there is network error.
116
+ with cached_open("wb") as f:
117
+
118
+ def download_chunk(start: int, end: int | None = None) -> None:
119
+ # Error from another thread, no need to start
120
+ if ctx.cancel.is_set():
121
+ return
122
+
123
+ # https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Range
124
+ # Start and end are both inclusive, empty end means use the actual end of the file.
125
+ # e.g. "bytes=0-499"
126
+ bytes_range = f"{start}-" if (end is None) else f"{start}-{end}"
127
+ headers = {"Range": f"bytes={bytes_range}"}
128
+ with session.get(url=url, headers=headers, stream=True) as rsp:
129
+ offset = start
130
+ for chunk in rsp.iter_content(chunk_size=RSP_CHUNK_SIZE):
131
+ if ctx.cancel.is_set():
132
+ return
133
+ ctx.q.put(ChunkContent(offset=offset, data=chunk))
134
+ offset += len(chunk)
135
+
136
+ def write_chunks() -> None:
137
+ # If all chunks are written or there's an error in another thread, shutdown
138
+ while not (ctx.cancel.is_set() or is_end_chunk(chunk := ctx.q.get())):
139
+ try:
140
+ # NOTE: Seek works without pre allocating the file on disk.
141
+ # It automatically creates a sparse file, e.g. ls -hl would show
142
+ # a bigger size compared to du -sh * because downloading different
143
+ # chunks is not a sequential write.
144
+ # See https://man7.org/linux/man-pages/man2/lseek.2.html
145
+ f.seek(chunk.offset)
146
+ f.write(chunk.data)
147
+
148
+ except Exception as e:
149
+ if env.is_debug():
150
+ logger.debug(f"Error writing chunk to file: {e}")
151
+ ctx.cancel.set()
152
+ raise
153
+
154
+ # Start writer thread first.
155
+ write_future = executor.submit(write_chunks)
156
+
157
+ # Start download threads for each chunk.
158
+ download_futures = set()
159
+ for start in range(0, size, part_size):
160
+ # https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Range
161
+ # Start and end are both inclusive, empty end means use the actual end of the file.
162
+ # e.g. bytes=0-499
163
+ end = end if (end := (start + part_size - 1)) < size else None
164
+ download_futures.add(executor.submit(download_chunk, start=start, end=end))
165
+
166
+ # Wait for download
167
+ done, not_done = wait(download_futures, return_when=FIRST_EXCEPTION)
168
+ try:
169
+ for fut in done:
170
+ fut.result()
171
+ except Exception as e:
172
+ if env.is_debug():
173
+ logger.debug(f"Error downloading file: {e}")
174
+ ctx.cancel.set()
175
+
176
+ # Cancel any pending futures. Note:
177
+ # - `Future.cancel()` does NOT stop the future if it's running, which is why
178
+ # there's a separate `threading.Event` to ensure cooperative cancellation.
179
+ # - Once Python 3.8 support is dropped, replace these `fut.cancel()`
180
+ # calls with `Executor.shutdown(cancel_futures=True)`.
181
+ for fut in not_done:
182
+ fut.cancel()
183
+ raise
184
+ finally:
185
+ # Always signal the writer to stop
186
+ ctx.q.put(END_CHUNK)
187
+ write_future.result()