wandb 0.19.5__py3-none-macosx_11_0_arm64.whl → 0.19.6rc4__py3-none-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
wandb/sdk/wandb_init.py CHANGED
@@ -877,7 +877,7 @@ class _WandbInit:
             tel.feature.core = True
             if settings._shared:
                 wandb.termwarn(
-                    "The `_shared` feature is experimental and may change. "
+                    "The `shared` mode feature is experimental and may change. "
                     "Please contact support@wandb.com for guidance and to report any issues."
                 )
                 tel.feature.shared_mode = True
@@ -921,6 +921,16 @@ class _WandbInit:
         )
         error: wandb.Error | None = None

+        # In shared mode, generate a unique label if not provided.
+        # The label is used to distinguish between system metrics and console logs
+        # from different writers to the same run.
+        if settings._shared and not settings.x_label:
+            # TODO: If executed in a known distributed environment (e.g. Ray or SLURM),
+            # use the env vars to generate a label (e.g. SLURM_JOB_ID or RANK)
+            prefix = settings.host or ""
+            label = runid.generate_id()
+            settings.x_label = f"{prefix}-{label}" if prefix else label
+
        timeout = settings.init_timeout

        self._logger.info(
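For context, here is a minimal sketch of shared mode from the caller's side, where several processes write to one run. This is an illustration rather than code from the diff: it assumes the public shared-mode settings (`mode="shared"`, `x_primary`, `x_label`) available in wandb 0.19.x, and the run id and `RANK` environment variable are placeholders. With the change above, a secondary writer that omits `x_label` now gets an auto-generated `<host>-<id>` label instead of blending its system metrics and console logs with other writers'.

```python
import os

import wandb

# Sketch only: assumes wandb.Settings exposes mode="shared", x_primary and
# x_label as in the wandb 0.19.x shared-mode settings. RUN_ID and RANK are
# placeholders for whatever coordination mechanism the job actually uses.
RUN_ID = "my-shared-run"
rank = int(os.environ.get("RANK", "0"))

if rank == 0:
    # Primary writer: creates the run and owns its lifecycle.
    settings = wandb.Settings(mode="shared", x_primary=True)
else:
    # Secondary writer in another process or on another node. x_label can be
    # omitted here: with this change, _WandbInit fills in "<host>-<generated id>"
    # so this writer's system metrics and console logs stay distinguishable.
    settings = wandb.Settings(mode="shared", x_primary=False)

run = wandb.init(id=RUN_ID, settings=settings)
run.log({"loss": 0.1})
run.finish()
```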
@@ -95,6 +95,7 @@ class GpuNvidiaInfo(BaseModel, validate_assignment=True):
     memory_total: int | None = None
     cuda_cores: int | None = None
     architecture: str | None = None
+    uuid: str | None = None

     def to_proto(self) -> wandb_internal_pb2.GpuNvidiaInfo:
         return wandb_internal_pb2.GpuNvidiaInfo(
@@ -102,6 +103,7 @@ class GpuNvidiaInfo(BaseModel, validate_assignment=True):
             memory_total=self.memory_total or 0,
             cuda_cores=self.cuda_cores or 0,
             architecture=self.architecture or "",
+            uuid=self.uuid or "",
         )

     @classmethod
@@ -111,6 +113,7 @@ class GpuNvidiaInfo(BaseModel, validate_assignment=True):
             memory_total=proto.memory_total,
             cuda_cores=proto.cuda_cores,
             architecture=proto.architecture,
+            uuid=proto.uuid,
         )

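The three hunks above follow one pattern: add an optional field to the pydantic model and thread it through the proto round-trip, coalescing `None` to the proto scalar default on the way out. Below is a standalone sketch of that pattern; `FakeGpuNvidiaInfoProto` is a stand-in invented here so the example runs without the generated `wandb_internal_pb2` module, and only the fields visible in the diff are included.

```python
from dataclasses import dataclass

from pydantic import BaseModel


@dataclass
class FakeGpuNvidiaInfoProto:
    # Stand-in for wandb_internal_pb2.GpuNvidiaInfo: proto scalar fields are
    # never None, so "unset" is represented by 0 / "".
    memory_total: int = 0
    cuda_cores: int = 0
    architecture: str = ""
    uuid: str = ""


class GpuNvidiaInfo(BaseModel, validate_assignment=True):
    memory_total: int | None = None
    cuda_cores: int | None = None
    architecture: str | None = None
    uuid: str | None = None

    def to_proto(self) -> FakeGpuNvidiaInfoProto:
        # None collapses to the proto field's default, mirroring the diff above.
        return FakeGpuNvidiaInfoProto(
            memory_total=self.memory_total or 0,
            cuda_cores=self.cuda_cores or 0,
            architecture=self.architecture or "",
            uuid=self.uuid or "",
        )

    @classmethod
    def from_proto(cls, proto: FakeGpuNvidiaInfoProto) -> "GpuNvidiaInfo":
        return cls(
            memory_total=proto.memory_total,
            cuda_cores=proto.cuda_cores,
            architecture=proto.architecture,
            uuid=proto.uuid,
        )


# Round-trip check: the new uuid field survives model -> proto -> model.
info = GpuNvidiaInfo(uuid="GPU-deadbeef", memory_total=81920)
assert GpuNvidiaInfo.from_proto(info.to_proto()).uuid == "GPU-deadbeef"
```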