wandb 0.21.0__py3-none-win_amd64.whl → 0.21.1__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. wandb/__init__.py +16 -14
  2. wandb/__init__.pyi +427 -450
  3. wandb/agents/pyagent.py +41 -12
  4. wandb/analytics/sentry.py +7 -2
  5. wandb/apis/importers/mlflow.py +1 -1
  6. wandb/apis/public/__init__.py +1 -1
  7. wandb/apis/public/api.py +526 -360
  8. wandb/apis/public/artifacts.py +204 -8
  9. wandb/apis/public/automations.py +19 -3
  10. wandb/apis/public/files.py +172 -33
  11. wandb/apis/public/history.py +67 -15
  12. wandb/apis/public/integrations.py +25 -2
  13. wandb/apis/public/jobs.py +90 -2
  14. wandb/apis/public/projects.py +130 -79
  15. wandb/apis/public/query_generator.py +11 -1
  16. wandb/apis/public/registries/registries_search.py +7 -15
  17. wandb/apis/public/reports.py +83 -5
  18. wandb/apis/public/runs.py +299 -105
  19. wandb/apis/public/sweeps.py +222 -22
  20. wandb/apis/public/teams.py +41 -4
  21. wandb/apis/public/users.py +45 -4
  22. wandb/beta/workflows.py +66 -30
  23. wandb/bin/gpu_stats.exe +0 -0
  24. wandb/bin/wandb-core +0 -0
  25. wandb/cli/cli.py +80 -1
  26. wandb/env.py +8 -0
  27. wandb/errors/errors.py +4 -1
  28. wandb/integration/lightning/fabric/logger.py +3 -4
  29. wandb/integration/metaflow/__init__.py +6 -0
  30. wandb/integration/metaflow/data_pandas.py +74 -0
  31. wandb/integration/metaflow/errors.py +13 -0
  32. wandb/integration/metaflow/metaflow.py +205 -190
  33. wandb/integration/openai/fine_tuning.py +1 -2
  34. wandb/jupyter.py +5 -5
  35. wandb/plot/custom_chart.py +30 -7
  36. wandb/proto/v3/wandb_internal_pb2.py +280 -280
  37. wandb/proto/v3/wandb_telemetry_pb2.py +4 -4
  38. wandb/proto/v4/wandb_internal_pb2.py +280 -280
  39. wandb/proto/v4/wandb_telemetry_pb2.py +4 -4
  40. wandb/proto/v5/wandb_internal_pb2.py +280 -280
  41. wandb/proto/v5/wandb_telemetry_pb2.py +4 -4
  42. wandb/proto/v6/wandb_internal_pb2.py +280 -280
  43. wandb/proto/v6/wandb_telemetry_pb2.py +4 -4
  44. wandb/proto/wandb_deprecated.py +6 -0
  45. wandb/sdk/artifacts/_internal_artifact.py +19 -8
  46. wandb/sdk/artifacts/_validators.py +8 -0
  47. wandb/sdk/artifacts/artifact.py +106 -75
  48. wandb/sdk/data_types/audio.py +38 -10
  49. wandb/sdk/data_types/base_types/media.py +6 -56
  50. wandb/sdk/data_types/graph.py +48 -14
  51. wandb/sdk/data_types/helper_types/bounding_boxes_2d.py +1 -3
  52. wandb/sdk/data_types/helper_types/image_mask.py +1 -3
  53. wandb/sdk/data_types/histogram.py +34 -21
  54. wandb/sdk/data_types/html.py +35 -12
  55. wandb/sdk/data_types/image.py +104 -68
  56. wandb/sdk/data_types/molecule.py +32 -19
  57. wandb/sdk/data_types/object_3d.py +36 -17
  58. wandb/sdk/data_types/plotly.py +18 -5
  59. wandb/sdk/data_types/saved_model.py +4 -6
  60. wandb/sdk/data_types/table.py +59 -30
  61. wandb/sdk/data_types/video.py +53 -26
  62. wandb/sdk/integration_utils/auto_logging.py +2 -2
  63. wandb/sdk/internal/internal_api.py +6 -0
  64. wandb/sdk/internal/job_builder.py +6 -0
  65. wandb/sdk/launch/agent/agent.py +8 -1
  66. wandb/sdk/launch/agent/run_queue_item_file_saver.py +2 -2
  67. wandb/sdk/launch/create_job.py +3 -1
  68. wandb/sdk/launch/inputs/internal.py +3 -4
  69. wandb/sdk/launch/inputs/schema.py +1 -0
  70. wandb/sdk/launch/runner/kubernetes_monitor.py +1 -0
  71. wandb/sdk/launch/runner/kubernetes_runner.py +328 -1
  72. wandb/sdk/launch/sweeps/scheduler.py +2 -3
  73. wandb/sdk/lib/asyncio_compat.py +3 -0
  74. wandb/sdk/lib/deprecate.py +1 -7
  75. wandb/sdk/lib/disabled.py +1 -1
  76. wandb/sdk/lib/hashutil.py +14 -1
  77. wandb/sdk/lib/module.py +7 -13
  78. wandb/sdk/lib/progress.py +0 -19
  79. wandb/sdk/lib/sock_client.py +0 -4
  80. wandb/sdk/wandb_init.py +66 -91
  81. wandb/sdk/wandb_login.py +18 -14
  82. wandb/sdk/wandb_metric.py +2 -0
  83. wandb/sdk/wandb_run.py +406 -414
  84. wandb/sdk/wandb_settings.py +130 -2
  85. wandb/sdk/wandb_setup.py +28 -28
  86. wandb/sdk/wandb_sweep.py +14 -13
  87. wandb/sdk/wandb_watch.py +4 -6
  88. wandb/sync/sync.py +10 -0
  89. wandb/util.py +57 -0
  90. wandb/wandb_run.py +1 -2
  91. {wandb-0.21.0.dist-info → wandb-0.21.1.dist-info}/METADATA +1 -1
  92. {wandb-0.21.0.dist-info → wandb-0.21.1.dist-info}/RECORD +95 -95
  93. wandb/vendor/pynvml/__init__.py +0 -0
  94. wandb/vendor/pynvml/pynvml.py +0 -4779
  95. {wandb-0.21.0.dist-info → wandb-0.21.1.dist-info}/WHEEL +0 -0
  96. {wandb-0.21.0.dist-info → wandb-0.21.1.dist-info}/entry_points.txt +0 -0
  97. {wandb-0.21.0.dist-info → wandb-0.21.1.dist-info}/licenses/LICENSE +0 -0
wandb/integration/metaflow/metaflow.py CHANGED
@@ -1,124 +1,67 @@
-"""W&B Integration for Metaflow.
-
-This integration lets users apply decorators to Metaflow flows and steps to automatically log parameters and artifacts to W&B by type dispatch.
-
-- Decorating a step will enable or disable logging for certain types within that step
-- Decorating the flow is equivalent to decorating all steps with a default
-- Decorating a step after decorating the flow will overwrite the flow decoration
-
-Examples can be found at wandb/wandb/functional_tests/metaflow
-"""
-
 import inspect
 import pickle
 from functools import wraps
 from pathlib import Path
-from typing import Union
+from typing import Optional, Union
 
 import wandb
 from wandb.sdk.lib import telemetry as wb_telemetry
 
+from . import errors
+
 try:
     from metaflow import current
 except ImportError as e:
     raise Exception(
-        "Error: `metaflow` not installed >> This integration requires metaflow! To fix, please `pip install -Uqq metaflow`"
+        "Error: `metaflow` not installed >> This integration requires metaflow!"
+        " To fix, please `pip install -Uqq metaflow`"
     ) from e
 
-try:
-    from plum import dispatch
-except ImportError as e:
-    raise Exception(
-        "Error: `plum-dispatch` not installed >> "
-        "This integration requires plum-dispatch! To fix, please `pip install -Uqq plum-dispatch`"
-    ) from e
 
+# Classes for isinstance() checks.
+_NN_MODULE = None
+_BASE_ESTIMATOR = None
 
 try:
-    import pandas as pd
-
-    @dispatch
-    def _wandb_use(
-        name: str,
-        data: pd.DataFrame,
-        datasets=False,
-        run=None,
-        testing=False,
-        *args,
-        **kwargs,
-    ):  # type: ignore
-        if testing:
-            return "datasets" if datasets else None
-
-        if datasets:
-            run.use_artifact(f"{name}:latest")
-            wandb.termlog(f"Using artifact: {name} ({type(data)})")
-
-    @dispatch
-    def wandb_track(
-        name: str,
-        data: pd.DataFrame,
-        datasets=False,
-        run=None,
-        testing=False,
-        *args,
-        **kwargs,
-    ):
-        if testing:
-            return "pd.DataFrame" if datasets else None
-
-        if datasets:
-            artifact = wandb.Artifact(name, type="dataset")
-            with artifact.new_file(f"{name}.parquet", "wb") as f:
-                data.to_parquet(f, engine="pyarrow")
-            run.log_artifact(artifact)
-            wandb.termlog(f"Logging artifact: {name} ({type(data)})")
-
-except ImportError:
-    wandb.termwarn(
-        "`pandas` not installed >> @wandb_log(datasets=True) may not auto log your dataset!"
-    )
+    from . import data_pandas
+except errors.MissingDependencyError as e:
+    e.warn()
+    data_pandas = None
 
 try:
     import torch
     import torch.nn as nn
 
-    @dispatch
-    def _wandb_use(
+    _NN_MODULE = nn.Module
+
+    def _use_torch_module(
         name: str,
         data: nn.Module,
-        models=False,
-        run=None,
-        testing=False,
-        *args,
-        **kwargs,
-    ):  # type: ignore
+        run,
+        testing: bool = False,
+    ) -> Optional[str]:
         if testing:
-            return "models" if models else None
+            return "models"
 
-        if models:
-            run.use_artifact(f"{name}:latest")
-            wandb.termlog(f"Using artifact: {name} ({type(data)})")
+        run.use_artifact(f"{name}:latest")
+        wandb.termlog(f"Using artifact: {name} ({type(data)})")
+        return None
 
-    @dispatch
-    def wandb_track(
+    def _track_torch_module(
         name: str,
         data: nn.Module,
-        models=False,
-        run=None,
-        testing=False,
-        *args,
-        **kwargs,
-    ):
+        run,
+        testing: bool = False,
+    ) -> Optional[str]:
         if testing:
-            return "nn.Module" if models else None
+            return "nn.Module"
 
-        if models:
-            artifact = wandb.Artifact(name, type="model")
-            with artifact.new_file(f"{name}.pkl", "wb") as f:
-                torch.save(data, f)
-            run.log_artifact(artifact)
-            wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+        artifact = wandb.Artifact(name, type="model")
+        with artifact.new_file(f"{name}.pkl", "wb") as f:
+            torch.save(data, f)
+        run.log_artifact(artifact)
+        wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+        return None
 
 except ImportError:
     wandb.termwarn(
@@ -128,42 +71,36 @@ except ImportError:
 try:
     from sklearn.base import BaseEstimator
 
-    @dispatch
-    def _wandb_use(
+    _BASE_ESTIMATOR = BaseEstimator
+
+    def _use_sklearn_estimator(
         name: str,
         data: BaseEstimator,
-        models=False,
-        run=None,
-        testing=False,
-        *args,
-        **kwargs,
-    ):  # type: ignore
+        run,
+        testing: bool = False,
+    ) -> Optional[str]:
         if testing:
-            return "models" if models else None
+            return "models"
 
-        if models:
-            run.use_artifact(f"{name}:latest")
-            wandb.termlog(f"Using artifact: {name} ({type(data)})")
+        run.use_artifact(f"{name}:latest")
+        wandb.termlog(f"Using artifact: {name} ({type(data)})")
+        return None
 
-    @dispatch
-    def wandb_track(
+    def _track_sklearn_estimator(
         name: str,
         data: BaseEstimator,
-        models=False,
-        run=None,
-        testing=False,
-        *args,
-        **kwargs,
-    ):
+        run,
+        testing: bool = False,
+    ) -> Optional[str]:
         if testing:
-            return "BaseEstimator" if models else None
+            return "BaseEstimator"
 
-        if models:
-            artifact = wandb.Artifact(name, type="model")
-            with artifact.new_file(f"{name}.pkl", "wb") as f:
-                pickle.dump(data, f)
-            run.log_artifact(artifact)
-            wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+        artifact = wandb.Artifact(name, type="model")
+        with artifact.new_file(f"{name}.pkl", "wb") as f:
+            pickle.dump(data, f)
+        run.log_artifact(artifact)
+        wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+        return None
 
 except ImportError:
     wandb.termwarn(
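Both hunks above replace `plum` multiple dispatch with module-level class sentinels that are populated only if the optional dependency imports. A minimal sketch of that pattern, under illustrative names (`is_torch_model` is not part of the diff):

```python
from typing import Optional

# The sentinel stays None unless the optional dependency is importable,
# so the isinstance() check below can be guarded without requiring torch.
_NN_MODULE: Optional[type] = None

try:
    import torch.nn as nn

    _NN_MODULE = nn.Module
except ImportError:
    pass


def is_torch_model(obj: object) -> bool:
    # Short-circuits to False when torch is missing.
    return _NN_MODULE is not None and isinstance(obj, _NN_MODULE)
```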
@@ -194,93 +131,166 @@ class ArtifactProxy:
         return getattr(self.flow, key)
 
 
-@dispatch
-def wandb_track(
+def _track_scalar(
     name: str,
     data: Union[dict, list, set, str, int, float, bool],
-    run=None,
-    testing=False,
-    *args,
-    **kwargs,
-):  # type: ignore
+    run,
+    testing: bool = False,
+) -> Optional[str]:
     if testing:
         return "scalar"
 
     run.log({name: data})
+    return None
 
 
-@dispatch
-def wandb_track(
-    name: str, data: Path, datasets=False, run=None, testing=False, *args, **kwargs
-):
+def _track_path(
+    name: str,
+    data: Path,
+    run,
+    testing: bool = False,
+) -> Optional[str]:
     if testing:
-        return "Path" if datasets else None
-
-    if datasets:
-        artifact = wandb.Artifact(name, type="dataset")
-        if data.is_dir():
-            artifact.add_dir(data)
-        elif data.is_file():
-            artifact.add_file(data)
-        run.log_artifact(artifact)
-        wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+        return "Path"
 
+    artifact = wandb.Artifact(name, type="dataset")
+    if data.is_dir():
+        artifact.add_dir(data)
+    elif data.is_file():
+        artifact.add_file(data)
+    run.log_artifact(artifact)
+    wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+    return None
 
-# this is the base case
-@dispatch
-def wandb_track(
-    name: str, data, others=False, run=None, testing=False, *args, **kwargs
-):
+
+def _track_generic(
+    name: str,
+    data,
+    run,
+    testing: bool = False,
+) -> Optional[str]:
     if testing:
-        return "generic" if others else None
+        return "generic"
+
+    artifact = wandb.Artifact(name, type="other")
+    with artifact.new_file(f"{name}.pkl", "wb") as f:
+        pickle.dump(data, f)
+    run.log_artifact(artifact)
+    wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+    return None
 
+
+def wandb_track(
+    name: str,
+    data,
+    datasets: bool = False,
+    models: bool = False,
+    others: bool = False,
+    run: Optional[wandb.Run] = None,
+    testing: bool = False,
+) -> Optional[str]:
+    """Track data as wandb artifacts based on type and flags."""
+    # Check for pandas DataFrame
+    if data_pandas and data_pandas.is_dataframe(data) and datasets:
+        return data_pandas.track_dataframe(name, data, run, testing)
+
+    # Check for PyTorch Module
+    if _NN_MODULE and isinstance(data, _NN_MODULE) and models:
+        return _track_torch_module(name, data, run, testing)
+
+    # Check for scikit-learn BaseEstimator
+    if _BASE_ESTIMATOR and isinstance(data, _BASE_ESTIMATOR) and models:
+        return _track_sklearn_estimator(name, data, run, testing)
+
+    # Check for Path objects
+    if isinstance(data, Path) and datasets:
+        return _track_path(name, data, run, testing)
+
+    # Check for scalar types
+    if isinstance(data, (dict, list, set, str, int, float, bool)):
+        return _track_scalar(name, data, run, testing)
+
+    # Generic fallback
     if others:
-        artifact = wandb.Artifact(name, type="other")
-        with artifact.new_file(f"{name}.pkl", "wb") as f:
-            pickle.dump(data, f)
-        run.log_artifact(artifact)
-        wandb.termlog(f"Logging artifact: {name} ({type(data)})")
+        return _track_generic(name, data, run, testing)
 
+    # No action taken
+    return None
+
+
+def wandb_use(
+    name: str,
+    data,
+    datasets: bool = False,
+    models: bool = False,
+    others: bool = False,
+    run=None,
+    testing: bool = False,
+) -> Optional[str]:
+    """Use wandb artifacts based on data type and flags."""
+    # Skip scalar types - nothing to use
+    if isinstance(data, (dict, list, set, str, int, float, bool)):
+        return None
 
-@dispatch
-def wandb_use(name: str, data, *args, **kwargs):
     try:
-        return _wandb_use(name, data, *args, **kwargs)
+        # Check for pandas DataFrame
+        if data_pandas and data_pandas.is_dataframe(data) and datasets:
+            return data_pandas.use_dataframe(name, run, testing)
+
+        # Check for PyTorch Module
+        elif _NN_MODULE and isinstance(data, _NN_MODULE) and models:
+            return _use_torch_module(name, data, run, testing)
+
+        # Check for scikit-learn BaseEstimator
+        elif _BASE_ESTIMATOR and isinstance(data, _BASE_ESTIMATOR) and models:
+            return _use_sklearn_estimator(name, data, run, testing)
+
+        # Check for Path objects
+        elif isinstance(data, Path) and datasets:
+            return _use_path(name, data, run, testing)
+
+        # Generic fallback
+        elif others:
+            return _use_generic(name, data, run, testing)
+
+        else:
+            return None
+
     except wandb.CommError:
         wandb.termwarn(
             f"This artifact ({name}, {type(data)}) does not exist in the wandb datastore!"
-            f"If you created an instance inline (e.g. sklearn.ensemble.RandomForestClassifier), then you can safely ignore this"
-            f"Otherwise you may want to check your internet connection!"
+            " If you created an instance inline (e.g. sklearn.ensemble.RandomForestClassifier),"
+            " then you can safely ignore this. Otherwise you may want to check your internet connection!"
         )
+        return None
 
 
-@dispatch
-def wandb_use(
-    name: str, data: Union[dict, list, set, str, int, float, bool], *args, **kwargs
-):  # type: ignore
-    pass  # do nothing for these types
-
-
-@dispatch
-def _wandb_use(
-    name: str, data: Path, datasets=False, run=None, testing=False, *args, **kwargs
-):  # type: ignore
+def _use_path(
+    name: str,
+    data: Path,
+    run,
+    testing: bool = False,
+) -> Optional[str]:
     if testing:
-        return "datasets" if datasets else None
+        return "datasets"
 
-    if datasets:
-        run.use_artifact(f"{name}:latest")
-        wandb.termlog(f"Using artifact: {name} ({type(data)})")
+    run.use_artifact(f"{name}:latest")
+    wandb.termlog(f"Using artifact: {name} ({type(data)})")
+    return None
 
 
-@dispatch
-def _wandb_use(name: str, data, others=False, run=None, testing=False, *args, **kwargs):  # type: ignore
+def _use_generic(
+    name: str,
+    data,
+    run,
+    testing: bool = False,
+) -> Optional[str]:
     if testing:
-        return "others" if others else None
+        return "others"
 
-    if others:
-        run.use_artifact(f"{name}:latest")
-        wandb.termlog(f"Using artifact: {name} ({type(data)})")
+    run.use_artifact(f"{name}:latest")
+    wandb.termlog(f"Using artifact: {name} ({type(data)})")
+    return None
 
 
 def coalesce(*arg):
@@ -289,25 +299,30 @@ def coalesce(*arg):
 
 def wandb_log(
     func=None,
-    # /, # py38 only
-    datasets=False,
-    models=False,
-    others=False,
-    settings=None,
+    /,
+    datasets: bool = False,
+    models: bool = False,
+    others: bool = False,
+    settings: Optional[wandb.Settings] = None,
 ):
-    """Automatically log parameters and artifacts to W&B by type dispatch.
+    """Automatically log parameters and artifacts to W&B.
+
+    This decorator can be applied to a flow, step, or both:
 
-    This decorator can be applied to a flow, step, or both.
-    - Decorating a step will enable or disable logging for certain types within that step
-    - Decorating the flow is equivalent to decorating all steps with a default
-    - Decorating a step after decorating the flow will overwrite the flow decoration
+    - Decorating a step enables or disables logging within that step
+    - Decorating a flow is equivalent to decorating all steps
+    - Decorating a step after decorating its flow overwrites the flow decoration
 
     Args:
-        func: (`Callable`). The method or class being decorated (if decorating a step or flow respectively).
-        datasets: (`bool`). If `True`, log datasets. Datasets can be a `pd.DataFrame` or `pathlib.Path`. The default value is `False`, so datasets are not logged.
-        models: (`bool`). If `True`, log models. Models can be a `nn.Module` or `sklearn.base.BaseEstimator`. The default value is `False`, so models are not logged.
-        others: (`bool`). If `True`, log anything pickle-able. The default value is `False`, so files are not logged.
-        settings: (`wandb.sdk.wandb_settings.Settings`). Custom settings passed to `wandb.init`. The default value is `None`, and is the same as passing `wandb.Settings()`. If `settings.run_group` is `None`, it will be set to `{flow_name}/{run_id}. If `settings.run_job_type` is `None`, it will be set to `{run_job_type}/{step_name}`
+        func: The step method or flow class to decorate.
+        datasets: Whether to log `pd.DataFrame` and `pathlib.Path`
+            types. Defaults to False.
+        models: Whether to log `nn.Module` and `sklearn.base.BaseEstimator`
+            types. Defaults to False.
+        others: If `True`, log anything pickle-able. Defaults to False.
+        settings: Custom settings to pass to `wandb.init`.
+            If `run_group` is `None`, it is set to `{flow_name}/{run_id}`.
+            If `run_job_type` is `None`, it is set to `{run_job_type}/{step_name}`.
     """
 
     @wraps(func)
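Based on the docstring above, a minimal sketch of applying the decorator to a Metaflow flow and step; `MyFlow` and its steps are illustrative, not taken from the diff:

```python
from metaflow import FlowSpec, step

from wandb.integration.metaflow import wandb_log


@wandb_log(datasets=True)  # flow-level default: applies to every step
class MyFlow(FlowSpec):
    @step
    def start(self):
        self.lr = 0.01  # scalar attributes are logged via run.log()
        self.next(self.end)

    @wandb_log(datasets=False)  # step decoration overrides the flow default
    @step
    def end(self):
        pass
```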
wandb/integration/openai/fine_tuning.py CHANGED
@@ -14,7 +14,6 @@ import wandb
 from wandb import util
 from wandb.data_types import Table
 from wandb.sdk.lib import telemetry
-from wandb.sdk.wandb_run import Run
 
 openai = util.get_module(
     name="openai",
@@ -54,7 +53,7 @@ class WandbLogger:
     _wandb_api: Optional[wandb.Api] = None
     _logged_in: bool = False
     openai_client: Optional[OpenAI] = None
-    _run: Optional[Run] = None
+    _run: Optional[wandb.Run] = None
 
     @classmethod
     def sync(
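Both fine_tuning.py hunks swap the private `wandb.sdk.wandb_run.Run` import for the public `wandb.Run` alias used elsewhere in this release. A small sketch of annotating against the public alias (the function is hypothetical):

```python
import wandb


def log_sync_status(run: wandb.Run) -> None:
    # Annotate with the public alias instead of importing from wandb.sdk.
    run.log({"fine_tune/synced": 1})
```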
wandb/jupyter.py CHANGED
@@ -19,13 +19,13 @@ from requests.compat import urljoin
 
 import wandb
 import wandb.util
-from wandb.sdk import wandb_run, wandb_setup
+from wandb.sdk import wandb_setup
 from wandb.sdk.lib import filesystem
 
 logger = logging.getLogger(__name__)
 
 
-def display_if_magic_is_used(run: wandb_run.Run) -> bool:
+def display_if_magic_is_used(run: wandb.Run) -> bool:
     """Display a run's page if the cell has the %%wandb cell magic.
 
     Args:
@@ -53,7 +53,7 @@ class _WandbCellMagicState:
         self._height = height
         self._already_displayed = False
 
-    def display_if_allowed(self, run: wandb_run.Run) -> None:
+    def display_if_allowed(self, run: wandb.Run) -> None:
         """Display a run's iframe if one is not already displayed.
 
         Args:
@@ -93,7 +93,7 @@ def _display_by_wandb_path(path: str, *, height: int) -> None:
     )
 
 
-def _display_wandb_run(run: wandb_run.Run, *, height: int) -> None:
+def _display_wandb_run(run: wandb.Run, *, height: int) -> None:
     """Display a run (usually in an iframe).
 
     Args:
@@ -461,7 +461,7 @@ class Notebook:
 
         return False
 
-    def save_history(self, run: wandb_run.Run):
+    def save_history(self, run: wandb.Run):
         """This saves all cell executions in the current session as a new notebook."""
         try:
             from nbformat import v4, validator, write  # type: ignore
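The jupyter.py hunks make the same public-alias change around `display_if_magic_is_used`, which reacts to the `%%wandb` cell magic. A minimal notebook sketch of that magic (cell boundaries shown as comments; the project name is illustrative):

```python
# Cell 1: start a run as usual.
import wandb

run = wandb.init(project="demo")

# Cell 2: starting the cell with `%%wandb` renders the run page inline.
# %%wandb
run.log({"accuracy": 0.91})
```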
wandb/plot/custom_chart.py CHANGED
@@ -85,27 +85,50 @@ def plot_table(
     This function creates a custom chart based on a Vega-Lite specification and
     a data table represented by a `wandb.Table` object. The specification needs
     to be predefined and stored in the W&B backend. The function returns a custom
-    chart object that can be logged to W&B using `wandb.log()`.
+    chart object that can be logged to W&B using `wandb.Run.log()`.
 
     Args:
-        vega_spec_name (str): The name or identifier of the Vega-Lite spec
+        vega_spec_name: The name or identifier of the Vega-Lite spec
             that defines the visualization structure.
-        data_table (wandb.Table): A `wandb.Table` object containing the data to be
+        data_table: A `wandb.Table` object containing the data to be
             visualized.
-        fields (dict[str, Any]): A mapping between the fields in the Vega-Lite spec and the
+        fields: A mapping between the fields in the Vega-Lite spec and the
             corresponding columns in the data table to be visualized.
-        string_fields (dict[str, Any] | None): A dictionary for providing values for any string constants
+        string_fields: A dictionary for providing values for any string constants
             required by the custom visualization.
-        split_table (bool): Whether the table should be split into a separate section
+        split_table: Whether the table should be split into a separate section
             in the W&B UI. If `True`, the table will be displayed in a section named
             "Custom Chart Tables". Default is `False`.
 
     Returns:
         CustomChart: A custom chart object that can be logged to W&B. To log the
-            chart, pass it to `wandb.log()`.
+            chart, pass the chart object as argument to `wandb.Run.log()`.
 
     Raises:
         wandb.Error: If `data_table` is not a `wandb.Table` object.
+
+    Example:
+        ```python
+        # Create a custom chart using a Vega-Lite spec and the data table.
+        import wandb
+
+        data = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]
+        table = wandb.Table(data=data, columns=["x", "y"])
+        fields = {"x": "x", "y": "y", "title": "MY TITLE"}
+
+        with wandb.init() as run:
+            # Training code goes here
+
+            # Create a custom title with `string_fields`.
+            my_custom_chart = wandb.plot_table(
+                vega_spec_name="wandb/line/v0",
+                data_table=table,
+                fields=fields,
+                string_fields={"title": "Title"},
+            )
+
+            run.log({"custom_chart": my_custom_chart})
+        ```
     """
 
     if not isinstance(data_table, wandb.Table):