wandb 0.19.12rc1__py3-none-win32.whl → 0.20.1__py3-none-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wandb/__init__.py +1 -2
- wandb/__init__.pyi +3 -6
- wandb/_iterutils.py +26 -7
- wandb/_pydantic/__init__.py +2 -1
- wandb/_pydantic/utils.py +7 -0
- wandb/agents/pyagent.py +9 -15
- wandb/analytics/sentry.py +1 -2
- wandb/apis/attrs.py +3 -4
- wandb/apis/importers/internals/util.py +1 -1
- wandb/apis/importers/validation.py +2 -2
- wandb/apis/importers/wandb.py +30 -25
- wandb/apis/normalize.py +2 -2
- wandb/apis/public/__init__.py +1 -0
- wandb/apis/public/api.py +37 -33
- wandb/apis/public/artifacts.py +103 -72
- wandb/apis/public/jobs.py +3 -2
- wandb/apis/public/registries/registries_search.py +4 -2
- wandb/apis/public/registries/registry.py +1 -1
- wandb/apis/public/registries/utils.py +9 -9
- wandb/apis/public/runs.py +18 -6
- wandb/automations/_filters/expressions.py +1 -1
- wandb/automations/_filters/operators.py +1 -1
- wandb/automations/_filters/run_metrics.py +1 -1
- wandb/beta/workflows.py +6 -5
- wandb/bin/gpu_stats.exe +0 -0
- wandb/bin/wandb-core +0 -0
- wandb/cli/cli.py +54 -73
- wandb/docker/__init__.py +21 -74
- wandb/docker/names.py +40 -0
- wandb/env.py +0 -1
- wandb/errors/util.py +1 -1
- wandb/filesync/step_checksum.py +1 -1
- wandb/filesync/step_upload.py +1 -1
- wandb/integration/diffusers/resolvers/multimodal.py +1 -2
- wandb/integration/gym/__init__.py +5 -6
- wandb/integration/keras/callbacks/model_checkpoint.py +2 -2
- wandb/integration/keras/keras.py +13 -19
- wandb/integration/kfp/kfp_patch.py +2 -3
- wandb/integration/langchain/wandb_tracer.py +1 -1
- wandb/integration/metaflow/metaflow.py +13 -13
- wandb/integration/openai/fine_tuning.py +3 -2
- wandb/integration/sagemaker/auth.py +2 -1
- wandb/integration/sklearn/utils.py +2 -1
- wandb/integration/tensorboard/__init__.py +1 -1
- wandb/integration/tensorboard/log.py +2 -5
- wandb/integration/tensorflow/__init__.py +2 -2
- wandb/jupyter.py +20 -17
- wandb/plot/confusion_matrix.py +1 -1
- wandb/plot/utils.py +8 -7
- wandb/proto/v3/wandb_internal_pb2.py +355 -335
- wandb/proto/v3/wandb_settings_pb2.py +2 -2
- wandb/proto/v3/wandb_telemetry_pb2.py +12 -12
- wandb/proto/v4/wandb_internal_pb2.py +339 -335
- wandb/proto/v4/wandb_settings_pb2.py +2 -2
- wandb/proto/v4/wandb_telemetry_pb2.py +12 -12
- wandb/proto/v5/wandb_internal_pb2.py +339 -335
- wandb/proto/v5/wandb_settings_pb2.py +2 -2
- wandb/proto/v5/wandb_telemetry_pb2.py +12 -12
- wandb/proto/v6/wandb_internal_pb2.py +339 -335
- wandb/proto/v6/wandb_settings_pb2.py +2 -2
- wandb/proto/v6/wandb_telemetry_pb2.py +12 -12
- wandb/proto/wandb_deprecated.py +6 -8
- wandb/sdk/artifacts/_internal_artifact.py +43 -0
- wandb/sdk/artifacts/_validators.py +55 -35
- wandb/sdk/artifacts/artifact.py +117 -115
- wandb/sdk/artifacts/artifact_download_logger.py +2 -0
- wandb/sdk/artifacts/artifact_saver.py +1 -3
- wandb/sdk/artifacts/artifact_state.py +2 -0
- wandb/sdk/artifacts/artifact_ttl.py +2 -0
- wandb/sdk/artifacts/exceptions.py +14 -0
- wandb/sdk/artifacts/staging.py +2 -0
- wandb/sdk/artifacts/storage_handlers/local_file_handler.py +2 -6
- wandb/sdk/artifacts/storage_handlers/multi_handler.py +1 -1
- wandb/sdk/artifacts/storage_handlers/tracking_handler.py +2 -6
- wandb/sdk/artifacts/storage_handlers/wb_artifact_handler.py +1 -5
- wandb/sdk/artifacts/storage_handlers/wb_local_artifact_handler.py +1 -1
- wandb/sdk/artifacts/storage_layout.py +2 -0
- wandb/sdk/artifacts/storage_policies/wandb_storage_policy.py +3 -3
- wandb/sdk/backend/backend.py +11 -182
- wandb/sdk/data_types/_dtypes.py +2 -6
- wandb/sdk/data_types/audio.py +20 -3
- wandb/sdk/data_types/base_types/media.py +12 -7
- wandb/sdk/data_types/base_types/wb_value.py +8 -18
- wandb/sdk/data_types/bokeh.py +19 -2
- wandb/sdk/data_types/helper_types/bounding_boxes_2d.py +17 -1
- wandb/sdk/data_types/helper_types/image_mask.py +7 -1
- wandb/sdk/data_types/html.py +4 -4
- wandb/sdk/data_types/image.py +178 -103
- wandb/sdk/data_types/molecule.py +6 -6
- wandb/sdk/data_types/object_3d.py +10 -5
- wandb/sdk/data_types/saved_model.py +11 -6
- wandb/sdk/data_types/table.py +313 -83
- wandb/sdk/data_types/table_decorators.py +108 -0
- wandb/sdk/data_types/utils.py +43 -7
- wandb/sdk/data_types/video.py +21 -3
- wandb/sdk/interface/interface.py +10 -0
- wandb/sdk/internal/datastore.py +2 -6
- wandb/sdk/internal/file_pusher.py +1 -5
- wandb/sdk/internal/file_stream.py +8 -17
- wandb/sdk/internal/handler.py +2 -2
- wandb/sdk/internal/incremental_table_util.py +53 -0
- wandb/sdk/internal/internal.py +3 -5
- wandb/sdk/internal/internal_api.py +66 -89
- wandb/sdk/internal/job_builder.py +2 -7
- wandb/sdk/internal/profiler.py +2 -2
- wandb/sdk/internal/progress.py +1 -3
- wandb/sdk/internal/run.py +1 -6
- wandb/sdk/internal/sender.py +24 -36
- wandb/sdk/internal/system/assets/aggregators.py +1 -7
- wandb/sdk/internal/system/assets/disk.py +3 -3
- wandb/sdk/internal/system/assets/gpu.py +4 -4
- wandb/sdk/internal/system/assets/gpu_amd.py +4 -4
- wandb/sdk/internal/system/assets/interfaces.py +6 -6
- wandb/sdk/internal/system/assets/tpu.py +1 -1
- wandb/sdk/internal/system/assets/trainium.py +6 -6
- wandb/sdk/internal/system/system_info.py +5 -7
- wandb/sdk/internal/system/system_monitor.py +4 -4
- wandb/sdk/internal/tb_watcher.py +5 -7
- wandb/sdk/launch/_launch.py +1 -1
- wandb/sdk/launch/_project_spec.py +19 -20
- wandb/sdk/launch/agent/agent.py +3 -3
- wandb/sdk/launch/agent/config.py +1 -1
- wandb/sdk/launch/agent/job_status_tracker.py +2 -2
- wandb/sdk/launch/builder/build.py +2 -3
- wandb/sdk/launch/builder/kaniko_builder.py +5 -4
- wandb/sdk/launch/environment/gcp_environment.py +1 -2
- wandb/sdk/launch/registry/azure_container_registry.py +2 -2
- wandb/sdk/launch/registry/elastic_container_registry.py +2 -2
- wandb/sdk/launch/registry/google_artifact_registry.py +3 -3
- wandb/sdk/launch/runner/abstract.py +5 -5
- wandb/sdk/launch/runner/kubernetes_monitor.py +2 -2
- wandb/sdk/launch/runner/kubernetes_runner.py +1 -1
- wandb/sdk/launch/runner/sagemaker_runner.py +2 -4
- wandb/sdk/launch/runner/vertex_runner.py +2 -7
- wandb/sdk/launch/sweeps/__init__.py +1 -1
- wandb/sdk/launch/sweeps/scheduler.py +2 -2
- wandb/sdk/launch/sweeps/utils.py +3 -3
- wandb/sdk/launch/utils.py +3 -4
- wandb/sdk/lib/apikey.py +5 -8
- wandb/sdk/lib/config_util.py +3 -3
- wandb/sdk/lib/fsm.py +3 -18
- wandb/sdk/lib/gitlib.py +6 -5
- wandb/sdk/lib/ipython.py +2 -2
- wandb/sdk/lib/json_util.py +9 -14
- wandb/sdk/lib/printer.py +3 -8
- wandb/sdk/lib/redirect.py +1 -1
- wandb/sdk/lib/retry.py +3 -7
- wandb/sdk/lib/run_moment.py +2 -2
- wandb/sdk/lib/service_connection.py +3 -1
- wandb/sdk/lib/service_token.py +1 -2
- wandb/sdk/mailbox/mailbox_handle.py +3 -7
- wandb/sdk/mailbox/response_handle.py +2 -6
- wandb/sdk/service/streams.py +3 -7
- wandb/sdk/verify/verify.py +5 -6
- wandb/sdk/wandb_config.py +1 -1
- wandb/sdk/wandb_init.py +38 -106
- wandb/sdk/wandb_login.py +7 -6
- wandb/sdk/wandb_run.py +52 -240
- wandb/sdk/wandb_settings.py +71 -60
- wandb/sdk/wandb_setup.py +40 -14
- wandb/sdk/wandb_watch.py +5 -7
- wandb/sync/__init__.py +1 -1
- wandb/sync/sync.py +13 -13
- wandb/util.py +17 -35
- wandb/wandb_agent.py +8 -11
- {wandb-0.19.12rc1.dist-info → wandb-0.20.1.dist-info}/METADATA +5 -5
- {wandb-0.19.12rc1.dist-info → wandb-0.20.1.dist-info}/RECORD +170 -168
- wandb/docker/auth.py +0 -435
- wandb/docker/www_authenticate.py +0 -94
- {wandb-0.19.12rc1.dist-info → wandb-0.20.1.dist-info}/WHEEL +0 -0
- {wandb-0.19.12rc1.dist-info → wandb-0.20.1.dist-info}/entry_points.txt +0 -0
- {wandb-0.19.12rc1.dist-info → wandb-0.20.1.dist-info}/licenses/LICENSE +0 -0
wandb/integration/keras/callbacks/model_checkpoint.py
CHANGED
@@ -175,10 +175,10 @@ class WandbModelCheckpoint(callbacks.ModelCheckpoint):
     @property
     def is_old_tf_keras_version(self) -> Optional[bool]:
         if self._is_old_tf_keras_version is None:
-            from
+            from packaging.version import parse

            try:
-                if
+                if parse(tf.keras.__version__) < parse("2.6.0"):
                    self._is_old_tf_keras_version = True
                else:
                    self._is_old_tf_keras_version = False
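The hunk above, and the keras.py, kfp_patch.py and fine_tuning.py hunks below, repeat one migration: version gates move from `wandb.util.parse_version` to `packaging.version.parse`. A minimal sketch of that comparison pattern (the version strings and helper name below are illustrative, not taken from the diff):

from packaging.version import parse

# parse() returns a Version object that compares numerically, so "2.10.0"
# correctly sorts after "2.6.0" (plain string comparison would not).
def is_old_release(installed: str, cutoff: str = "2.6.0") -> bool:
    return parse(installed) < parse(cutoff)

assert is_old_release("2.4.1") is True
assert is_old_release("2.10.0") is False  # lexical comparison would get this wrong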
wandb/integration/keras/keras.py
CHANGED
@@ -20,10 +20,9 @@ from wandb.util import add_import_hook

 def _check_keras_version():
     from keras import __version__ as keras_version
+    from packaging.version import parse

-
-
-    if parse_version(keras_version) < parse_version("2.4.0"):
+    if parse(keras_version) < parse("2.4.0"):
         wandb.termwarn(
             f"Keras version {keras_version} is not fully supported. Required keras >= 2.4.0"
         )
@@ -31,9 +30,9 @@ def _check_keras_version():

 def _can_compute_flops() -> bool:
     """FLOPS computation is restricted to TF 2.x as it requires tf.compat.v1."""
-    from
+    from packaging.version import parse

-    if
+    if parse(tf.__version__) >= parse("2.0.0"):
         return True

     return False
@@ -75,15 +74,10 @@ def is_generator_like(data):


 def patch_tf_keras():  # noqa: C901
+    from packaging.version import parse
     from tensorflow.python.eager import context

-
-
-    if (
-        parse_version("2.6.0")
-        <= parse_version(tf.__version__)
-        < parse_version("2.13.0")
-    ):
+    if parse("2.6.0") <= parse(tf.__version__) < parse("2.13.0"):
         keras_engine = "keras.engine"
         try:
             from keras.engine import training
@@ -238,9 +232,9 @@ patch_tf_keras()


 def _get_custom_optimizer_parent_class():
-    from
+    from packaging.version import parse

-    if
+    if parse(tf.__version__) >= parse("2.9.0"):
         custom_optimizer_parent_class = tf.keras.optimizers.legacy.Optimizer
     else:
         custom_optimizer_parent_class = tf.keras.optimizers.Optimizer
@@ -734,9 +728,9 @@ class WandbCallback(tf.keras.callbacks.Callback):
         if self.compute_flops and _can_compute_flops():
             try:
                 wandb.summary["GFLOPs"] = self.get_flops()
-            except Exception
+            except Exception:
+                logger.exception("Error computing FLOPs")
                 wandb.termwarn("Unable to compute FLOPs for this model.")
-                logger.exception(e)

     def on_train_end(self, logs=None):
         if self._model_trained_since_last_eval:
@@ -1018,12 +1012,12 @@ class WandbCallback(tf.keras.callbacks.Callback):
                 self.model.save(self.filepath, overwrite=True)
             # Was getting `RuntimeError: Unable to create link` in TF 1.13.1
             # also saw `TypeError: can't pickle _thread.RLock objects`
-            except (ImportError, RuntimeError, TypeError, AttributeError)
+            except (ImportError, RuntimeError, TypeError, AttributeError):
+                logger.exception("Error saving model in the h5py format")
                 wandb.termerror(
                     "Can't save model in the h5py format. The model will be saved as "
                     "as an W&B Artifact in the 'tf' format."
                 )
-                logger.exception(e)

     def _save_model_as_artifact(self, epoch):
         if wandb.run.disabled:
@@ -1054,7 +1048,7 @@ class WandbCallback(tf.keras.callbacks.Callback):
         if not isinstance(
             self.model, (tf.keras.models.Sequential, tf.keras.models.Model)
         ):
-            raise
+            raise TypeError(
                 "Calculating FLOPS is only supported for "
                 "`tf.keras.Model` and `tf.keras.Sequential` instances."
             )
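A second pattern in the keras.py hunks above: `except ... as e:` followed by `logger.exception(e)` becomes a bare `except ...:` followed by `logger.exception("message")`. Since `logging.Logger.exception` already records the active traceback, the exception object no longer needs to be bound to a name. A small sketch of the new style (logger name, message, and the `get_flops` call are illustrative):

import logging

logger = logging.getLogger(__name__)

def compute_flops_safely(model) -> None:
    try:
        flops = model.get_flops()  # illustrative call; may raise
    except Exception:
        # logger.exception logs at ERROR level and appends the current
        # traceback automatically, so the exception needs no name.
        logger.exception("Error computing FLOPs")
        return
    logger.info("GFLOPs: %s", flops / 1e9)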
wandb/integration/kfp/kfp_patch.py
CHANGED
@@ -10,12 +10,11 @@ try:
     from kfp.components import structures
     from kfp.components._components import _create_task_factory_from_component_spec
     from kfp.components._python_op import _func_to_component_spec
-
-    from wandb.util import parse_version
+    from packaging.version import parse

     MIN_KFP_VERSION = "1.6.1"

-    if
+    if parse(kfp_version) < parse(MIN_KFP_VERSION):
         wandb.termwarn(
             f"Your version of kfp {kfp_version} may not work. This integration requires kfp>={MIN_KFP_VERSION}"
         )
wandb/integration/langchain/wandb_tracer.py
CHANGED
@@ -34,7 +34,7 @@ if version.parse(langchain.__version__) < version.parse("0.0.188"):
     )

 # isort: off
-from langchain.callbacks.tracers import WandbTracer  # noqa: E402
+from langchain.callbacks.tracers import WandbTracer  # noqa: E402


 class WandbTracer(WandbTracer):
wandb/integration/metaflow/metaflow.py
CHANGED
@@ -37,7 +37,7 @@ except ImportError as e:
 try:
     import pandas as pd

-    @dispatch
+    @dispatch
     def _wandb_use(
         name: str,
         data: pd.DataFrame,
@@ -54,7 +54,7 @@ try:
         run.use_artifact(f"{name}:latest")
         wandb.termlog(f"Using artifact: {name} ({type(data)})")

-    @dispatch
+    @dispatch
     def wandb_track(
         name: str,
         data: pd.DataFrame,
@@ -83,7 +83,7 @@ try:
     import torch
     import torch.nn as nn

-    @dispatch
+    @dispatch
     def _wandb_use(
         name: str,
         data: nn.Module,
@@ -100,7 +100,7 @@ try:
         run.use_artifact(f"{name}:latest")
         wandb.termlog(f"Using artifact: {name} ({type(data)})")

-    @dispatch
+    @dispatch
     def wandb_track(
         name: str,
         data: nn.Module,
@@ -128,7 +128,7 @@ except ImportError:
 try:
     from sklearn.base import BaseEstimator

-    @dispatch
+    @dispatch
     def _wandb_use(
         name: str,
         data: BaseEstimator,
@@ -145,7 +145,7 @@ try:
         run.use_artifact(f"{name}:latest")
         wandb.termlog(f"Using artifact: {name} ({type(data)})")

-    @dispatch
+    @dispatch
     def wandb_track(
         name: str,
         data: BaseEstimator,
@@ -194,7 +194,7 @@ class ArtifactProxy:
         return getattr(self.flow, key)


-@dispatch
+@dispatch
 def wandb_track(
     name: str,
     data: Union[dict, list, set, str, int, float, bool],
@@ -209,7 +209,7 @@ def wandb_track(
     run.log({name: data})


-@dispatch
+@dispatch
 def wandb_track(
     name: str, data: Path, datasets=False, run=None, testing=False, *args, **kwargs
 ):
@@ -227,7 +227,7 @@ def wandb_track(


 # this is the base case
-@dispatch
+@dispatch
 def wandb_track(
     name: str, data, others=False, run=None, testing=False, *args, **kwargs
 ):
@@ -242,7 +242,7 @@ def wandb_track(
     wandb.termlog(f"Logging artifact: {name} ({type(data)})")


-@dispatch
+@dispatch
 def wandb_use(name: str, data, *args, **kwargs):
     try:
         return _wandb_use(name, data, *args, **kwargs)
@@ -254,14 +254,14 @@ def wandb_use(name: str, data, *args, **kwargs):
     )


-@dispatch
+@dispatch
 def wandb_use(
     name: str, data: Union[dict, list, set, str, int, float, bool], *args, **kwargs
 ):  # type: ignore
     pass  # do nothing for these types


-@dispatch
+@dispatch
 def _wandb_use(
     name: str, data: Path, datasets=False, run=None, testing=False, *args, **kwargs
 ):  # type: ignore
@@ -273,7 +273,7 @@ def _wandb_use(
     wandb.termlog(f"Using artifact: {name} ({type(data)})")


-@dispatch
+@dispatch
 def _wandb_use(name: str, data, others=False, run=None, testing=False, *args, **kwargs):  # type: ignore
     if testing:
         return "others" if others else None
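The metaflow.py hunks above are formatting-only touches to the `@dispatch` decorator lines; the decorator itself selects a `wandb_track` / `wandb_use` implementation from the annotated type of `data` (multiple dispatch). The standard library's `functools.singledispatch` is an analogous single-argument mechanism; the sketch below uses it only to illustrate the dispatch idea and is not the integration's actual machinery:

from functools import singledispatch
from pathlib import Path

@singledispatch
def track(data, name: str) -> str:
    # Fallback: the base case when no more specific overload matches.
    return f"logging {name} as a generic artifact"

@track.register
def _(data: dict, name: str) -> str:
    return f"logging {name} as run metadata"

@track.register
def _(data: Path, name: str) -> str:
    return f"logging {name} as a dataset artifact"

print(track({"lr": 0.01}, "config"))    # dispatches on dict
print(track(Path("train.csv"), "raw"))  # dispatches on Path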
wandb/integration/openai/fine_tuning.py
CHANGED
@@ -8,12 +8,13 @@ import tempfile
 import time
 from typing import Any, Dict, List, Optional, Tuple, Union

+from packaging.version import parse
+
 import wandb
 from wandb import util
 from wandb.data_types import Table
 from wandb.sdk.lib import telemetry
 from wandb.sdk.wandb_run import Run
-from wandb.util import parse_version

 openai = util.get_module(
     name="openai",
@@ -21,7 +22,7 @@ openai = util.get_module(
     lazy=False,
 )

-if
+if parse(openai.__version__) < parse("1.12.0"):
     raise wandb.Error(
         f"This integration requires openai version 1.12.0 and above. Your current version is {openai.__version__} "
         "To fix, please `pip install -U openai`"
wandb/integration/sagemaker/auth.py
CHANGED
@@ -2,6 +2,7 @@ import os

 import wandb
 from wandb import env
+from wandb.sdk import wandb_setup


 def sagemaker_auth(overrides=None, path=".", api_key=None):
@@ -12,7 +13,7 @@ def sagemaker_auth(overrides=None, path=".", api_key=None):
             to secrets.env
         path (str, optional): The path to write the secrets file.
     """
-    settings =
+    settings = wandb_setup.singleton().settings
     current_api_key = wandb.wandb_lib.apikey.api_key(settings=settings)

     overrides = overrides or dict()
wandb/integration/sklearn/utils.py
CHANGED
@@ -114,10 +114,11 @@ def test_fitted(model):
                 ],
                 all_or_any=any,
             )
-            return True
         except sklearn.exceptions.NotFittedError:
             wandb.termerror("Please fit the model before passing it in.")
             return False
+        else:
+            return True
     except Exception:
         # Assume it's fitted, since ``NotFittedError`` wasn't raised
         return True
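This `test_fitted` hunk, and the matching one in wandb/plot/utils.py below, move `return True` out of the `try` body and into an `else:` clause, so success is reported only when the fit check raised no exception. A minimal, self-contained sketch of that control flow (the `validate` helper and `NotFittedError` stand-in are hypothetical):

class NotFittedError(Exception):
    """Stand-in for sklearn.exceptions.NotFittedError."""

def validate(model) -> None:
    # Hypothetical check: raise if the model has no learned attributes.
    if not getattr(model, "fitted_", False):
        raise NotFittedError

def check(model) -> bool:
    try:
        validate(model)
    except NotFittedError:
        print("Please fit the model before passing it in.")
        return False
    else:
        # Runs only when the try block finished without raising,
        # which is exactly when success should be reported.
        return True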
wandb/integration/tensorboard/log.py
CHANGED
@@ -167,11 +167,8 @@ def tf_summary_to_dict(  # noqa: C901
                 )
             except ValueError:
                 wandb.termwarn(
-                    'Not logging key "{}". '
-                    "Histograms must have fewer than {} bins"
-                        namespaced_tag(value.tag, namespace),
-                        wandb.Histogram.MAX_LENGTH,
-                    ),
+                    f'Not logging key "{namespaced_tag(value.tag, namespace)}". '
+                    f"Histograms must have fewer than {wandb.Histogram.MAX_LENGTH} bins",
                     repeat=False,
                 )
             elif plugin_name == "pr_curves":
wandb/jupyter.py
CHANGED
@@ -148,7 +148,7 @@ class WandBMagics(Magics):
         if path:
             _display_by_wandb_path(path, height=height)
             displayed = True
-        elif run := wandb_setup.
+        elif run := wandb_setup.singleton().most_recent_active_run:
             _display_wandb_run(run, height=height)
             displayed = True
         else:
@@ -246,12 +246,11 @@ def notebook_metadata(silent: bool) -> dict[str, str]:

         if jupyter_metadata:
             return jupyter_metadata
-        wandb.termerror(error_message)
-        return {}
     except Exception:
-        wandb.termerror(error_message)
         logger.exception(error_message)
-
+
+    wandb.termerror(error_message)
+    return {}


 def jupyter_servers_and_kernel_id():
@@ -273,10 +272,11 @@ def jupyter_servers_and_kernel_id():
             servers.extend(list(serverapp.list_running_servers()))
         if notebookapp is not None:
             servers.extend(list(notebookapp.list_running_servers()))
-        return servers, kernel_id
     except (AttributeError, ValueError, ImportError):
         return [], None

+    return servers, kernel_id
+

 def attempt_colab_load_ipynb():
     colab = wandb.util.get_module("google.colab")
@@ -289,17 +289,20 @@ def attempt_colab_load_ipynb():

 def attempt_kaggle_load_ipynb():
     kaggle = wandb.util.get_module("kaggle_session")
-    if kaggle:
-
-
-
-
-
-
-
-
-
+    if not kaggle:
+        return None
+
+    try:
+        client = kaggle.UserSessionClient()
+        parsed = json.loads(client.get_exportable_ipynb()["source"])
+        # TODO: couldn't find a way to get the name of the notebook...
+        parsed["metadata"]["name"] = "kaggle.ipynb"
+    except Exception:
+        wandb.termerror("Unable to load kaggle notebook.")
+        logger.exception("Unable to load kaggle notebook.")
+        return None
+
+    return parsed


 def attempt_colab_login(
wandb/plot/confusion_matrix.py
CHANGED
@@ -150,7 +150,7 @@ def confusion_matrix(
     else:
         class_idx = set(preds).union(set(y_true))
         n_classes = len(class_idx)
-        class_names = [f"Class_{i+1}" for i in range(n_classes)]
+        class_names = [f"Class_{i + 1}" for i in range(n_classes)]

     # Create a mapping from class name to index
     class_mapping = {val: i for i, val in enumerate(sorted(list(class_idx)))}
wandb/plot/utils.py
CHANGED
@@ -13,7 +13,7 @@ def test_missing(**kwargs):
     for k, v in kwargs.items():
         # Missing/empty params/datapoint arrays
         if v is None:
-            wandb.termerror("{} is None. Please try again."
+            wandb.termerror(f"{k} is None. Please try again.")
             test_passed = False
         if (k == "X") or (k == "X_test"):
             if isinstance(v, scipy.sparse.csr.csr_matrix):
@@ -98,10 +98,11 @@ def test_fitted(model):
                 ],
                 all_or_any=any,
             )
-            return True
         except scikit_exceptions.NotFittedError:
             wandb.termerror("Please fit the model before passing it in.")
             return False
+        else:
+            return True
     except Exception:
         # Assume it's fitted, since ``NotFittedError`` wasn't raised
         return True
@@ -159,25 +160,25 @@ def test_types(**kwargs):
             list,
         ),
     ):
-        wandb.termerror("{} is not an array. Please try again."
+        wandb.termerror(f"{k} is not an array. Please try again.")
         test_passed = False
     # check for classifier types
     if k == "model":
         if (not base.is_classifier(v)) and (not base.is_regressor(v)):
             wandb.termerror(
-                "{} is not a classifier or regressor. Please try again."
+                f"{k} is not a classifier or regressor. Please try again."
             )
             test_passed = False
     elif k == "clf" or k == "binary_clf":
         if not (base.is_classifier(v)):
-            wandb.termerror("{} is not a classifier. Please try again."
+            wandb.termerror(f"{k} is not a classifier. Please try again.")
             test_passed = False
     elif k == "regressor":
         if not base.is_regressor(v):
-            wandb.termerror("{} is not a regressor. Please try again."
+            wandb.termerror(f"{k} is not a regressor. Please try again.")
             test_passed = False
     elif k == "clusterer":
         if not (getattr(v, "_estimator_type", None) == "clusterer"):
-            wandb.termerror("{} is not a clusterer. Please try again."
+            wandb.termerror(f"{k} is not a clusterer. Please try again.")
             test_passed = False
     return test_passed