keras-hub-nightly 0.21.0.dev202505280410__py3-none-any.whl → 0.22.0.dev202505290412__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
keras_hub/src/models/vit/vit_backbone.py CHANGED
@@ -18,10 +18,10 @@ class ViTBackbone(Backbone):
 
     Args:
         image_shape: A tuple or list of 3 integers representing the shape of the
-            input image `(height, width, channels)`, `height` and `width` must
-            be equal.
-        patch_size: int. The size of each image patch, the input image will be
-            divided into patches of shape `(patch_size, patch_size)`.
+            input image `(height, width, channels)`.
+        patch_size: int or (int, int). The size of each image patch, the input
+            image will be divided into patches of shape
+            `(patch_size_h, patch_size_w)`.
         num_layers: int. The number of transformer encoder layers.
         num_heads: int. specifying the number of attention heads in each
             Transformer encoder layer.
@@ -37,6 +37,10 @@ class ViTBackbone(Backbone):
         use_mha_bias: bool. Whether to use bias in the multi-head
             attention layers.
         use_mlp_bias: bool. Whether to use bias in the MLP layers.
+        use_class_token: bool. Whether to use class token to be part of
+            patch embedding. Defaults to `True`.
+        use_patch_bias: bool. Whether to use bias in Conv2d of patch embedding
+            layer. Defaults to `True`.
         data_format: str. `"channels_last"` or `"channels_first"`, specifying
             the data format for the input image. If `None`, defaults to
             `"channels_last"`.
@@ -58,6 +62,8 @@ class ViTBackbone(Backbone):
         layer_norm_epsilon=1e-6,
         use_mha_bias=True,
         use_mlp_bias=True,
+        use_class_token=True,
+        use_patch_bias=True,
         data_format=None,
         dtype=None,
         **kwargs,
@@ -74,24 +80,34 @@ class ViTBackbone(Backbone):
                 f"at index {h_axis} (height) or {w_axis} (width). "
                 f"Image shape: {image_shape}"
             )
-        if image_shape[h_axis] != image_shape[w_axis]:
+
+        if isinstance(patch_size, int):
+            patch_size = (patch_size, patch_size)
+
+        if image_shape[h_axis] % patch_size[0] != 0:
+            raise ValueError(
+                f"Input height {image_shape[h_axis]} should be divisible by "
+                f"patch size {patch_size[0]}."
+            )
+
+        if image_shape[w_axis] % patch_size[1] != 0:
             raise ValueError(
-                f"Image height and width must be equal. Found height: "
-                f"{image_shape[h_axis]}, width: {image_shape[w_axis]} at "
-                f"indices {h_axis} and {w_axis} respectively. Image shape: "
-                f"{image_shape}"
+                f"Input width {image_shape[h_axis]} should be divisible by "
+                f"patch size {patch_size[1]}."
             )
 
         num_channels = image_shape[channels_axis]
 
         # === Functional Model ===
-        inputs = keras.layers.Input(shape=image_shape)
+        inputs = keras.layers.Input(shape=image_shape, name="images")
 
         x = ViTPatchingAndEmbedding(
-            image_size=image_shape[h_axis],
+            image_size=(image_shape[h_axis], image_shape[w_axis]),
             patch_size=patch_size,
             hidden_dim=hidden_dim,
             num_channels=num_channels,
+            use_class_token=use_class_token,
+            use_patch_bias=use_patch_bias,
             data_format=data_format,
             dtype=dtype,
             name="vit_patching_and_embedding",
@@ -130,6 +146,8 @@ class ViTBackbone(Backbone):
         self.layer_norm_epsilon = layer_norm_epsilon
         self.use_mha_bias = use_mha_bias
         self.use_mlp_bias = use_mlp_bias
+        self.use_class_token = use_class_token
+        self.use_patch_bias = use_patch_bias
         self.data_format = data_format
 
     def get_config(self):
@@ -147,6 +165,8 @@ class ViTBackbone(Backbone):
                 "layer_norm_epsilon": self.layer_norm_epsilon,
                 "use_mha_bias": self.use_mha_bias,
                 "use_mlp_bias": self.use_mlp_bias,
+                "use_class_token": self.use_class_token,
+                "use_patch_bias": self.use_patch_bias,
             }
         )
         return config
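With these changes `ViTBackbone` accepts rectangular inputs and per-axis patch sizes, plus the new `use_class_token` and `use_patch_bias` flags. A minimal sketch of the updated constructor, with illustrative (non-preset) hyperparameters and assuming the public `keras_hub.models.ViTBackbone` export and its usual `num_layers`/`num_heads`/`hidden_dim`/`mlp_dim` arguments:

```python
import numpy as np
import keras_hub

# Height and width only need to be divisible by the matching patch dimension.
backbone = keras_hub.models.ViTBackbone(
    image_shape=(256, 128, 3),  # non-square input, now allowed
    patch_size=(16, 8),         # per-axis patches: (256 // 16) * (128 // 8) = 256
    num_layers=2,
    num_heads=2,
    hidden_dim=64,
    mlp_dim=128,
    use_class_token=True,   # prepend a learnable class token
    use_patch_bias=True,    # bias in the patch-embedding Conv2D
)
features = backbone(np.ones((1, 256, 128, 3), dtype="float32"))
# Assuming the backbone returns the full token sequence, the shape is
# (1, 257, 64): 256 patches plus the class token, each of width hidden_dim.
```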
keras_hub/src/models/vit/vit_image_converter.py CHANGED
@@ -1,78 +1,8 @@
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
 from keras_hub.src.models.vit.vit_backbone import ViTBackbone
-from keras_hub.src.utils.tensor_utils import preprocessing_function
 
 
 @keras_hub_export("keras_hub.layers.ViTImageConverter")
 class ViTImageConverter(ImageConverter):
-    """Converts images to the format expected by a ViT model.
-
-    This layer performs image normalization using mean and standard deviation
-    values. By default, it uses the same normalization as the
-    "google/vit-large-patch16-224" model on Hugging Face:
-    `norm_mean=[0.5, 0.5, 0.5]` and `norm_std=[0.5, 0.5, 0.5]`
-    ([reference](https://huggingface.co/google/vit-large-patch16-224/blob/main/preprocessor_config.json)).
-    These defaults are suitable for models pretrained using this normalization.
-
-    Args:
-        norm_mean: list or tuple of floats. Mean values for image normalization.
-            Defaults to `[0.5, 0.5, 0.5]`.
-        norm_std: list or tuple of floats. Standard deviation values for
-            image normalization. Defaults to `[0.5, 0.5, 0.5]`.
-        **kwargs: Additional keyword arguments passed to
-            `keras_hub.layers.preprocessing.ImageConverter`.
-
-    Examples:
-    ```python
-    import keras
-    import numpy as np
-    from keras_hub.src.layers import ViTImageConverter
-
-    # Example image (replace with your actual image data)
-    image = np.random.rand(1, 224, 224, 3)  # Example: (B, H, W, C)
-
-    # Create a ViTImageConverter instance
-    converter = ViTImageConverter(
-        image_size=(28,28),
-        scale=1/255.
-    )
-    # Preprocess the image
-    preprocessed_image = converter(image)
-    ```
-    """
-
     backbone_cls = ViTBackbone
-
-    def __init__(
-        self, norm_mean=[0.5, 0.5, 0.5], norm_std=[0.5, 0.5, 0.5], **kwargs
-    ):
-        super().__init__(**kwargs)
-        self.norm_mean = norm_mean
-        self.norm_std = norm_std
-
-    @preprocessing_function
-    def call(self, inputs):
-        # TODO: Remove this whole function. Why can just use scale and offset
-        # in the base class.
-        x = super().call(inputs)
-        if self.norm_mean:
-            norm_mean = self._expand_non_channel_dims(self.norm_mean, x)
-            x, norm_mean = self._convert_types(x, norm_mean, self.compute_dtype)
-            x = x - norm_mean
-        if self.norm_std:
-            norm_std = self._expand_non_channel_dims(self.norm_std, x)
-            x, norm_std = self._convert_types(x, norm_std, x.dtype)
-            x = x / norm_std
-
-        return x
-
-    def get_config(self):
-        config = super().get_config()
-        config.update(
-            {
-                "norm_mean": self.norm_mean,
-                "norm_std": self.norm_std,
-            }
-        )
-        return config
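The custom `norm_mean`/`norm_std` path is gone; as the removed TODO comment suggests, the same normalization can be expressed with the base `ImageConverter`'s `scale` and `offset` arguments. A sketch of the equivalent arithmetic, assuming inputs in `[0, 255]` and that scale/offset are applied as `x * scale + offset`:

```python
from keras_hub.layers import ViTImageConverter

# Old default behaviour was (x / 255 - 0.5) / 0.5,
# which is algebraically x * (1 / 127.5) - 1, so:
converter = ViTImageConverter(
    image_size=(224, 224),
    scale=1 / 127.5,
    offset=-1.0,
)
```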
keras_hub/src/models/vit/vit_layers.py CHANGED
@@ -75,12 +75,13 @@ class ViTPatchingAndEmbedding(keras.layers.Layer):
     """Patches the image and embeds the patches.
 
     Args:
-        image_size: int. Size of the input image (height or width).
-            Assumed to be square.
-        patch_size: int. Size of each image patch.
+        image_size: (int, int). Size of the input image.
+        patch_size: (int, int). Size of each image patch.
         hidden_dim: int. Dimensionality of the patch embeddings.
         num_channels: int. Number of channels in the input image. Defaults to
             `3`.
+        use_class_token: bool. Whether to use class token to be part of
+            patch embedding. Defaults to `True`.
         data_format: str. `"channels_last"` or `"channels_first"`. Defaults to
             `None` (which uses `"channels_last"`).
         **kwargs: Additional keyword arguments passed to `keras.layers.Layer`
@@ -92,12 +93,15 @@ class ViTPatchingAndEmbedding(keras.layers.Layer):
         patch_size,
         hidden_dim,
         num_channels=3,
+        use_class_token=True,
+        use_patch_bias=True,
         data_format=None,
         **kwargs,
     ):
         super().__init__(**kwargs)
-        num_patches = (image_size // patch_size) ** 2
-        num_positions = num_patches + 1
+        grid_size = tuple([s // p for s, p in zip(image_size, patch_size)])
+        num_patches = grid_size[0] * grid_size[1]
+        num_positions = num_patches + 1 if use_class_token else num_patches
 
         # === Config ===
         self.image_size = image_size
@@ -106,19 +110,22 @@ class ViTPatchingAndEmbedding(keras.layers.Layer):
         self.num_channels = num_channels
         self.num_patches = num_patches
         self.num_positions = num_positions
+        self.use_class_token = use_class_token
+        self.use_patch_bias = use_patch_bias
         self.data_format = standardize_data_format(data_format)
 
     def build(self, input_shape):
-        self.class_token = self.add_weight(
-            shape=(
-                1,
-                1,
-                self.hidden_dim,
-            ),
-            initializer="random_normal",
-            dtype=self.variable_dtype,
-            name="class_token",
-        )
+        if self.use_class_token:
+            self.class_token = self.add_weight(
+                shape=(
+                    1,
+                    1,
+                    self.hidden_dim,
+                ),
+                initializer="random_normal",
+                dtype=self.variable_dtype,
+                name="class_token",
+            )
         self.patch_embedding = keras.layers.Conv2D(
             filters=self.hidden_dim,
             kernel_size=self.patch_size,
@@ -127,6 +134,7 @@ class ViTPatchingAndEmbedding(keras.layers.Layer):
             activation=None,
             dtype=self.dtype_policy,
             data_format=self.data_format,
+            use_bias=self.use_patch_bias,
             name="patch_embedding",
         )
         self.patch_embedding.build(input_shape)
@@ -153,10 +161,16 @@ class ViTPatchingAndEmbedding(keras.layers.Layer):
         patch_embeddings = ops.reshape(
             patch_embeddings, [embeddings_shape[0], -1, embeddings_shape[-1]]
         )
-        class_token = ops.tile(self.class_token, (embeddings_shape[0], 1, 1))
         position_embeddings = self.position_embedding(self.position_ids)
-        embeddings = ops.concatenate([class_token, patch_embeddings], axis=1)
-        return ops.add(embeddings, position_embeddings)
+
+        if self.use_class_token:
+            class_token = ops.tile(
+                self.class_token, (embeddings_shape[0], 1, 1)
+            )
+            patch_embeddings = ops.concatenate(
+                [class_token, patch_embeddings], axis=1
+            )
+        return ops.add(patch_embeddings, position_embeddings)
 
     def compute_output_shape(self, input_shape):
         return (
@@ -175,6 +189,7 @@ class ViTPatchingAndEmbedding(keras.layers.Layer):
                 "num_channels": self.num_channels,
                 "num_patches": self.num_patches,
                 "num_positions": self.num_positions,
+                "use_class_token": self.use_class_token,
             }
         )
         return config
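The patch/position bookkeeping now works on `(height, width)` pairs rather than a single square size. A worked example of the new `grid_size` arithmetic from `__init__`, with illustrative sizes:

```python
# Mirrors the updated logic in ViTPatchingAndEmbedding.__init__.
image_size = (256, 128)  # (height, width)
patch_size = (16, 8)
use_class_token = True

grid_size = tuple([s // p for s, p in zip(image_size, patch_size)])  # (16, 16)
num_patches = grid_size[0] * grid_size[1]  # 256
# One extra position is reserved for the class token when it is enabled.
num_positions = num_patches + 1 if use_class_token else num_patches  # 257
print(grid_size, num_patches, num_positions)
```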
keras_hub/src/models/vit/vit_presets.py CHANGED
@@ -11,7 +11,7 @@ backbone_presets = {
             "params": 85798656,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet/3",
     },
     "vit_base_patch16_384_imagenet": {
         "metadata": {
@@ -22,7 +22,7 @@ backbone_presets = {
             "params": 86090496,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_384_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_384_imagenet/3",
     },
     "vit_large_patch16_224_imagenet": {
         "metadata": {
@@ -33,7 +33,7 @@ backbone_presets = {
             "params": 303301632,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet/3",
     },
     "vit_large_patch16_384_imagenet": {
         "metadata": {
@@ -44,7 +44,7 @@ backbone_presets = {
             "params": 303690752,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_384_imagenet/2",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_384_imagenet/3",
     },
     "vit_base_patch32_384_imagenet": {
         "metadata": {
@@ -55,7 +55,7 @@ backbone_presets = {
             "params": 87528192,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_384_imagenet/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_384_imagenet/2",
     },
     "vit_large_patch32_384_imagenet": {
         "metadata": {
@@ -66,7 +66,7 @@ backbone_presets = {
             "params": 305607680,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_384_imagenet/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_384_imagenet/2",
     },
     "vit_base_patch16_224_imagenet21k": {
         "metadata": {
@@ -77,7 +77,7 @@ backbone_presets = {
             "params": 85798656,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch16_224_imagenet21k/2",
     },
     "vit_base_patch32_224_imagenet21k": {
         "metadata": {
@@ -88,7 +88,7 @@ backbone_presets = {
             "params": 87455232,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_base_patch32_224_imagenet21k/2",
    },
     "vit_huge_patch14_224_imagenet21k": {
         "metadata": {
@@ -99,7 +99,7 @@ backbone_presets = {
             "params": 630764800,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_huge_patch14_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_huge_patch14_224_imagenet21k/2",
     },
     "vit_large_patch16_224_imagenet21k": {
         "metadata": {
@@ -110,7 +110,7 @@ backbone_presets = {
             "params": 303301632,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch16_224_imagenet21k/2",
     },
     "vit_large_patch32_224_imagenet21k": {
         "metadata": {
@@ -121,6 +121,6 @@ backbone_presets = {
             "params": 305510400,
             "path": "vit",
         },
-        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_224_imagenet21k/1",
+        "kaggle_handle": "kaggle://keras/vit/keras/vit_large_patch32_224_imagenet21k/2",
     },
 }
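Every ViT preset's `kaggle_handle` is bumped by one revision, so the hosted weights were republished alongside the backbone changes. Loading code is unchanged; a quick sketch, assuming the usual `from_preset` constructor:

```python
import keras_hub

# Resolves to the bumped handle, e.g. ".../vit_base_patch16_224_imagenet/3".
backbone = keras_hub.models.ViTBackbone.from_preset("vit_base_patch16_224_imagenet")
```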
keras_hub/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.21.0.dev202505280410"
+__version__ = "0.22.0.dev202505290412"
 
 
 @keras_hub_export("keras_hub.version")
keras_hub_nightly-0.22.0.dev202505290412.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-hub-nightly
-Version: 0.21.0.dev202505280410
+Version: 0.22.0.dev202505290412
 Summary: Pretrained models for Keras.
 Author-email: Keras team <keras-users@googlegroups.com>
 License-Expression: Apache-2.0
keras_hub_nightly-0.22.0.dev202505290412.dist-info/RECORD CHANGED
@@ -5,7 +5,7 @@ keras_hub/models/__init__.py,sha256=itSzodVUeuX6HQnmsSXY0Wv-5Htbu397410R-SFW_4I,
 keras_hub/samplers/__init__.py,sha256=aFQIkiqbZpi8vjrPp2MVII4QUfE-eQjra5fMeHsoy7k,886
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version.py,sha256=LkNchDcdwovYSb5xEjf09V8MHim3X9wy6VrAyA_7afw,222
+keras_hub/src/version.py,sha256=DDvaRSyKJcjRMYdIJIroiLLIbnEZPXF5mlsR_VQNowQ,222
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -409,12 +409,12 @@ keras_hub/src/models/vgg/vgg_image_classifier_preprocessor.py,sha256=M7hBbDPws5Z
 keras_hub/src/models/vgg/vgg_image_converter.py,sha256=FKVrSNNBxIkiKvApzf4TZxidBb1z917Xs9nooHCcRLM,324
 keras_hub/src/models/vgg/vgg_presets.py,sha256=UL7a8hdZ22duMADXwVypGnc20ME-ywI4QjtXu15usEI,1491
 keras_hub/src/models/vit/__init__.py,sha256=GH7x3VjEXZLm-4F-c9-55QZE0lP2OLVICH0Hr5YCp9A,239
-keras_hub/src/models/vit/vit_backbone.py,sha256=kGmRZO4u-1q4PBcbhJbiWVIEVYAcp2H4SPJgQimrJd0,5909
+keras_hub/src/models/vit/vit_backbone.py,sha256=VnypiTAf0ORaBTVzdDOXsnKnQxKbrIlX9z9qOumZH50,6699
 keras_hub/src/models/vit/vit_image_classifier.py,sha256=lMVxiD1_6drx7XQ7P7YzlqnFP7kT1zlMe84f-T3SDQI,6332
 keras_hub/src/models/vit/vit_image_classifier_preprocessor.py,sha256=wu6YcBlXMWB9sKCPvmNdGBZKTLQt_HyHWS6P9nyDwsk,504
-keras_hub/src/models/vit/vit_image_converter.py,sha256=IQYgLOhnsXudZ_S344lzGvO6pbMOhXK6rW12Q3kHykI,2824
-keras_hub/src/models/vit/vit_layers.py,sha256=_cZ1FMYEXcnjwvNPVJXug3rEbatv89OzRTMuzx62dnA,13312
-keras_hub/src/models/vit/vit_presets.py,sha256=zZhxUleOom1ie3gn0Mi-_xhhdFEEsnqSQyKADV2L38k,4479
+keras_hub/src/models/vit/vit_image_converter.py,sha256=JhdXcbfKu9pKSJZiaKk7FKf_CjSXztSa2rsBFQvlgAo,324
+keras_hub/src/models/vit/vit_layers.py,sha256=c0ApxF7cMqeEEa0LcWrBhc6zIolwOFVb2HjzLV-q98k,13940
+keras_hub/src/models/vit/vit_presets.py,sha256=mlLBJxxonru14fBiMnMF4ud-JgbJHclpVV3FsoIubrk,4479
 keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=DOZ5J7c1t5PAZ6y0pMmBoQTMOUup7UoUrYVfCs69ltY,7697
 keras_hub/src/models/vit_det/vit_layers.py,sha256=mnwu56chMc6zxmfp_hsLdR7TXYy1_YsWy1KwGX9M5Ic,19840
@@ -502,7 +502,7 @@ keras_hub/src/utils/transformers/preset_loader.py,sha256=1nfS5xVsl-JROGXJXltTqV1
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
 keras_hub/tokenizers/__init__.py,sha256=uMjjm0mzUkRb0e4Ac_JK8aJ9cKGUi5UqmzWoWAFJprE,4164
 keras_hub/utils/__init__.py,sha256=jXPqVGBpJr_PpYmqD8aDG-fRMlxH-ulqCR2SZMn288Y,646
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/METADATA,sha256=DW6jOe7Tbk32rdB5bnZHYlyBZYuzTYIui1EoKkhPMpY,7393
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.21.0.dev202505280410.dist-info/RECORD,,
+keras_hub_nightly-0.22.0.dev202505290412.dist-info/METADATA,sha256=W4vT73-ho1j4QwQv59qS5xF4i6bWH5k7tHiUJ7-_y4k,7393
+keras_hub_nightly-0.22.0.dev202505290412.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_hub_nightly-0.22.0.dev202505290412.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.22.0.dev202505290412.dist-info/RECORD,,