keras-hub-nightly 0.22.0.dev202507160421__py3-none-any.whl → 0.22.0.dev202507170424__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. keras_hub/layers/__init__.py +3 -0
  2. keras_hub/models/__init__.py +3 -0
  3. keras_hub/src/models/clip/clip_backbone.py +3 -102
  4. keras_hub/src/models/clip/clip_layers.py +295 -0
  5. keras_hub/src/models/clip/clip_preprocessor.py +57 -48
  6. keras_hub/src/models/clip/clip_text_encoder.py +2 -2
  7. keras_hub/src/models/clip/clip_vision_encoder.py +3 -3
  8. keras_hub/src/models/dinov2/__init__.py +5 -0
  9. keras_hub/src/models/dinov2/dinov2_backbone.py +228 -0
  10. keras_hub/src/models/dinov2/dinov2_image_converter.py +8 -0
  11. keras_hub/src/models/dinov2/dinov2_layers.py +886 -0
  12. keras_hub/src/models/dinov2/dinov2_presets.py +4 -0
  13. keras_hub/src/models/flux/flux_text_to_image_preprocessor.py +6 -2
  14. keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py +16 -7
  15. keras_hub/src/models/stable_diffusion_3/mmdit.py +61 -4
  16. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py +23 -32
  17. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py +1 -0
  18. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py +1 -0
  19. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py +1 -0
  20. keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py +6 -2
  21. keras_hub/src/utils/preset_utils.py +4 -1
  22. keras_hub/src/utils/transformers/convert_dinov2.py +180 -0
  23. keras_hub/src/utils/transformers/export/gemma.py +89 -0
  24. keras_hub/src/utils/transformers/export/hf_exporter.py +98 -0
  25. keras_hub/src/utils/transformers/preset_loader.py +4 -1
  26. keras_hub/src/version.py +1 -1
  27. {keras_hub_nightly-0.22.0.dev202507160421.dist-info → keras_hub_nightly-0.22.0.dev202507170424.dist-info}/METADATA +1 -1
  28. {keras_hub_nightly-0.22.0.dev202507160421.dist-info → keras_hub_nightly-0.22.0.dev202507170424.dist-info}/RECORD +30 -23
  29. keras_hub/src/models/clip/clip_encoder_block.py +0 -111
  30. keras_hub/src/models/clip/clip_vision_embedding.py +0 -101
  31. {keras_hub_nightly-0.22.0.dev202507160421.dist-info → keras_hub_nightly-0.22.0.dev202507170424.dist-info}/WHEEL +0 -0
  32. {keras_hub_nightly-0.22.0.dev202507160421.dist-info → keras_hub_nightly-0.22.0.dev202507170424.dist-info}/top_level.txt +0 -0
keras_hub/src/utils/transformers/preset_loader.py CHANGED
@@ -7,6 +7,7 @@ from keras_hub.src.utils.transformers import convert_albert
 from keras_hub.src.utils.transformers import convert_bart
 from keras_hub.src.utils.transformers import convert_bert
 from keras_hub.src.utils.transformers import convert_deit
+from keras_hub.src.utils.transformers import convert_dinov2
 from keras_hub.src.utils.transformers import convert_distilbert
 from keras_hub.src.utils.transformers import convert_gemma
 from keras_hub.src.utils.transformers import convert_gpt2
@@ -35,7 +36,9 @@ class TransformersPresetLoader(PresetLoader):
             self.converter = convert_deit
         elif model_type == "distilbert":
             self.converter = convert_distilbert
-        elif model_type == "gemma" or model_type == "gemma2":
+        elif model_type in ("dinov2", "dinov2_with_registers"):
+            self.converter = convert_dinov2
+        elif model_type in ("gemma", "gemma2"):
             self.converter = convert_gemma
         elif model_type == "gpt2":
             self.converter = convert_gpt2
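
With this dispatch in place, Hugging Face DINOv2 checkpoints (including the "with registers" variant) are routed through the new convert_dinov2 module when loaded as a preset. A minimal usage sketch, assuming a standard DINOv2 checkpoint on the Hugging Face Hub (the repo handle below is illustrative, not taken from this diff):

import keras_hub

# A config with model_type "dinov2" or "dinov2_with_registers" should now
# resolve to convert_dinov2 during preset loading.
backbone = keras_hub.models.Backbone.from_preset("hf://facebook/dinov2-base")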
keras_hub/src/version.py CHANGED
@@ -1,7 +1,7 @@
 from keras_hub.src.api_export import keras_hub_export
 
 # Unique source of truth for the version number.
-__version__ = "0.22.0.dev202507160421"
+__version__ = "0.22.0.dev202507170424"
 
 
 @keras_hub_export("keras_hub.version")
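
For reference, the function decorated by keras_hub_export here is the public accessor for this string; a minimal check, assuming the wheel is installed:

import keras_hub

# Returns the nightly version string defined in keras_hub/src/version.py.
print(keras_hub.version())  # "0.22.0.dev202507170424"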
{keras_hub_nightly-0.22.0.dev202507160421.dist-info → keras_hub_nightly-0.22.0.dev202507170424.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: keras-hub-nightly
-Version: 0.22.0.dev202507160421
+Version: 0.22.0.dev202507170424
 Summary: Pretrained models for Keras.
 Author-email: Keras team <keras-users@googlegroups.com>
 License-Expression: Apache-2.0
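
The bumped Version field is also what standard packaging tooling reports for an installed wheel; a quick sketch:

from importlib.metadata import version

# Reads the Version field from the installed distribution's METADATA.
print(version("keras-hub-nightly"))  # "0.22.0.dev202507170424"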
{keras_hub_nightly-0.22.0.dev202507160421.dist-info → keras_hub_nightly-0.22.0.dev202507170424.dist-info}/RECORD CHANGED
@@ -1,11 +1,11 @@
 keras_hub/__init__.py,sha256=bJbUZkqwhZvTb1Tqx1fbkq6mzBYiEyq-Hin3oQIkhdE,558
-keras_hub/layers/__init__.py,sha256=T1XBtpT0UH9-r0Jc-ljCxtZD_ccapf86ByvUgzdRbvg,5311
+keras_hub/layers/__init__.py,sha256=SMkchjCbNydCBULOFC1pzZRaD-KWZ2CaH6CEVf1MRWE,5428
 keras_hub/metrics/__init__.py,sha256=KYalsMPBnfwim9BdGHFfJ5WxUKFXOQ1QoKIMT_0lwlM,439
-keras_hub/models/__init__.py,sha256=52UNIL7my_9g6ubPtOMDnGYeuGD4SOldfnGTVRMKTeE,27558
+keras_hub/models/__init__.py,sha256=-Rz2qSdQezPihXYnZlaG3S18HLDrCe37pZjzNIPU7D8,27656
 keras_hub/samplers/__init__.py,sha256=aFQIkiqbZpi8vjrPp2MVII4QUfE-eQjra5fMeHsoy7k,886
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version.py,sha256=bGpyIjY7qgiepM6bItxrtWv4j03uzqa1cwuFabkaRtE,222
+keras_hub/src/version.py,sha256=-Bevtrx2qCQT5lthAimfMBv46nlIAEIb7EDasoOPGcE,222
 keras_hub/src/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/layers/modeling/alibi_bias.py,sha256=1XBTHI52L_iJDhN_w5ydu_iMhCuTgQAxEPwcLA6BPuk,4411
@@ -102,15 +102,14 @@ keras_hub/src/models/bloom/bloom_decoder.py,sha256=fda8iX4wzx2M8AoLX7fDHkyoir89K
 keras_hub/src/models/bloom/bloom_presets.py,sha256=7RptuZi__oJyiX6X4xE5ToANcEwsmLDqhuEKwFyKIPU,3215
 keras_hub/src/models/bloom/bloom_tokenizer.py,sha256=6Konh7B_L9BqgjkA0z8-APFpr9sQmQPuAJFZSsCIClU,2574
 keras_hub/src/models/clip/__init__.py,sha256=NcjBkTNWxLY4Ss9wV-NW9iS8k6AwMiS2ARMcxr6KEps,245
-keras_hub/src/models/clip/clip_backbone.py,sha256=AyVhLwFg5nLFSaoaL8mLuNkK9uBPJ9y5FMQu4psTGvo,9877
-keras_hub/src/models/clip/clip_encoder_block.py,sha256=4Jxqb0Pq3Joh-lHDq-Y2c8v-gcMm1sDjPID4eRGK0DE,3823
+keras_hub/src/models/clip/clip_backbone.py,sha256=DRAXEJFVPcgf1-AeVDDmuoxplwTCl4Xt7-D4whM4w04,6619
 keras_hub/src/models/clip/clip_image_converter.py,sha256=XyHEDB4RbYiveMN1hLQxHgGADb_goyWyE0TceAd2owM,330
-keras_hub/src/models/clip/clip_preprocessor.py,sha256=nUYu8Bgf3TU7jrR10kr0BIe7ph3aABvGtIqnjqrIb9k,4752
+keras_hub/src/models/clip/clip_layers.py,sha256=ns3Zzm5UzMpm-ynyU3aJu2d4i3HmzNiZKdAea624ako,10184
+keras_hub/src/models/clip/clip_preprocessor.py,sha256=xj-FzK7gLIUyvTo2iM1zHh9f2Ff25tZCYFxsPE3dwFU,4771
 keras_hub/src/models/clip/clip_presets.py,sha256=b9Azial1dUtuNV96Q0Ahz-bcBRmlIjnZPUzMvAMb8OY,3348
-keras_hub/src/models/clip/clip_text_encoder.py,sha256=BCIE24eKZJ3yc4T0sjD6-Msjr1FQRKpdTP7vpGEn_7M,5456
+keras_hub/src/models/clip/clip_text_encoder.py,sha256=lZa9Ditvn4DH9As3NEML_Wl6g2qeYer_LzRHGu1hqCM,5449
 keras_hub/src/models/clip/clip_tokenizer.py,sha256=6gIm_LWRbCeBQUI9M2gA8-OXb4tXGygixkbcL6joV1c,7444
-keras_hub/src/models/clip/clip_vision_embedding.py,sha256=6_qC7T1dqKd-39EreGmHZj-YfjOLEDDKjWnEKcKIyuY,3667
-keras_hub/src/models/clip/clip_vision_encoder.py,sha256=q62MXySZN38uCsjqq8cttfBxD7P5abaKQV2i8_u4N6E,6385
+keras_hub/src/models/clip/clip_vision_encoder.py,sha256=C5grKgIgFF8ls-kkGdYorpw5tbfgbmBQe6VJg_3yWII,6368
 keras_hub/src/models/cspnet/__init__.py,sha256=TOpvk2cfOVv1bPA1BOGZj0mhmhc6E98zZmW9e0PIvhk,257
 keras_hub/src/models/cspnet/cspnet_backbone.py,sha256=meHzxubG_9vHQHSelDfrROaQERkDiWkjTtk_gKaWsDc,42457
 keras_hub/src/models/cspnet/cspnet_image_classifier.py,sha256=JqfBHIBTFxaLOyAWx6TdXs0aAOMbcCx1oo47RoQnytc,510
@@ -148,6 +147,11 @@ keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=ye-Ix3oU42pfsD
 keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=xDZbTw_h6pjLDzf8QmbDyMnMsFzgh-dPX1ldg9kddhg,563
 keras_hub/src/models/densenet/densenet_image_converter.py,sha256=DoxYlJVZ9uaabFhVjWOmzvhONoc8KNcQj2vQ6Z1AUpU,354
 keras_hub/src/models/densenet/densenet_presets.py,sha256=d2GEB9cWYrzP8Qj1w8CWiRW976MibQBuk_YQYvgCzr4,1222
+keras_hub/src/models/dinov2/__init__.py,sha256=qacZi82EfAloVND4gDLZjqgR5_yVdz_dc4mMKyCsjOA,257
+keras_hub/src/models/dinov2/dinov2_backbone.py,sha256=kwzd5eqftMS0m5v1HB_4y7JiHxp13ECgG9dNsDoknWo,9491
+keras_hub/src/models/dinov2/dinov2_image_converter.py,sha256=gfFROdYV5rOzo3kJFlRvRHYjek8z9YirKfrFwlVJO3g,342
+keras_hub/src/models/dinov2/dinov2_layers.py,sha256=-G3elRWDy09_VPJDJa0qYS5P8vkBGgxPooMZhy2ifu0,33140
+keras_hub/src/models/dinov2/dinov2_presets.py,sha256=maGHlMM_9Fa9UXm7xRS0h9w7nQwMkS53TTmtY2juF0M,114
 keras_hub/src/models/distil_bert/__init__.py,sha256=3Z0w-Mt3aOR0u9RGzjHQ7B3J3qBF2pGjupDGQ9yyzoc,303
 keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=rnAf_GokB3wAeJwVZtgUKQO_bKJIa8RavhL_ykTJpNw,6440
 keras_hub/src/models/distil_bert/distil_bert_masked_lm.py,sha256=axeZd5UcxFr3_Q8H4yG10CINh93wbcyjlPLauqe5N9E,4289
@@ -191,7 +195,7 @@ keras_hub/src/models/flux/flux_maths.py,sha256=2pnHW8HW7V2JZ8HIrUwE-UU4klpFQaOko
 keras_hub/src/models/flux/flux_model.py,sha256=K92PyeFHIp8SwXuxhv__XCEaQ2wqSW1jOb97I4S24Rw,8991
 keras_hub/src/models/flux/flux_presets.py,sha256=z7C_FbI1_F5YETXuWpc7Yh_0w-5N0eBQy6Oks_X9W88,54
 keras_hub/src/models/flux/flux_text_to_image.py,sha256=Rf5dD2EhG0bE8Gyg9sqaA8YEexS1kdraofIkxiZDjvc,4166
-keras_hub/src/models/flux/flux_text_to_image_preprocessor.py,sha256=Fs9jr97QtmRUbRRz1kITpkuhDM2GoV3n0XSFC-qQA14,2252
+keras_hub/src/models/flux/flux_text_to_image_preprocessor.py,sha256=2kI2vSZvTia5ISb4BVPgC_e1l5rkirLSjhm13P-UR_k,2362
 keras_hub/src/models/gemma/__init__.py,sha256=rVzOJMJ39bgVlT8UdC0t8PlN2c237GKTBmfHIsbPuOQ,251
 keras_hub/src/models/gemma/gemma_attention.py,sha256=wmU5FgQu1Ajg-KHKVXTLHWH7pXqN4_zVJTCp_FXMcAs,10095
 keras_hub/src/models/gemma/gemma_backbone.py,sha256=GzAUSArw_pN9dtWQzTVhWDbW-XyWt4GyMcFLn9hwmh0,13391
@@ -406,14 +410,14 @@ keras_hub/src/models/siglip/siglip_text_encoder.py,sha256=xOVvzyQHLX9ne30y4ussar
 keras_hub/src/models/siglip/siglip_tokenizer.py,sha256=j_67JbIHJDRk-CbiemG2dgAO6lp3_0_JdnfroZ90G18,2579
 keras_hub/src/models/siglip/siglip_vision_encoder.py,sha256=CaNaFq5thBC3TUXXOf2qknk5vWsauM20ZoaDPYRnXcs,5927
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
-keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,sha256=vtVhieAv277mAiZj7Kvvqg_Ba7klfQxZVk4PPxNNQ0s,3062
-keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=jkO7uP3fNrbuFLiOJV-7_S8hz-DqkasZNkoJIdsg58Q,40859
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=M1e8G83CzwOgFvtDVYbm8HKSODa9keN2Jan18EWpWU0,24370
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=oQcVCWOwrdUTrr_JNekoMqdSlKYMGz5tG6v8uD25lTc,5479
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=t4uw920Jn1k80air3WRGimKf71aMVu6q73oWFH348vk,6384
+keras_hub/src/models/stable_diffusion_3/flow_match_euler_discrete_scheduler.py,sha256=1K_B3d3fNn50eY84OgxVHyIHHZhmlJY03b71pMSmE9s,3246
+keras_hub/src/models/stable_diffusion_3/mmdit.py,sha256=emyDmtpJiFU_9crSDBC5CaXoZnM1Eti8uAQtwv2v8B0,42794
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_backbone.py,sha256=odg1nA02dmINSjAfwKnzPt_HC6b7nQfP000swHxIfaI,24055
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_image_to_image.py,sha256=uNsNSQ4EFceGfIMzgjYWFMuL0XdfM58rubTcrCVPrts,5532
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_inpaint.py,sha256=2UIRz11DRbHJ7IVbkjpBjtbkZGC3-eYhMtVUWTmWMH8,6437
 keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_presets.py,sha256=x7Ez4L955MJE4ABtBy-63YpU9XpR0Ro8QWPzYYJs1yE,2167
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=Yt-UIatVKANjjKFCFEj1rIHhOrt8hqefKKQJIAWcTLc,4567
-keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=sQvrhuWvC4hM99cxFiI5532QLeiW4YXpH0zAvnnCXQA,2801
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image.py,sha256=-xmmCaoPc1ixJvyIBwVTW1yKBA-rP4nWReovcs7OLKE,4620
+keras_hub/src/models/stable_diffusion_3/stable_diffusion_3_text_to_image_preprocessor.py,sha256=crUT82moaPx8RVKrLtUHx1zry602f8DWItek9aFkojg,2903
 keras_hub/src/models/stable_diffusion_3/t5_encoder.py,sha256=oV7P1uwCKdGiD93zXq7kmqX0elMZQU4UvBa8wg6P1hs,5113
 keras_hub/src/models/t5/__init__.py,sha256=OWyoUeDY3v4DnO8Ry02DWV1bNSVGcC89PF9oCftyi1s,233
 keras_hub/src/models/t5/t5_backbone.py,sha256=MUmabugPx5_BkAHkuJXr2-8z_yZfKD19SO0KJtlcHhA,10331
@@ -493,7 +497,7 @@ keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=cylrs02ZrYQ1TuZr
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/keras_utils.py,sha256=IWsbg-p-XVLuOkba8PAYNf9zDo4G2RkINLr58p12MhA,5291
 keras_hub/src/utils/pipeline_model.py,sha256=jgzB6NQPSl0KOu08N-TazfOnXnUJbZjH2EXXhx25Ftg,9084
-keras_hub/src/utils/preset_utils.py,sha256=GKYFKK9YcdIrMm0_hC_KTIXgpiMYD6SauMnSRpNsDQo,34975
+keras_hub/src/utils/preset_utils.py,sha256=dEOAGjkjnu69nhWuS1wnHVyMmkYnlzUQAUPzbLexLhY,35142
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
 keras_hub/src/utils/tensor_utils.py,sha256=WrohV6-hvxtLE6rRRhtN4hy8GkHikV-NrRnVEYUwJQo,16133
 keras_hub/src/utils/coco/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -513,6 +517,7 @@ keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
 keras_hub/src/utils/transformers/convert_bert.py,sha256=4gQqXCJzC9QWdLPDUAq741K8t_kjPIET050YjUnLeDA,5977
 keras_hub/src/utils/transformers/convert_deit.py,sha256=ubcqYzMlhWTCE2S_TsXICCMmqjN9RsQPaw_70vArnjo,5306
+keras_hub/src/utils/transformers/convert_dinov2.py,sha256=FvmB3ggEgowVFPSO5WOzC2hKkG2JvjSb-DeVffu78iU,6908
 keras_hub/src/utils/transformers/convert_distilbert.py,sha256=SlfIRhSRk5c1ir2HGiDPiXa5XdOId_DbcnZO9lbwyZ8,6498
 keras_hub/src/utils/transformers/convert_gemma.py,sha256=ElCgwBpSN5Q7rV5PJawTsoytPzs5ZjuwoY60YAe8y_A,6533
 keras_hub/src/utils/transformers/convert_gpt2.py,sha256=HCeHN_-GiQJRxLCM9OCJJ1watPVpIBF8ujS8pGbBOWc,5703
@@ -524,11 +529,13 @@ keras_hub/src/utils/transformers/convert_qwen.py,sha256=WUxMAEFVqRs7TRw7QU5TH3_e
 keras_hub/src/utils/transformers/convert_qwen3.py,sha256=LIormvCMWPq6X9Wo2eNbADjtFZ0nI7tFGZFBxmo4GKw,5700
 keras_hub/src/utils/transformers/convert_qwen_moe.py,sha256=a7R28aln-PdAcNuKAXdrtzvslho2Co6GypChxLMKPpc,10618
 keras_hub/src/utils/transformers/convert_vit.py,sha256=9SUZ9utNJhW_5cj3acMn9cRy47u2eIcDsrhmzj77o9k,5187
-keras_hub/src/utils/transformers/preset_loader.py,sha256=K5FzDAtCuXS9rmZc0Zj7UCwbz5J9_pf7ozWov1qRAfg,4495
+keras_hub/src/utils/transformers/preset_loader.py,sha256=uEmDzEa6v-W59WssBXi8RnvW_dR9km9_3K6bJmBGpcY,4649
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=CYUHyA4y-B61r7NDnCsFb4t_UmSwZ1k9L-8gzEd6KRg,3339
+keras_hub/src/utils/transformers/export/gemma.py,sha256=NpTSgRUSWp3WXQil1CjYUVFVyyVhpO-4-3q2en2Wxwg,3264
+keras_hub/src/utils/transformers/export/hf_exporter.py,sha256=oTdRS8SalPCbi_cZPC55aZUBc-1_pdviUIp0XysA4cI,3234
 keras_hub/tokenizers/__init__.py,sha256=uMjjm0mzUkRb0e4Ac_JK8aJ9cKGUi5UqmzWoWAFJprE,4164
 keras_hub/utils/__init__.py,sha256=jXPqVGBpJr_PpYmqD8aDG-fRMlxH-ulqCR2SZMn288Y,646
-keras_hub_nightly-0.22.0.dev202507160421.dist-info/METADATA,sha256=tUmL-rPpkANmlk9XGLTK-y_LYefpvEtIJL-dU9R3O7s,7393
-keras_hub_nightly-0.22.0.dev202507160421.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-keras_hub_nightly-0.22.0.dev202507160421.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.22.0.dev202507160421.dist-info/RECORD,,
+keras_hub_nightly-0.22.0.dev202507170424.dist-info/METADATA,sha256=S2Qlx04gz55z7gcbYbvnkHBfqT-oR3LCrZ164IdLORM,7393
+keras_hub_nightly-0.22.0.dev202507170424.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+keras_hub_nightly-0.22.0.dev202507170424.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.22.0.dev202507170424.dist-info/RECORD,,
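
Each RECORD entry has the form path,sha256=<digest>,<size>, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 hash, per the wheel spec. A minimal sketch that reproduces the hash field for any file (the path below is illustrative):

import base64
import hashlib

def record_hash(path):
    # Unpadded URL-safe base64 of the SHA-256 digest, as used in RECORD.
    digest = hashlib.sha256(open(path, "rb").read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

print(record_hash("keras_hub/src/version.py"))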
keras_hub/src/models/clip/clip_encoder_block.py DELETED
@@ -1,111 +0,0 @@
-from keras import dtype_policies
-from keras import layers
-from keras import ops
-
-
-def quick_gelu(x):
-    return x * ops.sigmoid(1.702 * x)
-
-
-# TODO: Deprecate this in favor of `keras.layers.MultiHeadAttention` once the
-# dtype compatibility issue is resolved.
-class CLIPMultiHeadAttention(layers.MultiHeadAttention):
-    def _masked_softmax(self, attention_scores, attention_mask=None):
-        attention_scores = super()._masked_softmax(
-            attention_scores, attention_mask
-        )
-        return ops.cast(attention_scores, self._value_dense.compute_dtype)
-
-
-class CLIPEncoderBlock(layers.Layer):
-    def __init__(
-        self,
-        hidden_dim,
-        num_heads,
-        intermediate_dim,
-        intermediate_activation="quick_gelu",
-        use_causal_mask=True,
-        **kwargs,
-    ):
-        super().__init__(**kwargs)
-        if hidden_dim % num_heads != 0:
-            raise ValueError(
-                "`hidden_dim` must be divisible by `num_heads`. "
-                f"Received: hidden_dim={hidden_dim}, num_heads={num_heads}"
-            )
-        self.hidden_dim = hidden_dim
-        self.num_heads = num_heads
-        self.intermediate_dim = intermediate_dim
-        self.intermediate_activation = intermediate_activation
-        self.use_causal_mask = use_causal_mask
-
-        if intermediate_activation == "quick_gelu":
-            intermediate_activation = quick_gelu
-
-        self.layer_norm_1 = layers.LayerNormalization(
-            epsilon=1e-5, dtype=self.dtype_policy, name="layer_norm_1"
-        )
-        self.attention = CLIPMultiHeadAttention(
-            num_heads,
-            hidden_dim // num_heads,
-            dtype=self.dtype_policy,
-            name="attention",
-        )
-        self.layer_norm_2 = layers.LayerNormalization(
-            epsilon=1e-5, dtype=self.dtype_policy, name="layer_norm_2"
-        )
-        self.dense_1 = layers.Dense(
-            self.intermediate_dim, dtype=self.dtype_policy, name="dense_1"
-        )
-        self.activation = layers.Activation(
-            intermediate_activation, dtype=self.dtype_policy, name="activation"
-        )
-        self.dense_2 = layers.Dense(
-            self.hidden_dim, dtype=self.dtype_policy, name="dense_2"
-        )
-
-    def build(self, input_shape):
-        self.layer_norm_1.build(input_shape)
-        self.attention.build(input_shape, input_shape, input_shape)
-        # Before Keras 3.2, there was no setter for `dtype_policy`. Directly
-        # assign a `DTypePolicy` instead.
-        self.attention._softmax.dtype_policy = dtype_policies.DTypePolicy(
-            "float32"
-        )
-        self.layer_norm_2.build(input_shape)
-        self.dense_1.build(input_shape)
-        input_shape = self.dense_1.compute_output_shape(input_shape)
-        self.dense_2.build(input_shape)
-
-    def compute_output_shape(self, inputs_shape):
-        outputs_shape = list(inputs_shape)
-        outputs_shape[-1] = self.hidden_dim
-        return outputs_shape
-
-    def call(self, x, training=None):
-        residual = x
-        x = self.layer_norm_1(x)
-        x = self.attention(
-            x, x, x, training=training, use_causal_mask=self.use_causal_mask
-        )
-        x = ops.add(residual, x)
-
-        residual = x
-        x = self.dense_1(self.layer_norm_2(residual))
-        x = self.activation(x)
-        x = self.dense_2(x)
-        x = ops.add(residual, x)
-        return x
-
-    def get_config(self):
-        config = super().get_config()
-        config.update(
-            {
-                "hidden_dim": self.hidden_dim,
-                "num_heads": self.num_heads,
-                "intermediate_dim": self.intermediate_dim,
-                "intermediate_activation": self.intermediate_activation,
-                "use_causal_mask": self.use_causal_mask,
-            }
-        )
-        return config
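
The quick_gelu activation deleted here (per the file list above, the CLIP layers appear to have been consolidated into the new clip_layers.py) approximates GELU with a scaled sigmoid. A small sketch of how close the approximation is, using public keras.ops calls:

import numpy as np
from keras import ops

x = np.linspace(-4.0, 4.0, 9).astype("float32")
quick = x * ops.convert_to_numpy(ops.sigmoid(1.702 * x))  # quick_gelu
exact = ops.convert_to_numpy(ops.gelu(x, approximate=False))
print(np.max(np.abs(quick - exact)))  # worst-case gap is roughly 0.02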
keras_hub/src/models/clip/clip_vision_embedding.py DELETED
@@ -1,101 +0,0 @@
-from keras import layers
-from keras import ops
-
-from keras_hub.src.utils.keras_utils import standardize_data_format
-
-
-class CLIPVisionEmbedding(layers.Layer):
-    def __init__(
-        self,
-        hidden_dim,
-        patch_size,
-        image_size,
-        data_format=None,
-        dtype=None,
-        **kwargs,
-    ):
-        super().__init__(dtype=dtype, **kwargs)
-        self.hidden_dim = int(hidden_dim)
-        self.patch_size = int(patch_size)
-        self.image_size = int(image_size)
-        data_format = standardize_data_format(data_format)
-        self.data_format = data_format
-        num_patches = (image_size // patch_size) ** 2
-        self.num_positions = num_patches + 1
-
-        self.patch_embedding = layers.Conv2D(
-            hidden_dim,
-            kernel_size=patch_size,
-            strides=patch_size,
-            data_format=data_format,
-            use_bias=False,
-            dtype=dtype,
-            name="patch_embedding",
-        )
-        self.position_embedding = layers.Embedding(
-            num_patches + 1, hidden_dim, dtype=dtype, name="position_embedding"
-        )
-
-    def build(self, input_shape):
-        self.class_embedding = self.add_weight(
-            shape=(self.hidden_dim,),
-            initializer="random_normal",
-            dtype=self.variable_dtype,
-            name="class_embedding",
-        )
-        self.position_ids = self.add_weight(
-            shape=(1, self.num_positions),
-            initializer="zeros",
-            # Let the backend determine the int dtype. For example, tf
-            # requires int64 for correct device placement, whereas jax and torch
-            # don't.
-            dtype=int,
-            trainable=False,
-            name="position_ids",
-        )
-        self.patch_embedding.build(input_shape)
-        self.position_embedding.build(self.position_ids.shape)
-
-    def call(self, inputs, training=None):
-        x = inputs
-        batch_size = ops.shape(x)[0]
-        patch_embeddings = self.patch_embedding(x, training=training)
-        if self.data_format == "channels_last":
-            patch_embeddings = ops.reshape(
-                patch_embeddings, (batch_size, -1, self.hidden_dim)
-            )
-        else:
-            patch_embeddings = ops.reshape(
-                patch_embeddings, (batch_size, self.hidden_dim, -1)
-            )
-            patch_embeddings = ops.transpose(patch_embeddings, (0, 2, 1))
-        class_embeddings = ops.expand_dims(self.class_embedding, axis=(0, 1))
-        class_embeddings = ops.tile(class_embeddings, (batch_size, 1, 1))
-        position_embeddings = self.position_embedding(self.position_ids)
-        embeddings = ops.concatenate(
-            [class_embeddings, patch_embeddings], axis=1
-        )
-        return ops.add(embeddings, position_embeddings)
-
-    def get_config(self):
-        config = super().get_config()
-        config.update(
-            {
-                "hidden_dim": self.hidden_dim,
-                "patch_size": self.patch_size,
-                "image_size": self.image_size,
-            }
-        )
-        return config
-
-    def compute_output_shape(self, input_shape):
-        output_shape = [input_shape[0], None, self.hidden_dim]
-        if self.data_format == "channels_last":
-            if input_shape[1] is not None and input_shape[2] is not None:
-                patch_num = input_shape[1] // self.patch_size
-                output_shape[1] = patch_num**2 + 1
-        else:
-            if input_shape[2] is not None and input_shape[3] is not None:
-                patch_num = input_shape[2] // self.patch_size
-                output_shape[1] = patch_num**2 + 1
-        return output_shape
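
For orientation, the num_positions bookkeeping in this deleted layer is plain patch arithmetic: one token per non-overlapping patch plus one class token. A worked sketch with typical CLIP-style values (illustrative, not read from this diff):

image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patch tokens
num_positions = num_patches + 1                # 197, i.e. num_patches + 1 above
print(num_patches, num_positions)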