keras-hub-nightly 0.16.1.dev202410010346__py3-none-any.whl → 0.16.1.dev202410030339__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. keras_hub/api/layers/__init__.py +0 -3
  2. keras_hub/api/models/__init__.py +1 -1
  3. keras_hub/src/layers/preprocessing/audio_converter.py +3 -7
  4. keras_hub/src/layers/preprocessing/image_converter.py +164 -34
  5. keras_hub/src/models/backbone.py +3 -9
  6. keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py +0 -109
  7. keras_hub/src/models/densenet/densenet_image_classifier.py +0 -128
  8. keras_hub/src/models/densenet/densenet_image_converter.py +2 -4
  9. keras_hub/src/models/feature_pyramid_backbone.py +1 -1
  10. keras_hub/src/models/image_classifier.py +147 -2
  11. keras_hub/src/models/image_classifier_preprocessor.py +3 -3
  12. keras_hub/src/models/image_segmenter.py +0 -5
  13. keras_hub/src/models/mix_transformer/mix_transformer_classifier.py +0 -109
  14. keras_hub/src/models/mobilenet/mobilenet_image_classifier.py +0 -92
  15. keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py +2 -4
  16. keras_hub/src/models/preprocessor.py +3 -5
  17. keras_hub/src/models/resnet/resnet_backbone.py +1 -11
  18. keras_hub/src/models/resnet/resnet_image_classifier.py +0 -137
  19. keras_hub/src/models/resnet/resnet_image_converter.py +2 -4
  20. keras_hub/src/models/sam/__init__.py +5 -0
  21. keras_hub/src/models/sam/sam_image_converter.py +2 -4
  22. keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py +11 -1
  23. keras_hub/src/models/sam/sam_presets.py +3 -3
  24. keras_hub/src/models/task.py +23 -25
  25. keras_hub/src/models/vgg/vgg_backbone.py +1 -20
  26. keras_hub/src/models/vgg/vgg_image_classifier.py +108 -29
  27. keras_hub/src/tokenizers/tokenizer.py +3 -6
  28. keras_hub/src/utils/preset_utils.py +103 -61
  29. keras_hub/src/utils/timm/preset_loader.py +8 -9
  30. keras_hub/src/version_utils.py +1 -1
  31. {keras_hub_nightly-0.16.1.dev202410010346.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/METADATA +1 -1
  32. {keras_hub_nightly-0.16.1.dev202410010346.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/RECORD +34 -35
  33. keras_hub/src/layers/preprocessing/resizing_image_converter.py +0 -138
  34. {keras_hub_nightly-0.16.1.dev202410010346.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/WHEEL +0 -0
  35. {keras_hub_nightly-0.16.1.dev202410010346.dist-info → keras_hub_nightly-0.16.1.dev202410030339.dist-info}/top_level.txt +0 -0
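The list above points to a consolidation: `resizing_image_converter.py` is removed while `image_converter.py` grows by roughly 130 lines, and the per-model image classifiers (CSP-DarkNet, DenseNet, MiT, MobileNet, ResNet) shrink to a few hundred bytes each while the shared `image_classifier.py` grows. A plausible reading is that resizing/rescaling moved into the base `ImageConverter` and the per-model classifier logic moved into the base `ImageClassifier`. The sketch below shows the kind of thin subclass such a slimmed-down file would contain; it is an illustration of the pattern, not the actual file contents, and the `backbone_cls`/`preprocessor_cls` class attributes are assumed from KerasHub's task conventions rather than shown in this diff.

```python
# Hypothetical sketch of a slimmed-down per-model classifier (ResNet as the
# example). Assumes the shared forward-pass/compile logic now lives in the
# base `ImageClassifier`; the subclass only binds model-specific components.
from keras_hub.src.api_export import keras_hub_export
from keras_hub.src.models.image_classifier import ImageClassifier
from keras_hub.src.models.resnet.resnet_backbone import ResNetBackbone
from keras_hub.src.models.resnet.resnet_image_classifier_preprocessor import (
    ResNetImageClassifierPreprocessor,
)


@keras_hub_export("keras_hub.models.ResNetImageClassifier")
class ResNetImageClassifier(ImageClassifier):
    # Assumed convention: bind the backbone and preprocessor classes so
    # `from_preset` can construct the full task from the shared base class.
    backbone_cls = ResNetBackbone
    preprocessor_cls = ResNetImageClassifierPreprocessor
```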
--- keras_hub_nightly-0.16.1.dev202410010346.dist-info/METADATA
+++ keras_hub_nightly-0.16.1.dev202410030339.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: keras-hub-nightly
-Version: 0.16.1.dev202410010346
+Version: 0.16.1.dev202410030339
 Summary: Industry-strength Natural Language Processing extensions for Keras.
 Home-page: https://github.com/keras-team/keras-hub
 Author: Keras team
--- keras_hub_nightly-0.16.1.dev202410010346.dist-info/RECORD
+++ keras_hub_nightly-0.16.1.dev202410030339.dist-info/RECORD
@@ -1,15 +1,15 @@
 keras_hub/__init__.py,sha256=QGdXyHgYt6cMUAP1ebxwc6oR86dE0dkMxNy2eOCQtFo,855
 keras_hub/api/__init__.py,sha256=spMxsgqzjpeuC8rY4WP-2kAZ2qwwKRSbFwddXgUjqQE,524
 keras_hub/api/bounding_box/__init__.py,sha256=T8R_X7BPm0et1xaZq8565uJmid7dylsSFSj4V-rGuFQ,1097
-keras_hub/api/layers/__init__.py,sha256=jX6K16_pDvfm8ScLsRnO5OoF91WpHda0SiOLGkoIGp4,2331
+keras_hub/api/layers/__init__.py,sha256=P1Zn4sjTx1OnmlRyX8-QRxSe-2gkvyQ-90BzCjqr3oU,2227
 keras_hub/api/metrics/__init__.py,sha256=So8Ec-lOcTzn_UUMmAdzDm8RKkPu2dbRUm2px8gpUEI,381
-keras_hub/api/models/__init__.py,sha256=mPLLdpHJ6AIOei9dFKImcIrHjXOm3-pHYiUZVA_ry0g,14027
+keras_hub/api/models/__init__.py,sha256=dyancDilnzbHByiTYQNhqfm6JFeZH_DKHl4PZuvWoA0,13994
 keras_hub/api/samplers/__init__.py,sha256=n-_SEXxr2LNUzK2FqVFN7alsrkx1P_HOVTeLZKeGCdE,730
 keras_hub/api/tokenizers/__init__.py,sha256=_f-r_cyUM2fjBB7iO84ThOdqqsAxHNIewJ2EBDlM0cA,2524
 keras_hub/api/utils/__init__.py,sha256=Gp1E6gG-RtKQS3PBEQEOz9PQvXkXaJ0ySGMqZ7myN7A,215
 keras_hub/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/api_export.py,sha256=9pQZK27JObxWZ96QPLBp1OBsjWigh1iuV6RglPGMRk0,1499
-keras_hub/src/version_utils.py,sha256=NH7d2eE6DNxa_SHZWLz69EGvzOJOo6uXftLERho96ps,222
+keras_hub/src/version_utils.py,sha256=Q7sWkBqN11QJLqnWmwU9B2XhXWRKLr1vv199Ud-cp4A,222
 keras_hub/src/bounding_box/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/bounding_box/converters.py,sha256=a5po8DBm87oz2EXfi-0uEZHCMlCJPIb4-MaZIdYx3Dg,17865
 keras_hub/src/bounding_box/formats.py,sha256=YmskOz2BOSat7NaE__J9VfpSNGPJJR0znSzA4lp8MMI,3868
@@ -33,14 +33,13 @@ keras_hub/src/layers/modeling/transformer_decoder.py,sha256=_JbCSdLSn1Am5Gqf32c5
 keras_hub/src/layers/modeling/transformer_encoder.py,sha256=howjIXH_vgBOKaXaIa7mTg8xuIeXrmMZS29Zg1vSXOQ,9900
 keras_hub/src/layers/modeling/transformer_layer_utils.py,sha256=FuznrW33iG50B-VDN8R1RjuA5JG72yNMJ1TBgWLxR0E,3487
 keras_hub/src/layers/preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/layers/preprocessing/audio_converter.py,sha256=UNai8KPmRrJCeFb7Df7h-uW-5M9A-JoIn5RrBeBe93w,4286
-keras_hub/src/layers/preprocessing/image_converter.py,sha256=isBi_EkzLTCElTczsetiqomX60eLBI5ykMc4bC-Hnd4,4763
+keras_hub/src/layers/preprocessing/audio_converter.py,sha256=YGh_kQw65a1Z6S5zzSNVP-ChyLYHq3-eOYpOS53xIN8,4156
+keras_hub/src/layers/preprocessing/image_converter.py,sha256=zlg6VKQWjKDCojJnI9VfK4Rt88QE29XjpDewZQNT8IE,10166
 keras_hub/src/layers/preprocessing/masked_lm_mask_generator.py,sha256=itxWq3FHYlR0I7jKarQlSKbSmRLl9ut_UTSP3ZDwP0A,8162
 keras_hub/src/layers/preprocessing/multi_segment_packer.py,sha256=ZNqnUFnc9Af122Q7T6YyUoXgIdU9AgIJfsvR1UrCjFU,12068
 keras_hub/src/layers/preprocessing/preprocessing_layer.py,sha256=WyX41b9Ev_YJ5uVQVOAqD0PQasMOPDoyDjl_PkzkAkE,687
 keras_hub/src/layers/preprocessing/random_deletion.py,sha256=x23nRo0ir2J4Ps42i9Xo9dVEkD22P9tZNhI2hXvREbM,9763
 keras_hub/src/layers/preprocessing/random_swap.py,sha256=w2z7yNQsII5g4sEFi4GXfgxIc1S6UUt3a8YWZew_f4U,9504
-keras_hub/src/layers/preprocessing/resizing_image_converter.py,sha256=2v_wAcryc2yWUzuseTdqmZxamEtHLsdnHRV-_radGRU,5855
 keras_hub/src/layers/preprocessing/start_end_packer.py,sha256=lY2K937z6JucxNe7VknynhhjrcUfFigU6mqIdv2gS-Y,7973
 keras_hub/src/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/metrics/bleu.py,sha256=9ch4_HnrQpmpkeip022hQDetMjJY853zB-0-kXM5Ev4,13673
@@ -50,20 +49,20 @@ keras_hub/src/metrics/rouge_base.py,sha256=Pt2DUznhTTeR-fX1nQ_wSbPtmuTgxQTvrGpu8
 keras_hub/src/metrics/rouge_l.py,sha256=JlZhMBV6wS_6zMd57pkTc6yxHkEJT9fVQMlPZKekQzQ,2729
 keras_hub/src/metrics/rouge_n.py,sha256=JoFtmgjF4Ic263ny6bfD6vMHKreH9le3HnOOxemupRc,3620
 keras_hub/src/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/backbone.py,sha256=vGyEgW_qu1GilcqhZLzHNd-pJcwuF3J0J-tlDD-TKns,11489
+keras_hub/src/models/backbone.py,sha256=2OZx6WAx2q9JK2yue5BoUUipIBjpOJRVNnMjXLVDLRk,11185
 keras_hub/src/models/causal_lm.py,sha256=p3C5R6hbe1BARHNXJZqtgwlp3bDqkv3gguO19PeJC2c,14791
 keras_hub/src/models/causal_lm_preprocessor.py,sha256=YY7VJZicdmnjDSWi9g4_pEpd5bdJK166GlWcapvokF0,6663
-keras_hub/src/models/feature_pyramid_backbone.py,sha256=2Mh0G5y3CYjSyWqyw5X-NvtJf2cDegd7j4VVMSO7Nws,2247
-keras_hub/src/models/image_classifier.py,sha256=PFbuXXYDzryfvftsCJs2eLJ8IoSAwIbsMSiAW-zn-8M,3169
-keras_hub/src/models/image_classifier_preprocessor.py,sha256=BRAdFfK4oQ0TsvGDM-Dpjj38eV0xmpKdQwdnGZOvt_c,2614
-keras_hub/src/models/image_segmenter.py,sha256=OngkYiqvgs49Q-bNQ86TE1w_HYTorcgSg_mkmwbhO00,3014
+keras_hub/src/models/feature_pyramid_backbone.py,sha256=clEW-TTQSVJ_5qFNdDF0iABkin1p_xlBUFjJrC7T0IA,2247
+keras_hub/src/models/image_classifier.py,sha256=yt6cjhPfqs8A_eWXBsXdXFzn-aRgH2rVHUq7Zu7CyK8,7804
+keras_hub/src/models/image_classifier_preprocessor.py,sha256=YdewYfMPVHI7gdhbBI-zVcy4NSfg0bhiOHTmGEKoOYI,2668
+keras_hub/src/models/image_segmenter.py,sha256=C1bzIO59pG58iist5GLn_qnlotDpcAVxPV_8a68BkAc,2876
 keras_hub/src/models/image_segmenter_preprocessor.py,sha256=vJoZc1OebQWlqUP_ygCS7P1Pyq1KmmUc-0V_-maDzX4,2658
 keras_hub/src/models/masked_lm.py,sha256=uXO_dE_hILlOC9jNr6oK6IHi9IGUqLyNGvr6nMt8Rk0,3576
 keras_hub/src/models/masked_lm_preprocessor.py,sha256=g8vrnyYwqdnSw5xppROM1Gzo_jmMWKYZoQCsKdfrFKk,5656
-keras_hub/src/models/preprocessor.py,sha256=_hNy2qWRK_vcycS1eDB_Dz9r33T3eZ7dUwB85f7FzOs,8133
+keras_hub/src/models/preprocessor.py,sha256=pJodz7KRVncvsC3o4qoKDYWP2J0a8E9CD6oVGYgJzIM,7970
 keras_hub/src/models/seq_2_seq_lm.py,sha256=w0gX-5YZjatfvAJmFAgSHyqS_BLqc8FF8DPLGK8mrgI,1864
 keras_hub/src/models/seq_2_seq_lm_preprocessor.py,sha256=HUHRbWRG5SF1pPpotGzBhXlrMh4pLFxgAoFk05FIrB4,9687
-keras_hub/src/models/task.py,sha256=I-kkHbvzCu2A4aMdQDd8hFMKAXHWom8MxBlKNQm_5cs,13846
+keras_hub/src/models/task.py,sha256=MfrzIoj3XFaRiNlUg-K6D8l-ylWfpzBjjmSy-guXtG8,13935
 keras_hub/src/models/text_classifier.py,sha256=VBDvQUHTpJPqKp7A4VAtm35FOmJ3yMo0DW6GdX67xG0,4159
 keras_hub/src/models/text_classifier_preprocessor.py,sha256=EoWp-GHnaLnAKTdAzDmC-soAV92ATF3QozdubdV2WXI,4722
 keras_hub/src/models/text_to_image.py,sha256=N42l1W8YEUBHOdGiT4BQNqzTpgjB2O5dtLU5FbKpMy0,10792
@@ -104,7 +103,7 @@ keras_hub/src/models/clip/clip_text_encoder.py,sha256=0bBiBnDLkm2Dsyogcpb6nudL16
 keras_hub/src/models/clip/clip_tokenizer.py,sha256=X68w_-Bq-UHhQ_O-n_T3QIA6WwUqbnxk22J_rqRX97w,7061
 keras_hub/src/models/csp_darknet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/csp_darknet/csp_darknet_backbone.py,sha256=7Lmk98S7PLI3ONeVNRPAPshbs6zWrzfaGgvoAS9CRkQ,13727
-keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py,sha256=AqzL7TIjCz3sB3bNRcNr0A4KA6uYoPWofneTUgi4Y7k,3770
+keras_hub/src/models/csp_darknet/csp_darknet_image_classifier.py,sha256=2nMkmym36EF4v3BO-wwPIjO3OsRvGloDKW0RbHGB7ag,368
 keras_hub/src/models/deberta_v3/__init__.py,sha256=6E-QtAD1uvTBobrn5bUoyB1qtaCJU-t73TtbAEH6i9g,288
 keras_hub/src/models/deberta_v3/deberta_v3_backbone.py,sha256=jAxG0XQ4CrHwzqruvYh2ZixC5ML09M4uhy0pWipgt0Y,7244
 keras_hub/src/models/deberta_v3/deberta_v3_masked_lm.py,sha256=ADBktf1DdiP9T6LCaMhdFiZ_mUbBRKMekY5mGwAeJIo,4186
@@ -118,9 +117,9 @@ keras_hub/src/models/deberta_v3/disentangled_self_attention.py,sha256=3l7Hy7JfiZ
 keras_hub/src/models/deberta_v3/relative_embedding.py,sha256=3WIQ1nWcEhfWF0U9DcKyYz3AAhO3Pmg7ykpzrYe0Jgw,2886
 keras_hub/src/models/densenet/__init__.py,sha256=r7StyamnWeeZxOk9r4ZYNbS_YVhu9YGPyXhNxljvdPg,269
 keras_hub/src/models/densenet/densenet_backbone.py,sha256=dN9lUwKzO3E2HthNV2x54ozeBEQ0ilNs5uYHshFQpT0,6723
-keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=oWjN_Rn5ffOF8WW8U_KJJFn-EcJbqyyuAbgmr0lp2gk,4742
+keras_hub/src/models/densenet/densenet_image_classifier.py,sha256=ptuV6PwgoUpmrSPqX7-a85IpWsElwcCv_G5IVkP9E_Q,530
 keras_hub/src/models/densenet/densenet_image_classifier_preprocessor.py,sha256=xDZbTw_h6pjLDzf8QmbDyMnMsFzgh-dPX1ldg9kddhg,563
-keras_hub/src/models/densenet/densenet_image_converter.py,sha256=dJEMrevAL7F3OF6W-Xh7h0AZLtgUoa1BFTP963Bj3Ao,388
+keras_hub/src/models/densenet/densenet_image_converter.py,sha256=DoxYlJVZ9uaabFhVjWOmzvhONoc8KNcQj2vQ6Z1AUpU,354
 keras_hub/src/models/densenet/densenet_presets.py,sha256=GawLJOd_Kn_Kj_1ue7DYFLx7UPYvPGGOYKrNIqhQe2I,1534
 keras_hub/src/models/distil_bert/__init__.py,sha256=3Z0w-Mt3aOR0u9RGzjHQ7B3J3qBF2pGjupDGQ9yyzoc,303
 keras_hub/src/models/distil_bert/distil_bert_backbone.py,sha256=rnAf_GokB3wAeJwVZtgUKQO_bKJIa8RavhL_ykTJpNw,6440
@@ -203,11 +202,11 @@ keras_hub/src/models/mistral/mistral_tokenizer.py,sha256=wyzR_Y2XwrDiBV3jIeBChSP
 keras_hub/src/models/mistral/mistral_transformer_decoder.py,sha256=RDIIB3FhneHZP11tNUFQT9DcWawCMnrtVxtSvtnP3ts,9542
 keras_hub/src/models/mix_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/mix_transformer/mix_transformer_backbone.py,sha256=B4hdhWHZ93lS937BGSSxovDKVXQZVuWrMbFwECFoWrg,6048
-keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=XF0RvdocC4CPOrIEStw1V9KQ8IKs09Ti3Dd4dbZteac,3692
+keras_hub/src/models/mix_transformer/mix_transformer_classifier.py,sha256=uXO2-GzI_25TdlXe8O8qvnM7tryadfetVDW3yJLGfiI,348
 keras_hub/src/models/mix_transformer/mix_transformer_layers.py,sha256=SzyJJhuyESlsCgndmZNYuuF0Ogb1FKoYkSfDJnThgT0,9538
 keras_hub/src/models/mobilenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/mobilenet/mobilenet_backbone.py,sha256=nlXdMqcj18iahy60aew4ON79EHUEuNIgvKY9dToH284,18191
-keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=v7uuHiJXNgPIH5yaZHY5kVj4ddnrQgCPU0Gfh1DHTks,3174
+keras_hub/src/models/mobilenet/mobilenet_image_classifier.py,sha256=l5jo99I0fLlbwLub5jHw07CjC-NnmuV-ySJwXGI20Ek,351
 keras_hub/src/models/opt/__init__.py,sha256=6Ybj8etxNaPsVcuZvaeHnKB3As92Px--dbiFAqOCIT0,239
 keras_hub/src/models/opt/opt_backbone.py,sha256=mK5z_E5mSiIX5s0w4hr4IVQpT7K46W2ajZBmuMjxwaY,5873
 keras_hub/src/models/opt/opt_causal_lm.py,sha256=DzQuOy3xIXgzPEbcoY_s_CLYpanpghGnS1OFWCx_zxc,10851
@@ -219,7 +218,7 @@ keras_hub/src/models/pali_gemma/pali_gemma_backbone.py,sha256=srZyBsA5tulO_Fb03g
 keras_hub/src/models/pali_gemma/pali_gemma_causal_lm.py,sha256=qlcBnFtPgKIRtdHgA4rrhiktBJq4h_uV-HriuuRBVwc,11196
 keras_hub/src/models/pali_gemma/pali_gemma_causal_lm_preprocessor.py,sha256=F57y0fZ0wYYxfGIjfrJc1W9uQpViYFx5bvFjj5CqUbI,4814
 keras_hub/src/models/pali_gemma/pali_gemma_decoder_block.py,sha256=Q_sPAULiSo_ZJeXklZjCLhvOMXk8MrPZhEXtL5yNOiI,5175
-keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py,sha256=zQxvp1jUK6ASTeDxi5GLOfn26DBOO3El36z73MZ-xz8,405
+keras_hub/src/models/pali_gemma/pali_gemma_image_converter.py,sha256=5yM_jUtrFsWIieiwfFBoP7mtPmQAwywkeLKbd7fhmzk,371
 keras_hub/src/models/pali_gemma/pali_gemma_presets.py,sha256=4D6qfWuxJtY-tyo31gxAaUlhV6wF7BhL1_FgiPmTQT0,2401
 keras_hub/src/models/pali_gemma/pali_gemma_tokenizer.py,sha256=ljTiADHo0Ok88q-jVzwJIle2C8xcxnudLTsBLzIySaM,2415
 keras_hub/src/models/pali_gemma/pali_gemma_vit.py,sha256=wP1UtW0WnlRmga-JQRxWTfAZNt_q-vaF1Qy4siJDpyY,18685
@@ -234,10 +233,10 @@ keras_hub/src/models/phi3/phi3_presets.py,sha256=DNyPTDA7PzFC8Ys2QmR2-mxUDa8Y8Id
 keras_hub/src/models/phi3/phi3_rotary_embedding.py,sha256=WTPCN8IKq3R7kMzsES1b8JEKV-8iNi_49WkhNTXoNUk,5012
 keras_hub/src/models/phi3/phi3_tokenizer.py,sha256=bOPH14wTVVHJHq8mgzXLjsgvKMNhfO8eayevAPpjYVA,1992
 keras_hub/src/models/resnet/__init__.py,sha256=C5UqlQ6apm8WSp1bnrxB6Bi3BGaknxRQs-r3b2wpaGA,257
-keras_hub/src/models/resnet/resnet_backbone.py,sha256=1BD2MqaDi_NZ6PgHrWxzqffbTKY0FZYMRA1Bk0-mTeQ,31866
-keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=7RB-ELo0MW725NLwD38IfMIpcbaS9RLTh5F821gDcGU,4777
+keras_hub/src/models/resnet/resnet_backbone.py,sha256=mqVdGUj8YtjZ3zIhAQXgNqu8SqiQiFlYChn0rRKF_IE,31287
+keras_hub/src/models/resnet/resnet_image_classifier.py,sha256=nf35EKDzvBkfhHsK-s6Ks0nbhvKO7HEOYZm94YckyWE,510
 keras_hub/src/models/resnet/resnet_image_classifier_preprocessor.py,sha256=fM7gyQ0qB-RRuI4USJkRD6q9-HVfuC71e-BLTo-UhHQ,543
-keras_hub/src/models/resnet/resnet_image_converter.py,sha256=zO1cO76eYR70qQyGm5F9msiF7D08BprItvpvm8VOSuY,376
+keras_hub/src/models/resnet/resnet_image_converter.py,sha256=fgTxihJznGFss-y3Z-jp0JE3X1gaaB2y-f2KMwrT8Pk,342
 keras_hub/src/models/resnet/resnet_presets.py,sha256=eYB6vrtoSd9xC2KzUToa3R9e5G6T-AyuFKZDOKOBbMI,2965
 keras_hub/src/models/retinanet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/retinanet/anchor_generator.py,sha256=43NoI7djbRudH98hUm-9fw5OEGQNRXOUYzypIZhLYhE,6750
@@ -253,14 +252,14 @@ keras_hub/src/models/roberta/roberta_presets.py,sha256=_0kYwJySwPCX5MVRPapT_PE2R
 keras_hub/src/models/roberta/roberta_text_classifier.py,sha256=EcxudQle2gW9RB6wmpoIJ7YM4UOzin74fluelSrJ6YY,6681
 keras_hub/src/models/roberta/roberta_text_classifier_preprocessor.py,sha256=gAJa8JdPUmT1N7nxBqtaIbnfXV-xlNjTtkEevQhfjNU,5993
 keras_hub/src/models/roberta/roberta_tokenizer.py,sha256=VKPrgXVT9aMKP7et2DIWKlTN8g4tIzjya0MHqNz9BwQ,2712
-keras_hub/src/models/sam/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+keras_hub/src/models/sam/__init__.py,sha256=fp71Q288xeE81tIOZkkudec4Acs8v4aO5WdyzCD9x-c,239
 keras_hub/src/models/sam/sam_backbone.py,sha256=fbvtGG6du7tnkcGtEsRyT9TRwPBUJ99GBolGkWR5pkc,4351
-keras_hub/src/models/sam/sam_image_converter.py,sha256=Gr6OB-R3jXKjnWk_ndKEQLveIUj8NGKQ3vMG3tpBy9Y,358
+keras_hub/src/models/sam/sam_image_converter.py,sha256=5POp3aYFu6CK3R0NNfeUBbjhguBkincSMNvlcIJXarE,324
 keras_hub/src/models/sam/sam_image_segmenter.py,sha256=gJ-O7XaSvn9KTI-QPguhAiGfvxLUBar-KVQ-EEH5kko,7680
-keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py,sha256=lZynaWAAdHer3NZZ6YLymZ9h5x1D6dIMQDwmPp-JSc4,499
+keras_hub/src/models/sam/sam_image_segmenter_preprocessor.py,sha256=7slvyhGoMHmSigagqIcjDJ3gX8fUJbuMBwmozC4FlCg,849
 keras_hub/src/models/sam/sam_layers.py,sha256=SE5e6tYc-lsIVfMp9khewvuA1jY-dEHQmLT00YUok4M,13862
 keras_hub/src/models/sam/sam_mask_decoder.py,sha256=9RfjoNL7GSY6I9LZ3ulUa5cIoYSPJNP4KnHvq16lnM4,9549
-keras_hub/src/models/sam/sam_presets.py,sha256=PYQrmhsNApgLO6CAOYPnd24IbFqztyJwrhkLT5bx1wk,1220
+keras_hub/src/models/sam/sam_presets.py,sha256=AfGUKNOkz0G11OMYqVebXKgEBar1qpIkA_f0u7akBU8,1220
 keras_hub/src/models/sam/sam_prompt_encoder.py,sha256=2foB7900QbzQfZjBo335XYsdjmhOnVT8fKD1CubJNVE,11801
 keras_hub/src/models/sam/sam_transformer.py,sha256=L2bdxdc2RUF1juRZ0F0Z6r0gTva1sUwEdjItJmKKf6w,5730
 keras_hub/src/models/stable_diffusion_3/__init__.py,sha256=ZKYQuaRObyhKq8GVAHmoRvlXp6FpU8ChvutVCHyXKuc,343
@@ -281,8 +280,8 @@ keras_hub/src/models/t5/t5_presets.py,sha256=95zU4cTNEZMH2yiCLptA9zhu2D4mE1Cay18
 keras_hub/src/models/t5/t5_tokenizer.py,sha256=pLTu15JeYSpVmy-2600vBc-Mxn_uHyTKts4PI2MxxBM,2517
 keras_hub/src/models/t5/t5_transformer_layer.py,sha256=uDeP84F1x7xJxki5iKe12Zn6eWD_4yVjoFXMuod-a3A,5347
 keras_hub/src/models/vgg/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-keras_hub/src/models/vgg/vgg_backbone.py,sha256=rI8pAscWJlda9P9L6gcfROpAo461l3v3foXhJ6uyhBA,4495
-keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=eRJiisvXhO6SK3FPO6aOdI8ipxabktyT9Dt03l-_uxw,3486
+keras_hub/src/models/vgg/vgg_backbone.py,sha256=QnEDKn5n9bA9p3nvt5fBHnAssvnLxR0qv-oB372Ts0U,3702
+keras_hub/src/models/vgg/vgg_image_classifier.py,sha256=Dtq_HIJP6fHe8m7ZVLVn8IbHEsVMFWLvWMmn8TU1ntw,6600
 keras_hub/src/models/vit_det/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/models/vit_det/vit_det_backbone.py,sha256=GzwHXAfttExqDaGU4R2LAvng1gzjuvO3HMqUPwNUy9g,7656
 keras_hub/src/models/vit_det/vit_layers.py,sha256=oCKeUw5ckyUAGvmFPuxIiIAqgmC3uqh85LfZcgyh964,19852
@@ -323,14 +322,14 @@ keras_hub/src/tokenizers/byte_pair_tokenizer.py,sha256=Wocarha6ZuzrfiWHPiQUPLLRL
 keras_hub/src/tokenizers/byte_tokenizer.py,sha256=vjgrTT8FdtZVAlr0mU13alzADcUhtMrzgOs4lYeHvAQ,10648
 keras_hub/src/tokenizers/sentence_piece_tokenizer.py,sha256=_PaVn4re3AwBkHylJWsvdvOCCYjOnFXLZmj-V34KehU,9562
 keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py,sha256=8X_IN-hPDiUETGrSX3wPzFnip73xTYcN6FhLNIwfy-Y,4834
-keras_hub/src/tokenizers/tokenizer.py,sha256=22ugg9y_YT9-dUFoxuZHoQ8jMd8C5anjGDkcN_0HDr0,9921
+keras_hub/src/tokenizers/tokenizer.py,sha256=xiT8efGyNmTgsbi6JoJzKUoGg3rWbHjykhfW5mnDbbw,9722
 keras_hub/src/tokenizers/unicode_codepoint_tokenizer.py,sha256=efyjXjUyUCsnh97_kPFq1z2QZENiZSdV0voZytLBffg,13531
 keras_hub/src/tokenizers/word_piece_tokenizer.py,sha256=vP6AZgbzsRiuPCt3W_n94nsF7XiERnagWcH_rqJHtVU,19943
 keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py,sha256=xUhc9EMswarzghNfrDLUFYQBExZOQxbMlfKp9G6A63k,6549
 keras_hub/src/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/keras_utils.py,sha256=lrZuC8HL2lmQfbHaS_t1JUyJann_ji2iTYE0Fzos8PU,1969
 keras_hub/src/utils/pipeline_model.py,sha256=33-0vIB9KGYh2mRtyjHxBPvgGZHDusRcRy-xjki3_gg,9024
-keras_hub/src/utils/preset_utils.py,sha256=sfEJm5ia1N5tD_7TWS4e4_Z-luPS1rNAifSVAlgfbis,27613
+keras_hub/src/utils/preset_utils.py,sha256=O7SbhcJJAoPeMhAF77ppG6XkIAIqBqAQVhKoE-Yt61c,30119
 keras_hub/src/utils/python_utils.py,sha256=N8nWeO3san4YnGkffRXG3Ix7VEIMTKSN21FX5TuL7G8,202
 keras_hub/src/utils/tensor_utils.py,sha256=JipeJUDnnvLuT-ToVQC0t9dmSzebwPG6XiZgEwGEGI4,14646
 keras_hub/src/utils/imagenet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -338,7 +337,7 @@ keras_hub/src/utils/imagenet/imagenet_utils.py,sha256=MvIvv1WJo51ZXBxy4S7t_DsN3Z
 keras_hub/src/utils/timm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/timm/convert_densenet.py,sha256=V-GRjWuDnlh3b1EMxqahwZ3GMwSgOa3v0HOfb2ZZ-d0,3342
 keras_hub/src/utils/timm/convert_resnet.py,sha256=ee8eTml0ffJKE8avzGoLFcpjPF63DsvoIUArAGa8Ngg,5832
-keras_hub/src/utils/timm/preset_loader.py,sha256=toecv57_A1QbmAehNZe4X1Pdf5xU7Ut4AvJc_ptdMPQ,3156
+keras_hub/src/utils/timm/preset_loader.py,sha256=SbDqy2nr54_Y7bwe4sICQ8n-kHnw0PtvNI52tgrH170,3095
 keras_hub/src/utils/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 keras_hub/src/utils/transformers/convert_albert.py,sha256=VdKclZpCxtDWq3UbUUQZf4fR9DJK_JYZ73B4O_G9skg,7695
 keras_hub/src/utils/transformers/convert_bart.py,sha256=Tk4h9Md9rwN5wjQbGIVrC7qzDpF8kI8qm-FKL8HlUok,14411
@@ -351,7 +350,7 @@ keras_hub/src/utils/transformers/convert_mistral.py,sha256=kVhN9h1ZFVhwkNW8p3wnS
 keras_hub/src/utils/transformers/convert_pali_gemma.py,sha256=B1leeDw96Yvu81hYumf66hIid07k5NLqoeWAJgPnaLs,10649
 keras_hub/src/utils/transformers/preset_loader.py,sha256=GS44hZUuGQCtzsyn8z44ZpHdftd3DFemwV2hx2bQa-U,2738
 keras_hub/src/utils/transformers/safetensor_utils.py,sha256=rPK-Uw1CG0DX0d_UAD-r2cG9fw8GI8bvAlrcXfQ9g4c,3323
-keras_hub_nightly-0.16.1.dev202410010346.dist-info/METADATA,sha256=q_YCWBdg95yQvhaaHRaaLHxjh_zSBItI4TrdnUn0fvI,7458
-keras_hub_nightly-0.16.1.dev202410010346.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-keras_hub_nightly-0.16.1.dev202410010346.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
-keras_hub_nightly-0.16.1.dev202410010346.dist-info/RECORD,,
+keras_hub_nightly-0.16.1.dev202410030339.dist-info/METADATA,sha256=tLxESmpHL96pjwqK1gteBF1IdJ_CKtgBOvGEIG9gfyU,7458
+keras_hub_nightly-0.16.1.dev202410030339.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+keras_hub_nightly-0.16.1.dev202410030339.dist-info/top_level.txt,sha256=N4J6piIWBKa38A4uV-CnIopnOEf8mHAbkNXafXm_CuA,10
+keras_hub_nightly-0.16.1.dev202410030339.dist-info/RECORD,,
--- keras_hub/src/layers/preprocessing/resizing_image_converter.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import keras
-from keras import ops
-
-from keras_hub.src.api_export import keras_hub_export
-from keras_hub.src.layers.preprocessing.image_converter import ImageConverter
-from keras_hub.src.utils.keras_utils import standardize_data_format
-from keras_hub.src.utils.tensor_utils import preprocessing_function
-
-
-@keras_hub_export("keras_hub.layers.ResizingImageConverter")
-class ResizingImageConverter(ImageConverter):
- """An `ImageConverter` that simply resizes the input image.
13
-
14
- The `ResizingImageConverter` is a subclass of `ImageConverter` for models
15
- that need to resize (and optionally rescale) image tensors before using them
16
- for modeling. The layer will take as input a raw image tensor (batched or
17
- unbatched) in the channels last or channels first format, and output a
18
- resize tensor.
19
-
20
- Args:
21
- height: int, the height of the output shape.
22
- width: int, the width of the output shape.
23
- scale: float or `None`. If set, the image we be rescaled with a
24
- `keras.layers.Rescaling` layer, multiplying the image by this
25
- scale.
26
- mean: tuples of floats per channel or `None`. If set, the image will be
27
- normalized per channel by subtracting mean.
28
- If set, also set `variance`.
29
- variance: tuples of floats per channel or `None`. If set, the image will
30
- be normalized per channel by dividing by `sqrt(variance)`.
31
- If set, also set `mean`.
32
- crop_to_aspect_ratio: If `True`, resize the images without aspect
33
- ratio distortion. When the original aspect ratio differs
34
- from the target aspect ratio, the output image will be
35
- cropped so as to return the
36
- largest possible window in the image (of size `(height, width)`)
37
- that matches the target aspect ratio. By default
38
- (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
39
- interpolation: String, the interpolation method.
40
- Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
41
- `"lanczos3"`, `"lanczos5"`. Defaults to `"bilinear"`.
42
- data_format: String, either `"channels_last"` or `"channels_first"`.
43
- The ordering of the dimensions in the inputs. `"channels_last"`
44
- corresponds to inputs with shape `(batch, height, width, channels)`
45
- while `"channels_first"` corresponds to inputs with shape
46
- `(batch, channels, height, width)`. It defaults to the
47
- `image_data_format` value found in your Keras config file at
48
- `~/.keras/keras.json`. If you never set it, then it will be
49
- `"channels_last"`.
50
-
51
- Examples:
52
- ```python
53
- # Resize images for `"pali_gemma_3b_224"`.
54
- converter = keras_hub.layers.ImageConverter.from_preset("pali_gemma_3b_224")
55
- converter(np.ones(2, 512, 512, 3)) # Output shape: (2, 224, 224, 3)
56
- # Resize images for `"pali_gemma_3b_224"`.
57
- converter = keras_hub.layers.ImageConverter.from_preset("pali_gemma_3b_448")
58
- converter(np.ones(2, 512, 512, 3)) # Output shape: (2, 448, 448, 3)
59
- ```
60
- """
-
-    def __init__(
-        self,
-        height,
-        width,
-        scale=None,
-        mean=None,
-        variance=None,
-        crop_to_aspect_ratio=True,
-        interpolation="bilinear",
-        data_format=None,
-        **kwargs,
-    ):
-        super().__init__(**kwargs)
-        # By default, we just do a simple resize. Any model can subclass this
-        # layer for preprocessing of a raw image to a model image input.
-        self.resizing = keras.layers.Resizing(
-            height=height,
-            width=width,
-            crop_to_aspect_ratio=crop_to_aspect_ratio,
-            interpolation=interpolation,
-            data_format=data_format,
-            dtype=self.dtype_policy,
-            name="resizing",
-        )
-        if scale is not None:
-            self.rescaling = keras.layers.Rescaling(
-                scale=scale,
-                dtype=self.dtype_policy,
-                name="rescaling",
-            )
-        else:
-            self.rescaling = None
-        if (mean is not None) != (variance is not None):
-            raise ValueError(
-                "Both `mean` and `variance` should be set or `None`. Received "
-                f"`mean={mean}`, `variance={variance}`."
-            )
-        self.scale = scale
-        self.mean = mean
-        self.variance = variance
-        self.data_format = standardize_data_format(data_format)
-
-    def image_size(self):
-        """Returns the preprocessed size of a single image."""
-        return (self.resizing.height, self.resizing.width)
-
-    @preprocessing_function
-    def call(self, inputs):
-        x = self.resizing(inputs)
-        if self.rescaling:
-            x = self.rescaling(x)
-        if self.mean is not None:
-            # Avoid `layers.Normalization` so this works batched and unbatched.
-            channels_first = self.data_format == "channels_first"
-            if len(ops.shape(inputs)) == 3:
-                broadcast_dims = (1, 2) if channels_first else (0, 1)
-            else:
-                broadcast_dims = (0, 2, 3) if channels_first else (0, 1, 2)
-            mean = ops.expand_dims(ops.array(self.mean), broadcast_dims)
-            std = ops.expand_dims(ops.sqrt(self.variance), broadcast_dims)
-            x = (x - mean) / std
-        return x
-
-    def get_config(self):
-        config = super().get_config()
-        config.update(
-            {
-                "height": self.resizing.height,
-                "width": self.resizing.width,
-                "interpolation": self.resizing.interpolation,
-                "crop_to_aspect_ratio": self.resizing.crop_to_aspect_ratio,
-                "scale": self.scale,
-                "mean": self.mean,
-                "variance": self.variance,
-            }
-        )
-        return config
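For reference, the deleted `call()` above amounts to resize, optional rescale, then per-channel normalization by `(x - mean) / sqrt(variance)`. Below is a minimal standalone sketch of that pipeline using `keras.ops`; the size, scale, mean, and variance values are illustrative placeholders, not taken from any preset. Preset-driven preprocessing itself remains available through `keras_hub.layers.ImageConverter.from_preset(...)`, as in the docstring examples above.

```python
import numpy as np
import keras
from keras import ops

height, width = 224, 224
scale = 1.0 / 255.0  # illustrative rescaling factor
mean = (0.485, 0.456, 0.406)  # illustrative per-channel mean
variance = (0.229**2, 0.224**2, 0.225**2)  # illustrative per-channel variance

# Batched, channels-last input, as in the deleted layer's docstring example.
images = np.random.uniform(0, 255, (2, 512, 512, 3)).astype("float32")

# 1. Resize (the deleted layer wrapped `keras.layers.Resizing`).
x = keras.layers.Resizing(height, width, crop_to_aspect_ratio=True)(images)
# 2. Rescale (the deleted layer wrapped `keras.layers.Rescaling`).
x = x * scale
# 3. Normalize per channel: (x - mean) / sqrt(variance), broadcast over the
#    (batch, height, width) axes for channels-last data.
mean_t = ops.reshape(ops.array(mean), (1, 1, 1, 3))
std_t = ops.sqrt(ops.reshape(ops.array(variance), (1, 1, 1, 3)))
x = (x - mean_t) / std_t

print(x.shape)  # (2, 224, 224, 3)
```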