tfds-nightly 4.9.9.dev202507140045__py3-none-any.whl → 4.9.9.dev202507160045__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,46 @@
+ @inproceedings{bogin-etal-2021-covr,
+     title = "{COVR}: A Test-Bed for Visually Grounded Compositional Generalization with Real Images",
+     author = "Bogin, Ben and
+       Gupta, Shivanshu and
+       Gardner, Matt and
+       Berant, Jonathan",
+     editor = "Moens, Marie-Francine and
+       Huang, Xuanjing and
+       Specia, Lucia and
+       Yih, Scott Wen-tau",
+     booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
+     month = nov,
+     year = "2021",
+     address = "Online and Punta Cana, Dominican Republic",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2021.emnlp-main.774/",
+     doi = "10.18653/v1/2021.emnlp-main.774",
+     pages = "9824--9846",
+     abstract = "While interest in models that generalize at test time to new compositions has risen in recent years, benchmarks in the visually-grounded domain have thus far been restricted to synthetic images. In this work, we propose COVR, a new test-bed for visually-grounded compositional generalization with real images. To create COVR, we use real images annotated with scene graphs, and propose an almost fully automatic procedure for generating question-answer pairs along with a set of context images. COVR focuses on questions that require complex reasoning, including higher-order operations such as quantification and aggregation. Due to the automatic generation process, COVR facilitates the creation of compositional splits, where models at test time need to generalize to new concepts and compositions in a zero- or few-shot setting. We construct compositional splits using COVR and demonstrate a myriad of cases where state-of-the-art pre-trained language-and-vision models struggle to compositionally generalize."
+ }
+
+ @inproceedings{yatskar2016,
+     title={Situation Recognition: Visual Semantic Role Labeling for Image Understanding},
+     author={Yatskar, Mark and Zettlemoyer, Luke and Farhadi, Ali},
+     booktitle={Conference on Computer Vision and Pattern Recognition},
+     year={2016}
+ }
+
+ @article{cite-key,
+     abstract = {Despite progress in perceptual tasks such as image classification, computers still perform poorly on cognitive tasks such as image description and question answering. Cognition is core to tasks that involve not just recognizing, but reasoning about our visual world. However, models used to tackle the rich content in images for cognitive tasks are still being trained using the same datasets designed for perceptual tasks. To achieve success at cognitive tasks, models need to understand the interactions and relationships between objects in an image. When asked ``What vehicle is the person riding?'', computers will need to identify the objects in an image as well as the relationships riding(man, carriage) and pulling(horse, carriage) to answer correctly that ``the person is riding a horse-drawn carriage.''In this paper, we present the Visual Genome dataset to enable the modeling of such relationships. We collect dense annotations of objects, attributes, and relationships within each image to learn these models. Specifically, our dataset contains over 108K images where each image has an average of {\$}{\$}35{\$}{\$}objects, {\$}{\$}26{\$}{\$}attributes, and {\$}{\$}21{\$}{\$}pairwise relationships between objects. We canonicalize the objects, attributes, relationships, and noun phrases in region descriptions and questions answer pairs to WordNet synsets. Together, these annotations represent the densest and largest dataset of image descriptions, objects, attributes, relationships, and question answer pairs.},
+     author = {Krishna, Ranjay and Zhu, Yuke and Groth, Oliver and Johnson, Justin and Hata, Kenji and Kravitz, Joshua and Chen, Stephanie and Kalantidis, Yannis and Li, Li-Jia and Shamma, David A. and Bernstein, Michael S. and Fei-Fei, Li},
+     date = {2017/05/01},
+     date-added = {2025-07-10 08:32:03 -0700},
+     date-modified = {2025-07-10 08:32:03 -0700},
+     doi = {10.1007/s11263-016-0981-7},
+     id = {Krishna2017},
+     isbn = {1573-1405},
+     journal = {International Journal of Computer Vision},
+     number = {1},
+     pages = {32--73},
+     title = {Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations},
+     url = {https://doi.org/10.1007/s11263-016-0981-7},
+     volume = {123},
+     year = {2017},
+     bdsk-url-1 = {https://doi.org/10.1007/s11263-016-0981-7}}
+
@@ -0,0 +1 @@
+ [COVR](https://covr-dataset.github.io/) dataset with [imSitu](https://github.com/my89/imSitu) and [Visual Genome](https://homes.cs.washington.edu/~ranjay/visualgenome/index.html) images.
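With this nightly build installed, the new dataset should be reachable through the standard TFDS API. A minimal sketch, assuming the builder below registers under the name `covr` (the TFDS convention for a builder in `datasets/covr/`), and noting that preparing it downloads roughly 50 GB of source archives per the checksums.tsv further down:

```python
import tensorflow_datasets as tfds

# Builds (or reuses) the prepared dataset, then reads one validation example.
ds = tfds.load('covr', split='validation')
for example in ds.take(1):
    print(example['utterance'])  # the natural-language question
    print(example['label'])      # the answer, serialized as text
```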
@@ -0,0 +1,8 @@
+ content.data-type.image # Contains image data.
+ content.data-type.text # Contains text data.
+ content.language.en # Contains text in language English / en.
+ content.monolingual # Contains text in 1 natural language.
+ ml.task.common-sense-reasoning # Relates to Common Sense Reasoning, a machine learning task.
+ ml.task.natural-language-inference # Relates to Natural Language Inference, a machine learning task.
+ ml.task.natural-language-understanding # Relates to Natural Language Understanding, a machine learning task.
+ ml.task.object-detection # Relates to Object Detection, a machine learning task.
@@ -0,0 +1,15 @@
+ # coding=utf-8
+ # Copyright 2025 The TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
@@ -0,0 +1,4 @@
+ https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip 9731705982 51c682d2721f880150720bb416e0346a4c787e4c55d7f80dfd1bd3f73ba81646 images.zip
+ https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip 5471658058 99da1a0ddf87011319ff3b05cf9176ffee2731cc3c52951162d9ef0d68e3cfb5 images2.zip
+ https://drive.google.com/uc?export=download&id=10xlQ6isRdGX94BypoqN6klniGeqdLBJA 21964401 83443ffd6493cdc807aaab8c559a38ad757d47e40d4d6f27b8c65efd4d889091 covr_v1_0.zip
+ https://s3.amazonaws.com/my89-frame-annotation/public/of500_images.tar 36690524160 94dee93095d0325fb9aef1e8d956b6be297ab13bf2e62d6027fd5dcc782e8f61 of500_images.tar
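Each row in checksums.tsv records the source URL, the expected size in bytes, the SHA-256 hex digest, and the filename the download is registered under. A sketch of verifying a local copy against such a row (the local path is hypothetical; the size and digest are copied from the covr_v1_0.zip row above):

```python
import hashlib
from pathlib import Path

def matches_checksum_row(path: str, size: int, sha256_hex: str) -> bool:
  """Returns True if the file matches the recorded size and digest."""
  p = Path(path)
  if p.stat().st_size != size:
    return False
  digest = hashlib.sha256()
  with p.open('rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):  # hash in 1 MiB chunks
      digest.update(chunk)
  return digest.hexdigest() == sha256_hex

print(matches_checksum_row(
    '/tmp/covr_v1_0.zip', 21964401,
    '83443ffd6493cdc807aaab8c559a38ad757d47e40d4d6f27b8c65efd4d889091'))
```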
@@ -0,0 +1,114 @@
+ # coding=utf-8
+ # Copyright 2025 The TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """covr dataset."""
+
+ import json
+
+ from etils import epath
+ import tensorflow_datasets.public_api as tfds
+
+
+ class Builder(tfds.core.GeneratorBasedBuilder):
+   """DatasetBuilder for covr dataset."""
+
+   VERSION = tfds.core.Version('1.0.0')
+   RELEASE_NOTES = {
+       '1.0.0': 'Initial release.',
+   }
+
+   def _info(self) -> tfds.core.DatasetInfo:
+     """Returns the dataset metadata."""
+     return self.dataset_info_from_configs(
+         features=tfds.features.FeaturesDict({
+             'utterance': tfds.features.Text(),
+             'scenes': tfds.features.Sequence(
+                 feature=tfds.features.Text(),
+             ),
+             'properties': tfds.features.Sequence(
+                 feature=tfds.features.Text(),
+             ),
+             'pattern_name': tfds.features.Text(),
+             'program': tfds.features.Text(),
+             'label': tfds.features.Text(),
+             'images': tfds.features.Sequence(
+                 feature=tfds.features.Image(),
+             ),
+         }),
+         supervised_keys=None,
+         homepage='https://covr-dataset.github.io/',
+     )
+
+   def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+     """Returns SplitGenerators."""
+     extracted_dirs = dl_manager.download_and_extract({
+         'covr_dir': (
+             'https://drive.google.com/uc?export=download&'
+             'id=10xlQ6isRdGX94BypoqN6klniGeqdLBJA'
+         ),
+         'imsitu_dir': (
+             'https://s3.amazonaws.com/my89-frame-annotation'
+             '/public/of500_images.tar'
+         ),
+         'vg1_dir': 'https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip',
+         'vg2_dir': (
+             'https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip'
+         ),
+     })
+
+     # Each name is the image file name without the ".jpg" extension, which is
+     # also used as the scene id in COVR.
+     image_path_by_scene_id: dict[str, epath.Path] = {}
+     image_globs = [
+         extracted_dirs['vg1_dir'].glob('*/*.jpg'),
+         extracted_dirs['vg2_dir'].glob('*/*.jpg'),
+         extracted_dirs['imsitu_dir'].glob('of500_images/*/*.jpg'),
+     ]
+     for image_glob in image_globs:
+       for image_path in image_glob:
+         name = image_path.stem
+         image_path_by_scene_id[name] = image_path
+     path = extracted_dirs['covr_dir']
+     return {
+         'train': self._generate_examples(
+             path / 'train.jsonl', image_path_by_scene_id
+         ),
+         'test': self._generate_examples(
+             path / 'test.jsonl', image_path_by_scene_id
+         ),
+         'validation': self._generate_examples(
+             path / 'val.jsonl', image_path_by_scene_id
+         ),
+     }
+
+   def _generate_examples(
+       self, path: epath.Path, image_path_by_scene_id: dict[str, epath.Path]
+   ):
+     """Yields examples."""
+     with path.open() as f:
+       for line in f:
+         item = json.loads(line)
+         images = [
+             image_path_by_scene_id[scene_id] for scene_id in item['scenes']
+         ]
+         yield item['qid'], {
+             'utterance': item['utterance'],
+             'scenes': item['scenes'],
+             'properties': item['properties'],
+             'pattern_name': item['pattern_name'],
+             'program': str(item['program']),
+             'label': str(item.get('answer')),
+             'images': images,
+         }
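For orientation, `_generate_examples` expects one JSON object per line in the COVR `*.jsonl` files. A hypothetical record (field values invented for illustration; only the keys are taken from the builder code above):

```python
import json

# Keys read by the builder: qid, utterance, scenes, properties,
# pattern_name, program, and (optionally) answer.
line = json.dumps({
    'qid': 0,
    'utterance': 'Is there a dog that is sitting?',
    'scenes': ['2412112', '2412113'],
    'properties': [],
    'pattern_name': 'verify',
    'program': ['exists', 'filter'],
    'answer': True,
})
item = json.loads(line)
# The builder keys each example by qid, stringifies program and answer,
# and maps each scene id to an image path; a missing answer would be
# serialized as the text 'None'.
print(item['qid'], str(item.get('answer')))  # -> 0 True
```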
@@ -0,0 +1,41 @@
+ # coding=utf-8
+ # Copyright 2025 The TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """covr dataset."""
+
+ from tensorflow_datasets.datasets.covr import covr_dataset_builder
+ import tensorflow_datasets.public_api as tfds
+
+
+ class CovrTest(tfds.testing.DatasetBuilderTestCase):
+   """Tests for covr dataset."""
+
+   DATASET_CLASS = covr_dataset_builder.Builder
+   SPLITS = {
+       'train': 1,
+       'test': 1,
+       'validation': 1,
+   }
+
+   DL_EXTRACT_RESULT = {
+       'covr_dir': 'covr',
+       'imsitu_dir': 'imsitu',
+       'vg1_dir': 'vg1',
+       'vg2_dir': 'vg2',
+   }
+
+
+ if __name__ == '__main__':
+   tfds.testing.test_main()
@@ -14,15 +14,14 @@
  # limitations under the License.

  """Dataset class for Cars196 Dataset."""
-
  import os
  import urllib

  from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
  import tensorflow_datasets.public_api as tfds

- _URL = 'https://web.archive.org/web/20221212053154/http://ai.stanford.edu/~jkrause/car196/'
- _EXTRA_URL = 'https://web.archive.org/web/20230323151230/https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz'
+ _URL = 'http://ai.stanford.edu/~jkrause/car196/'
+ _EXTRA_URL = 'https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz'

  _DESCRIPTION = (
      'The Cars dataset contains 16,185 images of 196 classes of cars. The data '
@@ -247,7 +246,7 @@ _CITATION = """\
  class Cars196(tfds.core.GeneratorBasedBuilder):
    """Car Images dataset."""

-   VERSION = tfds.core.Version('2.2.0')
+   VERSION = tfds.core.Version('2.1.0')
    SUPPORTED_VERSIONS = [
        tfds.core.Version('2.1.0'),
    ]
@@ -256,7 +255,6 @@ class Cars196(tfds.core.GeneratorBasedBuilder):
        '2.0.0': 'Initial release',
        '2.0.1': 'Website URL update',
        '2.1.0': 'Fixing bug https://github.com/tensorflow/datasets/issues/3927',
-       '2.2.0': 'Fix broken links',
    }

    def _info(self):
@@ -273,7 +271,7 @@ class Cars196(tfds.core.GeneratorBasedBuilder):
          description=(_DESCRIPTION),
          features=tfds.features.FeaturesDict(features_dict),
          supervised_keys=('image', 'label'),
-         homepage='https://web.archive.org/web/20230323151220/https://ai.stanford.edu/~jkrause/cars/car_dataset.html',
+         homepage='https://ai.stanford.edu/~jkrause/cars/car_dataset.html',
          citation=_CITATION,
      )

@@ -2,7 +2,3 @@ http://ai.stanford.edu/~jkrause/car196/cars_test.tgz 977350468 bffea656d6f425cba
  http://ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat 185758 790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05 cars_test_annos_withlabels.mat
  http://ai.stanford.edu/~jkrause/car196/cars_train.tgz 979269282 512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288 cars_train.tgz
  https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz 330960 b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd car_devkit.tgz
- https://web.archive.org/web/20221212053154/http:/ai.stanford.edu/~jkrause/car196/cars_test.tgz 977350468 bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243 cars_test.tgz
- https://web.archive.org/web/20221212053154/http:/ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat 185758 790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05 cars_test_annos_withlabels.mat
- https://web.archive.org/web/20221212053154/http:/ai.stanford.edu/~jkrause/car196/cars_train.tgz 979269282 512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288 cars_train.tgz
- https://web.archive.org/web/20230323151230/https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz 330960 b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd car_devkit.tgz
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tfds-nightly
- Version: 4.9.9.dev202507140045
+ Version: 4.9.9.dev202507160045
  Summary: tensorflow/datasets is a library of datasets ready to use with TensorFlow.
  Home-page: https://github.com/tensorflow/datasets
  Download-URL: https://github.com/tensorflow/datasets/tags
@@ -600,6 +600,13 @@ tensorflow_datasets/datasets/corr2cause/__init__.py,sha256=eFqnTjU7s5iubj6XcKoU8
  tensorflow_datasets/datasets/corr2cause/checksums.tsv,sha256=dYJMEf-ao5tMIkRCjR3S79D4Sblhtfk7OmL8I3BeHBE,465
  tensorflow_datasets/datasets/corr2cause/corr2cause_dataset_builder.py,sha256=VXatympHwcL2AP3V_sfhTlRrUJF0oXPbR3LZ4r_aQLQ,2241
  tensorflow_datasets/datasets/corr2cause/corr2cause_dataset_builder_test.py,sha256=L9JQZeoICzmk81y3BBZ0tFm_6ykdt56SESvX8HiJ0gc,1241
+ tensorflow_datasets/datasets/covr/CITATIONS.bib,sha256=HtEfWhrwoTs_PkBfxikQTk4iI8kZP5Ta8RZ5qUv0860,4384
+ tensorflow_datasets/datasets/covr/README.md,sha256=ZNUTXpr1HW69QPMzRhZhBTyT4q4EqM1s_ylG8DIbfkg,187
+ tensorflow_datasets/datasets/covr/TAGS.txt,sha256=uI1DvzlZB7eOfouNnCadWPrlbHqk9Y7A4ni7SsaLmaU,597
+ tensorflow_datasets/datasets/covr/__init__.py,sha256=eFqnTjU7s5iubj6XcKoU8lZUSHecOdnebZFm1vTkjbA,612
+ tensorflow_datasets/datasets/covr/checksums.tsv,sha256=UjGSI4gpEff0pmm1Vzi1xi1QpxTU3U5yG6sT3myaft4,628
+ tensorflow_datasets/datasets/covr/covr_dataset_builder.py,sha256=yujNgZLjyFg-hfncR7kv7cRSgrerUHupRyCOsQBMYs0,3896
+ tensorflow_datasets/datasets/covr/covr_dataset_builder_test.py,sha256=w1c1MGjxUu9zix_TH_ajzNVlWppEghqkFpxcTI0o5Ag,1145
  tensorflow_datasets/datasets/databricks_dolly/CITATIONS.bib,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tensorflow_datasets/datasets/databricks_dolly/README.md,sha256=f-oYORQSGHIsFAi0BVUVKenFZYfzL97F5J2FiQZ6X0A,676
  tensorflow_datasets/datasets/databricks_dolly/TAGS.txt,sha256=pFCvqtzYbd0QNzYLGJsMf1giLMcEZvKYFpngGxIBQyI,1130
@@ -1668,7 +1675,7 @@ tensorflow_datasets/image_classification/caltech.py,sha256=YrrxITYyd0lSZ117jROgd
  tensorflow_datasets/image_classification/caltech101_labels.txt,sha256=35kwS35Pc3QJJB7yKXy2U0dvldE20BtAbuUaWDn6R8Q,906
  tensorflow_datasets/image_classification/caltech_birds.py,sha256=MUOyqmoz1iYbXW4KoqyTaZe0kQ9MUQ5JEYTOdlM9jlI,12861
  tensorflow_datasets/image_classification/caltech_birds_test.py,sha256=o8iHg_bjzjg4TaH-ArjxKFpBJROm4cXG5ZTwn8P5T1A,1407
- tensorflow_datasets/image_classification/cars196.py,sha256=v08PqZCDDrFerQ39TesO0g-Kxhn3Us6F68W07-MdAB0,12516
+ tensorflow_datasets/image_classification/cars196.py,sha256=RD1lef5ba3oVSy1JHN-E5J1Mvr23jN9Af72AOYlXCU0,12351
  tensorflow_datasets/image_classification/cars196_test.py,sha256=AoMjpfS_Ii3EOm_eyCle3E9cIRpcRudMUN4sGJ6d_Ks,1051
  tensorflow_datasets/image_classification/cassava.py,sha256=eDsVKepCy2oNAbQep_Vj9LtxUTRpcA6wVilbJUhoIHM,3578
  tensorflow_datasets/image_classification/cassava_test.py,sha256=qXQTLM_lnhD4xZqJXxspbCHTq18Tc59IKWPh4TE9I1I,964
@@ -2330,7 +2337,7 @@ tensorflow_datasets/translate/tatoeba/tatoeba.py,sha256=suDWj4CKVLPeDbG949G5GUH-
  tensorflow_datasets/url_checksums/c4.txt,sha256=dDDWbmWFr7mwhUle1z5ZybWpgA218XjXKytjZUUIXos,19354
  tensorflow_datasets/url_checksums/caltech_birds2010.txt,sha256=U2UmCDpnZCaB22ZGxRjU73eC9b4_WEGO5EXlSioFCbA,486
  tensorflow_datasets/url_checksums/caltech_birds2011.txt,sha256=43edRPKr-ZVW9tuPXLnA637tRkFRcddAvOJOtbYdthg,691
- tensorflow_datasets/url_checksums/cars196.txt,sha256=0uxgkOOMZZZ6l-nln43S5j4bsnoPzlkB5RqlSqxoEys,1367
+ tensorflow_datasets/url_checksums/cars196.txt,sha256=WnrQ5W0PEhJ6z_Cjj1Ju-6oVbrRy1MM83b03FTBME3E,599
  tensorflow_datasets/url_checksums/cassava.txt,sha256=kInp15oLtZbDXVqzaRrLujvJgaXHKHzPNewrsAqfqbg,161
  tensorflow_datasets/url_checksums/cats_vs_dogs.txt,sha256=T7UMkE-e4wdSVMoAlNTS9tXQMG2NTH8A2Z7jrXk_GqQ,212
  tensorflow_datasets/url_checksums/cfq.txt,sha256=A4iRuXnw-kjHckjHE086H4h7xW8Sz8HVCNpCByJhyLE,140
@@ -2461,10 +2468,10 @@ tensorflow_datasets/vision_language/wit/wit_test.py,sha256=PXS8DMNW-MDrT2p5oy4Ic
  tensorflow_datasets/vision_language/wit_kaggle/__init__.py,sha256=vGwSGeM8WE4Q-l0-eEE1sBojmk6YT0l1OO60AWa4Q40,719
  tensorflow_datasets/vision_language/wit_kaggle/wit_kaggle.py,sha256=q-vX_FBzIwsFxL4sY9vuyQ3UQD2PLM4yhUR4U6l-qao,16903
  tensorflow_datasets/vision_language/wit_kaggle/wit_kaggle_test.py,sha256=ZymHT1NkmD-pUnh3BmM3_g30c5afsWYnmqDD9dVyDSA,1778
- tfds_nightly-4.9.9.dev202507140045.dist-info/licenses/AUTHORS,sha256=nvBG4WwfgjuOu1oZkuQKw9kg7X6rve679ObS-YDDmXg,309
- tfds_nightly-4.9.9.dev202507140045.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- tfds_nightly-4.9.9.dev202507140045.dist-info/METADATA,sha256=3lsJqMRurjHyGG6lr9UriERUe7UC9ejoJ8X6OXxHs2s,11694
- tfds_nightly-4.9.9.dev202507140045.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- tfds_nightly-4.9.9.dev202507140045.dist-info/entry_points.txt,sha256=eHEL7nF5y1uCY2FgkuYIdE062epJXlAQTSdq89px4p4,73
- tfds_nightly-4.9.9.dev202507140045.dist-info/top_level.txt,sha256=bAevmk9209s_oxVZVlN6hSDIVS423qrMQvmcWSvW4do,20
- tfds_nightly-4.9.9.dev202507140045.dist-info/RECORD,,
+ tfds_nightly-4.9.9.dev202507160045.dist-info/licenses/AUTHORS,sha256=nvBG4WwfgjuOu1oZkuQKw9kg7X6rve679ObS-YDDmXg,309
+ tfds_nightly-4.9.9.dev202507160045.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ tfds_nightly-4.9.9.dev202507160045.dist-info/METADATA,sha256=f1qRJdbrEjRnhS5iNf10pK3bG6V3KV21ZusYp9cEzCc,11694
+ tfds_nightly-4.9.9.dev202507160045.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ tfds_nightly-4.9.9.dev202507160045.dist-info/entry_points.txt,sha256=eHEL7nF5y1uCY2FgkuYIdE062epJXlAQTSdq89px4p4,73
+ tfds_nightly-4.9.9.dev202507160045.dist-info/top_level.txt,sha256=bAevmk9209s_oxVZVlN6hSDIVS423qrMQvmcWSvW4do,20
+ tfds_nightly-4.9.9.dev202507160045.dist-info/RECORD,,