tfds-nightly 4.9.9.dev202508150045-py3-none-any.whl → 4.9.9.dev202508160044-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tensorflow_datasets/datasets/multi_news/CITATIONS.bib
@@ -0,0 +1,8 @@
+ @misc{alex2019multinews,
+     title={Multi-News: a Large-Scale Multi-Document Summarization Dataset and Abstractive Hierarchical Model},
+     author={Alexander R. Fabbri and Irene Li and Tianwei She and Suyi Li and Dragomir R. Radev},
+     year={2019},
+     eprint={1906.01749},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
tensorflow_datasets/datasets/multi_news/README.md
@@ -0,0 +1,8 @@
+ Multi-News consists of news articles and human-written summaries
+ of these articles from the site newser.com.
+ Each summary is professionally written by editors and
+ includes links to the original articles cited.
+
+ There are two features:
+ - document: text of news articles separated by special token "|||||".
+ - summary: news summary.
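The "|||||" token is the only structure inside the packed document string, so downstream code splits on it to recover the individual source articles. A minimal consumption sketch (assumes the dataset has been prepared and uses the standard tfds.load API; the split names come from the builder further down):

import tensorflow_datasets as tfds

# Splits are "train", "validation" and "test" (see _split_generators below).
ds = tfds.load("multi_news", split="train")
for example in ds.take(1):
    document = example["document"].numpy().decode("utf-8")
    summary = example["summary"].numpy().decode("utf-8")
    # Recover the individual source articles from the packed document.
    articles = [part.strip() for part in document.split("|||||") if part.strip()]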
tensorflow_datasets/datasets/multi_news/TAGS.txt
File without changes (new empty file)
tensorflow_datasets/datasets/multi_news/__init__.py
@@ -0,0 +1,15 @@
+ # coding=utf-8
+ # Copyright 2025 The TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
tensorflow_datasets/datasets/multi_news/checksums.tsv
@@ -0,0 +1,6 @@
+ https://huggingface.co/datasets/alexfabbri/multi_news/raw/main/data/test.src.cleaned 133 d04c4581d52321a30c246d2caa72853ee7f28c6b7a3985ee436f54c4bc264315 test.src.cleaned
+ https://huggingface.co/datasets/alexfabbri/multi_news/raw/main/data/test.tgt 132 afba4aa26d95bb557c0eaa0cb8f7495af2104f1e43f4b5f9ef429b8752477abd test.tgt
+ https://huggingface.co/datasets/alexfabbri/multi_news/raw/main/data/train.src.cleaned 134 75f87b786ff1982bf1bd5803c6a7377d1834b81956ac680a6955789ba047cc0b train.src.cleaned
+ https://huggingface.co/datasets/alexfabbri/multi_news/raw/main/data/train.tgt 133 9f1e9b290a6aae1aa67bd5b361c934ee9db32486e5cd97d83184c097ef8b27e5 train.tgt
+ https://huggingface.co/datasets/alexfabbri/multi_news/raw/main/data/val.src.cleaned 133 8df3ef6bd1882094de8120fa635c3abf758e10427f81f306aaa4786df7b57861 val.src.cleaned
+ https://huggingface.co/datasets/alexfabbri/multi_news/raw/main/data/val.tgt 132 9c0377a443ea92b17449f7df17f1cdfa7c7ebbfe3a45f2f8cd7b3e0ffb47b1df val.tgt
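Each checksums.tsv row is tab-separated: source URL, expected size in bytes, SHA-256 hex digest, and the filename the download is recorded under. A sketch of checking one row by hand (the verify_row helper is hypothetical, not part of TFDS):

import hashlib
import urllib.request

def verify_row(url: str, size: int, sha256_hex: str) -> bool:
    """Fetches `url` and compares it to the recorded size and digest."""
    data = urllib.request.urlopen(url).read()
    return len(data) == size and hashlib.sha256(data).hexdigest() == sha256_hex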
tensorflow_datasets/datasets/multi_news/multi_news_dataset_builder.py
@@ -0,0 +1,78 @@
+ # coding=utf-8
+ # Copyright 2025 The TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """multi_news dataset."""
+
+ from etils import epath
+ import tensorflow_datasets.public_api as tfds
+
+ _URL_PATH = (
+     "https://huggingface.co/datasets/alexfabbri/multi_news/raw/main/data/"
+ )
+
+
+ _DOCUMENT = "document"
+ _SUMMARY = "summary"
+
+
+ class Builder(tfds.core.GeneratorBasedBuilder):
+   """DatasetBuilder for multi_news dataset."""
+
+   VERSION = tfds.core.Version("2.0.0")
+
+   def _info(self) -> tfds.core.DatasetInfo:
+     """Returns the dataset metadata."""
+     return self.dataset_info_from_configs(
+         features=tfds.features.FeaturesDict(
+             {_DOCUMENT: tfds.features.Text(), _SUMMARY: tfds.features.Text()}
+         ),
+         supervised_keys=(_DOCUMENT, _SUMMARY),
+         homepage="https://github.com/Alex-Fabbri/Multi-News",
+     )
+
+   def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+     """Returns SplitGenerators."""
+     data_dict = {
+         "train_src": _URL_PATH + "train.src.cleaned",
+         "train_tgt": _URL_PATH + "train.tgt",
+         "val_src": _URL_PATH + "val.src.cleaned",
+         "val_tgt": _URL_PATH + "val.tgt",
+         "test_src": _URL_PATH + "test.src.cleaned",
+         "test_tgt": _URL_PATH + "test.tgt",
+     }
+     files = dl_manager.download_and_extract(data_dict)
+     return {
+         "train": self._generate_examples(
+             files["train_src"], files["train_tgt"]
+         ),
+         "validation": self._generate_examples(
+             files["val_src"], files["val_tgt"]
+         ),
+         "test": self._generate_examples(files["test_src"], files["test_tgt"]),
+     }
+
+   def _generate_examples(self, src_file, tgt_file):
+     """Yields examples."""
+     with epath.Path(src_file).open() as src_f, epath.Path(
+         tgt_file
+     ).open() as tgt_f:
+       for i, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
+         yield i, {
+             # In original file, each line has one example and natural newline
+             # tokens "\n" are being replaced with "NEWLINE_CHAR". Here restore
+             # the natural newline token to avoid special vocab "NEWLINE_CHAR".
+             _DOCUMENT: src_line.strip().replace("NEWLINE_CHAR", "\n"),
+             # Remove the starting token "- " for every target sequence.
+             _SUMMARY: tgt_line.strip().lstrip("- "),
+         }
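With the builder registered under tensorflow_datasets/datasets/, preparation and reading go through the standard TFDS entry points; a minimal sketch:

import tensorflow_datasets as tfds

builder = tfds.builder("multi_news")  # Resolves to the Builder class above.
builder.download_and_prepare()        # Fetches the six source files and writes records.
# as_supervised=True yields (document, summary) pairs, per supervised_keys.
train_ds = builder.as_dataset(split="train", as_supervised=True)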
tensorflow_datasets/datasets/multi_news/multi_news_dataset_builder_test.py
@@ -13,20 +13,30 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- """Test for Multi-News dataset."""
+ """multi_news dataset."""

  from tensorflow_datasets import testing
- from tensorflow_datasets.summarization import multi_news
+ from tensorflow_datasets.datasets.multi_news import multi_news_dataset_builder


- class MultiNewsTest(testing.DatasetBuilderTestCase):
-   DATASET_CLASS = multi_news.MultiNews
+ class MultiNewsTest(testing.DatasetBuilderTestCase):
+   """Tests for multi_news dataset."""
+
+   DATASET_CLASS = multi_news_dataset_builder.Builder
    SPLITS = {
-       "train": 1,  # Number of fake train example
-       "validation": 1,  # Number of fake validation example
-       "test": 1,  # Number of fake test example
+       "train": 1,
+       "validation": 1,
+       "test": 1,
+   }
+
+   DL_EXTRACT_RESULT = {
+       "train_src": "train.src.cleaned",
+       "train_tgt": "train.tgt",
+       "val_src": "val.src.cleaned",
+       "val_tgt": "val.tgt",
+       "test_src": "test.src.cleaned",
+       "test_tgt": "test.tgt",
    }
-   DL_EXTRACT_RESULT = ""


  if __name__ == "__main__":
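In DatasetBuilderTestCase, DL_EXTRACT_RESULT tells the mocked download manager which file under the test's dummy_data/ directory to return for each key that _split_generators requests, so the test never touches the network. Conceptually it behaves like this simplified sketch (not the actual testing internals; DUMMY_DATA_DIR is a placeholder):

import os

DUMMY_DATA_DIR = "dummy_data"  # hypothetical location of the fake files

def fake_download_and_extract(url_dict, dl_extract_result):
    # Ignore the real URLs; return dummy-file paths under the same keys.
    return {
        key: os.path.join(DUMMY_DATA_DIR, dl_extract_result[key])
        for key in url_dict
    }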
tensorflow_datasets/scripts/cli/cli_utils.py
@@ -184,12 +184,13 @@ class PathOptions:
    """Path options.

    Attributes:
-     data_dir: Where to place datasets. Default to `~/tensorflow_datasets/` or
+     data_dir: Where to place datasets. Defaults to `~/tensorflow_datasets/` or
        `TFDS_DATA_DIR` environment variable.
-     download_dir: Where to place downloads. Default to `<data_dir>/downloads/`.
-     extract_dir: Where to extract files. Default to `<download_dir>/extracted/`.
+     download_dir: Where to place downloads. Defaults to `<data_dir>/downloads/`.
+     extract_dir: Where to extract files. Defaults to
+       `<download_dir>/extracted/`.
      manual_dir: Where to manually download data (required for some datasets).
-       Default to `<download_dir>/manual/`.
+       Defaults to `<download_dir>/manual/`.
      add_name_to_manual_dir: If true, append the dataset name to the `manual_dir`
        (e.g. `<download_dir>/manual/<dataset_name>/`). Useful to avoid collisions
        if many datasets are generated.
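The three directory defaults chain off one another. Resolution behaves roughly like this sketch (illustrative only, not the actual cli_utils code):

import os

def resolve_paths(data_dir=None, download_dir=None, extract_dir=None):
    data_dir = data_dir or os.environ.get(
        "TFDS_DATA_DIR", os.path.expanduser("~/tensorflow_datasets")
    )
    download_dir = download_dir or os.path.join(data_dir, "downloads")
    extract_dir = extract_dir or os.path.join(download_dir, "extracted")
    return data_dir, download_dir, extract_dir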
@@ -324,7 +325,7 @@ def download_and_prepare(
    if nondeterministic_order:
      download_config.nondeterministic_order = True

-   # Add Apache Beam options to download config
+   # Add Apache Beam options to download config.
    try:
      import apache_beam as beam  # pylint: disable=g-import-not-at-top

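For Beam-powered datasets, the options picked up here end up on the DownloadConfig. Passing them explicitly looks roughly like this (standard DownloadConfig field; the runner flag is a placeholder, and beam_options only matters for Beam-based builders):

import tensorflow_datasets as tfds
from apache_beam.options.pipeline_options import PipelineOptions

beam_options = PipelineOptions(flags=["--runner=DirectRunner"])  # placeholder flags
download_config = tfds.download.DownloadConfig(beam_options=beam_options)
builder = tfds.builder("multi_news")
builder.download_and_prepare(download_config=download_config)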
@@ -342,8 +343,8 @@ def download_and_prepare(
        download_config=download_config,
    )

-   # Dataset generated successfully
-   logging.info('Dataset generation complete...')
+   # Dataset generated successfully.
+   logging.info('Dataset generation completed...')

    print()
    print(repr(builder.info))
tensorflow_datasets/summarization/__init__.py
@@ -24,7 +24,6 @@ from tensorflow_datasets.summarization.covid19sum import Covid19sum
  from tensorflow_datasets.summarization.gigaword import Gigaword
  from tensorflow_datasets.summarization.gov_report import GovReport
  from tensorflow_datasets.summarization.media_sum import MediaSum
- from tensorflow_datasets.summarization.multi_news import MultiNews
  from tensorflow_datasets.summarization.newsroom import Newsroom
  from tensorflow_datasets.summarization.opinion_abstracts import OpinionAbstracts
  from tensorflow_datasets.summarization.opinosis import Opinosis
tensorflow_datasets/summarization/multi_news.py
@@ -13,89 +13,13 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- """Multi-News dataset."""
+ """Dataset definition for Multi-News.

- import os
-
- from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
- import tensorflow_datasets.public_api as tfds
-
- _CITATION = """
- @misc{alex2019multinews,
-     title={Multi-News: a Large-Scale Multi-Document Summarization Dataset and Abstractive Hierarchical Model},
-     author={Alexander R. Fabbri and Irene Li and Tianwei She and Suyi Li and Dragomir R. Radev},
-     year={2019},
-     eprint={1906.01749},
-     archivePrefix={arXiv},
-     primaryClass={cs.CL}
- }
- """
-
- _DESCRIPTION = """
- Multi-News, consists of news articles and human-written summaries
- of these articles from the site newser.com.
- Each summary is professionally written by editors and
- includes links to the original articles cited.
-
- There are two features:
- - document: text of news articles seperated by special token "|||||".
- - summary: news summary.
+ DEPRECATED!
+ If you want to use the Multi-News dataset builder class, use:
+ tfds.builder_cls('multi_news')
  """

- _URL = "https://drive.google.com/uc?export=download&id=1vRY2wM6rlOZrf9exGTm5pXj5ExlVwJ0C"
-
- _DOCUMENT = "document"
- _SUMMARY = "summary"
-
-
- class MultiNews(tfds.core.GeneratorBasedBuilder):
-   """Multi-News dataset."""
-
-   VERSION = tfds.core.Version("1.0.0")
-
-   def _info(self):
-     return tfds.core.DatasetInfo(
-         builder=self,
-         description=_DESCRIPTION,
-         features=tfds.features.FeaturesDict(
-             {_DOCUMENT: tfds.features.Text(), _SUMMARY: tfds.features.Text()}
-         ),
-         supervised_keys=(_DOCUMENT, _SUMMARY),
-         homepage="https://github.com/Alex-Fabbri/Multi-News",
-         citation=_CITATION,
-     )
-
-   def _split_generators(self, dl_manager):
-     """Returns SplitGenerators."""
-     extract_path = os.path.join(
-         dl_manager.download_and_extract(_URL), "multi-news-original"
-     )
-     return [
-         tfds.core.SplitGenerator(
-             name=tfds.Split.TRAIN,
-             gen_kwargs={"path": os.path.join(extract_path, "train")},
-         ),
-         tfds.core.SplitGenerator(
-             name=tfds.Split.VALIDATION,
-             gen_kwargs={"path": os.path.join(extract_path, "val")},
-         ),
-         tfds.core.SplitGenerator(
-             name=tfds.Split.TEST,
-             gen_kwargs={"path": os.path.join(extract_path, "test")},
-         ),
-     ]
+ from tensorflow_datasets.core import lazy_builder_import

-   def _generate_examples(self, path=None):
-     """Yields examples."""
-     with tf.io.gfile.GFile(
-         os.path.join(path + ".src")
-     ) as src_f, tf.io.gfile.GFile(os.path.join(path + ".tgt")) as tgt_f:
-       for i, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
-         yield i, {
-             # In original file, each line has one example and natural newline
-             # tokens "\n" are being replaced with "NEWLINE_CHAR". Here restore
-             # the natural newline token to avoid special vocab "NEWLINE_CHAR".
-             _DOCUMENT: src_line.strip().replace("NEWLINE_CHAR", "\n"),
-             # Remove the starting token "- " for every target sequence.
-             _SUMMARY: tgt_line.strip().lstrip("- "),
-         }
+ MultiNews = lazy_builder_import.LazyBuilderImport('multi_news')
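The stub keeps the old module path importable while the implementation now lives under tensorflow_datasets/datasets/multi_news/; name-based access is unaffected (standard TFDS API):

import tensorflow_datasets as tfds

builder_cls = tfds.builder_cls("multi_news")  # The Builder class from its new home.
ds = tfds.load("multi_news", split="validation")  # Loading by name is unchanged.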
tfds_nightly-4.9.9.dev202508160044.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tfds-nightly
- Version: 4.9.9.dev202508150045
+ Version: 4.9.9.dev202508160044
  Summary: tensorflow/datasets is a library of datasets ready to use with TensorFlow.
  Home-page: https://github.com/tensorflow/datasets
  Download-URL: https://github.com/tensorflow/datasets/tags
tfds_nightly-4.9.9.dev202508160044.dist-info/RECORD
@@ -930,6 +930,13 @@ tensorflow_datasets/datasets/mlqa/__init__.py,sha256=eFqnTjU7s5iubj6XcKoU8lZUSHe
  tensorflow_datasets/datasets/mlqa/checksums.tsv,sha256=_lbjCALdPkQgy4EM4tyFPDusv5juKllDjRBu_bkrlv8,134
  tensorflow_datasets/datasets/mlqa/mlqa_dataset_builder.py,sha256=j629tSE41KnEo5rywqyCPZteA14xD2YsPPIrjxsawks,2813
  tensorflow_datasets/datasets/mlqa/mlqa_dataset_builder_test.py,sha256=qZXHcslBHUNAn-BRynUHouA_vclALLOubCWHpoIhPdc,1074
+ tensorflow_datasets/datasets/multi_news/CITATIONS.bib,sha256=oRe0wDEh7EgfsT4OJJFpphYCDBmZyRVHuR1YkihYYl0,328
+ tensorflow_datasets/datasets/multi_news/README.md,sha256=EiTJGxDz0t2iBVb4fJexEr3u6jTxc4NaAaS5mHqghW0,335
+ tensorflow_datasets/datasets/multi_news/TAGS.txt,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ tensorflow_datasets/datasets/multi_news/__init__.py,sha256=eFqnTjU7s5iubj6XcKoU8lZUSHecOdnebZFm1vTkjbA,612
+ tensorflow_datasets/datasets/multi_news/checksums.tsv,sha256=T1iRT0w_VPEt_W0bwhIb8b-g3ZLigrmY1_nuIPnpOWU,978
+ tensorflow_datasets/datasets/multi_news/multi_news_dataset_builder.py,sha256=ef0sBtL4B3_O_XWDpX-rzGTjCcRh623ca5wdRYE30jE,2776
+ tensorflow_datasets/datasets/multi_news/multi_news_dataset_builder_test.py,sha256=5amBMQ7PKbPLeZ2kiT18tEb_Z-CMS0DasTRT6goTjXQ,1259
  tensorflow_datasets/datasets/natural_instructions/CITATIONS.bib,sha256=tcQG5eEGL_wr_5MEnZ6Q_ce2oZm6InbbRKiFqee9g7I,412
  tensorflow_datasets/datasets/natural_instructions/README.md,sha256=mceGvviI62PO5mh59sYPP_9vuuwKo0g-m7LQilP1mBI,370
  tensorflow_datasets/datasets/natural_instructions/TAGS.txt,sha256=0haPKTrOLvonnUl_YmWId0WzLp2wqeiXEkMKimavq-E,162
@@ -1983,7 +1990,7 @@ tensorflow_datasets/scripts/cli/build.py,sha256=uBR2mPo1YO1Of83zZ6A3m5NU0GhP0nJd
  tensorflow_datasets/scripts/cli/build_test.py,sha256=K7ho7IRtAty1ZNPLj33Th_nZajYBkXRLA4u3dbElQmo,10615
  tensorflow_datasets/scripts/cli/builder_templates.py,sha256=99SvH3skigkc2Qg737BV2OzhXL_Rgu4az8eVHsxKCLk,7985
  tensorflow_datasets/scripts/cli/builder_templates_test.py,sha256=HBNB-v2zlImKULPI8Webs9hXCkeFmWT29urxav-tDe8,2062
- tensorflow_datasets/scripts/cli/cli_utils.py,sha256=6B3LLFwEvB41LLsQ5__el7F0Pd-COlLnaU3nvUTBdCw,13407
+ tensorflow_datasets/scripts/cli/cli_utils.py,sha256=ddZ06qZ2FCQ8J80HqbT3pCIzEpo-2HuqVnsupJoUZCU,13420
  tensorflow_datasets/scripts/cli/conftest.py,sha256=3PNh_BbR013G4HyLAZOleUXsQ9mICrD03NaKwdHFMXs,1291
  tensorflow_datasets/scripts/cli/convert_format.py,sha256=ZS7CmWJ-oZ0usO4TB8GKDj9TBJ5MyEO0I9QLRg7eQOw,3797
  tensorflow_datasets/scripts/cli/convert_format_utils.py,sha256=U_q5WVgMNrjBkOc166U4Y_eca5KOS3Xb3jSDjp4XdK4,29078
@@ -2089,7 +2096,7 @@ tensorflow_datasets/structured/wiki_table_text/wiki_table_text_test.py,sha256=U6
  tensorflow_datasets/structured/wine_quality/__init__.py,sha256=dUK0cSZ2ML2ZwZ6Ko1-jt17QTQZO_2yAWOTgeKzAqjs,722
  tensorflow_datasets/structured/wine_quality/wine_quality.py,sha256=R2p-gQVeu5-CQ8YnAKkrRH8XnpkhCvgG8G9vOS4sW4U,5227
  tensorflow_datasets/structured/wine_quality/wine_quality_test.py,sha256=fxn98M-Hl9iNIROFtNbAowEBbS7XiBLmJVE-pGEmJ_k,1277
- tensorflow_datasets/summarization/__init__.py,sha256=ZD-D4iVxcgXvqxt3DRBTPpP9irrCq0gRYcSjrbYC5no,1952
+ tensorflow_datasets/summarization/__init__.py,sha256=OITuaVujgI2VtEDpwAMCbOxDt0nkKT1XYbh07WXAj_M,1885
  tensorflow_datasets/summarization/aeslc.py,sha256=z-iDHaL_GW7V9ZrwHSbTPx_RDMTnXEUl0Wyk3fBghis,859
  tensorflow_datasets/summarization/big_patent.py,sha256=Ny078U5GA1iQss1U2XVO_a-FvQBGiKJaZPwXQb8lRTc,882
  tensorflow_datasets/summarization/billsum.py,sha256=O2kCcahtY0rKy-Rqk81zVawuDAbXHeQOmMZVhJcsiGs,869
@@ -2099,8 +2106,7 @@ tensorflow_datasets/summarization/covid19sum.py,sha256=g5_Xu-lrr5W5HtHdoProJ0P0h
  tensorflow_datasets/summarization/covid19sum_test.py,sha256=zr4s5qZ_zOYoazjmKJR5ZtyXk-fQMxe_QuFHQMnUMEA,960
  tensorflow_datasets/summarization/gigaword.py,sha256=8ggOY1XRHqrvHb5gxnqC23hZNzsNaFqo91BT4BZ3GSA,4240
  tensorflow_datasets/summarization/gigaword_test.py,sha256=ZZiIBDRkMEzIzG-H1z8hLzGUKvdZONisHcv7EaHIDLs,1090
- tensorflow_datasets/summarization/multi_news.py,sha256=2l0lKbTCZqR2AHVhjxt7mufITXvGpr-tcEHzHwNEPX8,3520
- tensorflow_datasets/summarization/multi_news_test.py,sha256=k8YT4yr_5WRk7DtnScEXFJzvhHMxqVIRtgRGkbYNxyg,1090
+ tensorflow_datasets/summarization/multi_news.py,sha256=kaEVG8RjiB_cbQN8QZdLljBcvHw4rnn62v0ZZzBfdWE,881
  tensorflow_datasets/summarization/newsroom.py,sha256=AM8t3QBffL1fY2FmcFolDnHdJWlyXk168kDa-HgRplU,874
  tensorflow_datasets/summarization/opinion_abstracts.py,sha256=oTsaIx4CERoSKpV6-uRJHBB-YIzZD6eF_u-GHtfOKoQ,917
  tensorflow_datasets/summarization/opinosis.py,sha256=yGkkwkzo0Ygzh8X--5CrSImsIANCgm7DawsCBk5o-LE,874
@@ -2468,10 +2474,10 @@ tensorflow_datasets/vision_language/wit/wit_test.py,sha256=PXS8DMNW-MDrT2p5oy4Ic
  tensorflow_datasets/vision_language/wit_kaggle/__init__.py,sha256=vGwSGeM8WE4Q-l0-eEE1sBojmk6YT0l1OO60AWa4Q40,719
  tensorflow_datasets/vision_language/wit_kaggle/wit_kaggle.py,sha256=q-vX_FBzIwsFxL4sY9vuyQ3UQD2PLM4yhUR4U6l-qao,16903
  tensorflow_datasets/vision_language/wit_kaggle/wit_kaggle_test.py,sha256=ZymHT1NkmD-pUnh3BmM3_g30c5afsWYnmqDD9dVyDSA,1778
- tfds_nightly-4.9.9.dev202508150045.dist-info/licenses/AUTHORS,sha256=nvBG4WwfgjuOu1oZkuQKw9kg7X6rve679ObS-YDDmXg,309
- tfds_nightly-4.9.9.dev202508150045.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- tfds_nightly-4.9.9.dev202508150045.dist-info/METADATA,sha256=3GAb0FSJHgdUUFf1do-Od7H7EhdMdcIQhGjo-vNaTYk,11694
- tfds_nightly-4.9.9.dev202508150045.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- tfds_nightly-4.9.9.dev202508150045.dist-info/entry_points.txt,sha256=eHEL7nF5y1uCY2FgkuYIdE062epJXlAQTSdq89px4p4,73
- tfds_nightly-4.9.9.dev202508150045.dist-info/top_level.txt,sha256=bAevmk9209s_oxVZVlN6hSDIVS423qrMQvmcWSvW4do,20
- tfds_nightly-4.9.9.dev202508150045.dist-info/RECORD,,
+ tfds_nightly-4.9.9.dev202508160044.dist-info/licenses/AUTHORS,sha256=nvBG4WwfgjuOu1oZkuQKw9kg7X6rve679ObS-YDDmXg,309
+ tfds_nightly-4.9.9.dev202508160044.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ tfds_nightly-4.9.9.dev202508160044.dist-info/METADATA,sha256=FLyM8JDvgbDzTJ5H1kZV9KT_4vvfeN_EeRH6Kb9nrRk,11694
+ tfds_nightly-4.9.9.dev202508160044.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ tfds_nightly-4.9.9.dev202508160044.dist-info/entry_points.txt,sha256=eHEL7nF5y1uCY2FgkuYIdE062epJXlAQTSdq89px4p4,73
+ tfds_nightly-4.9.9.dev202508160044.dist-info/top_level.txt,sha256=bAevmk9209s_oxVZVlN6hSDIVS423qrMQvmcWSvW4do,20
+ tfds_nightly-4.9.9.dev202508160044.dist-info/RECORD,,
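RECORD rows use the wheel format: path, "sha256=" followed by the URL-safe, unpadded base64 of the raw digest (not hex), and the size in bytes. A sketch for recomputing the digest column of one row for a file on disk:

import base64
import hashlib

def record_digest(path: str) -> str:
    """Returns the RECORD-style sha256 column for a file."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=")
    return "sha256=" + b64.decode("ascii")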