returnn 1.20250207.143045__py3-none-any.whl → 1.20250211.110723__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of returnn might be problematic. See the package's registry page for more details.

returnn/PKG-INFO CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: returnn
3
- Version: 1.20250207.143045
3
+ Version: 1.20250211.110723
4
4
  Summary: The RWTH extensible training framework for universal recurrent neural networks
5
5
  Home-page: https://github.com/rwth-i6/returnn/
6
6
  Author: Albert Zeyer
@@ -1,2 +1,2 @@
1
- version = '1.20250207.143045'
2
- long_version = '1.20250207.143045+git.b994e87'
1
+ version = '1.20250211.110723'
2
+ long_version = '1.20250211.110723+git.f032108'
returnn/datasets/lm.py CHANGED
@@ -7,9 +7,10 @@ and some related helpers.
7
7
 
8
8
  from __future__ import annotations
9
9
 
10
- from typing import Optional, Union, Callable, Iterator, List, Tuple, BinaryIO, cast
10
+ from typing import Optional, Union, Any, Callable, Iterator, List, Tuple, Set, BinaryIO, Dict, cast, Generator
11
11
  import typing
12
12
  import os
13
+ from io import IOBase
13
14
  import sys
14
15
  import time
15
16
  import re
@@ -1043,17 +1044,17 @@ class Lexicon:
1043
1044
  Lexicon. Map of words to phoneme sequences (can have multiple pronunciations).
1044
1045
  """
1045
1046
 
1046
- def __init__(self, filename):
1047
+ def __init__(self, filename: str):
1047
1048
  """
1048
- :param str filename:
1049
+ :param filename:
1049
1050
  """
1050
1051
  print("Loading lexicon", filename, file=log.v4)
1051
1052
  lex_file = open(filename, "rb")
1052
1053
  if filename.endswith(".gz"):
1053
1054
  lex_file = gzip.GzipFile(fileobj=lex_file)
1054
- self.phoneme_list = [] # type: typing.List[str]
1055
- self.phonemes = {} # type: typing.Dict[str,typing.Dict[str]] # phone -> {index, symbol, variation}
1056
- self.lemmas = {} # type: typing.Dict[str,typing.Dict[str]] # orth -> {orth, phons}
1055
+ self.phoneme_list: List[str] = []
1056
+ self.phonemes: Dict[str, Dict[str, Any]] = {} # phone -> {index, symbol, variation}
1057
+ self.lemmas: Dict[str, Dict[str, Any]] = {} # orth -> {orth, phons}
1057
1058
 
1058
1059
  context = iter(ElementTree.iterparse(lex_file, events=("start", "end")))
1059
1060
  _, root = next(context) # get root element
@@ -1097,12 +1098,12 @@ class StateTying:
1097
1098
  Clustering of (allophone) states into classes.
1098
1099
  """
1099
1100
 
1100
- def __init__(self, state_tying_file):
1101
+ def __init__(self, state_tying_file: str):
1101
1102
  """
1102
- :param str state_tying_file:
1103
+ :param state_tying_file:
1103
1104
  """
1104
- self.allo_map = {} # allophone-state-str -> class-idx
1105
- self.class_map = {} # class-idx -> set(allophone-state-str)
1105
+ self.allo_map: Dict[str, int] = {} # allophone-state-str -> class-idx
1106
+ self.class_map: Dict[int, Set[str]] = {} # class-idx -> set(allophone-state-str)
1106
1107
  lines = open(state_tying_file).read().splitlines()
1107
1108
  for line in lines:
1108
1109
  allo_str, class_idx_str = line.split()
@@ -1124,29 +1125,45 @@ class PhoneSeqGenerator:
1124
1125
 
1125
1126
  def __init__(
1126
1127
  self,
1127
- lexicon_file,
1128
- allo_num_states=3,
1129
- allo_context_len=1,
1130
- state_tying_file=None,
1131
- add_silence_beginning=0.1,
1132
- add_silence_between_words=0.1,
1133
- add_silence_end=0.1,
1134
- repetition=0.9,
1135
- silence_repetition=0.95,
1128
+ *,
1129
+ lexicon_file: str,
1130
+ phoneme_vocab_file: Optional[str] = None,
1131
+ allo_num_states: int = 3,
1132
+ allo_context_len: int = 1,
1133
+ state_tying_file: Optional[str] = None,
1134
+ add_silence_beginning: float = 0.1,
1135
+ add_silence_between_words: float = 0.1,
1136
+ add_silence_end: float = 0.1,
1137
+ repetition: float = 0.9,
1138
+ silence_repetition: float = 0.95,
1139
+ silence_lemma_orth: str = "[SILENCE]",
1140
+ extra_begin_lemma: Optional[Dict[str, Any]] = None,
1141
+ add_extra_begin_lemma: float = 1.0,
1142
+ extra_end_lemma: Optional[Dict[str, Any]] = None,
1143
+ add_extra_end_lemma: float = 1.0,
1136
1144
  ):
1137
1145
  """
1138
- :param str lexicon_file: lexicon XML file
1139
- :param int allo_num_states: how much HMM states per allophone (all but silence)
1140
- :param int allo_context_len: how much context to store left and right. 1 -> triphone
1141
- :param str | None state_tying_file: for state-tying, if you want that
1142
- :param float add_silence_beginning: prob of adding silence at beginning
1143
- :param float add_silence_between_words: prob of adding silence between words
1144
- :param float add_silence_end: prob of adding silence at end
1145
- :param float repetition: prob of repeating an allophone
1146
- :param float silence_repetition: prob of repeating the silence allophone
1146
+ :param lexicon_file: lexicon XML file
1147
+ :param phoneme_vocab_file: defines the vocab, label indices.
1148
+ If not given, automatically inferred via all (sorted) phonemes from the lexicon.
1149
+ :param allo_num_states: how much HMM states per allophone (all but silence)
1150
+ :param allo_context_len: how much context to store left and right. 1 -> triphone
1151
+ :param state_tying_file: for state-tying, if you want that
1152
+ :param add_silence_beginning: prob of adding silence at beginning
1153
+ :param add_silence_between_words: prob of adding silence between words
1154
+ :param add_silence_end: prob of adding silence at end
1155
+ :param repetition: prob of repeating an allophone
1156
+ :param silence_repetition: prob of repeating the silence allophone
1157
+ :param silence_lemma_orth: silence orth in the lexicon
1158
+ :param extra_begin_lemma: {"phons": [{"phon": "P1 P2 ...", ...}, ...], ...}.
1159
+ If given, then with prob add_extra_begin_lemma, this will be added at the beginning.
1160
+ :param add_extra_begin_lemma:
1161
+ :param extra_end_lemma: just like ``extra_begin_lemma``, but for the end
1162
+ :param add_extra_end_lemma:
1147
1163
  """
1148
1164
  self.lexicon = Lexicon(lexicon_file)
1149
1165
  self.phonemes = sorted(self.lexicon.phonemes.keys(), key=lambda s: self.lexicon.phonemes[s]["index"])
1166
+ self.phoneme_vocab = Vocabulary(phoneme_vocab_file, unknown_label=None) if phoneme_vocab_file else None
1150
1167
  self.rnd = Random(0)
1151
1168
  self.allo_num_states = allo_num_states
1152
1169
  self.allo_context_len = allo_context_len
@@ -1155,40 +1172,42 @@ class PhoneSeqGenerator:
1155
1172
  self.add_silence_end = add_silence_end
1156
1173
  self.repetition = repetition
1157
1174
  self.silence_repetition = silence_repetition
1158
- self.si_lemma = self.lexicon.lemmas["[SILENCE]"]
1159
- self.si_phone = self.si_lemma["phons"][0]["phon"]
1160
- if state_tying_file:
1161
- self.state_tying = StateTying(state_tying_file)
1162
- else:
1163
- self.state_tying = None
1164
-
1165
- def random_seed(self, seed):
1166
- """
1167
- :param int seed:
1168
- """
1175
+ self.si_lemma: Dict[str, Any] = self.lexicon.lemmas[silence_lemma_orth]
1176
+ self.si_phone: str = self.si_lemma["phons"][0]["phon"]
1177
+ self.state_tying = StateTying(state_tying_file) if state_tying_file else None
1178
+ if self.phoneme_vocab:
1179
+ assert not self.state_tying
1180
+ self.extra_begin_lemma = extra_begin_lemma
1181
+ self.add_extra_begin_lemma = add_extra_begin_lemma
1182
+ self.extra_end_lemma = extra_end_lemma
1183
+ self.add_extra_end_lemma = add_extra_end_lemma
1184
+
1185
+ def random_seed(self, seed: int):
1186
+ """Reset RNG via given seed"""
1169
1187
  self.rnd.seed(seed)
1170
1188
 
1171
- def get_class_labels(self):
1172
- """
1173
- :rtype: list[str]
1174
- """
1175
- if self.state_tying:
1189
+ def get_class_labels(self) -> List[str]:
1190
+ """:return: class labels"""
1191
+ if self.phoneme_vocab:
1192
+ return self.phoneme_vocab.labels
1193
+ elif self.state_tying:
1176
1194
  # State tying labels. Represented by some allophone state str.
1177
1195
  return ["|".join(sorted(self.state_tying.class_map[i])) for i in range(self.state_tying.num_classes)]
1178
1196
  else:
1179
1197
  # The phonemes are the labels.
1180
1198
  return self.phonemes
1181
1199
 
1182
- def seq_to_class_idxs(self, phones, dtype=None):
1200
+ def seq_to_class_idxs(self, phones: List[AllophoneState], dtype: Optional[str] = None) -> numpy.ndarray:
1183
1201
  """
1184
- :param list[AllophoneState] phones: list of allophone states
1185
- :param str dtype: eg "int32"
1186
- :rtype: numpy.ndarray
1187
- :returns 1D numpy array with the indices
1202
+ :param phones: list of allophone states
1203
+ :param dtype: eg "int32". "int32" by default
1204
+ :returns: 1D numpy array with the indices
1188
1205
  """
1189
1206
  if dtype is None:
1190
1207
  dtype = "int32"
1191
- if self.state_tying:
1208
+ if self.phoneme_vocab:
1209
+ return numpy.array([self.phoneme_vocab.label_to_id(a.id) for a in phones], dtype=dtype)
1210
+ elif self.state_tying:
1192
1211
  # State tying indices.
1193
1212
  return numpy.array([self.state_tying.allo_map[a.format()] for a in phones], dtype=dtype)
1194
1213
  else:
@@ -1196,11 +1215,9 @@ class PhoneSeqGenerator:
1196
1215
  # It should not happen that we don't have some phoneme. The lexicon should not be inconsistent.
1197
1216
  return numpy.array([self.lexicon.phonemes[p.id]["index"] for p in phones], dtype=dtype)
1198
1217
 
1199
- def _iter_orth(self, orth):
1200
- """
1201
- :param str orth:
1202
- :rtype: typing.Iterator[typing.Dict[str]]
1203
- """
1218
+ def _iter_orth_lemmas(self, orth: str) -> Generator[Dict[str, Any], None, None]:
1219
+ if self.extra_begin_lemma and self.rnd.random() < self.add_extra_begin_lemma:
1220
+ yield self.extra_begin_lemma
1204
1221
  if self.rnd.random() < self.add_silence_beginning:
1205
1222
  yield self.si_lemma
1206
1223
  symbols = list(orth.split())
@@ -1224,26 +1241,25 @@ class PhoneSeqGenerator:
1224
1241
  yield self.si_lemma
1225
1242
  if self.rnd.random() < self.add_silence_end:
1226
1243
  yield self.si_lemma
1244
+ if self.extra_end_lemma and self.rnd.random() < self.add_extra_end_lemma:
1245
+ yield self.extra_end_lemma
1227
1246
 
1228
- def orth_to_phones(self, orth):
1229
- """
1230
- :param str orth:
1231
- :rtype: str
1232
- """
1247
+ def orth_to_phones(self, orth: str) -> str:
1248
+ """:return: space-separated phones"""
1233
1249
  phones = []
1234
- for lemma in self._iter_orth(orth):
1250
+ for lemma in self._iter_orth_lemmas(orth):
1235
1251
  phon = self.rnd.choice(lemma["phons"])
1236
- phones += [phon["phon"]]
1252
+ phones.append(phon["phon"])
1237
1253
  return " ".join(phones)
1238
1254
 
1239
1255
  # noinspection PyMethodMayBeStatic
1240
- def _phones_to_allos(self, phones):
1256
+ def _phones_to_allos(self, phones: Iterator[str]) -> Generator[AllophoneState, None, None]:
1241
1257
  for p in phones:
1242
1258
  a = AllophoneState()
1243
1259
  a.id = p
1244
1260
  yield a
1245
1261
 
1246
- def _random_allo_silence(self, phone=None):
1262
+ def _random_allo_silence(self, phone: Optional[str] = None) -> Generator[AllophoneState, None, None]:
1247
1263
  if phone is None:
1248
1264
  phone = self.si_phone
1249
1265
  while True:
@@ -1256,7 +1272,7 @@ class PhoneSeqGenerator:
1256
1272
  if self.rnd.random() >= self.silence_repetition:
1257
1273
  break
1258
1274
 
1259
- def _allos_add_states(self, allos):
1275
+ def _allos_add_states(self, allos: Iterator[AllophoneState]) -> Generator[AllophoneState, None, None]:
1260
1276
  for _a in allos:
1261
1277
  if _a.id == self.si_phone:
1262
1278
  for a in self._random_allo_silence(_a.id):
@@ -1274,9 +1290,9 @@ class PhoneSeqGenerator:
1274
1290
  if self.rnd.random() >= self.repetition:
1275
1291
  break
1276
1292
 
1277
- def _allos_set_context(self, allos):
1293
+ def _allos_set_context(self, allos: List[AllophoneState]) -> None:
1278
1294
  """
1279
- :param list[AllophoneState] allos:
1295
+ :param allos: modify inplace, ``context_history``, ``context_future``
1280
1296
  """
1281
1297
  if self.allo_context_len == 0:
1282
1298
  return
@@ -1297,15 +1313,14 @@ class PhoneSeqGenerator:
1297
1313
  else:
1298
1314
  ctx = []
1299
1315
 
1300
- def generate_seq(self, orth):
1316
+ def generate_seq(self, orth: str) -> List[AllophoneState]:
1301
1317
  """
1302
- :param str orth: orthography as a str. orth.split() should give words in the lexicon
1303
- :rtype: list[AllophoneState]
1304
- :returns allophone state list. those will have repetitions etc
1318
+ :param orth: orthography as a str. orth.split() should give words in the lexicon
1319
+ :returns: allophone state list. those will have repetitions etc
1305
1320
  """
1306
- allos = [] # type: typing.List[AllophoneState]
1307
- for lemma in self._iter_orth(orth):
1308
- phon = self.rnd.choice(lemma["phons"])
1321
+ allos: List[AllophoneState] = []
1322
+ for lemma in self._iter_orth_lemmas(orth):
1323
+ phon = self.rnd.choice(lemma["phons"]) # space-separated phones in phon["phon"]
1309
1324
  l_allos = list(self._phones_to_allos(phon["phon"].split()))
1310
1325
  l_allos[0].mark_initial()
1311
1326
  l_allos[-1].mark_final()
@@ -1314,13 +1329,13 @@ class PhoneSeqGenerator:
1314
1329
  allos = list(self._allos_add_states(allos))
1315
1330
  return allos
1316
1331
 
1317
- def _random_phone_seq(self, prob_add=0.8):
1332
+ def _random_phone_seq(self, prob_add: float = 0.8) -> Generator[str, None, None]:
1318
1333
  while True:
1319
1334
  yield self.rnd.choice(self.phonemes)
1320
1335
  if self.rnd.random() >= prob_add:
1321
1336
  break
1322
1337
 
1323
- def _random_allo_seq(self, prob_word_add=0.8):
1338
+ def _random_allo_seq(self, prob_word_add: float = 0.8) -> List[AllophoneState]:
1324
1339
  allos = []
1325
1340
  while True:
1326
1341
  phones = self._random_phone_seq()
@@ -1333,15 +1348,14 @@ class PhoneSeqGenerator:
1333
1348
  self._allos_set_context(allos)
1334
1349
  return list(self._allos_add_states(allos))
1335
1350
 
1336
- def generate_garbage_seq(self, target_len):
1351
+ def generate_garbage_seq(self, target_len: int) -> List[AllophoneState]:
1337
1352
  """
1338
- :param int target_len: len of the returned seq
1339
- :rtype: list[AllophoneState]
1340
- :returns allophone state list. those will have repetitions etc.
1341
- It will randomly generate a sequence of phonemes and transform that
1342
- into a list of allophones in a similar way than generate_seq().
1353
+ :param target_len: len of the returned seq
1354
+ :returns: allophone state list. those will have repetitions etc.
1355
+ It will randomly generate a sequence of phonemes and transform that
1356
+ into a list of allophones in a similar way than generate_seq().
1343
1357
  """
1344
- allos = []
1358
+ allos: List[AllophoneState] = []
1345
1359
  while True:
1346
1360
  allos += self._random_allo_seq()
1347
1361
  # Add some silence so that left/right context is correct for further allophones.
@@ -1435,7 +1449,9 @@ class TranslationDataset(CachedDataset2):
1435
1449
  for prefix in self._main_data_key_map.keys()
1436
1450
  if not (prefix == self.target_file_prefix and search_without_reference)
1437
1451
  ]
1438
- self._data_files = {prefix: self._get_data_file(prefix) for prefix in self._files_to_read}
1452
+ self._data_files: Dict[str, Union[None, BinaryIO, IOBase]] = {
1453
+ prefix: self._get_data_file(prefix) for prefix in self._files_to_read
1454
+ }
1439
1455
 
1440
1456
  self._data_keys = self._source_data_keys + self._target_data_keys
1441
1457
  self._data = {data_key: [] for data_key in self._data_keys} # type: typing.Dict[str,typing.List[numpy.ndarray]]
@@ -1541,11 +1557,10 @@ class TranslationDataset(CachedDataset2):
1541
1557
  filename = cf(filename)
1542
1558
  return filename
1543
1559
 
1544
- def _get_data_file(self, prefix):
1560
+ def _get_data_file(self, prefix) -> Union[BinaryIO, IOBase]:
1545
1561
  """
1546
1562
  :param str prefix: e.g. "source" or "target"
1547
1563
  :return: full filename
1548
- :rtype: io.FileIO
1549
1564
  """
1550
1565
  import os
1551
1566
 
returnn/torch/engine.py CHANGED
@@ -1077,7 +1077,7 @@ class Engine(EngineBase):
1077
1077
  get_model_func = self.config.typed_value("get_model")
1078
1078
  assert get_model_func, "get_model not defined in config"
1079
1079
  sentinel_kw = util.get_fwd_compat_kwargs()
1080
- model = get_model_func(epoch=epoch, step=step, **sentinel_kw)
1080
+ model = get_model_func(epoch=epoch, step=step, device=self._device, **sentinel_kw)
1081
1081
  self._orig_model = model
1082
1082
  if isinstance(model, rf.Module):
1083
1083
  self._pt_model = rf_module_to_pt_module(model)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: returnn
3
- Version: 1.20250207.143045
3
+ Version: 1.20250211.110723
4
4
  Summary: The RWTH extensible training framework for universal recurrent neural networks
5
5
  Home-page: https://github.com/rwth-i6/returnn/
6
6
  Author: Albert Zeyer
@@ -1,9 +1,9 @@
1
- returnn/PKG-INFO,sha256=JxwNmuittLMoytS17vVYTCUstvu63egjofVHNtxAWoI,5215
1
+ returnn/PKG-INFO,sha256=URxQfD6qddQqyGuoM5_5h2XS9kTA0Qv6EZDqCCD3Pxk,5215
2
2
  returnn/__init__.py,sha256=biBtRsM0WZ406vShaeH-9WFoqJ8XwTbn6g0EeFJ7l8E,1012
3
3
  returnn/__main__.py,sha256=qBFbuB1yN3adgVM5pXt2-Yq9vorjRNchNPL8kDKx44M,31752
4
4
  returnn/__old_mod_loader__.py,sha256=nvsNY-xELdS_IPNkv66Q9Rmvg4dbGW0-EBRDcCmctos,7654
5
5
  returnn/__setup__.py,sha256=22kQn2fh11iPM0hLb2Fy5sLmoU1JGvmDxXRYuRgQkwU,4659
6
- returnn/_setup_info_generated.py,sha256=PiumrsjdDM8B1EXRYPtxkhE50REGsTfjClC6U1cNh58,77
6
+ returnn/_setup_info_generated.py,sha256=R-VEKbgSRUuCxbz9CHRW-XZyUzwY_q_kEm5AZAMfMpM,77
7
7
  returnn/config.py,sha256=3tmKhB6FnQZaNdtcYsiB61JnEY--iZ2qmJ4yq0b6tE0,29140
8
8
  returnn/forward_iface.py,sha256=A_OJiaXsX4MlXQRzST86ylyxSUZbC402PQL1REcqHjM,911
9
9
  returnn/learning_rate_control.py,sha256=ZvWryAn_tv9DhV8sh1LV3eE34Yltl3On3mYZAG4hR9s,34684
@@ -20,7 +20,7 @@ returnn/datasets/cached2.py,sha256=STojLL2Ivvd0xMfZRlYgzsHKlikYKL-caZCIDCgc_9g,1
20
20
  returnn/datasets/distrib_files.py,sha256=kyqIQILDPAO2TXr39hjslmDxIAc3pkY1UOoj8nuiFXo,27534
21
21
  returnn/datasets/generating.py,sha256=e2-SXcax7xQ4fkVW_Q5MgOLP6KlB7EQXJi_v64gVAWI,99805
22
22
  returnn/datasets/hdf.py,sha256=shif0aQqWWNJ0b6YnycpPjIVNsxjLrA41Y66-_SluGI,66993
23
- returnn/datasets/lm.py,sha256=dP5VtKiIWyy9vNhlT7FddQvcwjA6CpbhYb6IGLGGtZc,96027
23
+ returnn/datasets/lm.py,sha256=CX06bc06hAvf3c9-Gku_2DmqexgmR9TlRxsSPzRPno4,98193
24
24
  returnn/datasets/map.py,sha256=kOBJVZmwDhLsOplzDNByIfa0NRSUaMo2Lsy36lBvxrM,10907
25
25
  returnn/datasets/meta.py,sha256=wHquywF1C7-YWhcSFSAdDNc0nEHRjE-ks7YIEuDFMIE,94731
26
26
  returnn/datasets/multi_proc.py,sha256=7kppiXGiel824HM3GvHegluIxtiNAHafm-e6qh6W7YU,21948
@@ -207,7 +207,7 @@ returnn/tf/util/open_fst.py,sha256=sZRDw4TbxvhGqpGdUJWy1ebvlZm4_RPhygpRw9uLAOQ,1
207
207
  returnn/torch/README.md,sha256=jzJ2FpOHW02vxN69yKaV97C9LI-hmvjBglKfdZXIDdc,85
208
208
  returnn/torch/__init__.py,sha256=MHEUyNHB20Vy89uKAqZoj6FxJKF1Gq3HW-i6ra1pNcI,24
209
209
  returnn/torch/distributed.py,sha256=i13cUVjI7GxpO0TAresrNyCM0ZBAaf-cXNr09Fmg_2k,6266
210
- returnn/torch/engine.py,sha256=neM-AL7XQLpZ3V1K4ziqVmij19ey1k2MpLCaFXATOpg,76301
210
+ returnn/torch/engine.py,sha256=BjPF6Kifu1b8kCavnCGAXrp0uhqY4WFexLvP3kR6tmg,76322
211
211
  returnn/torch/updater.py,sha256=GqtBvZpElPVMm0lq84JPl4NVLFFETZAzAbR0rTomSao,28249
212
212
  returnn/torch/data/__init__.py,sha256=6cLNEi8KoGI12PF6akN7mI_mtjlx-0hcQAfMYoExwik,132
213
213
  returnn/torch/data/extern_data.py,sha256=_uT_9_gd5HIh1IoRsrebVG-nufSnb7fgC5jyU05GxJg,7580
@@ -253,8 +253,8 @@ returnn/util/sig_proc.py,sha256=Tjz0VOAVyqu2qDCF5HZ1JjALjcFsHcNkcd96WgZeKfE,7265
253
253
  returnn/util/task_system.py,sha256=y4sMVXQ25Qd2z0rx03uOlXlkE-jbCYC1Sjfn-XlraVU,26003
254
254
  returnn/util/train_proc_manager.py,sha256=Pjht28k6uz6BNQ47uW6Gf880iyq5q4wx7P_K2tmoAM8,3266
255
255
  returnn/util/watch_memory.py,sha256=BR5P2kvBN6UI81cE0_1WAA6Hd1SByLbBaiDxvLhPOew,4213
256
- returnn-1.20250207.143045.dist-info/LICENSE,sha256=ywBD_U2aD4vpuoIgNAsjIGBYydl0tVKll3De0Z8s77c,11041
257
- returnn-1.20250207.143045.dist-info/METADATA,sha256=JxwNmuittLMoytS17vVYTCUstvu63egjofVHNtxAWoI,5215
258
- returnn-1.20250207.143045.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
259
- returnn-1.20250207.143045.dist-info/top_level.txt,sha256=Lsn4WZc5Pbfk0-xDQOgnFCxOoqxL4CyeM3N1TFbJncw,8
260
- returnn-1.20250207.143045.dist-info/RECORD,,
256
+ returnn-1.20250211.110723.dist-info/LICENSE,sha256=ywBD_U2aD4vpuoIgNAsjIGBYydl0tVKll3De0Z8s77c,11041
257
+ returnn-1.20250211.110723.dist-info/METADATA,sha256=URxQfD6qddQqyGuoM5_5h2XS9kTA0Qv6EZDqCCD3Pxk,5215
258
+ returnn-1.20250211.110723.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
259
+ returnn-1.20250211.110723.dist-info/top_level.txt,sha256=Lsn4WZc5Pbfk0-xDQOgnFCxOoqxL4CyeM3N1TFbJncw,8
260
+ returnn-1.20250211.110723.dist-info/RECORD,,