thds.mops 3.6.20250407135813__py3-none-any.whl → 3.6.20250407153122__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of thds.mops might be problematic.

thds/mops/k8s/config.py CHANGED
@@ -16,9 +16,22 @@ k8s_job_retry_count = config.item("mops.k8s.job.retry_count", 6, parse=int)
 k8s_job_cleanup_ttl_seconds_after_completion = config.item(
     "mops.k8s.job.cleanup_ttl_seconds", int(timedelta(minutes=60).total_seconds()), parse=int
 )
-k8s_job_timeout_seconds = config.item(
-    "mops.k8s.job.timeout_seconds", int(timedelta(minutes=3).total_seconds()), parse=int
+
+# https://github.com/kubernetes-client/python/blob/master/examples/watch/timeout-settings.md
+k8s_watch_server_timeout_seconds = config.item(
+    "mops.k8s.watch.server_timeout", int(timedelta(hours=1).total_seconds()), parse=int
+)
+k8s_watch_connection_timeout_seconds = config.item(
+    "mops.k8s.watch.connection_timeout", int(timedelta(seconds=5).total_seconds()), parse=int
 )
+k8s_watch_read_timeout_seconds = config.item(
+    "mops.k8s.watch.read_timeout", int(timedelta(seconds=20).total_seconds()), parse=int
+)
+# The values above are designed to retry aggressively if we're not receiving events; from
+# what I've read online, k9s exhibits similar behavior (though I don't know their exact
+# values, and from grepping their codebase it isn't entirely clear that they distinguish
+# between different timeout types).
+
 k8s_monitor_delay = config.item("mops.k8s.monitor.delay_seconds", 5, parse=int)
 k8s_monitor_max_attempts = config.item("mops.k8s.monitor.max_attempts", 100, parse=int)
 
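For orientation (not part of the package): a minimal sketch of how these three settings are typically consumed by the kubernetes Python client, mirroring the timeout_seconds / _request_timeout usage in the watch.py hunks further down. The list_namespaced_pod target, the namespace, and the literal values are illustrative assumptions.

# illustrative only: server-side vs. client-side watch timeouts
from kubernetes import client, config as k8s_config, watch

k8s_config.load_kube_config()  # or load_incluster_config() when running inside a pod

for evt in watch.Watch().stream(
    client.CoreV1Api().list_namespaced_pod,
    namespace="default",
    timeout_seconds=3600,      # server-side: the API server closes the watch (~ mops.k8s.watch.server_timeout)
    _request_timeout=(5, 20),  # client-side: (connection, read) timeouts handed to urllib3
):
    print(evt["type"], evt["object"].metadata.name)
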
thds/mops/k8s/jobs.py CHANGED
@@ -1,10 +1,11 @@
 import typing as ty
+from datetime import datetime, timezone
 
 from kubernetes import client
 
 from ._shared import logger
 from .retry import k8s_sdk_retry
-from .watch import WatchingObjectSource
+from .watch import EventType, WatchingObjectSource, watch_forever
 
 
 @k8s_sdk_retry()
@@ -32,18 +33,26 @@ def get_job(job_name: str, namespace: str = "") -> ty.Optional[client.models.V1J
 # https://kubernetes.io/docs/concepts/workloads/controllers/job/#terminal-job-conditions
 
 
-def is_job_succeeded(job: client.models.V1Job) -> bool:
+def job_completion_time(job: client.models.V1Job) -> ty.Optional[datetime]:
     if not job.status:
-        return False
+        return None
 
-    if not job.status.completion_time:
-        return False
+    if job.status.completion_time:
+        return job.status.completion_time
 
     for condition in job.status.conditions or tuple():
         if condition.type == "Complete" and condition.status == "True":
-            return True
+            return (
+                condition.last_transition_time
+                if condition.last_transition_time
+                else datetime.now(tz=timezone.utc)
+            )
 
-    return False
+    return None
+
+
+def is_job_succeeded(job: client.models.V1Job) -> bool:
+    return bool(job_completion_time(job))
 
 
 def is_job_failed(job: client.models.V1Job) -> bool:
@@ -55,3 +64,49 @@ def is_job_failed(job: client.models.V1Job) -> bool:
             return True
 
     return False
+
+
+def watch_jobs(
+    namespace: str, timeout: ty.Optional[int] = None
+) -> ty.Iterator[ty.Tuple[client.models.V1Job, EventType]]:
+    yield from watch_forever(
+        lambda _, __: client.BatchV1Api().list_namespaced_job,
+        namespace,
+        typename="Job",
+        timeout=timeout,
+    )
+
+
+def watch_jobs_cli() -> None:
+    import argparse
+    from datetime import datetime
+
+    parser = argparse.ArgumentParser(description="Watch Kubernetes jobs")
+    parser.add_argument("namespace", help="Kubernetes namespace to watch")
+    parser.add_argument("--timeout", type=int, help="Timeout in seconds", default=None)
+    args = parser.parse_args()
+
+    for job, event_type in watch_jobs(args.namespace, timeout=args.timeout):
+        completion_time = job_completion_time(job)
+        creation_time = job.metadata.creation_timestamp
+
+        status = ""
+        if is_job_failed(job):
+            status = "FAILED"
+        elif completion_time := job_completion_time(job):
+            job_duration = completion_time - creation_time
+            status = f"completed_after={job_duration}"
+        else:
+            status = "incomplete" + " " * 20
+
+        print(
+            datetime.now().isoformat(),
+            f"{event_type:<10}",
+            f"{job.metadata.name:<64}",
+            job.metadata.creation_timestamp,
+            status,
+        )
+
+
+if __name__ == "__main__":
+    watch_jobs_cli()
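A hypothetical sketch (not from the package) of the two paths job_completion_time() can take, using hand-built V1Job objects rather than ones fetched from a cluster; the import path follows the module layout shown in the RECORD diff below.

# hypothetical check of job_completion_time() / is_job_succeeded() behavior
from datetime import datetime, timezone

from kubernetes import client

from thds.mops.k8s.jobs import is_job_succeeded, job_completion_time

done = client.V1Job(
    status=client.V1JobStatus(completion_time=datetime(2025, 4, 7, 15, 0, tzinfo=timezone.utc))
)
assert is_job_succeeded(done)  # completion_time is returned directly

by_condition = client.V1Job(
    status=client.V1JobStatus(conditions=[client.V1JobCondition(type="Complete", status="True")])
)
# falls back to the Complete condition; with no last_transition_time it returns "now" in UTC
assert job_completion_time(by_condition) is not None

assert job_completion_time(client.V1Job()) is None  # no status yet, so not succeeded
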
thds/mops/k8s/launch.py CHANGED
@@ -41,6 +41,19 @@ class Counter:
         return self.value
 
 
+def construct_job_name(user_prefix: str, job_num: str) -> str:
+    # we want some consistency here, but also some randomness in case the prefixes don't exist or aren't unique.
+    mops_name_part = "-".join([str(os.getpid()), job_num, str(uuid.uuid4())[:8]]).lstrip("-")
+    if len(mops_name_part) > 63:
+        # this should be _impossible_, because a job number longer than even 20 digits would imply an
+        # impossibly large number of jobs. But just in case, we truncate to the last 63 characters.
+        mops_name_part = mops_name_part[-63:]  # keep the most random part, to avoid collisions
+    name = f"{user_prefix[:63 - (len(mops_name_part) + 1)]}-{mops_name_part}"
+    name = "".join([c if c.isalnum() or c == "-" else "-" for c in name.lower()])
+    assert len(name) <= 63, f"Job name `{name}` is too long; max length is 63 characters."
+    return name
+
+
 _LAUNCH_COUNT = Counter()
 _FINISH_COUNT = Counter()
 _SIMULTANEOUS_LAUNCHES = threading.BoundedSemaphore(20)
@@ -80,7 +93,7 @@ def launch(
     if not container_image:
         raise ValueError("container_image (the fully qualified Docker tag) must not be empty.")
     job_num = f"{_LAUNCH_COUNT.inc():0>3}"
-    name = "-".join([name_prefix, str(os.getpid()), job_num, str(uuid.uuid4())[:8]]).lstrip("-")
+    name = construct_job_name(name_prefix, job_num)
     scope.enter(logger_context(job=name))
     node_narrowing = node_narrowing or dict()
 
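An illustrative call (values hypothetical) showing what construct_job_name guarantees: the result is lowercased, non-alphanumeric characters become hyphens, and the user prefix is truncated first so that the pid/counter/uuid suffix, the most collision-resistant part, survives the 63-character Kubernetes name limit.

# hypothetical usage; the pid and uuid fragments vary from run to run
from thds.mops.k8s.launch import construct_job_name

name = construct_job_name("My_Training.Run", "007")
print(name)  # e.g. "my-training-run-12345-007-a1b2c3d4"
assert len(name) <= 63 and name == name.lower()
assert all(c.isalnum() or c == "-" for c in name)
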
thds/mops/k8s/logging.py CHANGED
@@ -161,7 +161,10 @@ def _get_pod_phase(pod_name: str) -> str:
         .read_namespaced_pod(
             namespace=config.k8s_namespace(),
             name=pod_name,
-            _request_timeout=(10, config.k8s_job_timeout_seconds()),
+            _request_timeout=(
+                config.k8s_watch_connection_timeout_seconds(),
+                config.k8s_watch_read_timeout_seconds(),
+            ),
         )
         .status.phase
     )
@@ -188,7 +191,10 @@ def _scrape_pod_logs(
     base_kwargs = dict(
         name=pod_name,
         namespace=config.k8s_namespace(),
-        _request_timeout=(10, config.k8s_job_timeout_seconds()),
+        _request_timeout=(
+            config.k8s_watch_connection_timeout_seconds(),
+            config.k8s_watch_read_timeout_seconds(),
+        ),
         # i'm occasionally seeing the `stream()` call below hang
         # indefinitely if logs don't come back from the pod for a
         # while. Which is ironic, since most of this code is here to
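For context on the tuple form above (a sketch, not package code): the kubernetes client forwards _request_timeout=(connect, read) to urllib3, so a call that cannot connect within the first value, or whose socket reads stall longer than the second, raises instead of hanging indefinitely. The pod name and namespace here are placeholders.

# illustrative only: (connection_timeout, read_timeout) on a plain API call
from kubernetes import client

phase = (
    client.CoreV1Api()
    .read_namespaced_pod(
        name="some-pod",           # placeholder
        namespace="default",       # placeholder
        _request_timeout=(5, 20),  # fail fast on connect; cap each socket read at 20s
    )
    .status.phase
)
print(phase)
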
thds/mops/k8s/warn_image_backoff.py CHANGED
@@ -24,7 +24,7 @@ def _emit_basic(event: client.CoreV1Event) -> None:
 def _warn_image_pull_backoff(namespace: str, on_backoff: OnCoreEvent = _emit_basic) -> None:
     """Log scary errors when ImagePullBackoff is observed."""
     start_dt = datetime.now(tz=timezone.utc)
-    for _ns, obj in yield_objects_from_list(
+    for _ns, obj, _event_type in yield_objects_from_list(
         namespace,
         lambda _, __: ty.cast(
             # do NOT use client.EventsV1Api here - for some reason
thds/mops/k8s/watch.py CHANGED
@@ -3,6 +3,7 @@
 This is a general-purpose fix for using watchers in a thread reliably.
 """
 
+import queue
 import threading
 import time
 import typing as ty
@@ -39,17 +40,23 @@ class K8sList(ty.Protocol[T]):
 # If this does not return a K8sList API method, the loop will exit
 GetListMethod = ty.Callable[[str, ty.Optional[Exception]], ty.Optional[K8sList[T]]]
 # if this returns True, the loop will exit.
-OnEvent = ty.Callable[[str, T], ty.Optional[bool]]
+EventType = ty.Literal["FETCH", "ADDED", "MODIFIED", "DELETED"]
+OnEvent = ty.Callable[[str, T, EventType], ty.Optional[bool]]
 
 
 def yield_objects_from_list(
     namespace: str,
     get_list_method: GetListMethod[T],
-    server_timeout: int = 10,
+    *,
+    server_timeout: int = config.k8s_watch_server_timeout_seconds(),
+    connection_timeout: int = config.k8s_watch_connection_timeout_seconds(),
+    read_timeout: int = config.k8s_watch_read_timeout_seconds(),
+    # connection and read timeout should generally be fairly aggressive so that we retry
+    # quickly if we don't hear anything for a while, and the config defaults are.
    object_type_hint: str = "items",
    init: ty.Optional[ty.Callable[[], None]] = None,
    **kwargs: ty.Any,
-) -> ty.Iterator[ty.Tuple[str, T]]:
+) -> ty.Iterator[ty.Tuple[str, T, EventType]]:
    ex = None
    if init:
        init()
@@ -58,14 +65,15 @@
             load_config()
             list_method = get_list_method(namespace, ex)
             if not list_method:
-                logger.debug(f"No longer watching {object_type_hint} events in namespace: {namespace}")
-                break
+                logger.debug(f"Stopped watching {object_type_hint} events in namespace: {namespace}")
+                return
+
             initial_list = list_method(namespace=namespace)
             logger.debug(
                 f"Listed {len(initial_list.items)} {object_type_hint} in namespace: {namespace}"
             )
             for object in initial_list.items:
-                yield namespace, object
+                yield namespace, object, "FETCH"
 
             if initial_list.metadata._continue:
                 logger.warning(
@@ -76,11 +84,12 @@
                 namespace=namespace,
                 resource_version=initial_list.metadata.resource_version,
                 **kwargs,
-                _request_timeout=(server_timeout, config.k8s_job_timeout_seconds()),
+                timeout_seconds=server_timeout,
+                _request_timeout=(connection_timeout, read_timeout),
             ):
                 object = evt.get("object")
                 if object:
-                    yield namespace, object
+                    yield namespace, object, evt["type"]
                 # once we've received events, let the resource version
                 # be managed automatically if possible.
         except urllib3.exceptions.ProtocolError:
@@ -96,10 +105,12 @@
             ex = e
 
 
-def callback_events(on_event: OnEvent[T], event_yielder: ty.Iterable[ty.Tuple[str, T]]) -> None:
+def callback_events(
+    on_event: OnEvent[T], event_yielder: ty.Iterable[ty.Tuple[str, T, EventType]]
+) -> None:
     """Suitable for use with a daemon thread."""
-    for namespace, event in event_yielder:
-        should_exit = on_event(namespace, event)
+    for namespace, obj, event in event_yielder:
+        should_exit = on_event(namespace, obj, event)
         if should_exit:
             break
 
@@ -116,7 +127,7 @@ def _default_get_namespace(obj: ty.Any) -> str:
     return obj.metadata.namespace
 
 
-STARTING = colorized(fg="white", bg="orange")
+STARTING = colorized(fg="black", bg="orange")
 
 
 class OneShotLimiter:
@@ -151,6 +162,67 @@ def is_stale(api_last_update_time: float, obj_last_seen_time: float) -> bool:
     return (time_since_obj_update := now - obj_last_seen_time) > allowed_stale_seconds  # noqa: F841
 
 
+def _wrap_get_list_method_with_too_old_check(
+    typename: str,
+    get_list_method: GetListMethod[T],
+) -> GetListMethod[T]:
+    def wrapped_get_list_method(namespace: str, exc: ty.Optional[Exception]) -> ty.Optional[K8sList[T]]:
+        suffix = ""
+        if exc:
+            too_old = parse_too_old_resource_version(exc)
+            if not too_old:
+                logger.exception(f"Not fatal, but sleeping before we retry {typename} scraping...")
+                time.sleep(config.k8s_monitor_delay())
+            suffix = f" after {type(exc).__name__}: {exc}"
+        logger.info(f"Watching {typename}s in namespace: {namespace}{suffix}")
+        return get_list_method(namespace, exc)
+
+    return wrapped_get_list_method
+
+
+def create_watch_thread(
+    get_list_method: GetListMethod[T],
+    callback: ty.Callable[[str, T, EventType], None],
+    namespace: str,
+    *,
+    typename: str = "object",
+) -> threading.Thread:
+    return threading.Thread(
+        target=callback_events,
+        args=(
+            callback,
+            yield_objects_from_list(
+                namespace,
+                _wrap_get_list_method_with_too_old_check(typename, get_list_method),
+                # arguably this wrapper could be composed externally, but i see no use cases so far where we'd want that.
+                object_type_hint=typename + "s",
+                init=lambda: logger.info(STARTING(f"Watching {typename}s in {namespace}")),
+            ),
+        ),
+        daemon=True,
+    )
+
+
+def watch_forever(
+    get_list_method: GetListMethod[T],
+    namespace: str,
+    *,
+    typename: str = "object",
+    timeout: ty.Optional[int] = None,
+) -> ty.Iterator[ty.Tuple[T, EventType]]:
+    q: queue.Queue[ty.Tuple[T, EventType]] = queue.Queue()
+
+    def put_queue(namespace: str, obj: T, event_type: EventType) -> None:
+        q.put((obj, event_type))
+
+    create_watch_thread(get_list_method, put_queue, namespace, typename=typename).start()
+    while True:
+        try:
+            yield q.get(timeout=timeout)
+        except queue.Empty:
+            break
+
+
 class WatchingObjectSource(ty.Generic[T]):
     """Efficiently 'get' objects by reliably watching for changes to all such objects in a given namespace.
 
@@ -184,21 +256,11 @@ class WatchingObjectSource(ty.Generic[T]):
         self._limiter = OneShotLimiter()
 
     def _start_thread(self, namespace: str) -> None:
-        threading.Thread(
-            target=callback_events,
-            args=(
-                self._add_object,
-                yield_objects_from_list(
-                    namespace,
-                    self._get_list_method_on_restart,
-                    object_type_hint=self.typename + "s",
-                    init=lambda: logger.info(STARTING(f"Watching {self.typename}s in {namespace}")),
-                ),
-            ),
-            daemon=True,
+        create_watch_thread(
+            self.get_list_method, self._add_object, namespace, typename=self.typename
         ).start()
 
-    def _add_object(self, namespace: str, obj: T) -> None:
+    def _add_object(self, namespace: str, obj: T, _event_type: EventType) -> None:
         """This is where we receive updates from the k8s API."""
         self._last_api_update_time = time.monotonic()
 
@@ -211,19 +273,6 @@ class WatchingObjectSource(ty.Generic[T]):
         self._last_seen_time_by_name[name] = time.monotonic()
         self._objs_by_name[name] = obj
 
-    def _get_list_method_on_restart(
-        self, namespace: str, exc: ty.Optional[Exception]
-    ) -> ty.Optional[K8sList[T]]:
-        suffix = ""
-        if exc:
-            too_old = parse_too_old_resource_version(exc)
-            if not too_old:
-                logger.exception(f"Not fatal, but sleeping before we retry {self.typename} scraping...")
-                time.sleep(config.k8s_monitor_delay())
-            suffix = f" after {type(exc).__name__}: {exc}"
-        logger.info(f"Watching {self.typename}s in namespace: {namespace}{suffix}")
-        return self.get_list_method(namespace, exc)
-
     def _is_stale(self, name: str) -> bool:
         return is_stale(self._last_api_update_time, self._last_seen_time_by_name.get(name) or 0)
 
@@ -250,7 +299,7 @@ class WatchingObjectSource(ty.Generic[T]):
         # doing a lot of manual fetches may indicate that the k8s API is having trouble keeping up...
         try:
             if obj := self.backup_fetch(namespace, obj_name):
-                self._add_object(namespace, obj)  # updates last seen, too
+                self._add_object(namespace, obj, "FETCH")  # updates last seen, too
                 return obj
 
         except Exception:
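A hypothetical usage sketch of the new watch_forever() helper, wired the same way watch_jobs() is wired in jobs.py above but for pods; the namespace and the 30-second idle timeout are illustrative.

# hypothetical pod watcher built on watch_forever(); exits after 30 idle seconds
import typing as ty

from kubernetes import client

from thds.mops.k8s.watch import EventType, watch_forever


def watch_pods(namespace: str) -> ty.Iterator[ty.Tuple[client.models.V1Pod, EventType]]:
    yield from watch_forever(
        lambda _, __: client.CoreV1Api().list_namespaced_pod,  # GetListMethod ignores (namespace, last exception)
        namespace,
        typename="Pod",
        timeout=30,  # queue.Empty after 30 seconds without events ends the generator
    )


for pod, event_type in watch_pods("default"):
    print(event_type, pod.metadata.name, pod.status.phase)
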
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: thds.mops
-Version: 3.6.20250407135813
+Version: 3.6.20250407153122
 Summary: ML Ops tools for Trilliant Health
 Author-email: Trilliant Health <info@trillianthealth.com>
 Project-URL: Repository, https://github.com/TrilliantHealth/ds-monorepo
@@ -20,18 +20,18 @@ thds/mops/k8s/__init__.py,sha256=y4bvR5DQLiriBV0I8CZoQdNlNrXWQ2VpcxY5lOrdnzU,669
 thds/mops/k8s/_shared.py,sha256=MR-s6ijWUHZGjxK_fsOpHuRDB6kuofjo5xiIb7ul2VM,86
 thds/mops/k8s/apply_yaml.py,sha256=hVW6dIVbNdzHdbGlc2VAPGkdByv_rH2oPybyIm7tKIM,820
 thds/mops/k8s/auth.py,sha256=mXFPZvyJYEPASsBatv1r8syB9AoayuHGptHHnNUg8LE,1517
-thds/mops/k8s/config.py,sha256=AeOhstn2OhMvq53Zt0pSP0EF4DPmuCSLYKkTXHhk4ZQ,1876
+thds/mops/k8s/config.py,sha256=FyzuiUaNq8mQR01GqrezQWSc21QVqjLY9iaJUGu5WAo,2568
 thds/mops/k8s/container_registry.py,sha256=qOiGCE4t_tLYgJDGrhKV9KNv48lF_AlwCDHyFgucd2s,539
-thds/mops/k8s/jobs.py,sha256=4jVoj-mo__VYVdzfLwueHr_n192qBM3kKosWikkmcsU,1506
-thds/mops/k8s/launch.py,sha256=JF2de7iQFu_-n2vtXJQaQtwqV2qwCMiF329l-V6jdgY,9255
-thds/mops/k8s/logging.py,sha256=K7wVam88rgi09EczomtY0YyD2uJUNjlr2Pi8ViwHCls,9409
+thds/mops/k8s/jobs.py,sha256=3u0jc5Fnll2ncnmcdTUHlcxJ_KYNK9s66W7r6ez49As,3271
+thds/mops/k8s/launch.py,sha256=PXAcqFSLp839bKhu13RPFVH-UzOdS6u7hm_J-N5Kkeg,10087
+thds/mops/k8s/logging.py,sha256=yJ5Gt_luyo6LXbUJSXbhA4f1z7il1WuHQjMcP8eB00U,9591
 thds/mops/k8s/namespace.py,sha256=Z6trVTU9WFashto4PqIhTcxu-foOF93W0TpgqCU7WIA,383
 thds/mops/k8s/node_selection.py,sha256=Gy2Jz8IxZblg2LmtGg8-MtKI4RmXz2AMXqFPP8OQyu0,2065
 thds/mops/k8s/retry.py,sha256=JVfP304kItpLs5nrONHE5UWkVWlrFGlV_oFQqhq3zHg,2846
 thds/mops/k8s/too_old_resource_version.py,sha256=S7ltVA-LrxUpQ8Q__AB0nQmezN8Mmnx5oKK62_baAKI,1500
 thds/mops/k8s/wait_job.py,sha256=aCzjuhkiSnhW5SkOgRM1ufuMavAuOztkvn2mM2Rx1Ok,2440
-thds/mops/k8s/warn_image_backoff.py,sha256=a0b2OQf0VOG0n0W1nVOIOaazLSUATPFIDWYpNoHfEx0,1916
-thds/mops/k8s/watch.py,sha256=M2SSPm_IER7s45OO_f7gIAGeFpzmADgjRus1uEZtA7Y,9733
+thds/mops/k8s/warn_image_backoff.py,sha256=sUoKHaT7eh5C26veb8X7FH1CnBNBE9kX3ZMEDaH9kK4,1929
+thds/mops/k8s/watch.py,sha256=zeexTpXMoWKuhDOfdawmzWxMWkX__XutecueYXSADqY,11420
 thds/mops/k8s/tools/krsync.py,sha256=us7pXX0-bRMwD2oAno7Z6BJcPs6FgaUabHW0STyQJYg,1773
 thds/mops/k8s/tools/krsync.sh,sha256=lskw4COt51Bv1yy2IAYUc8u8uQV-coSyUiOT8rADKkQ,546
 thds/mops/pure/__init__.py,sha256=kbG0lMvXRBS3LGbb2gPPE9-qjYMXrypyb2tJX2__aZc,1533
@@ -103,8 +103,8 @@ thds/mops/pure/tools/summarize/cli.py,sha256=gaechsJhRZsOxGJGG1dQsW5dMBlgSv2sUmE
 thds/mops/pure/tools/summarize/run_summary.py,sha256=HCfAJhgAa9u6kXbzJlHa2n-9vVTaTHYSxrN_DP-Sjo4,4892
 thds/mops/testing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 thds/mops/testing/deferred_imports.py,sha256=f0ezCgQAtzTqW1yAOb0OWgsB9ZrlztLB894LtpWDaVw,3780
-thds_mops-3.6.20250407135813.dist-info/METADATA,sha256=gRS0ByEEpaFHqCO1I7oA50_D_MZG4AikVQMkdCvbHws,2161
-thds_mops-3.6.20250407135813.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-thds_mops-3.6.20250407135813.dist-info/entry_points.txt,sha256=GShNqjcjbq0TAJuwpyeCI5XCltiwdZxnNHkBpmYbNkU,329
-thds_mops-3.6.20250407135813.dist-info/top_level.txt,sha256=LTZaE5SkWJwv9bwOlMbIhiS-JWQEEIcjVYnJrt-CriY,5
-thds_mops-3.6.20250407135813.dist-info/RECORD,,
+thds_mops-3.6.20250407153122.dist-info/METADATA,sha256=Mxupz7gO3eUSDt4fmHtAmmb4CyTmxZ9SY5ayxJuio5A,2161
+thds_mops-3.6.20250407153122.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+thds_mops-3.6.20250407153122.dist-info/entry_points.txt,sha256=GShNqjcjbq0TAJuwpyeCI5XCltiwdZxnNHkBpmYbNkU,329
+thds_mops-3.6.20250407153122.dist-info/top_level.txt,sha256=LTZaE5SkWJwv9bwOlMbIhiS-JWQEEIcjVYnJrt-CriY,5
+thds_mops-3.6.20250407153122.dist-info/RECORD,,