ob-metaflow-extensions 1.1.131rc0__py2.py3-none-any.whl → 1.1.133__py2.py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.


--- metaflow_extensions/outerbounds/plugins/__init__.py
+++ metaflow_extensions/outerbounds/plugins/__init__.py
@@ -320,6 +320,7 @@ STEP_DECORATORS_DESC = [
     ("tensorboard", ".tensorboard.TensorboardDecorator"),
     ("gpu_profile", ".profilers.gpu_profile_decorator.GPUProfileDecorator"),
     ("nim", ".nim.NimDecorator"),
+    ("ollama", ".ollama.OllamaDecorator"),
 ]
 
 TOGGLE_STEP_DECORATOR = [
@@ -338,4 +339,4 @@ SECRETS_PROVIDERS_DESC = [
     ("outerbounds", ".secrets.secrets.OuterboundsSecretsProvider"),
 ]
 # Adding an override here so the library can be imported at the metaflow.plugins level
-__mf_promote_submodules__ = ["snowflake"]
+__mf_promote_submodules__ = ["snowflake", "ollama"]
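
The change above registers the new step decorator under the name "ollama" and promotes the ollama submodule at the metaflow.plugins level. As a quick sanity check (a sketch, not part of this package: it assumes an environment with this wheel installed and uses Metaflow's standard plugin registry), the registered decorator names can be listed:

from metaflow.plugins import STEP_DECORATORS

# Each registered step decorator class exposes a `name` attribute; after this
# release the registry should contain "ollama" alongside "nim", "gpu_profile", etc.
print(sorted({d.name for d in STEP_DECORATORS}))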
--- metaflow_extensions/outerbounds/plugins/nim/card.py
+++ metaflow_extensions/outerbounds/plugins/nim/card.py
@@ -1,10 +1,5 @@
-import os, sqlite3
-from metaflow.cards import (
-    Markdown,
-    Table,
-    ProgressBar,
-)
-from metaflow.decorators import StepDecorator
+import sqlite3
+from metaflow.cards import Markdown, Table
 from metaflow.metaflow_current import current
 
 from .utilities import get_storage_path
@@ -12,10 +7,6 @@ from ..card_utilities.async_cards import CardRefresher
 from ..card_utilities.extra_components import BarPlot, ViolinPlot
 
 
-def json_to_artifact_table(data):
-    return ArtifactTable(data)
-
-
 class NimMetricsRefresher(CardRefresher):
     CARD_ID = "nim_metrics"
 
--- /dev/null
+++ metaflow_extensions/outerbounds/plugins/ollama/__init__.py
@@ -0,0 +1,70 @@
+from metaflow.decorators import StepDecorator
+from metaflow import current
+import functools
+
+from .ollama import OllamaManager
+from ..card_utilities.injector import CardDecoratorInjector
+
+__mf_promote_submodules__ = ["plugins.ollama"]
+
+
+class OllamaDecorator(StepDecorator, CardDecoratorInjector):
+    """
+    This decorator is used to run Ollama APIs as Metaflow task sidecars.
+
+    User code call
+    -----------
+    @ollama(
+        models=['meta/llama3-8b-instruct', 'meta/llama3-70b-instruct'],
+        backend='local'
+    )
+
+    Valid backend options
+    ---------------------
+    - 'local': Run as a separate process on the local task machine.
+    - (TODO) 'managed': Outerbounds hosts and selects compute provider.
+    - (TODO) 'remote': Spin up separate instance to serve Ollama models.
+
+    Valid model options
+    ----------------
+    - 'llama3.2'
+    - 'llama3.3'
+    - any model here https://ollama.com/search
+
+    Parameters
+    ----------
+    models: list[Ollama]
+        List of Ollama containers running models in sidecars.
+    backend: str
+        Determines where and how to run the Ollama process.
+    """
+
+    name = "ollama"
+    defaults = {"models": [], "backend": "local", "debug": False}
+
+    def task_decorate(
+        self, step_func, flow, graph, retry_count, max_user_code_retries, ubf_context
+    ):
+        @functools.wraps(step_func)
+        def ollama_wrapper():
+            try:
+                self.ollama_manager = OllamaManager(
+                    models=self.attributes["models"],
+                    backend=self.attributes["backend"],
+                    debug=self.attributes["debug"],
+                )
+            except Exception as e:
+                print(f"[@ollama] Error initializing OllamaManager: {e}")
+                raise
+            try:
+                step_func()
+            finally:
+                try:
+                    self.ollama_manager.terminate_models()
+                except Exception as term_e:
+                    print(f"[@ollama] Error during sidecar termination: {term_e}")
+                if self.attributes["debug"]:
+                    print(f"[@ollama] process statuses: {self.ollama_manager.processes}")
+                    print(f"[@ollama] process runtime stats: {self.ollama_manager.stats}")
+
+        return ollama_wrapper
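
The docstring above covers the decorator's options; the sketch below shows how a step might use it end to end. It is illustrative only and rests on a few assumptions: the decorator is importable as `ollama` from `metaflow` once this extension is installed (as with the other registered step decorators), the sidecar serves on the default localhost:11434 port that OllamaManager polls, and Ollama's non-streaming /api/generate endpoint is used; the flow name, model, and prompt are placeholders.

import requests
from metaflow import FlowSpec, step, ollama


class OllamaHelloFlow(FlowSpec):

    @ollama(models=["llama3.2"], backend="local", debug=True)
    @step
    def start(self):
        # By the time user code runs, the sidecar has installed Ollama,
        # started `ollama serve`, pulled the model, and launched it.
        resp = requests.post(
            "http://localhost:11434/api/generate",
            json={"model": "llama3.2", "prompt": "Say hello.", "stream": False},
            timeout=300,
        )
        resp.raise_for_status()
        self.answer = resp.json().get("response")
        self.next(self.end)

    @step
    def end(self):
        print(self.answer)


if __name__ == "__main__":
    OllamaHelloFlow()

Whether the step succeeds or fails, the wrapper's finally block calls terminate_models(), so the model processes and the API server are shut down together with the task.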
--- /dev/null
+++ metaflow_extensions/outerbounds/plugins/ollama/ollama.py
@@ -0,0 +1,328 @@
+import subprocess
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import time
+import socket
+import sys
+import os
+import functools
+
+
+class ProcessStatus:
+    RUNNING = "RUNNING"
+    FAILED = "FAILED"
+    SUCCESSFUL = "SUCCESSFUL"
+
+
+class OllamaManager:
+
+    """
+    A process manager for Ollama runtimes.
+    This is run locally, e.g., whether @ollama has a local, remote, or managed backend.
+    """
+
+    def __init__(self, models, backend="local", debug=False):
+        self.models = {}
+        self.processes = {}
+        self.debug = debug
+        self.stats = {}
+
+        if backend != "local":
+            raise ValueError(
+                "OllamaManager only supports the 'local' backend at this time."
+            )
+
+        self._timeit(self._install_ollama, "install_ollama")
+        self._timeit(self._launch_server, "launch_server")
+
+        # Pull models concurrently
+        with ThreadPoolExecutor() as executor:
+            futures = [executor.submit(self._pull_model, m) for m in models]
+            for future in as_completed(futures):
+                try:
+                    future.result()
+                except Exception as e:
+                    raise RuntimeError(f"Error pulling one or more models: {e}") from e
+
+        # Run models as background processes.
+        for m in models:
+            f = functools.partial(self._run_model, m)
+            self._timeit(f, f"model_{m.lower()}")
+
+    def _timeit(self, f, name):
+        t0 = time.time()
+        f()
+        tf = time.time()
+        self.stats[name] = {"process_runtime": tf - t0}
+
+    def _install_ollama(self, max_retries=3):
+
+        try:
+            result = subprocess.run(["which", "ollama"], capture_output=True, text=True)
+            if result.returncode == 0:
+                if self.debug:
+                    print("[@ollama] is already installed.")
+                return
+        except Exception as e:
+            print("[@ollama] Did not find Ollama installation: %s" % e)
+        if sys.platform == "darwin":
+            raise RuntimeError(
+                "On macOS, please install Ollama manually from https://ollama.com/download"
+            )
+
+        env = os.environ.copy()
+        env["CURL_IPRESOLVE"] = "4"
+        for attempt in range(max_retries):
+            try:
+                install_cmd = ["curl", "-fsSL", "https://ollama.com/install.sh"]
+                curl_proc = subprocess.run(
+                    install_cmd, capture_output=True, text=True, env=env
+                )
+                if curl_proc.returncode != 0:
+                    raise RuntimeError(
+                        f"Failed to download Ollama install script: stdout: {curl_proc.stdout}, stderr: {curl_proc.stderr}"
+                    )
+                sh_proc = subprocess.run(
+                    ["sh"],
+                    input=curl_proc.stdout,
+                    capture_output=True,
+                    text=True,
+                    env=env,
+                )
+                if sh_proc.returncode != 0:
+                    raise RuntimeError(
+                        f"Ollama installation script failed: stdout: {sh_proc.stdout}, stderr: {sh_proc.stderr}"
+                    )
+                if self.debug:
+                    print("[@ollama] Installed successfully.")
+                break
+            except Exception as e:
+                print(f"Installation attempt {attempt+1} failed: {e}")
+                if attempt < max_retries - 1:
+                    time.sleep(5)
+                else:
+                    raise RuntimeError(
+                        f"Error installing Ollama after {max_retries} attempts: {e}"
+                    ) from e
+
+    def _is_port_open(self, host, port, timeout=1):
+        """Check if a TCP port is open on a given host."""
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
+            sock.settimeout(timeout)
+            try:
+                sock.connect((host, port))
+                return True
+            except socket.error:
+                return False
+
+    def _launch_server(self):
+        """
+        Start the Ollama server process and ensure it's running.
+        This version waits until the server is listening on port 11434.
+        """
+        try:
+            if self.debug:
+                print("[@ollama] Starting Ollama server...")
+            process = subprocess.Popen(
+                ["ollama", "serve"],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+            )
+            self.processes[process.pid] = {
+                "p": process,
+                "properties": {"type": "api-server", "error_details": None},
+                "status": ProcessStatus.RUNNING,
+            }
+            if self.debug:
+                print(
+                    "[@ollama] Started Ollama server process with PID %s" % process.pid
+                )
+
+            # Wait until the server is ready (listening on 127.0.0.1:11434)
+            host, port = "127.0.0.1", 11434
+            retries = 0
+            max_retries = 10
+            while (
+                not self._is_port_open(host, port, timeout=1) and retries < max_retries
+            ):
+                print(
+                    "[@ollama] Waiting for server to be ready... (%d/%d)"
+                    % (retries + 1, max_retries)
+                )
+                time.sleep(1)
+                retries += 1
+
+            if not self._is_port_open(host, port, timeout=1):
+                error_details = (
+                    f"Ollama server did not start listening on {host}:{port}"
+                )
+                self.processes[process.pid]["properties"][
+                    "error_details"
+                ] = error_details
+                self.processes[process.pid]["status"] = ProcessStatus.FAILED
+                raise RuntimeError(f"Ollama server failed to start. {error_details}")
+
+            # Check if the process has unexpectedly terminated
+            returncode = process.poll()
+            if returncode is not None:
+                stdout, stderr = process.communicate()
+                error_details = f"Return code: {returncode}, Error: {stderr}"
+                self.processes[process.pid]["properties"][
+                    "error_details"
+                ] = error_details
+                self.processes[process.pid]["status"] = ProcessStatus.FAILED
+                raise RuntimeError(f"Ollama server failed to start. {error_details}")
+
+            print("[@ollama] Server is ready.")
+
+        except Exception as e:
+            if "process" in locals() and process.pid in self.processes:
+                self.processes[process.pid]["status"] = ProcessStatus.FAILED
+                self.processes[process.pid]["properties"]["error_details"] = str(e)
+            raise RuntimeError(f"Error starting Ollama server: {e}") from e
+
+    def _pull_model(self, m):
+        try:
+            if self.debug:
+                print("[@ollama] Pulling model: %s" % m)
+            result = subprocess.run(
+                ["ollama", "pull", m], capture_output=True, text=True
+            )
+            if result.returncode != 0:
+                raise RuntimeError(
+                    f"Failed to pull model {m}: stdout: {result.stdout}, stderr: {result.stderr}"
+                )
+            if self.debug:
+                print("[@ollama] Model %s pulled successfully." % m)
+        except Exception as e:
+            raise RuntimeError(f"Error pulling Ollama model {m}: {e}") from e
+
+    def _run_model(self, m):
+        """
+        Start the Ollama model as a subprocess and record its status.
+        """
+        process = None
+        try:
+            if self.debug:
+                print("[@ollama] Running model: %s" % m)
+            process = subprocess.Popen(
+                ["ollama", "run", m],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+            )
+            self.processes[process.pid] = {
+                "p": process,
+                "properties": {"type": "model", "model": m, "error_details": None},
+                "status": ProcessStatus.RUNNING,
+            }
+            if self.debug:
+                print("[@ollama] Stored process %s for model %s." % (process.pid, m))
+
+            try:
+                process.wait(timeout=1)
+            except subprocess.TimeoutExpired:
+                pass
+
+            returncode = process.poll()
+            if returncode is not None:
+                stdout, stderr = process.communicate()
+                if returncode == 0:
+                    self.processes[process.pid]["status"] = ProcessStatus.SUCCESSFUL
+                    if self.debug:
+                        print(
+                            "[@ollama] Process %s for model %s exited successfully."
+                            % (process.pid, m)
+                        )
+                else:
+                    error_details = f"Return code: {returncode}, Error: {stderr}"
+                    self.processes[process.pid]["properties"][
+                        "error_details"
+                    ] = error_details
+                    self.processes[process.pid]["status"] = ProcessStatus.FAILED
+                    if self.debug:
+                        print(
+                            "[@ollama] Process %s for model %s failed: %s"
+                            % (process.pid, m, error_details)
+                        )
+        except Exception as e:
+            if process and process.pid in self.processes:
+                self.processes[process.pid]["status"] = ProcessStatus.FAILED
+                self.processes[process.pid]["properties"]["error_details"] = str(e)
+            raise RuntimeError(f"Error running Ollama model {m}: {e}") from e
+
+    def terminate_models(self):
+        """
+        Terminate all processes gracefully.
+        First, stop model processes using 'ollama stop <model>'.
+        Then, shut down the API server process.
+        """
+
+        for pid, process_info in list(self.processes.items()):
+            if process_info["properties"].get("type") == "model":
+                model_name = process_info["properties"].get("model")
+                if self.debug:
+                    print(
+                        "[@ollama] Stopping model %s using 'ollama stop'" % model_name
+                    )
+                try:
+                    result = subprocess.run(
+                        ["ollama", "stop", model_name], capture_output=True, text=True
+                    )
+                    if result.returncode == 0:
+                        process_info["status"] = ProcessStatus.SUCCESSFUL
+                        if self.debug:
+                            print(
+                                "[@ollama] Model %s stopped successfully." % model_name
+                            )
+                    else:
+                        process_info["status"] = ProcessStatus.FAILED
+                        if self.debug:
+                            print(
+                                "[@ollama] Model %s failed to stop gracefully. Return code: %s, Error: %s"
+                                % (model_name, result.returncode, result.stderr)
+                            )
+                except Exception as e:
+                    process_info["status"] = ProcessStatus.FAILED
+                    print("[@ollama] Error stopping model %s: %s" % (model_name, e))
+
+        # Then, stop the API server
+        for pid, process_info in list(self.processes.items()):
+            if process_info["properties"].get("type") == "api-server":
+                if self.debug:
+                    print(
+                        "[@ollama] Stopping API server process with PID %s using process.terminate()"
+                        % pid
+                    )
+                process = process_info["p"]
+                try:
+                    process.terminate()
+                    try:
+                        process.wait(timeout=5)
+                    except subprocess.TimeoutExpired:
+                        print(
+                            "[@ollama] API server process %s did not terminate in time; killing it."
+                            % pid
+                        )
+                        process.kill()
+                        process.wait()
+                    returncode = process.poll()
+                    if returncode is None or returncode != 0:
+                        process_info["status"] = ProcessStatus.FAILED
+                        print(
+                            "[@ollama] API server process %s terminated with error code %s."
+                            % (pid, returncode)
+                        )
+                    else:
+                        process_info["status"] = ProcessStatus.SUCCESSFUL
+                        if self.debug:
+                            print(
+                                "[@ollama] API server process %s terminated successfully."
+                                % pid
+                            )
+                except Exception as e:
+                    process_info["status"] = ProcessStatus.FAILED
+                    print(
+                        "[@ollama] Warning: Error while terminating API server process %s: %s"
+                        % (pid, e)
+                    )
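
The same lifecycle can be driven by hand outside the decorator; the sketch below simply mirrors what ollama_wrapper does. The import path is taken from the RECORD entry further down (plugins/ollama/ollama.py) and assumes the wheel is installed on the Python path; the model name is a placeholder.

from metaflow_extensions.outerbounds.plugins.ollama.ollama import OllamaManager

# Install Ollama if needed, start `ollama serve`, then pull and launch the models.
manager = OllamaManager(models=["llama3.2"], backend="local", debug=True)
try:
    pass  # call the local Ollama API here, exactly as a decorated step would
finally:
    # Mirror the decorator's finally block: `ollama stop` each model,
    # then terminate the `ollama serve` process.
    manager.terminate_models()
    print(manager.stats)      # per-phase runtimes recorded by _timeit
    print(manager.processes)  # per-process status bookkeeping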
--- /dev/null
+++ metaflow_extensions/outerbounds/toplevel/plugins/ollama/__init__.py
@@ -0,0 +1 @@
+__mf_promote_submodules__ = ["plugins.ollama"]
--- ob_metaflow_extensions-1.1.131rc0.dist-info/METADATA
+++ ob_metaflow_extensions-1.1.133.dist-info/METADATA
@@ -1,13 +1,13 @@
 Metadata-Version: 2.1
 Name: ob-metaflow-extensions
-Version: 1.1.131rc0
+Version: 1.1.133
 Summary: Outerbounds Platform Extensions for Metaflow
 Author: Outerbounds, Inc.
 License: Commercial
 Description-Content-Type: text/markdown
 Requires-Dist: boto3
 Requires-Dist: kubernetes
-Requires-Dist: ob-metaflow (==2.14.0.2rc0)
+Requires-Dist: ob-metaflow (==2.14.3.1)
 
 # Outerbounds platform package
 
--- ob_metaflow_extensions-1.1.131rc0.dist-info/RECORD
+++ ob_metaflow_extensions-1.1.133.dist-info/RECORD
@@ -1,7 +1,7 @@
 metaflow_extensions/outerbounds/__init__.py,sha256=TRGvIUMjkfneWtYUFSWoubu_Kf2ekAL4WLbV3IxOj9k,499
 metaflow_extensions/outerbounds/remote_config.py,sha256=Zpfpjgz68_ZgxlXezjzlsDLo4840rkWuZgwDB_5H57U,4059
 metaflow_extensions/outerbounds/config/__init__.py,sha256=JsQGRuGFz28fQWjUvxUgR8EKBLGRdLUIk_buPLJplJY,1225
-metaflow_extensions/outerbounds/plugins/__init__.py,sha256=5BFqGhlS9M23WUCoTNQfnOu-mB4iO9z7yK0586IRHMI,12749
+metaflow_extensions/outerbounds/plugins/__init__.py,sha256=nYs0AgKs58TYtRhm6luTp72UFj8-skdO5QPi-sIC3AQ,12802
 metaflow_extensions/outerbounds/plugins/auth_server.py,sha256=_Q9_2EL0Xy77bCRphkwT1aSu8gQXRDOH-Z-RxTUO8N4,2202
 metaflow_extensions/outerbounds/plugins/perimeters.py,sha256=QXh3SFP7GQbS-RAIxUOPbhPzQ7KDFVxZkTdKqFKgXjI,2697
 metaflow_extensions/outerbounds/plugins/card_utilities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -16,7 +16,7 @@ metaflow_extensions/outerbounds/plugins/fast_bakery/fast_bakery_decorator.py,sha
 metaflow_extensions/outerbounds/plugins/kubernetes/__init__.py,sha256=5zG8gShSj8m7rgF4xgWBZFuY3GDP5n1T0ktjRpGJLHA,69
 metaflow_extensions/outerbounds/plugins/kubernetes/kubernetes_client.py,sha256=fx_XUkgR4r6hF2ilDfT5LubRyVrYMVIv5f6clHkCaEk,5988
 metaflow_extensions/outerbounds/plugins/nim/__init__.py,sha256=MEdX6TPdY9POflCiaYbVmwT-nUNeqwregZBzBZ5CNz0,4487
-metaflow_extensions/outerbounds/plugins/nim/card.py,sha256=EfV8x2XNZkhdOqlNPAGRY_YhahPYxcbFKQAjrXwZTvI,4941
+metaflow_extensions/outerbounds/plugins/nim/card.py,sha256=EM6QtevpJmXpeCesKDk2L6ts6M2qLSYUbajaEEU_yys,4794
 metaflow_extensions/outerbounds/plugins/nim/nim_manager.py,sha256=fCFdRuvagzgPSVQfhy5bzbxfVBeO562h-cDz6CCLQLw,12118
 metaflow_extensions/outerbounds/plugins/nim/utilities.py,sha256=jSdNP3tSCrDjxD2E9bIzxVqDDu6S14femlxSjsMv57o,151
 metaflow_extensions/outerbounds/plugins/nvcf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -27,6 +27,8 @@ metaflow_extensions/outerbounds/plugins/nvcf/nvcf.py,sha256=zhHgoE76WOpCJnoMvSNe
 metaflow_extensions/outerbounds/plugins/nvcf/nvcf_cli.py,sha256=3D-r5XO88Yh2k1EAZFJTe_PwdbhWp5qXflG8AgE4ZIU,9500
 metaflow_extensions/outerbounds/plugins/nvcf/nvcf_decorator.py,sha256=pxxNxW4bW3kbB6ybRam657GyKIhvIkMuidB94iFfCD8,9116
 metaflow_extensions/outerbounds/plugins/nvcf/utils.py,sha256=DxWSCayfa95e0HJkWacey1s1nxoTpaunGhrb_0Ayv28,133
+metaflow_extensions/outerbounds/plugins/ollama/__init__.py,sha256=HEsI5U4ckQby7K2NsGBOdizhPY3WWqXSnXx_IHL7_No,2307
+metaflow_extensions/outerbounds/plugins/ollama/ollama.py,sha256=KlP8_EmnUoi8-PidyU0IDuENYxKjQaHFC33yGsvaeic,13320
 metaflow_extensions/outerbounds/plugins/profilers/deco_injector.py,sha256=oI_C3c64XBm7n88FILqHwn-Nnc5DeT_68I67lM9rXaI,2434
 metaflow_extensions/outerbounds/plugins/profilers/gpu_profile_decorator.py,sha256=gDHQ2sMIp4NuZSzUspbSd8RGdFAoO5mgZAyFcZ2a51Y,2619
 metaflow_extensions/outerbounds/plugins/secrets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -49,8 +51,9 @@ metaflow_extensions/outerbounds/toplevel/global_aliases_for_metaflow_package.py,
 metaflow_extensions/outerbounds/toplevel/plugins/azure/__init__.py,sha256=WUuhz2YQfI4fz7nIcipwwWq781eaoHEk7n4GAn1npDg,63
 metaflow_extensions/outerbounds/toplevel/plugins/gcp/__init__.py,sha256=BbZiaH3uILlEZ6ntBLKeNyqn3If8nIXZFq_Apd7Dhco,70
 metaflow_extensions/outerbounds/toplevel/plugins/kubernetes/__init__.py,sha256=5zG8gShSj8m7rgF4xgWBZFuY3GDP5n1T0ktjRpGJLHA,69
+metaflow_extensions/outerbounds/toplevel/plugins/ollama/__init__.py,sha256=GRSz2zwqkvlmFS6bcfYD_CX6CMko9DHQokMaH1iBshA,47
 metaflow_extensions/outerbounds/toplevel/plugins/snowflake/__init__.py,sha256=LptpH-ziXHrednMYUjIaosS1SXD3sOtF_9_eRqd8SJw,50
-ob_metaflow_extensions-1.1.131rc0.dist-info/METADATA,sha256=X_n4lJEbpxO3uOnW2OdurRIzuTfHcfHmQecNspjx_uU,526
-ob_metaflow_extensions-1.1.131rc0.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110
-ob_metaflow_extensions-1.1.131rc0.dist-info/top_level.txt,sha256=NwG0ukwjygtanDETyp_BUdtYtqIA_lOjzFFh1TsnxvI,20
-ob_metaflow_extensions-1.1.131rc0.dist-info/RECORD,,
+ob_metaflow_extensions-1.1.133.dist-info/METADATA,sha256=OA0TksO1c611FUdupK69MqlXGzthWPwYthaHjPqC4dA,520
+ob_metaflow_extensions-1.1.133.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110
+ob_metaflow_extensions-1.1.133.dist-info/top_level.txt,sha256=NwG0ukwjygtanDETyp_BUdtYtqIA_lOjzFFh1TsnxvI,20
+ob_metaflow_extensions-1.1.133.dist-info/RECORD,,