ob-metaflow-extensions 1.1.114__py2.py3-none-any.whl → 1.1.116__py2.py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of ob-metaflow-extensions might be problematic.

metaflow_extensions/outerbounds/plugins/nvcf/constants.py (added)

@@ -0,0 +1,3 @@
+ SUPPORTABLE_GPU_TYPES = ["L40", "L40S", "L40G", "H100"]
+ DEFAULT_GPU_TYPE = "H100"
+ MAX_N_GPU_BY_TYPE = {"L40": 1, "L40S": 1, "L40G": 1, "H100": 4}
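These constants bound what `@nvidia` will accept; the enforcement lives in `nvcf_decorator.py`, further down this diff. For orientation, a hypothetical flow that stays within these limits — assuming, as is typical for Metaflow extensions, that the `nvidia` decorator is importable from the top-level `metaflow` namespace:

```python
# Sketch only: the flow and step names are invented.
from metaflow import FlowSpec, step, nvidia


class HelloDgxFlow(FlowSpec):
    @nvidia(gpu=4, gpu_type="H100")  # H100 is the only type here allowing >1 GPU
    @step
    def start(self):
        print("running on DGX cloud")
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    HelloDgxFlow()
```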
metaflow_extensions/outerbounds/plugins/nvcf/exceptions.py (added)

@@ -0,0 +1,83 @@
+ from metaflow.exception import MetaflowException
+ from .constants import SUPPORTABLE_GPU_TYPES
+
+
+ class NvcfJobFailedException(MetaflowException):
+     headline = "[@nvidia] error"
+
+     def __init__(self, msg):
+         super(NvcfJobFailedException, self).__init__(msg)
+
+
+ class NvcfPollingConnectionError(MetaflowException):
+     headline = "[@nvidia] polling error."
+
+     def __init__(self, og_error_msg):
+         msg = (
+             "An error occurred while polling the job status. "
+             "\n\nOriginal error message: %s" % (og_error_msg)
+         )
+
+         super(NvcfPollingConnectionError, self).__init__(msg)
+
+
+ class RequestedGPUTypeUnavailableException(MetaflowException):
+     headline = "[@nvidia RequestedGPUTypeUnavailableException] GPU type unavailable."
+
+     def __init__(self, requested_gpu_type):
+         msg = (
+             f"The requested GPU type @nvidia(..., gpu_type='{requested_gpu_type}') is not available. "
+             f"Please choose from the following supported GPU types when using @nvidia: {SUPPORTABLE_GPU_TYPES}"
+         )
+         super(RequestedGPUTypeUnavailableException, self).__init__(msg)
+
+
+ class UnsupportedNvcfConfigurationException(MetaflowException):
+     headline = (
+         "[@nvidia UnsupportedNvcfConfigurationException] Unsupported GPU configuration"
+     )
+
+     def __init__(self, n_gpu, gpu_type, available_configurations, step):
+         msg = f"The requested configuration of @nvidia(gpu={n_gpu}, gpu_type='{gpu_type}') for @step {step} is not available."
+         if len(available_configurations) == 0:
+             msg += (
+                 "\n\nNo configurations are available in your Outerbounds deployment."
+                 " Please contact Outerbounds support if you wish to use @nvidia."
+             )
+         else:
+             msg += f"\n\nAvailable configurations for your deployment include: \n\t- {self._display(available_configurations)}"
+             msg += "\n\nPlease contact Outerbounds support if you wish to use a configuration not listed above."
+         super(UnsupportedNvcfConfigurationException, self).__init__(msg)
+
+     def _display(self, configs):
+         _available_decos = []
+         for cfg in configs:
+             n_gpu, gpu_type = cfg[0], cfg[1]
+             _available_decos.append(f"@nvidia(gpu={n_gpu}, gpu_type='{gpu_type}')")
+         return "\n\t- ".join(_available_decos)
+
+
+ class UnsupportedNvcfDatastoreException(MetaflowException):
+     headline = "[@nvidia UnsupportedNvcfDatastoreException] Unsupported datastore"
+
+     def __init__(self, ds_type):
+         msg = (
+             "The *@nvidia* decorator requires --datastore=s3 or --datastore=azure or --datastore=gs at the moment."
+             f"Current datastore type: {ds_type}."
+         )
+         super(UnsupportedNvcfDatastoreException, self).__init__(msg)
+
+
+ class NvcfTimeoutTooShortException(MetaflowException):
+     headline = "[@nvidia NvcfTimeoutTooShortException] Timeout too short"
+
+     def __init__(self, step):
+         msg = (
+             "The timeout for step *{step}* should be at least 60 seconds for "
+             "execution with @nvidia".format(step=step)
+         )
+         super(NvcfTimeoutTooShortException, self).__init__(msg)
+
+
+ class NvcfKilledException(MetaflowException):
+     headline = "Nvidia job killed"
metaflow_extensions/outerbounds/plugins/nvcf/nvcf.py (modified)

@@ -5,7 +5,6 @@ from urllib.parse import urlparse
  from urllib.request import HTTPError, Request, URLError, urlopen

  from metaflow import util
- from metaflow.exception import MetaflowException
  from metaflow.mflog import (
      BASH_SAVE_LOGS,
      bash_capture_logs,
@@ -14,16 +13,7 @@ from metaflow.mflog import (
      get_log_tailer,
  )
  import requests
- from metaflow.metaflow_config_funcs import init_config
-
-
- class NvcfException(MetaflowException):
-     headline = "Nvidia error"
-
-
- class NvcfKilledException(MetaflowException):
-     headline = "Nvidia job killed"
-
+ from .exceptions import NvcfJobFailedException, NvcfPollingConnectionError

  # Redirect structured logs to $PWD/.logs/
  LOGS_DIR = "$PWD/.logs"
@@ -34,10 +24,12 @@ STDERR_PATH = os.path.join(LOGS_DIR, STDERR_FILE)


  class Nvcf(object):
-     def __init__(self, metadata, datastore, environment):
+     def __init__(self, metadata, datastore, environment, function_id, ngc_api_key):
          self.metadata = metadata
          self.datastore = datastore
          self.environment = environment
+         self._function_id = function_id
+         self._ngc_api_key = ngc_api_key

      def launch_job(
          self,
@@ -93,7 +85,12 @@ class Nvcf(object):
              % cmd_str
          )
          self.job = Job(
-             'bash -c "%s"' % cmd_str, env, task_spec, self.datastore._storage_impl
+             'bash -c "%s"' % cmd_str,
+             env,
+             task_spec,
+             self.datastore._storage_impl,
+             self._function_id,
+             self._ngc_api_key,
          )
          self.job.submit()

@@ -128,7 +125,9 @@ class Nvcf(object):
              _id=self.job.id,
          )
          if self.job.has_failed:
-             raise NvcfException("This could be a transient error. Use @retry to retry.")
+             raise NvcfJobFailedException(
+                 "This could be a transient error. Use @retry to retry."
+             )


  class JobStatus(object):
@@ -144,37 +143,14 @@ result_endpoint = f"{nvcf_url}/v2/nvcf/pexec/status"


  class Job(object):
-     def __init__(self, command, env, task_spec, backend):
+     def __init__(self, command, env, task_spec, backend, function_id, ngc_api_key):
          self._payload = {
              "command": command,
              "env": {k: v for k, v in env.items() if v is not None},
          }
          self._result = {}
-
-         conf = init_config()
-         if "OBP_AUTH_SERVER" in conf:
-             auth_host = conf["OBP_AUTH_SERVER"]
-         else:
-             auth_host = "auth." + urlparse(SERVICE_URL).hostname.split(".", 1)[1]
-
-         # NOTE: reusing the same auth_host as the one used in NimMetadata,
-         # however, user should not need to use nim container to use @nvidia.
-         # May want to refactor this to a common endpoint.
-         nim_info_url = "https://" + auth_host + "/generate/nim"
-
-         if "METAFLOW_SERVICE_AUTH_KEY" in conf:
-             headers = {"x-api-key": conf["METAFLOW_SERVICE_AUTH_KEY"]}
-             res = requests.get(nim_info_url, headers=headers)
-         else:
-             headers = json.loads(os.environ.get("METAFLOW_SERVICE_HEADERS"))
-             res = requests.get(nim_info_url, headers=headers)
-
-         res.raise_for_status()
-         self._ngc_api_key = res.json()["nvcf"]["api_key"]
-
-         for f in res.json()["nvcf"]["functions"]:
-             if f["model_key"] == "metaflow_task_executor":
-                 self._function_id = f["id"]
+         self._function_id = function_id
+         self._ngc_api_key = ngc_api_key

          flow_name = task_spec.get("flow_name")
          run_id = task_spec.get("run_id")
@@ -236,6 +212,8 @@ class Job(object):
                  self._status = JobStatus.FAILED
              # TODO: Handle 404s nicely
          except (HTTPError, URLError) as e:
+             # TODO: If queue is full, wait in line and retry?
+             # without that, branching over concurrent requests causes error.
              self._state = JobStatus.FAILED
              raise e

@@ -281,5 +259,9 @@ class Job(object):
                  self._result = data
              elif response.getcode() in [400, 500]:
                  self._status = JobStatus.FAILED
+             elif response.getcode() != 202:
+                 print(
+                     f"[@nvidia] Unexpected response code: {response.getcode()}. Please notify an Outerbounds support engineer if this error persists."
+                 )
          except (HTTPError, URLError) as e:
-             print(f"Error occurred while polling for result: {e}")
+             raise NvcfPollingConnectionError(e)
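Taken together, the polling changes encode a simple contract: 202 means the job is still executing, 400/500 mark it failed, other unexpected codes are logged and polling continues, and connection-level errors now surface as `NvcfPollingConnectionError` instead of being printed and swallowed. A stand-alone restatement (the helper and state names are invented, and 200-as-success is inferred from surrounding code not shown in this hunk):

```python
def classify(status_code):
    """Coarse job state implied by an NVCF pexec status code."""
    if status_code == 200:
        return "DONE"        # result payload available (inferred)
    if status_code in (400, 500):
        return "FAILED"
    if status_code == 202:
        return "PENDING"     # still executing; poll again
    return "UNEXPECTED"      # warned about, then polling continues


assert classify(202) == "PENDING"
assert classify(500) == "FAILED"
```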
metaflow_extensions/outerbounds/plugins/nvcf/nvcf_cli.py (modified)

@@ -31,7 +31,8 @@ from metaflow.metaflow_config import (
  )
  from metaflow.mflog import TASK_LOG_SOURCE

- from .nvcf import Nvcf, NvcfKilledException
+ from .nvcf import Nvcf
+ from .exceptions import NvcfKilledException


  @click.group()
@@ -108,6 +109,8 @@ def kill(ctx, run_id):
  @click.argument("step-name")
  @click.argument("code-package-sha")
  @click.argument("code-package-url")
+ @click.option("--function-id", help="NVCF function id.")
+ @click.option("--ngc-api-key", help="NGC API key.")
  @click.option("--run-id", help="Passed to the top-level 'step'.")
  @click.option("--task-id", help="Passed to the top-level 'step'.")
  @click.option("--input-paths", help="Passed to the top-level 'step'.")
@@ -123,7 +126,15 @@ def kill(ctx, run_id):
      "--max-user-code-retries", default=0, help="Passed to the top-level 'step'."
  )
  @click.pass_context
- def step(ctx, step_name, code_package_sha, code_package_url, **kwargs):
+ def step(
+     ctx,
+     step_name,
+     code_package_sha,
+     code_package_url,
+     function_id,
+     ngc_api_key,
+     **kwargs,
+ ):
      def echo(msg, stream="stderr", _id=None, **kwargs):
          msg = util.to_unicode(msg)
          if _id:
@@ -232,7 +243,13 @@ def step(ctx, step_name, code_package_sha, code_package_url, **kwargs):
          ),
      )

-     nvcf = Nvcf(ctx.obj.metadata, ctx.obj.flow_datastore, ctx.obj.environment)
+     nvcf = Nvcf(
+         ctx.obj.metadata,
+         ctx.obj.flow_datastore,
+         ctx.obj.environment,
+         function_id,
+         ngc_api_key,
+     )
      try:
          with ctx.obj.monitor.measure("metaflow.nvcf.launch_job"):
              nvcf.launch_job(
@@ -242,7 +259,6 @@ def step(ctx, step_name, code_package_sha, code_package_url, **kwargs):
                  code_package_sha,
                  code_package_url,
                  ctx.obj.flow_datastore.TYPE,
-                 # function_id=function_id,
                  env=env,
              )
      except Exception as e:
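Note that click maps dashed option names onto snake_case function parameters, which is how the new `--function-id` and `--ngc-api-key` options arrive as the `function_id` and `ngc_api_key` arguments of `step()`. A self-contained illustration of that mapping (the `demo` command is invented):

```python
import click


@click.command()
@click.option("--function-id", help="NVCF function id.")
@click.option("--ngc-api-key", help="NGC API key.")
def demo(function_id, ngc_api_key):
    # "--function-id" on the command line surfaces here as function_id.
    click.echo("function_id=%s" % function_id)


if __name__ == "__main__":
    demo()
```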
metaflow_extensions/outerbounds/plugins/nvcf/nvcf_decorator.py (modified)

@@ -1,23 +1,45 @@
  import os
  import sys
+ import json
+ import requests
+ from urllib.parse import urlparse

+ from metaflow import current
  from metaflow.exception import MetaflowException
  from metaflow.decorators import StepDecorator
+ from metaflow.plugins.parallel_decorator import ParallelDecorator
  from metaflow.metadata_provider.util import sync_local_metadata_to_datastore
  from metaflow.metaflow_config import DATASTORE_LOCAL_DIR
  from metaflow.sidecar import Sidecar
  from metaflow.plugins.timeout_decorator import get_run_time_limit_for_task
- from .nvcf import NvcfException
-
  from metaflow.metadata_provider import MetaDatum
+ from metaflow.metaflow_config_funcs import init_config
+ from .constants import SUPPORTABLE_GPU_TYPES, DEFAULT_GPU_TYPE
+ from .exceptions import (
+     RequestedGPUTypeUnavailableException,
+     UnsupportedNvcfConfigurationException,
+     UnsupportedNvcfDatastoreException,
+     NvcfTimeoutTooShortException,
+ )
+
+ from metaflow.metaflow_config import SERVICE_URL


  class NvcfDecorator(StepDecorator):
+
+     """
+     Specifies that this step should execute on DGX cloud.
+
+     Parameters
+     ----------
+     gpu : int
+         Number of GPUs to use.
+     gpu_type : str
+         Type of Nvidia GPU to use.
+     """
+
      name = "nvidia"
-     # defaults = {"function_id": "9e5647f2-740f-4101-a129-1c961a075575"}
-     defaults = {}
-     # "0817006f-018b-4590-b2a5-6cf9d64d9d9a"}
-     #
+     defaults = {"gpu": 1, "gpu_type": None}

      package_url = None
      package_sha = None
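With these defaults, a bare `@nvidia` now means one H100: `gpu_type=None` is resolved against `DEFAULT_GPU_TYPE` in `step_init` (next hunks). A two-line restatement of that resolution, outside the decorator machinery:

```python
# Mirrors the fallback in step_init below; not the package's own code.
attributes = {"gpu": 1, "gpu_type": None}   # what a bare @nvidia produces
requested_gpu_type = attributes["gpu_type"] or DEFAULT_GPU_TYPE
assert (attributes["gpu"], requested_gpu_type) == (1, "H100")
```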
@@ -28,13 +50,8 @@ class NvcfDecorator(StepDecorator):
      def step_init(self, flow, graph, step, decos, environment, flow_datastore, logger):
          # Executing NVCF functions requires a non-local datastore.
          if flow_datastore.TYPE not in ("s3", "azure", "gs"):
-             raise NvcfException(
-                 "The *@nvidia* decorator requires --datastore=s3 or --datastore=azure or --datastore=gs at the moment."
-             )
-         # if self.attributes["function_id"] is None:
-         #     raise NvcfException(
-         #         "The *@nvidia* decorator requires a function_id. Please reach out to Outerbounds if you are unsure how to get access to one."
-         #     )
+             raise UnsupportedNvcfDatastoreException(flow_datastore.TYPE)
+
          # Set internal state.
          self.logger = logger
          self.environment = environment
@@ -46,10 +63,10 @@
                  "Step *{step}* is marked for execution both on Kubernetes and "
                  "Nvidia. Please use one or the other.".format(step=step)
              )
-         if any([deco.name == "parallel" for deco in decos]):
+         if any([isinstance(deco, ParallelDecorator) for deco in decos]):
              raise MetaflowException(
                  "Step *{step}* contains a @parallel decorator "
-                 "with the @nvidia decorator. @parallel is not supported with @nvidia.".format(
+                 "with the @nvidia decorator. @parallel decorators are not currently supported with @nvidia.".format(
                      step=step
                  )
              )
@@ -57,10 +74,49 @@
          # Set run time limit for the NVCF function.
          self.run_time_limit = get_run_time_limit_for_task(decos)
          if self.run_time_limit < 60:
-             raise NvcfException(
-                 "The timeout for step *{step}* should be at least 60 seconds for "
-                 "execution with @nvidia.".format(step=step)
+             raise NvcfTimeoutTooShortException(step)
+
+         conf = init_config()
+         if "OBP_AUTH_SERVER" in conf:
+             auth_host = conf["OBP_AUTH_SERVER"]
+         else:
+             auth_host = "auth." + urlparse(SERVICE_URL).hostname.split(".", 1)[1]
+
+         # NOTE: reusing the same auth_host as the one used in NimMetadata,
+         # however, user should not need to use nim container to use @nvidia.
+         # May want to refactor this to a common endpoint.
+         nim_info_url = "https://" + auth_host + "/generate/nim"
+
+         if "METAFLOW_SERVICE_AUTH_KEY" in conf:
+             headers = {"x-api-key": conf["METAFLOW_SERVICE_AUTH_KEY"]}
+             res = requests.get(nim_info_url, headers=headers)
+         else:
+             headers = json.loads(os.environ.get("METAFLOW_SERVICE_HEADERS"))
+             res = requests.get(nim_info_url, headers=headers)
+
+         res.raise_for_status()
+         self.attributes["ngc_api_key"] = res.json()["nvcf"]["api_key"]
+
+         available_functions_info = res.json()["nvcf"]["functions"]
+         requested_gpu_type = self.attributes["gpu_type"]
+         n_gpu = self.attributes["gpu"]
+
+         if requested_gpu_type is None:
+             requested_gpu_type = DEFAULT_GPU_TYPE
+         if requested_gpu_type not in SUPPORTABLE_GPU_TYPES:
+             raise RequestedGPUTypeUnavailableException(requested_gpu_type)
+
+         desired_configuration = (n_gpu, requested_gpu_type)
+         available_configurations = {}
+         for f in available_functions_info:
+             if f["model_key"] == "metaflow_task_executor":
+                 available_configurations[(f["gpu"], f["gpu_type"])] = f["id"]
+
+         if desired_configuration not in available_configurations:
+             raise UnsupportedNvcfConfigurationException(
+                 n_gpu, requested_gpu_type, available_configurations, step
              )
+         self.attributes["function_id"] = available_configurations[desired_configuration]

      def runtime_init(self, flow, graph, package, run_id):
          # Set some more internal state.
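A worked example of the `auth_host` fallback above, with a made-up deployment hostname (in practice the value comes from `OBP_AUTH_SERVER` or is derived from `SERVICE_URL` in the Outerbounds config):

```python
from urllib.parse import urlparse

SERVICE_URL = "https://api.acme.outerbounds.dev/metadata"  # hypothetical

# Drop the first hostname label and prepend "auth."
auth_host = "auth." + urlparse(SERVICE_URL).hostname.split(".", 1)[1]
print(auth_host)                                 # auth.acme.outerbounds.dev
print("https://" + auth_host + "/generate/nim")  # https://auth.acme.outerbounds.dev/generate/nim
```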
@@ -85,8 +141,11 @@
          cli_args.commands = ["nvidia", "step"]
          cli_args.command_args.append(self.package_sha)
          cli_args.command_args.append(self.package_url)
-         cli_args.command_options.update(self.attributes)
-         # cli_args.command_options["run-time-limit"] = self.run_time_limit
+         cli_options = {
+             "function_id": self.attributes["function_id"],
+             "ngc_api_key": self.attributes["ngc_api_key"],
+         }
+         cli_args.command_options.update(cli_options)
          cli_args.entrypoint[0] = sys.executable

      def task_pre_step(
metaflow_extensions/outerbounds/plugins/nvcf/utils.py (added)

@@ -0,0 +1,6 @@
+ import sys
+
+
+ def warning_message(message, prefix="[@nvidia]"):
+     msg = "%s %s" % (prefix, message)
+     print(msg, file=sys.stderr)
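Usage is as simple as it looks; the helper writes to stderr so warnings do not mix into captured task stdout:

```python
warning_message("falling back to the default GPU type")
# stderr: [@nvidia] falling back to the default GPU type
```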
ob_metaflow_extensions-1.1.116.dist-info/METADATA (modified)

@@ -1,13 +1,13 @@
  Metadata-Version: 2.1
  Name: ob-metaflow-extensions
- Version: 1.1.114
+ Version: 1.1.116
  Summary: Outerbounds Platform Extensions for Metaflow
  Author: Outerbounds, Inc.
  License: Commercial
  Description-Content-Type: text/markdown
  Requires-Dist: boto3
  Requires-Dist: kubernetes
- Requires-Dist: ob-metaflow (==2.12.39.1)
+ Requires-Dist: ob-metaflow (==2.13.0.1)

  # Outerbounds platform package

ob_metaflow_extensions-1.1.116.dist-info/RECORD (modified)

@@ -14,10 +14,13 @@ metaflow_extensions/outerbounds/plugins/kubernetes/kubernetes_client.py,sha256=f
  metaflow_extensions/outerbounds/plugins/nim/__init__.py,sha256=GVnvSTjqYVj5oG2yh8KJFt7iZ33cEadDD5HbdmC9hJ0,1457
  metaflow_extensions/outerbounds/plugins/nim/nim_manager.py,sha256=SWieODDxtIaeZwdMYtObDi57Kjyfw2DUuE6pJtU750w,9206
  metaflow_extensions/outerbounds/plugins/nvcf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ metaflow_extensions/outerbounds/plugins/nvcf/constants.py,sha256=aGHdNw_hqBu8i0zWXcatQM6e769wUXox0l8g0f6fNZ8,146
+ metaflow_extensions/outerbounds/plugins/nvcf/exceptions.py,sha256=Wn5WvE_sY-L2jEz-iObMLii5Ds_HQJuE437ufadPFLk,3258
  metaflow_extensions/outerbounds/plugins/nvcf/heartbeat_store.py,sha256=pOWwm8LFQBbtku0zNBBwCyXxLK8U-hhC4naQcmU69nE,6217
- metaflow_extensions/outerbounds/plugins/nvcf/nvcf.py,sha256=1ayR5YLiEy3XRGVEnRUpsDCH_UyK7z-7A8L5GxI4qOE,9535
- metaflow_extensions/outerbounds/plugins/nvcf/nvcf_cli.py,sha256=dX14geUdLcj2T7epv0jDcdLfxiZ_AgK2w29JDcCbx5E,9143
- metaflow_extensions/outerbounds/plugins/nvcf/nvcf_decorator.py,sha256=IsUTT_ijpmSw6-15ldK8ZVcFIJDp9aOqrR3NSF0IoXU,6543
+ metaflow_extensions/outerbounds/plugins/nvcf/nvcf.py,sha256=zOIDbN4PhRM2VMHczfoAHUeo1df2UrqWMgTwcppsTwc,8990
+ metaflow_extensions/outerbounds/plugins/nvcf/nvcf_cli.py,sha256=5pLEekiw3krlwpcjfjjfUL-URep6soZgmfTqtzLz4Vo,9362
+ metaflow_extensions/outerbounds/plugins/nvcf/nvcf_decorator.py,sha256=yGv_6EmrBZNiQQP0rEWWE3akAL-KfI3Wd4ZFrcgl3VQ,8663
+ metaflow_extensions/outerbounds/plugins/nvcf/utils.py,sha256=DxWSCayfa95e0HJkWacey1s1nxoTpaunGhrb_0Ayv28,133
  metaflow_extensions/outerbounds/plugins/profilers/deco_injector.py,sha256=oI_C3c64XBm7n88FILqHwn-Nnc5DeT_68I67lM9rXaI,2434
  metaflow_extensions/outerbounds/plugins/profilers/gpu_profile_decorator.py,sha256=gDHQ2sMIp4NuZSzUspbSd8RGdFAoO5mgZAyFcZ2a51Y,2619
  metaflow_extensions/outerbounds/plugins/secrets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -41,7 +44,7 @@ metaflow_extensions/outerbounds/toplevel/plugins/azure/__init__.py,sha256=WUuhz2
  metaflow_extensions/outerbounds/toplevel/plugins/gcp/__init__.py,sha256=BbZiaH3uILlEZ6ntBLKeNyqn3If8nIXZFq_Apd7Dhco,70
  metaflow_extensions/outerbounds/toplevel/plugins/kubernetes/__init__.py,sha256=5zG8gShSj8m7rgF4xgWBZFuY3GDP5n1T0ktjRpGJLHA,69
  metaflow_extensions/outerbounds/toplevel/plugins/snowflake/__init__.py,sha256=LptpH-ziXHrednMYUjIaosS1SXD3sOtF_9_eRqd8SJw,50
- ob_metaflow_extensions-1.1.114.dist-info/METADATA,sha256=Z_l0voS_e6k7Z9jqOSBg6lTx2cxbsc2d-zSfoPdXC8A,521
- ob_metaflow_extensions-1.1.114.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110
- ob_metaflow_extensions-1.1.114.dist-info/top_level.txt,sha256=NwG0ukwjygtanDETyp_BUdtYtqIA_lOjzFFh1TsnxvI,20
- ob_metaflow_extensions-1.1.114.dist-info/RECORD,,
+ ob_metaflow_extensions-1.1.116.dist-info/METADATA,sha256=J5dqyrzD2HvLSgr8g8jgFCVnfdQ6K1p1SwdKIA8t0F8,520
+ ob_metaflow_extensions-1.1.116.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110
+ ob_metaflow_extensions-1.1.116.dist-info/top_level.txt,sha256=NwG0ukwjygtanDETyp_BUdtYtqIA_lOjzFFh1TsnxvI,20
+ ob_metaflow_extensions-1.1.116.dist-info/RECORD,,