runnable 0.10.0-py3-none-any.whl → 0.11.1-py3-none-any.whl

runnable/sdk.py CHANGED
@@ -15,10 +15,17 @@ from pydantic import (
     field_validator,
     model_validator,
 )
-from rich import print
+from rich.progress import (
+    BarColumn,
+    Progress,
+    SpinnerColumn,
+    TextColumn,
+    TimeElapsedColumn,
+)
+from rich.table import Column
 from typing_extensions import Self
 
-from runnable import defaults, entrypoints, graph, utils
+from runnable import console, defaults, entrypoints, graph, utils
 from runnable.extensions.nodes import (
     FailNode,
     MapNode,
@@ -39,6 +46,10 @@ def pickled(name: str) -> TaskReturns:
     return TaskReturns(name=name, kind="object")
 
 
+def metric(name: str) -> TaskReturns:
+    return TaskReturns(name=name, kind="metric")
+
+
 class Catalog(BaseModel):
     """
     Use to instruct a task to sync data from/to the central catalog.
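The new `metric` helper sits alongside the existing `pickled`: it tags a task return as kind `"metric"` so the value is recorded as a user-defined metric on the attempt log rather than as an ordinary parameter. A minimal usage sketch; the `train` function, its module, and the return names are illustrative, not part of this diff:

```python
from runnable import PythonTask, metric, pickled

from my_module import train  # hypothetical function returning (model, accuracy)

task = PythonTask(
    name="train",
    function=train,
    returns=[pickled("model"), metric("accuracy")],  # accuracy is captured as a metric
    terminate_with_success=True,
)
```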
@@ -65,7 +76,7 @@ class Catalog(BaseModel):
 
 class BaseTraversal(ABC, BaseModel):
     name: str
-    next_node: str = Field(default="", alias="next")
+    next_node: str = Field(default="", serialization_alias="next_node")
     terminate_with_success: bool = Field(default=False, exclude=True)
     terminate_with_failure: bool = Field(default=False, exclude=True)
     on_failure: str = Field(default="", alias="on_failure")
@@ -77,6 +88,12 @@ class BaseTraversal(ABC, BaseModel):
     def internal_name(self) -> str:
         return self.name
 
+    def __hash__(self):
+        """
+        Needed to Uniqueize DataCatalog objects.
+        """
+        return hash(self.name)
+
     def __rshift__(self, other: StepType) -> StepType:
         if self.next_node:
             raise Exception(f"The node {self} already has a next node: {self.next_node}")
@@ -174,6 +191,7 @@ class BaseTask(BaseTraversal):
     catalog: Optional[Catalog] = Field(default=None, alias="catalog")
     overrides: Dict[str, Any] = Field(default_factory=dict, alias="overrides")
     returns: List[Union[str, TaskReturns]] = Field(default_factory=list, alias="returns")
+    secrets: List[str] = Field(default_factory=list)
 
     @field_validator("returns", mode="before")
     @classmethod
@@ -195,7 +213,7 @@ class BaseTask(BaseTraversal):
         if not (self.terminate_with_failure or self.terminate_with_success):
             raise AssertionError("A node not being terminated must have a user defined next node")
 
-        return TaskNode.parse_from_config(self.model_dump(exclude_none=True))
+        return TaskNode.parse_from_config(self.model_dump(exclude_none=True, by_alias=True))
 
 
 class PythonTask(BaseTask):
@@ -291,9 +309,9 @@ class NotebookTask(BaseTask):
 
     """
 
-    notebook: str = Field(alias="command")
+    notebook: str = Field(serialization_alias="command")
 
-    notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path")
+    notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path", validate_default=True)
     optional_ploomber_args: Optional[Dict[str, Any]] = Field(default=None, alias="optional_ploomber_args")
 
     @computed_field
@@ -360,7 +378,7 @@ class Stub(BaseTraversal):
 
     """
 
-    model_config = ConfigDict(extra="allow")
+    model_config = ConfigDict(extra="ignore")
     catalog: Optional[Catalog] = Field(default=None, alias="catalog")
 
     def create_node(self) -> StubNode:
@@ -520,7 +538,7 @@ class Pipeline(BaseModel):
     _dag: graph.Graph = PrivateAttr()
     model_config = ConfigDict(extra="forbid")
 
-    def _validate_path(self, path: List[StepType]) -> None:
+    def _validate_path(self, path: List[StepType], failure_path: bool = False) -> None:
         # Check if one and only one step terminates with success
         # Check no more than one step terminates with failure
 
@@ -538,7 +556,7 @@ class Pipeline(BaseModel):
                 raise Exception("A pipeline cannot have more than one step that terminates with failure")
             reached_failure = True
 
-        if not reached_success:
+        if not reached_success and not reached_failure:
             raise Exception("A pipeline must have at least one step that terminates with success")
 
     def _construct_path(self, path: List[StepType]) -> None:
@@ -588,11 +606,21 @@ class Pipeline(BaseModel):
 
         # Check all paths are valid and construct the path
         paths = [success_path] + on_failure_paths
+        failure_path = False
         for path in paths:
-            self._validate_path(path)
+            self._validate_path(path, failure_path)
             self._construct_path(path)
 
-        all_steps: List[StepType] = [step for step in success_path + on_failure_paths]  # type: ignore
+            failure_path = True
+
+        all_steps: List[StepType] = []
+
+        for path in paths:
+            for step in path:
+                all_steps.append(step)
+
+        seen = set()
+        unique = [x for x in all_steps if not (x in seen or seen.add(x))]  # type: ignore
 
         self._dag = graph.Graph(
             start_at=all_steps[0].name,
@@ -600,7 +628,7 @@ class Pipeline(BaseModel):
             internal_branch_name=self.internal_branch_name,
         )
 
-        for step in all_steps:
+        for step in unique:
             self._dag.add_node(step.create_node())
 
         if self.add_terminal_nodes:
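The `seen`/`seen.add` comprehension above is the standard order-preserving deduplication idiom: `set.add` returns `None`, so the `or` clause both records the element and stays falsy, keeping only first occurrences while preserving traversal order (a plain `set()` would lose the ordering that `start_at=all_steps[0].name` depends on). Standalone:

```python
steps = ["extract", "transform", "extract", "load"]

seen = set()
unique = [x for x in steps if not (x in seen or seen.add(x))]
assert unique == ["extract", "transform", "load"]  # first occurrences, in order
```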
@@ -669,8 +697,9 @@ class Pipeline(BaseModel):
 
         run_context.dag = graph.create_graph(dag_definition)
 
-        print("Working with context:")
-        print(run_context)
+        console.print("Working with context:")
+        console.print(run_context)
+        console.rule(style="[dark orange]")
 
         if not run_context.executor._local:
             # We are not working with non local executor
@@ -686,8 +715,28 @@ class Pipeline(BaseModel):
         # Prepare for graph execution
         run_context.executor.prepare_for_graph_execution()
 
-        logger.info("Executing the graph")
-        run_context.executor.execute_graph(dag=run_context.dag)
+        with Progress(
+            SpinnerColumn(spinner_name="runner"),
+            TextColumn("[progress.description]{task.description}", table_column=Column(ratio=2)),
+            BarColumn(table_column=Column(ratio=1), style="dark_orange"),
+            TimeElapsedColumn(table_column=Column(ratio=1)),
+            console=console,
+            expand=True,
+        ) as progress:
+            try:
+                run_context.progress = progress
+                pipeline_execution_task = progress.add_task("[dark_orange] Starting execution .. ", total=1)
+                run_context.executor.execute_graph(dag=run_context.dag)
+
+                run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
+
+                if run_log.status == defaults.SUCCESS:
+                    progress.update(pipeline_execution_task, description="[green] Success", completed=True)
+                else:
+                    progress.update(pipeline_execution_task, description="[red] Failed", completed=True)
+            except Exception as e:  # noqa: E722
+                console.print(e, style=defaults.error_style)
+                progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True)
 
         if run_context.executor._local:
             return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id)
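The executor loop now renders through a `rich` progress display instead of a bare `logger.info`. A self-contained sketch of the same column layout, with a sleep standing in for `execute_graph`; the styles and the `runner` spinner follow the diff, everything else is illustrative:

```python
import time

from rich.console import Console
from rich.progress import (
    BarColumn,
    Progress,
    SpinnerColumn,
    TextColumn,
    TimeElapsedColumn,
)
from rich.table import Column

console = Console()

with Progress(
    SpinnerColumn(spinner_name="runner"),
    TextColumn("[progress.description]{task.description}", table_column=Column(ratio=2)),
    BarColumn(table_column=Column(ratio=1), style="dark_orange"),
    TimeElapsedColumn(table_column=Column(ratio=1)),
    console=console,
    expand=True,
) as progress:
    task_id = progress.add_task("[dark_orange] Starting execution .. ", total=1)
    time.sleep(1)  # stand-in for run_context.executor.execute_graph(...)
    progress.update(task_id, description="[green] Success", completed=True)
```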
runnable/tasks.py CHANGED
@@ -9,14 +9,21 @@ import sys
 from datetime import datetime
 from pickle import PicklingError
 from string import Template
-from typing import Any, Dict, List, Literal, Tuple
+from typing import Any, Dict, List, Literal, Optional, Tuple
 
 from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
+from rich.console import Console
 from stevedore import driver
 
 import runnable.context as context
-from runnable import defaults, parameters, utils
-from runnable.datastore import JsonParameter, ObjectParameter, Parameter, StepAttempt
+from runnable import defaults, exceptions, parameters, utils
+from runnable.datastore import (
+    JsonParameter,
+    MetricParameter,
+    ObjectParameter,
+    Parameter,
+    StepAttempt,
+)
 from runnable.defaults import TypeMapVariable
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -26,9 +33,12 @@ logging.getLogger("stevedore").setLevel(logging.CRITICAL)
 # TODO: Can we add memory peak, cpu usage, etc. to the metrics?
 
 
+console = Console(file=io.StringIO())
+
+
 class TaskReturns(BaseModel):
     name: str
-    kind: Literal["json", "object"] = Field(default="json")
+    kind: Literal["json", "object", "metric"] = Field(default="json")
 
 
 class BaseTaskType(BaseModel):
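Routing the module-level `Console` into an `io.StringIO` buffers everything printed through rich instead of writing it to the terminal; `execution_context` later drains the buffer with `console.file.getvalue()` so task output lands in the captured execution log. The pattern in isolation:

```python
import io

from rich.console import Console

console = Console(file=io.StringIO())
console.print("buffered, not shown yet")

captured = console.file.getvalue()  # drain everything printed so far
print(captured)
```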
@@ -36,11 +46,14 @@ class BaseTaskType(BaseModel):
 
     task_type: str = Field(serialization_alias="command_type")
     node_name: str = Field(exclude=True)
-    secrets: Dict[str, str] = Field(default_factory=dict)
+    secrets: List[str] = Field(default_factory=list)
     returns: List[TaskReturns] = Field(default_factory=list, alias="returns")
 
     model_config = ConfigDict(extra="forbid")
 
+    def get_summary(self) -> Dict[str, Any]:
+        return self.model_dump(by_alias=True, exclude_none=True)
+
     @property
     def _context(self):
         return context.run_context
@@ -60,15 +73,14 @@ class BaseTaskType(BaseModel):
         raise NotImplementedError()
 
     def set_secrets_as_env_variables(self):
-        for key, value in self.secrets.items():
+        for key in self.secrets:
             secret_value = context.run_context.secrets_handler.get(key)
-            self.secrets[value] = secret_value
-            os.environ[value] = secret_value
+            os.environ[key] = secret_value
 
     def delete_secrets_from_env_variables(self):
-        for _, value in self.secrets.items():
-            if value in os.environ:
-                del os.environ[value]
+        for key in self.secrets:
+            if key in os.environ:
+                del os.environ[key]
 
     def execute_command(
         self,
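`secrets` changing from `Dict[str, str]` to `List[str]` simplifies the contract: each listed secret name is resolved through the secrets handler and exported under that same name, rather than being remapped through the dict's values (the old code also mutated `self.secrets` as a side effect). A sketch of the new behavior with a stubbed handler; the names and values are illustrative:

```python
import os

secrets = ["API_KEY", "DB_PASSWORD"]  # names as a task would declare them
fake_handler = {"API_KEY": "abc123", "DB_PASSWORD": "hunter2"}  # stand-in for secrets_handler

for key in secrets:
    os.environ[key] = fake_handler[key]  # env var name == secret name

assert os.environ["API_KEY"] == "abc123"
```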
@@ -98,12 +110,15 @@ class BaseTaskType(BaseModel):
         self.set_secrets_as_env_variables()
         try:
             yield
+        except Exception as e:  # pylint: disable=broad-except
+            logger.exception(e)
         finally:
             self.delete_secrets_from_env_variables()
 
     @contextlib.contextmanager
     def execution_context(self, map_variable: TypeMapVariable = None, allow_complex: bool = True):
         params = self._context.run_log_store.get_parameters(run_id=self._context.run_id).copy()
+        logger.info(f"Parameters available for the execution: {params}")
 
         for param_name, param in params.items():
             # Any access to unreduced param should be replaced.
@@ -118,20 +133,28 @@ class BaseTaskType(BaseModel):
             if context_param in params:
                 params[param_name].value = params[context_param].value
 
+        logger.debug(f"Resolved parameters: {params}")
+
         if not allow_complex:
             params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)}
 
-        log_file_name = self.node_name.replace(" ", "_") + ".execution.log"
+        log_file_name = self.node_name  # + ".execution.log"
         if map_variable:
             for _, value in map_variable.items():
                 log_file_name += "_" + str(value)
 
+        log_file_name = "".join(x for x in log_file_name if x.isalnum()) + ".execution.log"
+
         log_file = open(log_file_name, "w")
 
         f = io.StringIO()
         try:
             with contextlib.redirect_stdout(f):
+                # with contextlib.nullcontext():
                 yield params
+                print(console.file.getvalue())  # type: ignore
+        except Exception as e:  # pylint: disable=broad-except
+            logger.exception(e)
         finally:
             print(f.getvalue())  # print to console
             log_file.write(f.getvalue())  # Print to file
@@ -140,15 +163,13 @@ class BaseTaskType(BaseModel):
             log_file.close()
 
             # Put the log file in the catalog
-            catalog_handler = context.run_context.catalog_handler
-            catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id)
+            self._context.catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id)
             os.remove(log_file.name)
 
             # Update parameters
+            # This should only update the parameters that are changed at the root level.
             self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id)
 
-        return True  # To suppress exceptions
-
 
 def task_return_to_parameter(task_return: TaskReturns, value: Any) -> Parameter:
     # implicit support for pydantic models
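One subtlety in this hunk: with `@contextlib.contextmanager`, an exception raised in the `with` body is re-thrown at the `yield`, and it is suppressed only if the generator itself catches it. The removed trailing `return True` never ran on the error path, so the newly added `except Exception` clause is what actually swallows and logs body failures. A self-contained illustration:

```python
import contextlib


@contextlib.contextmanager
def execution_context():
    try:
        yield
    except Exception as e:  # the with-body's exception lands here
        print(f"logged and suppressed: {e}")
    finally:
        print("cleanup always runs")


with execution_context():
    raise ValueError("task failed")
print("control flow continues past the with block")
```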
@@ -161,6 +182,9 @@ def task_return_to_parameter(task_return: TaskReturns, value: Any) -> Parameter:
     if task_return.kind == "json":
         return JsonParameter(kind="json", value=value)
 
+    if task_return.kind == "metric":
+        return MetricParameter(kind="metric", value=value)
+
     if task_return.kind == "object":
         obj = ObjectParameter(value=task_return.name, kind="object")
         obj.put_object(data=value)
@@ -197,13 +221,22 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
         imported_module = importlib.import_module(module)
         f = getattr(imported_module, func)
 
-        filtered_parameters = parameters.filter_arguments_for_func(f, params.copy(), map_variable)
-        logger.info(f"Calling {func} from {module} with {filtered_parameters}")
-
         try:
-            user_set_parameters = f(**filtered_parameters)  # This is a tuple or single value
+            try:
+                filtered_parameters = parameters.filter_arguments_for_func(f, params.copy(), map_variable)
+                logger.info(f"Calling {func} from {module} with {filtered_parameters}")
+                user_set_parameters = f(**filtered_parameters)  # This is a tuple or single value
+            except Exception as e:
+                console.log(e, style=defaults.error_style, markup=False)
+                raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e
+
             attempt_log.input_parameters = params.copy()
 
+            if map_variable:
+                attempt_log.input_parameters.update(
+                    {k: JsonParameter(value=v, kind="json") for k, v in map_variable.items()}
+                )
+
             if self.returns:
                 if not isinstance(user_set_parameters, tuple):  # make it a tuple
                     user_set_parameters = (user_set_parameters,)
@@ -212,6 +245,7 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
                     raise ValueError("Returns task signature does not match the function returns")
 
                 output_parameters: Dict[str, Parameter] = {}
+                metrics: Dict[str, Parameter] = {}
 
                 for i, task_return in enumerate(self.returns):
                     output_parameter = task_return_to_parameter(
@@ -219,6 +253,9 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
                         value=user_set_parameters[i],
                     )
 
+                    if task_return.kind == "metric":
+                        metrics[task_return.name] = output_parameter
+
                     param_name = task_return.name
                     if map_variable:
                         for _, v in map_variable.items():
@@ -227,14 +264,15 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
                     output_parameters[param_name] = output_parameter
 
                 attempt_log.output_parameters = output_parameters
+                attempt_log.user_defined_metrics = metrics
                 params.update(output_parameters)
 
             attempt_log.status = defaults.SUCCESS
         except Exception as _e:
-            msg = f"Call to the function {self.command} with {filtered_parameters} did not succeed.\n"
-            logger.exception(msg)
-            logger.exception(_e)
-            attempt_log.status = defaults.FAIL
+            msg = f"Call to the function {self.command} did not succeed.\n"
+            attempt_log.message = msg
+            console.print_exception(show_locals=False)
+            console.log(_e, style=defaults.error_style)
 
         attempt_log.end_time = str(datetime.now())
 
@@ -246,7 +284,7 @@ class NotebookTaskType(BaseTaskType):
 
     task_type: str = Field(default="notebook", serialization_alias="command_type")
     command: str
-    notebook_output_path: str = Field(default="", validate_default=True)
+    notebook_output_path: Optional[str] = Field(default=None, validate_default=True)
     optional_ploomber_args: dict = {}
 
     @field_validator("command")
@@ -288,7 +326,7 @@ class NotebookTaskType(BaseTaskType):
         import ploomber_engine as pm
         from ploomber_engine.ipython import PloomberClient
 
-        notebook_output_path = self.notebook_output_path
+        notebook_output_path = self.notebook_output_path or ""
 
         with self.execution_context(
             map_variable=map_variable, allow_complex=False
@@ -296,7 +334,6 @@ class NotebookTaskType(BaseTaskType):
             if map_variable:
                 for key, value in map_variable.items():
                     notebook_output_path += "_" + str(value)
-
                     params[key] = value
 
             notebook_params = {k: v.get_value() for k, v in params.items()}
@@ -394,15 +431,17 @@ class ShellTaskType(BaseTaskType):
 
         # Expose secrets as environment variables
         if self.secrets:
-            for key, value in self.secrets.items():
+            for key in self.secrets:
                 secret_value = context.run_context.secrets_handler.get(key)
-                subprocess_env[value] = secret_value
+                subprocess_env[key] = secret_value
 
         with self.execution_context(map_variable=map_variable, allow_complex=False) as params:
             subprocess_env.update({k: v.get_value() for k, v in params.items()})
 
             # Json dumps all runnable environment variables
             for key, value in subprocess_env.items():
+                if isinstance(value, str):
+                    continue
                 subprocess_env[key] = json.dumps(value)
 
             collect_delimiter = "=== COLLECT ==="
@@ -411,37 +450,80 @@ class ShellTaskType(BaseTaskType):
             logger.info(f"Executing shell command: {command}")
 
             capture = False
-            return_keys = [x.name for x in self.returns]
+            return_keys = {x.name: x for x in self.returns}
 
-            with subprocess.Popen(
+            proc = subprocess.Popen(
                 command,
                 shell=True,
                 env=subprocess_env,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
                 text=True,
-            ) as proc:
-                for line in proc.stdout:  # type: ignore
-                    logger.info(line)
-                    print(line)
-
-                    if line.strip() == collect_delimiter:
-                        # The lines from now on should be captured
-                        capture = True
-                        continue
-
-                    if capture:
-                        key, value = line.strip().split("=", 1)
-                        if key in (return_keys or []):
-                            param_name = Template(key).safe_substitute(map_variable)  # type: ignore
-                            try:
-                                params[param_name] = JsonParameter(kind="json", value=json.loads(value))
-                            except json.JSONDecodeError:
-                                params[param_name] = JsonParameter(kind="json", value=value)
-
-                proc.wait()
-                if proc.returncode == 0:
-                    attempt_log.status = defaults.SUCCESS
+            )
+            result = proc.communicate()
+            logger.debug(result)
+            logger.info(proc.returncode)
+
+            if proc.returncode != 0:
+                msg = ",".join(result[1].split("\n"))
+                attempt_log.status = defaults.FAIL
+                attempt_log.end_time = str(datetime.now())
+                attempt_log.message = msg
+                console.print(msg, style=defaults.error_style)
+                return attempt_log
+
+            # for stderr
+            for line in result[1].split("\n"):
+                if line.strip() == "":
+                    continue
+                console.print(line, style=defaults.warning_style)
+
+            output_parameters: Dict[str, Parameter] = {}
+            metrics: Dict[str, Parameter] = {}
+
+            # only from stdout
+            for line in result[0].split("\n"):
+                if line.strip() == "":
+                    continue
+
+                logger.info(line)
+                console.print(line)
+
+                if line.strip() == collect_delimiter:
+                    # The lines from now on should be captured
+                    capture = True
+                    continue
+
+                if capture:
+                    key, value = line.strip().split("=", 1)
+                    if key in return_keys:
+                        task_return = return_keys[key]
+
+                        try:
+                            value = json.loads(value)
+                        except json.JSONDecodeError:
+                            value = value
+
+                        output_parameter = task_return_to_parameter(
+                            task_return=task_return,
+                            value=value,
+                        )
+
+                        if task_return.kind == "metric":
+                            metrics[task_return.name] = output_parameter
+
+                        param_name = task_return.name
+                        if map_variable:
+                            for _, v in map_variable.items():
+                                param_name = f"{param_name}_{v}"
+
+                        output_parameters[param_name] = output_parameter
+
+            attempt_log.output_parameters = output_parameters
+            attempt_log.user_defined_metrics = metrics
+            params.update(output_parameters)
+
+            attempt_log.status = defaults.SUCCESS
 
         attempt_log.end_time = str(datetime.now())
         return attempt_log
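The `=== COLLECT ===` protocol is unchanged in spirit but now runs over the buffered `communicate()` output: anything the script writes to stdout after the delimiter is parsed as `key=value`, matched against the declared `returns` (now a name-to-`TaskReturns` dict, which is what lets metrics flow through here too), and JSON-decoded when possible. A hedged wiring example; the task name and command are illustrative, and `ShellTask` is assumed to be the SDK-level counterpart of this task type:

```python
from runnable import ShellTask, metric

task = ShellTask(
    name="evaluate",
    command=(
        "echo 'running evaluation'; "
        "echo '=== COLLECT ==='; "  # everything after this line is parsed
        "echo 'accuracy=0.92'"      # matched against returns, JSON-decoded to 0.92
    ),
    returns=[metric("accuracy")],
    terminate_with_success=True,
)
```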
runnable/utils.py CHANGED
@@ -538,7 +538,7 @@ def get_provider_by_name_and_type(service_type: str, service_details: defaults.S
     if "config" in service_details:
         service_config = service_details.get("config", {})
 
-    logger.info(f"Trying to get a service of {service_type} of the name {service_name} with config: {service_config}")
+    logger.debug(f"Trying to get a service of {service_type} of the name {service_name} with config: {service_config}")
     try:
         mgr = driver.DriverManager(
             namespace=namespace,
@@ -548,6 +548,7 @@ def get_provider_by_name_and_type(service_type: str, service_details: defaults.S
         )
         return mgr.driver
     except Exception as _e:
+        logger.exception(f"Could not find the service of type: {service_type} with config: {service_details}")
         raise Exception(f"Could not find the service of type: {service_type} with config: {service_details}") from _e
 
 
{runnable-0.10.0.dist-info → runnable-0.11.1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: runnable
-Version: 0.10.0
+Version: 0.11.1
 Summary: A Compute agnostic pipelining software
 Home-page: https://github.com/vijayvammi/runnable
 License: Apache-2.0
@@ -15,13 +15,12 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: database
 Provides-Extra: docker
-Provides-Extra: mlflow
 Provides-Extra: notebook
 Requires-Dist: click
 Requires-Dist: click-plugins (>=1.1.1,<2.0.0)
 Requires-Dist: dill (>=0.3.8,<0.4.0)
 Requires-Dist: docker ; extra == "docker"
-Requires-Dist: mlflow-skinny ; extra == "mlflow"
+Requires-Dist: mlflow-skinny
 Requires-Dist: ploomber-engine (>=0.0.31,<0.0.32) ; extra == "notebook"
 Requires-Dist: pydantic (>=2.5,<3.0)
 Requires-Dist: rich (>=13.5.2,<14.0.0)