gllm-pipeline-binary 0.4.22.post1-cp312-cp312-win_amd64.whl → 0.4.24-cp312-cp312-win_amd64.whl

This diff compares the content of two publicly released versions of the package as they appear in their public registry. The information is provided for informational purposes only.
gllm_pipeline/pipeline/pipeline.pyi CHANGED
@@ -1,4 +1,5 @@
  from _typeshed import Incomplete
+ from gllm_core.schema.tool import Tool
  from gllm_datastore.cache.cache import BaseCache as BaseCache
  from gllm_pipeline.alias import PipelineState as PipelineState
  from gllm_pipeline.exclusions import ExclusionManager as ExclusionManager, ExclusionSet as ExclusionSet
@@ -8,11 +9,13 @@ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineSt
  from gllm_pipeline.steps.terminator_step import TerminatorStep as TerminatorStep
  from gllm_pipeline.utils.graph import create_edge as create_edge
  from gllm_pipeline.utils.mermaid import MERMAID_HEADER as MERMAID_HEADER, combine_mermaid_diagrams as combine_mermaid_diagrams, extract_step_diagrams as extract_step_diagrams
+ from gllm_pipeline.utils.typing_compat import TypedDict as TypedDict, is_typeddict as is_typeddict
  from langgraph.graph import StateGraph
  from langgraph.graph.state import CompiledStateGraph as CompiledStateGraph
  from pydantic import BaseModel
- from typing import Any, TypedDict
+ from typing import Any

+ DEFAULT_TOOL_NAME: str
  INDENTATION: str

  class Pipeline:
@@ -192,6 +195,43 @@ class Pipeline:
          Returns:
              Composer: A composer instance that manages this pipeline.
          """
+     def as_tool(self, description: str | None = None) -> Tool:
+         '''Convert the pipeline to a Tool instance.
+
+         This method allows a Pipeline instance to be used as a tool, with the input schema
+         being derived from the pipeline\'s input schema. The pipeline must have an input_type
+         defined to be convertible as a tool.
+
+         Args:
+             description (str | None, optional): Optional description to associate with the tool.
+                 Defaults to None, in which case a description will be generated automatically.
+
+         Returns:
+             Tool: A Tool instance that wraps the pipeline.
+
+         Raises:
+             ValueError: If the pipeline does not have an input schema defined.
+
+         Examples:
+             ```python
+             class InputState(TypedDict):
+                 user_query: str
+                 context: str
+
+             class OutputState(TypedDict):
+                 result: str
+
+             pipeline = Pipeline(
+                 [retrieval_step, generation_step],
+                 input_type=InputState,
+                 output_type=OutputState,
+                 name="rag_pipeline"
+             )
+
+             tool = pipeline.as_tool(description="Process user questions")
+             result = await tool.invoke(user_query="What is AI?", context="")
+             ```
+         '''
      def clear(self) -> None:
          """Clears the pipeline by resetting steps, graph, and app to their initial state.

@@ -199,14 +239,17 @@
          invalidating any built graph or compiled app. Useful for reusing a pipeline
          instance with different configurations.
          """
-     async def invoke(self, initial_state: PipelineState, config: dict[str, Any] | None = None) -> dict[str, Any]:
+     async def invoke(self, initial_state: PipelineState, config: dict[str, Any] | None = None, thread_id: str | None = None) -> dict[str, Any]:
          '''Runs the pipeline asynchronously with the given initial state and configuration.

          Args:
              initial_state (PipelineState): The initial state to start the pipeline with.
                  This initial state should comply with the state type of the pipeline.
              config (dict[str, Any], optional): Additional configuration for the pipeline. User-defined config should not
-                 have "langraph_" prefix as it should be reserved for internal use. Defaults to None.
+                 have "langgraph_" prefix as it should be reserved for internal use. Defaults to None.
+             thread_id (str | None, optional): The thread ID for this specific pipeline invocation. This will be passed
+                 in the invocation_config.configurable when invoking the pipeline. Useful for checkpointing and
+                 tracking related invocations. Defaults to None.

          Returns:
              dict[str, Any]: The final state after the pipeline execution.
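
The `thread_id` parameter added to `invoke` is forwarded under the invocation config's `configurable` mapping, which is how LangGraph scopes checkpoints and traces to a conversation. A minimal usage sketch, assuming a built `Pipeline` in the style of the `as_tool` docstring example above (the `pipeline` object, state values, and the `timeout` config key are illustrative, not part of the package):

```python
# Sketch, not the library's documented example: `pipeline` is assumed to be a
# built Pipeline whose input_type expects "user_query" and "context" keys.
final_state = await pipeline.invoke(
    {"user_query": "What is AI?", "context": ""},
    config={"timeout": 30},        # illustrative user key; must not use the "langgraph_" prefix
    thread_id="conversation-42",   # forwarded in the invocation config's `configurable`
)
print(final_state["result"])
```

Reusing one `thread_id` across calls lets a checkpointing backend associate related invocations, per the docstring above.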

gllm_pipeline/pipeline/states.pyi CHANGED
@@ -1,7 +1,8 @@
  from _typeshed import Incomplete
  from gllm_core.event.event_emitter import EventEmitter
+ from gllm_pipeline.utils.typing_compat import TypedDict as TypedDict
  from pydantic import BaseModel
- from typing import Any, TypedDict
+ from typing import Any

  class RAGState(TypedDict):
      '''A TypedDict representing the state of a Retrieval-Augmented Generation (RAG) pipeline.

gllm_pipeline/steps/_func.pyi CHANGED
@@ -665,7 +665,7 @@ def subgraph(subgraph: Pipeline, input_state_map: dict[str, str] | None = None,

      Examples:
          ```python
-         from typing import TypedDict
+         from gllm_pipeline.utils.typing_compat import TypedDict
          from gllm_pipeline.pipeline.pipeline import Pipeline

          # Define state schemas using TypedDict

gllm_pipeline/steps/component_step.pyi CHANGED
@@ -9,6 +9,7 @@ from gllm_pipeline.utils.error_handling import ErrorContext as ErrorContext
  from gllm_pipeline.utils.has_inputs_mixin import HasInputsMixin as HasInputsMixin
  from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
  from langgraph.runtime import Runtime
+ from langgraph.types import RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any

@@ -60,7 +61,7 @@ class ComponentStep(BasePipelineStep, HasInputsMixin):
          cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
              Defaults to None, in which case no cache configuration is used.
      '''
-     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any] | None:
+     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any] | None:
          """Executes the component and processes its output.

          This method validates inputs, prepares data, executes the component, and formats the output for integration
@@ -69,6 +70,7 @@ class ComponentStep(BasePipelineStep, HasInputsMixin):
          Args:
              state (PipelineState): The current state of the pipeline, containing all data.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              dict[str, Any] | None: The update to the pipeline state after this step's operation, or None if

gllm_pipeline/steps/conditional_step.pyi CHANGED
@@ -18,7 +18,7 @@ from gllm_pipeline.utils.mermaid import MERMAID_HEADER as MERMAID_HEADER
  from gllm_pipeline.utils.step_execution import execute_sequential_steps as execute_sequential_steps
  from langgraph.graph import StateGraph as StateGraph
  from langgraph.runtime import Runtime as Runtime
- from langgraph.types import Command, RetryPolicy as RetryPolicy
+ from langgraph.types import Command, RetryPolicy as RetryPolicy, RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any, Callable

@@ -122,17 +122,18 @@ class ConditionalStep(BranchingStep, HasInputsMixin):
          cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
              Defaults to None, in which case no cache configuration is used.
      '''
-     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> Command:
+     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> Command:
          """Executes the conditional step, determines the route, and returns a Command.

          Args:
              state (PipelineState): The current state of the pipeline.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              Command: A LangGraph Command object with 'goto' for routing and 'update' for state changes.
          """
-     async def execute_direct(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any] | None:
+     async def execute_direct(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any] | None:
          """Execute this step directly, handling both branch selection and execution.

          This method is used when the step needs to be executed directly (e.g. in parallel execution).
@@ -141,6 +142,7 @@ class ConditionalStep(BranchingStep, HasInputsMixin):
          Args:
              state (dict[str, Any]): The current state of the pipeline.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              dict[str, Any] | None: Updates to apply to the pipeline state, or None if no updates.

gllm_pipeline/steps/log_step.pyi CHANGED
@@ -5,6 +5,7 @@ from gllm_pipeline.alias import PipelineState as PipelineState
  from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
  from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
  from langgraph.runtime import Runtime as Runtime
+ from langgraph.types import RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any

@@ -38,12 +39,13 @@ class LogStep(BasePipelineStep):
          cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
              Defaults to None, in which case no cache configuration is used.
      '''
-     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> None:
+     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> None:
          """Executes the log step by formatting and emitting the message.

          Args:
              state (PipelineState): The current state of the pipeline, containing all data.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              None: This step does not modify the pipeline state.

gllm_pipeline/steps/map_reduce_step.pyi CHANGED
@@ -10,6 +10,7 @@ from gllm_pipeline.utils.error_handling import ErrorContext as ErrorContext
  from gllm_pipeline.utils.has_inputs_mixin import HasInputsMixin as HasInputsMixin
  from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
  from langgraph.runtime import Runtime as Runtime
+ from langgraph.types import RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any, Callable

@@ -77,12 +78,13 @@ class MapReduceStep(BasePipelineStep, HasInputsMixin):
          cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
              Defaults to None, in which case no cache configuration is used.
      '''
-     async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any]:
+     async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any]:
          """Execute the map and reduce operations.

          Args:
              state (dict[str, Any]): The current state of the pipeline.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              dict[str, Any]: The reduced result stored under output_state.

gllm_pipeline/steps/no_op_step.pyi CHANGED
@@ -1,5 +1,6 @@
  from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
  from langgraph.runtime import Runtime as Runtime
+ from langgraph.types import RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any

@@ -28,12 +29,13 @@ class NoOpStep(BasePipelineStep):
      Attributes:
          name (str): A unique identifier for this pipeline step.
      '''
-     async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> None:
+     async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> None:
          """Executes this step, which does nothing.

          Args:
              state (dict[str, Any]): The current state of the pipeline.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              None: This step does not modify the pipeline state.

gllm_pipeline/steps/parallel_step.pyi CHANGED
@@ -14,7 +14,7 @@ from gllm_pipeline.utils.mermaid import MERMAID_HEADER as MERMAID_HEADER
  from gllm_pipeline.utils.step_execution import execute_sequential_steps as execute_sequential_steps
  from langgraph.graph import StateGraph as StateGraph
  from langgraph.runtime import Runtime as Runtime
- from langgraph.types import RetryPolicy as RetryPolicy
+ from langgraph.types import RetryPolicy as RetryPolicy, RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any

@@ -106,7 +106,7 @@ class ParallelStep(BranchingStep, HasInputsMixin):
          Returns:
              list[str]: Exit points after adding all child steps.
          """
-     async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any] | None:
+     async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any] | None:
          """Execute all branches in parallel and merge their results.

          This method is only used for the squashed approach. For the expanded approach,
@@ -115,6 +115,7 @@ class ParallelStep(BranchingStep, HasInputsMixin):
          Args:
              state (dict[str, Any]): The current state of the pipeline.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              dict[str, Any] | None: The merged results from all parallel branches, or None if no updates were produced.

gllm_pipeline/steps/pipeline_step.pyi CHANGED
@@ -13,7 +13,7 @@ from gllm_pipeline.utils.graph import create_edge as create_edge
  from gllm_pipeline.utils.retry_converter import retry_config_to_langgraph_policy as retry_config_to_langgraph_policy
  from langgraph.graph import StateGraph as StateGraph
  from langgraph.runtime import Runtime as Runtime
- from langgraph.types import RetryPolicy as RetryPolicy
+ from langgraph.types import RetryPolicy as RetryPolicy, RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any

@@ -153,7 +153,7 @@ class BasePipelineStep(ABC, metaclass=abc.ABCMeta):
              list[str]: The exit points (endpoints) of this step.
          """
      @abstractmethod
-     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any] | None:
+     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any] | None:
          """Executes the operation defined for this pipeline step.

          This method should be implemented by subclasses to perform the actual processing or computation for this step.
@@ -161,6 +161,9 @@ class BasePipelineStep(ABC, metaclass=abc.ABCMeta):
          Args:
              state (PipelineState): The current state of the pipeline, containing all data.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): Runnable configuration containing thread_id and other
+                 LangGraph config. This allows steps to access invocation-level configuration like thread_id for
+                 tracking and checkpointing. Defaults to None.

          Returns:
              dict[str, Any] | None: The update to the pipeline state after this step's operation.
@@ -170,7 +173,7 @@ class BasePipelineStep(ABC, metaclass=abc.ABCMeta):
          Raises:
              NotImplementedError: If the subclass does not implement this method.
          """
-     async def execute_direct(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any] | None:
+     async def execute_direct(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any] | None:
          """Execute this step directly, bypassing graph-based execution.

          This method is used when a step needs to be executed directly, such as in parallel execution.
@@ -179,6 +182,7 @@ class BasePipelineStep(ABC, metaclass=abc.ABCMeta):
          Args:
              state (dict[str, Any]): The current state of the pipeline.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration to pass to the step.

          Returns:
              dict[str, Any] | None: Updates to apply to the pipeline state, or None if no updates.
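
Since `BasePipelineStep.execute` is abstract, the optional `config` parameter above defines the contract every step subclass in this release now implements. A hedged sketch of a custom step that reads the `thread_id` delivered through `RunnableConfig` (the class name, state key, and fallback handling are illustrative, not part of the package):

```python
from typing import Any

from gllm_pipeline.alias import PipelineState
from gllm_pipeline.steps.pipeline_step import BasePipelineStep
from langgraph.runtime import Runtime
from langgraph.types import RunnableConfig
from pydantic import BaseModel


class ThreadTaggingStep(BasePipelineStep):
    """Illustrative step that records which thread produced a state update."""

    async def execute(
        self,
        state: PipelineState,
        runtime: Runtime[dict[str, Any] | BaseModel],
        config: RunnableConfig | None = None,
    ) -> dict[str, Any] | None:
        # RunnableConfig is a dict-like mapping; LangGraph keeps
        # invocation-scoped values, including thread_id, under "configurable".
        configurable = (config or {}).get("configurable", {})
        # "last_thread_id" is an illustrative state key, not a package field.
        return {"last_thread_id": configurable.get("thread_id")}
```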

gllm_pipeline/steps/state_operator_step.pyi CHANGED
@@ -9,6 +9,7 @@ from gllm_pipeline.utils.error_handling import ErrorContext as ErrorContext
  from gllm_pipeline.utils.has_inputs_mixin import HasInputsMixin as HasInputsMixin
  from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
  from langgraph.runtime import Runtime
+ from langgraph.types import RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any, Callable

@@ -56,7 +57,7 @@ class StateOperatorStep(BasePipelineStep, HasInputsMixin):
          cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
              Defaults to None, in which case no cache configuration is used.
      '''
-     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any]:
+     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any]:
          """Executes the operation and processes its output.

          This method validates inputs, prepares data, executes the operation, and formats the output for integration
@@ -65,6 +66,7 @@ class StateOperatorStep(BasePipelineStep, HasInputsMixin):
          Args:
              state (PipelineState): The current state of the pipeline, containing all data.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              dict[str, Any]: The update to the pipeline state after this step's operation.

gllm_pipeline/steps/subgraph_step.pyi CHANGED
@@ -11,6 +11,7 @@ from gllm_pipeline.utils.error_handling import ErrorContext as ErrorContext
  from gllm_pipeline.utils.has_inputs_mixin import HasInputsMixin as HasInputsMixin
  from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
  from langgraph.runtime import Runtime as Runtime
+ from langgraph.types import RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any

@@ -59,7 +60,7 @@ class SubgraphStep(BaseCompositeStep, HasInputsMixin):
          cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
              Defaults to None, in which case no cache configuration is used.
      '''
-     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any]:
+     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> dict[str, Any]:
          """Executes the subgraph and processes its output.

          This method prepares data, executes the subgraph, and formats the output for integration
@@ -69,6 +70,7 @@ class SubgraphStep(BaseCompositeStep, HasInputsMixin):
          Args:
              state (PipelineState): The current state of the pipeline, containing all data.
              runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.

          Returns:
              dict[str, Any]: The update to the pipeline state after this step's operation.

gllm_pipeline/steps/terminator_step.pyi CHANGED
@@ -2,7 +2,7 @@ from gllm_pipeline.alias import PipelineState as PipelineState
  from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
  from langgraph.graph import StateGraph as StateGraph
  from langgraph.runtime import Runtime as Runtime
- from langgraph.types import RetryPolicy as RetryPolicy
+ from langgraph.types import RetryPolicy as RetryPolicy, RunnableConfig as RunnableConfig
  from pydantic import BaseModel as BaseModel
  from typing import Any

@@ -48,10 +48,11 @@ class TerminatorStep(BasePipelineStep):
          Returns:
              list[str]: Empty list as this step has no endpoints (it terminates the flow).
          """
-     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> None:
+     async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel], config: RunnableConfig | None = None) -> None:
          """Executes this step, which does nothing but pass through the state.

          Args:
              state (PipelineState): The current pipeline state.
              runtime (Runtime[dict[str, Any] | BaseModel]): The runtime information.
+             config (RunnableConfig | None, optional): The runnable configuration. Defaults to None.
          """

gllm_pipeline/utils/typing_compat.pyi ADDED
@@ -0,0 +1,3 @@
+ from typing import TypedDict as TypedDict, is_typeddict as is_typeddict
+
+ __all__ = ['TypedDict', 'is_typeddict']
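
On Python 3.11/3.12 (the supported range) this new stub re-exports `TypedDict` and `is_typeddict` straight from `typing`; the `typing-extensions` dependency added in METADATA below suggests the runtime module can fall back to `typing_extensions` where needed, though the stub does not show that branch. A small sketch of the consuming pattern this release migrates to (the state class and checks are illustrative):

```python
from gllm_pipeline.utils.typing_compat import TypedDict, is_typeddict


class InputState(TypedDict):
    user_query: str
    context: str


# is_typeddict lets the pipeline verify that an input_type really is a
# TypedDict class (e.g. before deriving a tool schema from it).
assert is_typeddict(InputState)
assert not is_typeddict(dict)
```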

gllm_pipeline.cp312-win_amd64.pyd CHANGED (binary file, contents not shown)

gllm_pipeline.pyi CHANGED
@@ -20,7 +20,9 @@ import gllm_datastore
  import gllm_datastore.cache
  import gllm_datastore.cache.cache
  import asyncio
+ import uuid
  import copy
+ import gllm_core.schema.tool
  import gllm_core.utils.imports
  import gllm_core.utils.logger_manager
  import gllm_inference
@@ -83,4 +85,5 @@ import langgraph.types
  import dataclasses
  import gllm_core.event.messenger
  import gllm_pipeline.steps.step_error_handler.RaiseStepErrorHandler
- import traceback
+ import traceback
+ import sys

gllm_pipeline_binary-0.4.24.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: gllm-pipeline-binary
- Version: 0.4.22.post1
+ Version: 0.4.24
  Summary: A library containing components related to Gen AI applications pipeline orchestration.
  Author-email: Dimitrij Ray <dimitrij.ray@gdplabs.id>, Henry Wicaksono <henry.wicaksono@gdplabs.id>, Kadek Denaya <kadek.d.r.diana@gdplabs.id>
  Requires-Python: <3.13,>=3.11
@@ -11,6 +11,7 @@ Requires-Dist: gllm-core-binary<0.4.0,>=0.3.0
  Requires-Dist: gllm-inference-binary<0.6.0,>=0.5.0
  Requires-Dist: aiohttp<3.13.0,>=3.12.14
  Requires-Dist: langgraph<0.7.0,>=0.6.0
+ Requires-Dist: typing-extensions<5.0.0,>=4.5.0
  Provides-Extra: dev
  Requires-Dist: coverage<7.5.0,>=7.4.4; extra == "dev"
  Requires-Dist: mypy<1.16.0,>=1.15.0; extra == "dev"

gllm_pipeline_binary-0.4.24.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
- gllm_pipeline.cp312-win_amd64.pyd,sha256=YSw3Rr4WsqkElPTVFgQ_DWnMab1invNsn4oPiruiTiI,2174976
- gllm_pipeline.pyi,sha256=QRQhvO2Qq-xIEm7IGhaJ7ZELWzla_t4KiOV9MtH6zSQ,2294
+ gllm_pipeline.cp312-win_amd64.pyd,sha256=qjhsaFUWKRHDq6mgIqfYy52U1uMsunzKOu0tQLcGRhk,2189312
+ gllm_pipeline.pyi,sha256=KL3UbRkjsWRktt_UM70HNxLAvyYzPTQ0pvNfwTZt8SI,2346
  gllm_pipeline/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gllm_pipeline/alias.pyi,sha256=FbALRYZpDlmQMsKNUvgCi6ji11PrEtNo2kgzbt0iT7g,237
  gllm_pipeline/types.pyi,sha256=CV3cEAxlNsnVatYz5iCxqmEFPEqeKW5vv-qUD3FpF54,241
@@ -7,8 +7,8 @@ gllm_pipeline/exclusions/__init__.pyi,sha256=_LwIlqmH4Iiksn7p09d2vZG4Ek8CdKC8UcD
  gllm_pipeline/exclusions/exclusion_manager.pyi,sha256=DzoL-2KeTRmFgJEo8rzYViFYKbzZVTZGJmKvzaoTC0M,2960
  gllm_pipeline/exclusions/exclusion_set.pyi,sha256=11XTt6IfkHpzomcNybA78SfWlp752Z3AGhXfm2rL0Fk,1685
  gllm_pipeline/pipeline/__init__.pyi,sha256=1IKGdMvmLWEiOOmAKFNUPm-gdw13zrnU1gs7tDNzgEU,168
- gllm_pipeline/pipeline/pipeline.pyi,sha256=3aPgaDfAUohJo5yCbJ68uSJyD7QE3jD8LszFkkTVA-Y,14651
- gllm_pipeline/pipeline/states.pyi,sha256=EiyfBPwrVDZ336w5wyD1q8W4E6G1uZNzsP-bzrHDumo,6464
+ gllm_pipeline/pipeline/pipeline.pyi,sha256=CgP179RgbwtIsz_U0cMi3jdwbVOBz-D4sIKjoLkV55g,16504
+ gllm_pipeline/pipeline/states.pyi,sha256=WezH44ULqlim8CoEupQGHXs5xNzkqLvK8epwYQYRT4I,6523
  gllm_pipeline/pipeline/composer/__init__.pyi,sha256=-hcOUQgpTRt1QjQfRurTf-UApFnTrhilx6vN-gYd5J0,666
  gllm_pipeline/pipeline/composer/composer.pyi,sha256=7h7EhEA-hex6w36Is6uGTz9OBUbmq6C0SdkeBeLFcAI,28715
  gllm_pipeline/pipeline/composer/guard_composer.pyi,sha256=YfbXmzyU3CwAvGnCfM-6MVcTdxk53-j6Cv3IdzNr_-c,3335
@@ -40,20 +40,20 @@ gllm_pipeline/router/preset/aurelio/router_image_domain_specific.pyi,sha256=6pm2
  gllm_pipeline/router/preset/lm_based/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gllm_pipeline/router/preset/lm_based/router_image_domain_specific.pyi,sha256=UdiuoSXm2MVAL8AspAaSkyXYkE59bYj1y4xRRgKwavE,655
  gllm_pipeline/steps/__init__.pyi,sha256=5HtVA5CODr_9_7_OGEXFXU40edqhHa9YlCV5qVx3xbU,1989
- gllm_pipeline/steps/_func.pyi,sha256=FS-g9LiyHb8OHLM8bXrAMiqvk4-KgaxV8RacD0IQw-Q,64125
+ gllm_pipeline/steps/_func.pyi,sha256=TDTSsBwiUMwlFXLcLVolcd_7g7bAcg4mDx_r_y2mDb4,64152
  gllm_pipeline/steps/branching_step.pyi,sha256=iNarrcZgWfiRdr8CJfGm8GzUlYq13Rx5cgYXnBsNWN4,1041
- gllm_pipeline/steps/component_step.pyi,sha256=2rHqYeVHcKCqsA8GXeTyVAWus6rCkYp0GHK7qnACZjc,5547
+ gllm_pipeline/steps/component_step.pyi,sha256=VNBFZscK2Q4HgBt8ZrbE6B69oLTKcXo-KkqbTjmYYhM,5748
  gllm_pipeline/steps/composite_step.pyi,sha256=lvueTBQG_t0TtS5qRvUzZOIt3h6-uD26DJXW4ZSkuUc,3544
- gllm_pipeline/steps/conditional_step.pyi,sha256=8WLvtNAS-wuYxjelTBBKo8KwkLaJmNWYrMXgsnpIeSk,10189
+ gllm_pipeline/steps/conditional_step.pyi,sha256=Nm7zjxyPooRjJboT8Pqc3UwWKiA1gJHOwCtPW7KtrpE,10501
  gllm_pipeline/steps/guard_step.pyi,sha256=c_vdRz3hjfPu2DFkBt0_UBGJErQpdJwl0AE-GNxC4gM,4779
- gllm_pipeline/steps/log_step.pyi,sha256=p0DVXHV1mf4zhC5QP5uuzM8cVytrzY9uZHS_s1SYgKs,3100
- gllm_pipeline/steps/map_reduce_step.pyi,sha256=WE4a7SqeIf9BC-aJWQ-KnikN9q2YfJzQc5-_slIpRtg,6034
- gllm_pipeline/steps/no_op_step.pyi,sha256=mVCpfUOIGGs_q6BMy0g1tsaDH8JBiKyFMcVHeeOTMXY,1379
- gllm_pipeline/steps/parallel_step.pyi,sha256=Z5FhVXJslQ777cdi7Bnzm27yqJU61MflNic7lAIscno,8382
- gllm_pipeline/steps/pipeline_step.pyi,sha256=oc_Sg-YhFSFbGKaNZoLvkka44kyQap7euj30AB0WWJ4,11213
- gllm_pipeline/steps/state_operator_step.pyi,sha256=60hvqAw8QLYyhO9zo8F56bVpG2494WZpoZOtpYyiaT8,5111
- gllm_pipeline/steps/subgraph_step.pyi,sha256=j0oF18OJ3VjApXO27l6B2zZO-joBdkS3J2dZYjSwTAo,5825
- gllm_pipeline/steps/terminator_step.pyi,sha256=ZBZEWS232HRWJUgxqznB3dCjLw0XoezBAuK3gczaKp0,2514
+ gllm_pipeline/steps/log_step.pyi,sha256=XjL_-mlsWAEcoCBZ--qa8XkGdIXrx1K-chI8aXVsOFE,3301
+ gllm_pipeline/steps/map_reduce_step.pyi,sha256=c9qo_EmQWlu-jjAHeOKeyj8dVRL6yR89Tj5RHRoxhhA,6235
+ gllm_pipeline/steps/no_op_step.pyi,sha256=3DWW_gmY_66_mmNmnn2ic_NJZ0tNi_obDfjN8oLZByk,1580
+ gllm_pipeline/steps/parallel_step.pyi,sha256=XI_gM3Z1WS2R0j_qfrwf0tausxvX6VWuiknSCLgAetg,8555
+ gllm_pipeline/steps/pipeline_step.pyi,sha256=p0qNBL7v8Gv0wc7CeN3hIv08u_I8uaT1xPQwt-YOQPM,11711
+ gllm_pipeline/steps/state_operator_step.pyi,sha256=55mX2vxUUmRy2sS4iqIE4ZM_D4pB1FFoQa5Ox9N1Jdg,5312
+ gllm_pipeline/steps/subgraph_step.pyi,sha256=xvHxBiFbXJhMPW-06YcoW1SjVGthTj1_O4nymSew9L0,6026
+ gllm_pipeline/steps/terminator_step.pyi,sha256=M1LNw1AszTVMtwOHGrWvnr15A8qIgWuHYUdbFNrCzUM,2687
  gllm_pipeline/steps/step_error_handler/__init__.pyi,sha256=6eUbWMlQKQjlqS2KJHIMZksb_dXUxmsgRzoJ03tnX4o,618
  gllm_pipeline/steps/step_error_handler/empty_step_error_handler.pyi,sha256=JwD09mJD-80_pmeOEaB7blKCuYXOsju4JyVejOGtTLI,901
  gllm_pipeline/steps/step_error_handler/fallback_step_error_handler.pyi,sha256=N4Skd5C5df34KdephUX5l_KclMBnnvGBmhNQFKhmtBs,1326
@@ -70,8 +70,9 @@ gllm_pipeline/utils/input_map.pyi,sha256=mPWU9_b3VGhszuTjB3yQggZWJCxjZth4_WQdKec
  gllm_pipeline/utils/mermaid.pyi,sha256=B096GTXxVAO--kw3UDsbysOsnjGOytYfozX39YaM21A,1174
  gllm_pipeline/utils/retry_converter.pyi,sha256=JPUuaGzKpVLshrbhX9rQHYl5XmC9GDa59rGU-FtOpWM,1128
  gllm_pipeline/utils/step_execution.pyi,sha256=3o28tiCHR8t-6Vk3Poz91V-CLdYrdhvJblPW9AoOK-c,996
+ gllm_pipeline/utils/typing_compat.pyi,sha256=V4812i25ncSqZ0o_30lUX65tU07bWEs4LIpdLPt2ngg,116
  gllm_pipeline.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
- gllm_pipeline_binary-0.4.22.post1.dist-info/METADATA,sha256=MDPC7Ag8JQsz-3Uda8KmQxjSpXeE47hFZbQqrJvCOVE,4482
- gllm_pipeline_binary-0.4.22.post1.dist-info/WHEEL,sha256=x5rgv--I0NI0IT1Lh9tN1VG2cI637p3deednwYLKnxc,96
- gllm_pipeline_binary-0.4.22.post1.dist-info/top_level.txt,sha256=C3yeOtoE6ZhuOnBEq_FFc_Rp954IHJBlB6fBgSdAWYI,14
- gllm_pipeline_binary-0.4.22.post1.dist-info/RECORD,,
+ gllm_pipeline_binary-0.4.24.dist-info/METADATA,sha256=8mzbtoKhwpZLS3hpZ2rK6Rwu-eNZRs_fV_gD7rdh9eo,4524
+ gllm_pipeline_binary-0.4.24.dist-info/WHEEL,sha256=x5rgv--I0NI0IT1Lh9tN1VG2cI637p3deednwYLKnxc,96
+ gllm_pipeline_binary-0.4.24.dist-info/top_level.txt,sha256=C3yeOtoE6ZhuOnBEq_FFc_Rp954IHJBlB6fBgSdAWYI,14
+ gllm_pipeline_binary-0.4.24.dist-info/RECORD,,