gllm-pipeline-binary 0.4.21__cp311-cp311-macosx_13_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. gllm_pipeline/__init__.pyi +0 -0
  2. gllm_pipeline/alias.pyi +7 -0
  3. gllm_pipeline/exclusions/__init__.pyi +4 -0
  4. gllm_pipeline/exclusions/exclusion_manager.pyi +74 -0
  5. gllm_pipeline/exclusions/exclusion_set.pyi +46 -0
  6. gllm_pipeline/pipeline/__init__.pyi +4 -0
  7. gllm_pipeline/pipeline/composer/__init__.pyi +8 -0
  8. gllm_pipeline/pipeline/composer/composer.pyi +350 -0
  9. gllm_pipeline/pipeline/composer/guard_composer.pyi +58 -0
  10. gllm_pipeline/pipeline/composer/if_else_composer.pyi +57 -0
  11. gllm_pipeline/pipeline/composer/parallel_composer.pyi +47 -0
  12. gllm_pipeline/pipeline/composer/switch_composer.pyi +57 -0
  13. gllm_pipeline/pipeline/composer/toggle_composer.pyi +48 -0
  14. gllm_pipeline/pipeline/pipeline.pyi +280 -0
  15. gllm_pipeline/pipeline/states.pyi +139 -0
  16. gllm_pipeline/router/__init__.pyi +6 -0
  17. gllm_pipeline/router/aurelio_semantic_router/__init__.pyi +3 -0
  18. gllm_pipeline/router/aurelio_semantic_router/aurelio_semantic_router.pyi +86 -0
  19. gllm_pipeline/router/aurelio_semantic_router/bytes_compat_route.pyi +40 -0
  20. gllm_pipeline/router/aurelio_semantic_router/encoders/__init__.pyi +5 -0
  21. gllm_pipeline/router/aurelio_semantic_router/encoders/em_invoker_encoder.pyi +46 -0
  22. gllm_pipeline/router/aurelio_semantic_router/encoders/langchain_encoder.pyi +50 -0
  23. gllm_pipeline/router/aurelio_semantic_router/encoders/tei_encoder.pyi +49 -0
  24. gllm_pipeline/router/aurelio_semantic_router/index/__init__.pyi +4 -0
  25. gllm_pipeline/router/aurelio_semantic_router/index/aurelio_index.pyi +65 -0
  26. gllm_pipeline/router/aurelio_semantic_router/index/azure_ai_search_aurelio_index.pyi +71 -0
  27. gllm_pipeline/router/aurelio_semantic_router/index/vector_store_adapter_index.pyi +119 -0
  28. gllm_pipeline/router/lm_based_router.pyi +60 -0
  29. gllm_pipeline/router/preset/__init__.pyi +0 -0
  30. gllm_pipeline/router/preset/aurelio/__init__.pyi +0 -0
  31. gllm_pipeline/router/preset/aurelio/router_image_domain_specific.pyi +21 -0
  32. gllm_pipeline/router/preset/lm_based/__init__.pyi +0 -0
  33. gllm_pipeline/router/preset/lm_based/router_image_domain_specific.pyi +14 -0
  34. gllm_pipeline/router/preset/preset_loader.pyi +24 -0
  35. gllm_pipeline/router/router.pyi +46 -0
  36. gllm_pipeline/router/rule_based_router.pyi +80 -0
  37. gllm_pipeline/router/similarity_based_router.pyi +72 -0
  38. gllm_pipeline/router/utils.pyi +26 -0
  39. gllm_pipeline/steps/__init__.pyi +17 -0
  40. gllm_pipeline/steps/_func.pyi +958 -0
  41. gllm_pipeline/steps/branching_step.pyi +24 -0
  42. gllm_pipeline/steps/component_step.pyi +82 -0
  43. gllm_pipeline/steps/composite_step.pyi +65 -0
  44. gllm_pipeline/steps/conditional_step.pyi +161 -0
  45. gllm_pipeline/steps/guard_step.pyi +71 -0
  46. gllm_pipeline/steps/log_step.pyi +53 -0
  47. gllm_pipeline/steps/map_reduce_step.pyi +92 -0
  48. gllm_pipeline/steps/no_op_step.pyi +40 -0
  49. gllm_pipeline/steps/parallel_step.pyi +128 -0
  50. gllm_pipeline/steps/pipeline_step.pyi +231 -0
  51. gllm_pipeline/steps/state_operator_step.pyi +75 -0
  52. gllm_pipeline/steps/step_error_handler/__init__.pyi +6 -0
  53. gllm_pipeline/steps/step_error_handler/empty_step_error_handler.pyi +20 -0
  54. gllm_pipeline/steps/step_error_handler/fallback_step_error_handler.pyi +24 -0
  55. gllm_pipeline/steps/step_error_handler/keep_step_error_handler.pyi +9 -0
  56. gllm_pipeline/steps/step_error_handler/raise_step_error_handler.pyi +9 -0
  57. gllm_pipeline/steps/step_error_handler/step_error_handler.pyi +46 -0
  58. gllm_pipeline/steps/subgraph_step.pyi +90 -0
  59. gllm_pipeline/steps/terminator_step.pyi +57 -0
  60. gllm_pipeline/types.pyi +10 -0
  61. gllm_pipeline/utils/__init__.pyi +9 -0
  62. gllm_pipeline/utils/async_utils.pyi +21 -0
  63. gllm_pipeline/utils/copy.pyi +11 -0
  64. gllm_pipeline/utils/error_handling.pyi +61 -0
  65. gllm_pipeline/utils/graph.pyi +16 -0
  66. gllm_pipeline/utils/has_inputs_mixin.pyi +50 -0
  67. gllm_pipeline/utils/input_map.pyi +12 -0
  68. gllm_pipeline/utils/mermaid.pyi +29 -0
  69. gllm_pipeline/utils/retry_converter.pyi +25 -0
  70. gllm_pipeline/utils/step_execution.pyi +19 -0
  71. gllm_pipeline.build/.gitignore +1 -0
  72. gllm_pipeline.cpython-311-darwin.so +0 -0
  73. gllm_pipeline.pyi +86 -0
  74. gllm_pipeline_binary-0.4.21.dist-info/METADATA +105 -0
  75. gllm_pipeline_binary-0.4.21.dist-info/RECORD +77 -0
  76. gllm_pipeline_binary-0.4.21.dist-info/WHEEL +5 -0
  77. gllm_pipeline_binary-0.4.21.dist-info/top_level.txt +1 -0
@@ -0,0 +1,24 @@
1
+ import abc
2
+ from _typeshed import Incomplete
3
+ from gllm_pipeline.alias import PipelineSteps as PipelineSteps
4
+ from gllm_pipeline.exclusions import ExclusionSet as ExclusionSet
5
+ from gllm_pipeline.steps.composite_step import BaseCompositeStep as BaseCompositeStep
6
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
7
+
8
+ class BranchingStep(BaseCompositeStep, metaclass=abc.ABCMeta):
9
+ """Mixin-like base for composites that maintain named branches.
10
+
11
+ Attributes:
12
+ branches (dict[str, PipelineSteps]): The branches to execute in parallel.
13
+ """
14
+ branches: dict[str, PipelineSteps]
15
+ is_excluded: Incomplete
16
+ def apply_exclusions(self, exclusions: ExclusionSet) -> None:
17
+ """Apply exclusions to this branching step and its children.
18
+
19
+ Marks self excluded, lets subclass perform internal structural changes,
20
+ then propagates exclusions to children per branch.
21
+
22
+ Args:
23
+ exclusions (ExclusionSet): The exclusion set to apply.
24
+ """
@@ -0,0 +1,82 @@
1
+ from _typeshed import Incomplete
2
+ from gllm_core.schema import Component as Component
3
+ from gllm_core.utils.retry import RetryConfig as RetryConfig
4
+ from gllm_datastore.cache.cache import BaseCache as BaseCache
5
+ from gllm_pipeline.alias import InputMapSpec as InputMapSpec, PipelineState as PipelineState
6
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
7
+ from gllm_pipeline.steps.step_error_handler.step_error_handler import BaseStepErrorHandler as BaseStepErrorHandler
8
+ from gllm_pipeline.utils.error_handling import ErrorContext as ErrorContext
9
+ from gllm_pipeline.utils.has_inputs_mixin import HasInputsMixin as HasInputsMixin
10
+ from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
11
+ from langgraph.runtime import Runtime
12
+ from pydantic import BaseModel as BaseModel
13
+ from typing import Any
14
+
15
+ class ComponentStep(BasePipelineStep, HasInputsMixin):
16
+ """A pipeline step that executes a specific component.
17
+
18
+ This step wraps a component, manages its inputs and outputs, and integrates it into the pipeline.
19
+
20
+ Attributes:
21
+ name (str): A unique identifier for this pipeline step.
22
+ component (Component): The component to be executed in this step.
23
+ input_map (dict[str, str | Val] | None): Unified input map.
24
+ output_state (str | list[str] | None): Key(s) to extract from the component result and add to the pipeline
25
+ state. If None, the component is executed but no state updates are performed.
26
+ retry_policy (RetryPolicy | None): Configuration for retry behavior using LangGraph's RetryPolicy.
27
+ error_handler (BaseStepErrorHandler | None): Strategy to handle errors during execution.
28
+ """
29
+ component: Incomplete
30
+ output_state: Incomplete
31
+ def __init__(self, name: str, component: Component, input_state_map: dict[str, str] | None = None, output_state: str | list[str] | None = None, runtime_config_map: dict[str, str] | None = None, fixed_args: dict[str, Any] | None = None, input_map: InputMapSpec | None = None, retry_config: RetryConfig | None = None, error_handler: BaseStepErrorHandler | None = None, cache_store: BaseCache | None = None, cache_config: dict[str, Any] | None = None) -> None:
32
+ '''Initializes a new ComponentStep.
33
+
34
+ Args:
35
+ name (str): A unique identifier for this pipeline step.
36
+ component (Component): The component to be executed in this step.
37
+ input_state_map (dict[str, str]): Mapping of component input arguments to pipeline state keys.
38
+ Keys are input arguments expected by the component, values are corresponding state keys.
39
+ output_state ((str | list[str]) | None, optional): Key(s) to extract from the component result and add to
40
+ the pipeline state. If None, the component is executed but no state updates are performed.
41
+ Defaults to None.
42
+ runtime_config_map (dict[str, str] | None, optional): Mapping of component input arguments to runtime
43
+ configuration keys.
44
+ Keys are input arguments expected by the component, values are runtime configuration keys.
45
+ Defaults to None.
46
+ fixed_args (dict[str, Any] | None, optional): Fixed arguments to be passed to the component.
47
+ Defaults to None.
48
+ input_map (InputMapSpec | None, optional):
49
+ Unified input map. Can be a dict (arg -> str|Val) or a list with elements:
50
+ 1. str for identity mapping
51
+ 2. dict[str, str] for state/config mapping
52
+ 3. dict[str, Val] for fixed args.
53
+ Defaults to None.
54
+ retry_config (RetryConfig | None, optional): Configuration for retry behavior using
55
+ GLLM Core\'s RetryConfig. Defaults to None, in which case no retry config is applied.
56
+ error_handler (BaseStepErrorHandler | None, optional): Strategy to handle errors during execution.
57
+ Defaults to None, in which case the RaiseStepErrorHandler is used.
58
+ cache_store ("BaseCache" | None, optional): Cache store to be used for caching.
59
+ Defaults to None, in which case no cache store is used.
60
+ cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
61
+ Defaults to None, in which case no cache configuration is used.
62
+ '''
63
+ async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any] | None:
64
+ """Executes the component and processes its output.
65
+
66
+ This method validates inputs, prepares data, executes the component, and formats the output for integration
67
+ into the pipeline state.
68
+
69
+ Args:
70
+ state (PipelineState): The current state of the pipeline, containing all data.
71
+ runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
72
+
73
+ Returns:
74
+ dict[str, Any] | None: The update to the pipeline state after this step's operation, or None if
75
+ output_state is None. When not None, this includes new or modified data produced by the component,
76
+ not the entire state.
77
+
78
+ Raises:
79
+ RuntimeError: If an error occurs during component execution.
80
+ TimeoutError: If the component execution times out.
81
+ asyncio.CancelledError: If the component execution is cancelled.
82
+ """
@@ -0,0 +1,65 @@
1
+ import abc
2
+ from abc import ABC, abstractmethod
3
+ from gllm_core.utils.retry import RetryConfig as RetryConfig
4
+ from gllm_datastore.cache.cache import BaseCache as BaseCache
5
+ from gllm_pipeline.exclusions import ExclusionSet as ExclusionSet
6
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
7
+ from gllm_pipeline.steps.step_error_handler.step_error_handler import BaseStepErrorHandler as BaseStepErrorHandler
8
+ from gllm_pipeline.utils.error_handling import ErrorContext as ErrorContext
9
+ from gllm_pipeline.utils.graph import create_edge as create_edge
10
+ from gllm_pipeline.utils.mermaid import combine_mermaid_diagrams as combine_mermaid_diagrams, extract_step_diagrams as extract_step_diagrams
11
+ from langgraph.graph import StateGraph as StateGraph
12
+ from langgraph.types import RetryPolicy as RetryPolicy
13
+ from typing import Any
14
+
15
+ class BaseCompositeStep(BasePipelineStep, ABC, metaclass=abc.ABCMeta):
16
+ """Base class for all composite pipeline steps.
17
+
18
+ Attributes:
19
+ name (str): A unique identifier for the pipeline step.
20
+ retry_policy (RetryPolicy | None): Configuration for retry behavior using LangGraph's RetryPolicy.
21
+ """
22
+ def __init__(self, name: str, retry_config: RetryConfig | None = None, error_handler: BaseStepErrorHandler | None = None, cache_store: BaseCache | None = None, cache_config: dict[str, Any] | None = None) -> None:
23
+ '''Initialize the composite step.
24
+
25
+ Args:
26
+ name (str): A unique identifier for the pipeline step.
27
+ retry_config (RetryConfig | None, optional): Configuration for retry behavior. Defaults to None.
28
+ error_handler (BaseStepErrorHandler | None, optional): Strategy to handle errors during execution.
29
+ cache_store ("BaseCache" | None, optional): Cache store to be used for caching.
30
+ Defaults to None, in which case no cache store is used.
31
+ cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
32
+ Defaults to None, in which case no cache configuration is used.
33
+ '''
34
+ def add_to_graph(self, graph: StateGraph, previous_endpoints: list[str], retry_policy: RetryPolicy | None = None) -> list[str]:
35
+ """Template method: Add this composite step to the graph.
36
+
37
+ Args:
38
+ graph (StateGraph): The graph to add this step to.
39
+ previous_endpoints (list[str]): Endpoints from previous steps to connect to.
40
+ retry_policy (RetryPolicy | None, optional): Retry policy to use for this step and propagate to child steps.
41
+ If provided, takes precedence over the step's own retry policy.
42
+ Defaults to None, in which case the step's own retry policy is used.
43
+
44
+ Returns:
45
+ list[str]: Exit points after adding all child steps
46
+ """
47
+ @abstractmethod
48
+ def apply_exclusions(self, exclusions: ExclusionSet) -> None:
49
+ """Apply exclusions to this composite step and its children.
50
+
51
+ Subclasses must implement full exclusion behavior, including any
52
+ internal structure updates and propagation to children.
53
+
54
+ Args:
55
+ exclusions (ExclusionSet): The exclusion set to apply.
56
+
57
+ Raises:
58
+ NotImplementedError: If not implemented by subclass.
59
+ """
60
+ def get_mermaid_diagram(self) -> str:
61
+ """Template method: Generate complete mermaid diagram.
62
+
63
+ Returns:
64
+ str: Complete mermaid diagram representation.
65
+ """
@@ -0,0 +1,161 @@
1
+ from _typeshed import Incomplete
2
+ from dataclasses import dataclass
3
+ from gllm_core.event.event_emitter import EventEmitter as EventEmitter
4
+ from gllm_core.schema import Component
5
+ from gllm_core.utils.retry import RetryConfig as RetryConfig
6
+ from gllm_datastore.cache.cache import BaseCache as BaseCache
7
+ from gllm_pipeline.alias import InputMapSpec as InputMapSpec, PipelineState as PipelineState, PipelineSteps as PipelineSteps
8
+ from gllm_pipeline.steps.branching_step import BranchingStep as BranchingStep
9
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
10
+ from gllm_pipeline.steps.step_error_handler.step_error_handler import BaseStepErrorHandler as BaseStepErrorHandler
11
+ from gllm_pipeline.steps.terminator_step import TerminatorStep as TerminatorStep
12
+ from gllm_pipeline.types import Val as Val
13
+ from gllm_pipeline.utils.async_utils import execute_callable as execute_callable
14
+ from gllm_pipeline.utils.graph import create_edge as create_edge
15
+ from gllm_pipeline.utils.has_inputs_mixin import HasInputsMixin as HasInputsMixin
16
+ from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
17
+ from gllm_pipeline.utils.mermaid import MERMAID_HEADER as MERMAID_HEADER
18
+ from gllm_pipeline.utils.step_execution import execute_sequential_steps as execute_sequential_steps
19
+ from langgraph.graph import StateGraph as StateGraph
20
+ from langgraph.runtime import Runtime as Runtime
21
+ from langgraph.types import Command, RetryPolicy as RetryPolicy
22
+ from pydantic import BaseModel as BaseModel
23
+ from typing import Any, Callable
24
+
25
+ ConditionType = Component | Callable[[dict[str, Any]], str]
26
+ DEFAULT_BRANCH: str
27
+
28
+ @dataclass
29
+ class ConditionInputs:
30
+ """Container for different types of inputs used in condition evaluation.
31
+
32
+ Attributes:
33
+ merged (dict[str, Any]): Complete merged dictionary containing all state, config, and fixed args.
34
+ Used for Callable conditions.
35
+ mapped (dict[str, Any]): Dictionary containing only explicitly mapped inputs.
36
+ Used for Component conditions.
37
+ event_emitter (EventEmitter | None): Event emitter instance for logging.
38
+ has_mapped_specs (bool): Whether the mapped inputs have specs or are literal values.
39
+ """
40
+ merged: dict[str, Any]
41
+ mapped: dict[str, Any]
42
+ event_emitter: EventEmitter | None
43
+ has_mapped_specs: bool
44
+
45
+ class ConditionalStep(BranchingStep, HasInputsMixin):
46
+ '''A conditional pipeline step that conditionally executes different branches based on specified conditions.
47
+
48
+ This step evaluates one or more conditions and selects a branch to execute based on the result.
49
+ It provides flexibility in defining complex conditional logic within a pipeline.
50
+
51
+ A minimal usage requires defining the branches to execute based on a `condition`, which is a callable
52
+ that takes input from the state and returns a string identifying the branch to execute.
53
+
54
+ The condition can be a `Component` or a `Callable`. The handling of inputs differs:
55
+ 1. If the condition is a `Component`, `input_map` is used to map the pipeline\'s
56
+ state and config to the component\'s inputs.
57
+ 2. If the condition is a `Callable`, it receives a merged dictionary of the
58
+ pipeline\'s state and config directly. In this case, `input_map` is not used
59
+ to build the payload and should not be passed.
60
+
61
+ Example:
62
+ ```python
63
+ ConditionalStep(
64
+ name="UseCaseSelection",
65
+ branches={"A": step_a, DEFAULT_BRANCH: step_b},
66
+ condition=lambda x: "A" if "<A>" in x["query"] else "__default__"
67
+ )
68
+ ```
69
+ This will execute `step_a` if the query contains "<A>", and `step_b` otherwise.
70
+
71
+ The special key `__default__` (importable as DEFAULT_BRANCH) defines the default branch to execute
72
+ if no other condition matches. If the DEFAULT_BRANCH is not defined and no condition matches,
73
+ the step will raise an error.
74
+
75
+ Attributes:
76
+ name (str): A unique identifier for this pipeline step.
77
+ branches (dict[str, BasePipelineStep | list[BasePipelineStep]]): Mapping of condition results to steps.
78
+ condition (list[ConditionType] | None): The condition(s) to evaluate for branch selection.
79
+ input_map (dict[str, str | Val] | None): Unified input map.
80
+ output_state (str | None): Key to store the condition result in the state, if desired.
81
+ condition_aggregator (Callable[[list[Any]], str]): Function to aggregate multiple condition results.
82
+ retry_policy (RetryPolicy | None): Configuration for retry behavior using LangGraph\'s RetryPolicy.
83
+ error_handler (BaseStepErrorHandler | None): Strategy to handle errors during execution.
84
+ '''
85
+ branches: Incomplete
86
+ condition: Incomplete
87
+ output_state: Incomplete
88
+ condition_aggregator: Incomplete
89
+ def __init__(self, name: str, branches: dict[str, BasePipelineStep | list[BasePipelineStep]], condition: ConditionType | list[ConditionType] | None = None, input_state_map: dict[str, str] | None = None, output_state: str | None = None, condition_aggregator: Callable[[list[Any]], str] = ..., runtime_config_map: dict[str, str] | None = None, fixed_args: dict[str, Any] | None = None, input_map: InputMapSpec | None = None, retry_config: RetryConfig | None = None, error_handler: BaseStepErrorHandler | None = None, cache_store: BaseCache | None = None, cache_config: dict[str, Any] | None = None) -> None:
90
+ '''Initializes a new ConditionalStep.
91
+
92
+ Args:
93
+ name (str): A unique identifier for this pipeline step.
94
+ branches (dict[str, BasePipelineStep | list[BasePipelineStep]]): Mapping of condition results to steps to
95
+ execute.
96
+ condition (ConditionType | list[ConditionType] | None, optional): The condition(s) to evaluate for branch
97
+ selection. If a `Callable`, it receives the merged state and config as keyword arguments. If None,
98
+ the condition is evaluated from the state. Defaults to None.
99
+ input_state_map (dict[str, str] | None, optional): A dictionary mapping the state keys to the component\'s
100
+ input keys. This is only used if the condition is a `Component`. Defaults to None.
101
+ output_state (str | None, optional): Key to store the condition result in the state. If None, the
102
+ output is not saved in the state. Defaults to None.
103
+ condition_aggregator (Callable[[list[Any]], str], optional): Function to aggregate multiple condition
104
+ results. Defaults to joining results with a semicolon (";").
105
+ runtime_config_map (dict[str, str] | None, optional): A dictionary mapping the runtime config keys to the
106
+ component\'s input keys. This is only used if the condition is a
107
+ `Component`. Defaults to None.
108
+ fixed_args (dict[str, Any] | None, optional): Fixed arguments to be passed to the condition.
109
+ Defaults to None.
110
+ input_map (InputMapSpec | None, optional):
111
+ Unified input map. Can be a dict (arg -> str|Val) or a list with elements:
112
+ 1. str for identity mapping
113
+ 2. dict[str, str] for state/config mapping
114
+ 3. dict[str, Val] for fixed args.
115
+ Defaults to None.
116
+ retry_config (RetryConfig | None, optional): Configuration for retry behavior using
117
+ GLLM Core\'s RetryConfig. Defaults to None, in which case no retry config is applied.
118
+ error_handler (BaseStepErrorHandler | None, optional): Strategy to handle errors during execution.
119
+ Defaults to None, in which case the RaiseStepErrorHandler is used.
120
+ cache_store ("BaseCache" | None, optional): Cache store to be used for caching.
121
+ Defaults to None, in which case no cache store is used.
122
+ cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
123
+ Defaults to None, in which case no cache configuration is used.
124
+ '''
125
+ async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> Command:
126
+ """Executes the conditional step, determines the route, and returns a Command.
127
+
128
+ Args:
129
+ state (PipelineState): The current state of the pipeline.
130
+ runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
131
+
132
+ Returns:
133
+ Command: A LangGraph Command object with 'goto' for routing and 'update' for state changes.
134
+ """
135
+ async def execute_direct(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any] | None:
136
+ """Execute this step directly, handling both branch selection and execution.
137
+
138
+ This method is used when the step needs to be executed directly (e.g. in parallel execution).
139
+ It will both select and execute the appropriate branch, unlike execute() which only handles selection.
140
+
141
+ Args:
142
+ state (dict[str, Any]): The current state of the pipeline.
143
+ runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
144
+
145
+ Returns:
146
+ dict[str, Any] | None: Updates to apply to the pipeline state, or None if no updates.
147
+ """
148
+ async def select_path(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> str:
149
+ """Determines the logical route key based on the evaluated condition(s).
150
+
151
+ This method prepares input data, evaluates conditions, aggregates results,
152
+ and determines the logical route key.
153
+
154
+ Args:
155
+ state (dict[str, Any]): The current state of the pipeline, containing all data.
156
+ runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
157
+
158
+ Returns:
159
+ str: The identifier of the selected logical route. Returns DEFAULT_BRANCH if an error occurs
160
+ or if the condition result doesn't match any branch key.
161
+ """
@@ -0,0 +1,71 @@
1
+ from gllm_core.utils.retry import RetryConfig as RetryConfig
2
+ from gllm_datastore.cache.cache import BaseCache as BaseCache
3
+ from gllm_pipeline.alias import InputMapSpec as InputMapSpec
4
+ from gllm_pipeline.steps.conditional_step import ConditionType as ConditionType, ConditionalStep as ConditionalStep, DEFAULT_BRANCH as DEFAULT_BRANCH
5
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
6
+ from gllm_pipeline.steps.step_error_handler.step_error_handler import BaseStepErrorHandler as BaseStepErrorHandler
7
+ from gllm_pipeline.steps.terminator_step import TerminatorStep as TerminatorStep
8
+ from typing import Any
9
+
10
+ class GuardStep(ConditionalStep):
11
+ '''A conditional step that can terminate pipeline execution if a condition is not met.
12
+
13
+ This step evaluates a condition and either:
14
+ 1. Continues execution through the success_branch if the condition is True
15
+ 2. Executes the failure_branch and terminates if the condition is False
16
+
17
+ Example:
18
+ ```python
19
+ pipeline = (
20
+ step_a
21
+ | GuardStep(
22
+ name="auth_check",
23
+ condition=lambda x: x["is_authenticated"],
24
+ success_branch=step_b,
25
+ failure_branch=error_handling_step,
26
+ )
27
+ | step_c
28
+ )
29
+ ```
30
+
31
+ Attributes:
32
+ name (str): A unique identifier for this pipeline step.
33
+ condition (ConditionType): The condition to evaluate.
34
+ input_map (dict[str, str | Val] | None): Unified input map.
35
+ success_branch (BasePipelineStep | list[BasePipelineStep]): Steps to execute if condition is True.
36
+ failure_branch (BasePipelineStep | list[BasePipelineStep] | None): Steps to execute if condition is False.
37
+ If None, pipeline terminates immediately on False condition.
38
+ retry_policy (RetryPolicy | None): Configuration for retry behavior using LangGraph\'s RetryPolicy.
39
+ '''
40
+ def __init__(self, name: str, condition: ConditionType, success_branch: BasePipelineStep | list[BasePipelineStep], failure_branch: BasePipelineStep | list[BasePipelineStep] | None = None, input_state_map: dict[str, str] | None = None, output_state: str | None = None, runtime_config_map: dict[str, str] | None = None, fixed_args: dict[str, Any] | None = None, input_map: InputMapSpec | None = None, retry_config: RetryConfig | None = None, error_handler: BaseStepErrorHandler | None = None, cache_store: BaseCache | None = None, cache_config: dict[str, Any] | None = None) -> None:
41
+ '''Initializes a new GuardStep.
42
+
43
+ Args:
44
+ name (str): A unique identifier for this pipeline step.
45
+ condition (ConditionType): The condition to evaluate.
46
+ success_branch (BasePipelineStep | list[BasePipelineStep]): Steps to execute if condition is True.
47
+ failure_branch (BasePipelineStep | list[BasePipelineStep] | None, optional): Steps to execute if
48
+ condition is False. If None, pipeline terminates immediately. Defaults to None.
49
+ input_state_map (dict[str, str] | None, optional): Mapping of condition input arguments to
50
+ pipeline state keys. Defaults to None.
51
+ output_state (str | None, optional): Key to store the condition result in the pipeline state.
52
+ Defaults to None.
53
+ runtime_config_map (dict[str, str] | None, optional): Mapping of condition input arguments to runtime
54
+ configuration keys. Defaults to None.
55
+ fixed_args (dict[str, Any] | None, optional): Fixed arguments to be passed to the condition.
56
+ Defaults to None.
57
+ input_map (InputMapSpec | None, optional):
58
+ Unified input map. Can be a dict (arg -> str|Val) or a list with elements:
59
+ 1. str for identity mapping
60
+ 2. dict[str, str] for state/config mapping
61
+ 3. dict[str, Val] for fixed args.
62
+ Defaults to None.
63
+ error_handler (BaseStepErrorHandler | None, optional): Strategy to handle errors during execution.
64
+ Defaults to None, in which case the RaiseStepErrorHandler is used.
65
+ retry_config (RetryConfig | None, optional): Configuration for retry behavior using
66
+ GLLM Core\'s RetryConfig. Defaults to None, in which case no retry config is applied.
67
+ cache_store ("BaseCache" | None, optional): Cache store to be used for caching.
68
+ Defaults to None, in which case no cache store is used.
69
+ cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
70
+ Defaults to None, in which case no cache configuration is used.
71
+ '''
@@ -0,0 +1,53 @@
1
+ from _typeshed import Incomplete
2
+ from gllm_core.utils.retry import RetryConfig as RetryConfig
3
+ from gllm_datastore.cache.cache import BaseCache as BaseCache
4
+ from gllm_pipeline.alias import PipelineState as PipelineState
5
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
6
+ from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
7
+ from langgraph.runtime import Runtime as Runtime
8
+ from pydantic import BaseModel as BaseModel
9
+ from typing import Any
10
+
11
+ class LogStep(BasePipelineStep):
12
+ """A specialized pipeline step for logging messages.
13
+
14
+ This step uses the Messenger component to log messages during pipeline execution.
15
+ It supports both plain text messages and template messages with placeholders for state variables.
16
+
17
+ Attributes:
18
+ name (str): A unique identifier for this pipeline step.
19
+ messenger (Messenger): The messenger component used to format and send messages.
20
+ emit_kwargs (dict[str, Any]): Additional arguments to pass to the event emitter.
21
+ retry_policy (RetryPolicy | None): Configuration for retry behavior using LangGraph's RetryPolicy.
22
+ """
23
+ messenger: Incomplete
24
+ emit_kwargs: Incomplete
25
+ def __init__(self, name: str, message: str, is_template: bool = True, emit_kwargs: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, cache_store: BaseCache | None = None, cache_config: dict[str, Any] | None = None) -> None:
26
+ '''Initializes a new LogStep.
27
+
28
+ Args:
29
+ name (str): A unique identifier for this pipeline step.
30
+ message (str): The message to be logged, may contain placeholders enclosed in curly braces.
31
+ is_template (bool, optional): Whether the message contains placeholders. Defaults to True.
32
+ emit_kwargs (dict[str, Any] | None, optional): Additional arguments to pass to the event emitter.
33
+ Defaults to None.
34
+ retry_config (RetryConfig | None, optional): Configuration for retry behavior using
35
+ GLLM Core\'s RetryConfig. Defaults to None, in which case no retry config is applied.
36
+ cache_store ("BaseCache" | None, optional): Cache store to be used for caching.
37
+ Defaults to None, in which case no cache store is used.
38
+ cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
39
+ Defaults to None, in which case no cache configuration is used.
40
+ '''
41
+ async def execute(self, state: PipelineState, runtime: Runtime[dict[str, Any] | BaseModel]) -> None:
42
+ """Executes the log step by formatting and emitting the message.
43
+
44
+ Args:
45
+ state (PipelineState): The current state of the pipeline, containing all data.
46
+ runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
47
+
48
+ Returns:
49
+ None: This step does not modify the pipeline state.
50
+
51
+ Raises:
52
+ RuntimeError: If an error occurs during message emission.
53
+ """
@@ -0,0 +1,92 @@
1
+ from _typeshed import Incomplete
2
+ from gllm_core.schema import Component
3
+ from gllm_core.utils.retry import RetryConfig as RetryConfig
4
+ from gllm_datastore.cache.cache import BaseCache as BaseCache
5
+ from gllm_pipeline.alias import InputMapSpec as InputMapSpec
6
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
7
+ from gllm_pipeline.steps.step_error_handler.step_error_handler import BaseStepErrorHandler as BaseStepErrorHandler
8
+ from gllm_pipeline.utils.async_utils import execute_callable as execute_callable
9
+ from gllm_pipeline.utils.error_handling import ErrorContext as ErrorContext
10
+ from gllm_pipeline.utils.has_inputs_mixin import HasInputsMixin as HasInputsMixin
11
+ from gllm_pipeline.utils.input_map import shallow_dump as shallow_dump
12
+ from langgraph.runtime import Runtime as Runtime
13
+ from pydantic import BaseModel as BaseModel
14
+ from typing import Any, Callable
15
+
16
+ class MapReduceStep(BasePipelineStep, HasInputsMixin):
17
+ """A step that applies a mapping function to multiple inputs and reduces the results.
18
+
19
+ This step performs parallel processing of multiple input items using:
20
+ 1. A map function that processes each input item independently. The map function receives a dictionary
21
+ containing the input values for the current item (derived from input_state_map, runtime_config_map,
22
+ and fixed_args).
23
+ 2. A reduce function that combines all the mapped results.
24
+
25
+ Note on parallel execution:
26
+ 1. For true parallelism, the map_func MUST be an async function or a Component.
27
+ 2. Synchronous map functions will block the event loop and run sequentially.
28
+
29
+ Input handling:
30
+ 1. Automatically detects which inputs are lists/sequences.
31
+ 2. Ensures all list inputs have the same length.
32
+ 3. Broadcasts scalar values to match list lengths.
33
+ 4. If no list inputs, applies the map function once to the whole input.
34
+
35
+ Internally, this step uses asyncio.gather() for efficient parallel execution.
36
+
37
+ Attributes:
38
+ name (str): A unique identifier for this step.
39
+ map_func (Component | Callable[[dict[str, Any]], Any]): Function to apply to each input item.
40
+ Will be run in parallel if the function is an asynchronous function.
41
+ reduce_func (Callable[[list[Any]], Any]): Function to reduce the mapped results.
42
+ input_map (dict[str, str | Any] | None): Unified input map.
43
+ output_state (str): Key to store the reduced result in the pipeline state.
44
+ retry_policy (RetryPolicy | None): Configuration for retry behavior using LangGraph's RetryPolicy.
45
+ """
46
+ map_func: Incomplete  # Per-item mapping callable (Component or callable); set from the __init__ argument.
47
+ reduce_func: Incomplete  # Combiner for the mapped results; per __init__ docs, defaults to returning the list as is.
48
+ output_state: Incomplete  # Pipeline-state key that receives the reduced result; set from the __init__ argument.
49
+ def __init__(self, name: str, output_state: str, map_func: Component | Callable[[dict[str, Any]], Any], reduce_func: Callable[[list[Any]], Any] = ..., input_state_map: dict[str, str] | None = None, runtime_config_map: dict[str, str] | None = None, fixed_args: dict[str, Any] | None = None, input_map: InputMapSpec | None = None, retry_config: RetryConfig | None = None, error_handler: BaseStepErrorHandler | None = None, cache_store: BaseCache | None = None, cache_config: dict[str, Any] | None = None) -> None:
50
+ '''Initialize a new MapReduceStep.
51
+
52
+ Args:
53
+ name (str): A unique identifier for this step.
54
+ output_state (str): Key to store the reduced result in the pipeline state.
55
+ map_func (Component | Callable[[dict[str, Any]], Any]): Function to apply to each input item.
56
+ The map function receives a dictionary containing the input values derived from input_state_map,
57
+ runtime_config_map, and fixed_args.
58
+ reduce_func (Callable[[list[Any]], Any], optional): Function to reduce the mapped results.
59
+ Defaults to a function that returns the list of results as is.
60
+ input_state_map (dict[str, str] | None, optional): Mapping of function arguments to pipeline state keys.
61
+ Defaults to None.
62
+ runtime_config_map (dict[str, str] | None, optional): Mapping of arguments to runtime config keys.
63
+ Defaults to None.
64
+ fixed_args (dict[str, Any] | None, optional): Fixed arguments to pass to the functions. Defaults to None.
65
+ input_map (InputMapSpec | None, optional):
66
+ Unified input map. Can be a dict (arg -> str|Val) or a list with elements:
67
+ 1. str for identity mapping
68
+ 2. dict[str, str] for state/config mapping
69
+ 3. dict[str, Val] for fixed args.
70
+ If provided, it will be used directly instead of synthesizing from maps. Defaults to None.
71
+ retry_config (RetryConfig | None, optional): Configuration for retry behavior using
72
+ GLLM Core\'s RetryConfig. Defaults to None, in which case no retry config is applied.
73
+ error_handler (BaseStepErrorHandler | None, optional): Strategy to handle errors during execution.
74
+ Defaults to None, in which case the RaiseStepErrorHandler is used.
75
+ cache_store ("BaseCache" | None, optional): Cache store to be used for caching.
76
+ Defaults to None, in which case no cache store is used.
77
+ cache_config (dict[str, Any] | None, optional): Cache configuration to be used for caching.
78
+ Defaults to None, in which case no cache configuration is used.
79
+ '''
80
+ async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> dict[str, Any]:
81
+ """Execute the map and reduce operations.
82
+
83
+ Args:
84
+ state (dict[str, Any]): The current state of the pipeline.
85
+ runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
86
+
87
+ Returns:
88
+ dict[str, Any]: The reduced result stored under output_state.
89
+
90
+ Raises:
91
+ RuntimeError: If an error occurs during execution.
92
+ """
@@ -0,0 +1,40 @@
1
+ from gllm_pipeline.steps.pipeline_step import BasePipelineStep as BasePipelineStep
2
+ from langgraph.runtime import Runtime as Runtime
3
+ from pydantic import BaseModel as BaseModel
4
+ from typing import Any
5
+
6
+ class NoOpStep(BasePipelineStep):
7
+ '''A step that does nothing.
8
+
9
+ This step is useful when you want to add a step that does not perform any processing.
10
+ For example, you can use this step to implement a toggle pattern for a certain component.
11
+
12
+ Example:
13
+ ```python
14
+ pipeline = (
15
+ step_a
16
+ | ConditionalStep(
17
+ name="branch",
18
+ branches={
19
+ "execute": step_b,
20
+ "continue": NoOpStep("no_op")
21
+ },
22
+ condition=lambda x: "execute" if x["should_execute"] else "continue"
23
+ )
24
+ | step_c
25
+ )
26
+ ```
27
+
28
+ Attributes:
29
+ name (str): A unique identifier for this pipeline step.
30
+ '''
31
+ # Declared async to match the `execute` signature shared by the other pipeline
+ # steps in this package; per the docstring, the implementation performs no
+ # processing and returns None, leaving the pipeline state untouched.
+ async def execute(self, state: dict[str, Any], runtime: Runtime[dict[str, Any] | BaseModel]) -> None:
32
+ """Executes this step, which does nothing.
33
+
34
+ Args:
35
+ state (dict[str, Any]): The current state of the pipeline.
36
+ runtime (Runtime[dict[str, Any] | BaseModel]): Runtime information for this step's execution.
37
+
38
+ Returns:
39
+ None: This step does not modify the pipeline state.
40
+ """