lionagi 0.6.1__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. lionagi/libs/token_transform/__init__.py +0 -0
  2. lionagi/libs/token_transform/llmlingua.py +1 -0
  3. lionagi/libs/token_transform/perplexity.py +439 -0
  4. lionagi/libs/token_transform/synthlang.py +409 -0
  5. lionagi/operations/ReAct/ReAct.py +126 -0
  6. lionagi/operations/ReAct/utils.py +28 -0
  7. lionagi/operations/__init__.py +1 -9
  8. lionagi/operations/_act/act.py +73 -0
  9. lionagi/operations/chat/__init__.py +3 -0
  10. lionagi/operations/chat/chat.py +173 -0
  11. lionagi/operations/communicate/__init__.py +0 -0
  12. lionagi/operations/communicate/communicate.py +167 -0
  13. lionagi/operations/instruct/__init__.py +3 -0
  14. lionagi/operations/instruct/instruct.py +29 -0
  15. lionagi/operations/interpret/__init__.py +3 -0
  16. lionagi/operations/interpret/interpret.py +40 -0
  17. lionagi/operations/operate/__init__.py +3 -0
  18. lionagi/operations/operate/operate.py +189 -0
  19. lionagi/operations/parse/__init__.py +3 -0
  20. lionagi/operations/parse/parse.py +125 -0
  21. lionagi/operations/select/__init__.py +0 -4
  22. lionagi/operations/select/select.py +11 -30
  23. lionagi/operations/select/utils.py +13 -2
  24. lionagi/operations/translate/__init__.py +0 -0
  25. lionagi/operations/translate/translate.py +47 -0
  26. lionagi/operations/types.py +16 -0
  27. lionagi/operatives/action/manager.py +20 -21
  28. lionagi/operatives/strategies/__init__.py +3 -0
  29. lionagi/session/branch.py +1098 -929
  30. lionagi/version.py +1 -1
  31. {lionagi-0.6.1.dist-info → lionagi-0.7.0.dist-info}/METADATA +1 -1
  32. {lionagi-0.6.1.dist-info → lionagi-0.7.0.dist-info}/RECORD +45 -26
  33. lionagi/libs/compress/models.py +0 -66
  34. lionagi/libs/compress/utils.py +0 -69
  35. lionagi/operations/select/prompt.py +0 -5
  36. /lionagi/{libs/compress → operations/ReAct}/__init__.py +0 -0
  37. /lionagi/operations/{strategies → _act}/__init__.py +0 -0
  38. /lionagi/{operations → operatives}/strategies/base.py +0 -0
  39. /lionagi/{operations → operatives}/strategies/concurrent.py +0 -0
  40. /lionagi/{operations → operatives}/strategies/concurrent_chunk.py +0 -0
  41. /lionagi/{operations → operatives}/strategies/concurrent_sequential_chunk.py +0 -0
  42. /lionagi/{operations → operatives}/strategies/params.py +0 -0
  43. /lionagi/{operations → operatives}/strategies/sequential.py +0 -0
  44. /lionagi/{operations → operatives}/strategies/sequential_chunk.py +0 -0
  45. /lionagi/{operations → operatives}/strategies/sequential_concurrent_chunk.py +0 -0
  46. /lionagi/{operations → operatives}/strategies/utils.py +0 -0
  47. {lionagi-0.6.1.dist-info → lionagi-0.7.0.dist-info}/WHEEL +0 -0
  48. {lionagi-0.6.1.dist-info → lionagi-0.7.0.dist-info}/licenses/LICENSE +0 -0
lionagi/operations/chat/chat.py
@@ -0,0 +1,173 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import TYPE_CHECKING, Literal
+
+ from pydantic import BaseModel
+
+ from lionagi.protocols.types import (
+     ActionResponse,
+     AssistantResponse,
+     Instruction,
+     Log,
+     RoledMessage,
+ )
+ from lionagi.service.imodel import iModel
+ from lionagi.utils import copy
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def chat(
+     branch: "Branch",
+     instruction=None,
+     guidance=None,
+     context=None,
+     sender=None,
+     recipient=None,
+     request_fields=None,
+     response_format: type[BaseModel] = None,
+     progression=None,
+     imodel: iModel = None,
+     tool_schemas=None,
+     images: list = None,
+     image_detail: Literal["low", "high", "auto"] = None,
+     plain_content: str = None,
+     return_ins_res_message: bool = False,
+     **kwargs,
+ ) -> tuple[Instruction, AssistantResponse]:
+     ins: Instruction = branch.msgs.create_instruction(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+         sender=sender or branch.user or "user",
+         recipient=recipient or branch.id,
+         response_format=response_format,
+         request_fields=request_fields,
+         images=images,
+         image_detail=image_detail,
+         tool_schemas=tool_schemas,
+         plain_content=plain_content,
+     )
+
+     progression = progression or branch.msgs.progression
+     messages: list[RoledMessage] = [
+         branch.msgs.messages[i] for i in progression
+     ]
+
+     use_ins = None
+     _to_use = []
+     _action_responses: set[ActionResponse] = set()
+
+     for i in messages:
+         if isinstance(i, ActionResponse):
+             _action_responses.add(i)
+         if isinstance(i, AssistantResponse):
+             j = AssistantResponse(
+                 role=i.role,
+                 content=copy(i.content),
+                 sender=i.sender,
+                 recipient=i.recipient,
+                 template=i.template,
+             )
+             _to_use.append(j)
+         if isinstance(i, Instruction):
+             j = Instruction(
+                 role=i.role,
+                 content=copy(i.content),
+                 sender=i.sender,
+                 recipient=i.recipient,
+                 template=i.template,
+             )
+             j.tool_schemas = None
+             j.respond_schema_info = None
+             j.request_response_format = None
+
+             if _action_responses:
+                 d_ = [k.content for k in _action_responses]
+                 for z in d_:
+                     if z not in j.context:
+                         j.context.append(z)
+
+                 _to_use.append(j)
+                 _action_responses = set()
+             else:
+                 _to_use.append(j)
+
+     messages = _to_use
+     if _action_responses:
+         j = ins.model_copy()
+         d_ = [k.content for k in _action_responses]
+         for z in d_:
+             if z not in j.context:
+                 j.context.append(z)
+         use_ins = j
+
+     if messages and len(messages) > 1:
+         _msgs = [messages[0]]
+
+         for i in messages[1:]:
+             if isinstance(i, AssistantResponse):
+                 if isinstance(_msgs[-1], AssistantResponse):
+                     _msgs[-1].response = (
+                         f"{_msgs[-1].response}\n\n{i.response}"
+                     )
+                 else:
+                     _msgs.append(i)
+             else:
+                 if isinstance(_msgs[-1], AssistantResponse):
+                     _msgs.append(i)
+         messages = _msgs
+
+     imodel = imodel or branch.chat_model
+     if branch.msgs.system and imodel.sequential_exchange:
+         messages = [msg for msg in messages if msg.role != "system"]
+         first_instruction = None
+
+         if len(messages) == 0:
+             first_instruction = ins.model_copy()
+             first_instruction.guidance = branch.msgs.system.rendered + (
+                 first_instruction.guidance or ""
+             )
+             messages.append(first_instruction)
+         elif len(messages) >= 1:
+             first_instruction = messages[0]
+             if not isinstance(first_instruction, Instruction):
+                 raise ValueError(
+                     "First message in progression must be an Instruction or System"
+                 )
+             first_instruction = first_instruction.model_copy()
+             first_instruction.guidance = branch.msgs.system.rendered + (
+                 first_instruction.guidance or ""
+             )
+             messages[0] = first_instruction
+             messages.append(use_ins or ins)
+
+     else:
+         messages.append(use_ins or ins)
+
+     kwargs["messages"] = [i.chat_msg for i in messages]
+     imodel = imodel or branch.chat_model
+
+     api_call = None
+     if kwargs.get("stream", None) is True:
+         api_call = await imodel.stream(**kwargs)
+     else:
+         api_call = await imodel.invoke(**kwargs)
+
+     branch._log_manager.log(Log.create(api_call))
+
+     if return_ins_res_message:
+         # Wrap result in `AssistantResponse` and return
+         return ins, AssistantResponse.create(
+             assistant_response=api_call.response,
+             sender=branch.id,
+             recipient=branch.user,
+         )
+     return AssistantResponse.create(
+         assistant_response=api_call.response,
+         sender=branch.id,
+         recipient=branch.user,
+     ).response
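
For orientation, a minimal usage sketch of this new chat() operation as surfaced on a Branch (branch.chat is the entry point the other operations in this diff call). The Branch() construction and the prompt text are illustrative assumptions, not taken from this diff.

import asyncio

from lionagi.session.branch import Branch

async def main():
    branch = Branch()  # assumes a default chat_model is configured

    # Default path: returns the assistant's response text.
    reply = await branch.chat(instruction="Summarize unified diffs in one line.")
    print(reply)

    # With return_ins_res_message=True, the (Instruction, AssistantResponse)
    # pair is returned instead, as the function above shows. Note chat() itself
    # does not add messages to the branch -- callers like communicate() do that.
    ins, res = await branch.chat(
        instruction="Now one line on wheel files.",
        return_ins_res_message=True,
    )
    print(res.response)

asyncio.run(main())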
lionagi/operations/communicate/__init__.py
File without changes
lionagi/operations/communicate/communicate.py
@@ -0,0 +1,167 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ import logging
+ from typing import TYPE_CHECKING
+
+ from lionagi.libs.validate.fuzzy_validate_mapping import fuzzy_validate_mapping
+ from lionagi.utils import UNDEFINED
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def communicate(
+     branch: "Branch",
+     instruction=None,
+     *,
+     guidance=None,
+     context=None,
+     plain_content=None,
+     sender=None,
+     recipient=None,
+     progression=None,
+     request_model=None,
+     response_format=None,
+     request_fields=None,
+     imodel=None,
+     chat_model=None,
+     parse_model=None,
+     skip_validation=False,
+     images=None,
+     image_detail="auto",
+     num_parse_retries=3,
+     fuzzy_match_kwargs=None,
+     clear_messages=False,
+     operative_model=None,
+     **kwargs,
+ ):
+     """
+     A simpler orchestration than `operate()`, typically without tool invocation.
+
+     **Flow**:
+       1. Sends an instruction (or conversation) to the chat model.
+       2. Optionally parses the response into a structured model or fields.
+       3. Returns either the raw string, the parsed model, or a dict of fields.
+
+     Args:
+         instruction (Instruction | dict, optional):
+             The user's main query or data.
+         guidance (JsonValue, optional):
+             Additional instructions or context for the LLM.
+         context (JsonValue, optional):
+             Extra data or context.
+         plain_content (str, optional):
+             Plain text content appended to the instruction.
+         sender (SenderRecipient, optional):
+             Sender ID (defaults to `Branch.user`).
+         recipient (SenderRecipient, optional):
+             Recipient ID (defaults to `self.id`).
+         progression (ID.IDSeq, optional):
+             Custom ordering of messages.
+         request_model (type[BaseModel] | BaseModel | None, optional):
+             Model for validating or structuring the LLM's response.
+         response_format (type[BaseModel], optional):
+             Alias for `request_model`. If both are provided, raises ValueError.
+         request_fields (dict|list[str], optional):
+             If you only need certain fields from the LLM's response.
+         imodel (iModel, optional):
+             Deprecated alias for `chat_model`.
+         chat_model (iModel, optional):
+             An alternative to the default chat model.
+         parse_model (iModel, optional):
+             If parsing is needed, you can override the default parse model.
+         skip_validation (bool, optional):
+             If True, returns the raw response string unvalidated.
+         images (list, optional):
+             Any relevant images.
+         image_detail (Literal["low","high","auto"], optional):
+             Image detail level (if used).
+         num_parse_retries (int, optional):
+             Maximum parsing retries (capped at 5).
+         fuzzy_match_kwargs (dict, optional):
+             Additional settings for fuzzy field matching (if used).
+         clear_messages (bool, optional):
+             Whether to clear stored messages before sending.
+         operative_model (type[BaseModel], optional):
+             Deprecated, alias for `response_format`.
+         **kwargs:
+             Additional arguments for the underlying LLM call.
+
+     Returns:
+         Any:
+             - Raw string (if `skip_validation=True`),
+             - A validated Pydantic model,
+             - A dict of the requested fields,
+             - or `None` if parsing fails and `handle_validation='return_none'`.
+     """
+     if operative_model:
+         logging.warning(
+             "operative_model is deprecated. Use response_format instead."
+         )
+     if (
+         (operative_model and response_format)
+         or (operative_model and request_model)
+         or (response_format and request_model)
+     ):
+         raise ValueError(
+             "Cannot specify both operative_model and response_format "
+             "or operative_model and request_model as they are aliases "
+             "for the same parameter."
+         )
+
+     response_format = response_format or operative_model or request_model
+
+     imodel = imodel or chat_model or branch.chat_model
+     parse_model = parse_model or branch.parse_model
+
+     if clear_messages:
+         branch.msgs.clear_messages()
+
+     if num_parse_retries > 5:
+         logging.warning(
+             f"Are you sure you want to retry {num_parse_retries} "
+             "times? lowering retry attempts to 5. Suggestion is under 3"
+         )
+         num_parse_retries = 5
+
+     ins, res = await branch.chat(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+         sender=sender,
+         recipient=recipient,
+         response_format=response_format,
+         progression=progression,
+         imodel=imodel,
+         images=images,
+         image_detail=image_detail,
+         plain_content=plain_content,
+         return_ins_res_message=True,
+         **kwargs,
+     )
+     branch.msgs.add_message(instruction=ins)
+     branch.msgs.add_message(assistant_response=res)
+
+     if skip_validation:
+         return res.response
+
+     if response_format is not None:
+         return await branch.parse(
+             text=res.response,
+             request_type=response_format,
+             max_retries=num_parse_retries,
+             **(fuzzy_match_kwargs or {}),
+         )
+
+     if request_fields is not None:
+         _d = fuzzy_validate_mapping(
+             res.response,
+             request_fields,
+             handle_unmatched="force",
+             fill_value=UNDEFINED,
+         )
+         return {k: v for k, v in _d.items() if v != UNDEFINED}
+
+     return res.response
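
A hedged sketch of communicate() with structured output; the Summary model and prompt are illustrative assumptions. When response_format is given, the raw reply is routed through branch.parse() as shown above, so the caller gets a validated model instance rather than a string.

import asyncio

from pydantic import BaseModel

from lionagi.session.branch import Branch

class Summary(BaseModel):
    title: str
    bullets: list[str]

async def main():
    branch = Branch()  # assumes a default chat_model is configured
    result = await branch.communicate(
        instruction="Summarize the plot of Hamlet.",
        response_format=Summary,  # reply is parsed via branch.parse()
        num_parse_retries=2,      # values above 5 are capped, per the code above
    )
    print(result)  # a validated Summary instance

asyncio.run(main())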
lionagi/operations/instruct/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/operations/instruct/instruct.py
@@ -0,0 +1,29 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import TYPE_CHECKING, Any
+
+ from lionagi.operatives.types import Instruct
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def instruct(
+     branch: "Branch",
+     instruct: Instruct,
+     /,
+     **kwargs,
+ ) -> Any:
+     config = {
+         **(instruct.to_dict() if isinstance(instruct, Instruct) else instruct),
+         **kwargs,
+     }
+     if any(i in config and config[i] for i in Instruct.reserved_kwargs):
+         if "response_format" in config or "request_model" in config:
+             return await branch.operate(**config)
+         for i in Instruct.reserved_kwargs:
+             config.pop(i, None)
+
+     return await branch.communicate(**config)
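
The routing rule here deserves spelling out: the call escalates to operate() only when the merged config both sets one of Instruct.reserved_kwargs and supplies a response_format or request_model; otherwise the reserved keys are stripped and the call falls through to communicate(). A sketch, under the assumption that reason and actions are among the reserved keys (their exact membership is not shown in this diff), with SeverityModel as a hypothetical Pydantic model:

from lionagi.operatives.types import Instruct

# Plain dicts are accepted in place of Instruct instances (see the config merge above).
plain = {"instruction": "Classify this ticket's severity."}
# await branch.instruct(plain)
# -> no reserved kwargs set, so this routes to branch.communicate(**config)

structured = Instruct(
    instruction="Classify this ticket's severity.",
    reason=True,  # assumed member of Instruct.reserved_kwargs
)
# await branch.instruct(structured, response_format=SeverityModel)
# -> reserved kwarg + response_format present, so this routes to
#    branch.operate(**config); without response_format/request_model the
#    reserved keys would be dropped and branch.communicate(**config) used.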
lionagi/operations/interpret/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/operations/interpret/interpret.py
@@ -0,0 +1,40 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def interpret(
+     branch: "Branch",
+     text: str,
+     domain: str | None = None,
+     style: str | None = None,
+     **kwargs,
+ ) -> str:
+     instruction = (
+         "Rewrite the following user input into a clear, structured prompt or "
+         "query for an LLM, ensuring any implicit details are made explicit. "
+         "Return only the improved user prompt."
+     )
+     guidance = (
+         f"Domain hint: {domain or 'general'}. "
+         f"Desired style: {style or 'concise'}. "
+         "You can add or clarify context if needed."
+     )
+     context = [f"User input: {text}"]
+
+     # Default temperature if none provided
+     kwargs["temperature"] = kwargs.get("temperature", 0.1)
+
+     refined_prompt = await branch.communicate(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+         skip_validation=True,
+         **kwargs,
+     )
+     return str(refined_prompt)
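
A usage sketch for interpret(), calling the module function directly with an illustrative input; only the Branch() construction is assumed. Since skip_validation=True is hardwired, the result is always the refined prompt as plain text, and temperature falls back to 0.1 unless the caller overrides it.

import asyncio

from lionagi.operations.interpret.interpret import interpret
from lionagi.session.branch import Branch

async def main():
    branch = Branch()  # assumes a default chat_model is configured
    refined = await interpret(
        branch,
        "plz make my sql query faster its slow",  # illustrative raw input
        domain="databases",  # fills the "Domain hint" slot in the guidance
        style="detailed",    # fills the "Desired style" slot
    )
    print(refined)  # the rewritten, explicit prompt string

asyncio.run(main())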
lionagi/operations/operate/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/operations/operate/operate.py
@@ -0,0 +1,189 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ import logging
+ from typing import TYPE_CHECKING, Literal
+
+ from pydantic import BaseModel, JsonValue
+
+ from lionagi.operatives.models.field_model import FieldModel
+ from lionagi.operatives.models.model_params import ModelParams
+ from lionagi.operatives.types import Instruct, Operative, Step, ToolRef
+ from lionagi.protocols.types import Instruction, Progression, SenderRecipient
+ from lionagi.service.imodel import iModel
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def operate(
+     branch: "Branch",
+     *,
+     instruct: Instruct = None,
+     instruction: Instruction | JsonValue = None,
+     guidance: JsonValue = None,
+     context: JsonValue = None,
+     sender: SenderRecipient = None,
+     recipient: SenderRecipient = None,
+     progression: Progression = None,
+     imodel: iModel = None,  # deprecated, alias of chat_model
+     chat_model: iModel = None,
+     invoke_actions: bool = True,
+     tool_schemas: list[dict] = None,
+     images: list = None,
+     image_detail: Literal["low", "high", "auto"] = None,
+     parse_model: iModel = None,
+     skip_validation: bool = False,
+     tools: ToolRef = None,
+     operative: Operative = None,
+     response_format: type[BaseModel] = None,  # alias of operative.request_type
+     return_operative: bool = False,
+     actions: bool = False,
+     reason: bool = False,
+     action_kwargs: dict = None,
+     field_models: list[FieldModel] = None,
+     exclude_fields: list | dict | None = None,
+     request_params: ModelParams = None,
+     request_param_kwargs: dict = None,
+     response_params: ModelParams = None,
+     response_param_kwargs: dict = None,
+     handle_validation: Literal[
+         "raise", "return_value", "return_none"
+     ] = "return_value",
+     operative_model: type[BaseModel] = None,
+     request_model: type[BaseModel] = None,
+     **kwargs,
+ ) -> list | BaseModel | None | dict | str:
+     if operative_model:
+         logging.warning(
+             "`operative_model` is deprecated. Use `response_format` instead."
+         )
+     if (
+         (operative_model and response_format)
+         or (operative_model and request_model)
+         or (response_format and request_model)
+     ):
+         raise ValueError(
+             "Cannot specify both `operative_model` and `response_format` (or `request_model`) "
+             "as they are aliases of each other."
+         )
+
+     # Use the final chosen format
+     response_format = response_format or operative_model or request_model
+
+     # Decide which chat model to use
+     chat_model = chat_model or imodel or branch.chat_model
+     parse_model = parse_model or chat_model
+
+     # Convert dict-based instructions to Instruct if needed
+     if isinstance(instruct, dict):
+         instruct = Instruct(**instruct)
+
+     # Or create a new Instruct if not provided
+     instruct = instruct or Instruct(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+     )
+
+     # If reason or actions are requested, apply them to instruct
+     if reason:
+         instruct.reason = True
+     if actions:
+         instruct.actions = True
+
+     # 1) Create or update the Operative
+     operative = Step.request_operative(
+         request_params=request_params,
+         reason=instruct.reason,
+         actions=instruct.actions,
+         exclude_fields=exclude_fields,
+         base_type=response_format,
+         field_models=field_models,
+         **(request_param_kwargs or {}),
+     )
+
+     # If the instruction signals actions, ensure tools are provided
+     if instruct.actions:
+         tools = tools or True
+
+     # If we want to auto-invoke tools, fetch or generate the schemas
+     if invoke_actions and tools:
+         tool_schemas = branch.acts.get_tool_schema(tools=tools)
+
+     # 2) Send the instruction to the chat model
+     ins, res = await branch.chat(
+         instruction=instruct.instruction,
+         guidance=instruct.guidance,
+         context=instruct.context,
+         sender=sender,
+         recipient=recipient,
+         response_format=operative.request_type,
+         progression=progression,
+         imodel=chat_model,  # or the override
+         images=images,
+         image_detail=image_detail,
+         tool_schemas=tool_schemas,
+         return_ins_res_message=True,
+         **kwargs,
+     )
+     branch.msgs.add_message(instruction=ins)
+     branch.msgs.add_message(assistant_response=res)
+
+     # 3) Populate the operative with the raw response
+     operative.response_str_dict = res.response
+
+     # 4) Possibly skip validation
+     if skip_validation:
+         return operative if return_operative else operative.response_str_dict
+
+     # 5) Parse or validate the response into the operative's model
+     response_model = operative.update_response_model(res.response)
+     if not isinstance(response_model, BaseModel):
+         # If the response isn't directly a model, attempt a parse
+         response_model = await branch.parse(
+             text=res.response,
+             request_type=operative.request_type,
+             max_retries=operative.max_retries,
+             handle_validation="return_value",
+         )
+         operative.response_model = operative.update_response_model(
+             text=response_model
+         )
+
+     # If we still fail to parse, handle according to user preference
+     if not isinstance(response_model, BaseModel):
+         match handle_validation:
+             case "return_value":
+                 return response_model
+             case "return_none":
+                 return None
+             case "raise":
+                 raise ValueError(
+                     "Failed to parse the LLM response into the requested format."
+                 )
+
+     # 6) If no tool invocation is needed, return result or operative
+     if not invoke_actions:
+         return operative if return_operative else operative.response_model
+
+     # 7) If the model indicates an action is required, call the tools
+     if (
+         getattr(response_model, "action_required", None) is True
+         and getattr(response_model, "action_requests", None) is not None
+     ):
+         action_response_models = await branch.act(
+             response_model.action_requests,
+             **(action_kwargs or {}),
+         )
+         # Possibly refine the operative with the tool outputs
+         operative = Step.respond_operative(
+             response_params=response_params,
+             operative=operative,
+             additional_data={"action_responses": action_response_models},
+             **(response_param_kwargs or {}),
+         )
+
+     # Return final result or the full operative
+     return operative if return_operative else operative.response_model
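
Tying the seven numbered steps together, a hedged end-to-end sketch of operate() with actions enabled. The weather tool, its registration via the Branch constructor, and the Answer model are illustrative assumptions; this diff only shows that schemas come from branch.acts.get_tool_schema and that matched action_requests run through branch.act.

import asyncio

from pydantic import BaseModel

from lionagi.session.branch import Branch

def get_weather(city: str) -> str:
    """Illustrative tool; the registration path below is an assumption."""
    return f"Sunny in {city}"

class Answer(BaseModel):
    answer: str

async def main():
    branch = Branch(tools=[get_weather])  # assumed registration path
    result = await branch.operate(
        instruction="What is the weather in Paris?",
        response_format=Answer,  # becomes operative.request_type
        actions=True,            # forces tools=True and adds action fields
        reason=True,             # adds a reasoning field to the operative
    )
    # With return_operative=False (the default) this is the validated response
    # model, refined with any action_responses produced by the tool calls.
    print(result)

asyncio.run(main())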
lionagi/operations/parse/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0