lionagi 0.17.11__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/ln/types.py +32 -5
- lionagi/models/field_model.py +9 -0
- lionagi/operations/ReAct/ReAct.py +474 -237
- lionagi/operations/ReAct/utils.py +3 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/chat/chat.py +130 -114
- lionagi/operations/communicate/communicate.py +101 -42
- lionagi/operations/flow.py +4 -4
- lionagi/operations/interpret/interpret.py +65 -20
- lionagi/operations/operate/operate.py +212 -106
- lionagi/operations/parse/parse.py +170 -142
- lionagi/operations/select/select.py +78 -17
- lionagi/operations/select/utils.py +1 -1
- lionagi/operations/types.py +119 -23
- lionagi/protocols/generic/log.py +3 -2
- lionagi/protocols/messages/__init__.py +27 -0
- lionagi/protocols/messages/action_request.py +86 -184
- lionagi/protocols/messages/action_response.py +73 -131
- lionagi/protocols/messages/assistant_response.py +130 -159
- lionagi/protocols/messages/base.py +26 -18
- lionagi/protocols/messages/instruction.py +281 -625
- lionagi/protocols/messages/manager.py +112 -62
- lionagi/protocols/messages/message.py +87 -197
- lionagi/protocols/messages/system.py +52 -123
- lionagi/protocols/types.py +0 -2
- lionagi/service/connections/endpoint.py +0 -8
- lionagi/service/connections/providers/oai_.py +29 -94
- lionagi/service/connections/providers/ollama_.py +3 -2
- lionagi/service/hooks/hooked_event.py +2 -2
- lionagi/service/third_party/claude_code.py +3 -2
- lionagi/service/third_party/openai_models.py +433 -0
- lionagi/session/branch.py +170 -178
- lionagi/session/session.py +3 -9
- lionagi/tools/file/reader.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/RECORD +41 -49
- lionagi/operations/_act/act.py +0 -86
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -58
- /lionagi/operations/{_act → act}/__init__.py +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
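Among the file moves above, the private `lionagi/operations/_act` package becomes the public `lionagi/operations/act` package (see `/lionagi/operations/{_act → act}/__init__.py` and the deleted `lionagi/operations/_act/act.py`). A minimal migration sketch for downstream code; the 0.17.x import below is an assumption, since the old module's exports are not shown in this diff, while the 0.18.0 line matches the imports used by the rewritten `operate.py`:

```python
# 0.17.x private path (module deleted in 0.18.0) -- assumed, not shown in the diff:
# from lionagi.operations._act.act import ...

# 0.18.0 public path, as imported by operate.py in the diff below:
from lionagi.operations.act.act import act, _get_default_call_params
```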
lionagi/operations/operate/operate.py (+212 -106)

```diff
@@ -1,46 +1,26 @@
 # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
-import logging
+import warnings
 from typing import TYPE_CHECKING, Literal
 
 from pydantic import BaseModel, JsonValue
 
 from lionagi.fields.instruct import Instruct
+from lionagi.ln.fuzzy import FuzzyMatchKeysParams
 from lionagi.models import FieldModel, ModelParams
-from lionagi.protocols.operatives.step import Operative, Step
 from lionagi.protocols.types import Instruction, Progression, SenderRecipient
-from lionagi.service.imodel import iModel
 from lionagi.session.branch import AlcallParams
 
+from ..types import ActionParam, ChatParam, HandleValidation, ParseParam
+
 if TYPE_CHECKING:
+    from lionagi.protocols.operatives.step import Operative
+    from lionagi.service.imodel import iModel
     from lionagi.session.branch import Branch, ToolRef
 
 
-def
-    operative_model: type[BaseModel] = None,
-    request_model: type[BaseModel] = None,
-    response_format: type[BaseModel] = None,
-):
-    if operative_model:
-        logging.warning(
-            "`operative_model` is deprecated. Use `response_format` instead."
-        )
-    if (
-        (operative_model and response_format)
-        or (operative_model and request_model)
-        or (response_format and request_model)
-    ):
-        raise ValueError(
-            "Cannot specify both `operative_model` and `response_format` (or `request_model`) "
-            "as they are aliases of each other."
-        )
-
-    # Use the final chosen format
-    return response_format or operative_model or request_model
-
-
-async def operate(
+def prepare_operate_kw(
     branch: "Branch",
     *,
     instruct: Instruct = None,
```
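The import hunk above moves `Operative` and `iModel` behind `TYPE_CHECKING`, so neither is imported at runtime any more; that is why the signature hunks below quote those annotations as strings. A standalone sketch of the pattern (the `configure` function is illustrative, not lionagi API):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers; skipped at runtime, which keeps
    # module import cheap and sidesteps circular-import problems.
    from lionagi.service.imodel import iModel


def configure(chat_model: "iModel" = None) -> None:
    # The quoted annotation is never evaluated at runtime, so the missing
    # runtime binding of `iModel` is harmless here.
    ...
```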
```diff
@@ -50,18 +30,17 @@ async def operate(
     sender: SenderRecipient = None,
     recipient: SenderRecipient = None,
     progression: Progression = None,
-    imodel: iModel = None,  # deprecated, alias of chat_model
-    chat_model: iModel = None,
+    imodel: "iModel" = None,  # deprecated, alias of chat_model
+    chat_model: "iModel" = None,
     invoke_actions: bool = True,
     tool_schemas: list[dict] = None,
     images: list = None,
     image_detail: Literal["low", "high", "auto"] = None,
-    parse_model: iModel = None,
+    parse_model: "iModel" = None,
     skip_validation: bool = False,
     tools: "ToolRef" = None,
     operative: "Operative" = None,
     response_format: type[BaseModel] = None,  # alias of operative.request_type
-    return_operative: bool = False,
     actions: bool = False,
     reason: bool = False,
     call_params: AlcallParams = None,
```
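Note `handle_validation` in the next hunk: the inline `Literal["raise", "return_value", "return_none"]` annotation is replaced by the `HandleValidation` alias that the first hunk imports from `..types`. Its definition is not part of this diff; assuming it is a plain type alias with the same three members, it would look like:

```python
from typing import Literal

# Assumed definition of lionagi.operations.types.HandleValidation,
# inferred from the inline Literal it replaces.
HandleValidation = Literal["raise", "return_value", "return_none"]
```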
```diff
@@ -71,21 +50,38 @@
     exclude_fields: list | dict | None = None,
     request_params: ModelParams = None,
     request_param_kwargs: dict = None,
-    response_params: ModelParams = None,
-    response_param_kwargs: dict = None,
-    handle_validation: Literal[
-        "raise", "return_value", "return_none"
-    ] = "return_value",
+    handle_validation: HandleValidation = "return_value",
     operative_model: type[BaseModel] = None,
     request_model: type[BaseModel] = None,
     include_token_usage_to_model: bool = False,
+    clear_messages: bool = False,
     **kwargs,
 ) -> list | BaseModel | None | dict | str:
-
-
-
+    # Handle deprecated parameters
+    if operative_model:
+        warnings.warn(
+            "Parameter 'operative_model' is deprecated. Use 'response_format' instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+    if imodel:
+        warnings.warn(
+            "Parameter 'imodel' is deprecated. Use 'chat_model' instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
 
-
+    if (
+        (operative_model and response_format)
+        or (operative_model and request_model)
+        or (response_format and request_model)
+    ):
+        raise ValueError(
+            "Cannot specify both `operative_model` and `response_format` (or `request_model`) "
+            "as they are aliases of each other."
+        )
+
+    response_format = response_format or operative_model or request_model
     chat_model = chat_model or imodel or branch.chat_model
     parse_model = parse_model or chat_model
 
```
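Two behavioral notes on the hunk above. Deprecation reporting moves from a `logging.warning` call to genuine `DeprecationWarning`s issued with `stacklevel=2`, so the warning points at the caller's line; and the three response-format aliases stay mutually exclusive, collapsing in the order `response_format or operative_model or request_model`. A short sketch of the caller's side; the filter line is plain standard-library usage, not lionagi API:

```python
import warnings

# DeprecationWarning is hidden by default outside __main__ and test runners;
# opt in while migrating off `imodel=` and `operative_model=`.
warnings.filterwarnings("default", category=DeprecationWarning)

# Passing two aliases at once still raises ValueError, as quoted above;
# passing only the deprecated `operative_model=` now warns, then the value
# is folded into `response_format` before use.
```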
```diff
@@ -108,103 +104,213 @@
     if action_strategy:
         instruct.action_strategy = action_strategy
 
-    #
+    # Build the Operative - always create it for backwards compatibility
+    from lionagi.protocols.operatives.step import Step
+
     operative = Step.request_operative(
         request_params=request_params,
         reason=instruct.reason,
-        actions=instruct.actions,
+        actions=instruct.actions or actions,
         exclude_fields=exclude_fields,
         base_type=response_format,
         field_models=field_models,
         **(request_param_kwargs or {}),
     )
+    # Use the operative's request_type which is a proper Pydantic model
+    # created from field_models if provided
+    final_response_format = operative.request_type
 
-    #
-
-    tools = tools or True
-
-    # If we want to auto-invoke tools, fetch or generate the schemas
-    if invoke_actions and tools:
-        tool_schemas = tool_schemas or branch.acts.get_tool_schema(tools=tools)
-
-    # 2) Send the instruction to the chat model
-    ins, res = await branch.chat(
-        instruction=instruct.instruction,
+    # Build contexts
+    chat_param = ChatParam(
         guidance=instruct.guidance,
         context=instruct.context,
-        sender=sender,
-        recipient=recipient,
-        response_format=
+        sender=sender or branch.user or "user",
+        recipient=recipient or branch.id,
+        response_format=final_response_format,
         progression=progression,
-
+        tool_schemas=tool_schemas,
         images=images,
         image_detail=image_detail,
-
-        return_ins_res_message=True,
+        plain_content=None,
         include_token_usage_to_model=include_token_usage_to_model,
-
+        imodel=chat_model,
+        imodel_kw=kwargs,
     )
-    branch.msgs.add_message(instruction=ins)
-    branch.msgs.add_message(assistant_response=res)
 
-
-
+    parse_param = None
+    if final_response_format and not skip_validation:
+        from ..parse.parse import get_default_call
 
-
-
-
-
-
-
-
-
-
-
-
-
+        parse_param = ParseParam(
+            response_format=final_response_format,
+            fuzzy_match_params=FuzzyMatchKeysParams(),
+            handle_validation="return_value",
+            alcall_params=get_default_call(),
+            imodel=parse_model,
+            imodel_kw={},
+        )
+
+    action_param = None
+    if invoke_actions and (instruct.actions or actions):
+        from ..act.act import _get_default_call_params
+
+        action_param = ActionParam(
+            action_call_params=call_params or _get_default_call_params(),
+            tools=tools,
+            strategy=action_strategy
+            or instruct.action_strategy
+            or "concurrent",
+            suppress_errors=True,
+            verbose_action=verbose_action,
+        )
+
+    return {
+        "instruction": instruct.instruction,
+        "chat_param": chat_param,
+        "parse_param": parse_param,
+        "action_param": action_param,
+        "handle_validation": handle_validation,
+        "invoke_actions": invoke_actions,
+        "skip_validation": skip_validation,
+        "clear_messages": clear_messages,
+    }
+
+
+async def operate(
+    branch: "Branch",
+    instruction: JsonValue | Instruction,
+    chat_param: ChatParam,
+    action_param: ActionParam | None = None,
+    parse_param: ParseParam | None = None,
+    handle_validation: HandleValidation = "return_value",
+    invoke_actions: bool = True,
+    skip_validation: bool = False,
+    clear_messages: bool = False,
+    reason: bool = False,
+    field_models: list[FieldModel] | None = None,
+) -> BaseModel | dict | str | None:
+
+    # 1. communicate chat context building to avoid changing parameters
+    # Start with base chat param
+    _cctx = chat_param
+    _pctx = (
+        parse_param.with_updates(handle_validation="return_value")
+        if parse_param
+        else ParseParam(
+            response_format=chat_param.response_format,
+            imodel=branch.parse_model,
             handle_validation="return_value",
         )
-
-
+    )
+
+    # Update tool schemas if needed
+    if tools := (action_param.tools or True) if action_param else None:
+        tool_schemas = branch.acts.get_tool_schema(tools=tools)
+        _cctx = _cctx.with_updates(tool_schemas=tool_schemas)
+
+    # Extract model class from response_format (can be class, instance, or dict)
+    model_class = None
+    if chat_param.response_format is not None:
+        if isinstance(chat_param.response_format, type) and issubclass(
+            chat_param.response_format, BaseModel
+        ):
+            model_class = chat_param.response_format
+        elif isinstance(chat_param.response_format, BaseModel):
+            model_class = type(chat_param.response_format)
+
+    def normalize_field_model(fms):
+        if not fms:
+            return []
+        if not isinstance(fms, list):
+            return [fms]
+        return fms
+
+    fms = normalize_field_model(field_models)
+    operative = None
+
+    if model_class:
+        from lionagi.protocols.operatives.step import Step
+
+        operative = Step.request_operative(
+            reason=reason,
+            actions=bool(action_param is not None),
+            base_type=model_class,
+            field_models=fms,
         )
+        # Update contexts with new response format
+        _cctx = _cctx.with_updates(response_format=operative.request_type)
+        _pctx = _pctx.with_updates(response_format=operative.request_type)
+    elif field_models:
+        dict_ = {}
+        for fm in fms:
+            if fm.name:
+                dict_[fm.name] = str(fm.annotated())
+        # Update contexts with dict format
+        _cctx = _cctx.with_updates(response_format=dict_)
+        _pctx = _pctx.with_updates(response_format=dict_)
+
+    from ..communicate.communicate import communicate
 
-
-
+    result = await communicate(
+        branch,
+        instruction,
+        _cctx,
+        _pctx,
+        clear_messages,
+        skip_validation=skip_validation,
+        request_fields=None,
+    )
+    if skip_validation:
+        return result
+    if model_class and not isinstance(result, model_class):
         match handle_validation:
             case "return_value":
-                return
+                return result
             case "return_none":
                 return None
             case "raise":
                 raise ValueError(
                     "Failed to parse the LLM response into the requested format."
                 )
-
-    # 6) If no tool invocation is needed, return result or operative
     if not invoke_actions:
-        return
+        return result
 
-
-
-
-
-    )
-
-
-
-
-
-
-
-
-
-    # Possibly refine the operative with the tool outputs
-    operative = Step.respond_operative(
-        response_params=response_params,
-        operative=operative,
-        additional_data={"action_responses": action_response_models},
-        **(response_param_kwargs or {}),
+    requests = (
+        getattr(result, "action_requests", None)
+        if model_class
+        else result.get("action_requests", None)
+    )
+
+    action_response_models = None
+    if action_param and requests is not None:
+        from ..act.act import act
+
+        action_response_models = await act(
+            branch,
+            requests,
+            action_param,
         )
 
-
-
+    if not action_response_models:
+        return result
+
+    # Filter out None values from action responses
+    action_response_models = [
+        r for r in action_response_models if r is not None
+    ]
+
+    if not action_response_models:  # All were None
+        return result
+
+    if not model_class:  # Dict response
+        result.update({"action_responses": action_response_models})
+        return result
+
+    from lionagi.protocols.operatives.step import Step
+
+    operative.response_model = result
+    operative = Step.respond_operative(
+        operative=operative,
+        additional_data={"action_responses": action_response_models},
+    )
+    return operative.response_model
```