lionagi 0.17.11__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/ln/types.py +32 -5
- lionagi/models/field_model.py +9 -0
- lionagi/operations/ReAct/ReAct.py +474 -237
- lionagi/operations/ReAct/utils.py +3 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/chat/chat.py +130 -114
- lionagi/operations/communicate/communicate.py +101 -42
- lionagi/operations/flow.py +4 -4
- lionagi/operations/interpret/interpret.py +65 -20
- lionagi/operations/operate/operate.py +212 -106
- lionagi/operations/parse/parse.py +170 -142
- lionagi/operations/select/select.py +78 -17
- lionagi/operations/select/utils.py +1 -1
- lionagi/operations/types.py +119 -23
- lionagi/protocols/generic/log.py +3 -2
- lionagi/protocols/messages/__init__.py +27 -0
- lionagi/protocols/messages/action_request.py +86 -184
- lionagi/protocols/messages/action_response.py +73 -131
- lionagi/protocols/messages/assistant_response.py +130 -159
- lionagi/protocols/messages/base.py +26 -18
- lionagi/protocols/messages/instruction.py +281 -625
- lionagi/protocols/messages/manager.py +112 -62
- lionagi/protocols/messages/message.py +87 -197
- lionagi/protocols/messages/system.py +52 -123
- lionagi/protocols/types.py +0 -2
- lionagi/service/connections/endpoint.py +0 -8
- lionagi/service/connections/providers/oai_.py +29 -94
- lionagi/service/connections/providers/ollama_.py +3 -2
- lionagi/service/hooks/hooked_event.py +2 -2
- lionagi/service/third_party/claude_code.py +3 -2
- lionagi/service/third_party/openai_models.py +433 -0
- lionagi/session/branch.py +170 -178
- lionagi/session/session.py +3 -9
- lionagi/tools/file/reader.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/RECORD +41 -49
- lionagi/operations/_act/act.py +0 -86
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -58
- /lionagi/operations/{_act → act}/__init__.py +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
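Worth noting in the file list: the private `lionagi/operations/_act` package is replaced by a public `lionagi/operations/act` package (the RECORD entry shows `{_act → act}/__init__.py` moving unchanged), and the Jinja2 message templates are removed outright. A hypothetical migration sketch for downstream code that imported the old private path, assuming only the package path changed; `_get_default_call_params` is the helper the new ReAct code imports from this module in the diff below:

```python
# Hypothetical migration sketch for the _act -> act rename (0.17.11 -> 0.18.0),
# assuming only the package path changed.
# 0.17.11 (old private path): from lionagi.operations._act.act import ...
# 0.18.0 (new path):
from lionagi.operations.act.act import _get_default_call_params
```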
--- lionagi/operations/ReAct/ReAct.py (0.17.11)
+++ lionagi/operations/ReAct/ReAct.py (0.18.0)
@@ -3,24 +3,36 @@

 import logging
 from collections.abc import AsyncGenerator
-from typing import TYPE_CHECKING, Any, Literal
+from typing import TYPE_CHECKING, Any, Literal, TypeVar

 from pydantic import BaseModel

+logger = logging.getLogger(__name__)
+
 from lionagi.fields.instruct import Instruct
 from lionagi.libs.schema.as_readable import as_readable
 from lionagi.libs.validate.common_field_validators import (
     validate_model_to_type,
 )
-from lionagi.
+from lionagi.ln.fuzzy import FuzzyMatchKeysParams
+from lionagi.models.field_model import FieldModel
 from lionagi.service.imodel import iModel
-from lionagi.utils import copy

+from ..types import (
+    ActionParam,
+    ChatParam,
+    HandleValidation,
+    InterpretParam,
+    ParseParam,
+)
 from .utils import Analysis, ReActAnalysis

 if TYPE_CHECKING:
     from lionagi.session.branch import Branch

+B = TypeVar("B", bound=type[BaseModel])
+logger = logging.getLogger(__name__)
+

 async def ReAct(
     branch: "Branch",
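The new module-level `B = TypeVar("B", bound=type[BaseModel])` is what lets `intermediate_response_options` in the signatures below accept either a single Pydantic model class or a list of them (`B | list[B]`). A minimal sketch of arguments that satisfy that annotation, with `Milestone` as a hypothetical deliverable model:

```python
from pydantic import BaseModel

class Milestone(BaseModel):  # hypothetical intermediate deliverable
    title: str
    done: bool = False

# Both forms satisfy `intermediate_response_options: B | list[B]`:
single_option = Milestone
many_options = [Milestone]
```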
@@ -29,7 +41,7 @@ async def ReAct(
     interpret_domain: str | None = None,
     interpret_style: str | None = None,
     interpret_sample: str | None = None,
-    interpret_model:
+    interpret_model: iModel | None = None,
     interpret_kwargs: dict | None = None,
     tools: Any = None,
     tool_schemas: Any = None,
@@ -49,263 +61,469 @@ async def ReAct(
     continue_after_failed_response: bool = False,
     **kwargs,
 ):
+    """ReAct reasoning loop with legacy API - wrapper around ReAct_v1."""
+
+    # Handle legacy verbose parameter
+    if "verbose" in kwargs:
+        verbose_analysis = kwargs.pop("verbose")
+
+    # Convert Instruct to dict if needed
+    instruct_dict = (
+        instruct.to_dict()
+        if isinstance(instruct, Instruct)
+        else dict(instruct)
+    )
+
+    # Build InterpretParam if interpretation requested
+    intp_param = None
+    if interpret:
+        intp_param = InterpretParam(
+            domain=interpret_domain or "general",
+            style=interpret_style or "concise",
+            sample_writing=interpret_sample or "",
+            imodel=interpret_model or analysis_model or branch.chat_model,
+            imodel_kw=interpret_kwargs or {},
+        )
+
+    # Build ChatParam
+    chat_param = ChatParam(
+        guidance=instruct_dict.get("guidance"),
+        context=instruct_dict.get("context"),
+        sender=branch.user or "user",
+        recipient=branch.id,
+        response_format=None,  # Will be set in operate calls
+        progression=None,
+        tool_schemas=tool_schemas or [],
+        images=[],
+        image_detail="auto",
+        plain_content="",
+        include_token_usage_to_model=include_token_usage_to_model,
+        imodel=analysis_model or branch.chat_model,
+        imodel_kw=kwargs,
+    )
+
+    # Build ActionParam
+    action_param = None
+    if tools is not None or tool_schemas is not None:
+        from ..act.act import _get_default_call_params
+
+        action_param = ActionParam(
+            action_call_params=_get_default_call_params(),
+            tools=tools or True,
+            strategy="concurrent",
+            suppress_errors=True,
+            verbose_action=False,
+        )
+
+    # Build ParseParam
+    from ..parse.parse import get_default_call
+
+    parse_param = ParseParam(
+        response_format=ReActAnalysis,  # Initial format
+        fuzzy_match_params=FuzzyMatchKeysParams(),
+        handle_validation="return_value",
+        alcall_params=get_default_call(),
+        imodel=analysis_model or branch.chat_model,
+        imodel_kw={},
+    )
+
+    # Response context for final answer
+    resp_ctx = response_kwargs or {}
+    if response_format:
+        resp_ctx["response_format"] = response_format
+
+    return await ReAct_v1(
+        branch,
+        instruction=instruct_dict.get("instruction", str(instruct)),
+        chat_param=chat_param,
+        action_param=action_param,
+        parse_param=parse_param,
+        intp_param=intp_param,
+        resp_ctx=resp_ctx,
+        reasoning_effort=reasoning_effort,
+        reason=True,  # ReAct always uses reasoning
+        field_models=None,
+        handle_validation="return_value",
+        invoke_actions=True,  # ReAct always invokes actions
+        clear_messages=False,
+        intermediate_response_options=intermediate_response_options,
+        intermediate_listable=intermediate_listable,
+        intermediate_nullable=False,
+        max_extensions=max_extensions,
+        extension_allowed=extension_allowed,
+        verbose_analysis=verbose_analysis,
+        display_as=display_as,
+        verbose_length=verbose_length,
+        continue_after_failed_response=continue_after_failed_response,
+        return_analysis=return_analysis,
+    )
+
+
+async def ReAct_v1(
+    branch: "Branch",
+    instruction: str,
+    chat_param: ChatParam,
+    action_param: ActionParam | None = None,
+    parse_param: ParseParam | None = None,
+    intp_param: InterpretParam | None = None,
+    resp_ctx: dict | None = None,
+    reasoning_effort: Literal["low", "medium", "high"] | None = None,
+    reason: bool = False,
+    field_models: list[FieldModel] | None = None,
+    handle_validation: HandleValidation = "raise",
+    invoke_actions: bool = True,
+    clear_messages=False,
+    intermediate_response_options: B | list[B] = None,
+    intermediate_listable: bool = False,
+    intermediate_nullable: bool = False,
+    max_extensions: int | None = 0,
+    extension_allowed: bool = True,
+    verbose_analysis: bool = False,
+    display_as: Literal["yaml", "json"] = "yaml",
+    verbose_length: int = None,
+    continue_after_failed_response: bool = False,
+    return_analysis: bool = False,
+):
+    """
+    Context-based ReAct implementation - collects all outputs from ReActStream.
+
+    Args:
+        return_analysis: If True, returns list of all intermediate analyses.
+            If False, returns only the final result.
+    """
     outs = []
+
     if verbose_analysis:
         async for i in ReActStream(
             branch=branch,
-
-
-
-
-
-
-
-
-
-
+            instruction=instruction,
+            chat_param=chat_param,
+            action_param=action_param,
+            parse_param=parse_param,
+            intp_param=intp_param,
+            resp_ctx=resp_ctx,
+            reasoning_effort=reasoning_effort,
+            reason=reason,
+            field_models=field_models,
+            handle_validation=handle_validation,
+            invoke_actions=invoke_actions,
+            clear_messages=clear_messages,
             intermediate_response_options=intermediate_response_options,
             intermediate_listable=intermediate_listable,
-
-            extension_allowed=extension_allowed,
+            intermediate_nullable=intermediate_nullable,
             max_extensions=max_extensions,
-
-            analysis_model=analysis_model,
+            extension_allowed=extension_allowed,
             verbose_analysis=verbose_analysis,
             display_as=display_as,
             verbose_length=verbose_length,
-            include_token_usage_to_model=include_token_usage_to_model,
             continue_after_failed_response=continue_after_failed_response,
-            **kwargs,
         ):
             analysis, str_ = i
-            str_
-            as_readable(
+            # str_ is already formatted markdown - just print it
+            as_readable(
+                str_,
+                md=True,
+                display_str=True,
+            )
             outs.append(analysis)
     else:
         async for i in ReActStream(
             branch=branch,
-
-
-
-
-
-
-
-
-
-
+            instruction=instruction,
+            chat_param=chat_param,
+            action_param=action_param,
+            parse_param=parse_param,
+            intp_param=intp_param,
+            resp_ctx=resp_ctx,
+            reasoning_effort=reasoning_effort,
+            reason=reason,
+            field_models=field_models,
+            handle_validation=handle_validation,
+            invoke_actions=invoke_actions,
+            clear_messages=clear_messages,
             intermediate_response_options=intermediate_response_options,
             intermediate_listable=intermediate_listable,
-
-            extension_allowed=extension_allowed,
+            intermediate_nullable=intermediate_nullable,
             max_extensions=max_extensions,
-
-            analysis_model=analysis_model,
+            extension_allowed=extension_allowed,
             display_as=display_as,
             verbose_length=verbose_length,
-            include_token_usage_to_model=include_token_usage_to_model,
             continue_after_failed_response=continue_after_failed_response,
-            **kwargs,
         ):
             outs.append(i)
+
     if return_analysis:
         return outs
-
+
+    # Extract answer from the final Analysis object
+    final_result = outs[-1]
+    if hasattr(final_result, "answer"):
+        return final_result.answer
+    return final_result
+
+
+async def handle_instruction_interpretation(
+    branch: "Branch",
+    instruction: str,
+    chat_param: ChatParam,
+    intp_param: InterpretParam | None,
+):
+    """Handle instruction interpretation if requested."""
+    if not intp_param:
+        return instruction
+
+    from ..interpret.interpret import interpret
+
+    return await interpret(branch, instruction, intp_param)
+
+
+def handle_field_models(
+    field_models: list[FieldModel] | None,
+    intermediate_response_options: B | list[B] = None,
+    intermediate_listable: bool = False,
+    intermediate_nullable: bool = False,
+):
+    """Build field models including intermediate response options."""
+    fms = [] if not field_models else field_models
+
+    if intermediate_response_options:
+
+        def create_intermediate_response_field_model():
+            from lionagi.models import OperableModel
+
+            _iro = intermediate_response_options
+            iro = [_iro] if not isinstance(_iro, list) else _iro
+            opm = OperableModel()
+
+            for i in iro:
+                type_ = validate_model_to_type(None, i)
+                opm.add_field(
+                    str(type_.__name__).lower(),
+                    annotation=type_ | None,
+                    # Remove lambda validator to avoid Pydantic serialization errors
+                )
+
+            m_ = opm.new_model(name="IntermediateResponseOptions")
+            irfm = FieldModel(
+                name="intermediate_response_options",
+                base_type=m_,
+                description="Intermediate deliverable outputs. fill as needed ",
+                # Remove lambda validator to avoid Pydantic serialization errors
+            )
+
+            if intermediate_listable:
+                irfm = irfm.as_listable()
+
+            if intermediate_nullable:
+                irfm = irfm.as_nullable()
+
+            return irfm
+
+        fms = [fms] if not isinstance(fms, list) else fms
+        fms += [create_intermediate_response_field_model()]
+
+    return fms


 async def ReActStream(
     branch: "Branch",
-
-
-
-
-
-
-
-
-
-
-
+    instruction: str,
+    chat_param: ChatParam,
+    action_param: ActionParam | None = None,
+    parse_param: ParseParam | None = None,
+    intp_param: InterpretParam | None = None,
+    resp_ctx: dict | None = None,
+    reasoning_effort: Literal["low", "medium", "high"] | None = None,
+    reason: bool = False,
+    field_models: list[FieldModel] | None = None,
+    handle_validation: HandleValidation = "raise",
+    invoke_actions: bool = True,
+    clear_messages=False,
+    intermediate_response_options: B | list[B] = None,
     intermediate_listable: bool = False,
-
+    intermediate_nullable: bool = False,
+    max_extensions: int | None = 0,
     extension_allowed: bool = True,
-    max_extensions: int | None = 3,
-    response_kwargs: dict | None = None,
-    analysis_model: iModel | None = None,
     verbose_analysis: bool = False,
-    display_as: Literal["
+    display_as: Literal["yaml", "json"] = "yaml",
     verbose_length: int = None,
-    include_token_usage_to_model: bool = True,
     continue_after_failed_response: bool = False,
-    **kwargs,
 ) -> AsyncGenerator:
-
+    """Core ReAct streaming implementation with context-based architecture."""

-
-
-
-
-            else intermediate_response_options
-        )
-        field_models = []
-        for i in iro:
-            type_ = validate_model_to_type(None, i)
-            fm = FieldModel(
-                name=str(type_.__name__).lower(),
-                annotation=type_ | None,
-                validator=lambda cls, x: None if x == {} else x,
-            )
-            field_models.append(fm)
-
-        m_ = ModelParams(
-            name="IntermediateResponseOptions", field_models=field_models
-        ).create_new_model()
-
-        irfm = FieldModel(
-            name="intermediate_response_options",
-            annotation=(
-                m_ | None if not intermediate_listable else list[m_] | None
-            ),
-            description="Optional intermediate deliverable outputs. fill as needed ",
-            validator=lambda cls, x: None if not x else x,
+    # Validate and clamp max_extensions
+    if max_extensions and max_extensions > 100:
+        logger.warning(
+            "max_extensions should not exceed 100; defaulting to 100."
         )
+        max_extensions = 100

-
-    if not tools and not tool_schemas:
-        tools = True
-
-    # Possibly interpret the instruction to refine it
-    instruction_str = None
-    if interpret:
-        instruction_str = await branch.interpret(
-            str(
-                instruct.to_dict()
-                if isinstance(instruct, Instruct)
-                else instruct
-            ),
-            domain=interpret_domain,
-            style=interpret_style,
-            sample_writing=interpret_sample,
-            interpret_model=interpret_model,
-            **(interpret_kwargs or {}),
-        )
+    def verbose_yield(title, s_):
         if verbose_analysis:
-            str_ = "\n
+            str_ = title + "\n"
             str_ += as_readable(
-
+                s_,
                 md=True,
                 format_curly=True if display_as == "yaml" else False,
                 max_chars=verbose_length,
             )
-
+            return s_, str_
         else:
-
-
-    #
-
-
-
-
+            return s_
+
+    # Step 1: Interpret instruction if requested
+    ins_str = await handle_instruction_interpretation(
+        branch,
+        instruction=instruction,
+        chat_param=chat_param,
+        intp_param=intp_param,
     )
-
-
-
-    instruct_dict["instruction"] = (
-        instruction_str
-        or (instruct_dict.get("instruction") or "")  # in case it's missing
-    ) + max_ext_info
-
-    # Prepare a copy of user-provided kwargs for the first operate call
-    kwargs_for_operate = copy(kwargs)
-    kwargs_for_operate["actions"] = True
-    kwargs_for_operate["reason"] = True
-    kwargs_for_operate["include_token_usage_to_model"] = (
-        include_token_usage_to_model
-    )
-
-    # Step 1: Generate initial ReAct analysis
-    analysis: ReActAnalysis = await branch.operate(
-        instruct=instruct_dict,
-        response_format=ReActAnalysis,
-        tools=tools,
-        tool_schemas=tool_schemas,
-        chat_model=analysis_model or branch.chat_model,
-        **kwargs_for_operate,
-    )
-    # If verbose, show round #1 analysis
-    if verbose_analysis:
-        str_ = "\n### ReAct Round No.1 Analysis:\n"
+    # Print interpreted instruction if verbose (don't yield it - not an analysis object)
+    if verbose_analysis and intp_param:
+        str_ = "\n### Interpreted instruction:\n"
         str_ += as_readable(
-
+            ins_str,
             md=True,
             format_curly=True if display_as == "yaml" else False,
             max_chars=verbose_length,
         )
-        yield analysis, str_
-    else:
-        yield analysis

-    #
-
-
-
+    # Step 2: Handle field models
+    fms = handle_field_models(
+        field_models,
+        intermediate_response_options,
+        intermediate_listable,
+        intermediate_nullable,
+    )
+
+    # Step 3: Initial ReAct analysis
+    from ..operate.operate import operate
+
+    # Build context for initial analysis
+    initial_chat_param = chat_param.with_updates(response_format=ReActAnalysis)
+
+    initial_parse_param = (
+        parse_param.with_updates(response_format=ReActAnalysis)
+        if parse_param
+        else None
+    )
+
+    # Add proper extension prompt for initial analysis
+    initial_instruction = ins_str
+    if extension_allowed and max_extensions:
+        initial_instruction += "\n\n" + ReActAnalysis.FIRST_EXT_PROMPT.format(
+            extensions=max_extensions
         )
-        max_extensions = 100

-
-
+    analysis = await operate(
+        branch,
+        instruction=initial_instruction,
+        chat_param=initial_chat_param,
+        action_param=action_param,
+        parse_param=initial_parse_param,
+        handle_validation=handle_validation,
+        invoke_actions=invoke_actions,
+        skip_validation=False,
+        clear_messages=clear_messages,
+        reason=reason,
+        field_models=fms,
+    )
+
+    out = verbose_yield("\n### ReAct Round No.1 Analysis:\n", analysis)
+    yield out
+
+    # Step 4: Extension loop
+    extensions = max_extensions or 0
     round_count = 1

-
-
-
-
-            analysis.get("extension_needed",
-
-
-
-
-
+    def _need_extension(analysis):
+        if hasattr(analysis, "extension_needed"):
+            return analysis.extension_needed
+        if isinstance(analysis, dict):
+            return analysis.get("extension_needed", False)
+        return False
+
+    def _extension_allowed(exts):
+        return extension_allowed and exts > 0
+
+    def prepare_analysis_kwargs(exts):
         new_instruction = None
-        if
+        if exts == max_extensions:
             new_instruction = ReActAnalysis.FIRST_EXT_PROMPT.format(
-                extensions=
+                extensions=exts
             )
         else:
             new_instruction = ReActAnalysis.CONTINUE_EXT_PROMPT.format(
-                extensions=
+                extensions=exts
             )

-
-
-
-        operate_kwargs["response_format"] = ReActAnalysis
-        operate_kwargs["action_strategy"] = analysis.action_strategy
-        operate_kwargs["include_token_usage_to_model"] = (
-            include_token_usage_to_model
-        )
-        if irfm:
-            operate_kwargs["field_models"] = operate_kwargs.get(
-                "field_models", []
-            ) + [irfm]
+        # Use with_updates to create new context instances
+        updates = {"response_format": ReActAnalysis}
+
         if reasoning_effort:
-            guide =
-
-
-
-
-
-
-
-
+            guide = {
+                "low": "Quick concise reasoning.\n",
+                "medium": "Reasonably balanced reasoning.\n",
+                "high": "Thorough, try as hard as you can in reasoning.\n",
+            }.get(reasoning_effort, "")
+
+            updates["guidance"] = (guide or "") + (chat_param.guidance or "")
+            updates["imodel_kw"] = {
+                **(chat_param.imodel_kw or {}),
+                "reasoning_effort": reasoning_effort,
+            }
+
+        _cctx = chat_param.with_updates(**updates)
+
+        # Import default call params if needed
+        from ..act.act import _get_default_call_params
+
+        _actx = (
+            action_param.with_updates(
+                strategy=getattr(analysis, "action_strategy", "concurrent")
+            )
+            if action_param
+            else ActionParam(
+                action_call_params=_get_default_call_params(),
+                tools=True,
+                strategy=getattr(analysis, "action_strategy", "concurrent"),
+                suppress_errors=True,
+                verbose_action=False,
             )
-
+        )

-
-            instruction
-
-
-
+        return {
+            "instruction": new_instruction,
+            "chat_param": _cctx,
+            "action_param": _actx,
+            "reason": reason,
+            "field_models": fms,
+        }
+
+    while _extension_allowed(extensions) and _need_extension(analysis):
+        kwargs = prepare_analysis_kwargs(extensions)
+
+        # Build parse context for extension
+        ext_parse_param = (
+            parse_param.with_updates(
+                response_format=kwargs["chat_param"].response_format
+            )
+            if parse_param
+            else None
+        )
+
+        analysis = await operate(
+            branch,
+            instruction=kwargs["instruction"],
+            chat_param=kwargs["chat_param"],
+            action_param=kwargs.get("action_param"),
+            parse_param=ext_parse_param,
+            handle_validation=handle_validation,
+            invoke_actions=invoke_actions,
+            skip_validation=False,
+            clear_messages=False,  # Keep messages to maintain context
+            reason=kwargs.get("reason", True),
+            field_models=kwargs.get("field_models"),
         )
         round_count += 1

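With the legacy `ReAct(...)` wrapper above doing only translation, its keyword surface maps one-to-one onto explicit parameter objects. A hedged sketch of calling the context-based entry point directly; field values mirror the wrapper's defaults above, and the import paths are inferred from the file list and the relative imports in this diff:

```python
# Sketch only: drives ReAct_v1 directly instead of going through the
# legacy ReAct() wrapper. Paths/values inferred from this diff.
from lionagi.ln.fuzzy import FuzzyMatchKeysParams
from lionagi.operations.ReAct.ReAct import ReAct_v1
from lionagi.operations.ReAct.utils import ReActAnalysis
from lionagi.operations.parse.parse import get_default_call
from lionagi.operations.types import ChatParam, ParseParam


async def run(branch, question: str):
    # Mirrors what the legacy wrapper builds from its keyword arguments.
    chat_param = ChatParam(
        guidance=None,
        context=None,
        sender=branch.user or "user",
        recipient=branch.id,
        response_format=None,  # set per operate call by ReActStream
        progression=None,
        tool_schemas=[],
        images=[],
        image_detail="auto",
        plain_content="",
        include_token_usage_to_model=True,
        imodel=branch.chat_model,
        imodel_kw={},
    )
    parse_param = ParseParam(
        response_format=ReActAnalysis,
        fuzzy_match_params=FuzzyMatchKeysParams(),
        handle_validation="return_value",
        alcall_params=get_default_call(),
        imodel=branch.chat_model,
        imodel_kw={},
    )
    return await ReAct_v1(
        branch,
        instruction=question,
        chat_param=chat_param,
        parse_param=parse_param,
        invoke_actions=True,
        max_extensions=3,
        extension_allowed=True,
    )
```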
@@ -319,40 +537,73 @@ async def ReActStream(
                     "Set `continue_after_failed_response=True` to ignore this error."
                 )

-
-
-
-
-            str_ += as_readable(
-                analysis,
-                md=True,
-                format_curly=True if display_as == "yaml" else False,
-                max_chars=verbose_length,
-            )
-
-            yield analysis, str_
-        else:
-            yield analysis
+        out = verbose_yield(
+            f"\n### ReAct Round No.{round_count} Analysis:\n", analysis
+        )
+        yield out

         if extensions:
             extensions -= 1

-    # Step
-    answer_prompt = ReActAnalysis.ANSWER_PROMPT.format(
-
+    # Step 5: Final answer
+    answer_prompt = ReActAnalysis.ANSWER_PROMPT.format(instruction=ins_str)
+
+    final_response_format = (
+        resp_ctx.get("response_format") if resp_ctx else None
+    )
+    if not final_response_format:
+        final_response_format = Analysis
+
+    # Build contexts for final answer
+    resp_ctx_updates = {"response_format": final_response_format}
+    if resp_ctx:
+        # Merge resp_ctx into updates (filter allowed keys)
+        for k, v in resp_ctx.items():
+            if k in chat_param.allowed() and k != "response_format":
+                resp_ctx_updates[k] = v
+
+    final_chat_param = chat_param.with_updates(**resp_ctx_updates)
+
+    final_parse_param = (
+        parse_param.with_updates(response_format=final_response_format)
+        if parse_param
+        else None
     )
-
-
+
+    # Build operate kwargs, honoring response_kwargs
+    operate_kwargs = {
+        "branch": branch,
+        "instruction": answer_prompt,
+        "chat_param": final_chat_param,
+        "action_param": None,  # No actions in final answer
+        "parse_param": final_parse_param,
+        "invoke_actions": False,
+        "clear_messages": False,
+        "reason": False,  # No reasoning wrapper in final answer
+        "field_models": None,
+        # Defaults that can be overridden by resp_ctx
+        "handle_validation": handle_validation,
+        "skip_validation": False,
+    }
+
+    # Honor response_kwargs for final answer generation
+    if resp_ctx:
+        # Extract operate specific parameters from resp_ctx
+        operate_params = {
+            "skip_validation",
+            "handle_validation",
+            "clear_messages",
+            "reason",
+            "field_models",
+        }
+        for param in operate_params:
+            if param in resp_ctx:
+                operate_kwargs[param] = resp_ctx[param]

     try:
-        out = await
-
-
-            **(response_kwargs or {}),
-        )
-        if isinstance(analysis, dict) and all(
-            i is None for i in analysis.values()
-        ):
+        out = await operate(**operate_kwargs)
+
+        if isinstance(out, dict) and all(i is None for i in out.values()):
             if not continue_after_failed_response:
                 raise ValueError(
                     "All values in the response are None. "
@@ -362,20 +613,6 @@ async def ReActStream(
     except Exception:
         out = branch.msgs.last_response.response

-
-
-
-    if verbose_analysis:
-        str_ = "\n### ReAct Final Answer:\n"
-        str_ += as_readable(
-            out,
-            md=True,
-            format_curly=True if display_as == "yaml" else False,
-            max_chars=verbose_length,
-        )
-        yield out, str_
-    else:
-        yield out
-
-
-    # TODO: Do partial intermeditate output for longer analysis with form and report
+    # Don't extract .answer - return the full Analysis object
+    _o = verbose_yield("\n### ReAct Final Answer:\n", out)
+    yield _o
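Across the new code, parameter objects are treated as immutable: every call site derives a fresh copy with `with_updates(...)`, and `allowed()` gates which `resp_ctx` keys may be merged into the final-answer context. A stand-in illustrating that contract under stated assumptions; the real base class presumably lives in `lionagi/ln/types.py` (also touched in this release), and `ParamsSketch` is illustrative only:

```python
# Illustrative stand-in for the immutable-params contract used above;
# not the actual lionagi base class.
from dataclasses import dataclass, fields, replace


@dataclass(frozen=True)
class ParamsSketch:
    guidance: str | None = None
    response_format: type | None = None

    def allowed(self) -> set[str]:
        # Keys that with_updates()/resp_ctx merging may set.
        return {f.name for f in fields(self)}

    def with_updates(self, **kw) -> "ParamsSketch":
        # Return a modified copy; the original stays untouched.
        return replace(self, **kw)


p = ParamsSketch(guidance="be brief")
p2 = p.with_updates(response_format=dict)
assert p.response_format is None and p2.guidance == "be brief"
```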