lionagi 0.8.8__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/__init__.py +1 -1
- lionagi/_class_registry.py +1 -1
- lionagi/_errors.py +1 -1
- lionagi/libs/__init__.py +1 -1
- lionagi/libs/file/__init__.py +1 -1
- lionagi/libs/file/chunk.py +1 -1
- lionagi/libs/file/file_ops.py +1 -1
- lionagi/libs/file/params.py +1 -1
- lionagi/libs/file/process.py +1 -1
- lionagi/libs/file/save.py +1 -1
- lionagi/libs/nested/__init__.py +1 -1
- lionagi/libs/nested/flatten.py +1 -1
- lionagi/libs/nested/nfilter.py +1 -1
- lionagi/libs/nested/nget.py +1 -1
- lionagi/libs/nested/ninsert.py +1 -1
- lionagi/libs/nested/nmerge.py +1 -1
- lionagi/libs/nested/npop.py +1 -1
- lionagi/libs/nested/nset.py +1 -1
- lionagi/libs/nested/unflatten.py +1 -1
- lionagi/libs/nested/utils.py +1 -1
- lionagi/libs/package/__init__.py +1 -1
- lionagi/libs/package/imports.py +1 -1
- lionagi/libs/package/management.py +1 -1
- lionagi/libs/package/params.py +1 -1
- lionagi/libs/package/system.py +1 -1
- lionagi/libs/parse.py +1 -1
- lionagi/libs/schema/__init__.py +1 -1
- lionagi/libs/schema/as_readable.py +151 -87
- lionagi/libs/schema/extract_code_block.py +1 -1
- lionagi/libs/schema/extract_docstring.py +1 -1
- lionagi/libs/schema/function_to_schema.py +1 -1
- lionagi/libs/schema/json_schema.py +1 -1
- lionagi/libs/validate/__init__.py +1 -1
- lionagi/libs/validate/common_field_validators.py +1 -1
- lionagi/libs/validate/fuzzy_match_keys.py +1 -1
- lionagi/libs/validate/fuzzy_validate_mapping.py +1 -1
- lionagi/libs/validate/string_similarity.py +1 -1
- lionagi/libs/validate/validate_boolean.py +1 -1
- lionagi/operations/ReAct/ReAct.py +214 -21
- lionagi/operations/ReAct/__init__.py +1 -1
- lionagi/operations/ReAct/utils.py +14 -3
- lionagi/operations/__init__.py +1 -1
- lionagi/operations/_act/__init__.py +1 -1
- lionagi/operations/_act/act.py +6 -1
- lionagi/operations/brainstorm/__init__.py +1 -1
- lionagi/operations/brainstorm/brainstorm.py +1 -1
- lionagi/operations/brainstorm/prompt.py +1 -1
- lionagi/operations/chat/__init__.py +1 -1
- lionagi/operations/chat/chat.py +1 -1
- lionagi/operations/communicate/communicate.py +1 -1
- lionagi/operations/instruct/__init__.py +1 -1
- lionagi/operations/instruct/instruct.py +1 -1
- lionagi/operations/interpret/__init__.py +1 -1
- lionagi/operations/interpret/interpret.py +9 -38
- lionagi/operations/operate/__init__.py +1 -1
- lionagi/operations/operate/operate.py +1 -1
- lionagi/operations/parse/__init__.py +1 -1
- lionagi/operations/parse/parse.py +12 -2
- lionagi/operations/plan/__init__.py +1 -1
- lionagi/operations/plan/plan.py +1 -1
- lionagi/operations/plan/prompt.py +1 -1
- lionagi/operations/select/__init__.py +1 -1
- lionagi/operations/select/select.py +1 -1
- lionagi/operations/select/utils.py +1 -1
- lionagi/operations/types.py +1 -1
- lionagi/operations/utils.py +1 -1
- lionagi/operatives/__init__.py +1 -1
- lionagi/operatives/action/__init__.py +1 -1
- lionagi/operatives/action/function_calling.py +1 -1
- lionagi/operatives/action/manager.py +1 -1
- lionagi/operatives/action/request_response_model.py +1 -1
- lionagi/operatives/action/tool.py +1 -1
- lionagi/operatives/action/utils.py +1 -1
- lionagi/operatives/forms/__init__.py +1 -1
- lionagi/operatives/instruct/__init__.py +1 -1
- lionagi/operatives/instruct/base.py +1 -1
- lionagi/operatives/instruct/instruct.py +1 -1
- lionagi/operatives/instruct/instruct_collection.py +1 -1
- lionagi/operatives/instruct/node.py +1 -1
- lionagi/operatives/instruct/prompts.py +1 -1
- lionagi/operatives/instruct/reason.py +1 -1
- lionagi/operatives/manager.py +1 -1
- lionagi/operatives/models/__init__.py +1 -1
- lionagi/operatives/models/field_model.py +1 -1
- lionagi/operatives/models/model_params.py +1 -1
- lionagi/operatives/models/note.py +1 -1
- lionagi/operatives/models/operable_model.py +1 -1
- lionagi/operatives/models/schema_model.py +1 -1
- lionagi/operatives/operative.py +1 -1
- lionagi/operatives/step.py +1 -1
- lionagi/operatives/strategies/__init__.py +1 -1
- lionagi/operatives/strategies/base.py +1 -1
- lionagi/operatives/strategies/concurrent.py +1 -1
- lionagi/operatives/strategies/concurrent_chunk.py +1 -1
- lionagi/operatives/strategies/concurrent_sequential_chunk.py +1 -1
- lionagi/operatives/strategies/params.py +1 -1
- lionagi/operatives/strategies/sequential.py +1 -1
- lionagi/operatives/strategies/sequential_chunk.py +1 -1
- lionagi/operatives/strategies/sequential_concurrent_chunk.py +1 -1
- lionagi/operatives/strategies/utils.py +1 -1
- lionagi/operatives/types.py +1 -1
- lionagi/protocols/__init__.py +1 -1
- lionagi/protocols/_concepts.py +1 -1
- lionagi/protocols/adapters/adapter.py +1 -1
- lionagi/protocols/generic/__init__.py +1 -1
- lionagi/protocols/generic/element.py +1 -1
- lionagi/protocols/generic/event.py +1 -1
- lionagi/protocols/generic/log.py +1 -1
- lionagi/protocols/generic/pile.py +1 -1
- lionagi/protocols/generic/processor.py +1 -1
- lionagi/protocols/generic/progression.py +1 -1
- lionagi/protocols/graph/__init__.py +1 -1
- lionagi/protocols/graph/edge.py +1 -1
- lionagi/protocols/graph/graph.py +1 -1
- lionagi/protocols/graph/node.py +1 -1
- lionagi/protocols/mail/__init__.py +1 -1
- lionagi/protocols/mail/exchange.py +1 -1
- lionagi/protocols/mail/mail.py +1 -1
- lionagi/protocols/mail/mailbox.py +1 -1
- lionagi/protocols/mail/manager.py +1 -1
- lionagi/protocols/mail/package.py +1 -1
- lionagi/protocols/messages/__init__.py +1 -1
- lionagi/protocols/messages/action_request.py +1 -1
- lionagi/protocols/messages/action_response.py +1 -1
- lionagi/protocols/messages/assistant_response.py +1 -1
- lionagi/protocols/messages/base.py +1 -1
- lionagi/protocols/messages/instruction.py +2 -1
- lionagi/protocols/messages/manager.py +1 -1
- lionagi/protocols/messages/message.py +1 -1
- lionagi/protocols/messages/system.py +1 -1
- lionagi/protocols/types.py +1 -1
- lionagi/service/endpoints/__init__.py +1 -1
- lionagi/service/endpoints/base.py +54 -49
- lionagi/service/endpoints/chat_completion.py +1 -1
- lionagi/service/endpoints/match_endpoint.py +1 -1
- lionagi/service/endpoints/rate_limited_processor.py +1 -1
- lionagi/service/endpoints/token_calculator.py +1 -1
- lionagi/service/imodel.py +2 -3
- lionagi/service/manager.py +1 -1
- lionagi/service/providers/__init__.py +1 -1
- lionagi/service/providers/anthropic_/__init__.py +1 -1
- lionagi/service/providers/anthropic_/messages.py +1 -1
- lionagi/service/providers/groq_/__init__.py +1 -1
- lionagi/service/providers/groq_/chat_completions.py +1 -1
- lionagi/service/providers/openai_/__init__.py +1 -1
- lionagi/service/providers/openai_/chat_completions.py +37 -2
- lionagi/service/providers/openrouter_/__init__.py +1 -1
- lionagi/service/providers/openrouter_/chat_completions.py +1 -1
- lionagi/service/providers/perplexity_/__init__.py +1 -1
- lionagi/service/providers/perplexity_/chat_completions.py +1 -1
- lionagi/service/types.py +1 -1
- lionagi/session/__init__.py +1 -1
- lionagi/session/branch.py +104 -11
- lionagi/session/prompts.py +61 -0
- lionagi/session/session.py +1 -1
- lionagi/settings.py +1 -1
- lionagi/tools/file/reader.py +12 -7
- lionagi/utils.py +1 -1
- lionagi/version.py +1 -1
- {lionagi-0.8.8.dist-info → lionagi-0.9.1.dist-info}/METADATA +1 -1
- lionagi-0.9.1.dist-info/RECORD +202 -0
- lionagi-0.8.8.dist-info/RECORD +0 -201
- {lionagi-0.8.8.dist-info → lionagi-0.9.1.dist-info}/WHEEL +0 -0
- {lionagi-0.8.8.dist-info → lionagi-0.9.1.dist-info}/licenses/LICENSE +0 -0
lionagi/operations/ReAct/ReAct.py
CHANGED
@@ -1,17 +1,24 @@
-# Copyright (c) 2023 -
+# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
 #
 # SPDX-License-Identifier: Apache-2.0
 
 import logging
-from
+from collections.abc import AsyncGenerator
+from typing import TYPE_CHECKING, Any, Literal
 
 from pydantic import BaseModel
 
+from lionagi.libs.schema.as_readable import as_readable
+from lionagi.libs.validate.common_field_validators import (
+    validate_model_to_type,
+)
+from lionagi.operatives.models.field_model import FieldModel
+from lionagi.operatives.models.model_params import ModelParams
 from lionagi.operatives.types import Instruct
 from lionagi.service.imodel import iModel
 from lionagi.utils import copy
 
-from .utils import ReActAnalysis
+from .utils import Analysis, ReActAnalysis
 
 if TYPE_CHECKING:
     from lionagi.session.branch import Branch
@@ -24,18 +31,139 @@ async def ReAct(
     interpret_domain: str | None = None,
     interpret_style: str | None = None,
     interpret_sample: str | None = None,
+    interpret_model: str | None = None,
     interpret_kwargs: dict | None = None,
     tools: Any = None,
     tool_schemas: Any = None,
     response_format: type[BaseModel] | BaseModel = None,
+    intermediate_response_options: list[BaseModel] | BaseModel = None,
+    intermediate_listable: bool = False,
+    reasoning_effort: Literal["low", "medium", "high"] = None,
     extension_allowed: bool = True,
     max_extensions: int | None = 3,
     response_kwargs: dict | None = None,
+    display_as: Literal["json", "yaml"] = "yaml",
     return_analysis: bool = False,
     analysis_model: iModel | None = None,
     verbose_analysis: bool = False,
+    verbose_length: int = None,
     **kwargs,
 ):
+    outs = []
+    if verbose_analysis:
+        async for i in ReActStream(
+            branch=branch,
+            instruct=instruct,
+            interpret=interpret,
+            interpret_domain=interpret_domain,
+            interpret_style=interpret_style,
+            interpret_sample=interpret_sample,
+            interpret_model=interpret_model,
+            interpret_kwargs=interpret_kwargs,
+            tools=tools,
+            tool_schemas=tool_schemas,
+            response_format=response_format,
+            intermediate_response_options=intermediate_response_options,
+            intermediate_listable=intermediate_listable,
+            reasoning_effort=reasoning_effort,
+            extension_allowed=extension_allowed,
+            max_extensions=max_extensions,
+            response_kwargs=response_kwargs,
+            analysis_model=analysis_model,
+            verbose_analysis=verbose_analysis,
+            display_as=display_as,
+            verbose_length=verbose_length,
+            **kwargs,
+        ):
+            analysis, str_ = i
+            str_ += "\n---------\n"
+            as_readable(str_, md=True, display_str=True)
+            outs.append(analysis)
+    else:
+        async for i in ReActStream(
+            branch=branch,
+            instruct=instruct,
+            interpret=interpret,
+            interpret_domain=interpret_domain,
+            interpret_style=interpret_style,
+            interpret_sample=interpret_sample,
+            interpret_model=interpret_model,
+            interpret_kwargs=interpret_kwargs,
+            tools=tools,
+            tool_schemas=tool_schemas,
+            response_format=response_format,
+            intermediate_response_options=intermediate_response_options,
+            intermediate_listable=intermediate_listable,
+            reasoning_effort=reasoning_effort,
+            extension_allowed=extension_allowed,
+            max_extensions=max_extensions,
+            response_kwargs=response_kwargs,
+            analysis_model=analysis_model,
+            display_as=display_as,
+            verbose_length=verbose_length,
+            **kwargs,
+        ):
+            outs.append(i)
+    if return_analysis:
+        return outs
+    return outs[-1]
+
+
+async def ReActStream(
+    branch: "Branch",
+    instruct: Instruct | dict[str, Any],
+    interpret: bool = False,
+    interpret_domain: str | None = None,
+    interpret_style: str | None = None,
+    interpret_sample: str | None = None,
+    interpret_model: str | None = None,
+    interpret_kwargs: dict | None = None,
+    tools: Any = None,
+    tool_schemas: Any = None,
+    response_format: type[BaseModel] | BaseModel = None,
+    intermediate_response_options: list[BaseModel] | BaseModel = None,
+    intermediate_listable: bool = False,
+    reasoning_effort: Literal["low", "medium", "high"] = None,
+    extension_allowed: bool = True,
+    max_extensions: int | None = 3,
+    response_kwargs: dict | None = None,
+    analysis_model: iModel | None = None,
+    verbose_analysis: bool = False,
+    display_as: Literal["json", "yaml"] = "yaml",
+    verbose_length: int = None,
+    **kwargs,
+) -> AsyncGenerator:
+    irfm: FieldModel | None = None
+
+    if intermediate_response_options is not None:
+        iro = (
+            [intermediate_response_options]
+            if not isinstance(intermediate_response_options, list)
+            else intermediate_response_options
+        )
+        field_models = []
+        for i in iro:
+            type_ = validate_model_to_type(None, i)
+            fm = FieldModel(
+                name=str(type_.__name__).lower(),
+                annotation=type_ | None,
+                validator=lambda cls, x: None if x == {} else x,
+            )
+            field_models.append(fm)
+
+        m_ = ModelParams(
+            name="IntermediateResponseOptions", field_models=field_models
+        ).create_new_model()
+
+        irfm = FieldModel(
+            name="intermediate_response_options",
+            annotation=(
+                m_ | None if not intermediate_listable else list[m_] | None
+            ),
+            description="Optional intermediate deliverable outputs. fill as needed ",
+            validator=lambda cls, x: None if not x else x,
+        )
+
     # If no tools or tool schemas are provided, default to "all tools"
     if not tools and not tool_schemas:
         tools = True
@@ -52,10 +180,20 @@ async def ReAct(
             domain=interpret_domain,
             style=interpret_style,
             sample_writing=interpret_sample,
+            interpret_model=interpret_model,
             **(interpret_kwargs or {}),
         )
         if verbose_analysis:
-
+            str_ = "\n### Interpreted instruction:\n"
+            str_ += as_readable(
+                instruction_str,
+                md=True,
+                format_curly=True if display_as == "yaml" else False,
+                max_chars=verbose_length,
+            )
+            yield instruction_str, str_
+        else:
+            yield instruction_str
 
     # Convert Instruct to dict if necessary
     instruct_dict = (
@@ -85,13 +223,18 @@ async def ReAct(
         chat_model=analysis_model or branch.chat_model,
         **kwargs_for_operate,
     )
-    analyses = [analysis]
-
     # If verbose, show round #1 analysis
     if verbose_analysis:
-
-
+        str_ = "\n### ReAct Round No.1 Analysis:\n"
+        str_ += as_readable(
+            analysis,
+            md=True,
+            format_curly=True if display_as == "yaml" else False,
+            max_chars=verbose_length,
        )
+        yield analysis, str_
+    else:
+        yield analysis
 
     # Validate and clamp max_extensions if needed
     if max_extensions and max_extensions > 100:
@@ -105,8 +248,13 @@ async def ReAct(
     round_count = 1
 
     while (
-        extension_allowed
-
+        extension_allowed and analysis.extension_needed
+        if hasattr(analysis, "extension_needed")
+        else (
+            analysis.get("extension_needed", None)
+            if isinstance(analysis, dict)
+            else False
+        )
         and (extensions if max_extensions else 0) > 0
     ):
         new_instruction = None
@@ -126,6 +274,21 @@ async def ReAct(
             operate_kwargs["action_strategy"] = analysis.action_strategy
         if analysis.action_batch_size:
             operate_kwargs["action_batch_size"] = analysis.action_batch_size
+        if irfm:
+            operate_kwargs["field_models"] = operate_kwargs.get(
+                "field_models", []
+            ) + [irfm]
+        if reasoning_effort:
+            guide = None
+            if reasoning_effort == "low":
+                guide = "Quick concise reasoning.\n"
+            if reasoning_effort == "medium":
+                guide = "Reasonably balanced reasoning.\n"
+            if reasoning_effort == "high":
+                guide = "Thorough, try as hard as you can in reasoning.\n"
+            operate_kwargs["guidance"] = guide + operate_kwargs.get(
+                "guidance", ""
+            )
 
         analysis = await branch.operate(
             instruction=new_instruction,
@@ -133,14 +296,23 @@ async def ReAct(
             tool_schemas=tool_schemas,
             **operate_kwargs,
         )
-        analyses.append(analysis)
         round_count += 1
 
         # If verbose, show round analysis
        if verbose_analysis:
-
-
+            str_ = f"\n### ReAct Round No.{round_count} Analysis:\n"
+
+            str_ += as_readable(
+                analysis,
+                md=True,
+                format_curly=True if display_as == "yaml" else False,
+                max_chars=verbose_length,
            )
+
+            yield analysis, str_
+        else:
+            yield analysis
+
         if extensions:
             extensions -= 1
 
@@ -148,11 +320,32 @@ async def ReAct(
     answer_prompt = ReActAnalysis.ANSWER_PROMPT.format(
         instruction=instruct_dict["instruction"]
     )
-
-
-
-
-
-
-
-
+    if not response_format:
+        response_format = Analysis
+
+    try:
+        out = await branch.operate(
+            instruction=answer_prompt,
+            response_format=response_format,
+            **(response_kwargs or {}),
+        )
+    except Exception:
+        out = branch.msgs.last_response.response
+
+    if isinstance(out, Analysis):
+        out = out.answer
+
+    if verbose_analysis:
+        str_ = "\n### ReAct Final Answer:\n"
+        str_ += as_readable(
+            out,
+            md=True,
+            format_curly=True if display_as == "yaml" else False,
+            max_chars=verbose_length,
+        )
+        yield out, str_
+    else:
+        yield out
+
+
+# TODO: Do partial intermeditate output for longer analysis with form and report
lionagi/operations/ReAct/utils.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 -
+# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
 #
 # SPDX-License-Identifier: Apache-2.0
 
@@ -36,7 +36,9 @@ class ReActAnalysis(BaseModel):
     FIRST_EXT_PROMPT: ClassVar[str] = (
         "You can perform multiple reason-action steps for accuracy. "
         "If you are not ready to finalize, set extension_needed to True. "
-        "
+        "hint: you should set extension_needed to True if the overall goal"
+        "is not yet achieved. Do not set it to False, if you are just providing"
+        "an interim answer. You have up to {extensions} expansions. Please continue."
     )
     CONTINUE_EXT_PROMPT: ClassVar[str] = (
         "Another round is available. You may do multiple actions if needed. "
@@ -49,7 +51,11 @@ class ReActAnalysis(BaseModel):
 
     analysis: str = Field(
         ...,
-        description=
+        description=(
+            "Free-form reasoning or chain-of-thought summary. Must be consistent with"
+            " the plan. Commonly used for divide_and_conquer, brainstorming, reflections, "
+            "regurgitation, review_checkpoints ...etc."
+        ),
     )
 
     planned_actions: list[PlannedAction] = Field(
@@ -90,3 +96,8 @@ class ReActAnalysis(BaseModel):
             "provide if and only if action_strategy is 'batch', this specifies the number of actions to run in parallel per batch."
         ),
     )
+
+
+class Analysis(BaseModel):
+
+    answer: str
lionagi/operations/__init__.py
CHANGED
lionagi/operations/_act/act.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 -
+# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
 #
 # SPDX-License-Identifier: Apache-2.0
 
@@ -35,6 +35,11 @@ async def _act(
         _request["arguments"] = action_request["arguments"]
 
     try:
+        if verbose_action:
+            args_ = str(_request["arguments"])
+            args_ = args_[:50] + "..." if len(args_) > 50 else args_
+            print(f"Invoking action {_request['function']} with {args_}.")
+
         func_call = await branch._action_manager.invoke(_request)
         if verbose_action:
             print(
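The _act change only adds a preview log line before tool invocation. A standalone sketch of the same 50-character truncation rule, using a made-up request dict:

```python
# Illustration only: mirrors the truncated-arguments preview added to _act().
_request = {
    "function": "search_web",  # hypothetical tool name, not from the package
    "arguments": {"query": "caching strategies " * 10, "top_k": 5},
}

args_ = str(_request["arguments"])
args_ = args_[:50] + "..." if len(args_) > 50 else args_
print(f"Invoking action {_request['function']} with {args_}.")
```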
lionagi/operations/chat/chat.py
CHANGED
lionagi/operations/interpret/interpret.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 -
+# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
 #
 # SPDX-License-Identifier: Apache-2.0
 
@@ -14,45 +14,14 @@ async def interpret(
     domain: str | None = None,
     style: str | None = None,
     sample_writing: str | None = None,
+    interpret_model: str | None = None,
     **kwargs,
 ) -> str:
-    instruction =
-You are given a user's raw instruction or question. Your task is to rewrite it into a clearer,
-
-
-
-1. **Dissect the user's request**:
-   - If the user references a local file, note it clearly (e.g., "paper_file_path": "…").
-   - If the user might need external references or up-to-date data, mention that possibility.
-   - If the user's question is ambiguous, propose clarifications.
-
-2. **Be explicit about the user's final objective**:
-   - For example, if the user wants a comparison with other works, add that as a bullet point or sub-question.
-   - If the user wants a summary plus code snippet, highlight that in your structured prompt.
-
-3. **Do NOT produce final system actions**:
-   - You're not calling any tools directly here; only rewriting the user query to reflect potential next steps.
-   - If the user's request might require searching or doc reading, note it as an *option*, e.g. "Potential tool usage: {search, partial doc read}."
-
-4. **Return only the improved user prompt**:
-   - The final output should be a single text block or short JSON specifying the clarified user request.
-   - Keep it concise yet thorough.
-
-For instance, if the user's original text is:
-"Please read my local PDF on RL and compare it to the newest research methods from exa or perplexity."
-
-A re-written version might be:
-"**Task**:
- - Summarize the local PDF (paper_file_path: 'myRLpaper.pdf').
- - Compare its approach with recent reinforcement learning research found via exa/perplexity searches.
-**Potential Tool Usage**:
- - Doc reading (reader_tool)
- - External search (search_exa, search_perplexity)
-**Output**:
- - A structured summary + comparative analysis."
-
-Now, apply this rewriting to the input below. Return only the re-written prompt.
-"""
+    instruction = (
+        "You are given a user's raw instruction or question. Your task is to rewrite it into a clearer,"
+        "more structured prompt for an LLM or system, making any implicit or missing details explicit. "
+        "Return only the re-written prompt."
+    )
     guidance = (
         f"Domain hint: {domain or 'general'}. "
         f"Desired style: {style or 'concise'}. "
@@ -66,6 +35,8 @@ Now, apply this rewriting to the input below. Return only the re-written prompt.
     kwargs["guidance"] = guidance + "\n" + kwargs.get("guidance", "")
     kwargs["instruction"] = instruction + "\n" + kwargs.get("instruction", "")
     kwargs["temperature"] = kwargs.get("temperature", 0.1)
+    if interpret_model:
+        kwargs["chat_model"] = interpret_model
 
     refined_prompt = await branch.chat(
         context=context,
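A hedged sketch of the new interpret_model pass-through; the positional raw-text argument and the model identifier are assumptions for parameters not visible in this hunk.

```python
# Hypothetical call: when interpret_model is given, interpret() now routes the
# rewrite through that chat model instead of the branch default.
from lionagi import Branch
from lionagi.operations.interpret.interpret import interpret


async def refine(branch: Branch) -> str:
    return await interpret(
        branch,
        "read my local pdf on RL and compare it to recent methods",  # placeholder raw input
        domain="machine learning",
        style="concise",
        interpret_model="openai/gpt-4o-mini",  # placeholder model id, not taken from the diff
    )
```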
lionagi/operations/parse/parse.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 -
+# Copyright (c) 2023 - 2025, HaiyangLi <quantocean.li at gmail dot com>
 #
 # SPDX-License-Identifier: Apache-2.0
 
@@ -48,6 +48,8 @@ async def parse(
         and not isinstance(response_model, BaseModel)
     ):
         num_try += 1
+        if num_try == max_retries:
+            _should_try = False
         _, res = await branch.chat(
             instruction="reformat text into specified model",
             guidane="follow the required response format, using the model schema as a guide",
@@ -73,7 +75,15 @@ async def parse(
             strict=strict,
             suppress_conversion_errors=suppress_conversion_errors,
         )
-
+        try:
+            response_model = request_type.model_validate(response_model)
+        except InterruptedError as e:
+            raise e
+        except Exception:
+            if _should_try:
+                continue
+            else:
+                break
 
     if not isinstance(response_model, BaseModel):
         match handle_validation:
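The parse change caps the reformat loop at max_retries and finishes each attempt with an explicit model_validate pass. A generic sketch of the same validate-or-retry pattern, independent of lionagi internals:

```python
# Generic illustration of the retry-until-valid pattern parse() now follows.
from pydantic import BaseModel, ValidationError


class Report(BaseModel):  # hypothetical target schema, not from the package
    title: str
    score: float


def validate_with_retries(candidates: list[dict], max_retries: int = 3) -> Report | None:
    num_try = 0
    _should_try = True
    while candidates and _should_try:
        num_try += 1
        if num_try == max_retries:
            _should_try = False      # last pass: stop retrying after this attempt
        raw = candidates.pop(0)      # stand-in for another chat/fuzzy-parse round
        try:
            return Report.model_validate(raw)
        except ValidationError:
            if _should_try:
                continue
            break
    return None


print(validate_with_retries([{"title": "a"}, {"title": "b", "score": "0.9"}]))
```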
lionagi/operations/plan/plan.py
CHANGED
lionagi/operations/types.py
CHANGED
lionagi/operations/utils.py
CHANGED
lionagi/operatives/__init__.py
CHANGED