lionagi 0.17.11__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/ln/types.py +32 -5
- lionagi/models/field_model.py +9 -0
- lionagi/operations/ReAct/ReAct.py +474 -237
- lionagi/operations/ReAct/utils.py +3 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/chat/chat.py +130 -114
- lionagi/operations/communicate/communicate.py +101 -42
- lionagi/operations/flow.py +4 -4
- lionagi/operations/interpret/interpret.py +65 -20
- lionagi/operations/operate/operate.py +212 -106
- lionagi/operations/parse/parse.py +170 -142
- lionagi/operations/select/select.py +78 -17
- lionagi/operations/select/utils.py +1 -1
- lionagi/operations/types.py +119 -23
- lionagi/protocols/generic/log.py +3 -2
- lionagi/protocols/messages/__init__.py +27 -0
- lionagi/protocols/messages/action_request.py +86 -184
- lionagi/protocols/messages/action_response.py +73 -131
- lionagi/protocols/messages/assistant_response.py +130 -159
- lionagi/protocols/messages/base.py +26 -18
- lionagi/protocols/messages/instruction.py +281 -625
- lionagi/protocols/messages/manager.py +112 -62
- lionagi/protocols/messages/message.py +87 -197
- lionagi/protocols/messages/system.py +52 -123
- lionagi/protocols/types.py +0 -2
- lionagi/service/connections/endpoint.py +0 -8
- lionagi/service/connections/providers/oai_.py +29 -94
- lionagi/service/connections/providers/ollama_.py +3 -2
- lionagi/service/hooks/hooked_event.py +2 -2
- lionagi/service/third_party/claude_code.py +3 -2
- lionagi/service/third_party/openai_models.py +433 -0
- lionagi/session/branch.py +170 -178
- lionagi/session/session.py +3 -9
- lionagi/tools/file/reader.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/RECORD +41 -49
- lionagi/operations/_act/act.py +0 -86
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -58
- /lionagi/operations/{_act → act}/__init__.py +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
lionagi/operations/parse/parse.py
CHANGED
@@ -1,20 +1,35 @@
 # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0

+import contextlib
+import warnings
 from typing import TYPE_CHECKING, Any, Literal

 from pydantic import BaseModel

+from lionagi.ln import (
+    extract_json,
+    fuzzy_validate_mapping,
+    get_cancelled_exc_class,
+    to_list,
+)
+from lionagi.ln.fuzzy import FuzzyMatchKeysParams
+from lionagi.protocols.types import AssistantResponse
+from lionagi.session.branch import AlcallParams
+
+from ..types import HandleValidation, ParseParam
+
 if TYPE_CHECKING:
     from lionagi.session.branch import Branch


-async def parse(
+_CALL = None  # type: ignore
+
+
+def prepare_parse_kws(
     branch: "Branch",
     text: str,
-    handle_validation: Literal[
-        "raise", "return_value", "return_none"
-    ] = "return_value",
+    handle_validation: HandleValidation = "return_value",
     max_retries: int = 3,
     request_type: type[BaseModel] = None,
     operative=None,
@@ -29,159 +44,172 @@ async def parse(
     strict: bool = False,
     suppress_conversion_errors: bool = False,
     response_format=None,
+    request_fields=None,
+    return_res_message: bool = False,
+    **kw,
 ):
-    from lionagi.libs.schema.breakdown_pydantic_annotation import (
-        breakdown_pydantic_annotation,
-    )
-    from lionagi.ln.fuzzy._fuzzy_validate import fuzzy_validate_mapping
-
-    if operative is not None:
-        max_retries = operative.max_retries
-        response_format = operative.request_type or response_format
-        request_type = request_type or operative.request_type

-    if
-
-    "
+    if suppress_conversion_errors:
+        warnings.warn(
+            "Parameter 'suppress_conversion_errors' is deprecated and no longer used. "
+            "It will be removed in a future version.",
+            DeprecationWarning,
+            stacklevel=2,
         )

-
+    response_format = (
+        operative.request_type
+        if operative
+        else response_format or request_type
+    )
+    _alcall_params = get_default_call()
+    max_retries = operative.max_retries if operative else max_retries or 3
+
+    fuzzy_params = FuzzyMatchKeysParams(
+        similarity_algo=similarity_algo,
+        similarity_threshold=similarity_threshold,
+        handle_unmatched=handle_unmatched,
+        fill_value=fill_value,
+        fill_mapping=fill_mapping,
+        strict=strict,
+        fuzzy_match=fuzzy_match,
+    )

-
-
+    return {
+        "text": text,
+        "parse_param": ParseParam(
+            response_format=response_format or request_fields,
+            fuzzy_match_params=fuzzy_params,
+            handle_validation=handle_validation,
+            alcall_params=_alcall_params.with_updates(
+                retry_attempts=max_retries
+            ),
+            imodel=branch.parse_model,
+            imodel_kw=kw,
+        ),
+        "return_res_message": return_res_message,
+    }

-    initial_error = None
-    parsed_data = None  # Initialize to avoid scoping issues

-
-
-
-
-
-
-
-
-
-
-
-
-
+async def parse(
+    branch: "Branch",
+    text: str,
+    parse_param: ParseParam,
+    return_res_message: bool = False,
+) -> Any | tuple[Any, AssistantResponse | None]:
+
+    # Try direct validation first
+    with contextlib.suppress(Exception):
+        result = _validate_dict_or_model(
+            text, parse_param.response_format, parse_param.fuzzy_match_params
+        )
+        return result if not return_res_message else (result, None)
+
+    async def _inner_parse(i):
+        _, res = await branch.chat(
+            instruction="reformat text into specified model or structure",
+            guidance="follow the required response format, using the model schema as a guide",
+            context=[{"text_to_format": text}],
+            request_fields=(
+                parse_param.response_format
+                if isinstance(parse_param.response_format, dict)
+                else None
+            ),
+            response_format=(
+                parse_param.response_format
+                if isinstance(parse_param.response_format, BaseModel)
+                else None
+            ),
+            imodel=parse_param.imodel or branch.parse_model,
+            sender=branch.user,
+            recipient=branch.id,
+            return_ins_res_message=True,
         )

-
+        res.metadata["is_parsed"] = True
+        res.metadata["original_text"] = text

-
-
-
-
-
+        return (
+            _validate_dict_or_model(
+                res.response,
+                parse_param.response_format,
+                parse_param.fuzzy_match_params,
+            ),
+            res,
+        )

-
-
-
+    _call = parse_param.alcall_params or get_default_call()
+    if isinstance(parse_param.alcall_params, dict):
+        _call = AlcallParams(**parse_param.alcall_params)

+    try:
+        result = await _call([0], _inner_parse)
+    except get_cancelled_exc_class():
+        raise
     except Exception as e:
-
-
-        logging.debug(
-            f"Initial parsing failed for text '{text[:100]}...': {e}"
-        )
-        logging.debug(
-            f"Parsed data was: {locals().get('parsed_data', 'not set')}"
-        )
-
-        # Only continue if we have retries left
-        if max_retries <= 0:
-            if handle_validation == "raise":
+        match parse_param.handle_validation:
+            case "raise":
                 raise ValueError(f"Failed to parse response: {e}") from e
-
-                return None
-
-                return text
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    if
-
-    ):
-
-
-
+            case "return_none":
+                return (None, None) if return_res_message else None
+            case "return_value":
+                return (text, None) if return_res_message else text
+    return (*result[0],) if return_res_message else result[0][0]
+
+
+def _validate_dict_or_model(
+    text: str,
+    response_format: type[BaseModel] | dict,
+    fuzzy_match_params: FuzzyMatchKeysParams | dict = None,
+):
+    try:
+        if isinstance(fuzzy_match_params, dict):
+            fuzzy_match_params = FuzzyMatchKeysParams(**fuzzy_match_params)
+
+        d_ = extract_json(text, fuzzy_parse=True, return_one_if_single=False)
+        dict_, keys_ = None, None
+        if d_:
+            dict_ = to_list(d_, flatten=True)[0]
+            if isinstance(fuzzy_match_params, FuzzyMatchKeysParams):
+                keys_ = (
+                    response_format.model_fields
+                    if isinstance(response_format, type)
+                    else response_format
                )
-
-
-                    f"Failed to parse response: {initial_error}"
-                ) from initial_error
-            elif handle_validation == "return_none":
-                return None
-            else:
-                return text
-
-    while num_try < max_retries:
-        num_try += 1
-
-        try:
-            logging.debug(f"Retry {num_try}: Using parse model to reformat")
-            _, res = await branch.chat(
-                instruction="reformat text into specified model",
-                guidance="follow the required response format, using the model schema as a guide",
-                context=[{"text_to_format": text}],
-                response_format=request_type,
-                sender=branch.user,
-                recipient=branch.id,
-                imodel=branch.parse_model,
-                return_ins_res_message=True,
+                dict_ = fuzzy_validate_mapping(
+                    dict_, keys_, **fuzzy_match_params.to_dict()
                )
-
-
-
-
-
-                similarity_algo=similarity_algo,
-                similarity_threshold=similarity_threshold,
-                fuzzy_match=fuzzy_match,
-                handle_unmatched=handle_unmatched,
-                fill_value=fill_value,
-                fill_mapping=fill_mapping,
-                strict=strict,
-                suppress_conversion_errors=suppress_conversion_errors,
+            elif fuzzy_match_params:
+                keys_ = (
+                    response_format.model_fields
+                    if isinstance(response_format, type)
+                    else response_format
                )
+                dict_ = fuzzy_validate_mapping(
+                    dict_,
+                    keys_,
+                    handle_unmatched="force",
+                    fill_value=None,
+                    strict=False,
+                )
+        if isinstance(response_format, type) and issubclass(
+            response_format, BaseModel
+        ):
+            return response_format.model_validate(dict_)
+        return dict_

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # All retries exhausted
-    match handle_validation:
-        case "return_value":
-            return text
-        case "return_none":
-            return None
-        case "raise":
-            error_msg = "Failed to parse response into request format"
-            if last_error:
-                error_msg += f": {last_error}"
-            raise ValueError(error_msg) from last_error
+    except Exception as e:
+        raise ValueError(f"Failed to parse text: {e}") from e
+
+
+def get_default_call() -> AlcallParams:
+    global _CALL
+    if _CALL is None:
+        _CALL = AlcallParams(
+            retry_initial_delay=1,
+            retry_backoff=1.85,
+            retry_attempts=3,
+            max_concurrent=1,
+            throttle_period=1,
+        )
+    return _CALL
lionagi/operations/select/select.py
CHANGED
@@ -8,7 +8,7 @@ from pydantic import BaseModel

 from lionagi.fields.instruct import Instruct

-from .utils import SelectionModel
+from .utils import SelectionModel, parse_selection, parse_to_representation

 if TYPE_CHECKING:
     from lionagi.session.branch import Branch
@@ -24,42 +24,104 @@ async def select(
     verbose: bool = False,
     **kwargs: Any,
 ) -> SelectionModel | tuple[SelectionModel, "Branch"]:
+    """
+    Select from choices using LLM - legacy wrapper with backwards compatibility.
+
+    Args:
+        branch: Branch instance to use
+        instruct: Instruction for selection
+        choices: Available choices (list, dict, or Enum)
+        max_num_selections: Max number of selections
+        branch_kwargs: Kwargs for branch creation (deprecated)
+        return_branch: Return (result, branch) tuple
+        verbose: Print progress
+        **kwargs: Additional operate kwargs
+
+    Returns:
+        SelectionModel or (SelectionModel, Branch) tuple
+    """
     if verbose:
         print(f"Starting selection with up to {max_num_selections} choices.")

-
+    # Handle branch creation for backwards compatibility
+    if branch is None and branch_kwargs:
+        from lionagi.session.branch import Branch

-
+        branch = Branch(**branch_kwargs)
+
+    result = await select_v1(
+        branch=branch,
+        instruct=instruct,
+        choices=choices,
+        max_num_selections=max_num_selections,
+        verbose=verbose,
+        **kwargs,
+    )
+
+    if return_branch:
+        return result, branch
+    return result
+
+
+async def select_v1(
+    branch: "Branch",
+    instruct: Instruct | dict[str, Any],
+    choices: list[str] | type[Enum] | dict[str, Any],
+    max_num_selections: int = 1,
+    verbose: bool = False,
+    **operate_kwargs: Any,
+) -> SelectionModel:
+    """
+    Context-based selection implementation.
+
+    Args:
+        branch: Branch instance
+        instruct: Selection instruction
+        choices: Available choices
+        max_num_selections: Maximum selections allowed
+        verbose: Print progress
+        **operate_kwargs: Additional operate parameters
+
+    Returns:
+        SelectionModel with corrected selections
+    """
+    # Parse choices into keys and representations
     selections, contents = parse_to_representation(choices)
     prompt = SelectionModel.PROMPT.format(
         max_num_selections=max_num_selections, choices=selections
     )

+    # Build instruction dictionary
     if isinstance(instruct, Instruct):
-
-
-
+        instruct_dict = instruct.to_dict()
+    else:
+        instruct_dict = instruct or {}

-
-
-
+    # Append selection prompt to instruction
+    if instruct_dict.get("instruction", None) is not None:
+        instruct_dict["instruction"] = (
+            f"{instruct_dict['instruction']}\n\n{prompt} \n\n "
         )
     else:
-
+        instruct_dict["instruction"] = prompt

-
+    # Add choice representations to context
+    context = instruct_dict.get("context", None) or []
     context = [context] if not isinstance(context, list) else context
     context.extend([{k: v} for k, v in zip(selections, contents)])
-
+    instruct_dict["context"] = context

+    # Call branch.operate with SelectionModel as response format
     response_model: SelectionModel = await branch.operate(
         response_format=SelectionModel,
-        **
-        **
+        **operate_kwargs,
+        **instruct_dict,
     )
+
     if verbose:
         print(f"Received selection: {response_model.selected}")

+    # Extract and normalize selected values
     selected = response_model
     if isinstance(response_model, BaseModel) and hasattr(
         response_model, "selected"
@@ -67,14 +129,13 @@ async def select(
         selected = response_model.selected
         selected = [selected] if not isinstance(selected, list) else selected

+    # Parse selections back to original choice values
     corrected_selections = [parse_selection(i, choices) for i in selected]

+    # Update response model with corrected selections
     if isinstance(response_model, BaseModel):
         response_model.selected = corrected_selections
-
     elif isinstance(response_model, dict):
         response_model["selected"] = corrected_selections

-    if return_branch:
-        return response_model, branch
     return response_model
lionagi/operations/types.py
CHANGED
@@ -1,26 +1,122 @@
 # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0

-from
-from
-from
-
-from
-
-from .
-from .
-from .
-from .
-
-
-
-
-
-
-
-"
-
-
-
-
-
+from dataclasses import dataclass
+from enum import Enum
+from typing import ClassVar, Literal
+
+from pydantic import BaseModel, JsonValue
+
+from lionagi.ln._async_call import AlcallParams
+from lionagi.ln.fuzzy import FuzzyMatchKeysParams
+from lionagi.ln.types import Params
+from lionagi.protocols.action.tool import ToolRef
+from lionagi.protocols.types import ID, SenderRecipient
+from lionagi.service.imodel import iModel
+
+HandleValidation = Literal["raise", "return_value", "return_none"]
+
+
+class ContextPolicy(str, Enum):
+    """Policy for merging prompt context across morphism invocations.
+
+    Attributes:
+        REPLACE: New context completely replaces existing context
+        EXTEND: New context is appended to existing context
+        DEDUP: New context is appended but duplicates are removed
+    """
+
+    REPLACE = "replace"
+    EXTEND = "extend"
+    DEDUP = "dedup"
+
+
+@dataclass(slots=True, frozen=True, init=False)
+class MorphParam(Params):
+    """Base class for morphism parameters (invariants).
+
+    MorphParams represent the invariant properties that define a morphism
+    in LionAGI's categorical framework. They are frozen (immutable) and
+    hashable, enabling reproducible operations and efficient caching.
+
+    Morphisms are the fundamental abstraction in LionAGI - they represent
+    transformations between message states with well-defined parameters.
+    """
+
+    _none_as_sentinel: ClassVar[bool] = True
+
+
+@dataclass(slots=True, frozen=True, init=False)
+class ChatParam(MorphParam):
+    """Parameters for chat/communicate morphism.
+
+    Defines the invariant properties of a chat operation, including
+    guidance, context, response format, and LLM-visible content.
+
+    Note: 'context' field contains prompt context (LLM-visible facts).
+    This gets mapped to InstructionContent.prompt_context during message creation.
+    """
+
+    _none_as_sentinel: ClassVar[bool] = True
+    guidance: JsonValue = None
+    context: JsonValue = None
+    sender: SenderRecipient = None
+    recipient: SenderRecipient = None
+    response_format: type[BaseModel] | dict = None
+    progression: ID.RefSeq = None
+    tool_schemas: list[dict] = None
+    images: list = None
+    image_detail: Literal["low", "high", "auto"] = None
+    plain_content: str = None
+    include_token_usage_to_model: bool = False
+    imodel: iModel = None
+    imodel_kw: dict = None
+
+
+@dataclass(slots=True, frozen=True, init=False)
+class InterpretParam(MorphParam):
+    """Parameters for interpret morphism.
+
+    Defines interpretation style, domain, and sample writing for
+    transforming content according to specified guidelines.
+    """
+
+    _none_as_sentinel: ClassVar[bool] = True
+    domain: str = None
+    style: str = None
+    sample_writing: str = None
+    imodel: iModel = None
+    imodel_kw: dict = None
+
+
+@dataclass(slots=True, frozen=True, init=False)
+class ParseParam(MorphParam):
+    """Parameters for parse morphism.
+
+    Defines parsing behavior including response format validation,
+    fuzzy matching, and error handling strategies.
+    """
+
+    _none_as_sentinel: ClassVar[bool] = True
+    response_format: type[BaseModel] | dict = None
+    fuzzy_match_params: FuzzyMatchKeysParams | dict = None
+    handle_validation: HandleValidation = "raise"
+    alcall_params: AlcallParams | dict = None
+    imodel: iModel = None
+    imodel_kw: dict = None
+
+
+@dataclass(slots=True, frozen=True, init=False)
+class ActionParam(MorphParam):
+    """Parameters for action/tool execution morphism.
+
+    Defines tool execution strategy, error handling, and verbosity
+    for action-based operations.
+    """
+
+    _none_as_sentinel: ClassVar[bool] = True
+    action_call_params: AlcallParams = None
+    tools: ToolRef = None
+    strategy: Literal["concurrent", "sequential"] = "concurrent"
+    suppress_errors: bool = True
+    verbose_action: bool = False
lionagi/protocols/generic/log.py
CHANGED
@@ -143,10 +143,11 @@ class DataLogger:
         if self._config.auto_save_on_exit:
             atexit.register(self.save_at_exit)

-    def log(self, log_: Log) -> None:
+    def log(self, log_: Any) -> None:
         """
         Add a log synchronously. If capacity is reached, auto-dump to file.
         """
+        log_ = Log.create(log_) if not isinstance(log_, Log) else log_
         if self._config.capacity and len(self.logs) >= self._config.capacity:
             try:
                 self.dump(clear=self._config.clear_after_dump)
@@ -154,7 +155,7 @@ class DataLogger:
                 logger.error(f"Failed to auto-dump logs: {e}")
         self.logs.include(log_)

-    async def alog(self, log_: Log) -> None:
+    async def alog(self, log_: Any) -> None:
         """
         Add a log asynchronously. If capacity is reached, auto-dump to file.
         """
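
The DataLogger change means callers no longer have to construct a Log themselves: any value is normalized via Log.create before the capacity check. A sketch of the new call pattern, assuming DataLogger() default construction works and Log.create accepts the passed object (neither is shown in this diff):

from lionagi.protocols.generic.log import DataLogger

dlog = DataLogger()  # assumption: defaults suffice

# Coercion added in 0.18.0:
#   log_ = Log.create(log_) if not isinstance(log_, Log) else log_
dlog.log({"event": "run_started"})  # non-Log input, assumed coercible
dlog.log({"event": "run_finished", "ok": True})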