lionagi 0.17.11__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff reflects the changes between publicly released package versions as they appear in their public registries and is provided for informational purposes only.
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/ln/types.py +32 -5
- lionagi/models/field_model.py +9 -0
- lionagi/operations/ReAct/ReAct.py +474 -237
- lionagi/operations/ReAct/utils.py +3 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/chat/chat.py +130 -114
- lionagi/operations/communicate/communicate.py +101 -42
- lionagi/operations/flow.py +4 -4
- lionagi/operations/interpret/interpret.py +65 -20
- lionagi/operations/operate/operate.py +212 -106
- lionagi/operations/parse/parse.py +170 -142
- lionagi/operations/select/select.py +78 -17
- lionagi/operations/select/utils.py +1 -1
- lionagi/operations/types.py +119 -23
- lionagi/protocols/generic/log.py +3 -2
- lionagi/protocols/messages/__init__.py +27 -0
- lionagi/protocols/messages/action_request.py +86 -184
- lionagi/protocols/messages/action_response.py +73 -131
- lionagi/protocols/messages/assistant_response.py +130 -159
- lionagi/protocols/messages/base.py +26 -18
- lionagi/protocols/messages/instruction.py +281 -625
- lionagi/protocols/messages/manager.py +112 -62
- lionagi/protocols/messages/message.py +87 -197
- lionagi/protocols/messages/system.py +52 -123
- lionagi/protocols/types.py +0 -2
- lionagi/service/connections/endpoint.py +0 -8
- lionagi/service/connections/providers/oai_.py +29 -94
- lionagi/service/connections/providers/ollama_.py +3 -2
- lionagi/service/hooks/hooked_event.py +2 -2
- lionagi/service/third_party/claude_code.py +3 -2
- lionagi/service/third_party/openai_models.py +433 -0
- lionagi/session/branch.py +170 -178
- lionagi/session/session.py +3 -9
- lionagi/tools/file/reader.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/RECORD +41 -49
- lionagi/operations/_act/act.py +0 -86
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -58
- /lionagi/operations/{_act → act}/__init__.py +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
lionagi/session/branch.py
CHANGED
@@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Any, Literal, Optional
 from pydantic import BaseModel, Field, JsonValue, PrivateAttr, field_serializer

 from lionagi.config import settings
+from lionagi.fields import Instruct
 from lionagi.ln.types import Unset
 from lionagi.models.field_model import FieldModel
 from lionagi.operations.flow import AlcallParams
@@ -38,8 +39,7 @@ from lionagi.protocols.types import (
     SenderRecipient,
     System,
 )
-from lionagi.service
-from lionagi.service.types import iModel, iModelManager
+from lionagi.service import Endpoint, iModel, iModelManager
 from lionagi.tools.base import LionTool
 from lionagi.utils import copy

@@ -194,11 +194,11 @@ class Branch(Element, Communicatable, Relational):
         system = f"Developer Prompt: {str(system)}" if system else ""
         system = (LION_SYSTEM_MESSAGE + "\n\n" + system).strip()

+        # Note: system_template and system_template_context are deprecated
+        # Template rendering has been removed from the message system
         self._message_manager.add_message(
             system=system,
             system_datetime=system_datetime,
-            template=system_template,
-            template_context=system_template_context,
             recipient=self.id,
             sender=system_sender or self.user or MessageRole.SYSTEM,
         )
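For orientation, a minimal usage sketch of the new behavior: a Branch is given a plain system string (with optional datetime stamping), and the removed system_template / system_template_context arguments are simply no longer passed. The top-level `from lionagi import Branch` import and the prompt text are illustrative assumptions, not taken from this diff.

from lionagi import Branch  # assumed top-level export

branch = Branch(
    system="You are a concise release-notes assistant.",  # plain string; no Jinja2 rendering anymore
    system_datetime=True,  # still supported: stamps the system message with the current time
)
print(len(branch.msgs.messages))  # 1 -- the system message added during __init__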
@@ -502,7 +502,7 @@ class Branch(Element, Communicatable, Relational):
             raise ValueError(
                 "Invalid message package: The item must be a `RoledMessage`."
             )
-        new_message = mail.package.item.
+        new_message = mail.package.item.model_copy(deep=True)
         new_message.sender = mail.sender
         new_message.recipient = self.id
         self.msgs.messages.include(new_message)
@@ -632,7 +632,7 @@ class Branch(Element, Communicatable, Relational):
         async def _connect(**kwargs):
             """connect to an api endpoint"""
             api_call = await imodel.invoke(**kwargs)
-            self._log_manager.log(
+            self._log_manager.log(api_call)
             return api_call.response

         _connect.__name__ = name or imodel.endpoint.name
@@ -760,6 +760,7 @@ class Branch(Element, Communicatable, Relational):
         image_detail: Literal["low", "high", "auto"] = None,
         plain_content: str = None,
         return_ins_res_message: bool = False,
+        include_token_usage_to_model: bool = False,
         **kwargs,
     ) -> tuple[Instruction, AssistantResponse]:
         """
@@ -809,25 +810,27 @@
             tuple[Instruction, AssistantResponse]:
                 The `Instruction` object and the final `AssistantResponse`.
         """
-        from lionagi.operations.chat.chat import chat
+        from lionagi.operations.chat.chat import ChatParam, chat

         return await chat(
             self,
             instruction=instruction,
-            … (12 lines not captured)
+            chat_param=ChatParam(
+                guidance=guidance,
+                context=context,
+                sender=sender or self.user or "user",
+                recipient=recipient or self.id,
+                response_format=response_format or request_fields,
+                progression=progression,
+                tool_schemas=tool_schemas or [],
+                images=images or [],
+                image_detail=image_detail or "auto",
+                plain_content=plain_content or "",
+                include_token_usage_to_model=include_token_usage_to_model,
+                imodel=imodel or self.chat_model,
+                imodel_kw=kwargs,
+            ),
             return_ins_res_message=return_ins_res_message,
-            **kwargs,
         )

     async def parse(
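For reference, a hedged sketch of calling the refactored Branch.chat, which now packs these keyword arguments into a ChatParam (from lionagi.operations.chat.chat) before delegating; the provider/model choice and the top-level imports are illustrative assumptions.

import asyncio

from lionagi import Branch, iModel  # assumed top-level exports


async def main() -> None:
    # Model choice is illustrative only.
    branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))
    ins, res = await branch.chat(
        "Summarize this release in one sentence.",
        include_token_usage_to_model=True,  # new parameter surfaced in 0.18.0
        return_ins_res_message=True,        # returns (Instruction, AssistantResponse)
    )
    print(res)


if __name__ == "__main__":
    asyncio.run(main())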
@@ -889,25 +892,14 @@
             BaseModel | dict | str | None:
                 Parsed model instance, or a fallback based on `handle_validation`.
         """
-        from lionagi.operations.parse.parse import parse
+        from lionagi.operations.parse.parse import parse, prepare_parse_kws

-        … (6 lines not captured)
-            operative=operative,
-            similarity_algo=similarity_algo,
-            similarity_threshold=similarity_threshold,
-            fuzzy_match=fuzzy_match,
-            handle_unmatched=handle_unmatched,
-            fill_value=fill_value,
-            fill_mapping=fill_mapping,
-            strict=strict,
-            suppress_conversion_errors=suppress_conversion_errors,
-            response_format=response_format,
-        )
+        _pms = {
+            k: v
+            for k, v in locals().items()
+            if k not in ("self", "_pms") and v is not None
+        }
+        return await parse(self, **prepare_parse_kws(self, **_pms))

     async def operate(
         self,
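The wrapper methods below (parse, operate, communicate, act, interpret) now share one delegation idiom: gather every explicitly supplied, non-None argument from locals() and hand the bundle to a prepare_*_kw(s) helper. A standalone sketch of that idiom follows; prepare_parse_kws here is a stand-in with made-up defaults, not the real lionagi helper.

def prepare_parse_kws(**overrides):
    # Stand-in helper: merge caller-supplied overrides onto a set of defaults.
    defaults = {"handle_validation": "raise", "strict": False}
    return {**defaults, **overrides}


def parse_wrapper(text, response_format=None, strict=None, handle_validation=None):
    # Same idiom as Branch.parse: keep only the arguments the caller actually set.
    # locals() in the outermost `for` clause is evaluated in this function's scope.
    _pms = {
        k: v
        for k, v in locals().items()
        if k not in ("self", "_pms") and v is not None
    }
    return prepare_parse_kws(**_pms)


print(parse_wrapper("hello", strict=True))
# {'handle_validation': 'raise', 'strict': True, 'text': 'hello'}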
@@ -1026,39 +1018,18 @@
             - If both `operative_model` and `response_format` or `request_model` are given.
             - If the LLM's response cannot be parsed into the expected format and `handle_validation='raise'`.
         """
-        … (3 lines not captured)
-            self,
-        … (4 lines not captured)
-            sender=sender,
-            recipient=recipient,
-            progression=progression,
-            chat_model=chat_model,
-            invoke_actions=invoke_actions,
-            tool_schemas=tool_schemas,
-            images=images,
-            image_detail=image_detail,
-            parse_model=parse_model,
-            skip_validation=skip_validation,
-            tools=tools,
-            operative=operative,
-            response_format=response_format,
-            actions=actions,
-            reason=reason,
-            call_params=call_params,
-            action_strategy=action_strategy,
-            verbose_action=verbose_action,
-            field_models=field_models,
-            exclude_fields=exclude_fields,
-            handle_validation=handle_validation,
-            include_token_usage_to_model=include_token_usage_to_model,
-            **kwargs,
+        _pms = {
+            k: v
+            for k, v in locals().items()
+            if k not in ("self", "_pms") and v is not None
+        }
+        from lionagi.operations.operate.operate import (
+            operate,
+            prepare_operate_kw,
         )

+        return await operate(self, **prepare_operate_kw(self, **_pms))
+
     async def communicate(
         self,
         instruction: Instruction | JsonValue = None,
@@ -1132,44 +1103,19 @@
             - A dict of the requested fields,
             - or `None` if parsing fails and `handle_validation='return_none'`.
         """
-        … (1 line not captured)
+        _pms = {
+            k: v
+            for k, v in locals().items()
+            if k not in ("self", "_pms", "kwargs") and v is not None
+        }
+        _pms.update(kwargs)

-        … (3 lines not captured)
-            guidance=guidance,
-            context=context,
-            plain_content=plain_content,
-            sender=sender,
-            recipient=recipient,
-            progression=progression,
-            response_format=response_format,
-            request_fields=request_fields,
-            chat_model=chat_model,
-            parse_model=parse_model,
-            skip_validation=skip_validation,
-            images=images,
-            image_detail=image_detail,
-            num_parse_retries=num_parse_retries,
-            clear_messages=clear_messages,
-            include_token_usage_to_model=include_token_usage_to_model,
-            **kwargs,
+        from lionagi.operations.communicate.communicate import (
+            communicate,
+            prepare_communicate_kw,
         )

-        … (1 line not captured)
-        self,
-        action_request: ActionRequest | BaseModel | dict,
-        suppress_errors: bool,
-        verbose_action: bool,
-    ) -> ActionResponse:
-        from lionagi.operations._act.act import _act
-
-        return await _act(
-            branch=self,
-            action_request=action_request,
-            suppress_errors=suppress_errors,
-            verbose_action=verbose_action,
-        )
+        return await communicate(self, **prepare_communicate_kw(self, **_pms))

     async def act(
         self,
@@ -1180,54 +1126,15 @@
         suppress_errors: bool = True,
         call_params: AlcallParams = None,
     ) -> list[ActionResponse]:
-        global _DEFAULT_ALCALL_PARAMS
-        if call_params is None:
-            if _DEFAULT_ALCALL_PARAMS is None:
-                _DEFAULT_ALCALL_PARAMS = AlcallParams(output_dropna=True)
-            call_params = _DEFAULT_ALCALL_PARAMS
-
-        kw = {
-            "suppress_errors": suppress_errors,
-            "verbose_action": verbose_action,
-        }
-
-        match strategy:
-            case "concurrent":
-                return await self._concurrent_act(
-                    action_request, call_params, **kw
-                )
-            case "sequential":
-                return await self._sequential_act(action_request, **kw)
-            case _:
-                raise ValueError(
-                    "Invalid strategy. Choose 'concurrent' or 'sequential'."
-                )

-        … (6 lines not captured)
-        return await call_params(action_request, self._act, **kwargs)
+        _pms = {
+            k: v
+            for k, v in locals().items()
+            if k not in ("self", "_pms") and v is not None
+        }
+        from lionagi.operations.act.act import act, prepare_act_kw

-        … (1 line not captured)
-        self,
-        action_request: ActionRequest | BaseModel | dict,
-        suppress_errors: bool = True,
-        verbose_action: bool = False,
-    ) -> list:
-        action_request = (
-            action_request
-            if isinstance(action_request, list)
-            else [action_request]
-        )
-        results = []
-        for req in action_request:
-            results.append(
-                await self._act(req, verbose_action, suppress_errors)
-            )
-        return results
+        return await act(self, **prepare_act_kw(self, **_pms))

     async def interpret(
         self,
@@ -1275,17 +1182,21 @@
         # refined might be "Explain step-by-step how to set up a marketing analytics
         # pipeline to track campaign performance..."
         """
-        from lionagi.operations.interpret.interpret import interpret

-        … (7 lines not captured)
+        _pms = {
+            k: v
+            for k, v in locals().items()
+            if k not in ("self", "_pms", "kwargs") and v is not None
+        }
+        _pms.update(kwargs)
+
+        from lionagi.operations.interpret.interpret import (
+            interpret,
+            prepare_interpret_kw,
         )

+        return await interpret(self, **prepare_interpret_kw(self, **_pms))
+
     async def instruct(
         self,
         instruct: "Instruct",
@@ -1413,6 +1324,13 @@
         """
         from lionagi.operations.ReAct.ReAct import ReAct

+        # Remove potential duplicate parameters from kwargs
+        kwargs_filtered = {
+            k: v
+            for k, v in kwargs.items()
+            if k not in {"verbose_analysis", "verbose_action"}
+        }
+
         return await ReAct(
             self,
             instruct,
@@ -1438,7 +1356,7 @@
             reasoning_effort=reasoning_effort,
             display_as=display_as,
             include_token_usage_to_model=include_token_usage_to_model,
-            **
+            **kwargs_filtered,
         )

     async def ReActStream(
@@ -1466,40 +1384,114 @@
         include_token_usage_to_model: bool = True,
         **kwargs,
     ) -> AsyncGenerator:
+        from lionagi.ln.fuzzy import FuzzyMatchKeysParams
         from lionagi.operations.ReAct.ReAct import ReActStream
+        from lionagi.operations.ReAct.utils import ReActAnalysis
+        from lionagi.operations.types import (
+            ActionParam,
+            ChatParam,
+            InterpretParam,
+            ParseParam,
+        )
+
+        # Convert Instruct to dict if needed
+        instruct_dict = (
+            instruct.to_dict()
+            if isinstance(instruct, Instruct)
+            else dict(instruct)
+        )
+
+        # Build InterpretContext if interpretation requested
+        intp_param = None
+        if interpret:
+            intp_param = InterpretParam(
+                domain=interpret_domain or "general",
+                style=interpret_style or "concise",
+                sample_writing=interpret_sample or "",
+                imodel=interpret_model or analysis_model or self.chat_model,
+                imodel_kw=interpret_kwargs or {},
+            )
+
+        # Build ChatContext
+        chat_param = ChatParam(
+            guidance=instruct_dict.get("guidance"),
+            context=instruct_dict.get("context"),
+            sender=self.user or "user",
+            recipient=self.id,
+            response_format=None,
+            progression=None,
+            tool_schemas=tool_schemas or [],
+            images=[],
+            image_detail="auto",
+            plain_content="",
+            include_token_usage_to_model=include_token_usage_to_model,
+            imodel=analysis_model or self.chat_model,
+            imodel_kw=kwargs,
+        )
+
+        # Build ActionContext
+        action_param = None
+        if tools is not None or tool_schemas is not None:
+            from lionagi.operations.act.act import _get_default_call_params
+
+            action_param = ActionParam(
+                action_call_params=_get_default_call_params(),
+                tools=tools or True,
+                strategy="concurrent",
+                suppress_errors=True,
+                verbose_action=False,
+            )
+
+        # Build ParseContext
+        from lionagi.operations.parse.parse import get_default_call
+
+        parse_param = ParseParam(
+            response_format=ReActAnalysis,
+            fuzzy_match_params=FuzzyMatchKeysParams(),
+            handle_validation="return_value",
+            alcall_params=get_default_call(),
+            imodel=analysis_model or self.chat_model,
+            imodel_kw={},
+        )
+
+        # Response context for final answer
+        resp_ctx = response_kwargs or {}
+        if response_format:
+            resp_ctx["response_format"] = response_format

         async for result in ReActStream(
             self,
-            instruct,
-            … (9 lines not captured)
+            instruction=instruct_dict.get("instruction", str(instruct)),
+            chat_param=chat_param,
+            action_param=action_param,
+            parse_param=parse_param,
+            intp_param=intp_param,
+            resp_ctx=resp_ctx,
+            reasoning_effort=reasoning_effort,
+            reason=True,
+            field_models=None,
+            handle_validation="return_value",
+            invoke_actions=True,
+            clear_messages=False,
             intermediate_response_options=intermediate_response_options,
             intermediate_listable=intermediate_listable,
-            … (1 line not captured)
-            extension_allowed=extension_allowed,
+            intermediate_nullable=False,
             max_extensions=max_extensions,
-            … (2 lines not captured)
-            verbose_analysis=True,
+            extension_allowed=extension_allowed,
+            verbose_analysis=verbose,
             display_as=display_as,
             verbose_length=verbose_length,
-            … (1 line not captured)
-            **kwargs,
+            continue_after_failed_response=False,
         ):
-            analysis, str_ = result
             if verbose:
+                analysis, str_ = result
                 from lionagi.libs.schema.as_readable import as_readable

                 str_ += "\n---------\n"
                 as_readable(str_, md=True, display_str=True)
-                … (1 line not captured)
+                yield analysis
+            else:
+                yield result


 # File: lionagi/session/branch.py
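Taken together, ReActStream now assembles ChatParam, ActionParam, ParseParam, and (optionally) InterpretParam objects instead of forwarding loose keyword arguments. A hedged end-to-end sketch of driving it from a Branch; the instruct payload, tool, and setup are illustrative assumptions.

import asyncio

from lionagi import Branch  # assumed top-level export


def search_docs(query: str) -> str:
    """Toy retrieval tool standing in for a real one."""
    return f"No results for {query!r} (stub)."


async def main() -> None:
    branch = Branch(tools=[search_docs])
    async for analysis in branch.ReActStream(
        {"instruction": "Which message templates were removed in 0.18.0?"},
        interpret=True,   # builds an InterpretParam internally
        tools=True,       # builds an ActionParam over the branch's registered tools
        max_extensions=2,
        verbose=False,    # non-verbose mode yields the analysis objects directly
    ):
        print(analysis)


if __name__ == "__main__":
    asyncio.run(main())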
lionagi/session/session.py
CHANGED
@@ -22,7 +22,6 @@ from lionagi.protocols.types import (
     Graph,
     IDType,
     MailManager,
-    MessageFlag,
     Node,
     Pile,
     Progression,
@@ -273,17 +272,12 @@
         if any(i not in self.branches for i in branches):
             raise ValueError("Branch does not exist.")

-        … (2 lines not captured)
-            exclude_flag.append(MessageFlag.MESSAGE_CLONE)
-        if exclude_load:
-            exclude_flag.append(MessageFlag.MESSAGE_LOAD)
+        # Note: exclude_clone and exclude_load parameters are deprecated
+        # and currently have no effect. They are kept for API compatibility.

         messages = lcall(
             branches,
-            lambda x: [
-                i for i in self.branches[x].messages if i not in exclude_flag
-            ],
+            lambda x: list(self.branches[x].messages),
             input_unique=True,
             input_flatten=True,
             input_dropna=True,
lionagi/tools/file/reader.py
CHANGED
@@ -79,8 +79,8 @@ class ReaderRequest(BaseModel):
         ),
     )

-    recursive: bool = Field(
-        … (1 line not captured)
+    recursive: bool | None = Field(
+        None,
         description=(
             "Whether to recursively list files in subdirectories. Defaults to False."
             "Only used if action='list_dir'."
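A minimal standalone sketch (plain Pydantic, not lionagi code) of why recursive moved from bool to bool | None: a None default lets the tool distinguish "not provided" from an explicit False, so omitted fields can be dropped before dispatch. The model name here is hypothetical.

from pydantic import BaseModel, Field


class ListDirRequest(BaseModel):  # hypothetical stand-in for ReaderRequest
    recursive: bool | None = Field(
        None,
        description="Whether to recursively list files. Defaults to False; only used for list_dir.",
    )


print(ListDirRequest().model_dump(exclude_none=True))                 # {} -> tool default applies
print(ListDirRequest(recursive=False).model_dump(exclude_none=True))  # {'recursive': False}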
lionagi/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.17.11"
+__version__ = "0.18.0"
{lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.17.11
+Version: 0.18.0
 Summary: An Intelligence Operating System.
 Author-email: HaiyangLi <quantocean.li@gmail.com>
 License: Apache License
@@ -223,7 +223,6 @@ Requires-Dist: aiocache>=0.12.0
 Requires-Dist: aiohttp>=3.11.0
 Requires-Dist: anyio>=4.7.0
 Requires-Dist: backoff>=2.0.0
-Requires-Dist: jinja2>=3.0.0
 Requires-Dist: json-repair>=0.40.0
 Requires-Dist: msgspec>=0.18.0
 Requires-Dist: pydantic-settings>=2.8.0