lionagi 0.17.11__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff compares the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only.
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/ln/types.py +32 -5
- lionagi/models/field_model.py +9 -0
- lionagi/operations/ReAct/ReAct.py +474 -237
- lionagi/operations/ReAct/utils.py +3 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/chat/chat.py +130 -114
- lionagi/operations/communicate/communicate.py +101 -42
- lionagi/operations/flow.py +4 -4
- lionagi/operations/interpret/interpret.py +65 -20
- lionagi/operations/operate/operate.py +212 -106
- lionagi/operations/parse/parse.py +170 -142
- lionagi/operations/select/select.py +78 -17
- lionagi/operations/select/utils.py +1 -1
- lionagi/operations/types.py +119 -23
- lionagi/protocols/generic/log.py +3 -2
- lionagi/protocols/messages/__init__.py +27 -0
- lionagi/protocols/messages/action_request.py +86 -184
- lionagi/protocols/messages/action_response.py +73 -131
- lionagi/protocols/messages/assistant_response.py +130 -159
- lionagi/protocols/messages/base.py +26 -18
- lionagi/protocols/messages/instruction.py +281 -625
- lionagi/protocols/messages/manager.py +112 -62
- lionagi/protocols/messages/message.py +87 -197
- lionagi/protocols/messages/system.py +52 -123
- lionagi/protocols/types.py +0 -2
- lionagi/service/connections/endpoint.py +0 -8
- lionagi/service/connections/providers/oai_.py +29 -94
- lionagi/service/connections/providers/ollama_.py +3 -2
- lionagi/service/hooks/hooked_event.py +2 -2
- lionagi/service/third_party/claude_code.py +3 -2
- lionagi/service/third_party/openai_models.py +433 -0
- lionagi/session/branch.py +170 -178
- lionagi/session/session.py +3 -9
- lionagi/tools/file/reader.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/RECORD +41 -49
- lionagi/operations/_act/act.py +0 -86
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -58
- /lionagi/operations/{_act → act}/__init__.py +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
lionagi/operations/ReAct/utils.py
CHANGED
@@ -101,6 +101,9 @@ class ReActAnalysis(HashableModel):
         ),
     )
 
+    # Note: action_requests and action_responses are added dynamically by Step.request_operative()
+    # when actions=True, so they don't need to be defined here. The operate() function will add them.
+
 
 class Analysis(HashableModel):
     answer: str | None = None
lionagi/operations/act/act.py
ADDED
@@ -0,0 +1,206 @@
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+from typing import TYPE_CHECKING, Literal
+
+from pydantic import BaseModel
+
+from lionagi.fields.action import ActionResponseModel
+from lionagi.ln._async_call import AlcallParams
+from lionagi.protocols.types import ActionRequest, ActionResponse
+
+from ..types import ActionParam
+
+if TYPE_CHECKING:
+    from lionagi.session.branch import Branch
+
+_DEFAULT_ALCALL_PARAMS = None
+
+
+async def _act(
+    branch: "Branch",
+    action_request: BaseModel | dict | ActionRequest,
+    suppress_errors: bool = False,
+    verbose_action: bool = False,
+):
+
+    _request = action_request
+    if isinstance(action_request, ActionRequest):
+        _request = {
+            "function": action_request.function,
+            "arguments": action_request.arguments,
+        }
+    elif isinstance(action_request, BaseModel) and set(
+        action_request.__class__.model_fields.keys()
+    ) >= {"function", "arguments"}:
+        _request = {
+            "function": action_request.function,
+            "arguments": action_request.arguments,
+        }
+    if not isinstance(_request, dict) or not {"function", "arguments"} <= set(
+        _request.keys()
+    ):
+        raise ValueError(
+            "action_request must be an ActionRequest, BaseModel with 'function'"
+            " and 'arguments', or dict with 'function' and 'arguments'."
+        )
+
+    try:
+        if verbose_action:
+            args_ = str(_request["arguments"])
+            args_ = args_[:50] + "..." if len(args_) > 50 else args_
+            print(f"Invoking action {_request['function']} with {args_}.")
+
+        func_call = await branch._action_manager.invoke(_request)
+        if verbose_action:
+            print(
+                f"Action {_request['function']} invoked, status: {func_call.status}."
+            )
+
+    except Exception as e:
+        content = {
+            "error": str(e),
+            "function": _request.get("function"),
+            "arguments": _request.get("arguments"),
+            "branch": str(branch.id),
+        }
+        branch._log_manager.log(content)
+        if verbose_action:
+            print(f"Action {_request['function']} failed, error: {str(e)}.")
+        if suppress_errors:
+            error_msg = f"Error invoking action '{_request['function']}': {e}"
+            logging.error(error_msg)
+
+            # Return error as action response so model knows it failed
+            return ActionResponseModel(
+                function=_request.get("function", "unknown"),
+                arguments=_request.get("arguments", {}),
+                output={"error": str(e), "message": error_msg},
+            )
+        raise e
+
+    branch._log_manager.log(func_call)
+
+    if not isinstance(action_request, ActionRequest):
+        action_request = ActionRequest(
+            content=_request,
+            sender=branch.id,
+            recipient=func_call.func_tool.id,
+        )
+
+    # Add the action request/response to the message manager, if not present
+    if action_request not in branch.messages:
+        branch.msgs.add_message(action_request=action_request)
+
+    branch.msgs.add_message(
+        action_request=action_request,
+        action_output=func_call.response,
+    )
+
+    return ActionResponseModel(
+        function=action_request.function,
+        arguments=action_request.arguments,
+        output=func_call.response,
+    )
+
+
+def prepare_act_kw(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    *,
+    strategy: Literal["concurrent", "sequential"] = "concurrent",
+    verbose_action: bool = False,
+    suppress_errors: bool = True,
+    call_params: AlcallParams = None,
+):
+
+    action_param = ActionParam(
+        action_call_params=call_params or _get_default_call_params(),
+        tools=None,  # Not used in this context
+        strategy=strategy,
+        suppress_errors=suppress_errors,
+        verbose_action=verbose_action,
+    )
+    return {
+        "action_request": action_request,
+        "action_param": action_param,
+    }
+
+
+async def act(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    action_param: ActionParam,
+) -> list[ActionResponse]:
+    """Execute action requests with ActionParam."""
+
+    match action_param.strategy:
+        case "concurrent":
+            return await _concurrent_act(
+                branch,
+                action_request,
+                action_param.action_call_params,
+                suppress_errors=action_param.suppress_errors,
+                verbose_action=action_param.verbose_action,
+            )
+        case "sequential":
+            return await _sequential_act(
+                branch,
+                action_request,
+                suppress_errors=action_param.suppress_errors,
+                verbose_action=action_param.verbose_action,
+            )
+        case _:
+            raise ValueError(
+                "Invalid strategy. Choose 'concurrent' or 'sequential'."
+            )
+
+
+async def _concurrent_act(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    call_params: AlcallParams,
+    suppress_errors: bool = True,
+    verbose_action: bool = False,
+) -> list:
+    """Execute actions concurrently using AlcallParams."""
+
+    async def _wrapper(req):
+        return await _act(branch, req, suppress_errors, verbose_action)
+
+    # AlcallParams expects a list as first argument
+    action_request_list = (
+        action_request
+        if isinstance(action_request, list)
+        else [action_request]
+    )
+
+    return await call_params(action_request_list, _wrapper)
+
+
+async def _sequential_act(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    suppress_errors: bool = True,
+    verbose_action: bool = False,
+) -> list:
+    """Execute actions sequentially."""
+    action_request = (
+        action_request
+        if isinstance(action_request, list)
+        else [action_request]
+    )
+    results = []
+    for req in action_request:
+        result = await _act(branch, req, suppress_errors, verbose_action)
+        results.append(result)
+    return results
+
+
+def _get_default_call_params() -> AlcallParams:
+    """Get or create default AlcallParams."""
+    global _DEFAULT_ALCALL_PARAMS
+    if _DEFAULT_ALCALL_PARAMS is None:
+        _DEFAULT_ALCALL_PARAMS = AlcallParams(output_dropna=True)
+    return _DEFAULT_ALCALL_PARAMS
lionagi/operations/chat/chat.py
CHANGED
@@ -1,19 +1,18 @@
 # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING
 
-from pydantic import
+from pydantic import JsonValue
 
-from lionagi.
+from lionagi.ln._to_list import to_list
+from lionagi.protocols.messages import (
     ActionResponse,
     AssistantResponse,
     Instruction,
-    Log,
-    RoledMessage,
 )
-
-from
+
+from ..types import ChatParam
 
 if TYPE_CHECKING:
     from lionagi.session.branch import Branch
@@ -21,98 +20,94 @@ if TYPE_CHECKING:
 
 async def chat(
     branch: "Branch",
-    instruction
-
-    context=None,
-    sender=None,
-    recipient=None,
-    request_fields=None,
-    response_format: type[BaseModel] = None,
-    progression=None,
-    imodel: iModel = None,
-    tool_schemas=None,
-    images: list = None,
-    image_detail: Literal["low", "high", "auto"] = None,
-    plain_content: str = None,
+    instruction: JsonValue | Instruction,
+    chat_param: ChatParam,
     return_ins_res_message: bool = False,
-
-
-
-
-
-
-
-
-        recipient=recipient or branch.id,
-        response_format=response_format,
-        request_fields=request_fields,
-        images=images,
-        image_detail=image_detail,
-        tool_schemas=tool_schemas,
-        plain_content=plain_content,
+) -> tuple[Instruction, AssistantResponse] | str:
+    params = chat_param.to_dict(
+        exclude={
+            "imodel",
+            "imodel_kw",
+            "include_token_usage_to_model",
+            "progression",
+        }
     )
+    params["sender"] = chat_param.sender or branch.user or "user"
+    params["recipient"] = chat_param.recipient or branch.id
+    params["instruction"] = instruction
 
-
-
-
-
-
-
-
-
-
-
-
-
-        if isinstance(i, AssistantResponse):
-            j = AssistantResponse(
-                role=i.role,
-                content=copy(i.content),
-                sender=i.sender,
-                recipient=i.recipient,
-                template=i.template,
-            )
-            _to_use.append(j)
-        if isinstance(i, Instruction):
-            j = Instruction(
-                role=i.role,
-                content=copy(i.content),
-                sender=i.sender,
-                recipient=i.recipient,
-                template=i.template,
+    ins = branch.msgs.create_instruction(**params)
+
+    _use_ins, _use_msgs, _act_res = None, [], []
+    progression = chat_param.progression or branch.msgs.progression
+
+    for msg in (branch.msgs.messages[j] for j in progression):
+        if isinstance(msg, ActionResponse):
+            _act_res.append(msg)
+
+        if isinstance(msg, AssistantResponse):
+            _use_msgs.append(
+                msg.model_copy(update={"content": msg.content.with_updates()})
             )
-            j.tool_schemas = None
-            j.respond_schema_info = None
-            j.request_response_format = None
-
-            if _action_responses:
-                d_ = [k.content for k in _action_responses]
-                for z in d_:
-                    if z not in j.context:
-                        j.context.append(z)
-
-                _to_use.append(j)
-                _action_responses = set()
-            else:
-                _to_use.append(j)
 
-
-
-
-
-
-
-
-
+        if isinstance(msg, Instruction):
+            j = msg.model_copy(update={"content": msg.content.with_updates()})
+            j.content.tool_schemas.clear()
+            j.content.response_format = None
+            j.content._schema_dict = None
+            j.content._model_class = None
+
+            if _act_res:
+                # Convert ActionResponseContent to dicts for proper rendering
+                d_ = []
+                for k in to_list(_act_res, flatten=True, unique=True):
+                    if hasattr(k.content, "function"):  # ActionResponseContent
+                        d_.append(
+                            {
+                                "function": k.content.function,
+                                "arguments": k.content.arguments,
+                                "output": k.content.output,
+                            }
+                        )
+                    else:
+                        d_.append(k.content)
+                j.content.prompt_context.extend(
+                    [z for z in d_ if z not in j.content.prompt_context]
+                )
+                _use_msgs.append(j)
+                _act_res = []
+            else:
+                _use_msgs.append(j)
+
+    if _act_res:
+        j = ins.model_copy(update={"content": ins.content.with_updates()})
+        # Convert ActionResponseContent to dicts for proper rendering
+        d_ = []
+        for k in to_list(_act_res, flatten=True, unique=True):
+            if hasattr(k.content, "function"):  # ActionResponseContent
+                d_.append(
+                    {
+                        "function": k.content.function,
+                        "arguments": k.content.arguments,
+                        "output": k.content.output,
+                    }
+                )
+            else:
+                d_.append(k.content)
+        j.content.prompt_context.extend(
+            [z for z in d_ if z not in j.content.prompt_context]
+        )
+        _use_ins = j
 
-
-
+    messages = _use_msgs
+    if _use_msgs and len(_use_msgs) > 1:
+        _msgs = [_use_msgs[0]]
 
-        for i in
+        for i in _use_msgs[1:]:
             if isinstance(i, AssistantResponse):
                 if isinstance(_msgs[-1], AssistantResponse):
-                    _msgs[-1].
-                        f"{_msgs[-1].
+                    _msgs[-1].content.assistant_response = (
+                        f"{_msgs[-1].content.assistant_response}\n\n{i.content.assistant_response}"
                     )
                 else:
                     _msgs.append(i)
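One behavior worth noting in this hunk: after rebuilding the message list, chat() merges runs of consecutive AssistantResponse messages into one, joining their text with a blank line. A standalone illustration of that fold using plain (role, text) pairs instead of lionagi message objects:

def fold_assistant_runs(messages: list[tuple[str, str]]) -> list[tuple[str, str]]:
    # Collapse consecutive assistant turns into one message, mirroring the
    # "\n\n" join in the diff above.
    if not messages:
        return []
    folded = [messages[0]]
    for role, text in messages[1:]:
        last_role, last_text = folded[-1]
        if role == "assistant" and last_role == "assistant":
            folded[-1] = (role, f"{last_text}\n\n{text}")
        else:
            folded.append((role, text))
    return folded


turns = [("user", "hi"), ("assistant", "part 1"), ("assistant", "part 2")]
assert fold_assistant_runs(turns) == [
    ("user", "hi"),
    ("assistant", "part 1\n\npart 2"),
]

Many chat-completion APIs reject or mishandle back-to-back assistant messages, so folding them before the payload is built keeps the provider request well formed.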
@@ -125,11 +120,10 @@ async def chat(
     if branch.msgs.system:
         messages = [msg for msg in messages if msg.role != "system"]
         first_instruction = None
-
+        f = lambda x: branch.msgs.system.rendered + (x.content.guidance or "")
         if len(messages) == 0:
-            first_instruction = ins.model_copy(
-
-                first_instruction.guidance or ""
+            first_instruction = ins.model_copy(
+                update={"content": ins.content.with_updates(guidance=f(ins))}
             )
             messages.append(first_instruction)
         elif len(messages) >= 1:
@@ -138,37 +132,59 @@ async def chat(
             raise ValueError(
                 "First message in progression must be an Instruction or System"
             )
-            first_instruction = first_instruction.model_copy(
-
-
+            first_instruction = first_instruction.model_copy(
+                update={
+                    "content": first_instruction.content.with_updates(
+                        guidance=f(first_instruction)
+                    )
+                }
             )
             messages[0] = first_instruction
-
+            msg_to_append = _use_ins or ins
+            if msg_to_append is not None:
+                messages.append(msg_to_append)
 
     else:
-
-
-
-
-
-
-
-
-
-
+        msg_to_append = _use_ins or ins
+        if msg_to_append is not None:
+            messages.append(msg_to_append)
+
+    kw = (chat_param.imodel_kw or {}).copy()
+
+    # Filter out messages with None chat_msg
+    chat_msgs = []
+    for msg in messages:
+        if msg is not None and hasattr(msg, "chat_msg"):
+            chat_msg = msg.chat_msg
+            if chat_msg is not None:
+                chat_msgs.append(chat_msg)
+
+    kw["messages"] = chat_msgs
+
+    imodel = chat_param.imodel or branch.chat_model
+    meth = imodel.stream if "stream" in kw and kw["stream"] else imodel.invoke
+
+    if meth is imodel.invoke:
+        # Only set if it's not the Unset sentinel value
+        if not chat_param._is_sentinel(
+            chat_param.include_token_usage_to_model
+        ):
+            kw["include_token_usage_to_model"] = (
+                chat_param.include_token_usage_to_model
+            )
+    api_call = await meth(**kw)
 
-    api_call
-    branch._log_manager.log(Log.create(api_call))
+    branch._log_manager.log(api_call)
 
     if return_ins_res_message:
         # Wrap result in `AssistantResponse` and return
-        return ins, AssistantResponse.
-
+        return ins, AssistantResponse.from_response(
+            api_call.response,
             sender=branch.id,
             recipient=branch.user,
         )
-    return AssistantResponse.
-
+    return AssistantResponse.from_response(
+        api_call.response,
         sender=branch.id,
         recipient=branch.user,
     ).response
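The net effect of this file's rewrite: chat() now takes the instruction plus a single ChatParam instead of a dozen keyword arguments, and falls back to branch defaults for sender, recipient, and model. A hedged sketch of a call site; ChatParam field names beyond those visible in this diff (sender, recipient, progression, imodel, imodel_kw, include_token_usage_to_model) and its constructor defaults are assumptions:

# Hypothetical call through the reworked chat(); ChatParam lives in
# lionagi.operations.types per the "from ..types import ChatParam" above.
from lionagi.operations.chat.chat import chat
from lionagi.operations.types import ChatParam


async def ask(branch, question: str) -> str:
    param = ChatParam(
        imodel_kw={"temperature": 0.2},  # forwarded to the model invocation
        # sender/recipient/imodel left unset: chat() falls back to
        # branch.user, branch.id, and branch.chat_model respectively.
    )
    # With return_ins_res_message=True this returns the
    # (Instruction, AssistantResponse) pair instead of plain text.
    return await chat(branch, question, param)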