lionagi 0.17.11__py3-none-any.whl → 0.18.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. lionagi/_errors.py +0 -5
  2. lionagi/fields.py +83 -0
  3. lionagi/libs/schema/minimal_yaml.py +98 -0
  4. lionagi/ln/__init__.py +3 -1
  5. lionagi/ln/concurrency/primitives.py +4 -4
  6. lionagi/ln/concurrency/task.py +1 -0
  7. lionagi/ln/types.py +32 -5
  8. lionagi/models/field_model.py +21 -4
  9. lionagi/models/hashable_model.py +2 -3
  10. lionagi/operations/ReAct/ReAct.py +475 -238
  11. lionagi/operations/ReAct/utils.py +3 -0
  12. lionagi/operations/act/act.py +206 -0
  13. lionagi/operations/builder.py +5 -7
  14. lionagi/operations/chat/chat.py +130 -114
  15. lionagi/operations/communicate/communicate.py +101 -42
  16. lionagi/operations/fields.py +380 -0
  17. lionagi/operations/flow.py +8 -10
  18. lionagi/operations/interpret/interpret.py +65 -20
  19. lionagi/operations/node.py +4 -4
  20. lionagi/operations/operate/operate.py +216 -108
  21. lionagi/{protocols/operatives → operations/operate}/operative.py +4 -5
  22. lionagi/{protocols/operatives → operations/operate}/step.py +34 -39
  23. lionagi/operations/parse/parse.py +170 -142
  24. lionagi/operations/select/select.py +79 -18
  25. lionagi/operations/select/utils.py +8 -2
  26. lionagi/operations/types.py +119 -23
  27. lionagi/protocols/action/manager.py +5 -6
  28. lionagi/protocols/contracts.py +2 -2
  29. lionagi/protocols/generic/__init__.py +22 -0
  30. lionagi/protocols/generic/element.py +36 -127
  31. lionagi/protocols/generic/log.py +3 -2
  32. lionagi/protocols/generic/pile.py +9 -10
  33. lionagi/protocols/generic/progression.py +23 -22
  34. lionagi/protocols/graph/edge.py +6 -5
  35. lionagi/protocols/ids.py +6 -49
  36. lionagi/protocols/messages/__init__.py +29 -0
  37. lionagi/protocols/messages/action_request.py +86 -184
  38. lionagi/protocols/messages/action_response.py +73 -131
  39. lionagi/protocols/messages/assistant_response.py +130 -159
  40. lionagi/protocols/messages/base.py +31 -22
  41. lionagi/protocols/messages/instruction.py +280 -625
  42. lionagi/protocols/messages/manager.py +112 -62
  43. lionagi/protocols/messages/message.py +87 -197
  44. lionagi/protocols/messages/system.py +52 -123
  45. lionagi/protocols/types.py +1 -13
  46. lionagi/service/connections/__init__.py +3 -0
  47. lionagi/service/connections/endpoint.py +0 -8
  48. lionagi/service/connections/providers/claude_code_cli.py +3 -2
  49. lionagi/service/connections/providers/oai_.py +29 -94
  50. lionagi/service/connections/providers/ollama_.py +3 -2
  51. lionagi/service/hooks/_types.py +1 -1
  52. lionagi/service/hooks/_utils.py +1 -1
  53. lionagi/service/hooks/hook_event.py +3 -8
  54. lionagi/service/hooks/hook_registry.py +5 -5
  55. lionagi/service/hooks/hooked_event.py +63 -3
  56. lionagi/service/imodel.py +24 -20
  57. lionagi/service/third_party/claude_code.py +3 -3
  58. lionagi/service/third_party/openai_models.py +435 -0
  59. lionagi/service/token_calculator.py +1 -94
  60. lionagi/session/branch.py +190 -400
  61. lionagi/session/session.py +8 -99
  62. lionagi/tools/file/reader.py +2 -2
  63. lionagi/version.py +1 -1
  64. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/METADATA +6 -6
  65. lionagi-0.18.1.dist-info/RECORD +164 -0
  66. lionagi/fields/__init__.py +0 -47
  67. lionagi/fields/action.py +0 -188
  68. lionagi/fields/base.py +0 -153
  69. lionagi/fields/code.py +0 -239
  70. lionagi/fields/file.py +0 -234
  71. lionagi/fields/instruct.py +0 -135
  72. lionagi/fields/reason.py +0 -55
  73. lionagi/fields/research.py +0 -52
  74. lionagi/operations/_act/act.py +0 -86
  75. lionagi/operations/brainstorm/__init__.py +0 -2
  76. lionagi/operations/brainstorm/brainstorm.py +0 -498
  77. lionagi/operations/brainstorm/prompt.py +0 -11
  78. lionagi/operations/instruct/__init__.py +0 -2
  79. lionagi/operations/instruct/instruct.py +0 -28
  80. lionagi/operations/plan/__init__.py +0 -6
  81. lionagi/operations/plan/plan.py +0 -386
  82. lionagi/operations/plan/prompt.py +0 -25
  83. lionagi/operations/utils.py +0 -45
  84. lionagi/protocols/forms/__init__.py +0 -2
  85. lionagi/protocols/forms/base.py +0 -85
  86. lionagi/protocols/forms/flow.py +0 -79
  87. lionagi/protocols/forms/form.py +0 -86
  88. lionagi/protocols/forms/report.py +0 -48
  89. lionagi/protocols/mail/__init__.py +0 -2
  90. lionagi/protocols/mail/exchange.py +0 -220
  91. lionagi/protocols/mail/mail.py +0 -51
  92. lionagi/protocols/mail/mailbox.py +0 -103
  93. lionagi/protocols/mail/manager.py +0 -218
  94. lionagi/protocols/mail/package.py +0 -101
  95. lionagi/protocols/messages/templates/README.md +0 -28
  96. lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
  97. lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
  98. lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
  99. lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
  100. lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
  101. lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
  102. lionagi/protocols/operatives/__init__.py +0 -2
  103. lionagi/service/connections/providers/types.py +0 -28
  104. lionagi/service/third_party/openai_model_names.py +0 -198
  105. lionagi/service/types.py +0 -58
  106. lionagi-0.17.11.dist-info/RECORD +0 -199
  107. /lionagi/operations/{_act → act}/__init__.py +0 -0
  108. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/WHEEL +0 -0
  109. {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/licenses/LICENSE +0 -0
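Several modules move without changing their file names: operative.py and step.py leave lionagi/protocols/operatives/ for lionagi/operations/operate/, and lionagi/operations/_act/ becomes lionagi/operations/act/. Below is a hypothetical import-migration sketch for downstream code, assuming the exported symbols keep their names after the move (verify against 0.18.1):

```python
# Hypothetical migration sketch; symbol names are assumed to be unchanged by the move.

# lionagi <= 0.17.11
# from lionagi.protocols.operatives.step import Step
# from lionagi.protocols.operatives.operative import Operative

# lionagi >= 0.18.1
from lionagi.operations.operate.step import Step
from lionagi.operations.operate.operative import Operative
```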
lionagi/operations/ReAct/utils.py
@@ -101,6 +101,9 @@ class ReActAnalysis(HashableModel):
         ),
     )
 
+    # Note: action_requests and action_responses are added dynamically by Step.request_operative()
+    # when actions=True, so they don't need to be defined here. The operate() function will add them.
+
 
 class Analysis(HashableModel):
     answer: str | None = None
lionagi/operations/act/act.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+from typing import TYPE_CHECKING, Literal
+
+from pydantic import BaseModel
+
+from lionagi.ln import AlcallParams
+from lionagi.protocols.messages import ActionRequest, ActionResponse
+
+from ..fields import ActionResponseModel
+from ..types import ActionParam
+
+if TYPE_CHECKING:
+    from lionagi.session.branch import Branch
+
+_DEFAULT_ALCALL_PARAMS = None
+
+
+async def _act(
+    branch: "Branch",
+    action_request: BaseModel | dict | ActionRequest,
+    suppress_errors: bool = False,
+    verbose_action: bool = False,
+):
+
+    _request = action_request
+    if isinstance(action_request, ActionRequest):
+        _request = {
+            "function": action_request.function,
+            "arguments": action_request.arguments,
+        }
+    elif isinstance(action_request, BaseModel) and set(
+        action_request.__class__.model_fields.keys()
+    ) >= {"function", "arguments"}:
+        _request = {
+            "function": action_request.function,
+            "arguments": action_request.arguments,
+        }
+    if not isinstance(_request, dict) or not {"function", "arguments"} <= set(
+        _request.keys()
+    ):
+        raise ValueError(
+            "action_request must be an ActionRequest, BaseModel with 'function'"
+            " and 'arguments', or dict with 'function' and 'arguments'."
+        )
+
+    try:
+        if verbose_action:
+            args_ = str(_request["arguments"])
+            args_ = args_[:50] + "..." if len(args_) > 50 else args_
+            print(f"Invoking action {_request['function']} with {args_}.")
+
+        func_call = await branch._action_manager.invoke(_request)
+        if verbose_action:
+            print(
+                f"Action {_request['function']} invoked, status: {func_call.status}."
+            )
+
+    except Exception as e:
+        content = {
+            "error": str(e),
+            "function": _request.get("function"),
+            "arguments": _request.get("arguments"),
+            "branch": str(branch.id),
+        }
+        branch._log_manager.log(content)
+        if verbose_action:
+            print(f"Action {_request['function']} failed, error: {str(e)}.")
+        if suppress_errors:
+            error_msg = f"Error invoking action '{_request['function']}': {e}"
+            logging.error(error_msg)
+
+            # Return error as action response so model knows it failed
+            return ActionResponseModel(
+                function=_request.get("function", "unknown"),
+                arguments=_request.get("arguments", {}),
+                output={"error": str(e), "message": error_msg},
+            )
+        raise e
+
+    branch._log_manager.log(func_call)
+
+    if not isinstance(action_request, ActionRequest):
+        action_request = ActionRequest(
+            content=_request,
+            sender=branch.id,
+            recipient=func_call.func_tool.id,
+        )
+
+    # Add the action request/response to the message manager, if not present
+    if action_request not in branch.messages:
+        branch.msgs.add_message(action_request=action_request)
+
+    branch.msgs.add_message(
+        action_request=action_request,
+        action_output=func_call.response,
+    )
+
+    return ActionResponseModel(
+        function=action_request.function,
+        arguments=action_request.arguments,
+        output=func_call.response,
+    )
+
+
+def prepare_act_kw(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    *,
+    strategy: Literal["concurrent", "sequential"] = "concurrent",
+    verbose_action: bool = False,
+    suppress_errors: bool = True,
+    call_params: AlcallParams = None,
+):
+
+    action_param = ActionParam(
+        action_call_params=call_params or _get_default_call_params(),
+        tools=None,  # Not used in this context
+        strategy=strategy,
+        suppress_errors=suppress_errors,
+        verbose_action=verbose_action,
+    )
+    return {
+        "action_request": action_request,
+        "action_param": action_param,
+    }
+
+
+async def act(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    action_param: ActionParam,
+) -> list[ActionResponse]:
+    """Execute action requests with ActionParam."""
+
+    match action_param.strategy:
+        case "concurrent":
+            return await _concurrent_act(
+                branch,
+                action_request,
+                action_param.action_call_params,
+                suppress_errors=action_param.suppress_errors,
+                verbose_action=action_param.verbose_action,
+            )
+        case "sequential":
+            return await _sequential_act(
+                branch,
+                action_request,
+                suppress_errors=action_param.suppress_errors,
+                verbose_action=action_param.verbose_action,
+            )
+        case _:
+            raise ValueError(
+                "Invalid strategy. Choose 'concurrent' or 'sequential'."
+            )
+
+
+async def _concurrent_act(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    call_params: AlcallParams,
+    suppress_errors: bool = True,
+    verbose_action: bool = False,
+) -> list:
+    """Execute actions concurrently using AlcallParams."""
+
+    async def _wrapper(req):
+        return await _act(branch, req, suppress_errors, verbose_action)
+
+    # AlcallParams expects a list as first argument
+    action_request_list = (
+        action_request
+        if isinstance(action_request, list)
+        else [action_request]
+    )
+
+    return await call_params(action_request_list, _wrapper)
+
+
+async def _sequential_act(
+    branch: "Branch",
+    action_request: list | ActionRequest | BaseModel | dict,
+    suppress_errors: bool = True,
+    verbose_action: bool = False,
+) -> list:
+    """Execute actions sequentially."""
+    action_request = (
+        action_request
+        if isinstance(action_request, list)
+        else [action_request]
+    )
+    results = []
+    for req in action_request:
+        result = await _act(branch, req, suppress_errors, verbose_action)
+        results.append(result)
+    return results
+
+
+def _get_default_call_params() -> AlcallParams:
+    """Get or create default AlcallParams."""
+    global _DEFAULT_ALCALL_PARAMS
+    if _DEFAULT_ALCALL_PARAMS is None:
+        _DEFAULT_ALCALL_PARAMS = AlcallParams(output_dropna=True)
+    return _DEFAULT_ALCALL_PARAMS
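The new lionagi/operations/act/act.py (replacing operations/_act/act.py) exposes prepare_act_kw() and an act() entry point driven by ActionParam, as added above. A minimal usage sketch based only on those signatures; the `branch` object and the registered "search" tool are assumptions, not part of this diff:

```python
# Sketch only: exercises the prepare_act_kw()/act() pair added in this diff.
# Assumes `branch` is an existing lionagi Branch with a "search" tool registered.
from lionagi.operations.act.act import act, prepare_act_kw


async def run_actions(branch):
    request = {"function": "search", "arguments": {"query": "lionagi 0.18.1"}}

    # Bundle the request with an ActionParam (concurrent or sequential strategy).
    kw = prepare_act_kw(branch, request, strategy="sequential", verbose_action=True)

    # Returns a list of ActionResponseModel results; with suppress_errors=True
    # (the default) failures come back as error outputs instead of raising.
    return await act(branch, kw["action_request"], kw["action_param"])
```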
lionagi/operations/builder.py
@@ -46,9 +46,9 @@ class OperationGraphBuilder:
         >>> result = await session.flow(graph)
         >>>
         >>> # Expand based on results
-        >>> if hasattr(result, 'instruct_models'):
+        >>> if hasattr(result, 'instruct_model'):
         ...     builder.expand_from_result(
-        ...         result.instruct_models,
+        ...         result.instruct_model,
         ...         source_node_id=builder.last_operation_id,
         ...         operation="instruct"
         ...     )
@@ -150,7 +150,7 @@ class OperationGraphBuilder:
         based on results.
 
         Args:
-            items: Items from result to expand (e.g., instruct_models)
+            items: Items from result to expand (e.g., instruct_model)
             source_node_id: ID of node that produced these items
             operation: Operation to apply to each item
             strategy: How to organize the expanded operations
@@ -253,11 +253,9 @@ class OperationGraphBuilder:
        if not sources:
            raise ValueError("No source nodes for aggregation")

-        # Add aggregation metadata - convert IDType to strings for JSON serialization
+        # Add aggregation metadata - convert UUID to strings for JSON serialization
        agg_params = {
-            "aggregation_sources": [
-                str(s) for s in sources
-            ], # Convert IDType to strings
+            "aggregation_sources": [str(s) for s in sources],
            "aggregation_count": len(sources),
            **parameters,
        }
lionagi/operations/chat/chat.py
@@ -1,19 +1,18 @@
 # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import TYPE_CHECKING, Literal
+from typing import TYPE_CHECKING
 
-from pydantic import BaseModel
+from pydantic import JsonValue
 
-from lionagi.protocols.types import (
+from lionagi.ln._to_list import to_list
+from lionagi.protocols.messages import (
     ActionResponse,
     AssistantResponse,
     Instruction,
-    Log,
-    RoledMessage,
 )
-from lionagi.service.imodel import iModel
-from lionagi.utils import copy
+
+from ..types import ChatParam
 
 if TYPE_CHECKING:
     from lionagi.session.branch import Branch
@@ -21,98 +20,94 @@ if TYPE_CHECKING:
 
 async def chat(
     branch: "Branch",
-    instruction=None,
-    guidance=None,
-    context=None,
-    sender=None,
-    recipient=None,
-    request_fields=None,
-    response_format: type[BaseModel] = None,
-    progression=None,
-    imodel: iModel = None,
-    tool_schemas=None,
-    images: list = None,
-    image_detail: Literal["low", "high", "auto"] = None,
-    plain_content: str = None,
+    instruction: JsonValue | Instruction,
+    chat_param: ChatParam,
     return_ins_res_message: bool = False,
-    include_token_usage_to_model: bool = False,
-    **kwargs,
-) -> tuple[Instruction, AssistantResponse]:
-    ins: Instruction = branch.msgs.create_instruction(
-        instruction=instruction,
-        guidance=guidance,
-        context=context,
-        sender=sender or branch.user or "user",
-        recipient=recipient or branch.id,
-        response_format=response_format,
-        request_fields=request_fields,
-        images=images,
-        image_detail=image_detail,
-        tool_schemas=tool_schemas,
-        plain_content=plain_content,
+) -> tuple[Instruction, AssistantResponse] | str:
+    params = chat_param.to_dict(
+        exclude={
+            "imodel",
+            "imodel_kw",
+            "include_token_usage_to_model",
+            "progression",
+        }
     )
+    params["sender"] = chat_param.sender or branch.user or "user"
+    params["recipient"] = chat_param.recipient or branch.id
+    params["instruction"] = instruction
 
-    progression = progression or branch.msgs.progression
-    messages: list[RoledMessage] = [
-        branch.msgs.messages[i] for i in progression
-    ]
-
-    use_ins = None
-    _to_use = []
-    _action_responses: set[ActionResponse] = set()
-
-    for i in messages:
-        if isinstance(i, ActionResponse):
-            _action_responses.add(i)
-        if isinstance(i, AssistantResponse):
-            j = AssistantResponse(
-                role=i.role,
-                content=copy(i.content),
-                sender=i.sender,
-                recipient=i.recipient,
-                template=i.template,
-            )
-            _to_use.append(j)
-        if isinstance(i, Instruction):
-            j = Instruction(
-                role=i.role,
-                content=copy(i.content),
-                sender=i.sender,
-                recipient=i.recipient,
-                template=i.template,
+    ins = branch.msgs.create_instruction(**params)
+
+    _use_ins, _use_msgs, _act_res = None, [], []
+    progression = chat_param.progression or branch.msgs.progression
+
+    for msg in (branch.msgs.messages[j] for j in progression):
+        if isinstance(msg, ActionResponse):
+            _act_res.append(msg)
+
+        if isinstance(msg, AssistantResponse):
+            _use_msgs.append(
+                msg.model_copy(update={"content": msg.content.with_updates()})
             )
-            j.tool_schemas = None
-            j.respond_schema_info = None
-            j.request_response_format = None
-
-            if _action_responses:
-                d_ = [k.content for k in _action_responses]
-                for z in d_:
-                    if z not in j.context:
-                        j.context.append(z)
-
-                _to_use.append(j)
-                _action_responses = set()
-            else:
-                _to_use.append(j)
 
-    messages = _to_use
-    if _action_responses:
-        j = ins.model_copy()
-        d_ = [k.content for k in _action_responses]
-        for z in d_:
-            if z not in j.context:
-                j.context.append(z)
-        use_ins = j
+        if isinstance(msg, Instruction):
+            j = msg.model_copy(update={"content": msg.content.with_updates()})
+            j.content.tool_schemas.clear()
+            j.content.response_format = None
+            j.content._schema_dict = None
+            j.content._model_class = None
+
+            if _act_res:
+                # Convert ActionResponseContent to dicts for proper rendering
+                d_ = []
+                for k in to_list(_act_res, flatten=True, unique=True):
+                    if hasattr(k.content, "function"): # ActionResponseContent
+                        d_.append(
+                            {
+                                "function": k.content.function,
+                                "arguments": k.content.arguments,
+                                "output": k.content.output,
+                            }
+                        )
+                    else:
+                        d_.append(k.content)
+                j.content.prompt_context.extend(
+                    [z for z in d_ if z not in j.content.prompt_context]
+                )
+                _use_msgs.append(j)
+                _act_res = []
+            else:
+                _use_msgs.append(j)
+
+    if _act_res:
+        j = ins.model_copy(update={"content": ins.content.with_updates()})
+        # Convert ActionResponseContent to dicts for proper rendering
+        d_ = []
+        for k in to_list(_act_res, flatten=True, unique=True):
+            if hasattr(k.content, "function"): # ActionResponseContent
+                d_.append(
+                    {
+                        "function": k.content.function,
+                        "arguments": k.content.arguments,
+                        "output": k.content.output,
+                    }
+                )
+            else:
+                d_.append(k.content)
+        j.content.prompt_context.extend(
+            [z for z in d_ if z not in j.content.prompt_context]
+        )
+        _use_ins = j
 
-    if messages and len(messages) > 1:
-        _msgs = [messages[0]]
+    messages = _use_msgs
+    if _use_msgs and len(_use_msgs) > 1:
+        _msgs = [_use_msgs[0]]
 
-        for i in messages[1:]:
+        for i in _use_msgs[1:]:
             if isinstance(i, AssistantResponse):
                 if isinstance(_msgs[-1], AssistantResponse):
-                    _msgs[-1].response = (
-                        f"{_msgs[-1].response}\n\n{i.response}"
+                    _msgs[-1].content.assistant_response = (
+                        f"{_msgs[-1].content.assistant_response}\n\n{i.content.assistant_response}"
                     )
             else:
                 _msgs.append(i)
@@ -125,11 +120,10 @@ async def chat(
     if branch.msgs.system:
         messages = [msg for msg in messages if msg.role != "system"]
         first_instruction = None
-
+        f = lambda x: branch.msgs.system.rendered + (x.content.guidance or "")
         if len(messages) == 0:
-            first_instruction = ins.model_copy()
-            first_instruction.guidance = branch.msgs.system.rendered + (
-                first_instruction.guidance or ""
+            first_instruction = ins.model_copy(
+                update={"content": ins.content.with_updates(guidance=f(ins))}
             )
             messages.append(first_instruction)
         elif len(messages) >= 1:
@@ -138,37 +132,59 @@ async def chat(
                 raise ValueError(
                     "First message in progression must be an Instruction or System"
                 )
-            first_instruction = first_instruction.model_copy()
-            first_instruction.guidance = branch.msgs.system.rendered + (
-                first_instruction.guidance or ""
+            first_instruction = first_instruction.model_copy(
+                update={
+                    "content": first_instruction.content.with_updates(
+                        guidance=f(first_instruction)
+                    )
+                }
             )
             messages[0] = first_instruction
-            messages.append(use_ins or ins)
+            msg_to_append = _use_ins or ins
+            if msg_to_append is not None:
+                messages.append(msg_to_append)
 
     else:
-        messages.append(use_ins or ins)
-
-    kwargs["messages"] = [i.chat_msg for i in messages]
-    imodel = imodel or branch.chat_model
-
-    meth = imodel.invoke
-    if "stream" not in kwargs or not kwargs["stream"]:
-        kwargs["include_token_usage_to_model"] = include_token_usage_to_model
-    else:
-        meth = imodel.stream
+        msg_to_append = _use_ins or ins
+        if msg_to_append is not None:
+            messages.append(msg_to_append)
+
+    kw = (chat_param.imodel_kw or {}).copy()
+
+    # Filter out messages with None chat_msg
+    chat_msgs = []
+    for msg in messages:
+        if msg is not None and hasattr(msg, "chat_msg"):
+            chat_msg = msg.chat_msg
+            if chat_msg is not None:
+                chat_msgs.append(chat_msg)
+
+    kw["messages"] = chat_msgs
+
+    imodel = chat_param.imodel or branch.chat_model
+    meth = imodel.stream if "stream" in kw and kw["stream"] else imodel.invoke
+
+    if meth is imodel.invoke:
+        # Only set if it's not the Unset sentinel value
+        if not chat_param._is_sentinel(
+            chat_param.include_token_usage_to_model
+        ):
+            kw["include_token_usage_to_model"] = (
+                chat_param.include_token_usage_to_model
+            )
+    api_call = await meth(**kw)
 
-    api_call = await meth(**kwargs)
-    branch._log_manager.log(Log.create(api_call))
+    branch._log_manager.log(api_call)
 
     if return_ins_res_message:
         # Wrap result in `AssistantResponse` and return
-        return ins, AssistantResponse.create(
-            assistant_response=api_call.response,
+        return ins, AssistantResponse.from_response(
+            api_call.response,
             sender=branch.id,
             recipient=branch.user,
         )
-    return AssistantResponse.create(
-        assistant_response=api_call.response,
+    return AssistantResponse.from_response(
+        api_call.response,
         sender=branch.id,
         recipient=branch.user,
     ).response
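chat() now takes the instruction plus a single ChatParam instead of a long keyword list, and returns either an (Instruction, AssistantResponse) pair or the plain response string. A hedged calling sketch follows; the ChatParam constructor fields used here are inferred from the chat_param attribute accesses in the hunks above and may not match the real model exactly:

```python
# Sketch only: the ChatParam-based call path introduced in 0.18.x.
# Field names passed to ChatParam (sender, recipient, imodel_kw) are
# assumptions drawn from chat_param.<attr> accesses in this diff.
from lionagi.operations.chat.chat import chat
from lionagi.operations.types import ChatParam  # imported as `..types` in chat.py


async def ask(branch, question: str) -> str:
    param = ChatParam(
        sender=branch.user,
        recipient=branch.id,
        imodel_kw={"temperature": 0.2},
    )
    # return_ins_res_message=False -> the plain response string
    return await chat(branch, question, param)
```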