letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of letta-nightly has been flagged as potentially problematic. See the package registry page for details.
- letta/__init__.py +24 -0
- letta/__main__.py +3 -0
- letta/agent.py +1427 -0
- letta/agent_store/chroma.py +295 -0
- letta/agent_store/db.py +546 -0
- letta/agent_store/lancedb.py +177 -0
- letta/agent_store/milvus.py +198 -0
- letta/agent_store/qdrant.py +201 -0
- letta/agent_store/storage.py +188 -0
- letta/benchmark/benchmark.py +96 -0
- letta/benchmark/constants.py +14 -0
- letta/cli/cli.py +689 -0
- letta/cli/cli_config.py +1282 -0
- letta/cli/cli_load.py +166 -0
- letta/client/__init__.py +0 -0
- letta/client/admin.py +171 -0
- letta/client/client.py +2360 -0
- letta/client/streaming.py +90 -0
- letta/client/utils.py +61 -0
- letta/config.py +484 -0
- letta/configs/anthropic.json +13 -0
- letta/configs/letta_hosted.json +11 -0
- letta/configs/openai.json +12 -0
- letta/constants.py +134 -0
- letta/credentials.py +140 -0
- letta/data_sources/connectors.py +247 -0
- letta/embeddings.py +218 -0
- letta/errors.py +26 -0
- letta/functions/__init__.py +0 -0
- letta/functions/function_sets/base.py +174 -0
- letta/functions/function_sets/extras.py +132 -0
- letta/functions/functions.py +105 -0
- letta/functions/schema_generator.py +205 -0
- letta/humans/__init__.py +0 -0
- letta/humans/examples/basic.txt +1 -0
- letta/humans/examples/cs_phd.txt +9 -0
- letta/interface.py +314 -0
- letta/llm_api/__init__.py +0 -0
- letta/llm_api/anthropic.py +383 -0
- letta/llm_api/azure_openai.py +155 -0
- letta/llm_api/cohere.py +396 -0
- letta/llm_api/google_ai.py +468 -0
- letta/llm_api/llm_api_tools.py +485 -0
- letta/llm_api/openai.py +470 -0
- letta/local_llm/README.md +3 -0
- letta/local_llm/__init__.py +0 -0
- letta/local_llm/chat_completion_proxy.py +279 -0
- letta/local_llm/constants.py +31 -0
- letta/local_llm/function_parser.py +68 -0
- letta/local_llm/grammars/__init__.py +0 -0
- letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
- letta/local_llm/grammars/json.gbnf +26 -0
- letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
- letta/local_llm/groq/api.py +97 -0
- letta/local_llm/json_parser.py +202 -0
- letta/local_llm/koboldcpp/api.py +62 -0
- letta/local_llm/koboldcpp/settings.py +23 -0
- letta/local_llm/llamacpp/api.py +58 -0
- letta/local_llm/llamacpp/settings.py +22 -0
- letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
- letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
- letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
- letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
- letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
- letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
- letta/local_llm/lmstudio/api.py +100 -0
- letta/local_llm/lmstudio/settings.py +29 -0
- letta/local_llm/ollama/api.py +88 -0
- letta/local_llm/ollama/settings.py +32 -0
- letta/local_llm/settings/__init__.py +0 -0
- letta/local_llm/settings/deterministic_mirostat.py +45 -0
- letta/local_llm/settings/settings.py +72 -0
- letta/local_llm/settings/simple.py +28 -0
- letta/local_llm/utils.py +265 -0
- letta/local_llm/vllm/api.py +63 -0
- letta/local_llm/webui/api.py +60 -0
- letta/local_llm/webui/legacy_api.py +58 -0
- letta/local_llm/webui/legacy_settings.py +23 -0
- letta/local_llm/webui/settings.py +24 -0
- letta/log.py +76 -0
- letta/main.py +437 -0
- letta/memory.py +440 -0
- letta/metadata.py +884 -0
- letta/openai_backcompat/__init__.py +0 -0
- letta/openai_backcompat/openai_object.py +437 -0
- letta/persistence_manager.py +148 -0
- letta/personas/__init__.py +0 -0
- letta/personas/examples/anna_pa.txt +13 -0
- letta/personas/examples/google_search_persona.txt +15 -0
- letta/personas/examples/memgpt_doc.txt +6 -0
- letta/personas/examples/memgpt_starter.txt +4 -0
- letta/personas/examples/sam.txt +14 -0
- letta/personas/examples/sam_pov.txt +14 -0
- letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
- letta/personas/examples/sqldb/test.db +0 -0
- letta/prompts/__init__.py +0 -0
- letta/prompts/gpt_summarize.py +14 -0
- letta/prompts/gpt_system.py +26 -0
- letta/prompts/system/memgpt_base.txt +49 -0
- letta/prompts/system/memgpt_chat.txt +58 -0
- letta/prompts/system/memgpt_chat_compressed.txt +13 -0
- letta/prompts/system/memgpt_chat_fstring.txt +51 -0
- letta/prompts/system/memgpt_doc.txt +50 -0
- letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
- letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
- letta/prompts/system/memgpt_modified_chat.txt +23 -0
- letta/pytest.ini +0 -0
- letta/schemas/agent.py +117 -0
- letta/schemas/api_key.py +21 -0
- letta/schemas/block.py +135 -0
- letta/schemas/document.py +21 -0
- letta/schemas/embedding_config.py +54 -0
- letta/schemas/enums.py +35 -0
- letta/schemas/job.py +38 -0
- letta/schemas/letta_base.py +80 -0
- letta/schemas/letta_message.py +175 -0
- letta/schemas/letta_request.py +23 -0
- letta/schemas/letta_response.py +28 -0
- letta/schemas/llm_config.py +54 -0
- letta/schemas/memory.py +224 -0
- letta/schemas/message.py +727 -0
- letta/schemas/openai/chat_completion_request.py +123 -0
- letta/schemas/openai/chat_completion_response.py +136 -0
- letta/schemas/openai/chat_completions.py +123 -0
- letta/schemas/openai/embedding_response.py +11 -0
- letta/schemas/openai/openai.py +157 -0
- letta/schemas/organization.py +20 -0
- letta/schemas/passage.py +80 -0
- letta/schemas/source.py +62 -0
- letta/schemas/tool.py +143 -0
- letta/schemas/usage.py +18 -0
- letta/schemas/user.py +33 -0
- letta/server/__init__.py +0 -0
- letta/server/constants.py +6 -0
- letta/server/rest_api/__init__.py +0 -0
- letta/server/rest_api/admin/__init__.py +0 -0
- letta/server/rest_api/admin/agents.py +21 -0
- letta/server/rest_api/admin/tools.py +83 -0
- letta/server/rest_api/admin/users.py +98 -0
- letta/server/rest_api/app.py +193 -0
- letta/server/rest_api/auth/__init__.py +0 -0
- letta/server/rest_api/auth/index.py +43 -0
- letta/server/rest_api/auth_token.py +22 -0
- letta/server/rest_api/interface.py +726 -0
- letta/server/rest_api/routers/__init__.py +0 -0
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
- letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
- letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
- letta/server/rest_api/routers/v1/__init__.py +15 -0
- letta/server/rest_api/routers/v1/agents.py +543 -0
- letta/server/rest_api/routers/v1/blocks.py +73 -0
- letta/server/rest_api/routers/v1/jobs.py +46 -0
- letta/server/rest_api/routers/v1/llms.py +28 -0
- letta/server/rest_api/routers/v1/organizations.py +61 -0
- letta/server/rest_api/routers/v1/sources.py +199 -0
- letta/server/rest_api/routers/v1/tools.py +103 -0
- letta/server/rest_api/routers/v1/users.py +109 -0
- letta/server/rest_api/static_files.py +74 -0
- letta/server/rest_api/utils.py +69 -0
- letta/server/server.py +1995 -0
- letta/server/startup.sh +8 -0
- letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
- letta/server/static_files/assets/index-156816da.css +1 -0
- letta/server/static_files/assets/index-486e3228.js +274 -0
- letta/server/static_files/favicon.ico +0 -0
- letta/server/static_files/index.html +39 -0
- letta/server/static_files/memgpt_logo_transparent.png +0 -0
- letta/server/utils.py +46 -0
- letta/server/ws_api/__init__.py +0 -0
- letta/server/ws_api/example_client.py +104 -0
- letta/server/ws_api/interface.py +108 -0
- letta/server/ws_api/protocol.py +100 -0
- letta/server/ws_api/server.py +145 -0
- letta/settings.py +165 -0
- letta/streaming_interface.py +396 -0
- letta/system.py +207 -0
- letta/utils.py +1065 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
letta/schemas/message.py
ADDED
|
@@ -0,0 +1,727 @@
|
|
|
1
|
+
import copy
|
|
2
|
+
import json
|
|
3
|
+
import warnings
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
from typing import List, Optional
|
|
6
|
+
|
|
7
|
+
from pydantic import Field, field_validator
|
|
8
|
+
|
|
9
|
+
from letta.constants import TOOL_CALL_ID_MAX_LEN
|
|
10
|
+
from letta.local_llm.constants import INNER_THOUGHTS_KWARG
|
|
11
|
+
from letta.schemas.enums import MessageRole
|
|
12
|
+
from letta.schemas.letta_base import LettaBase
|
|
13
|
+
from letta.schemas.letta_message import (
|
|
14
|
+
FunctionCall,
|
|
15
|
+
FunctionCallMessage,
|
|
16
|
+
FunctionReturn,
|
|
17
|
+
InternalMonologue,
|
|
18
|
+
LettaMessage,
|
|
19
|
+
SystemMessage,
|
|
20
|
+
UserMessage,
|
|
21
|
+
)
|
|
22
|
+
from letta.schemas.openai.chat_completions import ToolCall, ToolCallFunction
|
|
23
|
+
from letta.utils import get_utc_time, is_utc_datetime, json_dumps
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def add_inner_thoughts_to_tool_call(
    tool_call: ToolCall,
    inner_thoughts: str,
    inner_thoughts_key: str,
) -> ToolCall:
    """Return a copy of ``tool_call`` whose JSON arguments also carry the inner thoughts.

    The tool call's arguments are stored as a JSON string, so they are decoded,
    extended with ``inner_thoughts_key -> inner_thoughts``, and re-encoded onto
    a deep copy of the original call; the input object is left untouched.

    Raises:
        json.JSONDecodeError: if the existing arguments string is not valid JSON
            (a warning is emitted before re-raising).
    """
    try:
        parsed_args = json.loads(tool_call.function.arguments)
    except json.JSONDecodeError as e:
        # TODO: change to logging
        warnings.warn(f"Failed to put inner thoughts in kwargs: {e}")
        raise e

    parsed_args[inner_thoughts_key] = inner_thoughts
    patched_call = copy.deepcopy(tool_call)
    patched_call.function.arguments = json_dumps(parsed_args)
    return patched_call
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class BaseMessage(LettaBase):
    """Shared base for message schemas in this module."""

    # Prefix used when generating ids for messages (see Message.id, which
    # calls BaseMessage.generate_id_field()).
    __id_prefix__ = "message"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class MessageCreate(BaseMessage):
    """Request to create a message

    Carries only the caller-supplied fields; id and created_at are generated
    by the full Message model.
    """

    role: MessageRole = Field(..., description="The role of the participant.")
    text: str = Field(..., description="The text of the message.")
    name: Optional[str] = Field(None, description="The name of the participant.")
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class UpdateMessage(BaseMessage):
    """Request to update a message

    Every field except ``id`` is optional; only the fields provided are
    intended to be changed on the stored message.
    """

    # Required: identifies which message to update.
    id: str = Field(..., description="The id of the message.")
    role: Optional[MessageRole] = Field(None, description="The role of the participant.")
    text: Optional[str] = Field(None, description="The text of the message.")
    # NOTE: probably doesn't make sense to allow remapping user_id or agent_id (vs creating a new message)
    # user_id: Optional[str] = Field(None, description="The unique identifier of the user.")
    # agent_id: Optional[str] = Field(None, description="The unique identifier of the agent.")
    # NOTE: we probably shouldn't allow updating the model field, otherwise this loses meaning
    # model: Optional[str] = Field(None, description="The model used to make the function call.")
    name: Optional[str] = Field(None, description="The name of the participant.")
    # NOTE: we probably shouldn't allow updating the created_at field, right?
    # created_at: Optional[datetime] = Field(None, description="The time the message was created.")
    tool_calls: Optional[List[ToolCall]] = Field(None, description="The list of tool calls requested.")
    tool_call_id: Optional[str] = Field(None, description="The id of the tool call.")
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class Message(BaseMessage):
    """
    Letta's internal representation of a message. Includes methods to convert to/from LLM provider formats.

    Attributes:
        id (str): The unique identifier of the message.
        role (MessageRole): The role of the participant.
        text (str): The text of the message.
        user_id (str): The unique identifier of the user.
        agent_id (str): The unique identifier of the agent.
        model (str): The model used to make the function call.
        name (str): The name of the participant.
        created_at (datetime): The time the message was created.
        tool_calls (List[ToolCall]): The list of tool calls requested.
        tool_call_id (str): The id of the tool call.

    """

    # id is auto-generated with the "message" prefix (see BaseMessage.__id_prefix__).
    id: str = BaseMessage.generate_id_field()
    role: MessageRole = Field(..., description="The role of the participant.")
    text: Optional[str] = Field(None, description="The text of the message.")
    user_id: Optional[str] = Field(None, description="The unique identifier of the user.")
    agent_id: Optional[str] = Field(None, description="The unique identifier of the agent.")
    model: Optional[str] = Field(None, description="The model used to make the function call.")
    name: Optional[str] = Field(None, description="The name of the participant.")
    # Defaults to the current UTC time when not supplied.
    created_at: datetime = Field(default_factory=get_utc_time, description="The time the message was created.")
    tool_calls: Optional[List[ToolCall]] = Field(None, description="The list of tool calls requested.")
    # Only meaningful for role == 'tool' (see to_openai_dict / dict_to_message).
    tool_call_id: Optional[str] = Field(None, description="The id of the tool call.")
|
|
106
|
+
|
|
107
|
+
@field_validator("role")
|
|
108
|
+
@classmethod
|
|
109
|
+
def validate_role(cls, v: str) -> str:
|
|
110
|
+
roles = ["system", "assistant", "user", "tool"]
|
|
111
|
+
assert v in roles, f"Role must be one of {roles}"
|
|
112
|
+
return v
|
|
113
|
+
|
|
114
|
+
def to_json(self):
|
|
115
|
+
json_message = vars(self)
|
|
116
|
+
if json_message["tool_calls"] is not None:
|
|
117
|
+
json_message["tool_calls"] = [vars(tc) for tc in json_message["tool_calls"]]
|
|
118
|
+
# turn datetime to ISO format
|
|
119
|
+
# also if the created_at is missing a timezone, add UTC
|
|
120
|
+
if not is_utc_datetime(self.created_at):
|
|
121
|
+
self.created_at = self.created_at.replace(tzinfo=timezone.utc)
|
|
122
|
+
json_message["created_at"] = self.created_at.isoformat()
|
|
123
|
+
return json_message
|
|
124
|
+
|
|
125
|
+
    def to_letta_message(self) -> List[LettaMessage]:
        """Convert message object (in DB format) to the style used by the original Letta API

        One stored Message can expand into several LettaMessages (e.g. an
        assistant message with both inner thoughts and tool calls).

        Raises:
            ValueError: if the role is unrecognized, or if a tool message's
                text cannot be parsed as a packaged function response.
        """

        messages = []

        if self.role == MessageRole.assistant:
            if self.text is not None:
                # This is type InnerThoughts
                messages.append(
                    InternalMonologue(
                        id=self.id,
                        date=self.created_at,
                        internal_monologue=self.text,
                    )
                )
            if self.tool_calls is not None:
                # This is type FunctionCall
                # NOTE: one FunctionCallMessage is emitted per tool call.
                for tool_call in self.tool_calls:
                    messages.append(
                        FunctionCallMessage(
                            id=self.id,
                            date=self.created_at,
                            function_call=FunctionCall(
                                name=tool_call.function.name,
                                arguments=tool_call.function.arguments,
                            ),
                        )
                    )
        elif self.role == MessageRole.tool:
            # This is type FunctionReturn
            # Try to interpret the function return, recall that this is how we packaged:
            # def package_function_response(was_success, response_string, timestamp=None):
            #     formatted_time = get_local_time() if timestamp is None else timestamp
            #     packaged_message = {
            #         "status": "OK" if was_success else "Failed",
            #         "message": response_string,
            #         "time": formatted_time,
            #     }
            assert self.text is not None, self
            try:
                function_return = json.loads(self.text)
                status = function_return["status"]
                # Map the packaged "status" onto the API's success/error enum.
                if status == "OK":
                    status_enum = "success"
                elif status == "Failed":
                    status_enum = "error"
                else:
                    raise ValueError(f"Invalid status: {status}")
            except json.JSONDecodeError:
                raise ValueError(f"Failed to decode function return: {self.text}")
            messages.append(
                # TODO make sure this is what the API returns
                # function_return may not match exactly...
                FunctionReturn(
                    id=self.id,
                    date=self.created_at,
                    function_return=self.text,
                    status=status_enum,
                )
            )
        elif self.role == MessageRole.user:
            # This is type UserMessage
            assert self.text is not None, self
            messages.append(
                UserMessage(
                    id=self.id,
                    date=self.created_at,
                    message=self.text,
                )
            )
        elif self.role == MessageRole.system:
            # This is type SystemMessage
            assert self.text is not None, self
            messages.append(
                SystemMessage(
                    id=self.id,
                    date=self.created_at,
                    message=self.text,
                )
            )
        else:
            raise ValueError(self.role)

        return messages
|
|
209
|
+
|
|
210
|
+
@staticmethod
|
|
211
|
+
def dict_to_message(
|
|
212
|
+
user_id: str,
|
|
213
|
+
agent_id: str,
|
|
214
|
+
openai_message_dict: dict,
|
|
215
|
+
model: Optional[str] = None, # model used to make function call
|
|
216
|
+
allow_functions_style: bool = False, # allow deprecated functions style?
|
|
217
|
+
created_at: Optional[datetime] = None,
|
|
218
|
+
id: Optional[str] = None,
|
|
219
|
+
):
|
|
220
|
+
"""Convert a ChatCompletion message object into a Message object (synced to DB)"""
|
|
221
|
+
if not created_at:
|
|
222
|
+
# timestamp for creation
|
|
223
|
+
created_at = get_utc_time()
|
|
224
|
+
|
|
225
|
+
assert "role" in openai_message_dict, openai_message_dict
|
|
226
|
+
assert "content" in openai_message_dict, openai_message_dict
|
|
227
|
+
|
|
228
|
+
# If we're going from deprecated function form
|
|
229
|
+
if openai_message_dict["role"] == "function":
|
|
230
|
+
if not allow_functions_style:
|
|
231
|
+
raise DeprecationWarning(openai_message_dict)
|
|
232
|
+
assert "tool_call_id" in openai_message_dict, openai_message_dict
|
|
233
|
+
|
|
234
|
+
# Convert from 'function' response to a 'tool' response
|
|
235
|
+
# NOTE: this does not conventionally include a tool_call_id, it's on the caster to provide it
|
|
236
|
+
message_args = dict(
|
|
237
|
+
user_id=user_id,
|
|
238
|
+
agent_id=agent_id,
|
|
239
|
+
model=model,
|
|
240
|
+
# standard fields expected in an OpenAI ChatCompletion message object
|
|
241
|
+
role=MessageRole.tool, # NOTE
|
|
242
|
+
text=openai_message_dict["content"],
|
|
243
|
+
name=openai_message_dict["name"] if "name" in openai_message_dict else None,
|
|
244
|
+
tool_calls=openai_message_dict["tool_calls"] if "tool_calls" in openai_message_dict else None,
|
|
245
|
+
tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
|
|
246
|
+
created_at=created_at,
|
|
247
|
+
)
|
|
248
|
+
if id is not None:
|
|
249
|
+
return Message(
|
|
250
|
+
user_id=user_id,
|
|
251
|
+
agent_id=agent_id,
|
|
252
|
+
model=model,
|
|
253
|
+
# standard fields expected in an OpenAI ChatCompletion message object
|
|
254
|
+
role=MessageRole.tool, # NOTE
|
|
255
|
+
text=openai_message_dict["content"],
|
|
256
|
+
name=openai_message_dict["name"] if "name" in openai_message_dict else None,
|
|
257
|
+
tool_calls=openai_message_dict["tool_calls"] if "tool_calls" in openai_message_dict else None,
|
|
258
|
+
tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
|
|
259
|
+
created_at=created_at,
|
|
260
|
+
id=str(id),
|
|
261
|
+
)
|
|
262
|
+
else:
|
|
263
|
+
return Message(
|
|
264
|
+
user_id=user_id,
|
|
265
|
+
agent_id=agent_id,
|
|
266
|
+
model=model,
|
|
267
|
+
# standard fields expected in an OpenAI ChatCompletion message object
|
|
268
|
+
role=MessageRole.tool, # NOTE
|
|
269
|
+
text=openai_message_dict["content"],
|
|
270
|
+
name=openai_message_dict["name"] if "name" in openai_message_dict else None,
|
|
271
|
+
tool_calls=openai_message_dict["tool_calls"] if "tool_calls" in openai_message_dict else None,
|
|
272
|
+
tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
|
|
273
|
+
created_at=created_at,
|
|
274
|
+
)
|
|
275
|
+
|
|
276
|
+
elif "function_call" in openai_message_dict and openai_message_dict["function_call"] is not None:
|
|
277
|
+
if not allow_functions_style:
|
|
278
|
+
raise DeprecationWarning(openai_message_dict)
|
|
279
|
+
assert openai_message_dict["role"] == "assistant", openai_message_dict
|
|
280
|
+
assert "tool_call_id" in openai_message_dict, openai_message_dict
|
|
281
|
+
|
|
282
|
+
# Convert a function_call (from an assistant message) into a tool_call
|
|
283
|
+
# NOTE: this does not conventionally include a tool_call_id (ToolCall.id), it's on the caster to provide it
|
|
284
|
+
tool_calls = [
|
|
285
|
+
ToolCall(
|
|
286
|
+
id=openai_message_dict["tool_call_id"], # NOTE: unconventional source, not to spec
|
|
287
|
+
type="function",
|
|
288
|
+
function=ToolCallFunction(
|
|
289
|
+
name=openai_message_dict["function_call"]["name"],
|
|
290
|
+
arguments=openai_message_dict["function_call"]["arguments"],
|
|
291
|
+
),
|
|
292
|
+
)
|
|
293
|
+
]
|
|
294
|
+
|
|
295
|
+
if id is not None:
|
|
296
|
+
return Message(
|
|
297
|
+
user_id=user_id,
|
|
298
|
+
agent_id=agent_id,
|
|
299
|
+
model=model,
|
|
300
|
+
# standard fields expected in an OpenAI ChatCompletion message object
|
|
301
|
+
role=MessageRole(openai_message_dict["role"]),
|
|
302
|
+
text=openai_message_dict["content"],
|
|
303
|
+
name=openai_message_dict["name"] if "name" in openai_message_dict else None,
|
|
304
|
+
tool_calls=tool_calls,
|
|
305
|
+
tool_call_id=None, # NOTE: None, since this field is only non-null for role=='tool'
|
|
306
|
+
created_at=created_at,
|
|
307
|
+
id=str(id),
|
|
308
|
+
)
|
|
309
|
+
else:
|
|
310
|
+
return Message(
|
|
311
|
+
user_id=user_id,
|
|
312
|
+
agent_id=agent_id,
|
|
313
|
+
model=model,
|
|
314
|
+
# standard fields expected in an OpenAI ChatCompletion message object
|
|
315
|
+
role=MessageRole(openai_message_dict["role"]),
|
|
316
|
+
text=openai_message_dict["content"],
|
|
317
|
+
name=openai_message_dict["name"] if "name" in openai_message_dict else None,
|
|
318
|
+
tool_calls=tool_calls,
|
|
319
|
+
tool_call_id=None, # NOTE: None, since this field is only non-null for role=='tool'
|
|
320
|
+
created_at=created_at,
|
|
321
|
+
)
|
|
322
|
+
|
|
323
|
+
else:
|
|
324
|
+
# Basic sanity check
|
|
325
|
+
if openai_message_dict["role"] == "tool":
|
|
326
|
+
assert "tool_call_id" in openai_message_dict and openai_message_dict["tool_call_id"] is not None, openai_message_dict
|
|
327
|
+
else:
|
|
328
|
+
if "tool_call_id" in openai_message_dict:
|
|
329
|
+
assert openai_message_dict["tool_call_id"] is None, openai_message_dict
|
|
330
|
+
|
|
331
|
+
if "tool_calls" in openai_message_dict and openai_message_dict["tool_calls"] is not None:
|
|
332
|
+
assert openai_message_dict["role"] == "assistant", openai_message_dict
|
|
333
|
+
|
|
334
|
+
tool_calls = [
|
|
335
|
+
ToolCall(id=tool_call["id"], type=tool_call["type"], function=tool_call["function"])
|
|
336
|
+
for tool_call in openai_message_dict["tool_calls"]
|
|
337
|
+
]
|
|
338
|
+
else:
|
|
339
|
+
tool_calls = None
|
|
340
|
+
|
|
341
|
+
# If we're going from tool-call style
|
|
342
|
+
if id is not None:
|
|
343
|
+
return Message(
|
|
344
|
+
user_id=user_id,
|
|
345
|
+
agent_id=agent_id,
|
|
346
|
+
model=model,
|
|
347
|
+
# standard fields expected in an OpenAI ChatCompletion message object
|
|
348
|
+
role=MessageRole(openai_message_dict["role"]),
|
|
349
|
+
text=openai_message_dict["content"],
|
|
350
|
+
name=openai_message_dict["name"] if "name" in openai_message_dict else None,
|
|
351
|
+
tool_calls=tool_calls,
|
|
352
|
+
tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
|
|
353
|
+
created_at=created_at,
|
|
354
|
+
id=str(id),
|
|
355
|
+
)
|
|
356
|
+
else:
|
|
357
|
+
return Message(
|
|
358
|
+
user_id=user_id,
|
|
359
|
+
agent_id=agent_id,
|
|
360
|
+
model=model,
|
|
361
|
+
# standard fields expected in an OpenAI ChatCompletion message object
|
|
362
|
+
role=MessageRole(openai_message_dict["role"]),
|
|
363
|
+
text=openai_message_dict["content"],
|
|
364
|
+
name=openai_message_dict["name"] if "name" in openai_message_dict else None,
|
|
365
|
+
tool_calls=tool_calls,
|
|
366
|
+
tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
|
|
367
|
+
created_at=created_at,
|
|
368
|
+
)
|
|
369
|
+
|
|
370
|
+
def to_openai_dict_search_results(self, max_tool_id_length: int = TOOL_CALL_ID_MAX_LEN) -> dict:
|
|
371
|
+
result_json = self.to_openai_dict()
|
|
372
|
+
search_result_json = {"timestamp": self.created_at, "message": {"content": result_json["content"], "role": result_json["role"]}}
|
|
373
|
+
return search_result_json
|
|
374
|
+
|
|
375
|
+
def to_openai_dict(
|
|
376
|
+
self,
|
|
377
|
+
max_tool_id_length: int = TOOL_CALL_ID_MAX_LEN,
|
|
378
|
+
put_inner_thoughts_in_kwargs: bool = False,
|
|
379
|
+
) -> dict:
|
|
380
|
+
"""Go from Message class to ChatCompletion message object"""
|
|
381
|
+
|
|
382
|
+
# TODO change to pydantic casting, eg `return SystemMessageModel(self)`
|
|
383
|
+
|
|
384
|
+
if self.role == "system":
|
|
385
|
+
assert all([v is not None for v in [self.role]]), vars(self)
|
|
386
|
+
openai_message = {
|
|
387
|
+
"content": self.text,
|
|
388
|
+
"role": self.role,
|
|
389
|
+
}
|
|
390
|
+
# Optional field, do not include if null
|
|
391
|
+
if self.name is not None:
|
|
392
|
+
openai_message["name"] = self.name
|
|
393
|
+
|
|
394
|
+
elif self.role == "user":
|
|
395
|
+
assert all([v is not None for v in [self.text, self.role]]), vars(self)
|
|
396
|
+
openai_message = {
|
|
397
|
+
"content": self.text,
|
|
398
|
+
"role": self.role,
|
|
399
|
+
}
|
|
400
|
+
# Optional field, do not include if null
|
|
401
|
+
if self.name is not None:
|
|
402
|
+
openai_message["name"] = self.name
|
|
403
|
+
|
|
404
|
+
elif self.role == "assistant":
|
|
405
|
+
assert self.tool_calls is not None or self.text is not None
|
|
406
|
+
openai_message = {
|
|
407
|
+
"content": None if put_inner_thoughts_in_kwargs else self.text,
|
|
408
|
+
"role": self.role,
|
|
409
|
+
}
|
|
410
|
+
# Optional fields, do not include if null
|
|
411
|
+
if self.name is not None:
|
|
412
|
+
openai_message["name"] = self.name
|
|
413
|
+
if self.tool_calls is not None:
|
|
414
|
+
if put_inner_thoughts_in_kwargs:
|
|
415
|
+
# put the inner thoughts inside the tool call before casting to a dict
|
|
416
|
+
openai_message["tool_calls"] = [
|
|
417
|
+
add_inner_thoughts_to_tool_call(
|
|
418
|
+
tool_call,
|
|
419
|
+
inner_thoughts=self.text,
|
|
420
|
+
inner_thoughts_key=INNER_THOUGHTS_KWARG,
|
|
421
|
+
).model_dump()
|
|
422
|
+
for tool_call in self.tool_calls
|
|
423
|
+
]
|
|
424
|
+
else:
|
|
425
|
+
openai_message["tool_calls"] = [tool_call.model_dump() for tool_call in self.tool_calls]
|
|
426
|
+
if max_tool_id_length:
|
|
427
|
+
for tool_call_dict in openai_message["tool_calls"]:
|
|
428
|
+
tool_call_dict["id"] = tool_call_dict["id"][:max_tool_id_length]
|
|
429
|
+
|
|
430
|
+
elif self.role == "tool":
|
|
431
|
+
assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
|
|
432
|
+
openai_message = {
|
|
433
|
+
"content": self.text,
|
|
434
|
+
"role": self.role,
|
|
435
|
+
"tool_call_id": self.tool_call_id[:max_tool_id_length] if max_tool_id_length else self.tool_call_id,
|
|
436
|
+
}
|
|
437
|
+
|
|
438
|
+
else:
|
|
439
|
+
raise ValueError(self.role)
|
|
440
|
+
|
|
441
|
+
return openai_message
|
|
442
|
+
|
|
443
|
+
def to_anthropic_dict(self, inner_thoughts_xml_tag="thinking") -> dict:
|
|
444
|
+
"""
|
|
445
|
+
Convert to an Anthropic message dictionary
|
|
446
|
+
|
|
447
|
+
Args:
|
|
448
|
+
inner_thoughts_xml_tag (str): The XML tag to wrap around inner thoughts
|
|
449
|
+
"""
|
|
450
|
+
|
|
451
|
+
def add_xml_tag(string: str, xml_tag: Optional[str]):
|
|
452
|
+
# NOTE: Anthropic docs recommends using <thinking> tag when using CoT + tool use
|
|
453
|
+
return f"<{xml_tag}>{string}</{xml_tag}" if xml_tag else string
|
|
454
|
+
|
|
455
|
+
if self.role == "system":
|
|
456
|
+
raise ValueError(f"Anthropic 'system' role not supported")
|
|
457
|
+
|
|
458
|
+
elif self.role == "user":
|
|
459
|
+
assert all([v is not None for v in [self.text, self.role]]), vars(self)
|
|
460
|
+
anthropic_message = {
|
|
461
|
+
"content": self.text,
|
|
462
|
+
"role": self.role,
|
|
463
|
+
}
|
|
464
|
+
# Optional field, do not include if null
|
|
465
|
+
if self.name is not None:
|
|
466
|
+
anthropic_message["name"] = self.name
|
|
467
|
+
|
|
468
|
+
elif self.role == "assistant":
|
|
469
|
+
assert self.tool_calls is not None or self.text is not None
|
|
470
|
+
anthropic_message = {
|
|
471
|
+
"role": self.role,
|
|
472
|
+
}
|
|
473
|
+
content = []
|
|
474
|
+
if self.text is not None:
|
|
475
|
+
content.append(
|
|
476
|
+
{
|
|
477
|
+
"type": "text",
|
|
478
|
+
"text": add_xml_tag(string=self.text, xml_tag=inner_thoughts_xml_tag),
|
|
479
|
+
}
|
|
480
|
+
)
|
|
481
|
+
if self.tool_calls is not None:
|
|
482
|
+
for tool_call in self.tool_calls:
|
|
483
|
+
content.append(
|
|
484
|
+
{
|
|
485
|
+
"type": "tool_use",
|
|
486
|
+
"id": tool_call.id,
|
|
487
|
+
"name": tool_call.function.name,
|
|
488
|
+
"input": json.loads(tool_call.function.arguments),
|
|
489
|
+
}
|
|
490
|
+
)
|
|
491
|
+
|
|
492
|
+
# If the only content was text, unpack it back into a singleton
|
|
493
|
+
# TODO
|
|
494
|
+
anthropic_message["content"] = content
|
|
495
|
+
|
|
496
|
+
# Optional fields, do not include if null
|
|
497
|
+
if self.name is not None:
|
|
498
|
+
anthropic_message["name"] = self.name
|
|
499
|
+
|
|
500
|
+
elif self.role == "tool":
|
|
501
|
+
# NOTE: Anthropic uses role "user" for "tool" responses
|
|
502
|
+
assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
|
|
503
|
+
anthropic_message = {
|
|
504
|
+
"role": "user", # NOTE: diff
|
|
505
|
+
"content": [
|
|
506
|
+
# TODO support error types etc
|
|
507
|
+
{
|
|
508
|
+
"type": "tool_result",
|
|
509
|
+
"tool_use_id": self.tool_call_id,
|
|
510
|
+
"content": self.text,
|
|
511
|
+
}
|
|
512
|
+
],
|
|
513
|
+
}
|
|
514
|
+
|
|
515
|
+
else:
|
|
516
|
+
raise ValueError(self.role)
|
|
517
|
+
|
|
518
|
+
return anthropic_message
|
|
519
|
+
|
|
520
|
+
def to_google_ai_dict(self, put_inner_thoughts_in_kwargs: bool = True) -> dict:
    """
    Go from Message class to Google AI REST message object

    Returns a Content dict ('role' + 'parts') per the Google AI REST API:
    # type Content: https://ai.google.dev/api/rest/v1/Content / https://ai.google.dev/api/rest/v1beta/Content
    # parts[]: Part
    # role: str ('user' or 'model'; 'function' for tool responses)

    Args:
        put_inner_thoughts_in_kwargs: if True, fold assistant inner thoughts
            (self.text) into the single function call's args under
            INNER_THOUGHTS_KWARG instead of emitting a separate text part.

    Raises:
        UserWarning: non-null 'name' on a non-tool message, or unparseable
            function call arguments.
        NotImplementedError: assistant text + tool calls without kwargs folding
            (Google AI doesn't allow non-null content alongside a function call).
        ValueError: unrecognized role.
    """
    if self.role != "tool" and self.name is not None:
        raise UserWarning(f"Using Google AI with non-null 'name' field ({self.name}) not yet supported.")

    if self.role == "system":
        # NOTE: Gemini API doesn't have a 'system' role, use 'user' instead
        # https://www.reddit.com/r/Bard/comments/1b90i8o/does_gemini_have_a_system_prompt_option_while/
        google_ai_message = {
            "role": "user",  # NOTE: no 'system'
            "parts": [{"text": self.text}],
        }

    elif self.role == "user":
        assert all([v is not None for v in [self.text, self.role]]), vars(self)
        google_ai_message = {
            "role": "user",
            "parts": [{"text": self.text}],
        }

    elif self.role == "assistant":
        assert self.tool_calls is not None or self.text is not None
        google_ai_message = {
            "role": "model",  # NOTE: Google AI uses 'model' where OpenAI uses 'assistant'
        }

        # NOTE: Google AI API doesn't allow non-null content + function call
        parts = []
        if not put_inner_thoughts_in_kwargs and self.text is not None:
            # NOTE: ideally we do multi-part for CoT / inner thoughts + function call, but Google AI API doesn't allow it
            raise NotImplementedError

        if self.tool_calls is not None:
            # NOTE: implied support for multiple calls
            for tool_call in self.tool_calls:
                # FIX: tool_call.function is an object with .name/.arguments (same
                # access pattern as to_anthropic_dict), not a subscriptable dict
                function_name = tool_call.function.name
                function_args = tool_call.function.arguments
                try:
                    # NOTE: Google AI wants actual JSON objects, not strings
                    function_args = json.loads(function_args)
                except (json.JSONDecodeError, TypeError) as e:
                    raise UserWarning(f"Failed to parse JSON function args: {function_args}") from e

                if put_inner_thoughts_in_kwargs and self.text is not None:
                    # Fold the CoT / inner thoughts into the (single) call's kwargs
                    assert "inner_thoughts" not in function_args, function_args
                    assert len(self.tool_calls) == 1
                    function_args[INNER_THOUGHTS_KWARG] = self.text

                parts.append(
                    {
                        "functionCall": {
                            "name": function_name,
                            "args": function_args,
                        }
                    }
                )
        else:
            # No tool calls: plain text-only model turn
            assert self.text is not None
            parts.append({"text": self.text})
        google_ai_message["parts"] = parts

    elif self.role == "tool":
        # NOTE: Significantly different tool calling format, more similar to function calling format
        assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)

        if self.name is None:
            warnings.warn("Couldn't find function name on tool call, defaulting to tool ID instead.")
            function_name = self.tool_call_id
        else:
            function_name = self.name

        # NOTE: Google AI API wants the function response as JSON only, no string
        try:
            function_response = json.loads(self.text)
        except (json.JSONDecodeError, TypeError):
            # Not valid JSON (or text is None) — wrap the raw value instead
            function_response = {"function_response": self.text}

        google_ai_message = {
            "role": "function",
            "parts": [
                {
                    "functionResponse": {
                        "name": function_name,
                        "response": {
                            "name": function_name,  # NOTE: name twice... why?
                            "content": function_response,
                        },
                    }
                }
            ],
        }

    else:
        raise ValueError(self.role)

    return google_ai_message
|
|
625
|
+
|
|
626
|
+
def to_cohere_dict(
    self,
    function_call_role: Optional[str] = "SYSTEM",
    function_call_prefix: Optional[str] = "[CHATBOT called function]",
    function_response_role: Optional[str] = "SYSTEM",
    function_response_prefix: Optional[str] = "[CHATBOT function returned]",
    inner_thoughts_as_kwarg: Optional[bool] = False,
) -> List[dict]:
    """Convert this message into a list of Cohere chat_history dicts.

    Cohere chat_history entries carry only 'role' and 'message' fields, so a
    single assistant turn may expand into several entries, e.g.:
        assistant [cot]:  "I'll send a message"
        assistant [func]: send_message("hi")
        tool:             {'status': 'OK'}
    becomes:
        CHATBOT.text: "I'll send a message"
        SYSTEM.text:  [CHATBOT called function] send_message("hi")
        SYSTEM.text:  [CHATBOT function returned] {'status': 'OK'}

    TODO: update this prompt style once guidance from Cohere on embedded
    function calls in multi-turn conversation becomes more clear.

    Raises:
        UserWarning: for 'system' messages (they belong in Cohere's 'preamble').
        ValueError: assistant message with neither text nor tool calls, or an
            unrecognized role.
    """
    if self.role == "system":
        # The chat_history parameter should not be used for SYSTEM messages in
        # most cases; put a leading SYSTEM message in 'preamble' instead.
        raise UserWarning(f"role 'system' messages should go in 'preamble' field for Cohere API")

    if self.role == "user":
        assert all([v is not None for v in [self.text, self.role]]), vars(self)
        return [{"role": "USER", "message": self.text}]

    if self.role == "assistant":
        # May expand into two entries: an inner thought plus a function call.
        assert self.tool_calls is not None or self.text is not None

        if self.text and self.tool_calls:
            if inner_thoughts_as_kwarg:
                raise NotImplementedError
            history = [{"role": "CHATBOT", "message": self.text}]
            for call in self.tool_calls:
                # Render each call as pseudo-code: name(k1=v1,k2=v2)
                # NOTE(review): subscripting call.function here differs from the
                # attribute access in to_anthropic_dict — confirm which is right
                parsed = json.loads(call.function["arguments"])
                rendered_args = ",".join(f"{k}={v}" for k, v in parsed.items())
                function_call_text = f"{call.function['name']}({rendered_args})"
                history.append(
                    {
                        "role": function_call_role,
                        "message": f"{function_call_prefix} {function_call_text}",
                    }
                )
            return history

        if self.tool_calls and not self.text:
            # No inner thoughts: emit one entry per raw serialized call
            return [
                {
                    "role": function_call_role,
                    "message": f"{function_call_prefix} {json_dumps(call.to_dict())}",
                }
                for call in self.tool_calls
            ]

        if self.text and not self.tool_calls:
            return [{"role": "CHATBOT", "message": self.text}]

        raise ValueError("Message does not have content nor tool_calls")

    if self.role == "tool":
        assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
        function_response_text = self.text
        return [
            {
                "role": function_response_role,
                "message": f"{function_response_prefix} {function_response_text}",
            }
        ]

    raise ValueError(self.role)
|