letta-nightly 0.6.44.dev20250326104203__py3-none-any.whl → 0.6.45.dev20250327035218__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

@@ -1,438 +0,0 @@
- import uuid
- from typing import List, Optional, Tuple
-
- import requests
-
- from letta.constants import NON_USER_MSG_PREFIX
- from letta.helpers.datetime_helpers import get_utc_time
- from letta.helpers.json_helpers import json_dumps
- from letta.llm_api.helpers import make_post_request
- from letta.local_llm.json_parser import clean_json_string_extra_backslash
- from letta.local_llm.utils import count_tokens
- from letta.schemas.openai.chat_completion_request import Tool
- from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, Choice, FunctionCall, Message, ToolCall, UsageStatistics
- from letta.tracing import log_event
- from letta.utils import get_tool_call_id
-
-
- def get_gemini_endpoint_and_headers(
-     base_url: str, model: Optional[str], api_key: str, key_in_header: bool = True, generate_content: bool = False
- ) -> Tuple[str, dict]:
-     """
-     Dynamically generate the model endpoint and headers.
-     """
-     url = f"{base_url}/v1beta/models"
-
-     # Add the model
-     if model is not None:
-         url += f"/{model}"
-
-     # Add extension for generating content if we're hitting the LM
-     if generate_content:
-         url += ":generateContent"
-
-     # Decide if api key should be in header or not
-     # Two ways to pass the key: https://ai.google.dev/tutorials/setup
-     if key_in_header:
-         headers = {"Content-Type": "application/json", "x-goog-api-key": api_key}
-     else:
-         url += f"?key={api_key}"
-         headers = {"Content-Type": "application/json"}
-
-     return url, headers
-
-
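
For reference, here is what the two key-passing modes of the removed helper produce; a minimal sketch with placeholder values (base URL, model name, and key below are illustrative, not taken from the package):

# Placeholder values for illustration only.
base_url = "https://generativelanguage.googleapis.com"
model = "gemini-pro"
api_key = "YOUR_API_KEY"

# key_in_header=True: the key travels in the x-goog-api-key header.
url = f"{base_url}/v1beta/models/{model}:generateContent"
headers = {"Content-Type": "application/json", "x-goog-api-key": api_key}

# key_in_header=False: the key is appended as a query parameter instead.
url = f"{base_url}/v1beta/models/{model}:generateContent?key={api_key}"
headers = {"Content-Type": "application/json"}
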
- def google_ai_get_model_details(base_url: str, api_key: str, model: str, key_in_header: bool = True) -> dict:
-     from letta.utils import printd
-
-     url, headers = get_gemini_endpoint_and_headers(base_url, model, api_key, key_in_header)
-
-     try:
-         response = requests.get(url, headers=headers)
-         printd(f"response = {response}")
-         response.raise_for_status()  # Raises HTTPError for 4XX/5XX status
-         response = response.json()  # convert to dict from string
-         printd(f"response.json = {response}")
-
-         # Return the model details dict
-         return response
-
-     except requests.exceptions.HTTPError as http_err:
-         # Handle HTTP errors (e.g., response 4XX, 5XX)
-         printd(f"Got HTTPError, exception={http_err}")
-         # Print the HTTP status code
-         print(f"HTTP Error: {http_err.response.status_code}")
-         # Print the response content (error message from server)
-         print(f"Message: {http_err.response.text}")
-         raise http_err
-
-     except requests.exceptions.RequestException as req_err:
-         # Handle other requests-related errors (e.g., connection error)
-         printd(f"Got RequestException, exception={req_err}")
-         raise req_err
-
-     except Exception as e:
-         # Handle other potential errors
-         printd(f"Got unknown Exception, exception={e}")
-         raise e
-
-
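
For context, the models.get endpoint this helper hits returns a per-model metadata object. A representative, abbreviated payload (field names per the public Gemini API docs; values are illustrative only) looks roughly like this, which is where inputTokenLimit below comes from:

# Abbreviated sketch of a models.get response; values are made up.
model_details = {
    "name": "models/gemini-pro",
    "displayName": "Gemini Pro",
    "inputTokenLimit": 30720,
    "outputTokenLimit": 2048,
    "supportedGenerationMethods": ["generateContent", "countTokens"],
}
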
- def google_ai_get_model_context_window(base_url: str, api_key: str, model: str, key_in_header: bool = True) -> int:
-     model_details = google_ai_get_model_details(base_url=base_url, api_key=api_key, model=model, key_in_header=key_in_header)
-     # TODO should this be:
-     # return model_details["inputTokenLimit"] + model_details["outputTokenLimit"]
-     return int(model_details["inputTokenLimit"])
-
-
- def google_ai_get_model_list(base_url: str, api_key: str, key_in_header: bool = True) -> List[dict]:
-     from letta.utils import printd
-
-     url, headers = get_gemini_endpoint_and_headers(base_url, None, api_key, key_in_header)
-
-     try:
-         response = requests.get(url, headers=headers)
-         response.raise_for_status()  # Raises HTTPError for 4XX/5XX status
-         response = response.json()  # convert to dict from string
-
-         # Grab the models out
-         model_list = response["models"]
-         return model_list
-
-     except requests.exceptions.HTTPError as http_err:
-         # Handle HTTP errors (e.g., response 4XX, 5XX)
-         printd(f"Got HTTPError, exception={http_err}")
-         # Print the HTTP status code
-         print(f"HTTP Error: {http_err.response.status_code}")
-         # Print the response content (error message from server)
-         print(f"Message: {http_err.response.text}")
-         raise http_err
-
-     except requests.exceptions.RequestException as req_err:
-         # Handle other requests-related errors (e.g., connection error)
-         printd(f"Got RequestException, exception={req_err}")
-         raise req_err
-
-     except Exception as e:
-         # Handle other potential errors
-         printd(f"Got unknown Exception, exception={e}")
-         raise e
-
-
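
A minimal usage sketch of the listing helper above (base URL and key are placeholders):

# Hypothetical call; each entry in the result is a dict with a "name" key
# like "models/gemini-pro".
models = google_ai_get_model_list("https://generativelanguage.googleapis.com", api_key="YOUR_API_KEY")
for m in models:
    print(m["name"])
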
- def add_dummy_model_messages(messages: List[dict]) -> List[dict]:
-     """Google AI API requires that every function call return be immediately followed by a 'model' role message.
-
-     In Letta, the 'model' will often call a function (e.g. send_message) that itself yields to the user,
-     so there is no natural follow-up 'model' role message.
-
-     To satisfy the Google AI API restrictions, we can add a dummy 'yield' message
-     with role == 'model' that is placed in-between the function output
-     (role == 'tool') and the user message (role == 'user').
-     """
-     dummy_yield_message = {"role": "model", "parts": [{"text": f"{NON_USER_MSG_PREFIX}Function call returned, waiting for user response."}]}
-     messages_with_padding = []
-     for i, message in enumerate(messages):
-         messages_with_padding.append(message)
-         # Check if the current message role is 'tool' and the next message role is 'user'
-         if message["role"] in ["tool", "function"] and (i + 1 < len(messages) and messages[i + 1]["role"] == "user"):
-             messages_with_padding.append(dummy_yield_message)
-
-     return messages_with_padding
-
-
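
A worked example of the padding behavior (message contents invented for illustration):

# Input: a function result followed directly by a user turn.
messages = [
    {"role": "user", "parts": [{"text": "What's the weather?"}]},
    {"role": "function", "parts": [{"text": "72F and sunny"}]},
    {"role": "user", "parts": [{"text": "Thanks!"}]},
]
padded = add_dummy_model_messages(messages)
# A dummy 'model' turn is wedged between the function output and the
# trailing user message, restoring the required role alternation.
assert padded[2]["role"] == "model"
assert len(padded) == len(messages) + 1
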
- # TODO use pydantic model as input
- def to_google_ai(openai_message_dict: dict) -> dict:
-
-     # TODO support "parts" as part of multimodal support
-     assert not isinstance(openai_message_dict["content"], list), "Multi-part message content is not yet supported"
-     if openai_message_dict["role"] == "user":
-         google_ai_message_dict = {
-             "role": "user",
-             "parts": [{"text": openai_message_dict["content"]}],
-         }
-     elif openai_message_dict["role"] == "assistant":
-         google_ai_message_dict = {
-             "role": "model",  # NOTE: diff
-             "parts": [{"text": openai_message_dict["content"]}],
-         }
-     elif openai_message_dict["role"] == "tool":
-         google_ai_message_dict = {
-             "role": "function",  # NOTE: diff
-             "parts": [{"text": openai_message_dict["content"]}],
-         }
-     else:
-         raise ValueError(f"Unsupported conversion (OpenAI -> Google AI) from role {openai_message_dict['role']}")
-
-     return google_ai_message_dict
-
-
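
The role mapping above, shown on two toy messages (a sketch assuming the corrected version that returns its result):

# OpenAI "assistant" maps to Google AI "model".
to_google_ai({"role": "assistant", "content": "Hello!"})
# -> {"role": "model", "parts": [{"text": "Hello!"}]}

# OpenAI "tool" maps to Google AI "function".
to_google_ai({"role": "tool", "content": '{"status": "OK"}'})
# -> {"role": "function", "parts": [{"text": '{"status": "OK"}'}]}
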
- # TODO convert return type to pydantic
- def convert_tools_to_google_ai_format(tools: List[Tool], inner_thoughts_in_kwargs: Optional[bool] = True) -> List[dict]:
-     """
-     OpenAI style:
-       "tools": [{
-         "type": "function",
-         "function": {
-             "name": "find_movies",
-             "description": "find ....",
-             "parameters": {
-                 "type": "object",
-                 "properties": {
-                     PARAM: {
-                         "type": PARAM_TYPE,  # eg "string"
-                         "description": PARAM_DESCRIPTION,
-                     },
-                     ...
-                 },
-                 "required": List[str],
-             }
-         }
-       }
-     ]
-
-     Google AI style:
-       "tools": [{
-         "functionDeclarations": [{
-             "name": "find_movies",
-             "description": "find movie titles currently playing in theaters based on any description, genre, title words, etc.",
-             "parameters": {
-                 "type": "OBJECT",
-                 "properties": {
-                     "location": {
-                         "type": "STRING",
-                         "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616"
-                     },
-                     "description": {
-                         "type": "STRING",
-                         "description": "Any kind of description including category or genre, title words, attributes, etc."
-                     }
-                 },
-                 "required": ["description"]
-             }
-         }, {
-             "name": "find_theaters",
-             ...
-     """
-     function_list = [
-         dict(
-             name=t.function.name,
-             description=t.function.description,
-             parameters=t.function.parameters,  # TODO need to unpack
-         )
-         for t in tools
-     ]
-
-     # Correct casing + add inner thoughts if needed
-     for func in function_list:
-         func["parameters"]["type"] = "OBJECT"
-         for param_name, param_fields in func["parameters"]["properties"].items():
-             param_fields["type"] = param_fields["type"].upper()
-         # Add inner thoughts
-         if inner_thoughts_in_kwargs:
-             from letta.local_llm.constants import INNER_THOUGHTS_KWARG, INNER_THOUGHTS_KWARG_DESCRIPTION
-
-             func["parameters"]["properties"][INNER_THOUGHTS_KWARG] = {
-                 "type": "STRING",
-                 "description": INNER_THOUGHTS_KWARG_DESCRIPTION,
-             }
-             func["parameters"]["required"].append(INNER_THOUGHTS_KWARG)
-
-     return [{"functionDeclarations": function_list}]
-
-
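
Concretely, a single OpenAI-style tool passes through the converter roughly as follows (the tool definition is invented, and INNER_THOUGHTS_KWARG is assumed here to be the string "inner_thoughts"):

# Input (OpenAI style; the converter only reads the "function" part).
openai_tool_function = {
    "name": "get_weather",
    "description": "Look up current weather",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string", "description": "City name"}},
        "required": ["city"],
    },
}

# Output (Google AI style), with inner_thoughts_in_kwargs=True:
google_tools = [{
    "functionDeclarations": [{
        "name": "get_weather",
        "description": "Look up current weather",
        "parameters": {
            "type": "OBJECT",  # casing uppercased
            "properties": {
                "city": {"type": "STRING", "description": "City name"},
                # Injected kwarg; description comes from INNER_THOUGHTS_KWARG_DESCRIPTION.
                "inner_thoughts": {"type": "STRING", "description": "..."},
            },
            "required": ["city", "inner_thoughts"],
        },
    }],
}]
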
- def convert_google_ai_response_to_chatcompletion(
-     response_json: dict,  # REST response from Google AI API
-     model: str,  # Required since not returned
-     input_messages: Optional[List[dict]] = None,  # Required if the API doesn't return UsageMetadata
-     pull_inner_thoughts_from_args: Optional[bool] = True,
- ) -> ChatCompletionResponse:
-     """Google AI API response format is not the same as ChatCompletion, requires unpacking
-
-     Example:
-     {
-       "candidates": [
-         {
-           "content": {
-             "parts": [
-               {
-                 "text": " OK. Barbie is showing in two theaters in Mountain View, CA: AMC Mountain View 16 and Regal Edwards 14."
-               }
-             ]
-           }
-         }
-       ],
-       "usageMetadata": {
-         "promptTokenCount": 9,
-         "candidatesTokenCount": 27,
-         "totalTokenCount": 36
-       }
-     }
-     """
-     try:
-         choices = []
-         index = 0
-         for candidate in response_json["candidates"]:
-             content = candidate["content"]
-
-             role = content["role"]
-             assert role == "model", f"Unknown role in response: {role}"
-
-             parts = content["parts"]
-             # TODO support parts / multimodal
-             # TODO support parallel tool calling natively
-             # TODO Alternative here is to throw away everything else except for the first part
-             for response_message in parts:
-                 # Convert the actual message style to OpenAI style
-                 if "functionCall" in response_message and response_message["functionCall"] is not None:
-                     function_call = response_message["functionCall"]
-                     assert isinstance(function_call, dict), function_call
-                     function_name = function_call["name"]
-                     assert isinstance(function_name, str), function_name
-                     function_args = function_call["args"]
-                     assert isinstance(function_args, dict), function_args
-
-                     # NOTE: this also involves stripping the inner monologue out of the function
-                     if pull_inner_thoughts_from_args:
-                         from letta.local_llm.constants import INNER_THOUGHTS_KWARG
-
-                         assert INNER_THOUGHTS_KWARG in function_args, f"Couldn't find inner thoughts in function args:\n{function_call}"
-                         inner_thoughts = function_args.pop(INNER_THOUGHTS_KWARG)
-                         assert inner_thoughts is not None, f"Expected non-null inner thoughts function arg:\n{function_call}"
-                     else:
-                         inner_thoughts = None
-
-                     # Google AI API doesn't generate tool call IDs
-                     openai_response_message = Message(
-                         role="assistant",  # NOTE: "model" -> "assistant"
-                         content=inner_thoughts,
-                         tool_calls=[
-                             ToolCall(
-                                 id=get_tool_call_id(),
-                                 type="function",
-                                 function=FunctionCall(
-                                     name=function_name,
-                                     arguments=clean_json_string_extra_backslash(json_dumps(function_args)),
-                                 ),
-                             )
-                         ],
-                     )
-
-                 else:
-
-                     # Inner thoughts are the content by default
-                     inner_thoughts = response_message["text"]
-
-                     # Google AI API doesn't generate tool call IDs
-                     openai_response_message = Message(
-                         role="assistant",  # NOTE: "model" -> "assistant"
-                         content=inner_thoughts,
-                     )
-
-                 # Google AI API uses different finish reason strings than OpenAI
-                 # OpenAI: 'stop', 'length', 'function_call', 'content_filter', null
-                 # see: https://platform.openai.com/docs/guides/text-generation/chat-completions-api
-                 # Google AI API: FINISH_REASON_UNSPECIFIED, STOP, MAX_TOKENS, SAFETY, RECITATION, OTHER
-                 # see: https://ai.google.dev/api/python/google/ai/generativelanguage/Candidate/FinishReason
-                 finish_reason = candidate["finishReason"]
-                 if finish_reason == "STOP":
-                     openai_finish_reason = (
-                         "function_call"
-                         if openai_response_message.tool_calls is not None and len(openai_response_message.tool_calls) > 0
-                         else "stop"
-                     )
-                 elif finish_reason == "MAX_TOKENS":
-                     openai_finish_reason = "length"
-                 elif finish_reason == "SAFETY":
-                     openai_finish_reason = "content_filter"
-                 elif finish_reason == "RECITATION":
-                     openai_finish_reason = "content_filter"
-                 else:
-                     raise ValueError(f"Unrecognized finish reason in Google AI response: {finish_reason}")
-
-                 choices.append(
-                     Choice(
-                         finish_reason=openai_finish_reason,
-                         index=index,
-                         message=openai_response_message,
-                     )
-                 )
-                 index += 1
-
-         # if len(choices) > 1:
-         #     raise UserWarning(f"Unexpected number of candidates in response (expected 1, got {len(choices)})")
-
-         # NOTE: some of the Google AI APIs show UsageMetadata in the response, but it sometimes seems to be missing
-         # "usageMetadata": {
-         #     "promptTokenCount": 9,
-         #     "candidatesTokenCount": 27,
-         #     "totalTokenCount": 36
-         # }
-         if "usageMetadata" in response_json:
-             usage = UsageStatistics(
-                 prompt_tokens=response_json["usageMetadata"]["promptTokenCount"],
-                 completion_tokens=response_json["usageMetadata"]["candidatesTokenCount"],
-                 total_tokens=response_json["usageMetadata"]["totalTokenCount"],
-             )
-         else:
-             # Count it ourselves
-             assert input_messages is not None, "Didn't get UsageMetadata from the API response, so input_messages is required"
-             prompt_tokens = count_tokens(json_dumps(input_messages))  # NOTE: this is a very rough approximation
-             completion_tokens = count_tokens(json_dumps(openai_response_message.model_dump()))  # NOTE: this is also approximate
-             total_tokens = prompt_tokens + completion_tokens
-             usage = UsageStatistics(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=total_tokens,
-             )
-
-         response_id = str(uuid.uuid4())
-         return ChatCompletionResponse(
-             id=response_id,
-             choices=choices,
-             model=model,  # NOTE: Google API doesn't pass back model in the response
-             created=get_utc_time(),
-             usage=usage,
-         )
-     except KeyError as e:
-         raise e
-
-
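
A minimal end-to-end sketch of the conversion for a function-call response (payload invented; assumes inner thoughts were requested under the kwarg "inner_thoughts"):

response_json = {
    "candidates": [{
        "content": {
            "role": "model",
            "parts": [{
                "functionCall": {
                    "name": "send_message",
                    "args": {"inner_thoughts": "User greeted me.", "message": "Hi!"},
                },
            }],
        },
        "finishReason": "STOP",
    }],
    "usageMetadata": {"promptTokenCount": 9, "candidatesTokenCount": 27, "totalTokenCount": 36},
}
completion = convert_google_ai_response_to_chatcompletion(response_json, model="gemini-pro")
# inner_thoughts is popped into message.content, the remaining args become the
# tool-call arguments, and STOP plus a tool call maps to "function_call".
assert completion.choices[0].finish_reason == "function_call"
assert completion.choices[0].message.content == "User greeted me."
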
- # TODO convert 'data' type to pydantic
- def google_ai_chat_completions_request(
-     base_url: str,
-     model: str,
-     api_key: str,
-     data: dict,
-     key_in_header: bool = True,
-     add_postfunc_model_messages: bool = True,
-     # NOTE: Google AI API doesn't support mixing parts 'text' and 'function',
-     # so there's no clean way to put inner thoughts in the same message as a function call
-     inner_thoughts_in_kwargs: bool = True,
- ) -> ChatCompletionResponse:
-     """https://ai.google.dev/docs/function_calling
-
-     From https://ai.google.dev/api/rest#service-endpoint:
-     "A service endpoint is a base URL that specifies the network address of an API service.
-     One service might have multiple service endpoints.
-     This service has the following service endpoint and all URIs below are relative to this service endpoint:
-     https://xxx.googleapis.com"
-     """
-
-     assert api_key is not None, "Missing api_key when calling Google AI"
-
-     url, headers = get_gemini_endpoint_and_headers(base_url, model, api_key, key_in_header, generate_content=True)
-
-     # data["contents"][-1]["role"] = "model"
-     if add_postfunc_model_messages:
-         data["contents"] = add_dummy_model_messages(data["contents"])
-
-     log_event(name="llm_request_sent", attributes=data)
-     response_json = make_post_request(url, headers, data)
-     log_event(name="llm_response_received", attributes=response_json)
-     try:
-         return convert_google_ai_response_to_chatcompletion(
-             response_json=response_json,
-             model=data.get("model"),
-             input_messages=data["contents"],
-             pull_inner_thoughts_from_args=inner_thoughts_in_kwargs,
-         )
-     except Exception as conversion_error:
-         print(f"Error during response conversion: {conversion_error}")
-         raise conversion_error
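
Putting the pieces together, a hypothetical call through the removed module would have looked roughly like this (base URL, key, and message contents are placeholders):

data = {
    "contents": [{"role": "user", "parts": [{"text": "Hello"}]}],
    # "tools": convert_tools_to_google_ai_format(tools),  # optional, for function calling
}
response = google_ai_chat_completions_request(
    base_url="https://generativelanguage.googleapis.com",
    model="gemini-pro",
    api_key="YOUR_API_KEY",
    data=data,
)
print(response.choices[0].message.content)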