letta-nightly 0.5.2.dev20241118104226__py3-none-any.whl → 0.5.2.dev20241119104253__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

letta/agent.py CHANGED
@@ -1584,7 +1584,8 @@ class Agent(BaseAgent):
 
     def count_tokens(self) -> int:
        """Count the tokens in the current context window"""
-        return self.get_context_window().context_window_size_current
+        context_window_breakdown = self.get_context_window()
+        return context_window_breakdown.context_window_size_current
 
 
 def save_agent(agent: Agent, ms: MetadataStore):
letta/constants.py CHANGED
@@ -19,7 +19,7 @@ IN_CONTEXT_MEMORY_KEYWORD = "CORE_MEMORY"
 TOOL_CALL_ID_MAX_LEN = 29
 
 # minimum context window size
-MIN_CONTEXT_WINDOW = 4000
+MIN_CONTEXT_WINDOW = 4096
 
 # embeddings
 MAX_EMBEDDING_DIM = 4096  # maximum supported embeding size - do NOT change or else DBs will need to be reset
letta/functions/functions.py CHANGED
@@ -3,7 +3,7 @@ import inspect
 import os
 from textwrap import dedent  # remove indentation
 from types import ModuleType
-from typing import Optional
+from typing import Optional, List
 
 from letta.constants import CLI_WARNING_PREFIX
 from letta.functions.schema_generator import generate_schema
@@ -15,6 +15,7 @@ def derive_openai_json_schema(source_code: str, name: Optional[str] = None) -> d
     # Define a custom environment with necessary imports
     env = {
         "Optional": Optional,  # Add any other required imports here
+        "List": List
     }
 
     env.update(globals())
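
Why the new `List` entry matters: derive_openai_json_schema executes the tool's source code inside that `env` namespace, so any typing names used in the tool's annotations must already exist there. A minimal sketch of the failure mode this addresses (hypothetical tool source, not from the package):

    from textwrap import dedent
    from typing import Optional

    source_code = dedent('''
        def pick_first(items: List[str]) -> Optional[str]:
            """Return the first item, if any."""
            return items[0] if items else None
    ''')

    env = {"Optional": Optional}
    # exec(source_code, env)    # NameError: name 'List' is not defined (annotations are
    #                           # evaluated at definition time, against this namespace)

    from typing import List
    env["List"] = List          # mirroring the change above
    exec(source_code, env)      # the annotation now resolves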
letta/llm_api/llm_api_tools.py CHANGED
@@ -25,6 +25,7 @@ from letta.local_llm.constants import (
     INNER_THOUGHTS_KWARG,
     INNER_THOUGHTS_KWARG_DESCRIPTION,
 )
+from letta.local_llm.utils import num_tokens_from_functions, num_tokens_from_messages
 from letta.schemas.llm_config import LLMConfig
 from letta.schemas.message import Message
 from letta.schemas.openai.chat_completion_request import (
@@ -33,6 +34,7 @@ from letta.schemas.openai.chat_completion_request import (
     cast_message_to_subtype,
 )
 from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
+from letta.settings import ModelSettings
 from letta.streaming_interface import (
     AgentChunkStreamingInterface,
     AgentRefreshStreamingInterface,
@@ -122,10 +124,19 @@ def create(
     """Return response to chat completion with backoff"""
     from letta.utils import printd
 
+    # Count the tokens first; if there's an overflow, exit early by throwing an error up the stack
+    # NOTE: we want to include a specific substring in the error message to trigger summarization
+    messages_oai_format = [m.to_openai_dict() for m in messages]
+    prompt_tokens = num_tokens_from_messages(messages=messages_oai_format, model=llm_config.model)
+    function_tokens = num_tokens_from_functions(functions=functions, model=llm_config.model) if functions else 0
+    if prompt_tokens + function_tokens > llm_config.context_window:
+        raise Exception(f"Request exceeds maximum context length ({prompt_tokens + function_tokens} > {llm_config.context_window} tokens)")
+
     if not model_settings:
         from letta.settings import model_settings
 
         model_settings = model_settings
+    assert isinstance(model_settings, ModelSettings)
 
     printd(f"Using model {llm_config.model_endpoint_type}, endpoint: {llm_config.model_endpoint}")
 
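The pre-flight check above reuses Letta's own token counters. For intuition, here is a standalone sketch of the same overflow guard using tiktoken directly (count_request_tokens is a hypothetical helper, not part of the package; the per-message overhead constant is approximate):

    import tiktoken

    def count_request_tokens(messages: list, context_window: int, model: str = "gpt-4") -> int:
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            encoding = tiktoken.get_encoding("cl100k_base")  # same fallback as local_llm/utils.py
        total = 0
        for message in messages:
            total += 4  # rough per-message framing overhead
            for value in message.values():
                if isinstance(value, str):
                    total += len(encoding.encode(value))
        if total > context_window:
            # the "exceeds maximum context length" substring is what triggers summarization upstream
            raise ValueError(f"Request exceeds maximum context length ({total} > {context_window} tokens)")
        return total

    count_request_tokens([{"role": "user", "content": "hi"}], context_window=8192)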
@@ -326,6 +337,33 @@ def create(
 
         return response
 
+    elif llm_config.model_endpoint_type == "together":
+        """TogetherAI endpoint that goes via /completions instead of /chat/completions"""
+
+        if stream:
+            raise NotImplementedError(f"Streaming not yet implemented for TogetherAI (via the /completions endpoint).")
+
+        if model_settings.together_api_key is None and llm_config.model_endpoint == "https://api.together.ai/v1/completions":
+            raise ValueError(f"TogetherAI key is missing from letta config file")
+
+        return get_chat_completion(
+            model=llm_config.model,
+            messages=messages,
+            functions=functions,
+            functions_python=functions_python,
+            function_call=function_call,
+            context_window=llm_config.context_window,
+            endpoint=llm_config.model_endpoint,
+            endpoint_type="vllm",  # NOTE: use the vLLM path through /completions
+            wrapper=llm_config.model_wrapper,
+            user=str(user_id),
+            # hint
+            first_message=first_message,
+            # auth-related
+            auth_type="bearer_token",  # NOTE: Together expects bearer token auth
+            auth_key=model_settings.together_api_key,
+        )
+
     # local model
     else:
         if stream:
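
The new branch dispatches Together through the vLLM-style /completions code path: the chat history is rendered into a single prompt string by the model wrapper, then sent with bearer-token auth. A bare-bones sketch of the resulting request shape (plain requests, example model id and prompt; this is not Letta's get_chat_completion internals):

    import os
    import requests

    resp = requests.post(
        "https://api.together.ai/v1/completions",
        headers={"Authorization": f"Bearer {os.environ['TOGETHER_API_KEY']}"},
        json={
            "model": "meta-llama/Llama-3-70b-hf",  # example id; list_llm_models() discovers real ones
            "prompt": "### Instruction:\nSay hello.\n### Response:\n",  # produced by the model wrapper
            "max_tokens": 128,
        },
        timeout=60,
    )
    resp.raise_for_status()
    print(resp.json()["choices"][0]["text"])  # assuming an OpenAI-compatible completions payload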
letta/llm_api/openai.py CHANGED
@@ -536,7 +536,6 @@ def openai_chat_completions_request(
         tool["function"] = convert_to_structured_output(tool["function"])
 
     response_json = make_post_request(url, headers, data)
-
     return ChatCompletionResponse(**response_json)
 
 
letta/local_llm/utils.py CHANGED
@@ -94,7 +94,10 @@ def num_tokens_from_functions(functions: List[dict], model: str = "gpt-4"):
     num_tokens = 0
     for function in functions:
         function_tokens = len(encoding.encode(function["name"]))
-        function_tokens += len(encoding.encode(function["description"]))
+        if function["description"]:
+            function_tokens += len(encoding.encode(function["description"]))
+        else:
+            raise ValueError(f"Function {function['name']} has no description, function: {function}")
 
         if "parameters" in function:
             parameters = function["parameters"]
@@ -184,6 +187,7 @@ def num_tokens_from_messages(messages: List[dict], model: str = "gpt-4") -> int:
     https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/11
     """
     try:
+        # Attempt to search for the encoding based on the model string
         encoding = tiktoken.encoding_for_model(model)
     except KeyError:
         # print("Warning: model not found. Using cl100k_base encoding.")
@@ -228,7 +232,13 @@ def num_tokens_from_messages(messages: List[dict], model: str = "gpt-4") -> int:
                     # num_tokens += len(encoding.encode(value["arguments"]))
 
                 else:
-                    num_tokens += len(encoding.encode(value))
+                    if value is None:
+                        # raise ValueError(f"Message has null value: {key} with value: {value} - message={message}")
+                        warnings.warn(f"Message has null value: {key} with value: {value} - message={message}")
+                    else:
+                        if not isinstance(value, str):
+                            raise ValueError(f"Message has non-string value: {key} with value: {value} - message={message}")
+                        num_tokens += len(encoding.encode(value))
 
             if key == "name":
                 num_tokens += tokens_per_name
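
The None branch exists because assistant turns that only carry tool calls legitimately have content: null. A usage sketch (assuming the helper is importable as shown in the llm_api_tools.py hunk above):

    from letta.local_llm.utils import num_tokens_from_messages

    messages = [
        {"role": "user", "content": "What's 2 + 2?"},
        {"role": "assistant", "content": None},  # tool-call turns often have null content
    ]
    # Previously encoding.encode(None) raised; now the null value produces a warning and is skipped.
    print(num_tokens_from_messages(messages=messages, model="gpt-4"))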
letta/providers.py CHANGED
@@ -2,7 +2,7 @@ from typing import List, Optional
 
 from pydantic import BaseModel, Field, model_validator
 
-from letta.constants import LLM_MAX_TOKENS
+from letta.constants import LLM_MAX_TOKENS, MIN_CONTEXT_WINDOW
 from letta.llm_api.azure_openai import (
     get_azure_chat_completions_endpoint,
     get_azure_embeddings_endpoint,
@@ -67,10 +67,15 @@ class OpenAIProvider(Provider):
         extra_params = {"supported_parameters": "tools"} if "openrouter.ai" in self.base_url else None
         response = openai_get_model_list(self.base_url, api_key=self.api_key, extra_params=extra_params)
 
-        assert "data" in response, f"OpenAI model query response missing 'data' field: {response}"
+        # TogetherAI's response is missing the 'data' field
+        # assert "data" in response, f"OpenAI model query response missing 'data' field: {response}"
+        if "data" in response:
+            data = response["data"]
+        else:
+            data = response
 
         configs = []
-        for model in response["data"]:
+        for model in data:
             assert "id" in model, f"OpenAI model missing 'id' field: {model}"
             model_name = model["id"]
@@ -82,6 +87,32 @@
 
             if not context_window_size:
                 continue
+
+            # TogetherAI includes the type, which we can use to filter out embedding models
+            if self.base_url == "https://api.together.ai/v1":
+                if "type" in model and model["type"] != "chat":
+                    continue
+
+                # for TogetherAI, we need to skip the models that don't support JSON mode / function calling
+                # requests.exceptions.HTTPError: HTTP error occurred: 400 Client Error: Bad Request for url: https://api.together.ai/v1/chat/completions | Status code: 400, Message: {
+                #   "error": {
+                #     "message": "mistralai/Mixtral-8x7B-v0.1 is not supported for JSON mode/function calling",
+                #     "type": "invalid_request_error",
+                #     "param": null,
+                #     "code": "constraints_model"
+                #   }
+                # }
+                if "config" not in model:
+                    continue
+                if "chat_template" not in model["config"]:
+                    continue
+                if model["config"]["chat_template"] is None:
+                    continue
+                if "tools" not in model["config"]["chat_template"]:
+                    continue
+                # if "config" in data and "chat_template" in data["config"] and "tools" not in data["config"]["chat_template"]:
+                #     continue
+
             configs.append(
                 LLMConfig(model=model_name, model_endpoint_type="openai", model_endpoint=self.base_url, context_window=context_window_size)
             )
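
The chat_template checks above boil down to a single predicate: keep a Together model only if its chat template mentions tool use. A compact restatement as a hypothetical helper (equivalent to the inline checks, not part of the package):

    def together_supports_tools(model: dict) -> bool:
        config = model.get("config") or {}
        chat_template = config.get("chat_template")
        return chat_template is not None and "tools" in chat_template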
@@ -325,6 +356,113 @@ class GroqProvider(OpenAIProvider):
         raise NotImplementedError
 
 
+class TogetherProvider(OpenAIProvider):
+    """TogetherAI provider that uses the /completions API
+
+    TogetherAI can also be used via the /chat/completions API
+    by setting OPENAI_API_KEY and OPENAI_API_BASE to the TogetherAI API key
+    and API URL; however, /completions is preferred because their /chat/completions
+    function calling support is limited.
+    """
+
+    name: str = "together"
+    base_url: str = "https://api.together.ai/v1"
+    api_key: str = Field(..., description="API key for the TogetherAI API.")
+    default_prompt_formatter: str = Field(..., description="Default prompt formatter (aka model wrapper) to use on vLLM /completions API.")
+
+    def list_llm_models(self) -> List[LLMConfig]:
+        from letta.llm_api.openai import openai_get_model_list
+
+        response = openai_get_model_list(self.base_url, api_key=self.api_key)
+
+        # TogetherAI's response is missing the 'data' field
+        # assert "data" in response, f"OpenAI model query response missing 'data' field: {response}"
+        if "data" in response:
+            data = response["data"]
+        else:
+            data = response
+
+        configs = []
+        for model in data:
+            assert "id" in model, f"TogetherAI model missing 'id' field: {model}"
+            model_name = model["id"]
+
+            if "context_length" in model:
+                # Context length is returned in OpenRouter as "context_length"
+                context_window_size = model["context_length"]
+            else:
+                context_window_size = self.get_model_context_window_size(model_name)
+
+            # We need the context length for embeddings too
+            if not context_window_size:
+                continue
+
+            # Skip models that are too small for Letta
+            if context_window_size <= MIN_CONTEXT_WINDOW:
+                continue
+
+            # TogetherAI includes the type, which we can use to filter for embedding models
+            if "type" in model and model["type"] not in ["chat", "language"]:
+                continue
+
+            configs.append(
+                LLMConfig(
+                    model=model_name,
+                    model_endpoint_type="together",
+                    model_endpoint=self.base_url,
+                    model_wrapper=self.default_prompt_formatter,
+                    context_window=context_window_size,
+                )
+            )
+
+        return configs
+
+    def list_embedding_models(self) -> List[EmbeddingConfig]:
+        # TODO re-enable once we figure out how to pass API keys through properly
+        return []
+
+        # from letta.llm_api.openai import openai_get_model_list
+
+        # response = openai_get_model_list(self.base_url, api_key=self.api_key)
+
+        # # TogetherAI's response is missing the 'data' field
+        # # assert "data" in response, f"OpenAI model query response missing 'data' field: {response}"
+        # if "data" in response:
+        #     data = response["data"]
+        # else:
+        #     data = response
+
+        # configs = []
+        # for model in data:
+        #     assert "id" in model, f"TogetherAI model missing 'id' field: {model}"
+        #     model_name = model["id"]
+
+        #     if "context_length" in model:
+        #         # Context length is returned in OpenRouter as "context_length"
+        #         context_window_size = model["context_length"]
+        #     else:
+        #         context_window_size = self.get_model_context_window_size(model_name)
+
+        #     if not context_window_size:
+        #         continue
+
+        #     # TogetherAI includes the type, which we can use to filter out embedding models
+        #     if "type" in model and model["type"] not in ["embedding"]:
+        #         continue
+
+        #     configs.append(
+        #         EmbeddingConfig(
+        #             embedding_model=model_name,
+        #             embedding_endpoint_type="openai",
+        #             embedding_endpoint=self.base_url,
+        #             embedding_dim=context_window_size,
+        #             embedding_chunk_size=300,  # TODO: change?
+        #         )
+        #     )
+
+        # return configs
+
+
 class GoogleAIProvider(Provider):
     # gemini
     api_key: str = Field(..., description="API key for the Google AI API.")
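
Usage sketch for the new provider (constructor arguments per the class definition above; the formatter value is an example, not a recommendation):

    from letta.providers import TogetherProvider

    provider = TogetherProvider(
        api_key="<TOGETHER_API_KEY>",
        default_prompt_formatter="chatml",  # example wrapper name
    )
    for cfg in provider.list_llm_models():
        print(cfg.model, cfg.context_window)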
letta/schemas/llm_config.py CHANGED
@@ -35,6 +35,7 @@ class LLMConfig(BaseModel):
         "vllm",
         "hugging-face",
         "mistral",
+        "together",  # completions endpoint
     ] = Field(..., description="The endpoint type for the model.")
     model_endpoint: Optional[str] = Field(None, description="The endpoint for the model.")
     model_wrapper: Optional[str] = Field(None, description="The wrapper for the model.")
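
With the literal extended, a Together-backed config can be constructed directly. A sketch (field values are illustrative):

    from letta.schemas.llm_config import LLMConfig

    config = LLMConfig(
        model="meta-llama/Llama-3-70b-hf",        # example model id
        model_endpoint_type="together",           # now a valid endpoint type
        model_endpoint="https://api.together.ai/v1",
        model_wrapper="chatml",                   # prompt formatter, example value
        context_window=8192,
    )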
letta/schemas/openai/chat_completion_response.py CHANGED
@@ -46,6 +46,7 @@ class Choice(BaseModel):
     index: int
     message: Message
     logprobs: Optional[Dict[str, Union[List[MessageContentLogProb], None]]] = None
+    seed: Optional[int] = None  # found in TogetherAI
 
 
 class UsageStatistics(BaseModel):
letta/server/server.py CHANGED
@@ -49,6 +49,7 @@ from letta.providers import (
     OllamaProvider,
     OpenAIProvider,
     Provider,
+    TogetherProvider,
     VLLMChatCompletionsProvider,
     VLLMCompletionsProvider,
 )
@@ -303,7 +304,18 @@ class SyncServer(Server):
             )
         )
         if model_settings.groq_api_key:
-            self._enabled_providers.append(GroqProvider(api_key=model_settings.groq_api_key))
+            self._enabled_providers.append(
+                GroqProvider(
+                    api_key=model_settings.groq_api_key,
+                )
+            )
+        if model_settings.together_api_key:
+            self._enabled_providers.append(
+                TogetherProvider(
+                    api_key=model_settings.together_api_key,
+                    default_prompt_formatter=model_settings.default_prompt_formatter,
+                )
+            )
         if model_settings.vllm_api_base:
             # vLLM exposes both a /chat/completions and a /completions endpoint
             self._enabled_providers.append(
letta/services/tool_manager.py CHANGED
@@ -70,6 +70,10 @@ class ToolManager:
             pydantic_tool.organization_id = actor.organization_id
             tool_data = pydantic_tool.model_dump()
             tool = ToolModel(**tool_data)
+            # The description is most likely auto-generated via the json_schema,
+            # so copy it over into the top-level description field
+            if tool.description is None:
+                tool.description = tool.json_schema.get("description", None)
             tool.create(session, actor=actor)
 
             return tool.to_pydantic()
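
The fallback works because the generated JSON schema typically carries a description lifted from the tool function's docstring. A self-contained sketch of the same copy-over (plain dicts, not ToolManager's ORM types):

    json_schema = {
        "name": "pick_first",
        "description": "Return the first item, if any.",  # typically derived from the docstring
        "parameters": {"type": "object", "properties": {}},
    }
    description = None  # tool was created without an explicit description
    if description is None:
        description = json_schema.get("description", None)
    assert description == "Return the first item, if any."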
letta/settings.py CHANGED
@@ -43,6 +43,9 @@ class ModelSettings(BaseSettings):
     # google ai
     gemini_api_key: Optional[str] = None
 
+    # together
+    together_api_key: Optional[str] = None
+
     # vLLM
     vllm_api_base: Optional[str] = None
 
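ModelSettings extends pydantic's BaseSettings, so the new field should be fillable from the environment without code changes. A sketch (the env variable name is assumed from the field name per pydantic-settings defaults):

    # shell:
    #   export TOGETHER_API_KEY="..."
    from letta.settings import model_settings

    if model_settings.together_api_key:
        print("SyncServer will register TogetherProvider (see the server.py hunk above)")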
letta_nightly-0.5.2.dev20241118104226.dist-info/METADATA → letta_nightly-0.5.2.dev20241119104253.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: letta-nightly
-Version: 0.5.2.dev20241118104226
+Version: 0.5.2.dev20241119104253
 Summary: Create LLM agents with long-term memory and custom tools
 License: Apache License
 Author: Letta Team
letta_nightly-0.5.2.dev20241118104226.dist-info/RECORD → letta_nightly-0.5.2.dev20241119104253.dist-info/RECORD RENAMED
@@ -1,6 +1,6 @@
 letta/__init__.py,sha256=IMLtpH5HlbVUa1mmPpSyBpTZqVz1rsS7lbuqT7viBQ0,1014
 letta/__main__.py,sha256=6Hs2PV7EYc5Tid4g4OtcLXhqVHiNYTGzSBdoOnW2HXA,29
-letta/agent.py,sha256=dQ0gtD0mPbWAvOTlPHHW2iYDfqgVwUZyOCgunKWZkGA,77222
+letta/agent.py,sha256=QmDFxJW6_X2eM19alscfxzsvzt-cNRmvkmsPfoh3xfw,77282
 letta/agent_store/chroma.py,sha256=upR5zGnGs6I6btulEYbiZdGG87BgKjxUJOQZ4Y-RQ_M,12492
 letta/agent_store/db.py,sha256=9pIeakcRls0Fi3wO2b9Jg_Qw1IBJ-_GUSpE1z-upGS0,23425
 letta/agent_store/lancedb.py,sha256=i63d4VZwj9UIOTNs5f0JZ_r5yZD-jKWz4FAH4RMpXOE,5104
@@ -17,7 +17,7 @@ letta/client/client.py,sha256=XR4VukYXnZ1LG5og_BP1_m_FWrjb6ESFFkT-gK6_-os,97885
 letta/client/streaming.py,sha256=Hh5pjlyrdCuO2V75ZCxSSOCPd3BmHdKFGaIUJC6fBp0,4775
 letta/client/utils.py,sha256=OJlAKWrldc4I6M1WpcTWNtPJ4wfxlzlZqWLfCozkFtI,2872
 letta/config.py,sha256=eK-ip06ELHNYriInkgfidDvJxQ2tD1u49I-VLXB87nE,18929
-letta/constants.py,sha256=c8pEfIhtpqFGunyzGObnfEeRJNkunfmq9Pfiau8YYfA,6544
+letta/constants.py,sha256=vX36LGNB-DSWovySf_0sqD6yfgnjvvMp2XROSehM3nk,6544
 letta/credentials.py,sha256=D9mlcPsdDWlIIXQQD8wSPE9M_QvsRrb0p3LB5i9OF5Q,5806
 letta/data_sources/connectors.py,sha256=5VKxfeV-QyUlK1wexLlpgar99dGm6PHxFaEbSeByo_U,9923
 letta/data_sources/connectors_helper.py,sha256=2TQjCt74fCgT5sw1AP8PalDEk06jPBbhrPG4HVr-WLs,3371
@@ -26,7 +26,7 @@ letta/errors.py,sha256=cDOo4cSYL-LA0w0b0GdsxXd5k2I1LLOY8nhtXk9YqYs,2875
 letta/functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/functions/function_sets/base.py,sha256=N4QmOjL6gDEyOg67ocF6zVKM-NquTo-yXG_T8r18buA,6440
 letta/functions/function_sets/extras.py,sha256=Jik3UiDqYTm4Lam1XPTvuVjvgUHwIAhopsnbmVhGMBg,4732
-letta/functions/functions.py,sha256=0olz-jfj1sdmYoeks43mgrBDc3QpyxSS3Pbc4LVj3N8,4109
+letta/functions/functions.py,sha256=VyA_7O56KRUj88iuMkLJTRfascaTCj1qFGT0BnDgC6k,4140
 letta/functions/helpers.py,sha256=JU3e5xkkTVx4EevBmtyCRnidf0ncAeASvH2ZT_aBvPc,9905
 letta/functions/schema_generator.py,sha256=CoDZQfXsOKBp5VOv-024efcR833wyrchQbQIN7mL11A,8407
 letta/helpers/__init__.py,sha256=p0luQ1Oe3Skc6sH4O58aHHA3Qbkyjifpuq0DZ1GAY0U,59
@@ -42,9 +42,9 @@ letta/llm_api/azure_openai_constants.py,sha256=oXtKrgBFHf744gyt5l1thILXgyi8NDNUr
 letta/llm_api/cohere.py,sha256=vDRd-SUGp1t_JUIdwC3RkIhwMl0OY7n-tAU9uPORYkY,14826
 letta/llm_api/google_ai.py,sha256=xKz9JDZs3m6yzSfcgCAAUD_rjI20BBIINoiSvlcnOw0,17621
 letta/llm_api/helpers.py,sha256=KqkdjZWYghx4OPwLcHEC6ruc_z9DScbysw3VH4x9A0Q,9887
-letta/llm_api/llm_api_tools.py,sha256=KFG2miI7KrDOIcOSgm2jwBIb3qvzYt2O_5UNjTbTsm8,14786
+letta/llm_api/llm_api_tools.py,sha256=h2eudFygI6yFIOaA5Q9GmhiwMPq2mHQyhoSHbn57CCE,16866
 letta/llm_api/mistral.py,sha256=fHdfD9ug-rQIk2qn8tRKay1U6w9maF11ryhKi91FfXM,1593
-letta/llm_api/openai.py,sha256=p1tKbfCfhWOLnx2u-Vt67rq12uVUbo_zyslHHlmuTyU,23845
+letta/llm_api/openai.py,sha256=gGuxfE4_TURwnKfleogDCURvKe4UFb7KitoCWqWqKkI,23844
 letta/local_llm/README.md,sha256=hFJyw5B0TU2jrh9nb0zGZMgdH-Ei1dSRfhvPQG_NSoU,168
 letta/local_llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/local_llm/chat_completion_proxy.py,sha256=SiohxsjGTku4vOryOZx7I0t0xoO_sUuhXgoe62fKq3c,12995
@@ -76,7 +76,7 @@ letta/local_llm/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 letta/local_llm/settings/deterministic_mirostat.py,sha256=kgRikcxYHfIbPFydHW6W7IO9jmp6NeA7JNAhnI3DPsc,1221
 letta/local_llm/settings/settings.py,sha256=ZAbzDpu2WsBXjVGXJ-TKUpS99VTI__3EoZml9KqYef0,2971
 letta/local_llm/settings/simple.py,sha256=HAO2jBJ_hJCEsXWIJcD0sckR0tI0zs3x2CPdf6ORQLs,719
-letta/local_llm/utils.py,sha256=xdZDUPKz4HDgQ_kjtbqKU5DMIlq1Q1NJIqhHdGSMjBY,12193
+letta/local_llm/utils.py,sha256=FtK3LGl3iiWQxCIQMsnmduR-RcYcej_Z2hbpfOVn3-8,12897
 letta/local_llm/vllm/api.py,sha256=2kAGZjc_GH9ILJnVRq-45yfsfKELVfbC9VEl_cIC6vg,2590
 letta/local_llm/webui/api.py,sha256=kkxncdCFq1vjgvaHOoQ__j7rcDPgC1F64KcEm94Y6Rs,2639
 letta/local_llm/webui/legacy_api.py,sha256=k3H3y4qp2Fs-XmP24iSIEyvq6wjWFWBzklY3-wRAJNI,2335
@@ -125,7 +125,7 @@ letta/prompts/system/memgpt_gpt35_extralong.txt,sha256=FheNhYoIzNz6qnJKhVquZVSMj
 letta/prompts/system/memgpt_intuitive_knowledge.txt,sha256=sA7c3urYqREVnSBI81nTGImXAekqC0Fxc7RojFqud1g,2966
 letta/prompts/system/memgpt_modified_chat.txt,sha256=HOaPVurEftD8KsuwsclDgE2afIfklMjxhuSO96q1-6I,4656
 letta/prompts/system/memgpt_modified_o1.txt,sha256=AxxYVjYLZwpZ6yfifh1SuPtwlJGWTcTVzw53QbkN-Ao,5492
-letta/providers.py,sha256=miNxSXDMKDZqYjrYtE2N9ecw9ltccDXSJBlJqxOhEws,19611
+letta/providers.py,sha256=0j6WPRn70WNSOjWS7smhTI3ZZOlfVAVF0ZFcrdQDmMY,25321
 letta/pytest.ini,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/schemas/agent.py,sha256=f0khTBWIRGZva4_C15Nm_tkmn1cwaVQlWa7_7laRbEE,7866
 letta/schemas/agents_tags.py,sha256=9DGr8fN2DHYdWvZ_qcXmrKI0w7YKCGz2lfEcrX2KAkI,1130
@@ -140,11 +140,11 @@ letta/schemas/letta_base.py,sha256=VP6h6mV7u2AVSrcgzTIoptORv6BqmQXkkMzb7-NlcWs,3
 letta/schemas/letta_message.py,sha256=RuVVtwFbi85yP3dXQxowofQ6cI2cO_CdGtgpHGQzgHc,6563
 letta/schemas/letta_request.py,sha256=_oiDshc_AoFWIfXRk2VX5-AxO5vDlyN-9r-gnyLj_30,1890
 letta/schemas/letta_response.py,sha256=li_j4VUF_WtxdJy7ufRmmmchzvhVmr1idbOxtgFGiy0,6253
-letta/schemas/llm_config.py,sha256=uPzFWxMqKfqiArElDHEJebjm_P7fzzO7PZkCE6QYWKo,4423
+letta/schemas/llm_config.py,sha256=RbgnCaqYd_yl-Xs7t-DEI1NhpKD8WiVWjxcwq5mZd5M,4467
 letta/schemas/memory.py,sha256=fHkJZr8CrGcHhbJlckWgfRYMhLkRliKCU-hRxqr19ks,11725
 letta/schemas/message.py,sha256=DQxnRYrYgHXpTKfMzfS-bpCAe-BO_Rmcfc1Wf-4GHjw,33703
 letta/schemas/openai/chat_completion_request.py,sha256=AOIwgbN3CZKVqkuXeMHeSa53u4h0wVq69t3T_LJ0vIE,3389
-letta/schemas/openai/chat_completion_response.py,sha256=05FRfm1EsVivyeWo2aoJk34h3W4pAb4lBCPn1eujjcw,3916
+letta/schemas/openai/chat_completion_response.py,sha256=ub-oVSyLpuJd-5_yzCSIRR8tD3GM83IeDO1c1uAATa4,3970
 letta/schemas/openai/chat_completions.py,sha256=V0ZPIIk-ds3O6MAkNHMz8zh1hqMFSPrTcYr88WDYzWE,3588
 letta/schemas/openai/embedding_response.py,sha256=WKIZpXab1Av7v6sxKG8feW3ZtpQUNosmLVSuhXYa_xU,357
 letta/schemas/openai/openai.py,sha256=Hilo5BiLAGabzxCwnwfzK5QrWqwYD8epaEKFa4Pwndk,7970
@@ -184,7 +184,7 @@ letta/server/rest_api/routers/v1/tools.py,sha256=Bkb9oKswOycj5S3fBeim7LpDrZf37Sy
 letta/server/rest_api/routers/v1/users.py,sha256=M1wEr2IyHzuRwINYxLXTkrbAH3osLe_cWjzrWrzR1aw,3729
 letta/server/rest_api/static_files.py,sha256=NG8sN4Z5EJ8JVQdj19tkFa9iQ1kBPTab9f_CUxd_u4Q,3143
 letta/server/rest_api/utils.py,sha256=GdHYCzXtbM5VCAYDPR0z5gnNZpRhwPld2BGZV7xT6cU,2924
-letta/server/server.py,sha256=RQHtJwhpCgnRSL38wF13vATDkS5thCAMbtRDwU2U-yM,79746
+letta/server/server.py,sha256=mDCLsheG5yNkAzKl6DGRcLg-HIW6CJiAi-OfCa6oyxM,80138
 letta/server/startup.sh,sha256=wTOQOJJZw_Iec57WIu0UW0AVflk0ZMWYZWg8D3T_gSQ,698
 letta/server/static_files/assets/index-3ab03d5b.css,sha256=OrA9W4iKJ5h2Wlr7GwdAT4wow0CM8hVit1yOxEL49Qw,54295
 letta/server/static_files/assets/index-9fa459a2.js,sha256=j2oMcDJO9dWJaH5e-tsflbVpWK20gLWpZKJk4-Kuy6A,1815592
@@ -201,15 +201,15 @@ letta/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/services/agents_tags_manager.py,sha256=zNqeXDpaf4dQ77jrRHiQfITdk4FawBzcND-9tWrj8gw,3127
 letta/services/organization_manager.py,sha256=OfE2_NMmhqXURX4sg7hCOiFQVQpV5ZiPu7J3sboCSYc,3555
 letta/services/source_manager.py,sha256=StX5Wfd7XSCKJet8qExIu3GMoI-eMIbEarAeTv2gq0s,6555
-letta/services/tool_manager.py,sha256=z3nnUDQWuqB5RYk_y78EvIH6SMx-KJy7qeHqclZHonw,7897
+letta/services/tool_manager.py,sha256=Vr2_JQ3TQUSPSPNbmGwY26HIFjYw0NhzJGgpMvS6GV8,8163
 letta/services/user_manager.py,sha256=UJa0hqCjz0yXtvrCR8OVBqlSR5lC_Ejn-uG__58zLds,4398
-letta/settings.py,sha256=yiYNmnYKj_BdTm0cBEIvQKYGU-lCmFntqsyVfRUy3_k,3411
+letta/settings.py,sha256=tnRUkIg1tvVtxqBUkfpjr4CNfgTun-gvOoAcrR0TJiE,3470
 letta/streaming_interface.py,sha256=_FPUWy58j50evHcpXyd7zB1wWqeCc71NCFeWh_TBvnw,15736
 letta/streaming_utils.py,sha256=329fsvj1ZN0r0LpQtmMPZ2vSxkDBIUUwvGHZFkjm2I8,11745
 letta/system.py,sha256=buKYPqG5n2x41hVmWpu6JUpyd7vTWED9Km2_M7dLrvk,6960
 letta/utils.py,sha256=SXLEYhyp3gHyIjrxNIKNZZ5ittKo3KOj6zxgC_Trex0,31012
-letta_nightly-0.5.2.dev20241118104226.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
-letta_nightly-0.5.2.dev20241118104226.dist-info/METADATA,sha256=_UTZODVLg4EJ1VdWLFwZIS6H00QBoPpxyUGOfbqrDR0,11024
-letta_nightly-0.5.2.dev20241118104226.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-letta_nightly-0.5.2.dev20241118104226.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
-letta_nightly-0.5.2.dev20241118104226.dist-info/RECORD,,
+letta_nightly-0.5.2.dev20241119104253.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+letta_nightly-0.5.2.dev20241119104253.dist-info/METADATA,sha256=JPwqaSLRgzSyHiWg1VKEDImfqPLcGjjEUV3N6sLFpe0,11024
+letta_nightly-0.5.2.dev20241119104253.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+letta_nightly-0.5.2.dev20241119104253.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
+letta_nightly-0.5.2.dev20241119104253.dist-info/RECORD,,