letta-nightly 0.4.1.dev20241008104105__py3-none-any.whl → 0.4.1.dev20241010104112__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of letta-nightly might be problematic.

Files changed (43)
  1. letta/agent.py +18 -2
  2. letta/agent_store/db.py +23 -7
  3. letta/cli/cli.py +2 -1
  4. letta/cli/cli_config.py +1 -1098
  5. letta/client/client.py +8 -1
  6. letta/client/utils.py +7 -2
  7. letta/credentials.py +2 -2
  8. letta/embeddings.py +3 -0
  9. letta/functions/schema_generator.py +1 -1
  10. letta/interface.py +6 -2
  11. letta/llm_api/anthropic.py +3 -24
  12. letta/llm_api/azure_openai.py +47 -98
  13. letta/llm_api/azure_openai_constants.py +10 -0
  14. letta/llm_api/google_ai.py +38 -63
  15. letta/llm_api/helpers.py +64 -2
  16. letta/llm_api/llm_api_tools.py +6 -15
  17. letta/llm_api/openai.py +6 -49
  18. letta/local_llm/constants.py +3 -0
  19. letta/main.py +1 -1
  20. letta/metadata.py +2 -0
  21. letta/providers.py +165 -31
  22. letta/schemas/agent.py +14 -0
  23. letta/schemas/llm_config.py +0 -3
  24. letta/schemas/openai/chat_completion_response.py +3 -0
  25. letta/schemas/tool.py +3 -3
  26. letta/server/rest_api/routers/openai/assistants/threads.py +5 -5
  27. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +2 -2
  28. letta/server/rest_api/routers/v1/agents.py +11 -11
  29. letta/server/rest_api/routers/v1/blocks.py +2 -2
  30. letta/server/rest_api/routers/v1/jobs.py +2 -2
  31. letta/server/rest_api/routers/v1/sources.py +12 -12
  32. letta/server/rest_api/routers/v1/tools.py +6 -6
  33. letta/server/server.py +26 -7
  34. letta/settings.py +3 -112
  35. letta/streaming_interface.py +8 -4
  36. {letta_nightly-0.4.1.dev20241008104105.dist-info → letta_nightly-0.4.1.dev20241010104112.dist-info}/METADATA +1 -1
  37. {letta_nightly-0.4.1.dev20241008104105.dist-info → letta_nightly-0.4.1.dev20241010104112.dist-info}/RECORD +40 -42
  38. letta/configs/anthropic.json +0 -13
  39. letta/configs/letta_hosted.json +0 -11
  40. letta/configs/openai.json +0 -12
  41. {letta_nightly-0.4.1.dev20241008104105.dist-info → letta_nightly-0.4.1.dev20241010104112.dist-info}/LICENSE +0 -0
  42. {letta_nightly-0.4.1.dev20241008104105.dist-info → letta_nightly-0.4.1.dev20241010104112.dist-info}/WHEEL +0 -0
  43. {letta_nightly-0.4.1.dev20241008104105.dist-info → letta_nightly-0.4.1.dev20241010104112.dist-info}/entry_points.txt +0 -0
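
In short: this release deletes the interactive `letta configure` wizard (roughly 1,100 lines of letta/cli/cli_config.py) and reduces the command to a deprecation stub; provider and model selection moves into the reworked provider logic (letta/providers.py, +165 -31) and the slimmed-down settings module (letta/settings.py, +3 -112). A minimal sketch of what remains of the command after this diff, reconstructed from the hunks below (the `import typer` line is assumed, since it is outside the changed hunks; the rest is taken verbatim from the diff):

    import typer

    app = typer.Typer()

    @app.command()
    def configure():
        """Updates default Letta configurations"""
        # All provider/model/embedding/storage prompts were removed;
        # the command now just redirects users to `letta run`.
        print("`letta configure` has been deprecated. Please see documentation on configuration, and run `letta run` instead.")

Per that message, configuration now happens at `letta run` time; the removed wizard read keys such as OPENAI_API_KEY from the environment, and the expanded provider code presumably continues to detect providers the same way.
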
letta/cli/cli_config.py CHANGED
@@ -1,5 +1,4 @@
  import ast
- import builtins
  import os
  from enum import Enum
  from typing import Annotated, List, Optional
@@ -10,1113 +9,17 @@ from prettytable.colortable import ColorTable, Themes
  from tqdm import tqdm

  from letta import utils
- from letta.config import LettaConfig
- from letta.constants import LETTA_DIR, LLM_MAX_TOKENS
- from letta.credentials import SUPPORTED_AUTH_TYPES, LettaCredentials
- from letta.llm_api.anthropic import (
-     anthropic_get_model_list,
-     antropic_get_model_context_window,
- )
- from letta.llm_api.azure_openai import azure_openai_get_model_list
- from letta.llm_api.cohere import (
-     COHERE_VALID_MODEL_LIST,
-     cohere_get_model_context_window,
-     cohere_get_model_list,
- )
- from letta.llm_api.google_ai import (
-     google_ai_get_model_context_window,
-     google_ai_get_model_list,
- )
- from letta.llm_api.llm_api_tools import LLM_API_PROVIDER_OPTIONS
- from letta.llm_api.openai import openai_get_model_list
- from letta.local_llm.constants import (
-     DEFAULT_ENDPOINTS,
-     DEFAULT_OLLAMA_MODEL,
-     DEFAULT_WRAPPER_NAME,
- )
- from letta.local_llm.utils import get_available_wrappers
- from letta.server.utils import shorten_key_middle

  app = typer.Typer()


- def get_azure_credentials():
-     creds = dict(
-         azure_key=os.getenv("AZURE_OPENAI_KEY"),
-         azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
-         azure_version=os.getenv("AZURE_OPENAI_VERSION"),
-         azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT"),
-         azure_embedding_deployment=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT"),
-     )
-     # embedding endpoint and version default to non-embedding
-     creds["azure_embedding_endpoint"] = os.getenv("AZURE_OPENAI_EMBEDDING_ENDPOINT", creds["azure_endpoint"])
-     creds["azure_embedding_version"] = os.getenv("AZURE_OPENAI_EMBEDDING_VERSION", creds["azure_version"])
-     return creds
-
-
- def get_openai_credentials() -> Optional[str]:
-     openai_key = os.getenv("OPENAI_API_KEY", None)
-     return openai_key
-
-
- def get_google_ai_credentials() -> Optional[str]:
-     google_ai_key = os.getenv("GOOGLE_AI_API_KEY", None)
-     return google_ai_key
-
-
- def configure_llm_endpoint(config: LettaConfig, credentials: LettaCredentials):
-     # configure model endpoint
-     model_endpoint_type, model_endpoint = None, None
-
-     # get default
-     default_model_endpoint_type = None
-     if (
-         config.default_llm_config
-         and config.default_llm_config.model_endpoint_type is not None
-         and config.default_llm_config.model_endpoint_type not in [provider for provider in LLM_API_PROVIDER_OPTIONS if provider != "local"]
-     ): # local model
-         default_model_endpoint_type = "local"
-
-     provider = questionary.select(
-         "Select LLM inference provider:",
-         choices=LLM_API_PROVIDER_OPTIONS,
-         default=default_model_endpoint_type,
-     ).ask()
-     if provider is None:
-         raise KeyboardInterrupt
-
-     # set: model_endpoint_type, model_endpoint
-     if provider == "openai":
-         # check for key
-         if credentials.openai_key is None:
-             # allow key to get pulled from env vars
-             openai_api_key = os.getenv("OPENAI_API_KEY", None)
-             # if we still can't find it, ask for it as input
-             if openai_api_key is None:
-                 while openai_api_key is None or len(openai_api_key) == 0:
-                     # Ask for API key as input
-                     openai_api_key = questionary.password(
-                         "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):"
-                     ).ask()
-                     if openai_api_key is None:
-                         raise KeyboardInterrupt
-             credentials.openai_key = openai_api_key
-             credentials.save()
-         else:
-             # Give the user an opportunity to overwrite the key
-             openai_api_key = None
-             default_input = (
-                 shorten_key_middle(credentials.openai_key) if credentials.openai_key.startswith("sk-") else credentials.openai_key
-             )
-             openai_api_key = questionary.password(
-                 "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):",
-                 default=default_input,
-             ).ask()
-             if openai_api_key is None:
-                 raise KeyboardInterrupt
-             # If the user modified it, use the new one
-             if openai_api_key != default_input:
-                 credentials.openai_key = openai_api_key
-                 credentials.save()
-
-         model_endpoint_type = "openai"
-         model_endpoint = "https://api.openai.com/v1"
-         model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask()
-         if model_endpoint is None:
-             raise KeyboardInterrupt
-
-     elif provider == "groq":
-         groq_user_msg = "Enter your Groq API key (starts with 'gsk-', see https://console.groq.com/keys):"
-         # check for key
-         if credentials.groq_key is None:
-             # allow key to get pulled from env vars
-             groq_api_key = os.getenv("GROQ_API_KEY", None)
-             # if we still can't find it, ask for it as input
-             if groq_api_key is None:
-                 while groq_api_key is None or len(groq_api_key) == 0:
-                     # Ask for API key as input
-                     groq_api_key = questionary.password(groq_user_msg).ask()
-                     if groq_api_key is None:
-                         raise KeyboardInterrupt
-             credentials.groq_key = groq_api_key
-             credentials.save()
-         else:
-             # Give the user an opportunity to overwrite the key
-             default_input = shorten_key_middle(credentials.groq_key) if credentials.groq_key.startswith("gsk-") else credentials.groq_key
-             groq_api_key = questionary.password(
-                 groq_user_msg,
-                 default=default_input,
-             ).ask()
-             if groq_api_key is None:
-                 raise KeyboardInterrupt
-             # If the user modified it, use the new one
-             if groq_api_key != default_input:
-                 credentials.groq_key = groq_api_key
-                 credentials.save()
-
-         model_endpoint_type = "groq"
-         model_endpoint = "https://api.groq.com/openai/v1"
-         model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask()
-         if model_endpoint is None:
-             raise KeyboardInterrupt
-
-     elif provider == "azure":
-         # check for necessary vars
-         azure_creds = get_azure_credentials()
-         if not all([azure_creds["azure_key"], azure_creds["azure_endpoint"], azure_creds["azure_version"]]):
-             raise ValueError(
-                 "Missing environment variables for Azure (see https://letta.readme.io/docs/endpoints#azure-openai). Please set then run `letta configure` again."
-             )
-         else:
-             credentials.azure_key = azure_creds["azure_key"]
-             credentials.azure_version = azure_creds["azure_version"]
-             credentials.azure_endpoint = azure_creds["azure_endpoint"]
-             if "azure_deployment" in azure_creds:
-                 credentials.azure_deployment = azure_creds["azure_deployment"]
-             credentials.azure_embedding_version = azure_creds["azure_embedding_version"]
-             credentials.azure_embedding_endpoint = azure_creds["azure_embedding_endpoint"]
-             if "azure_embedding_deployment" in azure_creds:
-                 credentials.azure_embedding_deployment = azure_creds["azure_embedding_deployment"]
-             credentials.save()
-
-         model_endpoint_type = "azure"
-         model_endpoint = azure_creds["azure_endpoint"]
-
-     elif provider == "google_ai":
-
-         # check for key
-         if credentials.google_ai_key is None:
-             # allow key to get pulled from env vars
-             google_ai_key = get_google_ai_credentials()
-             # if we still can't find it, ask for it as input
-             if google_ai_key is None:
-                 while google_ai_key is None or len(google_ai_key) == 0:
-                     # Ask for API key as input
-                     google_ai_key = questionary.password(
-                         "Enter your Google AI (Gemini) API key (see https://aistudio.google.com/app/apikey):"
-                     ).ask()
-                     if google_ai_key is None:
-                         raise KeyboardInterrupt
-             credentials.google_ai_key = google_ai_key
-         else:
-             # Give the user an opportunity to overwrite the key
-             google_ai_key = None
-             default_input = shorten_key_middle(credentials.google_ai_key)
-
-             google_ai_key = questionary.password(
-                 "Enter your Google AI (Gemini) API key (see https://aistudio.google.com/app/apikey):",
-                 default=default_input,
-             ).ask()
-             if google_ai_key is None:
-                 raise KeyboardInterrupt
-             # If the user modified it, use the new one
-             if google_ai_key != default_input:
-                 credentials.google_ai_key = google_ai_key
-
-         default_input = os.getenv("GOOGLE_AI_SERVICE_ENDPOINT", None)
-         if default_input is None:
-             default_input = "generativelanguage"
-         google_ai_service_endpoint = questionary.text(
-             "Enter your Google AI (Gemini) service endpoint (see https://ai.google.dev/api/rest):",
-             default=default_input,
-         ).ask()
-         credentials.google_ai_service_endpoint = google_ai_service_endpoint
-
-         # write out the credentials
-         credentials.save()
-
-         model_endpoint_type = "google_ai"
-
-     elif provider == "anthropic":
-         # check for key
-         if credentials.anthropic_key is None:
-             # allow key to get pulled from env vars
-             anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", None)
-             # if we still can't find it, ask for it as input
-             if anthropic_api_key is None:
-                 while anthropic_api_key is None or len(anthropic_api_key) == 0:
-                     # Ask for API key as input
-                     anthropic_api_key = questionary.password(
-                         "Enter your Anthropic API key (starts with 'sk-', see https://console.anthropic.com/settings/keys):"
-                     ).ask()
-                     if anthropic_api_key is None:
-                         raise KeyboardInterrupt
-             credentials.anthropic_key = anthropic_api_key
-             credentials.save()
-         else:
-             # Give the user an opportunity to overwrite the key
-             anthropic_api_key = None
-             default_input = (
-                 shorten_key_middle(credentials.anthropic_key) if credentials.anthropic_key.startswith("sk-") else credentials.anthropic_key
-             )
-             anthropic_api_key = questionary.password(
-                 "Enter your Anthropic API key (starts with 'sk-', see https://console.anthropic.com/settings/keys):",
-                 default=default_input,
-             ).ask()
-             if anthropic_api_key is None:
-                 raise KeyboardInterrupt
-             # If the user modified it, use the new one
-             if anthropic_api_key != default_input:
-                 credentials.anthropic_key = anthropic_api_key
-                 credentials.save()
-
-         model_endpoint_type = "anthropic"
-         model_endpoint = "https://api.anthropic.com/v1"
-         model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask()
-         if model_endpoint is None:
-             raise KeyboardInterrupt
-         provider = "anthropic"
-
-     elif provider == "cohere":
-         # check for key
-         if credentials.cohere_key is None:
-             # allow key to get pulled from env vars
-             cohere_api_key = os.getenv("COHERE_API_KEY", None)
-             # if we still can't find it, ask for it as input
-             if cohere_api_key is None:
-                 while cohere_api_key is None or len(cohere_api_key) == 0:
-                     # Ask for API key as input
-                     cohere_api_key = questionary.password("Enter your Cohere API key (see https://dashboard.cohere.com/api-keys):").ask()
-                     if cohere_api_key is None:
-                         raise KeyboardInterrupt
-             credentials.cohere_key = cohere_api_key
-             credentials.save()
-         else:
-             # Give the user an opportunity to overwrite the key
-             cohere_api_key = None
-             default_input = (
-                 shorten_key_middle(credentials.cohere_key) if credentials.cohere_key.startswith("sk-") else credentials.cohere_key
-             )
-             cohere_api_key = questionary.password(
-                 "Enter your Cohere API key (see https://dashboard.cohere.com/api-keys):",
-                 default=default_input,
-             ).ask()
-             if cohere_api_key is None:
-                 raise KeyboardInterrupt
-             # If the user modified it, use the new one
-             if cohere_api_key != default_input:
-                 credentials.cohere_key = cohere_api_key
-                 credentials.save()
-
-         model_endpoint_type = "cohere"
-         model_endpoint = "https://api.cohere.ai/v1"
-         model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask()
-         if model_endpoint is None:
-             raise KeyboardInterrupt
-         provider = "cohere"
-
-     else: # local models
-         # backend_options_old = ["webui", "webui-legacy", "llamacpp", "koboldcpp", "ollama", "lmstudio", "lmstudio-legacy", "vllm", "openai"]
-         backend_options = builtins.list(DEFAULT_ENDPOINTS.keys())
-         # assert backend_options_old == backend_options, (backend_options_old, backend_options)
-         default_model_endpoint_type = None
-         if config.default_llm_config and config.default_llm_config.model_endpoint_type in backend_options:
-             # set from previous config
-             default_model_endpoint_type = config.default_llm_config.model_endpoint_type
-         model_endpoint_type = questionary.select(
-             "Select LLM backend (select 'openai' if you have an OpenAI compatible proxy):",
-             backend_options,
-             default=default_model_endpoint_type,
-         ).ask()
-         if model_endpoint_type is None:
-             raise KeyboardInterrupt
-
-         # set default endpoint
-         # if OPENAI_API_BASE is set, assume that this is the IP+port the user wanted to use
-         default_model_endpoint = os.getenv("OPENAI_API_BASE")
-         # if OPENAI_API_BASE is not set, try to pull a default IP+port format from a hardcoded set
-         if default_model_endpoint is None:
-             if model_endpoint_type in DEFAULT_ENDPOINTS:
-                 default_model_endpoint = DEFAULT_ENDPOINTS[model_endpoint_type]
-                 model_endpoint = questionary.text("Enter default endpoint:", default=default_model_endpoint).ask()
-                 if model_endpoint is None:
-                     raise KeyboardInterrupt
-                 while not utils.is_valid_url(model_endpoint):
-                     typer.secho(f"Endpoint must be a valid address", fg=typer.colors.YELLOW)
-                     model_endpoint = questionary.text("Enter default endpoint:", default=default_model_endpoint).ask()
-                     if model_endpoint is None:
-                         raise KeyboardInterrupt
-             elif config.default_llm_config and config.default_llm_config.model_endpoint:
-                 model_endpoint = questionary.text("Enter default endpoint:", default=config.default_llm_config.model_endpoint).ask()
-                 if model_endpoint is None:
-                     raise KeyboardInterrupt
-                 while not utils.is_valid_url(model_endpoint):
-                     typer.secho(f"Endpoint must be a valid address", fg=typer.colors.YELLOW)
-                     model_endpoint = questionary.text("Enter default endpoint:", default=config.default_llm_config.model_endpoint).ask()
-                     if model_endpoint is None:
-                         raise KeyboardInterrupt
-             else:
-                 # default_model_endpoint = None
-                 model_endpoint = None
-                 model_endpoint = questionary.text("Enter default endpoint:").ask()
-                 if model_endpoint is None:
-                     raise KeyboardInterrupt
-                 while not utils.is_valid_url(model_endpoint):
-                     typer.secho(f"Endpoint must be a valid address", fg=typer.colors.YELLOW)
-                     model_endpoint = questionary.text("Enter default endpoint:").ask()
-                     if model_endpoint is None:
-                         raise KeyboardInterrupt
-         else:
-             model_endpoint = default_model_endpoint
-             assert model_endpoint, f"Environment variable OPENAI_API_BASE must be set."
-
-     return model_endpoint_type, model_endpoint
-
-
- def get_model_options(
-     credentials: LettaCredentials,
-     model_endpoint_type: str,
-     model_endpoint: str,
-     filter_list: bool = True,
-     filter_prefix: str = "gpt-",
- ) -> list:
-     try:
-         if model_endpoint_type == "openai":
-             if credentials.openai_key is None:
-                 raise ValueError("Missing OpenAI API key")
-             fetched_model_options_response = openai_get_model_list(url=model_endpoint, api_key=credentials.openai_key)
-
-             # Filter the list for "gpt" models only
-             if filter_list:
-                 model_options = [obj["id"] for obj in fetched_model_options_response["data"] if obj["id"].startswith(filter_prefix)]
-             else:
-                 model_options = [obj["id"] for obj in fetched_model_options_response["data"]]
-
-         elif model_endpoint_type == "azure":
-             if credentials.azure_key is None:
-                 raise ValueError("Missing Azure key")
-             if credentials.azure_version is None:
-                 raise ValueError("Missing Azure version")
-             fetched_model_options_response = azure_openai_get_model_list(
-                 url=model_endpoint, api_key=credentials.azure_key, api_version=credentials.azure_version
-             )
-
-             # Filter the list for "gpt" models only
-             if filter_list:
-                 model_options = [obj["id"] for obj in fetched_model_options_response["data"] if obj["id"].startswith(filter_prefix)]
-             else:
-                 model_options = [obj["id"] for obj in fetched_model_options_response["data"]]
-
-         elif model_endpoint_type == "google_ai":
-             if credentials.google_ai_key is None:
-                 raise ValueError("Missing Google AI API key")
-             if credentials.google_ai_service_endpoint is None:
-                 raise ValueError("Missing Google AI service endpoint")
-             model_options = google_ai_get_model_list(
-                 service_endpoint=credentials.google_ai_service_endpoint, api_key=credentials.google_ai_key
-             )
-             model_options = [str(m["name"]) for m in model_options]
-             model_options = [mo[len("models/") :] if mo.startswith("models/") else mo for mo in model_options]
-
-             # TODO remove manual filtering for gemini-pro
-             model_options = [mo for mo in model_options if str(mo).startswith("gemini") and "-pro" in str(mo)]
-             # model_options = ["gemini-pro"]
-
-         elif model_endpoint_type == "anthropic":
-             if credentials.anthropic_key is None:
-                 raise ValueError("Missing Anthropic API key")
-             fetched_model_options = anthropic_get_model_list(url=model_endpoint, api_key=credentials.anthropic_key)
-             model_options = [obj["name"] for obj in fetched_model_options]
-
-         elif model_endpoint_type == "cohere":
-             if credentials.cohere_key is None:
-                 raise ValueError("Missing Cohere API key")
-             fetched_model_options = cohere_get_model_list(url=model_endpoint, api_key=credentials.cohere_key)
-             model_options = [obj for obj in fetched_model_options]
-
-         elif model_endpoint_type == "groq":
-             if credentials.groq_key is None:
-                 raise ValueError("Missing Groq API key")
-             fetched_model_options_response = openai_get_model_list(url=model_endpoint, api_key=credentials.groq_key, fix_url=True)
-             model_options = [obj["id"] for obj in fetched_model_options_response["data"]]
-
-         else:
-             # Attempt to do OpenAI endpoint style model fetching
-             # TODO support local auth with api-key header
-             if credentials.openllm_auth_type == "bearer_token":
-                 api_key = credentials.openllm_key
-             else:
-                 api_key = None
-             fetched_model_options_response = openai_get_model_list(url=model_endpoint, api_key=api_key, fix_url=True)
-             model_options = [obj["id"] for obj in fetched_model_options_response["data"]]
-             # NOTE no filtering of local model options
-
-         # list
-         return model_options
-
-     except:
-         raise Exception(f"Failed to get model list from {model_endpoint}")
-
-
- def configure_model(config: LettaConfig, credentials: LettaCredentials, model_endpoint_type: str, model_endpoint: str):
-     # set: model, model_wrapper
-     model, model_wrapper = None, None
-     if model_endpoint_type == "openai" or model_endpoint_type == "azure":
-         # Get the model list from the openai / azure endpoint
-         hardcoded_model_options = ["gpt-4", "gpt-4-32k", "gpt-4-1106-preview", "gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
-         fetched_model_options = []
-         try:
-             fetched_model_options = get_model_options(
-                 credentials=credentials, model_endpoint_type=model_endpoint_type, model_endpoint=model_endpoint
-             )
-         except Exception as e:
-             # NOTE: if this fails, it means the user's key is probably bad
-             typer.secho(
-                 f"Failed to get model list from {model_endpoint} - make sure your API key and endpoints are correct!", fg=typer.colors.RED
-             )
-             raise e
-
-         # First ask if the user wants to see the full model list (some may be incompatible)
-         see_all_option_str = "[see all options]"
-         other_option_str = "[enter model name manually]"
-
-         # Check if the model we have set already is even in the list (informs our default)
-         valid_model = config.default_llm_config and config.default_llm_config.model in hardcoded_model_options
-         model = questionary.select(
-             "Select default model (recommended: gpt-4):",
-             choices=hardcoded_model_options + [see_all_option_str, other_option_str],
-             default=config.default_llm_config.model if valid_model else hardcoded_model_options[0],
-         ).ask()
-         if model is None:
-             raise KeyboardInterrupt
-
-         # If the user asked for the full list, show it
-         if model == see_all_option_str:
-             typer.secho(f"Warning: not all models shown are guaranteed to work with Letta", fg=typer.colors.RED)
-             model = questionary.select(
-                 "Select default model (recommended: gpt-4):",
-                 choices=fetched_model_options + [other_option_str],
-                 default=config.default_llm_config.model if (valid_model and config.default_llm_config) else fetched_model_options[0],
-             ).ask()
-             if model is None:
-                 raise KeyboardInterrupt
-
-         # Finally if the user asked to manually input, allow it
-         if model == other_option_str:
-             model = ""
-             while len(model) == 0:
-                 model = questionary.text(
-                     "Enter custom model name:",
-                 ).ask()
-                 if model is None:
-                     raise KeyboardInterrupt
-
-     elif model_endpoint_type == "google_ai":
-         try:
-             fetched_model_options = get_model_options(
-                 credentials=credentials, model_endpoint_type=model_endpoint_type, model_endpoint=model_endpoint
-             )
-         except Exception as e:
-             # NOTE: if this fails, it means the user's key is probably bad
-             typer.secho(
-                 f"Failed to get model list from {model_endpoint} - make sure your API key and endpoints are correct!", fg=typer.colors.RED
-             )
-             raise e
-
-         model = questionary.select(
-             "Select default model:",
-             choices=fetched_model_options,
-             default=fetched_model_options[0],
-         ).ask()
-         if model is None:
-             raise KeyboardInterrupt
-
-     elif model_endpoint_type == "anthropic":
-         try:
-             fetched_model_options = get_model_options(
-                 credentials=credentials, model_endpoint_type=model_endpoint_type, model_endpoint=model_endpoint
-             )
-         except Exception as e:
-             # NOTE: if this fails, it means the user's key is probably bad
-             typer.secho(
-                 f"Failed to get model list from {model_endpoint} - make sure your API key and endpoints are correct!", fg=typer.colors.RED
-             )
-             raise e
-
-         model = questionary.select(
-             "Select default model:",
-             choices=fetched_model_options,
-             default=fetched_model_options[0],
-         ).ask()
-         if model is None:
-             raise KeyboardInterrupt
-
-     elif model_endpoint_type == "cohere":
-
-         fetched_model_options = []
-         try:
-             fetched_model_options = get_model_options(
-                 credentials=credentials, model_endpoint_type=model_endpoint_type, model_endpoint=model_endpoint
-             )
-         except Exception as e:
-             # NOTE: if this fails, it means the user's key is probably bad
-             typer.secho(
-                 f"Failed to get model list from {model_endpoint} - make sure your API key and endpoints are correct!", fg=typer.colors.RED
-             )
-             raise e
-
-         fetched_model_options = [m["name"] for m in fetched_model_options]
-         hardcoded_model_options = [m for m in fetched_model_options if m in COHERE_VALID_MODEL_LIST]
-
-         # First ask if the user wants to see the full model list (some may be incompatible)
-         see_all_option_str = "[see all options]"
-         other_option_str = "[enter model name manually]"
-
-         # Check if the model we have set already is even in the list (informs our default)
-         valid_model = config.default_llm_config.model in hardcoded_model_options
-         model = questionary.select(
-             "Select default model (recommended: command-r-plus):",
-             choices=hardcoded_model_options + [see_all_option_str, other_option_str],
-             default=config.default_llm_config.model if valid_model else hardcoded_model_options[0],
-         ).ask()
-         if model is None:
-             raise KeyboardInterrupt
-
-         # If the user asked for the full list, show it
-         if model == see_all_option_str:
-             typer.secho(f"Warning: not all models shown are guaranteed to work with Letta", fg=typer.colors.RED)
-             model = questionary.select(
-                 "Select default model (recommended: command-r-plus):",
-                 choices=fetched_model_options + [other_option_str],
-                 default=config.default_llm_config.model if valid_model else fetched_model_options[0],
-             ).ask()
-             if model is None:
-                 raise KeyboardInterrupt
-
-         # Finally if the user asked to manually input, allow it
-         if model == other_option_str:
-             model = ""
-             while len(model) == 0:
-                 model = questionary.text(
-                     "Enter custom model name:",
-                 ).ask()
-                 if model is None:
-                     raise KeyboardInterrupt
-
-     # Groq support via /chat/completions + function calling endpoints
-     elif model_endpoint_type == "groq":
-         try:
-             fetched_model_options = get_model_options(
-                 credentials=credentials, model_endpoint_type=model_endpoint_type, model_endpoint=model_endpoint
-             )
-
-         except Exception as e:
-             # NOTE: if this fails, it means the user's key is probably bad
-             typer.secho(
-                 f"Failed to get model list from {model_endpoint} - make sure your API key and endpoints are correct!", fg=typer.colors.RED
-             )
-             raise e
-
-         model = questionary.select(
-             "Select default model:",
-             choices=fetched_model_options,
-             default=fetched_model_options[0],
-         ).ask()
-         if model is None:
-             raise KeyboardInterrupt
-
-     else: # local models
-
-         # ask about local auth
-         if model_endpoint_type in ["groq-chat-compltions"]: # TODO all llm engines under 'local' that will require api keys
-             use_local_auth = True
-             local_auth_type = "bearer_token"
-             local_auth_key = questionary.password(
-                 "Enter your Groq API key:",
-             ).ask()
-             if local_auth_key is None:
-                 raise KeyboardInterrupt
-             credentials.openllm_auth_type = local_auth_type
-             credentials.openllm_key = local_auth_key
-             credentials.save()
-         else:
-             use_local_auth = questionary.confirm(
-                 "Is your LLM endpoint authenticated? (default no)",
-                 default=False,
-             ).ask()
-             if use_local_auth is None:
-                 raise KeyboardInterrupt
-             if use_local_auth:
-                 local_auth_type = questionary.select(
-                     "What HTTP authentication method does your endpoint require?",
-                     choices=SUPPORTED_AUTH_TYPES,
-                     default=SUPPORTED_AUTH_TYPES[0],
-                 ).ask()
-                 if local_auth_type is None:
-                     raise KeyboardInterrupt
-                 local_auth_key = questionary.password(
-                     "Enter your authentication key:",
-                 ).ask()
-                 if local_auth_key is None:
-                     raise KeyboardInterrupt
-                 # credentials = LettaCredentials.load()
-                 credentials.openllm_auth_type = local_auth_type
-                 credentials.openllm_key = local_auth_key
-                 credentials.save()
-
-         # ollama also needs model type
-         if model_endpoint_type == "ollama":
-             default_model = (
-                 config.default_llm_config.model
-                 if config.default_llm_config and config.default_llm_config.model_endpoint_type == "ollama"
-                 else DEFAULT_OLLAMA_MODEL
-             )
-             model = questionary.text(
-                 "Enter default model name (required for Ollama, see: https://letta.readme.io/docs/ollama):",
-                 default=default_model,
-             ).ask()
-             if model is None:
-                 raise KeyboardInterrupt
-             model = None if len(model) == 0 else model
-
-         default_model = (
-             config.default_llm_config.model if config.default_llm_config and config.default_llm_config.model_endpoint_type == "vllm" else ""
-         )
-
-         # vllm needs huggingface model tag
-         if model_endpoint_type in ["vllm", "groq"]:
-             try:
-                 # Don't filter model list for vLLM since model list is likely much smaller than OpenAI/Azure endpoint
-                 # + probably has custom model names
-                 # TODO support local auth
-                 model_options = get_model_options(
-                     credentials=credentials, model_endpoint_type=model_endpoint_type, model_endpoint=model_endpoint
-                 )
-             except:
-                 print(f"Failed to get model list from {model_endpoint}, using defaults")
-                 model_options = None
-
-             # If we got model options from vLLM endpoint, allow selection + custom input
-             if model_options is not None:
-                 other_option_str = "other (enter name)"
-                 valid_model = config.default_llm_config.model in model_options
-                 model_options.append(other_option_str)
-                 model = questionary.select(
-                     "Select default model:",
-                     choices=model_options,
-                     default=config.default_llm_config.model if valid_model else model_options[0],
-                 ).ask()
-                 if model is None:
-                     raise KeyboardInterrupt
-
-                 # If we got custom input, ask for raw input
-                 if model == other_option_str:
-                     model = questionary.text(
-                         "Enter HuggingFace model tag (e.g. ehartford/dolphin-2.2.1-mistral-7b):",
-                         default=default_model,
-                     ).ask()
-                     if model is None:
-                         raise KeyboardInterrupt
-                     # TODO allow empty string for input?
-                     model = None if len(model) == 0 else model
-
-             else:
-                 model = questionary.text(
-                     "Enter HuggingFace model tag (e.g. ehartford/dolphin-2.2.1-mistral-7b):",
-                     default=default_model,
-                 ).ask()
-                 if model is None:
-                     raise KeyboardInterrupt
-                 model = None if len(model) == 0 else model
-
-         # model wrapper
-         available_model_wrappers = builtins.list(get_available_wrappers().keys())
-         model_wrapper = questionary.select(
-             f"Select default model wrapper (recommended: {DEFAULT_WRAPPER_NAME}):",
-             choices=available_model_wrappers,
-             default=DEFAULT_WRAPPER_NAME,
-         ).ask()
-         if model_wrapper is None:
-             raise KeyboardInterrupt
-
-     # set: context_window
-     if str(model) not in LLM_MAX_TOKENS:
-
-         context_length_options = [
-             str(2**12), # 4096
-             str(2**13), # 8192
-             str(2**14), # 16384
-             str(2**15), # 32768
-             str(2**18), # 262144
-             "custom", # enter yourself
-         ]
-
-         if model_endpoint_type == "google_ai":
-             try:
-                 fetched_context_window = str(
-                     google_ai_get_model_context_window(
-                         service_endpoint=credentials.google_ai_service_endpoint, api_key=credentials.google_ai_key, model=model
-                     )
-                 )
-                 print(f"Got context window {fetched_context_window} for model {model} (from Google API)")
-                 context_length_options = [
-                     fetched_context_window,
-                     "custom",
-                 ]
-             except Exception as e:
-                 print(f"Failed to get model details for model '{model}' on Google AI API ({str(e)})")
-
-             context_window_input = questionary.select(
-                 "Select your model's context window (see https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning#gemini-model-versions):",
-                 choices=context_length_options,
-                 default=context_length_options[0],
-             ).ask()
-             if context_window_input is None:
-                 raise KeyboardInterrupt
-
-         elif model_endpoint_type == "anthropic":
-             try:
-                 fetched_context_window = str(
-                     antropic_get_model_context_window(url=model_endpoint, api_key=credentials.anthropic_key, model=model)
-                 )
-                 print(f"Got context window {fetched_context_window} for model {model}")
-                 context_length_options = [
-                     fetched_context_window,
-                     "custom",
-                 ]
-             except Exception as e:
-                 print(f"Failed to get model details for model '{model}' ({str(e)})")
-
-             context_window_input = questionary.select(
-                 "Select your model's context window (see https://docs.anthropic.com/claude/docs/models-overview):",
-                 choices=context_length_options,
-                 default=context_length_options[0],
-             ).ask()
-             if context_window_input is None:
-                 raise KeyboardInterrupt
-
-         elif model_endpoint_type == "cohere":
-             try:
-                 fetched_context_window = str(
-                     cohere_get_model_context_window(url=model_endpoint, api_key=credentials.cohere_key, model=model)
-                 )
-                 print(f"Got context window {fetched_context_window} for model {model}")
-                 context_length_options = [
-                     fetched_context_window,
-                     "custom",
-                 ]
-             except Exception as e:
-                 print(f"Failed to get model details for model '{model}' ({str(e)})")
-
-             context_window_input = questionary.select(
-                 "Select your model's context window (see https://docs.cohere.com/docs/command-r):",
-                 choices=context_length_options,
-                 default=context_length_options[0],
-             ).ask()
-             if context_window_input is None:
-                 raise KeyboardInterrupt
-
-         else:
-
-             # Ask the user to specify the context length
-             context_window_input = questionary.select(
-                 "Select your model's context window (for Mistral 7B models, this is probably 8k / 8192):",
-                 choices=context_length_options,
-                 default=str(LLM_MAX_TOKENS["DEFAULT"]),
-             ).ask()
-             if context_window_input is None:
-                 raise KeyboardInterrupt
-
-         # If custom, ask for input
-         if context_window_input == "custom":
-             while True:
-                 context_window_input = questionary.text("Enter context window (e.g. 8192)").ask()
-                 if context_window_input is None:
-                     raise KeyboardInterrupt
-                 try:
-                     context_window = int(context_window_input)
-                     break
-                 except ValueError:
-                     print(f"Context window must be a valid integer")
-         else:
-             context_window = int(context_window_input)
-     else:
-         # Pull the context length from the models
-         context_window = int(LLM_MAX_TOKENS[str(model)])
-     return model, model_wrapper, context_window
-
-
- def configure_embedding_endpoint(config: LettaConfig, credentials: LettaCredentials):
-     # configure embedding endpoint
-
-     default_embedding_endpoint_type = None
-
-     embedding_endpoint_type, embedding_endpoint, embedding_dim, embedding_model = None, None, None, None
-     embedding_provider = questionary.select(
-         "Select embedding provider:", choices=["openai", "azure", "hugging-face", "local"], default=default_embedding_endpoint_type
-     ).ask()
-     if embedding_provider is None:
-         raise KeyboardInterrupt
-
-     if embedding_provider == "openai":
-         # check for key
-         if credentials.openai_key is None:
-             # allow key to get pulled from env vars
-             openai_api_key = os.getenv("OPENAI_API_KEY", None)
-             if openai_api_key is None:
-                 # if we still can't find it, ask for it as input
-                 while openai_api_key is None or len(openai_api_key) == 0:
-                     # Ask for API key as input
-                     openai_api_key = questionary.password(
-                         "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):"
-                     ).ask()
-                     if openai_api_key is None:
-                         raise KeyboardInterrupt
-             credentials.openai_key = openai_api_key
-             credentials.save()
-
-         embedding_endpoint_type = "openai"
-         embedding_endpoint = "https://api.openai.com/v1"
-         embedding_dim = 1536
-         embedding_model = "text-embedding-ada-002"
-
-     elif embedding_provider == "azure":
-         # check for necessary vars
-         azure_creds = get_azure_credentials()
-         if not all([azure_creds["azure_key"], azure_creds["azure_embedding_endpoint"], azure_creds["azure_embedding_version"]]):
-             raise ValueError(
-                 "Missing environment variables for Azure (see https://letta.readme.io/docs/endpoints#azure-openai). Please set then run `letta configure` again."
-             )
-         credentials.azure_key = azure_creds["azure_key"]
-         credentials.azure_version = azure_creds["azure_version"]
-         credentials.azure_embedding_endpoint = azure_creds["azure_embedding_endpoint"]
-         credentials.save()
-
-         embedding_endpoint_type = "azure"
-         embedding_endpoint = azure_creds["azure_embedding_endpoint"]
-         embedding_dim = 1536
-         embedding_model = "text-embedding-ada-002"
-
-     elif embedding_provider == "hugging-face":
-         # configure hugging face embedding endpoint (https://github.com/huggingface/text-embeddings-inference)
-         # supports custom model/endpoints
-         embedding_endpoint_type = "hugging-face"
-         embedding_endpoint = None
-
-         # get endpoint
-         embedding_endpoint = questionary.text("Enter default endpoint:").ask()
-         if embedding_endpoint is None:
-             raise KeyboardInterrupt
-         while not utils.is_valid_url(embedding_endpoint):
-             typer.secho(f"Endpoint must be a valid address", fg=typer.colors.YELLOW)
-             embedding_endpoint = questionary.text("Enter default endpoint:").ask()
-             if embedding_endpoint is None:
-                 raise KeyboardInterrupt
-
-         # get model type
-         default_embedding_model = "BAAI/bge-large-en-v1.5"
-         embedding_model = questionary.text(
-             "Enter HuggingFace model tag (e.g. BAAI/bge-large-en-v1.5):",
-             default=default_embedding_model,
-         ).ask()
-         if embedding_model is None:
-             raise KeyboardInterrupt
-
-         # get model dimentions
-         default_embedding_dim = "1024"
-         embedding_dim = questionary.text("Enter embedding model dimentions (e.g. 1024):", default=str(default_embedding_dim)).ask()
-         if embedding_dim is None:
-             raise KeyboardInterrupt
-         try:
-             embedding_dim = int(embedding_dim)
-         except Exception:
-             raise ValueError(f"Failed to cast {embedding_dim} to integer.")
-     elif embedding_provider == "ollama":
-         # configure ollama embedding endpoint
-         embedding_endpoint_type = "ollama"
-         embedding_endpoint = "http://localhost:11434/api/embeddings"
-         # Source: https://github.com/ollama/ollama/blob/main/docs/api.md#generate-embeddings:~:text=http%3A//localhost%3A11434/api/embeddings
-
-         # get endpoint (is this necessary?)
-         embedding_endpoint = questionary.text("Enter Ollama API endpoint:").ask()
-         if embedding_endpoint is None:
-             raise KeyboardInterrupt
-         while not utils.is_valid_url(embedding_endpoint):
-             typer.secho(f"Endpoint must be a valid address", fg=typer.colors.YELLOW)
-             embedding_endpoint = questionary.text("Enter Ollama API endpoint:").ask()
-             if embedding_endpoint is None:
-                 raise KeyboardInterrupt
-
-         # get model type
-         default_embedding_model = "mxbai-embed-large"
-         embedding_model = questionary.text(
-             "Enter Ollama model tag (e.g. mxbai-embed-large):",
-             default=default_embedding_model,
-         ).ask()
-         if embedding_model is None:
-             raise KeyboardInterrupt
-
-         # get model dimensions
-         default_embedding_dim = "512"
-         embedding_dim = questionary.text("Enter embedding model dimensions (e.g. 512):", default=str(default_embedding_dim)).ask()
-         if embedding_dim is None:
-             raise KeyboardInterrupt
-         try:
-             embedding_dim = int(embedding_dim)
-         except Exception:
-             raise ValueError(f"Failed to cast {embedding_dim} to integer.")
-     else: # local models
-         embedding_endpoint_type = "local"
-         embedding_endpoint = None
-         embedding_model = "BAAI/bge-small-en-v1.5"
-         embedding_dim = 384
-
-     return embedding_endpoint_type, embedding_endpoint, embedding_dim, embedding_model
-
-
- def configure_archival_storage(config: LettaConfig, credentials: LettaCredentials):
-     # Configure archival storage backend
-     archival_storage_options = ["postgres", "chroma", "milvus", "qdrant"]
-     archival_storage_type = questionary.select(
-         "Select storage backend for archival data:", archival_storage_options, default=config.archival_storage_type
-     ).ask()
-     if archival_storage_type is None:
-         raise KeyboardInterrupt
-     archival_storage_uri, archival_storage_path = config.archival_storage_uri, config.archival_storage_path
-
-     # configure postgres
-     if archival_storage_type == "postgres":
-         archival_storage_uri = questionary.text(
-             "Enter postgres connection string (e.g. postgresql+pg8000://{user}:{password}@{ip}:5432/{database}):",
-             default=config.archival_storage_uri if config.archival_storage_uri else "",
-         ).ask()
-         if archival_storage_uri is None:
-             raise KeyboardInterrupt
-
-     # TODO: add back
-     ## configure lancedb
-     # if archival_storage_type == "lancedb":
-     #     archival_storage_uri = questionary.text(
-     #         "Enter lanncedb connection string (e.g. ./.lancedb",
-     #         default=config.archival_storage_uri if config.archival_storage_uri else "./.lancedb",
-     #     ).ask()
-
-     # configure chroma
-     if archival_storage_type == "chroma":
-         chroma_type = questionary.select("Select chroma backend:", ["http", "persistent"], default="persistent").ask()
-         if chroma_type is None:
-             raise KeyboardInterrupt
-         if chroma_type == "http":
-             archival_storage_uri = questionary.text("Enter chroma ip (e.g. localhost:8000):", default="localhost:8000").ask()
-             if archival_storage_uri is None:
-                 raise KeyboardInterrupt
-         if chroma_type == "persistent":
-             archival_storage_path = os.path.join(LETTA_DIR, "chroma")
-
-     if archival_storage_type == "qdrant":
-         qdrant_type = questionary.select("Select Qdrant backend:", ["local", "server"], default="local").ask()
-         if qdrant_type is None:
-             raise KeyboardInterrupt
-         if qdrant_type == "server":
-             archival_storage_uri = questionary.text(
-                 "Enter the Qdrant instance URI (Default: localhost:6333):", default="localhost:6333"
-             ).ask()
-             if archival_storage_uri is None:
-                 raise KeyboardInterrupt
-         if qdrant_type == "local":
-             archival_storage_path = os.path.join(LETTA_DIR, "qdrant")
-
-     if archival_storage_type == "milvus":
-         default_milvus_uri = archival_storage_path = os.path.join(LETTA_DIR, "milvus.db")
-         archival_storage_uri = questionary.text(
-             f"Enter the Milvus connection URI (Default: {default_milvus_uri}):", default=default_milvus_uri
-         ).ask()
-         if archival_storage_uri is None:
-             raise KeyboardInterrupt
-     return archival_storage_type, archival_storage_uri, archival_storage_path
-
-     # TODO: allow configuring embedding model
-
-
- def configure_recall_storage(config: LettaConfig, credentials: LettaCredentials):
-     # Configure recall storage backend
-     recall_storage_options = ["sqlite", "postgres"]
-     recall_storage_type = questionary.select(
-         "Select storage backend for recall data:", recall_storage_options, default=config.recall_storage_type
-     ).ask()
-     if recall_storage_type is None:
-         raise KeyboardInterrupt
-     recall_storage_uri, recall_storage_path = config.recall_storage_uri, config.recall_storage_path
-     # configure postgres
-     if recall_storage_type == "postgres":
-         recall_storage_uri = questionary.text(
-             "Enter postgres connection string (e.g. postgresql+pg8000://{user}:{password}@{ip}:5432/{database}):",
-             default=config.recall_storage_uri if config.recall_storage_uri else "",
-         ).ask()
-         if recall_storage_uri is None:
-             raise KeyboardInterrupt
-
-     return recall_storage_type, recall_storage_uri, recall_storage_path
-
-
  @app.command()
  def configure():
      """Updates default Letta configurations

      This function and quickstart should be the ONLY place where LettaConfig.save() is called
      """
-
-     # check credentials
-     credentials = LettaCredentials.load()
-     openai_key = get_openai_credentials()
-
-     LettaConfig.create_config_dir()
-
-     # Will pre-populate with defaults, or what the user previously set
-     config = LettaConfig.load()
-     try:
-         model_endpoint_type, model_endpoint = configure_llm_endpoint(
-             config=config,
-             credentials=credentials,
-         )
-         model, model_wrapper, context_window = configure_model(
-             config=config,
-             credentials=credentials,
-             model_endpoint_type=str(model_endpoint_type),
-             model_endpoint=str(model_endpoint),
-         )
-         embedding_endpoint_type, embedding_endpoint, embedding_dim, embedding_model = configure_embedding_endpoint(
-             config=config,
-             credentials=credentials,
-         )
-         archival_storage_type, archival_storage_uri, archival_storage_path = configure_archival_storage(
-             config=config,
-             credentials=credentials,
-         )
-         recall_storage_type, recall_storage_uri, recall_storage_path = configure_recall_storage(
-             config=config,
-             credentials=credentials,
-         )
-     except ValueError as e:
-         typer.secho(str(e), fg=typer.colors.RED)
-         return
-
-     # openai key might have gotten added along the way
-     openai_key = credentials.openai_key if credentials.openai_key is not None else openai_key
-
-     # TODO: remove most of this (deplicated with User table)
-     config = LettaConfig(
-         # storage
-         archival_storage_type=archival_storage_type,
-         archival_storage_uri=archival_storage_uri,
-         archival_storage_path=archival_storage_path,
-         # recall storage
-         recall_storage_type=recall_storage_type,
-         recall_storage_uri=recall_storage_uri,
-         recall_storage_path=recall_storage_path,
-         # metadata storage (currently forced to match recall storage)
-         metadata_storage_type=recall_storage_type,
-         metadata_storage_uri=recall_storage_uri,
-         metadata_storage_path=recall_storage_path,
-     )
-
-     typer.secho(f"📖 Saving config to {config.config_path}", fg=typer.colors.GREEN)
-     config.save()
-
-     from letta import create_client
-
-     client = create_client()
-     print("User ID:", client.user_id)
+     print("`letta configure` has been deprecated. Please see documentation on configuration, and run `letta run` instead.")


  class ListChoice(str, Enum):