letta-client 1.0.0a1__py3-none-any.whl → 1.0.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-client might be problematic.
- letta_client/__init__.py +149 -23
- letta_client/agents/__init__.py +15 -17
- letta_client/agents/blocks/__init__.py +3 -0
- letta_client/agents/blocks/client.py +77 -4
- letta_client/agents/blocks/raw_client.py +63 -2
- letta_client/agents/blocks/types/__init__.py +7 -0
- letta_client/agents/blocks/types/blocks_list_request_order.py +5 -0
- letta_client/agents/client.py +46 -13
- letta_client/agents/files/__init__.py +3 -0
- letta_client/agents/files/client.py +71 -10
- letta_client/agents/files/raw_client.py +51 -10
- letta_client/agents/files/types/__init__.py +7 -0
- letta_client/{types/tool_return_status.py → agents/files/types/files_list_request_order.py} +1 -1
- letta_client/agents/folders/__init__.py +3 -0
- letta_client/agents/folders/client.py +77 -4
- letta_client/agents/folders/raw_client.py +63 -2
- letta_client/agents/folders/types/__init__.py +7 -0
- letta_client/agents/folders/types/folders_list_request_order.py +5 -0
- letta_client/agents/groups/__init__.py +3 -0
- letta_client/agents/groups/client.py +71 -2
- letta_client/agents/groups/raw_client.py +51 -0
- letta_client/agents/groups/types/__init__.py +7 -0
- letta_client/agents/groups/types/groups_list_request_order.py +5 -0
- letta_client/agents/messages/__init__.py +2 -0
- letta_client/agents/messages/client.py +55 -14
- letta_client/agents/messages/raw_client.py +35 -14
- letta_client/agents/messages/types/__init__.py +2 -0
- letta_client/agents/messages/types/messages_list_request_order.py +5 -0
- letta_client/agents/passages/client.py +29 -0
- letta_client/agents/raw_client.py +4 -4
- letta_client/agents/sources/__init__.py +3 -0
- letta_client/agents/sources/client.py +77 -4
- letta_client/agents/sources/raw_client.py +63 -2
- letta_client/agents/sources/types/__init__.py +7 -0
- letta_client/agents/sources/types/sources_list_request_order.py +5 -0
- letta_client/agents/tools/__init__.py +3 -0
- letta_client/agents/tools/client.py +77 -4
- letta_client/agents/tools/raw_client.py +63 -2
- letta_client/agents/tools/types/__init__.py +7 -0
- letta_client/agents/tools/types/tools_list_request_order.py +5 -0
- letta_client/archives/client.py +16 -2
- letta_client/base_client.py +3 -0
- letta_client/batches/client.py +12 -2
- letta_client/batches/messages/client.py +10 -0
- letta_client/blocks/agents/client.py +8 -0
- letta_client/blocks/client.py +32 -2
- letta_client/chat/__init__.py +7 -0
- letta_client/chat/client.py +255 -0
- letta_client/chat/raw_client.py +269 -0
- letta_client/chat/types/__init__.py +8 -0
- letta_client/chat/types/chat_completion_request_messages_item.py +19 -0
- letta_client/chat/types/chat_completion_request_stop.py +5 -0
- letta_client/client_side_access_tokens/client.py +10 -2
- letta_client/core/client_wrapper.py +2 -2
- letta_client/errors/__init__.py +2 -0
- letta_client/errors/gone_error.py +10 -0
- letta_client/folders/agents/client.py +8 -0
- letta_client/folders/client.py +20 -4
- letta_client/folders/files/client.py +14 -0
- letta_client/folders/passages/client.py +8 -0
- letta_client/groups/client.py +16 -2
- letta_client/groups/messages/client.py +14 -0
- letta_client/identities/agents/client.py +8 -0
- letta_client/identities/blocks/client.py +8 -0
- letta_client/identities/client.py +20 -2
- letta_client/jobs/__init__.py +3 -0
- letta_client/jobs/client.py +61 -12
- letta_client/jobs/raw_client.py +29 -8
- letta_client/jobs/types/__init__.py +7 -0
- letta_client/jobs/types/jobs_list_request_order.py +5 -0
- letta_client/models/client.py +8 -2
- letta_client/projects/client.py +10 -2
- letta_client/providers/client.py +90 -2
- letta_client/providers/raw_client.py +102 -0
- letta_client/runs/__init__.py +11 -2
- letta_client/runs/client.py +150 -18
- letta_client/runs/messages/client.py +30 -2
- letta_client/runs/messages/raw_client.py +10 -0
- letta_client/runs/raw_client.py +144 -14
- letta_client/runs/steps/__init__.py +3 -0
- letta_client/runs/steps/client.py +39 -30
- letta_client/runs/steps/raw_client.py +19 -28
- letta_client/runs/steps/types/__init__.py +7 -0
- letta_client/runs/steps/types/steps_list_request_order.py +5 -0
- letta_client/runs/types/__init__.py +2 -1
- letta_client/runs/types/runs_list_request_order.py +5 -0
- letta_client/sources/client.py +8 -2
- letta_client/sources/files/client.py +12 -0
- letta_client/sources/passages/client.py +6 -0
- letta_client/steps/client.py +26 -2
- letta_client/steps/messages/client.py +8 -0
- letta_client/tags/client.py +16 -2
- letta_client/templates/__init__.py +12 -0
- letta_client/templates/client.py +30 -4
- letta_client/templates/raw_client.py +2 -2
- letta_client/templates/types/__init__.py +24 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_llm_config.py +4 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_llm_config_display_name.py +14 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_llm_config_display_name_item.py +5 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_secrets_item.py +4 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_secrets_item_value_enc.py +14 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_secrets_item_value_enc_item.py +5 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_exec_environment_variables_item.py +6 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_exec_environment_variables_item_value_enc.py +16 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tool_exec_environment_variables_item_value_enc_item.py +7 -0
- letta_client/templates/types/templates_create_agents_from_template_response_agents_item_tools_item_tool_type.py +1 -1
- letta_client/tools/client.py +30 -220
- letta_client/tools/raw_client.py +0 -292
- letta_client/types/__init__.py +130 -22
- letta_client/types/agent_environment_variable.py +5 -0
- letta_client/types/{action_parameters_model.py → annotation.py} +4 -10
- letta_client/types/{action_response_model.py → annotation_url_citation.py} +4 -9
- letta_client/types/approval_create.py +8 -2
- letta_client/types/approval_create_approvals_item.py +8 -0
- letta_client/types/approval_response_message.py +8 -2
- letta_client/types/approval_response_message_approvals_item.py +8 -0
- letta_client/types/approval_return.py +34 -0
- letta_client/{agents/templates/types/templates_migrate_response.py → types/audio.py} +4 -4
- letta_client/types/chat_completion.py +30 -0
- letta_client/types/chat_completion_assistant_message_param.py +30 -0
- letta_client/types/chat_completion_assistant_message_param_content.py +9 -0
- letta_client/types/chat_completion_assistant_message_param_content_item.py +10 -0
- letta_client/types/chat_completion_assistant_message_param_tool_calls_item.py +10 -0
- letta_client/types/chat_completion_audio.py +23 -0
- letta_client/types/chat_completion_content_part_image_param.py +22 -0
- letta_client/types/chat_completion_content_part_input_audio_param.py +22 -0
- letta_client/types/chat_completion_content_part_refusal_param.py +21 -0
- letta_client/types/chat_completion_content_part_text_param.py +21 -0
- letta_client/types/chat_completion_developer_message_param.py +23 -0
- letta_client/types/chat_completion_developer_message_param_content.py +7 -0
- letta_client/types/chat_completion_function_message_param.py +22 -0
- letta_client/types/chat_completion_message.py +30 -0
- letta_client/types/chat_completion_message_custom_tool_call.py +23 -0
- letta_client/types/chat_completion_message_custom_tool_call_param.py +23 -0
- letta_client/types/chat_completion_message_function_tool_call_input.py +25 -0
- letta_client/types/{chat_completion_message_function_tool_call.py → chat_completion_message_function_tool_call_output.py} +3 -3
- letta_client/types/chat_completion_message_function_tool_call_param.py +25 -0
- letta_client/types/chat_completion_message_tool_calls_item.py +10 -0
- letta_client/types/chat_completion_service_tier.py +5 -0
- letta_client/types/chat_completion_system_message_param.py +23 -0
- letta_client/types/chat_completion_system_message_param_content.py +7 -0
- letta_client/types/chat_completion_token_logprob.py +24 -0
- letta_client/types/chat_completion_tool_message_param.py +23 -0
- letta_client/types/chat_completion_tool_message_param_content.py +7 -0
- letta_client/types/chat_completion_user_message_param.py +23 -0
- letta_client/types/chat_completion_user_message_param_content.py +7 -0
- letta_client/types/chat_completion_user_message_param_content_item.py +15 -0
- letta_client/types/choice.py +26 -0
- letta_client/types/choice_finish_reason.py +7 -0
- letta_client/types/choice_logprobs.py +22 -0
- letta_client/types/completion_tokens_details.py +23 -0
- letta_client/types/{auth_scheme_field.py → completion_usage.py} +8 -13
- letta_client/types/custom_input.py +21 -0
- letta_client/types/custom_output.py +21 -0
- letta_client/types/file.py +22 -0
- letta_client/types/file_file.py +22 -0
- letta_client/types/function_call_input.py +21 -0
- letta_client/types/function_call_output.py +21 -0
- letta_client/types/{function.py → function_output.py} +1 -1
- letta_client/types/image_url.py +22 -0
- letta_client/types/image_url_detail.py +5 -0
- letta_client/types/input_audio.py +22 -0
- letta_client/types/input_audio_format.py +5 -0
- letta_client/types/internal_template_agent_create.py +2 -2
- letta_client/types/letta_schemas_agent_file_agent_schema.py +2 -2
- letta_client/types/letta_schemas_agent_file_message_schema.py +27 -4
- letta_client/types/letta_schemas_agent_file_message_schema_approvals_item.py +8 -0
- letta_client/types/letta_schemas_letta_message_tool_return.py +26 -0
- letta_client/types/letta_schemas_letta_message_tool_return_status.py +5 -0
- letta_client/types/{tool_return.py → letta_schemas_message_tool_return.py} +9 -3
- letta_client/types/letta_schemas_message_tool_return_status.py +5 -0
- letta_client/types/llm_config.py +5 -0
- letta_client/types/message.py +10 -4
- letta_client/types/message_approvals_item.py +8 -0
- letta_client/types/omitted_reasoning_content.py +4 -0
- letta_client/types/openai_types_chat_chat_completion_message_function_tool_call_function.py +21 -0
- letta_client/types/openai_types_chat_chat_completion_message_function_tool_call_param_function.py +21 -0
- letta_client/types/prompt_tokens_details.py +21 -0
- letta_client/types/provider.py +10 -0
- letta_client/types/run_metrics.py +58 -0
- letta_client/types/sandbox_environment_variable.py +5 -0
- letta_client/types/text_content.py +5 -0
- letta_client/types/tool_call_content.py +5 -0
- letta_client/types/tool_call_message.py +2 -0
- letta_client/types/tool_call_message_tool_calls.py +8 -0
- letta_client/types/tool_return_message.py +8 -5
- letta_client/types/tool_type.py +1 -1
- letta_client/types/top_logprob.py +22 -0
- letta_client/voice/client.py +14 -0
- letta_client/voice/raw_client.py +37 -0
- letta_client-1.0.0a3.dist-info/METADATA +422 -0
- {letta_client-1.0.0a1.dist-info → letta_client-1.0.0a3.dist-info}/RECORD +193 -115
- letta_client/agents/templates/__init__.py +0 -7
- letta_client/agents/templates/client.py +0 -307
- letta_client/agents/templates/raw_client.py +0 -275
- letta_client/agents/templates/types/__init__.py +0 -7
- letta_client/types/action_model.py +0 -39
- letta_client/types/app_auth_scheme.py +0 -35
- letta_client/types/app_auth_scheme_auth_mode.py +0 -19
- letta_client/types/app_model.py +0 -45
- letta_client-1.0.0a1.dist-info/METADATA +0 -211
- {letta_client-1.0.0a1.dist-info → letta_client-1.0.0a3.dist-info}/WHEEL +0 -0
letta_client/agents/tools/raw_client.py
CHANGED

@@ -13,6 +13,7 @@ from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.agent_state import AgentState
 from ...types.http_validation_error import HttpValidationError
 from ...types.tool import Tool
+from .types.tools_list_request_order import ToolsListRequestOrder


 class RawToolsClient:
@@ -20,7 +21,15 @@ class RawToolsClient:
         self._client_wrapper = client_wrapper

     def list(
-        self,
+        self,
+        agent_id: str,
+        *,
+        before: typing.Optional[str] = None,
+        after: typing.Optional[str] = None,
+        limit: typing.Optional[int] = None,
+        order: typing.Optional[ToolsListRequestOrder] = None,
+        order_by: typing.Optional[typing.Literal["created_at"]] = None,
+        request_options: typing.Optional[RequestOptions] = None,
     ) -> HttpResponse[typing.List[Tool]]:
         """
         Get tools from an existing agent
@@ -29,6 +38,21 @@
         ----------
         agent_id : str

+        before : typing.Optional[str]
+            Tool ID cursor for pagination. Returns tools that come before this tool ID in the specified sort order
+
+        after : typing.Optional[str]
+            Tool ID cursor for pagination. Returns tools that come after this tool ID in the specified sort order
+
+        limit : typing.Optional[int]
+            Maximum number of tools to return
+
+        order : typing.Optional[ToolsListRequestOrder]
+            Sort order for tools by creation time. 'asc' for oldest first, 'desc' for newest first
+
+        order_by : typing.Optional[typing.Literal["created_at"]]
+            Field to sort by
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

@@ -40,6 +64,13 @@
         _response = self._client_wrapper.httpx_client.request(
             f"v1/agents/{jsonable_encoder(agent_id)}/tools",
             method="GET",
+            params={
+                "before": before,
+                "after": after,
+                "limit": limit,
+                "order": order,
+                "order_by": order_by,
+            },
             request_options=request_options,
         )
         try:
@@ -237,7 +268,15 @@ class AsyncRawToolsClient:
         self._client_wrapper = client_wrapper

     async def list(
-        self,
+        self,
+        agent_id: str,
+        *,
+        before: typing.Optional[str] = None,
+        after: typing.Optional[str] = None,
+        limit: typing.Optional[int] = None,
+        order: typing.Optional[ToolsListRequestOrder] = None,
+        order_by: typing.Optional[typing.Literal["created_at"]] = None,
+        request_options: typing.Optional[RequestOptions] = None,
     ) -> AsyncHttpResponse[typing.List[Tool]]:
         """
         Get tools from an existing agent
@@ -246,6 +285,21 @@
         ----------
         agent_id : str

+        before : typing.Optional[str]
+            Tool ID cursor for pagination. Returns tools that come before this tool ID in the specified sort order
+
+        after : typing.Optional[str]
+            Tool ID cursor for pagination. Returns tools that come after this tool ID in the specified sort order
+
+        limit : typing.Optional[int]
+            Maximum number of tools to return
+
+        order : typing.Optional[ToolsListRequestOrder]
+            Sort order for tools by creation time. 'asc' for oldest first, 'desc' for newest first
+
+        order_by : typing.Optional[typing.Literal["created_at"]]
+            Field to sort by
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

@@ -257,6 +311,13 @@
         _response = await self._client_wrapper.httpx_client.request(
             f"v1/agents/{jsonable_encoder(agent_id)}/tools",
             method="GET",
+            params={
+                "before": before,
+                "after": after,
+                "limit": limit,
+                "order": order,
+                "order_by": order_by,
+            },
             request_options=request_options,
         )
         try:
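The hunks above add cursor pagination to the agent tools listing. A minimal usage sketch, assuming the high-level client in letta_client/agents/tools/client.py mirrors this raw signature and that each returned Tool exposes an id field (both are assumptions, not shown in this diff):

from letta_client import Letta

client = Letta(
    project="YOUR_PROJECT",
    token="YOUR_TOKEN",
)

# First page: newest tools first, sorted by creation time.
page = client.agents.tools.list(
    agent_id="agent_id",
    limit=10,
    order="desc",
    order_by="created_at",
)

# Next page: pass the last tool's ID as the 'after' cursor
# (assumes Tool objects carry an `id` attribute).
if page:
    next_page = client.agents.tools.list(
        agent_id="agent_id",
        after=page[-1].id,
        limit=10,
        order="desc",
    )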
letta_client/archives/client.py
CHANGED

@@ -77,7 +77,14 @@ class ArchivesClient:
             project="YOUR_PROJECT",
             token="YOUR_TOKEN",
         )
-        client.archives.list_archives(
+        client.archives.list_archives(
+            before="before",
+            after="after",
+            limit=1,
+            order="asc",
+            name="name",
+            agent_id="agent_id",
+        )
         """
         _response = self._raw_client.list_archives(
             before=before,
@@ -244,7 +251,14 @@


         async def main() -> None:
-            await client.archives.list_archives(
+            await client.archives.list_archives(
+                before="before",
+                after="after",
+                limit=1,
+                order="asc",
+                name="name",
+                agent_id="agent_id",
+            )


         asyncio.run(main())
letta_client/base_client.py
CHANGED

@@ -7,6 +7,7 @@ from .agents.client import AgentsClient, AsyncAgentsClient
 from .archives.client import ArchivesClient, AsyncArchivesClient
 from .batches.client import AsyncBatchesClient, BatchesClient
 from .blocks.client import AsyncBlocksClient, BlocksClient
+from .chat.client import AsyncChatClient, ChatClient
 from .client_side_access_tokens.client import AsyncClientSideAccessTokensClient, ClientSideAccessTokensClient
 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .environment import LettaEnvironment
@@ -102,6 +103,7 @@ class LettaBase:
         self.sources = SourcesClient(client_wrapper=self._client_wrapper)
         self.folders = FoldersClient(client_wrapper=self._client_wrapper)
         self.agents = AgentsClient(client_wrapper=self._client_wrapper)
+        self.chat = ChatClient(client_wrapper=self._client_wrapper)
         self.groups = GroupsClient(client_wrapper=self._client_wrapper)
         self.identities = IdentitiesClient(client_wrapper=self._client_wrapper)
         self.models = ModelsClient(client_wrapper=self._client_wrapper)
@@ -194,6 +196,7 @@ class AsyncLettaBase:
         self.sources = AsyncSourcesClient(client_wrapper=self._client_wrapper)
         self.folders = AsyncFoldersClient(client_wrapper=self._client_wrapper)
         self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper)
+        self.chat = AsyncChatClient(client_wrapper=self._client_wrapper)
         self.groups = AsyncGroupsClient(client_wrapper=self._client_wrapper)
         self.identities = AsyncIdentitiesClient(client_wrapper=self._client_wrapper)
         self.models = AsyncModelsClient(client_wrapper=self._client_wrapper)
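With ChatClient wired into the base client, the new chat namespace is reachable directly from a configured client. A minimal sketch, with constructor arguments copied from the docstring examples elsewhere in this diff:

from letta_client import Letta

client = Letta(
    project="YOUR_PROJECT",
    token="YOUR_TOKEN",
)

# 1.0.0a3 exposes the OpenAI-compatible chat sub-client alongside the
# existing namespaces (agents, groups, identities, models, ...).
chat = client.chat
agents = client.agents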
letta_client/batches/client.py
CHANGED

@@ -76,7 +76,12 @@ class BatchesClient:
             project="YOUR_PROJECT",
             token="YOUR_TOKEN",
         )
-        client.batches.list(
+        client.batches.list(
+            before="before",
+            after="after",
+            limit=1,
+            order="asc",
+        )
         """
         _response = self._raw_client.list(
             before=before, after=after, limit=limit, order=order, order_by=order_by, request_options=request_options
@@ -275,7 +280,12 @@


         async def main() -> None:
-            await client.batches.list(
+            await client.batches.list(
+                before="before",
+                after="after",
+                limit=1,
+                order="asc",
+            )


         asyncio.run(main())
letta_client/batches/messages/client.py
CHANGED

@@ -79,6 +79,11 @@ class MessagesClient:
         )
         client.batches.messages.list(
             batch_id="batch_id",
+            before="before",
+            after="after",
+            limit=1,
+            order="asc",
+            agent_id="agent_id",
         )
         """
         _response = self._raw_client.list(
@@ -169,6 +174,11 @@
         async def main() -> None:
             await client.batches.messages.list(
                 batch_id="batch_id",
+                before="before",
+                after="after",
+                limit=1,
+                order="asc",
+                agent_id="agent_id",
             )


letta_client/blocks/agents/client.py
CHANGED

@@ -80,6 +80,10 @@ class AgentsClient:
         )
         client.blocks.agents.list(
             block_id="block_id",
+            before="before",
+            after="after",
+            limit=1,
+            order="asc",
         )
         """
         _response = self._raw_client.list(
@@ -171,6 +175,10 @@
         async def main() -> None:
             await client.blocks.agents.list(
                 block_id="block_id",
+                before="before",
+                after="after",
+                limit=1,
+                order="asc",
             )


letta_client/blocks/client.py
CHANGED

@@ -121,7 +121,22 @@ class BlocksClient:
             project="YOUR_PROJECT",
             token="YOUR_TOKEN",
         )
-        client.blocks.list(
+        client.blocks.list(
+            label="label",
+            templates_only=True,
+            name="name",
+            identity_id="identity_id",
+            project_id="project_id",
+            limit=1,
+            before="before",
+            after="after",
+            order="asc",
+            label_search="label_search",
+            description_search="description_search",
+            value_search="value_search",
+            connected_to_agents_count_gt=1,
+            connected_to_agents_count_lt=1,
+        )
         """
         _response = self._raw_client.list(
             label=label,
@@ -556,7 +571,22 @@


         async def main() -> None:
-            await client.blocks.list(
+            await client.blocks.list(
+                label="label",
+                templates_only=True,
+                name="name",
+                identity_id="identity_id",
+                project_id="project_id",
+                limit=1,
+                before="before",
+                after="after",
+                order="asc",
+                label_search="label_search",
+                description_search="description_search",
+                value_search="value_search",
+                connected_to_agents_count_gt=1,
+                connected_to_agents_count_lt=1,
+            )


         asyncio.run(main())
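The expanded blocks.list signature adds text-search and agent-count filters on top of cursor pagination. A hedged sketch of a more selective query than the exhaustive docstring example above; parameter names come from that example, while the specific values and filtering intent are illustrative only:

from letta_client import Letta

client = Letta(
    project="YOUR_PROJECT",
    token="YOUR_TOKEN",
)

# Find non-template blocks whose value mentions "persona" and that are
# attached to more than one agent, newest first.
blocks = client.blocks.list(
    templates_only=False,
    value_search="persona",
    connected_to_agents_count_gt=1,
    order="desc",
    limit=25,
)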
letta_client/chat/client.py
ADDED

@@ -0,0 +1,255 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.request_options import RequestOptions
+from ..types.chat_completion import ChatCompletion
+from .raw_client import AsyncRawChatClient, RawChatClient
+from .types.chat_completion_request_messages_item import ChatCompletionRequestMessagesItem
+from .types.chat_completion_request_stop import ChatCompletionRequestStop
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ChatClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._raw_client = RawChatClient(client_wrapper=client_wrapper)
+
+    @property
+    def with_raw_response(self) -> RawChatClient:
+        """
+        Retrieves a raw implementation of this client that returns raw responses.
+
+        Returns
+        -------
+        RawChatClient
+        """
+        return self._raw_client
+
+    def create_chat_completion(
+        self,
+        *,
+        model: str,
+        messages: typing.Sequence[ChatCompletionRequestMessagesItem],
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        n: typing.Optional[int] = OMIT,
+        stream: typing.Optional[bool] = OMIT,
+        stop: typing.Optional[ChatCompletionRequestStop] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ChatCompletion:
+        """
+        Create a chat completion using a Letta agent (OpenAI-compatible).
+
+        This endpoint provides full OpenAI API compatibility. The agent is selected based on:
+        - The 'model' parameter in the request (should contain an agent ID in format 'agent-...')
+
+        When streaming is enabled (stream=true), the response will be Server-Sent Events
+        with ChatCompletionChunk objects.
+
+        Parameters
+        ----------
+        model : str
+            ID of the model to use
+
+        messages : typing.Sequence[ChatCompletionRequestMessagesItem]
+            Messages comprising the conversation so far
+
+        temperature : typing.Optional[float]
+            Sampling temperature
+
+        top_p : typing.Optional[float]
+            Nucleus sampling parameter
+
+        n : typing.Optional[int]
+            Number of chat completion choices to generate
+
+        stream : typing.Optional[bool]
+            Whether to stream back partial progress
+
+        stop : typing.Optional[ChatCompletionRequestStop]
+            Sequences where the API will stop generating
+
+        max_tokens : typing.Optional[int]
+            Maximum number of tokens to generate
+
+        presence_penalty : typing.Optional[float]
+            Presence penalty
+
+        frequency_penalty : typing.Optional[float]
+            Frequency penalty
+
+        user : typing.Optional[str]
+            A unique identifier representing your end-user
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ChatCompletion
+            Successful response
+
+        Examples
+        --------
+        from letta_client import ChatCompletionDeveloperMessageParam, Letta
+
+        client = Letta(
+            project="YOUR_PROJECT",
+            token="YOUR_TOKEN",
+        )
+        client.chat.create_chat_completion(
+            model="model",
+            messages=[
+                ChatCompletionDeveloperMessageParam(
+                    content="content",
+                )
+            ],
+        )
+        """
+        _response = self._raw_client.create_chat_completion(
+            model=model,
+            messages=messages,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            stream=stream,
+            stop=stop,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            user=user,
+            request_options=request_options,
+        )
+        return _response.data
+
+
+class AsyncChatClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._raw_client = AsyncRawChatClient(client_wrapper=client_wrapper)
+
+    @property
+    def with_raw_response(self) -> AsyncRawChatClient:
+        """
+        Retrieves a raw implementation of this client that returns raw responses.
+
+        Returns
+        -------
+        AsyncRawChatClient
+        """
+        return self._raw_client
+
+    async def create_chat_completion(
+        self,
+        *,
+        model: str,
+        messages: typing.Sequence[ChatCompletionRequestMessagesItem],
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        n: typing.Optional[int] = OMIT,
+        stream: typing.Optional[bool] = OMIT,
+        stop: typing.Optional[ChatCompletionRequestStop] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ChatCompletion:
+        """
+        Create a chat completion using a Letta agent (OpenAI-compatible).
+
+        This endpoint provides full OpenAI API compatibility. The agent is selected based on:
+        - The 'model' parameter in the request (should contain an agent ID in format 'agent-...')
+
+        When streaming is enabled (stream=true), the response will be Server-Sent Events
+        with ChatCompletionChunk objects.
+
+        Parameters
+        ----------
+        model : str
+            ID of the model to use
+
+        messages : typing.Sequence[ChatCompletionRequestMessagesItem]
+            Messages comprising the conversation so far
+
+        temperature : typing.Optional[float]
+            Sampling temperature
+
+        top_p : typing.Optional[float]
+            Nucleus sampling parameter
+
+        n : typing.Optional[int]
+            Number of chat completion choices to generate
+
+        stream : typing.Optional[bool]
+            Whether to stream back partial progress
+
+        stop : typing.Optional[ChatCompletionRequestStop]
+            Sequences where the API will stop generating
+
+        max_tokens : typing.Optional[int]
+            Maximum number of tokens to generate
+
+        presence_penalty : typing.Optional[float]
+            Presence penalty
+
+        frequency_penalty : typing.Optional[float]
+            Frequency penalty
+
+        user : typing.Optional[str]
+            A unique identifier representing your end-user
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        ChatCompletion
+            Successful response
+
+        Examples
+        --------
+        import asyncio
+
+        from letta_client import AsyncLetta, ChatCompletionDeveloperMessageParam
+
+        client = AsyncLetta(
+            project="YOUR_PROJECT",
+            token="YOUR_TOKEN",
+        )
+
+
+        async def main() -> None:
+            await client.chat.create_chat_completion(
+                model="model",
+                messages=[
+                    ChatCompletionDeveloperMessageParam(
+                        content="content",
+                    )
+                ],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._raw_client.create_chat_completion(
+            model=model,
+            messages=messages,
+            temperature=temperature,
+            top_p=top_p,
+            n=n,
+            stream=stream,
+            stop=stop,
+            max_tokens=max_tokens,
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+            user=user,
+            request_options=request_options,
+        )
+        return _response.data
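A short usage sketch for the new endpoint, following the docstring above: the model field carries the target agent's ID ('agent-...'), and the response is assumed to follow the OpenAI ChatCompletion shape (a choices list whose items carry a message). The agent ID below is a placeholder, not a real agent.

from letta_client import ChatCompletionDeveloperMessageParam, Letta

client = Letta(
    project="YOUR_PROJECT",
    token="YOUR_TOKEN",
)

# 'model' selects the agent; replace the placeholder with a real agent ID.
completion = client.chat.create_chat_completion(
    model="agent-your-agent-id",
    messages=[
        ChatCompletionDeveloperMessageParam(
            content="Summarize the project status in two sentences.",
        ),
    ],
    temperature=0.2,
    max_tokens=256,
)

# Assumes the OpenAI-style response shape exposed by the ChatCompletion type.
print(completion.choices[0].message)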