@letta-ai/letta-client 1.6.8 → 1.7.2
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- package/CHANGELOG.md +34 -0
- package/client.d.mts +2 -2
- package/client.d.mts.map +1 -1
- package/client.d.ts +2 -2
- package/client.d.ts.map +1 -1
- package/client.js.map +1 -1
- package/client.mjs.map +1 -1
- package/package.json +1 -1
- package/resources/agents/agents.d.mts +237 -6
- package/resources/agents/agents.d.mts.map +1 -1
- package/resources/agents/agents.d.ts +237 -6
- package/resources/agents/agents.d.ts.map +1 -1
- package/resources/agents/agents.js.map +1 -1
- package/resources/agents/agents.mjs.map +1 -1
- package/resources/agents/messages.d.mts +99 -13
- package/resources/agents/messages.d.mts.map +1 -1
- package/resources/agents/messages.d.ts +99 -13
- package/resources/agents/messages.d.ts.map +1 -1
- package/resources/conversations/conversations.d.mts +20 -1
- package/resources/conversations/conversations.d.mts.map +1 -1
- package/resources/conversations/conversations.d.ts +20 -1
- package/resources/conversations/conversations.d.ts.map +1 -1
- package/resources/conversations/conversations.js +6 -0
- package/resources/conversations/conversations.js.map +1 -1
- package/resources/conversations/conversations.mjs +6 -0
- package/resources/conversations/conversations.mjs.map +1 -1
- package/resources/conversations/index.d.mts +1 -1
- package/resources/conversations/index.d.mts.map +1 -1
- package/resources/conversations/index.d.ts +1 -1
- package/resources/conversations/index.d.ts.map +1 -1
- package/resources/conversations/index.js.map +1 -1
- package/resources/conversations/index.mjs.map +1 -1
- package/resources/conversations/messages.d.mts +6 -0
- package/resources/conversations/messages.d.mts.map +1 -1
- package/resources/conversations/messages.d.ts +6 -0
- package/resources/conversations/messages.d.ts.map +1 -1
- package/resources/index.d.mts +1 -1
- package/resources/index.d.mts.map +1 -1
- package/resources/index.d.ts +1 -1
- package/resources/index.d.ts.map +1 -1
- package/resources/index.js.map +1 -1
- package/resources/index.mjs.map +1 -1
- package/resources/models/models.d.mts +15 -3
- package/resources/models/models.d.mts.map +1 -1
- package/resources/models/models.d.ts +15 -3
- package/resources/models/models.d.ts.map +1 -1
- package/resources/models/models.js.map +1 -1
- package/resources/models/models.mjs.map +1 -1
- package/resources/steps/steps.d.mts +27 -3
- package/resources/steps/steps.d.mts.map +1 -1
- package/resources/steps/steps.d.ts +27 -3
- package/resources/steps/steps.d.ts.map +1 -1
- package/resources/steps/steps.js.map +1 -1
- package/resources/steps/steps.mjs.map +1 -1
- package/src/client.ts +4 -0
- package/src/resources/agents/agents.ts +276 -0
- package/src/resources/agents/messages.ts +118 -10
- package/src/resources/conversations/conversations.ts +30 -0
- package/src/resources/conversations/index.ts +2 -0
- package/src/resources/conversations/messages.ts +7 -0
- package/src/resources/index.ts +2 -0
- package/src/resources/models/models.ts +20 -2
- package/src/resources/steps/steps.ts +32 -3
- package/src/version.ts +1 -1
- package/version.d.mts +1 -1
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/version.mjs +1 -1
@@ -535,7 +535,7 @@ export interface InternalMessage {
   /**
    * The list of approvals for this message.
    */
-  approvals?: Array<ApprovalReturn | InternalMessage.
+  approvals?: Array<ApprovalReturn | InternalMessage.LettaSchemasMessageToolReturnOutput> | null;

   /**
    * Whether tool call is approved.
@@ -649,16 +649,16 @@ export interface InternalMessage {
 }

 export namespace InternalMessage {
-  export interface
+  export interface LettaSchemasMessageToolReturnOutput {
     /**
      * The status of the tool call
      */
     status: 'success' | 'error';

     /**
-     * The function response string
+     * The function response - either a string or list of content parts (text/image)
      */
-    func_response?: string | null;
+    func_response?: string | Array<MessagesAPI.TextContent | MessagesAPI.ImageContent> | null;

     /**
      * Captured stderr from the tool invocation
@@ -751,9 +751,9 @@ export namespace InternalMessage {
     status: 'success' | 'error';

     /**
-     * The function response string
+     * The function response - either a string or list of content parts (text/image)
      */
-    func_response?: string | null;
+    func_response?: string | Array<MessagesAPI.TextContent | MessagesAPI.ImageContent> | null;

     /**
      * Captured stderr from the tool invocation
@@ -857,6 +857,13 @@ export interface LettaRequest {
    */
   messages?: Array<AgentsAPI.MessageCreate | ApprovalCreate> | null;

+  /**
+   * Model handle to use for this request instead of the agent's default model. This
+   * allows sending a message to a different model without changing the agent's
+   * configuration.
+   */
+  override_model?: string | null;
+
   /**
    * @deprecated Whether the server should parse specific tool call arguments
    * (default `send_message`) as `AssistantMessage` objects. Still supported for
@@ -1095,6 +1102,13 @@ export interface LettaStreamingRequest {
    */
   messages?: Array<AgentsAPI.MessageCreate | ApprovalCreate> | null;

+  /**
+   * Model handle to use for this request instead of the agent's default model. This
+   * allows sending a message to a different model without changing the agent's
+   * configuration.
+   */
+  override_model?: string | null;
+
   /**
    * Flag to determine if individual tokens should be streamed, rather than streaming
    * per step (only used when streaming=true).
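Every request shape that sends messages in this release (`LettaRequest`, `LettaStreamingRequest`, and the create/async/stream params further down) gains the same optional `override_model` field. The sketch below shows how a caller might use it; the `Letta` default export, the `client.agents.messages.create(agentId, ...)` accessor, and the `'openai/gpt-4.1'` model handle are assumptions based on this package's resource layout, not details confirmed by the diff.

```ts
import Letta from '@letta-ai/letta-client';

// Assumes the default export is the client class and that credentials are
// picked up from the environment; adjust construction to your setup.
const client = new Letta();

async function askWithTemporaryModel(agentId: string) {
  // override_model routes this one request to a different model without
  // changing the agent's stored configuration.
  const response = await client.agents.messages.create(agentId, {
    messages: [{ role: 'user', content: 'Summarize our last conversation.' }],
    override_model: 'openai/gpt-4.1', // hypothetical model handle
  });
  console.log(response);
}
```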
@@ -1201,14 +1215,36 @@ export type LettaStreamingResponse =

 export namespace LettaStreamingResponse {
   /**
-   *
+   * A ping message used as a keepalive to prevent SSE streams from timing out during
    * long running requests.
+   *
+   * Args: id (str): The ID of the message date (datetime): The date the message was
+   * created in ISO format
    */
   export interface LettaPing {
+    id: string;
+
+    date: string;
+
+    is_err?: boolean | null;
+
     /**
-     * The type of the message.
+     * The type of the message. Ping messages are a keep-alive to prevent SSE streams
+     * from timing out during long running requests.
      */
-    message_type
+    message_type?: 'ping';
+
+    name?: string | null;
+
+    otid?: string | null;
+
+    run_id?: string | null;
+
+    sender_id?: string | null;
+
+    seq_id?: number | null;
+
+    step_id?: string | null;
   }

   /**
@@ -1240,6 +1276,11 @@ export namespace LettaStreamingResponse {
      * An optional error detail.
      */
     detail?: string;
+
+    /**
+     * The sequence ID for cursor-based pagination.
+     */
+    seq_id?: number;
   }

   /**
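`LettaPing` is now a fully typed member of the streaming union (and the error chunk gains a `seq_id` for cursor-based resumption), so stream consumers can filter keepalives explicitly. A minimal sketch, assuming the messages resource exposes a `stream` method that yields `LettaStreamingResponse` chunks as an async iterable; the method name is inferred from `MessageStreamParams`, not confirmed by this diff.

```ts
import Letta from '@letta-ai/letta-client';

// Skip keepalive pings while consuming a message stream. The `stream`
// accessor and its async-iterable return type are assumptions.
async function consumeStream(client: Letta, agentId: string) {
  const stream = await client.agents.messages.stream(agentId, {
    messages: [{ role: 'user', content: 'Run the nightly report.' }],
  });

  for await (const chunk of stream) {
    if ('message_type' in chunk && chunk.message_type === 'ping') {
      // Keepalive only: it exists to stop the SSE connection from timing
      // out during long-running requests, so there is nothing to render.
      continue;
    }
    console.log(chunk);
  }
}
```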
@@ -1743,7 +1784,10 @@ export interface ToolReturn {

   tool_call_id: string;

-
+  /**
+   * The tool return value - either a string or list of content parts (text/image)
+   */
+  tool_return: Array<TextContent | ImageContent> | string;

   stderr?: Array<string> | null;

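`ToolReturn.tool_return`, like `func_response` above, may now be either a plain string or a list of text/image content parts. A helper along these lines can flatten it back to displayable text; the `{ type: 'text', text }` shape used for the text part is an assumption about the `TextContent`/`ImageContent` types referenced here, so verify it against the generated typings.

```ts
// Flatten a tool return that may be a string or a list of content parts.
// The content-part shapes below are assumed, not taken from this diff.
type AssumedTextPart = { type: 'text'; text: string };
type AssumedImagePart = { type: 'image'; [key: string]: unknown };
type AssumedPart = AssumedTextPart | AssumedImagePart;

function toolReturnToText(toolReturn: string | AssumedPart[]): string {
  if (typeof toolReturn === 'string') return toolReturn;
  return toolReturn
    .map((part) => (part.type === 'text' ? part.text : '[image content]'))
    .join('\n');
}

// toolReturnToText('ok')                             -> 'ok'
// toolReturnToText([{ type: 'text', text: 'done' }]) -> 'done'
```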
@@ -1939,6 +1983,13 @@ export interface MessageCreateParamsBase {
    */
   messages?: Array<AgentsAPI.MessageCreate | ApprovalCreate> | null;

+  /**
+   * Model handle to use for this request instead of the agent's default model. This
+   * allows sending a message to a different model without changing the agent's
+   * configuration.
+   */
+  override_model?: string | null;
+
   /**
    * Flag to determine if individual tokens should be streamed, rather than streaming
    * per step (only used when streaming=true).
@@ -2134,6 +2185,7 @@ export namespace MessageCompactParams {
       | AgentsAPI.DeepseekModelSettings
       | AgentsAPI.TogetherModelSettings
       | AgentsAPI.BedrockModelSettings
+      | CompactionSettings.ChatGptoAuthModelSettings
       | null;

     /**
@@ -2188,6 +2240,48 @@ export namespace MessageCompactParams {
       */
      temperature?: number;
     }
+
+    /**
+     * ChatGPT OAuth model configuration (uses ChatGPT backend API).
+     */
+    export interface ChatGptoAuthModelSettings {
+      /**
+       * The maximum number of tokens the model can generate.
+       */
+      max_output_tokens?: number;
+
+      /**
+       * Whether to enable parallel tool calling.
+       */
+      parallel_tool_calls?: boolean;
+
+      /**
+       * The type of the provider.
+       */
+      provider_type?: 'chatgpt_oauth';
+
+      /**
+       * The reasoning configuration for the model.
+       */
+      reasoning?: ChatGptoAuthModelSettings.Reasoning;
+
+      /**
+       * The temperature of the model.
+       */
+      temperature?: number;
+    }
+
+    export namespace ChatGptoAuthModelSettings {
+      /**
+       * The reasoning configuration for the model.
+       */
+      export interface Reasoning {
+        /**
+         * The reasoning effort level for GPT-5.x and o-series models.
+         */
+        reasoning_effort?: 'none' | 'low' | 'medium' | 'high' | 'xhigh';
+      }
+    }
   }
 }

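Compaction requests can now carry ChatGPT OAuth model settings. The object below matches the `ChatGptoAuthModelSettings` interface added above; which field of `MessageCompactParams` it is attached to (and whether the method is `client.agents.messages.compact`) is not visible in this hunk, so treat the wiring as an assumption to check against the generated types.

```ts
// Settings object shaped like the new ChatGptoAuthModelSettings interface.
// Values are placeholders for illustration.
const chatgptOauthSettings = {
  provider_type: 'chatgpt_oauth' as const,
  max_output_tokens: 4096,
  parallel_tool_calls: true,
  temperature: 0.2,
  reasoning: {
    // Effort levels accepted for GPT-5.x and o-series models.
    reasoning_effort: 'medium' as const,
  },
};
```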
@@ -2257,6 +2351,13 @@ export interface MessageCreateAsyncParams {
    */
   messages?: Array<AgentsAPI.MessageCreate | ApprovalCreate> | null;

+  /**
+   * Model handle to use for this request instead of the agent's default model. This
+   * allows sending a message to a different model without changing the agent's
+   * configuration.
+   */
+  override_model?: string | null;
+
   /**
    * @deprecated Whether the server should parse specific tool call arguments
    * (default `send_message`) as `AssistantMessage` objects. Still supported for
@@ -2410,6 +2511,13 @@ export interface MessageStreamParams {
    */
   messages?: Array<AgentsAPI.MessageCreate | ApprovalCreate> | null;

+  /**
+   * Model handle to use for this request instead of the agent's default model. This
+   * allows sending a message to a different model without changing the agent's
+   * configuration.
+   */
+  override_model?: string | null;
+
   /**
    * Flag to determine if individual tokens should be streamed, rather than streaming
    * per step (only used when streaming=true).
@@ -31,6 +31,17 @@ export class Conversations extends APIResource {
     return this._client.get(path`/v1/conversations/${conversationID}`, options);
   }

+  /**
+   * Update a conversation.
+   */
+  update(
+    conversationID: string,
+    body: ConversationUpdateParams,
+    options?: RequestOptions,
+  ): APIPromise<Conversation> {
+    return this._client.patch(path`/v1/conversations/${conversationID}`, { body, ...options });
+  }
+
   /**
    * List all conversations for an agent.
    */
@@ -116,6 +127,16 @@ export interface CreateConversation {
   summary?: string | null;
 }

+/**
+ * Request model for updating a conversation.
+ */
+export interface UpdateConversation {
+  /**
+   * A summary of the conversation.
+   */
+  summary?: string | null;
+}
+
 export type ConversationListResponse = Array<Conversation>;

 export type ConversationCancelResponse = { [key: string]: unknown };
@@ -139,6 +160,13 @@ export interface ConversationCreateParams {
   summary?: string | null;
 }

+export interface ConversationUpdateParams {
+  /**
+   * A summary of the conversation.
+   */
+  summary?: string | null;
+}
+
 export interface ConversationListParams {
   /**
    * The agent ID to list conversations for
@@ -162,9 +190,11 @@ export declare namespace Conversations {
   export {
     type Conversation as Conversation,
     type CreateConversation as CreateConversation,
+    type UpdateConversation as UpdateConversation,
     type ConversationListResponse as ConversationListResponse,
     type ConversationCancelResponse as ConversationCancelResponse,
     type ConversationCreateParams as ConversationCreateParams,
+    type ConversationUpdateParams as ConversationUpdateParams,
     type ConversationListParams as ConversationListParams,
   };

@@ -4,9 +4,11 @@ export {
   Conversations,
   type Conversation,
   type CreateConversation,
+  type UpdateConversation,
   type ConversationListResponse,
   type ConversationCancelResponse,
   type ConversationCreateParams,
+  type ConversationUpdateParams,
   type ConversationListParams,
 } from './conversations';
 export {
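The conversations resource gains an `update` method (a `PATCH` to `/v1/conversations/{conversation_id}`) plus the matching `UpdateConversation` and `ConversationUpdateParams` types, and both types are re-exported from the resource indexes. A usage sketch, assuming the resource is mounted on the client as `client.conversations` and that `Conversation` exposes `id` and `summary`:

```ts
import Letta from '@letta-ai/letta-client';

// Patch a conversation's summary; the ID and summary text are placeholders.
async function updateSummary(client: Letta, conversationId: string) {
  const conversation = await client.conversations.update(conversationId, {
    summary: 'Weekly planning thread with the support agent',
  });
  console.log(conversation.id, conversation.summary);
}
```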
@@ -141,6 +141,13 @@ export interface MessageCreateParams {
    */
   messages?: Array<AgentsAPI.MessageCreate | MessagesAPI.ApprovalCreate> | null;

+  /**
+   * Model handle to use for this request instead of the agent's default model. This
+   * allows sending a message to a different model without changing the agent's
+   * configuration.
+   */
+  override_model?: string | null;
+
   /**
    * Flag to determine if individual tokens should be streamed, rather than streaming
    * per step (only used when streaming=true).
package/src/resources/index.ts CHANGED
@@ -72,9 +72,11 @@ export {
   Conversations,
   type Conversation,
   type CreateConversation,
+  type UpdateConversation,
   type ConversationListResponse,
   type ConversationCancelResponse,
   type ConversationCreateParams,
+  type ConversationUpdateParams,
   type ConversationListParams,
 } from './conversations/conversations';
 export {
@@ -241,7 +241,8 @@ export interface LlmConfig {
     | 'bedrock'
     | 'deepseek'
     | 'xai'
-    | 'zai'
+    | 'zai'
+    | 'chatgpt_oauth';

   /**
    * The framework compatibility type for the model.
@@ -338,6 +339,13 @@ export interface LlmConfig {
     | AgentsAPI.JsonObjectResponseFormat
     | null;

+  /**
+   * Enable strict mode for tool calling. When true, tool schemas include strict:
+   * true and additionalProperties: false, guaranteeing tool outputs match JSON
+   * schemas.
+   */
+  strict?: boolean;
+
   /**
    * The temperature to use when generating text with the model. A higher temperature
    * will result in more random text.
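`LlmConfig` (and `Model` below) pick up a `strict` flag that turns on strict tool-schema generation. A partial config sketch follows; the other field names and values are illustrative, assumed to be the existing required fields of `LlmConfig`, and where the object is supplied (agent create or update) follows the existing API rather than anything new in this diff.

```ts
// Illustrative LlmConfig fragment with the new flag. `model`,
// `model_endpoint_type`, and `context_window` are assumed to be the
// existing required fields; their values here are placeholders.
const llmConfig = {
  model: 'gpt-4.1',
  model_endpoint_type: 'openai' as const,
  context_window: 128_000,
  // With strict enabled, tool schemas are emitted with strict: true and
  // additionalProperties: false, so tool call arguments must match the
  // declared JSON schema exactly.
  strict: true,
};
```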
@@ -398,7 +406,8 @@ export interface Model {
     | 'bedrock'
     | 'deepseek'
     | 'xai'
-    | 'zai'
+    | 'zai'
+    | 'chatgpt_oauth';

   /**
    * The actual model name used by the provider
@@ -504,6 +513,13 @@ export interface Model {
     | AgentsAPI.JsonObjectResponseFormat
     | null;

+  /**
+   * Enable strict mode for tool calling. When true, tool schemas include strict:
+   * true and additionalProperties: false, guaranteeing tool outputs match JSON
+   * schemas.
+   */
+  strict?: boolean;
+
   /**
    * @deprecated Deprecated: The temperature to use when generating text with the
    * model.
@@ -528,6 +544,7 @@ export type ProviderType =
   | 'azure'
   | 'bedrock'
   | 'cerebras'
+  | 'chatgpt_oauth'
   | 'deepseek'
   | 'google_ai'
   | 'google_vertex'
@@ -540,6 +557,7 @@ export type ProviderType =
   | 'openai'
   | 'together'
   | 'vllm'
+  | 'sglang'
   | 'xai'
   | 'zai';

@@ -48,9 +48,13 @@ export type StepsArrayPage = ArrayPage<Step>;
  * Attributes: id (str): The unique identifier of the provider trace. request_json
  * (Dict[str, Any]): JSON content of the provider request. response_json (Dict[str,
  * Any]): JSON content of the provider response. step_id (str): ID of the step that
- * this trace is associated with.
- *
- *
+ * this trace is associated with. agent_id (str): ID of the agent that generated
+ * this trace. agent_tags (list[str]): Tags associated with the agent for
+ * filtering. call_type (str): Type of call (agent_step, summarization, etc.).
+ * run_id (str): ID of the run this trace is associated with. source (str): Source
+ * service that generated this trace (memgpt-server, lettuce-py). organization_id
+ * (str): The unique identifier of the organization. created_at (datetime): The
+ * timestamp when the object was created.
  */
 export interface ProviderTrace {
   /**
@@ -68,6 +72,21 @@ export interface ProviderTrace {
    */
   id?: string;

+  /**
+   * ID of the agent that generated this trace
+   */
+  agent_id?: string | null;
+
+  /**
+   * Tags associated with the agent for filtering
+   */
+  agent_tags?: Array<string> | null;
+
+  /**
+   * Type of call (agent_step, summarization, etc.)
+   */
+  call_type?: string | null;
+
   /**
    * The timestamp when the object was created.
    */
@@ -83,6 +102,16 @@ export interface ProviderTrace {
    */
   last_updated_by_id?: string | null;

+  /**
+   * ID of the run this trace is associated with
+   */
+  run_id?: string | null;
+
+  /**
+   * Source service that generated this trace (memgpt-server, lettuce-py)
+   */
+  source?: string | null;
+
   /**
    * ID of the step that this trace is associated with
    */
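`ProviderTrace` now records which agent and run produced a trace, the call type, and the originating service. The helper below only reads those new fields; it is typed structurally so it does not depend on a particular import path, and how the trace is fetched (presumably via the steps resource) is outside this hunk.

```ts
// Summarize the new attribution fields on a provider trace.
function describeProviderTrace(trace: {
  agent_id?: string | null;
  agent_tags?: Array<string> | null;
  call_type?: string | null;
  run_id?: string | null;
  source?: string | null;
}): string {
  const agent = trace.agent_id ?? 'unknown-agent';
  const callType = trace.call_type ?? 'unknown-call';
  const source = trace.source ?? 'unknown-source';
  const tags = trace.agent_tags?.length ? ` tags=[${trace.agent_tags.join(', ')}]` : '';
  return `${agent} (${callType}) from ${source}, run=${trace.run_id ?? 'n/a'}${tags}`;
}
```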
package/src/version.ts CHANGED
@@ -1 +1 @@
-export const VERSION = '1.
+export const VERSION = '1.7.2'; // x-release-please-version
package/version.d.mts CHANGED
@@ -1,2 +1,2 @@
-export declare const VERSION = "1.
+export declare const VERSION = "1.7.2";
 //# sourceMappingURL=version.d.mts.map
package/version.d.ts CHANGED
@@ -1,2 +1,2 @@
-export declare const VERSION = "1.
+export declare const VERSION = "1.7.2";
 //# sourceMappingURL=version.d.ts.map
package/version.js CHANGED
package/version.mjs CHANGED
@@ -1,2 +1,2 @@
-export const VERSION = '1.
+export const VERSION = '1.7.2'; // x-release-please-version
 //# sourceMappingURL=version.mjs.map