@mnexium/sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +401 -0
- package/dist/index.d.mts +944 -0
- package/dist/index.d.ts +944 -0
- package/dist/index.js +1110 -0
- package/dist/index.mjs +1074 -0
- package/package.json +67 -0
package/dist/index.d.mts
ADDED
@@ -0,0 +1,944 @@
/**
 * Mnexium SDK Types
 */
/** Provider API key configuration */
interface ProviderConfig {
    apiKey: string;
}
/** Default settings for all process() calls */
interface MnexiumDefaults {
    /** Default model to use */
    model?: string;
    /** Subject ID for memory scoping. Auto-generated with subj_ prefix if omitted */
    subjectId?: string;
    /** Chat ID for conversation tracking. Auto-generated if omitted */
    chatId?: string;
    /** Save messages to chat history. Default: true */
    log?: boolean;
    /** Memory extraction: true (LLM decides), 'force' (always), false (never). Default: true */
    learn?: boolean | 'force';
    /** Inject relevant stored memories into context. Default: false */
    recall?: boolean;
    /** Include user profile in context. Default: false */
    profile?: boolean;
    /** Prepend previous messages from this chat. Default: true */
    history?: boolean;
    /** Enable conversation summarization. Use preset modes: 'light', 'balanced', 'aggressive'. Default: false */
    summarize?: boolean | 'light' | 'balanced' | 'aggressive';
    /** System prompt: true (auto-resolve), false (skip), or prompt ID like 'sp_abc'. Default: true */
    systemPrompt?: boolean | string;
    /** Custom metadata attached to saved logs */
    metadata?: Record<string, unknown>;
    /** Maximum tokens to generate */
    maxTokens?: number;
    /** Temperature for sampling */
    temperature?: number;
    /** Force regenerate trial key on every request (for testing) */
    regenerateKey?: boolean;
}
interface MnexiumConfig {
    /** Mnexium API key. If omitted, a trial key will be auto-provisioned. */
    apiKey?: string;
    /** Base URL for the Mnexium API. Defaults to https://mnexium.com/api/v1 */
    baseUrl?: string;
    /** Request timeout in milliseconds. Defaults to 30000 */
    timeout?: number;
    /** Maximum number of retries for failed requests. Defaults to 2 */
    maxRetries?: number;
    /** OpenAI configuration */
    openai?: ProviderConfig;
    /** Anthropic configuration */
    anthropic?: ProviderConfig;
    /** Google configuration */
    google?: ProviderConfig;
    /** Default settings for process() calls */
    defaults?: MnexiumDefaults;
}
/** Options for creating a chat session (legacy name) */
interface ChatSessionOptions {
    /** Subject ID for memory scoping. Auto-generated if omitted */
    subjectId?: string;
    /** Chat ID for conversation tracking. Auto-generated if omitted */
    chatId?: string;
    /** Default model for this chat */
    model?: string;
    /** Save messages to chat history. Default: true */
    log?: boolean;
    /** Memory extraction: true (LLM decides), 'force' (always), false (never). Default: true */
    learn?: boolean | 'force';
    /** Inject relevant stored memories into context. Default: false */
    recall?: boolean;
    /** Include user profile in context. Default: false */
    profile?: boolean;
    /** Prepend previous messages from this chat. Default: true */
    history?: boolean;
    /** Enable conversation summarization */
    summarize?: boolean | 'light' | 'balanced' | 'aggressive';
    /** System prompt: true (auto-resolve), false (skip), or prompt ID */
    systemPrompt?: boolean | string;
    /** Maximum tokens */
    maxTokens?: number;
    /** Temperature for sampling */
    temperature?: number;
    /** Custom metadata attached to saved logs */
    metadata?: Record<string, unknown>;
}
/** Options for creating a Chat */
interface ChatOptions {
    /** Chat ID for conversation tracking. Auto-generated if omitted */
    chatId?: string;
    /** Default model for this chat */
    model?: string;
    /** Save messages to chat history. Default: true */
    log?: boolean;
    /** Memory extraction: true (LLM decides), 'force' (always), false (never). Default: true */
    learn?: boolean | 'force';
    /** Inject relevant stored memories into context. Default: false */
    recall?: boolean;
    /** Include user profile in context. Default: false */
    profile?: boolean;
    /** Prepend previous messages from this chat. Default: true */
    history?: boolean;
    /** Enable conversation summarization */
    summarize?: boolean | 'light' | 'balanced' | 'aggressive';
    /** System prompt: true (auto-resolve), false (skip), or prompt ID */
    systemPrompt?: boolean | string;
    /** Maximum tokens */
    maxTokens?: number;
    /** Temperature for sampling */
    temperature?: number;
    /** Custom metadata attached to saved logs */
    metadata?: Record<string, unknown>;
}
/** Options for listing chat history */
interface ChatHistoryListOptions {
    /** Maximum number of chats to return */
    limit?: number;
    /** Offset for pagination */
    offset?: number;
}
/** A chat history item */
interface ChatHistoryItem {
    /** Chat ID */
    chat_id: string;
    /** Subject ID */
    subject_id: string;
    /** Number of messages in the chat */
    message_count?: number;
    /** Timestamp of last message */
    last_message_at?: string;
    /** Timestamp when chat was created */
    created_at: string;
}
/** Options for chat.process() - content only, uses chat session defaults */
interface ChatProcessOptions {
    /** The user message content */
    content: string;
    /** Override model for this message */
    model?: string;
    /** Override log setting for this message */
    log?: boolean;
    /** Override learn setting for this message */
    learn?: boolean | 'force';
    /** Override recall setting for this message */
    recall?: boolean;
    /** Override profile setting for this message */
    profile?: boolean;
    /** Override history setting for this message */
    history?: boolean;
    /** Override summarize setting for this message */
    summarize?: boolean | 'light' | 'balanced' | 'aggressive';
    /** Override system prompt for this message */
    systemPrompt?: boolean | string;
    /** Override max tokens for this message */
    maxTokens?: number;
    /** Override temperature for this message */
    temperature?: number;
    /** Enable streaming */
    stream?: boolean;
    /** Override metadata for this message */
    metadata?: Record<string, unknown>;
    /** Force regenerate trial key */
    regenerateKey?: boolean;
}
/** Options for the simplified process() API */
interface ProcessOptions {
    /** The user message content */
    content: string;
    /** Model to use (overrides default) */
    model?: string;
    /** Subject ID for memory scoping */
    subjectId?: string;
    /** Chat ID for conversation tracking */
    chatId?: string;
    /** Save messages to chat history. Default: true */
    log?: boolean;
    /** Memory extraction: true (LLM decides), 'force' (always), false (never). Default: true */
    learn?: boolean | 'force';
    /** Inject relevant stored memories into context. Default: false */
    recall?: boolean;
    /** Include user profile in context. Default: false */
    profile?: boolean;
    /** Prepend previous messages from this chat. Default: true */
    history?: boolean;
    /** Enable conversation summarization */
    summarize?: boolean | 'light' | 'balanced' | 'aggressive';
    /** System prompt: true (auto-resolve), false (skip), or prompt ID */
    systemPrompt?: boolean | string;
    /** Maximum tokens (overrides default) */
    maxTokens?: number;
    /** Temperature (overrides default) */
    temperature?: number;
    /** Enable streaming */
    stream?: boolean;
    /** Custom metadata attached to saved logs */
    metadata?: Record<string, unknown>;
    /** Force regenerate trial key (for key recovery) */
    regenerateKey?: boolean;
}
/** Response from process() */
interface ProcessResponse {
    /** The assistant's response text */
    content: string;
    /** Chat ID for this conversation */
    chatId: string;
    /** Subject ID used */
    subjectId: string;
    /** Model used */
    model: string;
    /** Token usage */
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
    /** Provisioned trial key (if auto-provisioned) */
    provisionedKey?: string;
    /** Claim URL for trial key */
    claimUrl?: string;
    /** Full raw response (for advanced use) */
    raw: ChatCompletionResponse;
}
interface ChatMessage {
    role: 'system' | 'user' | 'assistant' | 'tool';
    content: string;
    name?: string;
    tool_calls?: ToolCall[];
    tool_call_id?: string;
}
interface ToolCall {
    id: string;
    type: 'function';
    function: {
        name: string;
        arguments: string;
    };
}
interface MnxOptions {
    /** Subject ID for memory scoping */
    subjectId?: string;
    /** Chat ID for conversation tracking */
    chatId?: string;
    /** Enable automatic memory learning. Defaults to true */
    learn?: boolean | 'force';
    /** Enable memory recall. Defaults to false */
    recall?: boolean;
    /** Enable chat history. Defaults to true */
    history?: boolean;
    /** Enable chat logging. Defaults to true */
    log?: boolean;
    /** Agent state configuration */
    state?: {
        load?: boolean;
        key?: string;
    };
    /** System prompt ID or false to disable */
    systemPrompt?: string | false;
    /** Additional metadata to attach */
    metadata?: Record<string, unknown>;
    /** Force regenerate trial key (for key recovery) */
    regenerateKey?: boolean;
}
interface ChatCompletionOptions extends MnxOptions {
    /** Model to use (e.g., 'gpt-4o-mini', 'claude-3-sonnet', 'gemini-1.5-pro') */
    model: string;
    /** Messages for the conversation */
    messages: ChatMessage[];
    /** OpenAI API key */
    openaiKey?: string;
    /** Anthropic API key */
    anthropicKey?: string;
    /** Google API key */
    googleKey?: string;
    /** Enable streaming */
    stream?: boolean;
    /** Maximum tokens to generate */
    maxTokens?: number;
    /** Temperature for sampling */
    temperature?: number;
    /** Top-p sampling */
    topP?: number;
    /** Stop sequences */
    stop?: string | string[];
}
interface ChatCompletionChoice {
    index: number;
    message: ChatMessage;
    finish_reason: string | null;
}
interface ChatCompletionUsage {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
}
interface MnxResponseData {
    chat_id: string;
    subject_id: string;
    provisioned_key?: string;
    claim_url?: string;
}
interface ChatCompletionResponse {
    id: string;
    object: string;
    created: number;
    model: string;
    choices: ChatCompletionChoice[];
    usage?: ChatCompletionUsage;
    mnx: MnxResponseData;
}
interface Memory {
    id: string;
    project_id: string;
    subject_id: string;
    text: string;
    source?: string;
    visibility?: 'private' | 'shared';
    metadata?: Record<string, unknown>;
    created_at: string;
    updated_at?: string;
    is_deleted?: boolean;
    superseded_by?: string;
}
interface MemoryCreateOptions {
    /** Subject ID to associate the memory with */
    subjectId: string;
    /** Memory text content */
    text: string;
    /** Source of the memory (e.g., 'user', 'agent', 'system') */
    source?: string;
    /** Visibility: 'private' (default) or 'shared' */
    visibility?: 'private' | 'shared';
    /** Additional metadata */
    metadata?: Record<string, unknown>;
}
interface MemorySearchOptions {
    /** Subject ID to search within */
    subjectId: string;
    /** Search query */
    query: string;
    /** Maximum number of results. Defaults to 10 */
    limit?: number;
    /** Minimum similarity score (0-1). Defaults to 0.7 */
    minScore?: number;
    /** Include deleted memories */
    includeDeleted?: boolean;
    /** Include superseded memories */
    includeSuperseded?: boolean;
}
interface MemorySearchResult {
    memory: Memory;
    score: number;
}
interface Claim {
    id: string;
    project_id: string;
    subject_id: string;
    slot: string;
    value: unknown;
    confidence?: number;
    source?: string;
    source_memory_id?: string;
    created_at: string;
    updated_at?: string;
    retracted_at?: string;
}
interface ClaimCreateOptions {
    /** Subject ID */
    subjectId: string;
    /** Slot name (e.g., 'favorite_color', 'location') */
    slot: string;
    /** Claim value */
    value: unknown;
    /** Confidence score (0-1) */
    confidence?: number;
    /** Source of the claim */
    source?: string;
    /** ID of the memory this claim was extracted from */
    sourceMemoryId?: string;
}
interface Profile {
    subject_id: string;
    project_id: string;
    claims: Record<string, unknown>;
    memory_count: number;
    last_active?: string;
}
interface AgentState {
    key: string;
    value: unknown;
    subject_id?: string;
    ttl_seconds?: number;
    created_at: string;
    updated_at?: string;
    expires_at?: string;
}
interface AgentStateSetOptions {
    /** State key */
    key: string;
    /** State value (any JSON-serializable value) */
    value: unknown;
    /** Subject ID (optional, for subject-scoped state) */
    subjectId?: string;
    /** Time-to-live in seconds (optional) */
    ttlSeconds?: number;
}
interface SystemPrompt {
    id: string;
    project_id: string;
    name: string;
    prompt_text: string;
    is_default?: boolean;
    created_at: string;
    updated_at?: string;
}
interface SystemPromptCreateOptions {
    /** Prompt name */
    name: string;
    /** Prompt text */
    promptText: string;
    /** Set as default prompt for the project */
    isDefault?: boolean;
}

/**
 * Streaming support for Mnexium SDK
 *
 * Provides an async iterable that yields text chunks from SSE streams,
 * with access to final metadata after iteration completes.
 */
interface StreamChunk {
    /** Incremental text content */
    content: string;
    /** The raw SSE data object (provider-specific) */
    raw?: unknown;
}
/**
 * StreamResponse is an async iterable that yields text chunks from an SSE stream.
 *
 * After iteration completes, metadata like usage, chatId, etc. are available.
 *
 * @example
 * const stream = await chat.process({ content: 'Hello', stream: true });
 * for await (const chunk of stream) {
 *   process.stdout.write(chunk.content);
 * }
 * console.log('Done!', stream.totalContent);
 * console.log('Usage:', stream.usage);
 */
declare class StreamResponse implements AsyncIterable<StreamChunk> {
    /** The full accumulated content after streaming completes */
    totalContent: string;
    /** Chat ID (from response headers) */
    chatId: string;
    /** Subject ID (from response headers) */
    subjectId: string;
    /** Model used */
    model: string;
    /** Token usage (populated after stream ends, if available) */
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
    /** Provisioned trial key */
    provisionedKey?: string;
    /** Claim URL */
    claimUrl?: string;
    private reader;
    private decoder;
    private consumed;
    constructor(response: Response, metadata: {
        chatId: string;
        subjectId: string;
        model: string;
        provisionedKey?: string;
        claimUrl?: string;
    });
    [Symbol.asyncIterator](): AsyncIterator<StreamChunk>;
    /**
     * Convenience: collect the full response as a string.
     * Consumes the stream if not already consumed.
     */
    text(): Promise<string>;
    private _extractChunk;
    private _extractUsage;
}

/**
 * Chat - A conversation thread with a stable chatId
 */

/**
 * Chat represents a conversation thread with a stable chatId
 *
 * Chats belong to a Subject and maintain conversation history.
 *
 * @example
 * const chat = alice.createChat({ history: true });
 * await chat.process("Hello!");
 * await chat.process("What did I just say?");
 */
declare class Chat {
    /** The chat ID */
    readonly id: string;
    /** The subject ID this chat belongs to */
    readonly subjectId: string;
    private readonly client;
    private readonly options;
    constructor(client: Mnexium, subjectId: string, options?: ChatOptions);
    /**
     * Process a message in this chat
     *
     * @example
     * // Non-streaming
     * const response = await chat.process('Hello!');
     *
     * // Streaming
     * const stream = await chat.process({ content: 'Hello!', stream: true });
     * for await (const chunk of stream) {
     *   process.stdout.write(chunk.content);
     * }
     */
    process(input: string): Promise<ProcessResponse>;
    process(input: ChatProcessOptions & {
        stream: true;
    }): Promise<StreamResponse>;
    process(input: ChatProcessOptions): Promise<ProcessResponse>;
}

/**
 * Real-time event stream for memory notifications via SSE
 *
 * Connects to GET /api/v1/events/memories and yields typed events.
 *
 * Events:
 * - connected: Initial connection confirmation
 * - memory.created: New memory created
 * - memory.updated: Memory updated
 * - memory.deleted: Memory deleted
 * - memory.superseded: Memory superseded
 * - profile.updated: Profile updated
 * - heartbeat: Keepalive (every 30s)
 */

interface MemoryEvent {
    /** Event type */
    type: 'connected' | 'memory.created' | 'memory.updated' | 'memory.deleted' | 'memory.superseded' | 'profile.updated' | 'heartbeat';
    /** Event payload */
    data: Record<string, unknown>;
}
/**
 * EventStream is an async iterable that yields real-time memory events.
 *
 * @example
 * const events = user.memories.subscribe();
 * for await (const event of events) {
 *   if (event.type === 'memory.created') {
 *     console.log('New memory:', event.data);
 *   }
 * }
 *
 * // To stop listening:
 * events.close();
 */
declare class EventStream implements AsyncIterable<MemoryEvent> {
    private readonly client;
    private readonly subjectId;
    private reader;
    private decoder;
    private abortController;
    private connected;
    constructor(client: Mnexium, subjectId: string, options?: {
        signal?: AbortSignal;
    });
    [Symbol.asyncIterator](): AsyncIterator<MemoryEvent>;
    /** Close the event stream */
    close(): void;
    /** Whether the stream is currently connected */
    get isConnected(): boolean;
}

/**
 * Subject - A logical identity (user, agent, org, device) that owns memory, profile, and state
 */

/**
 * Subject-scoped memories resource
 */
declare class SubjectMemoriesResource {
    private readonly client;
    private readonly subjectId;
    constructor(client: Mnexium, subjectId: string);
    search(query: string, options?: {
        limit?: number;
        minScore?: number;
    }): Promise<any[]>;
    add(text: string, options?: {
        source?: string;
        visibility?: 'private' | 'shared';
        metadata?: Record<string, unknown>;
    }): Promise<any>;
    list(options?: {
        limit?: number;
        offset?: number;
    }): Promise<any[]>;
    get(memoryId: string): Promise<any>;
    update(memoryId: string, updates: {
        text?: string;
        visibility?: 'private' | 'shared';
        metadata?: Record<string, unknown>;
    }): Promise<any>;
    delete(memoryId: string): Promise<void>;
    superseded(options?: {
        limit?: number;
        offset?: number;
    }): Promise<any[]>;
    restore(memoryId: string): Promise<any>;
    recalls(options: {
        chatId?: string;
        memoryId?: string;
    }): Promise<any[]>;
    subscribe(options?: {
        signal?: AbortSignal;
    }): EventStream;
}
/**
 * Subject-scoped profile resource
 */
declare class SubjectProfileResource {
    private readonly client;
    private readonly subjectId;
    constructor(client: Mnexium, subjectId: string);
    get(): Promise<any>;
    update(updates: Array<{
        field_key: string;
        value: unknown;
    }>): Promise<any>;
    deleteField(fieldKey: string): Promise<void>;
}
/**
 * Subject-scoped state resource
 */
declare class SubjectStateResource {
    private readonly client;
    private readonly subjectId;
    constructor(client: Mnexium, subjectId: string);
    get(key: string): Promise<any | null>;
    set(key: string, value: unknown, options?: {
        ttlSeconds?: number;
    }): Promise<any>;
    delete(key: string): Promise<void>;
}
/**
 * Subject-scoped claims resource
 */
declare class SubjectClaimsResource {
    private readonly client;
    private readonly subjectId;
    constructor(client: Mnexium, subjectId: string);
    get(slot: string): Promise<any | null>;
    set(predicate: string, value: unknown, options?: {
        confidence?: number;
        source?: string;
    }): Promise<any>;
    list(): Promise<Record<string, unknown>>;
    truth(): Promise<any>;
    history(): Promise<any[]>;
    retract(claimId: string): Promise<any>;
}
/**
 * Subject-scoped chats resource (for listing chat history)
 */
declare class SubjectChatsResource {
    private readonly client;
    private readonly subjectId;
    constructor(client: Mnexium, subjectId: string);
    list(options?: ChatHistoryListOptions): Promise<ChatHistoryItem[]>;
    read(chatId: string): Promise<any[]>;
    delete(chatId: string): Promise<void>;
}
/**
 * Subject represents a logical identity (user, agent, org, device)
 *
 * Creating a Subject does NOT make a network call - it's a lightweight scoped handle.
 *
 * @example
 * const alice = mnx.subject("user_123");
 * await alice.process("Hello!");
 * await alice.memories.search("hobbies");
 */
declare class Subject {
    private readonly client;
    /** The subject ID */
    readonly id: string;
    /** Subject-scoped memories */
    readonly memories: SubjectMemoriesResource;
    /** Subject-scoped profile */
    readonly profile: SubjectProfileResource;
    /** Subject-scoped state */
    readonly state: SubjectStateResource;
    /** Subject-scoped claims */
    readonly claims: SubjectClaimsResource;
    /** Subject-scoped chat history */
    readonly chats: SubjectChatsResource;
    constructor(client: Mnexium, subjectId: string);
    /**
     * Process a message with an ephemeral chat (no persistent chatId)
     *
     * @example
     * const response = await alice.process("What's my favorite color?");
     */
    process(input: string | Omit<ProcessOptions, 'subjectId'>): Promise<ProcessResponse>;
    /**
     * Create a chat session for multi-turn conversation
     *
     * @example
     * const chat = alice.createChat({ history: true });
     * await chat.process("Hello!");
     * await chat.process("What did I just say?");
     */
    createChat(options?: Omit<ChatOptions, 'subjectId'>): Chat;
}

/**
 * Chat Resource - Chat completions API
 */

declare class ChatResource {
    readonly completions: ChatCompletionsResource;
    constructor(client: Mnexium);
}
declare class ChatCompletionsResource {
    private readonly client;
    constructor(client: Mnexium);
    create(options: ChatCompletionOptions): Promise<ChatCompletionResponse>;
}

/**
 * Memories Resource - Memory management API
 */

declare class MemoriesResource {
    private readonly client;
    constructor(client: Mnexium);
    create(options: MemoryCreateOptions): Promise<Memory>;
    get(id: string): Promise<Memory>;
    list(subjectId: string, options?: {
        limit?: number;
        offset?: number;
    }): Promise<Memory[]>;
    search(options: MemorySearchOptions): Promise<MemorySearchResult[]>;
    delete(id: string): Promise<void>;
}

/**
 * Claims Resource - Structured claims API
 */

declare class ClaimsResource {
    private readonly client;
    constructor(client: Mnexium);
    create(options: ClaimCreateOptions): Promise<Claim>;
    get(id: string): Promise<Claim>;
    getBySlot(subjectId: string, slot: string): Promise<Claim | null>;
    listSlots(subjectId: string): Promise<Record<string, unknown>>;
    retract(id: string): Promise<void>;
}

/**
 * Profiles Resource - Subject profiles API
 */

declare class ProfilesResource {
    private readonly client;
    constructor(client: Mnexium);
    get(subjectId: string): Promise<Profile>;
}

/**
 * State Resource - Agent state management API
 */

declare class StateResource {
    private readonly client;
    constructor(client: Mnexium);
    get(key: string, subjectId?: string): Promise<AgentState | null>;
    set(options: AgentStateSetOptions): Promise<AgentState>;
    delete(key: string, subjectId?: string): Promise<void>;
}

/**
 * Prompts Resource - System prompts management API
 */

declare class PromptsResource {
    private readonly client;
    constructor(client: Mnexium);
    create(options: SystemPromptCreateOptions): Promise<SystemPrompt>;
    get(id: string): Promise<SystemPrompt>;
    list(): Promise<SystemPrompt[]>;
    update(id: string, options: Partial<SystemPromptCreateOptions>): Promise<SystemPrompt>;
    delete(id: string): Promise<void>;
    resolve(options?: {
        subjectId?: string;
        chatId?: string;
        combined?: boolean;
    }): Promise<any>;
}

/**
 * Mnexium SDK Client
 */

declare class Mnexium {
    private readonly apiKey?;
    private readonly baseUrl;
    private readonly timeout;
    private readonly maxRetries;
    private provisionedKey?;
    private readonly openaiConfig?;
    private readonly anthropicConfig?;
    private readonly googleConfig?;
    private readonly defaults;
    readonly chat: ChatResource;
    readonly memories: MemoriesResource;
    readonly claims: ClaimsResource;
    readonly profiles: ProfilesResource;
    readonly state: StateResource;
    readonly prompts: PromptsResource;
    constructor(config?: MnexiumConfig);
    /**
     * Process a message with Mnexium's memory-enhanced AI
     *
     * This is the simplified, recommended API for most use cases.
     *
     * @example
     * // Simple usage
     * const response = await mnx.process("Hello!");
     *
     * // With options
     * const response = await mnx.process({
     *   content: "Hello!",
     *   model: "gpt-4o",
     *   subjectId: "user_123",
     * });
     */
    process(input: string): Promise<ProcessResponse>;
    process(input: ProcessOptions & {
        stream: true;
    }): Promise<StreamResponse>;
    process(input: ProcessOptions): Promise<ProcessResponse>;
    /**
     * Get the provisioned trial key (if auto-provisioned)
     * @deprecated Use getTrialInfo() instead
     */
    getProvisionedKey(): string | undefined;
    /**
     * Get trial key info including the key and claim URL
     *
     * @example
     * const trial = mnx.getTrialInfo();
     * if (trial) {
     *   console.log('Key:', trial.key);
     *   console.log('Claim at:', trial.claimUrl);
     * }
     */
    getTrialInfo(): {
        key: string;
        claimUrl: string;
    } | null;
    /**
     * Get a Subject handle for a given subject ID
     *
     * Creating a Subject does NOT make a network call - it's a lightweight scoped handle.
     *
     * @example
     * const alice = mnx.subject("user_123");
     * await alice.process("Hello!");
     * await alice.memories.search("hobbies");
     */
    subject(subjectId?: string): Subject;
    /**
     * Create a chat for a subject
     *
     * @example
     * const alice = mnx.subject("user_123");
     * const chat = mnx.createChat(alice, { history: true });
     * // or with string:
     * const chat = mnx.createChat("user_123", { history: true });
     */
    createChat(subject: Subject | string, options?: ChatOptions): Chat;
    /**
     * @deprecated Use createChat(subject, options) instead
     */
    createChatSession(options?: ChatSessionOptions): Chat;
    /**
     * Internal: Set provisioned key from response
     */
    _setProvisionedKey(key: string): void;
    /**
     * Internal: Make a raw API request (returns Response, used for streaming)
     */
    _requestRaw(method: 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE', path: string, options?: {
        body?: unknown;
        headers?: Record<string, string>;
        query?: Record<string, string | number | boolean | undefined>;
    }): Promise<Response>;
    /**
     * Internal: Make an API request
     */
    _request<T>(method: 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE', path: string, options?: {
        body?: unknown;
        headers?: Record<string, string>;
        query?: Record<string, string | number | boolean | undefined>;
    }): Promise<T>;
    private _handleErrorResponse;
    private _sleep;
}

/**
 * Mnexium SDK Errors
 */
declare class MnexiumError extends Error {
    constructor(message: string);
}
declare class AuthenticationError extends MnexiumError {
    constructor(message?: string);
}
declare class RateLimitError extends MnexiumError {
    readonly retryAfter?: number;
    readonly current?: number;
    readonly limit?: number;
    constructor(message?: string, options?: {
        retryAfter?: number;
        current?: number;
        limit?: number;
    });
}
declare class APIError extends MnexiumError {
    readonly status: number;
    readonly code?: string;
    constructor(message: string, status: number, code?: string);
}

export { APIError, type AgentState, type AgentStateSetOptions, AuthenticationError, Chat, type ChatCompletionOptions, type ChatCompletionResponse, type ChatHistoryItem, type ChatHistoryListOptions, type ChatMessage, type ChatOptions, type ChatProcessOptions, Chat as ChatSession, type ChatSessionOptions, type Claim, type ClaimCreateOptions, EventStream, type Memory, type MemoryCreateOptions, type MemoryEvent, type MemorySearchOptions, type MemorySearchResult, Mnexium, type MnexiumConfig, type MnexiumDefaults, MnexiumError, type MnxOptions, type ProcessOptions, type ProcessResponse, type Profile, type ProviderConfig, RateLimitError, type StreamChunk, StreamResponse, Subject, type SystemPrompt, type SystemPromptCreateOptions };
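The declarations above cover the package's entire public surface: the Mnexium client, Subject and Chat handles, streaming via StreamResponse, event subscriptions via EventStream, and resource classes for memories, claims, profiles, state, and prompts. The sketch below is not part of the published file; it is an illustrative usage example assembled only from those declarations and their @example JSDoc blocks. The import specifier resolution, environment-variable names, model name, and subject ID are assumptions made for the sketch.

// Usage sketch based on the declared API (illustrative only; names marked as
// placeholders are assumptions, not package contents).
import { Mnexium, RateLimitError } from '@mnexium/sdk';

// Per MnexiumConfig, apiKey is optional: omitting it auto-provisions a trial key.
// Env-var names and the model string are placeholders.
const mnx = new Mnexium({
    apiKey: process.env.MNEXIUM_API_KEY,
    openai: { apiKey: process.env.OPENAI_API_KEY ?? '' },
    defaults: { model: 'gpt-4o-mini', recall: true },
});

async function main(): Promise<void> {
    // One-shot call: ProcessOptions in, ProcessResponse out.
    const reply = await mnx.process({
        content: 'Hi, I am Alice and I love hiking.',
        subjectId: 'user_123',
        learn: true,
    });
    console.log(reply.content, reply.chatId, reply.usage?.totalTokens);

    // Subject handle (no network call) plus a multi-turn chat with history.
    const alice = mnx.subject('user_123');
    const chat = alice.createChat({ history: true, recall: true });
    await chat.process('What outdoor activities do I like?');

    // Streaming: passing stream: true selects the StreamResponse overload.
    const stream = await chat.process({ content: 'Suggest a weekend trip.', stream: true });
    for await (const chunk of stream) {
        process.stdout.write(chunk.content);
    }
    console.log('\ntotal tokens:', stream.usage?.totalTokens);

    // Subject-scoped memory search.
    const hits = await alice.memories.search('hiking', { limit: 5 });
    console.log('memories found:', hits.length);
}

main().catch((err) => {
    // The exported error classes allow targeted handling.
    if (err instanceof RateLimitError) {
        console.error('Rate limited; retry after', err.retryAfter);
    } else {
        throw err;
    }
});

Note the overload design visible in the declarations: both Mnexium.process and Chat.process accept either a plain string or an options object, and the literal stream: true in the options type is what switches the return type from Promise<ProcessResponse> to Promise<StreamResponse>.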