librechat-data-provider 0.8.402 → 0.8.404
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.es.js +1 -1
- package/dist/index.es.js.map +1 -1
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/dist/react-query/index.es.js +1 -1
- package/dist/react-query/index.es.js.map +1 -1
- package/dist/types/accessPermissions.d.ts +744 -0
- package/dist/types/actions.d.ts +118 -0
- package/dist/types/api-endpoints.d.ts +150 -0
- package/dist/types/artifacts.d.ts +97 -0
- package/dist/types/azure.d.ts +22 -0
- package/dist/types/bedrock.d.ts +1220 -0
- package/dist/types/config.d.ts +14849 -0
- package/dist/types/config.spec.d.ts +1 -0
- package/dist/types/createPayload.d.ts +5 -0
- package/dist/types/data-service.d.ts +287 -0
- package/dist/types/feedback.d.ts +36 -0
- package/dist/types/file-config.d.ts +263 -0
- package/dist/types/file-config.spec.d.ts +1 -0
- package/dist/types/generate.d.ts +597 -0
- package/dist/types/headers-helpers.d.ts +2 -0
- package/{src/index.ts → dist/types/index.d.ts} +0 -15
- package/dist/types/keys.d.ts +92 -0
- package/dist/types/mcp.d.ts +2760 -0
- package/dist/types/messages.d.ts +10 -0
- package/dist/types/models.d.ts +1547 -0
- package/dist/types/parameterSettings.d.ts +69 -0
- package/dist/types/parsers.d.ts +110 -0
- package/dist/types/permissions.d.ts +522 -0
- package/dist/types/react-query/react-query-service.d.ts +85 -0
- package/dist/types/request.d.ts +25 -0
- package/dist/types/roles.d.ts +554 -0
- package/dist/types/roles.spec.d.ts +1 -0
- package/dist/types/schemas.d.ts +5110 -0
- package/dist/types/schemas.spec.d.ts +1 -0
- package/dist/types/types/agents.d.ts +433 -0
- package/dist/types/types/assistants.d.ts +547 -0
- package/dist/types/types/files.d.ts +172 -0
- package/dist/types/types/graph.d.ts +135 -0
- package/{src/types/mcpServers.ts → dist/types/types/mcpServers.d.ts} +12 -18
- package/dist/types/types/mutations.d.ts +209 -0
- package/dist/types/types/queries.d.ts +169 -0
- package/dist/types/types/runs.d.ts +36 -0
- package/dist/types/types/web.d.ts +520 -0
- package/dist/types/types.d.ts +503 -0
- package/dist/types/utils.d.ts +12 -0
- package/package.json +5 -1
- package/babel.config.js +0 -4
- package/check_updates.sh +0 -52
- package/jest.config.js +0 -19
- package/react-query/package-lock.json +0 -292
- package/react-query/package.json +0 -10
- package/rollup.config.js +0 -74
- package/server-rollup.config.js +0 -40
- package/specs/actions.spec.ts +0 -2533
- package/specs/api-endpoints-subdir.spec.ts +0 -140
- package/specs/api-endpoints.spec.ts +0 -74
- package/specs/azure.spec.ts +0 -844
- package/specs/bedrock.spec.ts +0 -862
- package/specs/filetypes.spec.ts +0 -175
- package/specs/generate.spec.ts +0 -770
- package/specs/headers-helpers.spec.ts +0 -24
- package/specs/mcp.spec.ts +0 -147
- package/specs/openapiSpecs.ts +0 -524
- package/specs/parsers.spec.ts +0 -601
- package/specs/request-interceptor.spec.ts +0 -304
- package/specs/utils.spec.ts +0 -196
- package/src/accessPermissions.ts +0 -346
- package/src/actions.ts +0 -813
- package/src/api-endpoints.ts +0 -440
- package/src/artifacts.ts +0 -3104
- package/src/azure.ts +0 -328
- package/src/bedrock.ts +0 -425
- package/src/config.spec.ts +0 -315
- package/src/config.ts +0 -2006
- package/src/createPayload.ts +0 -46
- package/src/data-service.ts +0 -1087
- package/src/feedback.ts +0 -141
- package/src/file-config.spec.ts +0 -1248
- package/src/file-config.ts +0 -764
- package/src/generate.ts +0 -634
- package/src/headers-helpers.ts +0 -13
- package/src/keys.ts +0 -99
- package/src/mcp.ts +0 -271
- package/src/messages.ts +0 -50
- package/src/models.ts +0 -69
- package/src/parameterSettings.ts +0 -1111
- package/src/parsers.ts +0 -563
- package/src/permissions.ts +0 -188
- package/src/react-query/react-query-service.ts +0 -566
- package/src/request.ts +0 -171
- package/src/roles.spec.ts +0 -132
- package/src/roles.ts +0 -225
- package/src/schemas.spec.ts +0 -355
- package/src/schemas.ts +0 -1234
- package/src/types/agents.ts +0 -470
- package/src/types/assistants.ts +0 -654
- package/src/types/files.ts +0 -191
- package/src/types/graph.ts +0 -145
- package/src/types/mutations.ts +0 -422
- package/src/types/queries.ts +0 -208
- package/src/types/runs.ts +0 -40
- package/src/types/web.ts +0 -588
- package/src/types.ts +0 -676
- package/src/utils.ts +0 -85
- package/tsconfig.json +0 -28
- package/tsconfig.spec.json +0 -10
- /package/{src/react-query/index.ts → dist/types/react-query/index.d.ts} +0 -0
- /package/{src/types/index.ts → dist/types/types/index.d.ts} +0 -0
package/src/schemas.ts
DELETED
|
@@ -1,1234 +0,0 @@
|
|
|
1
|
-
import { z } from 'zod';
|
|
2
|
-
import { Tools } from './types/assistants';
|
|
3
|
-
import type { TMessageContentParts, FunctionTool, FunctionToolCall } from './types/assistants';
|
|
4
|
-
import { TFeedback, feedbackSchema } from './feedback';
|
|
5
|
-
import type { SearchResultData } from './types/web';
|
|
6
|
-
import type { TFile } from './types/files';
|
|
7
|
-
|
|
8
|
-
export const isUUID = z.string().uuid();
|
|
9
|
-
|
|
10
|
-
export enum AuthType {
|
|
11
|
-
OVERRIDE_AUTH = 'override_auth',
|
|
12
|
-
USER_PROVIDED = 'user_provided',
|
|
13
|
-
SYSTEM_DEFINED = 'system_defined',
|
|
14
|
-
}
|
|
15
|
-
|
|
16
|
-
export const authTypeSchema = z.nativeEnum(AuthType);
|
|
17
|
-
|
|
18
|
-
export enum EModelEndpoint {
|
|
19
|
-
azureOpenAI = 'azureOpenAI',
|
|
20
|
-
openAI = 'openAI',
|
|
21
|
-
google = 'google',
|
|
22
|
-
anthropic = 'anthropic',
|
|
23
|
-
assistants = 'assistants',
|
|
24
|
-
azureAssistants = 'azureAssistants',
|
|
25
|
-
agents = 'agents',
|
|
26
|
-
custom = 'custom',
|
|
27
|
-
bedrock = 'bedrock',
|
|
28
|
-
}
|
|
29
|
-
|
|
30
|
-
/** Mirrors `@librechat/agents` providers */
|
|
31
|
-
export enum Providers {
|
|
32
|
-
OPENAI = 'openAI',
|
|
33
|
-
ANTHROPIC = 'anthropic',
|
|
34
|
-
AZURE = 'azureOpenAI',
|
|
35
|
-
GOOGLE = 'google',
|
|
36
|
-
VERTEXAI = 'vertexai',
|
|
37
|
-
BEDROCK = 'bedrock',
|
|
38
|
-
MISTRALAI = 'mistralai',
|
|
39
|
-
MISTRAL = 'mistral',
|
|
40
|
-
DEEPSEEK = 'deepseek',
|
|
41
|
-
MOONSHOT = 'moonshot',
|
|
42
|
-
OPENROUTER = 'openrouter',
|
|
43
|
-
XAI = 'xai',
|
|
44
|
-
}
|
|
45
|
-
|
|
46
|
-
/**
|
|
47
|
-
* Endpoints that support direct PDF processing in the agent system
|
|
48
|
-
*/
|
|
49
|
-
export const documentSupportedProviders = new Set<string>([
|
|
50
|
-
EModelEndpoint.anthropic,
|
|
51
|
-
EModelEndpoint.openAI,
|
|
52
|
-
EModelEndpoint.bedrock,
|
|
53
|
-
EModelEndpoint.custom,
|
|
54
|
-
// handled in AttachFileMenu and DragDropModal since azureOpenAI only supports documents with Use Responses API set to true
|
|
55
|
-
// EModelEndpoint.azureOpenAI,
|
|
56
|
-
EModelEndpoint.google,
|
|
57
|
-
Providers.VERTEXAI,
|
|
58
|
-
Providers.MISTRALAI,
|
|
59
|
-
Providers.MISTRAL,
|
|
60
|
-
Providers.DEEPSEEK,
|
|
61
|
-
Providers.MOONSHOT,
|
|
62
|
-
Providers.OPENROUTER,
|
|
63
|
-
Providers.XAI,
|
|
64
|
-
]);
|
|
65
|
-
|
|
66
|
-
const openAILikeProviders = new Set<string>([
|
|
67
|
-
Providers.OPENAI,
|
|
68
|
-
Providers.AZURE,
|
|
69
|
-
EModelEndpoint.custom,
|
|
70
|
-
Providers.MISTRALAI,
|
|
71
|
-
Providers.MISTRAL,
|
|
72
|
-
Providers.DEEPSEEK,
|
|
73
|
-
Providers.MOONSHOT,
|
|
74
|
-
Providers.OPENROUTER,
|
|
75
|
-
Providers.XAI,
|
|
76
|
-
]);
|
|
77
|
-
|
|
78
|
-
export const isOpenAILikeProvider = (provider?: string | null): boolean => {
|
|
79
|
-
return openAILikeProviders.has(provider ?? '');
|
|
80
|
-
};
|
|
81
|
-
|
|
82
|
-
export const isDocumentSupportedProvider = (provider?: string | null): boolean => {
|
|
83
|
-
return documentSupportedProviders.has(provider ?? '');
|
|
84
|
-
};
|
|
85
|
-
|
|
86
|
-
export const paramEndpoints = new Set<EModelEndpoint | string>([
|
|
87
|
-
EModelEndpoint.agents,
|
|
88
|
-
EModelEndpoint.openAI,
|
|
89
|
-
EModelEndpoint.bedrock,
|
|
90
|
-
EModelEndpoint.azureOpenAI,
|
|
91
|
-
EModelEndpoint.anthropic,
|
|
92
|
-
EModelEndpoint.custom,
|
|
93
|
-
EModelEndpoint.google,
|
|
94
|
-
]);
|
|
95
|
-
|
|
96
|
-
export enum BedrockProviders {
|
|
97
|
-
AI21 = 'ai21',
|
|
98
|
-
Amazon = 'amazon',
|
|
99
|
-
Anthropic = 'anthropic',
|
|
100
|
-
Cohere = 'cohere',
|
|
101
|
-
DeepSeek = 'deepseek',
|
|
102
|
-
Meta = 'meta',
|
|
103
|
-
MistralAI = 'mistral',
|
|
104
|
-
Moonshot = 'moonshot',
|
|
105
|
-
MoonshotAI = 'moonshotai',
|
|
106
|
-
OpenAI = 'openai',
|
|
107
|
-
StabilityAI = 'stability',
|
|
108
|
-
ZAI = 'zai',
|
|
109
|
-
}
|
|
110
|
-
|
|
111
|
-
export const getModelKey = (endpoint: EModelEndpoint | string, model: string) => {
|
|
112
|
-
if (endpoint === EModelEndpoint.bedrock) {
|
|
113
|
-
const parts = model.split('.');
|
|
114
|
-
const provider = [parts[0], parts[1]].find((part) =>
|
|
115
|
-
Object.values(BedrockProviders).includes(part as BedrockProviders),
|
|
116
|
-
);
|
|
117
|
-
return (provider ?? parts[0]) as BedrockProviders;
|
|
118
|
-
}
|
|
119
|
-
return model;
|
|
120
|
-
};
|
|
121
|
-
|
|
122
|
-
export const getSettingsKeys = (endpoint: EModelEndpoint | string, model: string) => {
|
|
123
|
-
const endpointKey = endpoint;
|
|
124
|
-
const modelKey = getModelKey(endpointKey, model);
|
|
125
|
-
const combinedKey = `${endpointKey}-${modelKey}`;
|
|
126
|
-
return [combinedKey, endpointKey];
|
|
127
|
-
};
|
|
128
|
-
|
|
129
|
-
export type AssistantsEndpoint = EModelEndpoint.assistants | EModelEndpoint.azureAssistants;
|
|
130
|
-
|
|
131
|
-
export const isAssistantsEndpoint = (_endpoint?: AssistantsEndpoint | null | string): boolean => {
|
|
132
|
-
const endpoint = _endpoint ?? '';
|
|
133
|
-
if (!endpoint) {
|
|
134
|
-
return false;
|
|
135
|
-
}
|
|
136
|
-
return endpoint.toLowerCase().endsWith(EModelEndpoint.assistants);
|
|
137
|
-
};
|
|
138
|
-
|
|
139
|
-
export type AgentProvider = Exclude<keyof typeof EModelEndpoint, EModelEndpoint.agents> | string;
|
|
140
|
-
|
|
141
|
-
export const isAgentsEndpoint = (_endpoint?: EModelEndpoint.agents | null | string): boolean => {
|
|
142
|
-
const endpoint = _endpoint ?? '';
|
|
143
|
-
if (!endpoint) {
|
|
144
|
-
return false;
|
|
145
|
-
}
|
|
146
|
-
return endpoint === EModelEndpoint.agents;
|
|
147
|
-
};
|
|
148
|
-
|
|
149
|
-
export const isParamEndpoint = (
|
|
150
|
-
endpoint: EModelEndpoint | string,
|
|
151
|
-
endpointType?: EModelEndpoint | string,
|
|
152
|
-
): boolean => {
|
|
153
|
-
if (paramEndpoints.has(endpoint)) {
|
|
154
|
-
return true;
|
|
155
|
-
}
|
|
156
|
-
|
|
157
|
-
if (endpointType != null) {
|
|
158
|
-
return paramEndpoints.has(endpointType);
|
|
159
|
-
}
|
|
160
|
-
|
|
161
|
-
return false;
|
|
162
|
-
};
|
|
163
|
-
|
|
164
|
-
export enum ImageDetail {
|
|
165
|
-
low = 'low',
|
|
166
|
-
auto = 'auto',
|
|
167
|
-
high = 'high',
|
|
168
|
-
}
|
|
169
|
-
|
|
170
|
-
export enum ReasoningEffort {
|
|
171
|
-
unset = '',
|
|
172
|
-
none = 'none',
|
|
173
|
-
minimal = 'minimal',
|
|
174
|
-
low = 'low',
|
|
175
|
-
medium = 'medium',
|
|
176
|
-
high = 'high',
|
|
177
|
-
xhigh = 'xhigh',
|
|
178
|
-
}
|
|
179
|
-
|
|
180
|
-
export enum AnthropicEffort {
|
|
181
|
-
unset = '',
|
|
182
|
-
low = 'low',
|
|
183
|
-
medium = 'medium',
|
|
184
|
-
high = 'high',
|
|
185
|
-
max = 'max',
|
|
186
|
-
}
|
|
187
|
-
|
|
188
|
-
export enum BedrockReasoningConfig {
|
|
189
|
-
low = 'low',
|
|
190
|
-
medium = 'medium',
|
|
191
|
-
high = 'high',
|
|
192
|
-
}
|
|
193
|
-
|
|
194
|
-
export enum ReasoningSummary {
|
|
195
|
-
none = '',
|
|
196
|
-
auto = 'auto',
|
|
197
|
-
concise = 'concise',
|
|
198
|
-
detailed = 'detailed',
|
|
199
|
-
}
|
|
200
|
-
|
|
201
|
-
export enum Verbosity {
|
|
202
|
-
none = '',
|
|
203
|
-
low = 'low',
|
|
204
|
-
medium = 'medium',
|
|
205
|
-
high = 'high',
|
|
206
|
-
}
|
|
207
|
-
|
|
208
|
-
export enum ThinkingLevel {
|
|
209
|
-
unset = '',
|
|
210
|
-
minimal = 'minimal',
|
|
211
|
-
low = 'low',
|
|
212
|
-
medium = 'medium',
|
|
213
|
-
high = 'high',
|
|
214
|
-
}
|
|
215
|
-
|
|
216
|
-
export const imageDetailNumeric = {
|
|
217
|
-
[ImageDetail.low]: 0,
|
|
218
|
-
[ImageDetail.auto]: 1,
|
|
219
|
-
[ImageDetail.high]: 2,
|
|
220
|
-
};
|
|
221
|
-
|
|
222
|
-
export const imageDetailValue = {
|
|
223
|
-
0: ImageDetail.low,
|
|
224
|
-
1: ImageDetail.auto,
|
|
225
|
-
2: ImageDetail.high,
|
|
226
|
-
};
|
|
227
|
-
|
|
228
|
-
export const eImageDetailSchema = z.nativeEnum(ImageDetail);
|
|
229
|
-
export const eReasoningEffortSchema = z.nativeEnum(ReasoningEffort);
|
|
230
|
-
export const eAnthropicEffortSchema = z.nativeEnum(AnthropicEffort);
|
|
231
|
-
export const eReasoningSummarySchema = z.nativeEnum(ReasoningSummary);
|
|
232
|
-
export const eVerbositySchema = z.nativeEnum(Verbosity);
|
|
233
|
-
export const eThinkingLevelSchema = z.nativeEnum(ThinkingLevel);
|
|
234
|
-
|
|
235
|
-
export const defaultAssistantFormValues = {
|
|
236
|
-
assistant: '',
|
|
237
|
-
id: '',
|
|
238
|
-
name: '',
|
|
239
|
-
description: '',
|
|
240
|
-
instructions: '',
|
|
241
|
-
conversation_starters: [],
|
|
242
|
-
model: '',
|
|
243
|
-
functions: [],
|
|
244
|
-
code_interpreter: false,
|
|
245
|
-
image_vision: false,
|
|
246
|
-
retrieval: false,
|
|
247
|
-
append_current_datetime: false,
|
|
248
|
-
};
|
|
249
|
-
|
|
250
|
-
export const defaultAgentFormValues = {
|
|
251
|
-
agent: {},
|
|
252
|
-
id: '',
|
|
253
|
-
name: '',
|
|
254
|
-
description: '',
|
|
255
|
-
instructions: '',
|
|
256
|
-
model: '',
|
|
257
|
-
model_parameters: {},
|
|
258
|
-
tools: [],
|
|
259
|
-
tool_options: {},
|
|
260
|
-
provider: {},
|
|
261
|
-
edges: [],
|
|
262
|
-
artifacts: '',
|
|
263
|
-
recursion_limit: undefined,
|
|
264
|
-
[Tools.execute_code]: false,
|
|
265
|
-
[Tools.file_search]: false,
|
|
266
|
-
[Tools.web_search]: false,
|
|
267
|
-
category: 'general',
|
|
268
|
-
support_contact: {
|
|
269
|
-
name: '',
|
|
270
|
-
email: '',
|
|
271
|
-
},
|
|
272
|
-
};
|
|
273
|
-
|
|
274
|
-
export const ImageVisionTool: FunctionTool = {
|
|
275
|
-
type: Tools.function,
|
|
276
|
-
[Tools.function]: {
|
|
277
|
-
name: 'image_vision',
|
|
278
|
-
description: 'Get detailed text descriptions for all current image attachments.',
|
|
279
|
-
parameters: {
|
|
280
|
-
type: 'object',
|
|
281
|
-
properties: {},
|
|
282
|
-
required: [],
|
|
283
|
-
},
|
|
284
|
-
},
|
|
285
|
-
};
|
|
286
|
-
|
|
287
|
-
export const isImageVisionTool = (tool: FunctionTool | FunctionToolCall) =>
|
|
288
|
-
tool.type === 'function' && tool.function?.name === ImageVisionTool.function?.name;
|
|
289
|
-
|
|
290
|
-
export const openAISettings = {
|
|
291
|
-
model: {
|
|
292
|
-
default: 'gpt-4o-mini' as const,
|
|
293
|
-
},
|
|
294
|
-
temperature: {
|
|
295
|
-
min: 0 as const,
|
|
296
|
-
max: 2 as const,
|
|
297
|
-
step: 0.01 as const,
|
|
298
|
-
default: 1 as const,
|
|
299
|
-
},
|
|
300
|
-
top_p: {
|
|
301
|
-
min: 0 as const,
|
|
302
|
-
max: 1 as const,
|
|
303
|
-
step: 0.01 as const,
|
|
304
|
-
default: 1 as const,
|
|
305
|
-
},
|
|
306
|
-
presence_penalty: {
|
|
307
|
-
min: -2 as const,
|
|
308
|
-
max: 2 as const,
|
|
309
|
-
step: 0.01 as const,
|
|
310
|
-
default: 0 as const,
|
|
311
|
-
},
|
|
312
|
-
frequency_penalty: {
|
|
313
|
-
min: -2 as const,
|
|
314
|
-
max: 2 as const,
|
|
315
|
-
step: 0.01 as const,
|
|
316
|
-
default: 0 as const,
|
|
317
|
-
},
|
|
318
|
-
resendFiles: {
|
|
319
|
-
default: true as const,
|
|
320
|
-
},
|
|
321
|
-
maxContextTokens: {
|
|
322
|
-
default: undefined,
|
|
323
|
-
},
|
|
324
|
-
max_tokens: {
|
|
325
|
-
default: undefined,
|
|
326
|
-
},
|
|
327
|
-
imageDetail: {
|
|
328
|
-
default: ImageDetail.auto as const,
|
|
329
|
-
min: 0 as const,
|
|
330
|
-
max: 2 as const,
|
|
331
|
-
step: 1 as const,
|
|
332
|
-
},
|
|
333
|
-
};
|
|
334
|
-
|
|
335
|
-
export const googleSettings = {
|
|
336
|
-
model: {
|
|
337
|
-
default: 'gemini-1.5-flash-latest' as const,
|
|
338
|
-
},
|
|
339
|
-
maxOutputTokens: {
|
|
340
|
-
min: 1 as const,
|
|
341
|
-
max: 64000 as const,
|
|
342
|
-
step: 1 as const,
|
|
343
|
-
default: 8192 as const,
|
|
344
|
-
},
|
|
345
|
-
temperature: {
|
|
346
|
-
min: 0 as const,
|
|
347
|
-
max: 2 as const,
|
|
348
|
-
step: 0.01 as const,
|
|
349
|
-
default: 1 as const,
|
|
350
|
-
},
|
|
351
|
-
topP: {
|
|
352
|
-
min: 0 as const,
|
|
353
|
-
max: 1 as const,
|
|
354
|
-
step: 0.01 as const,
|
|
355
|
-
default: 0.95 as const,
|
|
356
|
-
},
|
|
357
|
-
topK: {
|
|
358
|
-
min: 1 as const,
|
|
359
|
-
max: 40 as const,
|
|
360
|
-
step: 1 as const,
|
|
361
|
-
default: 40 as const,
|
|
362
|
-
},
|
|
363
|
-
thinking: {
|
|
364
|
-
default: true as const,
|
|
365
|
-
},
|
|
366
|
-
thinkingBudget: {
|
|
367
|
-
min: -1 as const,
|
|
368
|
-
max: 32000 as const,
|
|
369
|
-
step: 1 as const,
|
|
370
|
-
/** `-1` = Dynamic Thinking, meaning the model will adjust
|
|
371
|
-
* the budget based on the complexity of the request.
|
|
372
|
-
*/
|
|
373
|
-
default: -1 as const,
|
|
374
|
-
},
|
|
375
|
-
thinkingLevel: {
|
|
376
|
-
default: ThinkingLevel.unset as const,
|
|
377
|
-
},
|
|
378
|
-
};
|
|
379
|
-
|
|
380
|
-
const ANTHROPIC_MAX_OUTPUT = 128000 as const;
|
|
381
|
-
const CLAUDE_4_64K_MAX_OUTPUT = 64000 as const;
|
|
382
|
-
const CLAUDE_32K_MAX_OUTPUT = 32000 as const;
|
|
383
|
-
const DEFAULT_MAX_OUTPUT = 8192 as const;
|
|
384
|
-
const LEGACY_ANTHROPIC_MAX_OUTPUT = 4096 as const;
|
|
385
|
-
export const anthropicSettings = {
|
|
386
|
-
model: {
|
|
387
|
-
default: 'claude-3-5-sonnet-latest' as const,
|
|
388
|
-
},
|
|
389
|
-
temperature: {
|
|
390
|
-
min: 0 as const,
|
|
391
|
-
max: 1 as const,
|
|
392
|
-
step: 0.01 as const,
|
|
393
|
-
default: 1 as const,
|
|
394
|
-
},
|
|
395
|
-
promptCache: {
|
|
396
|
-
default: true as const,
|
|
397
|
-
},
|
|
398
|
-
thinking: {
|
|
399
|
-
default: true as const,
|
|
400
|
-
},
|
|
401
|
-
thinkingBudget: {
|
|
402
|
-
min: 1024 as const,
|
|
403
|
-
step: 100 as const,
|
|
404
|
-
max: 200000 as const,
|
|
405
|
-
default: 2000 as const,
|
|
406
|
-
},
|
|
407
|
-
maxOutputTokens: {
|
|
408
|
-
min: 1 as const,
|
|
409
|
-
max: ANTHROPIC_MAX_OUTPUT,
|
|
410
|
-
step: 1 as const,
|
|
411
|
-
default: DEFAULT_MAX_OUTPUT,
|
|
412
|
-
reset: (modelName: string) => {
|
|
413
|
-
if (/claude-opus[-.]?(?:4[-.]?(?:[6-9]|\d{2,})|[5-9]|\d{2,})/.test(modelName)) {
|
|
414
|
-
return ANTHROPIC_MAX_OUTPUT;
|
|
415
|
-
}
|
|
416
|
-
|
|
417
|
-
if (/claude-(?:sonnet|haiku)[-.]?[4-9]/.test(modelName)) {
|
|
418
|
-
return CLAUDE_4_64K_MAX_OUTPUT;
|
|
419
|
-
}
|
|
420
|
-
|
|
421
|
-
if (/claude-opus[-.]?(?:[5-9]|4[-.]?([5-9]|\d{2,}))/.test(modelName)) {
|
|
422
|
-
return CLAUDE_4_64K_MAX_OUTPUT;
|
|
423
|
-
}
|
|
424
|
-
|
|
425
|
-
if (/claude-opus[-.]?[4-9]/.test(modelName)) {
|
|
426
|
-
return CLAUDE_32K_MAX_OUTPUT;
|
|
427
|
-
}
|
|
428
|
-
|
|
429
|
-
return DEFAULT_MAX_OUTPUT;
|
|
430
|
-
},
|
|
431
|
-
set: (value: number, modelName: string) => {
|
|
432
|
-
if (/claude-opus[-.]?(?:4[-.]?(?:[6-9]|\d{2,})|[5-9]|\d{2,})/.test(modelName)) {
|
|
433
|
-
if (value > ANTHROPIC_MAX_OUTPUT) {
|
|
434
|
-
return ANTHROPIC_MAX_OUTPUT;
|
|
435
|
-
}
|
|
436
|
-
return value;
|
|
437
|
-
}
|
|
438
|
-
|
|
439
|
-
if (/claude-(?:sonnet|haiku)[-.]?[4-9]/.test(modelName) && value > CLAUDE_4_64K_MAX_OUTPUT) {
|
|
440
|
-
return CLAUDE_4_64K_MAX_OUTPUT;
|
|
441
|
-
}
|
|
442
|
-
|
|
443
|
-
if (/claude-opus[-.]?(?:[5-9]|4[-.]?([5-9]|\d{2,}))/.test(modelName)) {
|
|
444
|
-
if (value > CLAUDE_4_64K_MAX_OUTPUT) {
|
|
445
|
-
return CLAUDE_4_64K_MAX_OUTPUT;
|
|
446
|
-
}
|
|
447
|
-
return value;
|
|
448
|
-
}
|
|
449
|
-
|
|
450
|
-
if (/claude-opus[-.]?[4-9]/.test(modelName) && value > CLAUDE_32K_MAX_OUTPUT) {
|
|
451
|
-
return CLAUDE_32K_MAX_OUTPUT;
|
|
452
|
-
}
|
|
453
|
-
|
|
454
|
-
if (value > ANTHROPIC_MAX_OUTPUT) {
|
|
455
|
-
return ANTHROPIC_MAX_OUTPUT;
|
|
456
|
-
}
|
|
457
|
-
|
|
458
|
-
return value;
|
|
459
|
-
},
|
|
460
|
-
},
|
|
461
|
-
topP: {
|
|
462
|
-
min: 0 as const,
|
|
463
|
-
max: 1 as const,
|
|
464
|
-
step: 0.01 as const,
|
|
465
|
-
default: 0.7 as const,
|
|
466
|
-
},
|
|
467
|
-
topK: {
|
|
468
|
-
min: 1 as const,
|
|
469
|
-
max: 40 as const,
|
|
470
|
-
step: 1 as const,
|
|
471
|
-
default: 5 as const,
|
|
472
|
-
},
|
|
473
|
-
resendFiles: {
|
|
474
|
-
default: true as const,
|
|
475
|
-
},
|
|
476
|
-
maxContextTokens: {
|
|
477
|
-
default: undefined,
|
|
478
|
-
},
|
|
479
|
-
legacy: {
|
|
480
|
-
maxOutputTokens: {
|
|
481
|
-
min: 1 as const,
|
|
482
|
-
max: LEGACY_ANTHROPIC_MAX_OUTPUT,
|
|
483
|
-
step: 1 as const,
|
|
484
|
-
default: LEGACY_ANTHROPIC_MAX_OUTPUT,
|
|
485
|
-
},
|
|
486
|
-
},
|
|
487
|
-
effort: {
|
|
488
|
-
default: AnthropicEffort.unset,
|
|
489
|
-
options: [
|
|
490
|
-
AnthropicEffort.unset,
|
|
491
|
-
AnthropicEffort.low,
|
|
492
|
-
AnthropicEffort.medium,
|
|
493
|
-
AnthropicEffort.high,
|
|
494
|
-
AnthropicEffort.max,
|
|
495
|
-
],
|
|
496
|
-
},
|
|
497
|
-
web_search: {
|
|
498
|
-
default: false as const,
|
|
499
|
-
},
|
|
500
|
-
};
|
|
501
|
-
|
|
502
|
-
export const agentsSettings = {
|
|
503
|
-
model: {
|
|
504
|
-
default: 'gpt-3.5-turbo-test' as const,
|
|
505
|
-
},
|
|
506
|
-
temperature: {
|
|
507
|
-
min: 0 as const,
|
|
508
|
-
max: 1 as const,
|
|
509
|
-
step: 0.01 as const,
|
|
510
|
-
default: 1 as const,
|
|
511
|
-
},
|
|
512
|
-
top_p: {
|
|
513
|
-
min: 0 as const,
|
|
514
|
-
max: 1 as const,
|
|
515
|
-
step: 0.01 as const,
|
|
516
|
-
default: 1 as const,
|
|
517
|
-
},
|
|
518
|
-
presence_penalty: {
|
|
519
|
-
min: -2 as const,
|
|
520
|
-
max: 2 as const,
|
|
521
|
-
step: 0.01 as const,
|
|
522
|
-
default: 0 as const,
|
|
523
|
-
},
|
|
524
|
-
frequency_penalty: {
|
|
525
|
-
min: -2 as const,
|
|
526
|
-
max: 2 as const,
|
|
527
|
-
step: 0.01 as const,
|
|
528
|
-
default: 0 as const,
|
|
529
|
-
},
|
|
530
|
-
resendFiles: {
|
|
531
|
-
default: true as const,
|
|
532
|
-
},
|
|
533
|
-
maxContextTokens: {
|
|
534
|
-
default: undefined,
|
|
535
|
-
},
|
|
536
|
-
max_tokens: {
|
|
537
|
-
default: undefined,
|
|
538
|
-
},
|
|
539
|
-
imageDetail: {
|
|
540
|
-
default: ImageDetail.auto as const,
|
|
541
|
-
},
|
|
542
|
-
};
|
|
543
|
-
|
|
544
|
-
export const endpointSettings = {
|
|
545
|
-
[EModelEndpoint.openAI]: openAISettings,
|
|
546
|
-
[EModelEndpoint.google]: googleSettings,
|
|
547
|
-
[EModelEndpoint.anthropic]: anthropicSettings,
|
|
548
|
-
[EModelEndpoint.agents]: agentsSettings,
|
|
549
|
-
[EModelEndpoint.bedrock]: agentsSettings,
|
|
550
|
-
};
|
|
551
|
-
|
|
552
|
-
const google = endpointSettings[EModelEndpoint.google];
|
|
553
|
-
|
|
554
|
-
export const eModelEndpointSchema = z.nativeEnum(EModelEndpoint);
|
|
555
|
-
|
|
556
|
-
export const extendedModelEndpointSchema = z.union([eModelEndpointSchema, z.string()]);
|
|
557
|
-
|
|
558
|
-
export const tPluginAuthConfigSchema = z.object({
|
|
559
|
-
authField: z.string(),
|
|
560
|
-
label: z.string(),
|
|
561
|
-
description: z.string(),
|
|
562
|
-
optional: z.boolean().optional(),
|
|
563
|
-
});
|
|
564
|
-
|
|
565
|
-
export type TPluginAuthConfig = z.infer<typeof tPluginAuthConfigSchema>;
|
|
566
|
-
|
|
567
|
-
export const tPluginSchema = z.object({
|
|
568
|
-
name: z.string(),
|
|
569
|
-
pluginKey: z.string(),
|
|
570
|
-
description: z.string().optional(),
|
|
571
|
-
icon: z.string().optional(),
|
|
572
|
-
authConfig: z.array(tPluginAuthConfigSchema).optional(),
|
|
573
|
-
authenticated: z.boolean().optional(),
|
|
574
|
-
chatMenu: z.boolean().optional(),
|
|
575
|
-
isButton: z.boolean().optional(),
|
|
576
|
-
toolkit: z.boolean().optional(),
|
|
577
|
-
});
|
|
578
|
-
|
|
579
|
-
export type TPlugin = z.infer<typeof tPluginSchema>;
|
|
580
|
-
|
|
581
|
-
export type TInput = {
|
|
582
|
-
inputStr: string;
|
|
583
|
-
};
|
|
584
|
-
|
|
585
|
-
export const tExampleSchema = z.object({
|
|
586
|
-
input: z.object({
|
|
587
|
-
content: z.string(),
|
|
588
|
-
}),
|
|
589
|
-
output: z.object({
|
|
590
|
-
content: z.string(),
|
|
591
|
-
}),
|
|
592
|
-
});
|
|
593
|
-
|
|
594
|
-
export type TExample = z.infer<typeof tExampleSchema>;
|
|
595
|
-
|
|
596
|
-
export const tMessageSchema = z.object({
|
|
597
|
-
messageId: z.string(),
|
|
598
|
-
endpoint: z.string().optional(),
|
|
599
|
-
clientId: z.string().nullable().optional(),
|
|
600
|
-
conversationId: z.string().nullable(),
|
|
601
|
-
parentMessageId: z.string().nullable(),
|
|
602
|
-
responseMessageId: z.string().nullable().optional(),
|
|
603
|
-
overrideParentMessageId: z.string().nullable().optional(),
|
|
604
|
-
bg: z.string().nullable().optional(),
|
|
605
|
-
model: z.string().nullable().optional(),
|
|
606
|
-
title: z.string().nullable().or(z.literal('New Chat')).default('New Chat'),
|
|
607
|
-
sender: z.string().optional(),
|
|
608
|
-
text: z.string(),
|
|
609
|
-
/** @deprecated */
|
|
610
|
-
generation: z.string().nullable().optional(),
|
|
611
|
-
isCreatedByUser: z.boolean(),
|
|
612
|
-
error: z.boolean().optional(),
|
|
613
|
-
clientTimestamp: z.string().optional(),
|
|
614
|
-
createdAt: z
|
|
615
|
-
.string()
|
|
616
|
-
.optional()
|
|
617
|
-
.default(() => new Date().toISOString()),
|
|
618
|
-
updatedAt: z
|
|
619
|
-
.string()
|
|
620
|
-
.optional()
|
|
621
|
-
.default(() => new Date().toISOString()),
|
|
622
|
-
current: z.boolean().optional(),
|
|
623
|
-
unfinished: z.boolean().optional(),
|
|
624
|
-
searchResult: z.boolean().optional(),
|
|
625
|
-
finish_reason: z.string().optional(),
|
|
626
|
-
/* assistant */
|
|
627
|
-
thread_id: z.string().optional(),
|
|
628
|
-
/* frontend components */
|
|
629
|
-
iconURL: z.string().nullable().optional(),
|
|
630
|
-
feedback: feedbackSchema.optional(),
|
|
631
|
-
/** metadata */
|
|
632
|
-
metadata: z.record(z.unknown()).optional(),
|
|
633
|
-
contextMeta: z
|
|
634
|
-
.object({
|
|
635
|
-
calibrationRatio: z
|
|
636
|
-
.number()
|
|
637
|
-
.optional()
|
|
638
|
-
.describe('EMA ratio of provider-reported vs local token estimates; seeds the pruner on subsequent runs'),
|
|
639
|
-
encoding: z
|
|
640
|
-
.string()
|
|
641
|
-
.optional()
|
|
642
|
-
.describe('Tokenizer encoding used when this ratio was computed (e.g. "claude", "o200k_base")'),
|
|
643
|
-
})
|
|
644
|
-
.optional(),
|
|
645
|
-
});
|
|
646
|
-
|
|
647
|
-
export type MemoryArtifact = {
|
|
648
|
-
key: string;
|
|
649
|
-
value?: string;
|
|
650
|
-
tokenCount?: number;
|
|
651
|
-
type: 'update' | 'delete' | 'error';
|
|
652
|
-
};
|
|
653
|
-
|
|
654
|
-
export type UIResource = {
|
|
655
|
-
resourceId: string;
|
|
656
|
-
uri: string;
|
|
657
|
-
mimeType?: string;
|
|
658
|
-
text?: string;
|
|
659
|
-
[key: string]: unknown;
|
|
660
|
-
};
|
|
661
|
-
|
|
662
|
-
export type TAttachmentMetadata = {
|
|
663
|
-
type?: Tools;
|
|
664
|
-
messageId: string;
|
|
665
|
-
toolCallId: string;
|
|
666
|
-
[Tools.memory]?: MemoryArtifact;
|
|
667
|
-
[Tools.ui_resources]?: UIResource[];
|
|
668
|
-
[Tools.web_search]?: SearchResultData;
|
|
669
|
-
[Tools.file_search]?: SearchResultData;
|
|
670
|
-
};
|
|
671
|
-
|
|
672
|
-
export type TAttachment =
|
|
673
|
-
| (TFile & TAttachmentMetadata)
|
|
674
|
-
| (Pick<TFile, 'filename' | 'filepath' | 'conversationId'> & {
|
|
675
|
-
expiresAt: number;
|
|
676
|
-
} & TAttachmentMetadata)
|
|
677
|
-
| (Partial<Pick<TFile, 'filename' | 'filepath'>> &
|
|
678
|
-
Pick<TFile, 'conversationId'> &
|
|
679
|
-
TAttachmentMetadata);
|
|
680
|
-
|
|
681
|
-
export type TMessage = z.input<typeof tMessageSchema> & {
|
|
682
|
-
children?: TMessage[];
|
|
683
|
-
content?: TMessageContentParts[];
|
|
684
|
-
files?: Partial<TFile>[];
|
|
685
|
-
depth?: number;
|
|
686
|
-
siblingIndex?: number;
|
|
687
|
-
attachments?: TAttachment[];
|
|
688
|
-
clientTimestamp?: string;
|
|
689
|
-
feedback?: TFeedback;
|
|
690
|
-
};
|
|
691
|
-
|
|
692
|
-
export const coerceNumber = z.union([z.number(), z.string()]).transform((val) => {
|
|
693
|
-
if (typeof val === 'string') {
|
|
694
|
-
return val.trim() === '' ? undefined : parseFloat(val);
|
|
695
|
-
}
|
|
696
|
-
return val;
|
|
697
|
-
});
|
|
698
|
-
|
|
699
|
-
type DocumentTypeValue =
|
|
700
|
-
| null
|
|
701
|
-
| boolean
|
|
702
|
-
| number
|
|
703
|
-
| string
|
|
704
|
-
| DocumentTypeValue[]
|
|
705
|
-
| { [key: string]: DocumentTypeValue };
|
|
706
|
-
|
|
707
|
-
const DocumentType: z.ZodType<DocumentTypeValue> = z.lazy(() =>
|
|
708
|
-
z.union([
|
|
709
|
-
z.null(),
|
|
710
|
-
z.boolean(),
|
|
711
|
-
z.number(),
|
|
712
|
-
z.string(),
|
|
713
|
-
z.array(z.lazy(() => DocumentType)),
|
|
714
|
-
z.record(z.lazy(() => DocumentType)),
|
|
715
|
-
]),
|
|
716
|
-
);
|
|
717
|
-
|
|
718
|
-
export const tConversationSchema = z.object({
|
|
719
|
-
conversationId: z.string().nullable(),
|
|
720
|
-
endpoint: eModelEndpointSchema.nullable(),
|
|
721
|
-
endpointType: eModelEndpointSchema.nullable().optional(),
|
|
722
|
-
isArchived: z.boolean().optional(),
|
|
723
|
-
title: z.string().nullable().or(z.literal('New Chat')).default('New Chat'),
|
|
724
|
-
user: z.string().optional(),
|
|
725
|
-
messages: z.array(z.string()).optional(),
|
|
726
|
-
tools: z.union([z.array(tPluginSchema), z.array(z.string())]).optional(),
|
|
727
|
-
modelLabel: z.string().nullable().optional(),
|
|
728
|
-
userLabel: z.string().optional(),
|
|
729
|
-
model: z.string().nullable().optional(),
|
|
730
|
-
promptPrefix: z.string().nullable().optional(),
|
|
731
|
-
temperature: z.number().nullable().optional(),
|
|
732
|
-
topP: z.number().optional(),
|
|
733
|
-
topK: z.number().optional(),
|
|
734
|
-
top_p: z.number().optional(),
|
|
735
|
-
frequency_penalty: z.number().optional(),
|
|
736
|
-
presence_penalty: z.number().optional(),
|
|
737
|
-
parentMessageId: z.string().optional(),
|
|
738
|
-
maxOutputTokens: coerceNumber.nullable().optional(),
|
|
739
|
-
maxContextTokens: coerceNumber.optional(),
|
|
740
|
-
max_tokens: coerceNumber.optional(),
|
|
741
|
-
/* Anthropic */
|
|
742
|
-
promptCache: z.boolean().optional(),
|
|
743
|
-
system: z.string().optional(),
|
|
744
|
-
thinking: z.boolean().optional(),
|
|
745
|
-
thinkingBudget: coerceNumber.optional(),
|
|
746
|
-
thinkingLevel: eThinkingLevelSchema.optional(),
|
|
747
|
-
stream: z.boolean().optional(),
|
|
748
|
-
/* artifacts */
|
|
749
|
-
artifacts: z.string().optional(),
|
|
750
|
-
/* google */
|
|
751
|
-
context: z.string().nullable().optional(),
|
|
752
|
-
examples: z.array(tExampleSchema).optional(),
|
|
753
|
-
/* DB */
|
|
754
|
-
tags: z.array(z.string()).optional(),
|
|
755
|
-
createdAt: z.string(),
|
|
756
|
-
updatedAt: z.string(),
|
|
757
|
-
/* Files */
|
|
758
|
-
resendFiles: z.boolean().optional(),
|
|
759
|
-
file_ids: z.array(z.string()).optional(),
|
|
760
|
-
/* vision */
|
|
761
|
-
imageDetail: eImageDetailSchema.optional(),
|
|
762
|
-
/* OpenAI: Reasoning models only */
|
|
763
|
-
reasoning_effort: eReasoningEffortSchema.optional().nullable(),
|
|
764
|
-
reasoning_summary: eReasoningSummarySchema.optional().nullable(),
|
|
765
|
-
/* OpenAI: Verbosity control */
|
|
766
|
-
verbosity: eVerbositySchema.optional().nullable(),
|
|
767
|
-
/* OpenAI: use Responses API */
|
|
768
|
-
useResponsesApi: z.boolean().optional(),
|
|
769
|
-
/* Anthropic: Effort control */
|
|
770
|
-
effort: eAnthropicEffortSchema.optional().nullable(),
|
|
771
|
-
/* OpenAI Responses API / Anthropic API / Google API */
|
|
772
|
-
web_search: z.boolean().optional(),
|
|
773
|
-
/* disable streaming */
|
|
774
|
-
disableStreaming: z.boolean().optional(),
|
|
775
|
-
/* assistant */
|
|
776
|
-
assistant_id: z.string().optional(),
|
|
777
|
-
/* agents */
|
|
778
|
-
agent_id: z.string().optional(),
|
|
779
|
-
/* AWS Bedrock */
|
|
780
|
-
region: z.string().optional(),
|
|
781
|
-
maxTokens: coerceNumber.optional(),
|
|
782
|
-
additionalModelRequestFields: DocumentType.optional(),
|
|
783
|
-
/* assistants */
|
|
784
|
-
instructions: z.string().optional(),
|
|
785
|
-
additional_instructions: z.string().optional(),
|
|
786
|
-
append_current_datetime: z.boolean().optional(),
|
|
787
|
-
/** Used to overwrite active conversation settings when saving a Preset */
|
|
788
|
-
presetOverride: z.record(z.unknown()).optional(),
|
|
789
|
-
stop: z.array(z.string()).optional(),
|
|
790
|
-
/* frontend components */
|
|
791
|
-
greeting: z.string().optional(),
|
|
792
|
-
spec: z.string().nullable().optional(),
|
|
793
|
-
iconURL: z.string().nullable().optional(),
|
|
794
|
-
/* temporary chat */
|
|
795
|
-
expiredAt: z.string().nullable().optional(),
|
|
796
|
-
/* file token limits */
|
|
797
|
-
fileTokenLimit: coerceNumber.optional(),
|
|
798
|
-
/** @deprecated */
|
|
799
|
-
resendImages: z.boolean().optional(),
|
|
800
|
-
/** @deprecated Prefer `modelLabel` over `chatGptLabel` */
|
|
801
|
-
chatGptLabel: z.string().nullable().optional(),
|
|
802
|
-
});
|
|
803
|
-
|
|
804
|
-
/**
 * Schema for a saved preset: a reusable conversation template.
 * Starts from `tConversationSchema`, removes per-conversation fields,
 * then re-adds them (optional/nullable) together with preset-only fields.
 */
export const tPresetSchema = tConversationSchema
  .omit({
    conversationId: true,
    createdAt: true,
    updatedAt: true,
    title: true,
  })
  .merge(
    z.object({
      conversationId: z.string().nullable().optional(),
      presetId: z.string().nullable().optional(),
      title: z.string().nullable().optional(),
      // presumably marks the preset selected by default — confirm with consumers
      defaultPreset: z.boolean().optional(),
      // presumably display/sort position in the preset list — confirm with consumers
      order: z.number().optional(),
      // Required (but nullable) — presets are always tied to an endpoint value
      endpoint: extendedModelEndpointSchema.nullable(),
    }),
  );
|
|
821
|
-
|
|
822
|
-
/**
 * Schema for conversation create/update payloads: all base conversation
 * fields plus a nullable endpoint and optional string timestamps.
 */
export const tConvoUpdateSchema = tConversationSchema.merge(
  z.object({
    endpoint: extendedModelEndpointSchema.nullable(),
    createdAt: z.string().optional(),
    updatedAt: z.string().optional(),
  }),
);
|
|
829
|
-
|
|
830
|
-
/**
 * Whitelist of conversation fields that may be provided via URL query
 * parameters, picked from `tConversationSchema` so validation stays in sync.
 */
export const tQueryParamsSchema = tConversationSchema
  .pick({
    // librechat settings
    /** The model spec to be used */
    spec: true,
    /** The AI context window, overrides the system-defined window as determined by `model` value */
    maxContextTokens: true,
    /**
     * Whether or not to re-submit files from previous messages on subsequent messages
     * */
    resendFiles: true,
    /**
     * @endpoints openAI, custom, azureOpenAI
     *
     * System parameter that only affects the above endpoints.
     * Image detail for re-sizing according to OpenAI spec, defaults to `auto`
     * */
    imageDetail: true,
    /**
     * AKA Custom Instructions, dynamically added to chat history as a system message;
     * for `bedrock` endpoint, this is used as the `system` model param if the provider uses it;
     * for `assistants` endpoint, this is used as the `additional_instructions` model param:
     * https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-additional_instructions
     * ; otherwise, a message with `system` role is added to the chat history
     */
    promptPrefix: true,
    // Model parameters
    /** @endpoints openAI, custom, azureOpenAI, google, anthropic, assistants, azureAssistants, bedrock */
    model: true,
    /** @endpoints openAI, custom, azureOpenAI, google, anthropic, bedrock */
    temperature: true,
    /** @endpoints openAI, custom, azureOpenAI */
    presence_penalty: true,
    /** @endpoints openAI, custom, azureOpenAI */
    frequency_penalty: true,
    /** @endpoints openAI, custom, azureOpenAI */
    stop: true,
    /** @endpoints openAI, custom, azureOpenAI */
    top_p: true,
    /** @endpoints openAI, custom, azureOpenAI */
    max_tokens: true,
    /** @endpoints openAI, custom, azureOpenAI */
    reasoning_effort: true,
    /** @endpoints openAI, custom, azureOpenAI */
    reasoning_summary: true,
    /** @endpoints openAI, custom, azureOpenAI */
    verbosity: true,
    /** @endpoints openAI, custom, azureOpenAI */
    useResponsesApi: true,
    /** @endpoints openAI, anthropic, google */
    web_search: true,
    /** @endpoints openAI, custom, azureOpenAI */
    disableStreaming: true,
    /** @endpoints google, anthropic, bedrock */
    topP: true,
    /** @endpoints google, anthropic */
    topK: true,
    /** @endpoints google, anthropic */
    maxOutputTokens: true,
    /** @endpoints anthropic */
    promptCache: true,
    thinking: true,
    thinkingBudget: true,
    thinkingLevel: true,
    effort: true,
    /** @endpoints bedrock */
    region: true,
    /** @endpoints bedrock */
    maxTokens: true,
    /** @endpoints agents */
    agent_id: true,
    /** @endpoints assistants, azureAssistants */
    assistant_id: true,
    /** @endpoints assistants, azureAssistants */
    append_current_datetime: true,
    /**
     * @endpoints assistants, azureAssistants
     *
     * Overrides existing assistant instructions, only used for the current run:
     * https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-instructions
     * */
    instructions: true,
    /** @endpoints openAI, google, anthropic */
    fileTokenLimit: true,
  })
  .merge(
    z.object({
      /** @endpoints openAI, custom, azureOpenAI, google, anthropic, assistants, azureAssistants, bedrock, agents */
      endpoint: extendedModelEndpointSchema.nullable(),
    }),
  );
|
|
921
|
-
|
|
922
|
-
/** Preset object as inferred from {@link tPresetSchema}. */
export type TPreset = z.infer<typeof tPresetSchema>;

/**
 * Curried option setter: given a setting key, returns a function that
 * applies a new value for that setting.
 */
export type TSetOption = (
  param: number | string,
) => (newValue: number | string | boolean | string[] | Partial<TPreset>) => void;

/**
 * Conversation object as inferred from {@link tConversationSchema}, plus
 * client-side extras not covered by the zod schema.
 */
export type TConversation = z.infer<typeof tConversationSchema> & {
  /** Used to overwrite active conversation settings when saving a Preset */
  presetOverride?: Partial<TPreset>;
  /** presumably disables parameter editing in the UI — confirm with consumers */
  disableParams?: boolean;
};
|
|
932
|
-
|
|
933
|
-
/** A shared conversation link record. */
export const tSharedLinkSchema = z.object({
  conversationId: z.string(),
  shareId: z.string(),
  // presumably the IDs of the messages included in the share — confirm against API
  messages: z.array(z.string()),
  isPublic: z.boolean(),
  title: z.string(),
  createdAt: z.string(),
  updatedAt: z.string(),
});

export type TSharedLink = z.infer<typeof tSharedLinkSchema>;
|
|
944
|
-
|
|
945
|
-
/** A user-defined conversation tag (bookmark). */
export const tConversationTagSchema = z.object({
  _id: z.string(),
  user: z.string(),
  tag: z.string(),
  description: z.string().optional(),
  createdAt: z.string(),
  updatedAt: z.string(),
  // presumably how many conversations carry this tag — confirm against API
  count: z.number(),
  // presumably the tag's display/sort position — confirm against API
  position: z.number(),
});
export type TConversationTag = z.infer<typeof tConversationTagSchema>;
|
|
956
|
-
|
|
957
|
-
/**
 * Conversation fields relevant to the Google endpoint, picked from
 * `tConversationSchema`. Used as the base for `googleSchema` and
 * `compactGoogleSchema`.
 */
export const googleBaseSchema = tConversationSchema.pick({
  model: true,
  modelLabel: true,
  promptPrefix: true,
  examples: true,
  temperature: true,
  maxOutputTokens: true,
  artifacts: true,
  topP: true,
  topK: true,
  thinking: true,
  thinkingBudget: true,
  thinkingLevel: true,
  web_search: true,
  fileTokenLimit: true,
  iconURL: true,
  greeting: true,
  spec: true,
  maxContextTokens: true,
});
|
|
977
|
-
|
|
978
|
-
export const googleSchema = googleBaseSchema
|
|
979
|
-
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
|
|
980
|
-
.catch(() => ({}));
|
|
981
|
-
|
|
982
|
-
/**
 * Generation config accepted for Google requests; unknown keys are
 * stripped (`.strip()`) and the whole object is optional.
 *
 * TODO: Map the following fields:
 * - presence_penalty -> presencePenalty
 * - frequency_penalty -> frequencyPenalty
 * - stop -> stopSequences
 */
export const googleGenConfigSchema = z
  .object({
    maxOutputTokens: coerceNumber.optional(),
    temperature: coerceNumber.optional(),
    topP: coerceNumber.optional(),
    topK: coerceNumber.optional(),
    presencePenalty: coerceNumber.optional(),
    frequencyPenalty: coerceNumber.optional(),
    stopSequences: z.array(z.string()).optional(),
    // Nested thinking/reasoning configuration
    thinkingConfig: z
      .object({
        includeThoughts: z.boolean().optional(),
        thinkingBudget: coerceNumber.optional(),
        thinkingLevel: z.string().optional(),
      })
      .optional(),
    web_search: z.boolean().optional(),
  })
  .strip()
  .optional();
|
|
1008
|
-
|
|
1009
|
-
export function removeNullishValues<T extends Record<string, unknown>>(
|
|
1010
|
-
obj: T,
|
|
1011
|
-
removeEmptyStrings?: boolean,
|
|
1012
|
-
): Partial<T> {
|
|
1013
|
-
const newObj: Partial<T> = { ...obj };
|
|
1014
|
-
|
|
1015
|
-
(Object.keys(newObj) as Array<keyof T>).forEach((key) => {
|
|
1016
|
-
const value = newObj[key];
|
|
1017
|
-
if (value === undefined || value === null) {
|
|
1018
|
-
delete newObj[key];
|
|
1019
|
-
}
|
|
1020
|
-
if (removeEmptyStrings && typeof value === 'string' && value === '') {
|
|
1021
|
-
delete newObj[key];
|
|
1022
|
-
}
|
|
1023
|
-
});
|
|
1024
|
-
|
|
1025
|
-
return newObj;
|
|
1026
|
-
}
|
|
1027
|
-
|
|
1028
|
-
/**
 * Conversation fields relevant to the assistants endpoint, picked from
 * `tConversationSchema`; base for `assistantSchema`.
 */
const assistantBaseSchema = tConversationSchema.pick({
  model: true,
  assistant_id: true,
  instructions: true,
  artifacts: true,
  promptPrefix: true,
  iconURL: true,
  greeting: true,
  spec: true,
  append_current_datetime: true,
});
|
|
1039
|
-
|
|
1040
|
-
export const assistantSchema = assistantBaseSchema
|
|
1041
|
-
.transform((obj) => ({
|
|
1042
|
-
...obj,
|
|
1043
|
-
model: obj.model ?? openAISettings.model.default,
|
|
1044
|
-
assistant_id: obj.assistant_id ?? undefined,
|
|
1045
|
-
instructions: obj.instructions ?? undefined,
|
|
1046
|
-
promptPrefix: obj.promptPrefix ?? null,
|
|
1047
|
-
iconURL: obj.iconURL ?? undefined,
|
|
1048
|
-
greeting: obj.greeting ?? undefined,
|
|
1049
|
-
spec: obj.spec ?? undefined,
|
|
1050
|
-
append_current_datetime: obj.append_current_datetime ?? false,
|
|
1051
|
-
}))
|
|
1052
|
-
.catch(() => ({
|
|
1053
|
-
model: openAISettings.model.default,
|
|
1054
|
-
assistant_id: undefined,
|
|
1055
|
-
instructions: undefined,
|
|
1056
|
-
promptPrefix: null,
|
|
1057
|
-
iconURL: undefined,
|
|
1058
|
-
greeting: undefined,
|
|
1059
|
-
spec: undefined,
|
|
1060
|
-
append_current_datetime: false,
|
|
1061
|
-
}));
|
|
1062
|
-
|
|
1063
|
-
/**
 * Minimal assistants field set, picked from `tConversationSchema`;
 * base for `compactAssistantSchema`.
 */
const compactAssistantBaseSchema = tConversationSchema.pick({
  model: true,
  assistant_id: true,
  instructions: true,
  promptPrefix: true,
  artifacts: true,
  iconURL: true,
  greeting: true,
  spec: true,
});
|
|
1073
|
-
|
|
1074
|
-
export const compactAssistantSchema = compactAssistantBaseSchema
|
|
1075
|
-
.transform((obj) => removeNullishValues(obj))
|
|
1076
|
-
.catch(() => ({}));
|
|
1077
|
-
|
|
1078
|
-
/**
 * Conversation fields relevant to the agents endpoint, picked from
 * `tConversationSchema`; base for `agentsSchema`.
 */
export const agentsBaseSchema = tConversationSchema.pick({
  model: true,
  modelLabel: true,
  temperature: true,
  top_p: true,
  presence_penalty: true,
  frequency_penalty: true,
  resendFiles: true,
  imageDetail: true,
  agent_id: true,
  instructions: true,
  promptPrefix: true,
  iconURL: true,
  greeting: true,
  maxContextTokens: true,
});
|
|
1094
|
-
|
|
1095
|
-
export const agentsSchema = agentsBaseSchema
|
|
1096
|
-
.transform((obj) => ({
|
|
1097
|
-
...obj,
|
|
1098
|
-
model: obj.model ?? agentsSettings.model.default,
|
|
1099
|
-
modelLabel: obj.modelLabel ?? null,
|
|
1100
|
-
temperature: obj.temperature ?? 1,
|
|
1101
|
-
top_p: obj.top_p ?? 1,
|
|
1102
|
-
presence_penalty: obj.presence_penalty ?? 0,
|
|
1103
|
-
frequency_penalty: obj.frequency_penalty ?? 0,
|
|
1104
|
-
resendFiles:
|
|
1105
|
-
typeof obj.resendFiles === 'boolean' ? obj.resendFiles : agentsSettings.resendFiles.default,
|
|
1106
|
-
imageDetail: obj.imageDetail ?? ImageDetail.auto,
|
|
1107
|
-
agent_id: obj.agent_id ?? undefined,
|
|
1108
|
-
instructions: obj.instructions ?? undefined,
|
|
1109
|
-
promptPrefix: obj.promptPrefix ?? null,
|
|
1110
|
-
iconURL: obj.iconURL ?? undefined,
|
|
1111
|
-
greeting: obj.greeting ?? undefined,
|
|
1112
|
-
maxContextTokens: obj.maxContextTokens ?? undefined,
|
|
1113
|
-
}))
|
|
1114
|
-
.catch(() => ({
|
|
1115
|
-
model: agentsSettings.model.default,
|
|
1116
|
-
modelLabel: null,
|
|
1117
|
-
temperature: 1,
|
|
1118
|
-
top_p: 1,
|
|
1119
|
-
presence_penalty: 0,
|
|
1120
|
-
frequency_penalty: 0,
|
|
1121
|
-
resendFiles: agentsSettings.resendFiles.default,
|
|
1122
|
-
imageDetail: ImageDetail.auto,
|
|
1123
|
-
agent_id: undefined,
|
|
1124
|
-
instructions: undefined,
|
|
1125
|
-
promptPrefix: null,
|
|
1126
|
-
iconURL: undefined,
|
|
1127
|
-
greeting: undefined,
|
|
1128
|
-
maxContextTokens: undefined,
|
|
1129
|
-
}));
|
|
1130
|
-
|
|
1131
|
-
/**
 * Conversation fields relevant to the OpenAI-style endpoints
 * (openAI, custom, azureOpenAI), picked from `tConversationSchema`;
 * base for `openAISchema`.
 */
export const openAIBaseSchema = tConversationSchema.pick({
  model: true,
  modelLabel: true,
  // deprecated in the base schema; kept for backward compatibility
  chatGptLabel: true,
  promptPrefix: true,
  temperature: true,
  top_p: true,
  presence_penalty: true,
  frequency_penalty: true,
  resendFiles: true,
  artifacts: true,
  imageDetail: true,
  stop: true,
  iconURL: true,
  greeting: true,
  spec: true,
  maxContextTokens: true,
  max_tokens: true,
  reasoning_effort: true,
  reasoning_summary: true,
  verbosity: true,
  useResponsesApi: true,
  web_search: true,
  disableStreaming: true,
  fileTokenLimit: true,
});
|
|
1157
|
-
|
|
1158
|
-
export const openAISchema = openAIBaseSchema
|
|
1159
|
-
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
|
|
1160
|
-
.catch(() => ({}));
|
|
1161
|
-
|
|
1162
|
-
export const compactGoogleSchema = googleBaseSchema
|
|
1163
|
-
.transform((obj) => {
|
|
1164
|
-
const newObj: Partial<TConversation> = { ...obj };
|
|
1165
|
-
if (newObj.temperature === google.temperature.default) {
|
|
1166
|
-
delete newObj.temperature;
|
|
1167
|
-
}
|
|
1168
|
-
if (newObj.maxOutputTokens === google.maxOutputTokens.default) {
|
|
1169
|
-
delete newObj.maxOutputTokens;
|
|
1170
|
-
}
|
|
1171
|
-
if (newObj.topP === google.topP.default) {
|
|
1172
|
-
delete newObj.topP;
|
|
1173
|
-
}
|
|
1174
|
-
if (newObj.topK === google.topK.default) {
|
|
1175
|
-
delete newObj.topK;
|
|
1176
|
-
}
|
|
1177
|
-
|
|
1178
|
-
return removeNullishValues(newObj, true);
|
|
1179
|
-
})
|
|
1180
|
-
.catch(() => ({}));
|
|
1181
|
-
|
|
1182
|
-
/**
 * Conversation fields relevant to the Anthropic endpoint, picked from
 * `tConversationSchema`; base for `anthropicSchema`.
 */
export const anthropicBaseSchema = tConversationSchema.pick({
  model: true,
  modelLabel: true,
  promptPrefix: true,
  temperature: true,
  maxOutputTokens: true,
  topP: true,
  topK: true,
  resendFiles: true,
  promptCache: true,
  thinking: true,
  thinkingBudget: true,
  effort: true,
  artifacts: true,
  iconURL: true,
  greeting: true,
  spec: true,
  maxContextTokens: true,
  web_search: true,
  fileTokenLimit: true,
  stop: true,
  stream: true,
});
|
|
1205
|
-
|
|
1206
|
-
export const anthropicSchema = anthropicBaseSchema
|
|
1207
|
-
.transform((obj) => removeNullishValues(obj))
|
|
1208
|
-
.catch(() => ({}));
|
|
1209
|
-
|
|
1210
|
-
/** A site-wide banner message with a display window. */
export const tBannerSchema = z.object({
  bannerId: z.string(),
  message: z.string(),
  // Display window bounds — presumably ISO date strings; confirm against API
  displayFrom: z.string(),
  displayTo: z.string(),
  createdAt: z.string(),
  updatedAt: z.string(),
  isPublic: z.boolean(),
  // Defaults to false when absent from the input
  persistable: z.boolean().default(false),
});
export type TBanner = z.infer<typeof tBannerSchema>;
|
|
1221
|
-
|
|
1222
|
-
/**
 * Minimal agents field set, picked from `tConversationSchema`;
 * base for `compactAgentsSchema`.
 */
export const compactAgentsBaseSchema = tConversationSchema.pick({
  spec: true,
  // model: true, — intentionally excluded here
  iconURL: true,
  greeting: true,
  agent_id: true,
  instructions: true,
  additional_instructions: true,
});
|
|
1231
|
-
|
|
1232
|
-
export const compactAgentsSchema = compactAgentsBaseSchema
|
|
1233
|
-
.transform((obj) => removeNullishValues(obj))
|
|
1234
|
-
.catch(() => ({}));
|