@adaline/openai 0.10.0 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +53 -25
- package/dist/index.d.ts +53 -25
- package/dist/index.js +146 -123
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +15 -14
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
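
Reading the declaration diffs below, the 0.12.0 additions fall into three groups: a *Literal string constant exported alongside each chat and embedding model schema, o-series-specific role and text-only modality constants (OpenAIChatModelOSSeriesRoles, OpenAIChatModelTextModalitiesEnum), and an optional temperature field added to the o-series config schema plus a transformTools method on BaseOSeriesChatModel. A minimal consumer-side sketch, using only names visible in the new declarations (the constructor option shapes are not part of this diff, so no model is instantiated):

import {
  GPT_4oLiteral,
  O1_MiniLiteral,
  OpenAIChatModelOSSeriesRolesMap,
  OpenAIChatModelTextModalitiesEnum,
} from "@adaline/openai";

// The new *Literal constants carry the provider model names as literal types,
// so model ids can be referenced without hand-written strings.
const chatModel: typeof GPT_4oLiteral = "gpt-4o";
const oSeriesModel: typeof O1_MiniLiteral = "o1-mini";

// o-series models expose a reduced role set (user and assistant only).
const oSeriesRoles = Object.values(OpenAIChatModelOSSeriesRolesMap); // ["user", "assistant"]

// Text-only modality enum, usable for validating content kinds at runtime.
const modality = OpenAIChatModelTextModalitiesEnum.parse("text");

console.log(chatModel, oSeriesModel, oSeriesRoles, modality);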
package/dist/index.d.mts
CHANGED
@@ -438,6 +438,16 @@ declare const OpenAIChatModelConfigs: {
 };
 readonly oSeries: (maxOutputTokens: number, maxSequences: number) => {
 def: {
+temperature: {
+type: "range";
+param: string;
+title: string;
+description: string;
+max: number;
+min: number;
+step: number;
+default: number;
+};
 maxTokens: {
 type: "range";
 param: string;
@@ -463,16 +473,6 @@ declare const OpenAIChatModelConfigs: {
 description: string;
 objectSchema?: any;
 };
-temperature: {
-type: "range";
-param: string;
-title: string;
-description: string;
-max: number;
-min: number;
-step: number;
-default: number;
-};
 stop: {
 type: "multi-string";
 param: string;
@@ -565,6 +565,7 @@ declare const OpenAIChatModelConfigs: {
 [x: string]: any;
 }>;
 }>, {
+temperature: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 maxTokens: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 }>, "strip", zod.ZodTypeAny, {
 responseSchema: {
@@ -1207,6 +1208,16 @@ declare const ChatModelResponseSchemaConfigSchema: (maxOutputTokens: number, max
 }>;

 declare const ChatModelOSeriesConfigDef: (maxOutputTokens: number, maxSequences: number) => {
+temperature: {
+type: "range";
+param: string;
+title: string;
+description: string;
+max: number;
+min: number;
+step: number;
+default: number;
+};
 maxTokens: {
 type: "range";
 param: string;
@@ -1232,16 +1243,6 @@ declare const ChatModelOSeriesConfigDef: (maxOutputTokens: number, maxSequences:
 description: string;
 objectSchema?: any;
 };
-temperature: {
-type: "range";
-param: string;
-title: string;
-description: string;
-max: number;
-min: number;
-step: number;
-default: number;
-};
 stop: {
 type: "multi-string";
 param: string;
@@ -1334,6 +1335,7 @@ declare const ChatModelOSeriesConfigSchema: (maxOutputTokens: number, maxSequenc
 [x: string]: any;
 }>;
 }>, {
+temperature: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 maxTokens: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 }>, "strip", zod.ZodTypeAny, {
 responseSchema: {
@@ -1449,9 +1451,16 @@ declare const OpenAIChatModelRolesMap: {
 readonly assistant: "assistant";
 readonly tool: "tool";
 };
+declare const OpenAIChatModelOSSeriesRoles: z.ZodEnum<["user", "assistant"]>;
+declare const OpenAIChatModelOSSeriesRolesMap: {
+readonly user: "user";
+readonly assistant: "assistant";
+};

 declare const OpenAIChatModelModalities: ChatModelSchemaType["modalities"];
 declare const OpenAIChatModelModalitiesEnum: z.ZodEnum<["text", "image", "tool-call", "tool-response"]>;
+declare const OpenAIChatModelTextModalities: ChatModelSchemaType["modalities"];
+declare const OpenAIChatModelTextModalitiesEnum: z.ZodEnum<["text"]>;
 declare const OpenAIChatModelTextToolModalities: ChatModelSchemaType["modalities"];
 declare const OpenAIChatModelTextToolModalitiesEnum: z.ZodEnum<["text", "tool-call", "tool-response"]>;

@@ -3689,6 +3698,7 @@ declare class BaseOSeriesChatModel extends BaseChatModel {
 messages: MessageType[];
 tools: ToolType[] | undefined;
 };
+transformTools(tools: ToolType[]): ParamsType;
 getStreamChatUrl(config?: ConfigType, messages?: MessageType[], tools?: ToolType[]): Promise<UrlType>;
 getStreamChatHeaders(config?: ConfigType, messages?: MessageType[], tools?: ToolType[]): Promise<HeadersType>;
 getStreamChatData(config: ConfigType, messages: MessageType[], tools?: ToolType[]): Promise<ParamsType>;
@@ -3698,6 +3708,7 @@ declare class BaseOSeriesChatModel extends BaseChatModel {
 }>;
 }

+declare const GPT_3_5_Turbo_0125Literal = "gpt-3.5-turbo-0125";
 declare const GPT_3_5_Turbo_0125Schema: {
 description: string;
 name: string;
@@ -3768,6 +3779,7 @@ declare class GPT_3_5_Turbo_0125 extends BaseChatModel {
 constructor(options: GPT_3_5_Turbo_0125OptionsType);
 }

+declare const GPT_3_5_Turbo_1106Literal = "gpt-3.5-turbo-1106";
 declare const GPT_3_5_Turbo_1106Schema: {
 description: string;
 name: string;
@@ -3838,6 +3850,7 @@ declare class GPT_3_5_Turbo_1106 extends BaseChatModel {
 constructor(options: GPT_3_5_Turbo_1106OptionsType);
 }

+declare const GPT_3_5_TurboLiteral = "gpt-3.5-turbo";
 declare const GPT_3_5_TurboSchema: {
 description: string;
 name: string;
@@ -3908,6 +3921,7 @@ declare class GPT_3_5_Turbo extends BaseChatModel {
 constructor(options: GPT_3_5_TurboOptionsType);
 }

+declare const GPT_4_0125_PreviewLiteral = "gpt-4-0125-preview";
 declare const GPT_4_0125_PreviewSchema: {
 description: string;
 name: string;
@@ -3978,6 +3992,7 @@ declare class GPT_4_0125_Preview extends BaseChatModel {
 constructor(options: GPT_4_0125_PreviewOptionsType);
 }

+declare const GPT_4_0613Literal = "gpt-4-0613";
 declare const GPT_4_0613Schema: {
 description: string;
 name: string;
@@ -4048,6 +4063,7 @@ declare class GPT_4_0613 extends BaseChatModel {
 constructor(options: GPT_4_0613OptionsType);
 }

+declare const GPT_4_1106_PreviewLiteral = "gpt-4-1106-preview";
 declare const GPT_4_1106_PreviewSchema: {
 description: string;
 name: string;
@@ -4118,6 +4134,7 @@ declare class GPT_4_1106_Preview extends BaseChatModel {
 constructor(options: GPT_4_1106_PreviewOptionsType);
 }

+declare const GPT_4_Turbo_2024_04_09Literal = "gpt-4-turbo-2024-04-09";
 declare const GPT_4_Turbo_2024_04_09Schema: {
 description: string;
 name: string;
@@ -4188,6 +4205,7 @@ declare class GPT_4_Turbo_2024_04_09 extends BaseChatModel {
 constructor(options: GPT_4_Turbo_2024_04_09OptionsType);
 }

+declare const GPT_4_Turbo_PreviewLiteral = "gpt-4-turbo-preview";
 declare const GPT_4_Turbo_PreviewSchema: {
 description: string;
 name: string;
@@ -4258,6 +4276,7 @@ declare class GPT_4_Turbo_Preview extends BaseChatModel {
 constructor(options: GPT_4_Turbo_PreviewOptionsType);
 }

+declare const GPT_4_TurboLiteral = "gpt-4-turbo";
 declare const GPT_4_TurboSchema: {
 description: string;
 name: string;
@@ -4328,6 +4347,7 @@ declare class GPT_4_Turbo extends BaseChatModel {
 constructor(options: GPT_4_TurboOptionsType);
 }

+declare const GPT_4Literal = "gpt-4";
 declare const GPT_4Schema: {
 description: string;
 name: string;
@@ -4398,6 +4418,7 @@ declare class GPT_4 extends BaseChatModel {
 constructor(options: GPT_4OptionsType);
 }

+declare const GPT_4o_2024_08_06Literal = "gpt-4o-2024-08-06";
 declare const GPT_4o_2024_08_06Schema: {
 description: string;
 name: string;
@@ -4468,6 +4489,7 @@ declare class GPT_4o_2024_08_06 extends BaseChatModel {
 constructor(options: GPT_4o_2024_08_06OptionsType);
 }

+declare const GPT_4o_MiniLiteral = "gpt-4o-mini";
 declare const GPT_4o_MiniSchema: {
 description: string;
 name: string;
@@ -4538,6 +4560,7 @@ declare class GPT_4o_Mini extends BaseChatModel {
 constructor(options: GPT_4o_MiniOptionsType);
 }

+declare const GPT_4oLiteral = "gpt-4o";
 declare const GPT_4oSchema: {
 description: string;
 name: string;
@@ -4608,11 +4631,12 @@ declare class GPT_4o extends BaseChatModel {
 constructor(options: GPT_4oOptionsType);
 }

+declare const O1_MiniLiteral = "o1-mini";
 declare const O1_MiniSchema: {
 description: string;
 name: string;
-roles: Partial<Record<"
-modalities: ["text"
+roles: Partial<Record<"user" | "assistant", string | undefined>>;
+modalities: ["text", ..."text"[]];
 maxInputTokens: number;
 maxOutputTokens: number;
 config: {
@@ -4678,11 +4702,12 @@ declare class O1_Mini extends BaseOSeriesChatModel {
 constructor(options: O1_MiniOptionsType);
 }

+declare const O1_PreviewLiteral = "o1-preview";
 declare const O1_PreviewSchema: {
 description: string;
 name: string;
-roles: Partial<Record<"
-modalities: ["text"
+roles: Partial<Record<"user" | "assistant", string | undefined>>;
+modalities: ["text", ..."text"[]];
 maxInputTokens: number;
 maxOutputTokens: number;
 config: {
@@ -4865,6 +4890,7 @@ declare class BaseEmbeddingModel implements EmbeddingModelV1<EmbeddingModelSchem
 transformGetEmbeddingsResponse(response: any): EmbeddingResponseType;
 }

+declare const Text_Embedding_Ada002Literal = "text-embedding-ada-002";
 declare const Text_Embedding_Ada002Schema: {
 description: string;
 name: string;
@@ -4928,6 +4954,7 @@ declare class Text_Embedding_Ada002 extends BaseEmbeddingModel {
 constructor(options: Text_Embedding_Ada002_OptionsType);
 }

+declare const Text_Embedding_3_SmallLiteral = "text-embedding-3-small";
 declare const Text_Embedding_3_SmallSchema: {
 description: string;
 name: string;
@@ -4991,6 +5018,7 @@ declare class Text_Embedding_3_Small extends BaseEmbeddingModel {
 constructor(options: Text_Embedding_3_Small_OptionsType);
 }

+declare const Text_Embedding_3_LargeLiteral = "text-embedding-3-large";
 declare const Text_Embedding_3_LargeSchema: {
 description: string;
 name: string;
@@ -5071,4 +5099,4 @@ declare class OpenAI<O extends Record<string, any> = Record<string, any>> implem
 embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType>;
 }

-export { BaseChatModel, BaseChatModelOptions, type BaseChatModelOptionsType, BaseEmbeddingModel, BaseEmbeddingModelOptions, type BaseEmbeddingModelOptionsType, BaseOSeriesChatModel, ChatModelBaseConfigDef, ChatModelBaseConfigSchema, ChatModelOSeriesConfigDef, ChatModelOSeriesConfigSchema, ChatModelResponseFormatConfigDef, ChatModelResponseFormatConfigSchema, ChatModelResponseSchemaConfigDef, ChatModelResponseSchemaConfigSchema, EmbeddingModelBaseConfigDef, EmbeddingModelBaseConfigSchema, EmbeddingModelDimensionsConfigDef, EmbeddingModelDimensionsConfigSchema, GPT_3_5_Turbo, GPT_3_5_TurboOptions, type GPT_3_5_TurboOptionsType, GPT_3_5_TurboSchema, GPT_3_5_Turbo_0125, GPT_3_5_Turbo_0125Options, type GPT_3_5_Turbo_0125OptionsType, GPT_3_5_Turbo_0125Schema, GPT_3_5_Turbo_1106, GPT_3_5_Turbo_1106Options, type GPT_3_5_Turbo_1106OptionsType, GPT_3_5_Turbo_1106Schema, GPT_4, GPT_4Options, type GPT_4OptionsType, GPT_4Schema, GPT_4_0125_Preview, GPT_4_0125_PreviewOptions, type GPT_4_0125_PreviewOptionsType, GPT_4_0125_PreviewSchema, GPT_4_0613, GPT_4_0613Options, type GPT_4_0613OptionsType, GPT_4_0613Schema, GPT_4_1106_Preview, GPT_4_1106_PreviewOptions, type GPT_4_1106_PreviewOptionsType, GPT_4_1106_PreviewSchema, GPT_4_Turbo, GPT_4_TurboOptions, type GPT_4_TurboOptionsType, GPT_4_TurboSchema, GPT_4_Turbo_2024_04_09, GPT_4_Turbo_2024_04_09Options, type GPT_4_Turbo_2024_04_09OptionsType, GPT_4_Turbo_2024_04_09Schema, GPT_4_Turbo_Preview, GPT_4_Turbo_PreviewOptions, type GPT_4_Turbo_PreviewOptionsType, GPT_4_Turbo_PreviewSchema, GPT_4o, GPT_4oOptions, type GPT_4oOptionsType, GPT_4oSchema, GPT_4o_2024_08_06, GPT_4o_2024_08_06Options, type GPT_4o_2024_08_06OptionsType, GPT_4o_2024_08_06Schema, GPT_4o_Mini, GPT_4o_MiniOptions, type GPT_4o_MiniOptionsType, GPT_4o_MiniSchema, O1_Mini, O1_MiniOptions, type O1_MiniOptionsType, O1_MiniSchema, O1_Preview, O1_PreviewOptions, type O1_PreviewOptionsType, O1_PreviewSchema, OpenAI, OpenAIChatModelConfigs, OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap, OpenAIChatModelTextToolModalities, OpenAIChatModelTextToolModalitiesEnum, OpenAIChatOSeriesRequest, type OpenAIChatOSeriesRequestType, OpenAIChatRequest, OpenAIChatRequestAssistantMessage, type OpenAIChatRequestAssistantMessageType, OpenAIChatRequestImageContent, type OpenAIChatRequestImageContentType, OpenAIChatRequestMessage, type OpenAIChatRequestMessageType, OpenAIChatRequestResponseFormat, type OpenAIChatRequestResponseFormatType, OpenAIChatRequestSystemMessage, type OpenAIChatRequestSystemMessageType, OpenAIChatRequestTextContent, type OpenAIChatRequestTextContentType, OpenAIChatRequestTool, OpenAIChatRequestToolCallContent, type OpenAIChatRequestToolCallContentType, OpenAIChatRequestToolChoiceEnum, type OpenAIChatRequestToolChoiceEnumType, OpenAIChatRequestToolChoiceFunction, type OpenAIChatRequestToolChoiceFunctionType, OpenAIChatRequestToolMessage, type OpenAIChatRequestToolMessageType, type OpenAIChatRequestToolType, type OpenAIChatRequestType, OpenAIChatRequestUserMessage, type OpenAIChatRequestUserMessageType, OpenAICompleteChatResponse, type OpenAICompleteChatResponseType, OpenAIEmbeddingModelConfigs, OpenAIEmbeddingModelModalities, OpenAIEmbeddingModelModalitiesEnum, OpenAIEmbeddingRequest, OpenAIEmbeddingRequestInput, type OpenAIEmbeddingRequestInputType, type OpenAIEmbeddingRequestType, OpenAIGetEmbeddingsResponse, OpenAIStreamChatResponse, type OpenAIStreamChatResponseType, OpenAIToolCallsCompleteChatResponse, OpenAIToolCallsStreamChatResponse, ProviderLiteral, Text_Embedding_3_Large, Text_Embedding_3_LargeSchema, Text_Embedding_3_Large_Options, type Text_Embedding_3_Large_OptionsType, Text_Embedding_3_Small, Text_Embedding_3_SmallSchema, Text_Embedding_3_Small_Options, type Text_Embedding_3_Small_OptionsType, Text_Embedding_Ada002, Text_Embedding_Ada002Schema, Text_Embedding_Ada002_Options, type Text_Embedding_Ada002_OptionsType, dimensions, encodingFormat, frequencyPenalty, logProbs, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topLogProbs, topP };
+export { BaseChatModel, BaseChatModelOptions, type BaseChatModelOptionsType, BaseEmbeddingModel, BaseEmbeddingModelOptions, type BaseEmbeddingModelOptionsType, BaseOSeriesChatModel, ChatModelBaseConfigDef, ChatModelBaseConfigSchema, ChatModelOSeriesConfigDef, ChatModelOSeriesConfigSchema, ChatModelResponseFormatConfigDef, ChatModelResponseFormatConfigSchema, ChatModelResponseSchemaConfigDef, ChatModelResponseSchemaConfigSchema, EmbeddingModelBaseConfigDef, EmbeddingModelBaseConfigSchema, EmbeddingModelDimensionsConfigDef, EmbeddingModelDimensionsConfigSchema, GPT_3_5_Turbo, GPT_3_5_TurboLiteral, GPT_3_5_TurboOptions, type GPT_3_5_TurboOptionsType, GPT_3_5_TurboSchema, GPT_3_5_Turbo_0125, GPT_3_5_Turbo_0125Literal, GPT_3_5_Turbo_0125Options, type GPT_3_5_Turbo_0125OptionsType, GPT_3_5_Turbo_0125Schema, GPT_3_5_Turbo_1106, GPT_3_5_Turbo_1106Literal, GPT_3_5_Turbo_1106Options, type GPT_3_5_Turbo_1106OptionsType, GPT_3_5_Turbo_1106Schema, GPT_4, GPT_4Literal, GPT_4Options, type GPT_4OptionsType, GPT_4Schema, GPT_4_0125_Preview, GPT_4_0125_PreviewLiteral, GPT_4_0125_PreviewOptions, type GPT_4_0125_PreviewOptionsType, GPT_4_0125_PreviewSchema, GPT_4_0613, GPT_4_0613Literal, GPT_4_0613Options, type GPT_4_0613OptionsType, GPT_4_0613Schema, GPT_4_1106_Preview, GPT_4_1106_PreviewLiteral, GPT_4_1106_PreviewOptions, type GPT_4_1106_PreviewOptionsType, GPT_4_1106_PreviewSchema, GPT_4_Turbo, GPT_4_TurboLiteral, GPT_4_TurboOptions, type GPT_4_TurboOptionsType, GPT_4_TurboSchema, GPT_4_Turbo_2024_04_09, GPT_4_Turbo_2024_04_09Literal, GPT_4_Turbo_2024_04_09Options, type GPT_4_Turbo_2024_04_09OptionsType, GPT_4_Turbo_2024_04_09Schema, GPT_4_Turbo_Preview, GPT_4_Turbo_PreviewLiteral, GPT_4_Turbo_PreviewOptions, type GPT_4_Turbo_PreviewOptionsType, GPT_4_Turbo_PreviewSchema, GPT_4o, GPT_4oLiteral, GPT_4oOptions, type GPT_4oOptionsType, GPT_4oSchema, GPT_4o_2024_08_06, GPT_4o_2024_08_06Literal, GPT_4o_2024_08_06Options, type GPT_4o_2024_08_06OptionsType, GPT_4o_2024_08_06Schema, GPT_4o_Mini, GPT_4o_MiniLiteral, GPT_4o_MiniOptions, type GPT_4o_MiniOptionsType, GPT_4o_MiniSchema, O1_Mini, O1_MiniLiteral, O1_MiniOptions, type O1_MiniOptionsType, O1_MiniSchema, O1_Preview, O1_PreviewLiteral, O1_PreviewOptions, type O1_PreviewOptionsType, O1_PreviewSchema, OpenAI, OpenAIChatModelConfigs, OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelOSSeriesRoles, OpenAIChatModelOSSeriesRolesMap, OpenAIChatModelRoles, OpenAIChatModelRolesMap, OpenAIChatModelTextModalities, OpenAIChatModelTextModalitiesEnum, OpenAIChatModelTextToolModalities, OpenAIChatModelTextToolModalitiesEnum, OpenAIChatOSeriesRequest, type OpenAIChatOSeriesRequestType, OpenAIChatRequest, OpenAIChatRequestAssistantMessage, type OpenAIChatRequestAssistantMessageType, OpenAIChatRequestImageContent, type OpenAIChatRequestImageContentType, OpenAIChatRequestMessage, type OpenAIChatRequestMessageType, OpenAIChatRequestResponseFormat, type OpenAIChatRequestResponseFormatType, OpenAIChatRequestSystemMessage, type OpenAIChatRequestSystemMessageType, OpenAIChatRequestTextContent, type OpenAIChatRequestTextContentType, OpenAIChatRequestTool, OpenAIChatRequestToolCallContent, type OpenAIChatRequestToolCallContentType, OpenAIChatRequestToolChoiceEnum, type OpenAIChatRequestToolChoiceEnumType, OpenAIChatRequestToolChoiceFunction, type OpenAIChatRequestToolChoiceFunctionType, OpenAIChatRequestToolMessage, type OpenAIChatRequestToolMessageType, type OpenAIChatRequestToolType, type OpenAIChatRequestType, OpenAIChatRequestUserMessage, type OpenAIChatRequestUserMessageType, OpenAICompleteChatResponse, type OpenAICompleteChatResponseType, OpenAIEmbeddingModelConfigs, OpenAIEmbeddingModelModalities, OpenAIEmbeddingModelModalitiesEnum, OpenAIEmbeddingRequest, OpenAIEmbeddingRequestInput, type OpenAIEmbeddingRequestInputType, type OpenAIEmbeddingRequestType, OpenAIGetEmbeddingsResponse, OpenAIStreamChatResponse, type OpenAIStreamChatResponseType, OpenAIToolCallsCompleteChatResponse, OpenAIToolCallsStreamChatResponse, ProviderLiteral, Text_Embedding_3_Large, Text_Embedding_3_LargeLiteral, Text_Embedding_3_LargeSchema, Text_Embedding_3_Large_Options, type Text_Embedding_3_Large_OptionsType, Text_Embedding_3_Small, Text_Embedding_3_SmallLiteral, Text_Embedding_3_SmallSchema, Text_Embedding_3_Small_Options, type Text_Embedding_3_Small_OptionsType, Text_Embedding_Ada002, Text_Embedding_Ada002Literal, Text_Embedding_Ada002Schema, Text_Embedding_Ada002_Options, type Text_Embedding_Ada002_OptionsType, dimensions, encodingFormat, frequencyPenalty, logProbs, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topLogProbs, topP };
package/dist/index.d.ts
CHANGED
@@ -438,6 +438,16 @@ declare const OpenAIChatModelConfigs: {
 };
 readonly oSeries: (maxOutputTokens: number, maxSequences: number) => {
 def: {
+temperature: {
+type: "range";
+param: string;
+title: string;
+description: string;
+max: number;
+min: number;
+step: number;
+default: number;
+};
 maxTokens: {
 type: "range";
 param: string;
@@ -463,16 +473,6 @@ declare const OpenAIChatModelConfigs: {
 description: string;
 objectSchema?: any;
 };
-temperature: {
-type: "range";
-param: string;
-title: string;
-description: string;
-max: number;
-min: number;
-step: number;
-default: number;
-};
 stop: {
 type: "multi-string";
 param: string;
@@ -565,6 +565,7 @@ declare const OpenAIChatModelConfigs: {
 [x: string]: any;
 }>;
 }>, {
+temperature: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 maxTokens: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 }>, "strip", zod.ZodTypeAny, {
 responseSchema: {
@@ -1207,6 +1208,16 @@ declare const ChatModelResponseSchemaConfigSchema: (maxOutputTokens: number, max
 }>;

 declare const ChatModelOSeriesConfigDef: (maxOutputTokens: number, maxSequences: number) => {
+temperature: {
+type: "range";
+param: string;
+title: string;
+description: string;
+max: number;
+min: number;
+step: number;
+default: number;
+};
 maxTokens: {
 type: "range";
 param: string;
@@ -1232,16 +1243,6 @@ declare const ChatModelOSeriesConfigDef: (maxOutputTokens: number, maxSequences:
 description: string;
 objectSchema?: any;
 };
-temperature: {
-type: "range";
-param: string;
-title: string;
-description: string;
-max: number;
-min: number;
-step: number;
-default: number;
-};
 stop: {
 type: "multi-string";
 param: string;
@@ -1334,6 +1335,7 @@ declare const ChatModelOSeriesConfigSchema: (maxOutputTokens: number, maxSequenc
 [x: string]: any;
 }>;
 }>, {
+temperature: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 maxTokens: zod.ZodOptional<zod.ZodDefault<zod.ZodNumber>>;
 }>, "strip", zod.ZodTypeAny, {
 responseSchema: {
@@ -1449,9 +1451,16 @@ declare const OpenAIChatModelRolesMap: {
 readonly assistant: "assistant";
 readonly tool: "tool";
 };
+declare const OpenAIChatModelOSSeriesRoles: z.ZodEnum<["user", "assistant"]>;
+declare const OpenAIChatModelOSSeriesRolesMap: {
+readonly user: "user";
+readonly assistant: "assistant";
+};

 declare const OpenAIChatModelModalities: ChatModelSchemaType["modalities"];
 declare const OpenAIChatModelModalitiesEnum: z.ZodEnum<["text", "image", "tool-call", "tool-response"]>;
+declare const OpenAIChatModelTextModalities: ChatModelSchemaType["modalities"];
+declare const OpenAIChatModelTextModalitiesEnum: z.ZodEnum<["text"]>;
 declare const OpenAIChatModelTextToolModalities: ChatModelSchemaType["modalities"];
 declare const OpenAIChatModelTextToolModalitiesEnum: z.ZodEnum<["text", "tool-call", "tool-response"]>;

@@ -3689,6 +3698,7 @@ declare class BaseOSeriesChatModel extends BaseChatModel {
 messages: MessageType[];
 tools: ToolType[] | undefined;
 };
+transformTools(tools: ToolType[]): ParamsType;
 getStreamChatUrl(config?: ConfigType, messages?: MessageType[], tools?: ToolType[]): Promise<UrlType>;
 getStreamChatHeaders(config?: ConfigType, messages?: MessageType[], tools?: ToolType[]): Promise<HeadersType>;
 getStreamChatData(config: ConfigType, messages: MessageType[], tools?: ToolType[]): Promise<ParamsType>;
@@ -3698,6 +3708,7 @@ declare class BaseOSeriesChatModel extends BaseChatModel {
 }>;
 }

+declare const GPT_3_5_Turbo_0125Literal = "gpt-3.5-turbo-0125";
 declare const GPT_3_5_Turbo_0125Schema: {
 description: string;
 name: string;
@@ -3768,6 +3779,7 @@ declare class GPT_3_5_Turbo_0125 extends BaseChatModel {
 constructor(options: GPT_3_5_Turbo_0125OptionsType);
 }

+declare const GPT_3_5_Turbo_1106Literal = "gpt-3.5-turbo-1106";
 declare const GPT_3_5_Turbo_1106Schema: {
 description: string;
 name: string;
@@ -3838,6 +3850,7 @@ declare class GPT_3_5_Turbo_1106 extends BaseChatModel {
 constructor(options: GPT_3_5_Turbo_1106OptionsType);
 }

+declare const GPT_3_5_TurboLiteral = "gpt-3.5-turbo";
 declare const GPT_3_5_TurboSchema: {
 description: string;
 name: string;
@@ -3908,6 +3921,7 @@ declare class GPT_3_5_Turbo extends BaseChatModel {
 constructor(options: GPT_3_5_TurboOptionsType);
 }

+declare const GPT_4_0125_PreviewLiteral = "gpt-4-0125-preview";
 declare const GPT_4_0125_PreviewSchema: {
 description: string;
 name: string;
@@ -3978,6 +3992,7 @@ declare class GPT_4_0125_Preview extends BaseChatModel {
 constructor(options: GPT_4_0125_PreviewOptionsType);
 }

+declare const GPT_4_0613Literal = "gpt-4-0613";
 declare const GPT_4_0613Schema: {
 description: string;
 name: string;
@@ -4048,6 +4063,7 @@ declare class GPT_4_0613 extends BaseChatModel {
 constructor(options: GPT_4_0613OptionsType);
 }

+declare const GPT_4_1106_PreviewLiteral = "gpt-4-1106-preview";
 declare const GPT_4_1106_PreviewSchema: {
 description: string;
 name: string;
@@ -4118,6 +4134,7 @@ declare class GPT_4_1106_Preview extends BaseChatModel {
 constructor(options: GPT_4_1106_PreviewOptionsType);
 }

+declare const GPT_4_Turbo_2024_04_09Literal = "gpt-4-turbo-2024-04-09";
 declare const GPT_4_Turbo_2024_04_09Schema: {
 description: string;
 name: string;
@@ -4188,6 +4205,7 @@ declare class GPT_4_Turbo_2024_04_09 extends BaseChatModel {
 constructor(options: GPT_4_Turbo_2024_04_09OptionsType);
 }

+declare const GPT_4_Turbo_PreviewLiteral = "gpt-4-turbo-preview";
 declare const GPT_4_Turbo_PreviewSchema: {
 description: string;
 name: string;
@@ -4258,6 +4276,7 @@ declare class GPT_4_Turbo_Preview extends BaseChatModel {
 constructor(options: GPT_4_Turbo_PreviewOptionsType);
 }

+declare const GPT_4_TurboLiteral = "gpt-4-turbo";
 declare const GPT_4_TurboSchema: {
 description: string;
 name: string;
@@ -4328,6 +4347,7 @@ declare class GPT_4_Turbo extends BaseChatModel {
 constructor(options: GPT_4_TurboOptionsType);
 }

+declare const GPT_4Literal = "gpt-4";
 declare const GPT_4Schema: {
 description: string;
 name: string;
@@ -4398,6 +4418,7 @@ declare class GPT_4 extends BaseChatModel {
 constructor(options: GPT_4OptionsType);
 }

+declare const GPT_4o_2024_08_06Literal = "gpt-4o-2024-08-06";
 declare const GPT_4o_2024_08_06Schema: {
 description: string;
 name: string;
@@ -4468,6 +4489,7 @@ declare class GPT_4o_2024_08_06 extends BaseChatModel {
 constructor(options: GPT_4o_2024_08_06OptionsType);
 }

+declare const GPT_4o_MiniLiteral = "gpt-4o-mini";
 declare const GPT_4o_MiniSchema: {
 description: string;
 name: string;
@@ -4538,6 +4560,7 @@ declare class GPT_4o_Mini extends BaseChatModel {
 constructor(options: GPT_4o_MiniOptionsType);
 }

+declare const GPT_4oLiteral = "gpt-4o";
 declare const GPT_4oSchema: {
 description: string;
 name: string;
@@ -4608,11 +4631,12 @@ declare class GPT_4o extends BaseChatModel {
 constructor(options: GPT_4oOptionsType);
 }

+declare const O1_MiniLiteral = "o1-mini";
 declare const O1_MiniSchema: {
 description: string;
 name: string;
-roles: Partial<Record<"
-modalities: ["text"
+roles: Partial<Record<"user" | "assistant", string | undefined>>;
+modalities: ["text", ..."text"[]];
 maxInputTokens: number;
 maxOutputTokens: number;
 config: {
@@ -4678,11 +4702,12 @@ declare class O1_Mini extends BaseOSeriesChatModel {
 constructor(options: O1_MiniOptionsType);
 }

+declare const O1_PreviewLiteral = "o1-preview";
 declare const O1_PreviewSchema: {
 description: string;
 name: string;
-roles: Partial<Record<"
-modalities: ["text"
+roles: Partial<Record<"user" | "assistant", string | undefined>>;
+modalities: ["text", ..."text"[]];
 maxInputTokens: number;
 maxOutputTokens: number;
 config: {
@@ -4865,6 +4890,7 @@ declare class BaseEmbeddingModel implements EmbeddingModelV1<EmbeddingModelSchem
 transformGetEmbeddingsResponse(response: any): EmbeddingResponseType;
 }

+declare const Text_Embedding_Ada002Literal = "text-embedding-ada-002";
 declare const Text_Embedding_Ada002Schema: {
 description: string;
 name: string;
@@ -4928,6 +4954,7 @@ declare class Text_Embedding_Ada002 extends BaseEmbeddingModel {
 constructor(options: Text_Embedding_Ada002_OptionsType);
 }

+declare const Text_Embedding_3_SmallLiteral = "text-embedding-3-small";
 declare const Text_Embedding_3_SmallSchema: {
 description: string;
 name: string;
@@ -4991,6 +5018,7 @@ declare class Text_Embedding_3_Small extends BaseEmbeddingModel {
 constructor(options: Text_Embedding_3_Small_OptionsType);
 }

+declare const Text_Embedding_3_LargeLiteral = "text-embedding-3-large";
 declare const Text_Embedding_3_LargeSchema: {
 description: string;
 name: string;
@@ -5071,4 +5099,4 @@ declare class OpenAI<O extends Record<string, any> = Record<string, any>> implem
 embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType>;
 }

-export { BaseChatModel, BaseChatModelOptions, type BaseChatModelOptionsType, BaseEmbeddingModel, BaseEmbeddingModelOptions, type BaseEmbeddingModelOptionsType, BaseOSeriesChatModel, ChatModelBaseConfigDef, ChatModelBaseConfigSchema, ChatModelOSeriesConfigDef, ChatModelOSeriesConfigSchema, ChatModelResponseFormatConfigDef, ChatModelResponseFormatConfigSchema, ChatModelResponseSchemaConfigDef, ChatModelResponseSchemaConfigSchema, EmbeddingModelBaseConfigDef, EmbeddingModelBaseConfigSchema, EmbeddingModelDimensionsConfigDef, EmbeddingModelDimensionsConfigSchema, GPT_3_5_Turbo, GPT_3_5_TurboOptions, type GPT_3_5_TurboOptionsType, GPT_3_5_TurboSchema, GPT_3_5_Turbo_0125, GPT_3_5_Turbo_0125Options, type GPT_3_5_Turbo_0125OptionsType, GPT_3_5_Turbo_0125Schema, GPT_3_5_Turbo_1106, GPT_3_5_Turbo_1106Options, type GPT_3_5_Turbo_1106OptionsType, GPT_3_5_Turbo_1106Schema, GPT_4, GPT_4Options, type GPT_4OptionsType, GPT_4Schema, GPT_4_0125_Preview, GPT_4_0125_PreviewOptions, type GPT_4_0125_PreviewOptionsType, GPT_4_0125_PreviewSchema, GPT_4_0613, GPT_4_0613Options, type GPT_4_0613OptionsType, GPT_4_0613Schema, GPT_4_1106_Preview, GPT_4_1106_PreviewOptions, type GPT_4_1106_PreviewOptionsType, GPT_4_1106_PreviewSchema, GPT_4_Turbo, GPT_4_TurboOptions, type GPT_4_TurboOptionsType, GPT_4_TurboSchema, GPT_4_Turbo_2024_04_09, GPT_4_Turbo_2024_04_09Options, type GPT_4_Turbo_2024_04_09OptionsType, GPT_4_Turbo_2024_04_09Schema, GPT_4_Turbo_Preview, GPT_4_Turbo_PreviewOptions, type GPT_4_Turbo_PreviewOptionsType, GPT_4_Turbo_PreviewSchema, GPT_4o, GPT_4oOptions, type GPT_4oOptionsType, GPT_4oSchema, GPT_4o_2024_08_06, GPT_4o_2024_08_06Options, type GPT_4o_2024_08_06OptionsType, GPT_4o_2024_08_06Schema, GPT_4o_Mini, GPT_4o_MiniOptions, type GPT_4o_MiniOptionsType, GPT_4o_MiniSchema, O1_Mini, O1_MiniOptions, type O1_MiniOptionsType, O1_MiniSchema, O1_Preview, O1_PreviewOptions, type O1_PreviewOptionsType, O1_PreviewSchema, OpenAI, OpenAIChatModelConfigs, OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap, OpenAIChatModelTextToolModalities, OpenAIChatModelTextToolModalitiesEnum, OpenAIChatOSeriesRequest, type OpenAIChatOSeriesRequestType, OpenAIChatRequest, OpenAIChatRequestAssistantMessage, type OpenAIChatRequestAssistantMessageType, OpenAIChatRequestImageContent, type OpenAIChatRequestImageContentType, OpenAIChatRequestMessage, type OpenAIChatRequestMessageType, OpenAIChatRequestResponseFormat, type OpenAIChatRequestResponseFormatType, OpenAIChatRequestSystemMessage, type OpenAIChatRequestSystemMessageType, OpenAIChatRequestTextContent, type OpenAIChatRequestTextContentType, OpenAIChatRequestTool, OpenAIChatRequestToolCallContent, type OpenAIChatRequestToolCallContentType, OpenAIChatRequestToolChoiceEnum, type OpenAIChatRequestToolChoiceEnumType, OpenAIChatRequestToolChoiceFunction, type OpenAIChatRequestToolChoiceFunctionType, OpenAIChatRequestToolMessage, type OpenAIChatRequestToolMessageType, type OpenAIChatRequestToolType, type OpenAIChatRequestType, OpenAIChatRequestUserMessage, type OpenAIChatRequestUserMessageType, OpenAICompleteChatResponse, type OpenAICompleteChatResponseType, OpenAIEmbeddingModelConfigs, OpenAIEmbeddingModelModalities, OpenAIEmbeddingModelModalitiesEnum, OpenAIEmbeddingRequest, OpenAIEmbeddingRequestInput, type OpenAIEmbeddingRequestInputType, type OpenAIEmbeddingRequestType, OpenAIGetEmbeddingsResponse, OpenAIStreamChatResponse, type OpenAIStreamChatResponseType, OpenAIToolCallsCompleteChatResponse, OpenAIToolCallsStreamChatResponse, ProviderLiteral, Text_Embedding_3_Large, Text_Embedding_3_LargeSchema, Text_Embedding_3_Large_Options, type Text_Embedding_3_Large_OptionsType, Text_Embedding_3_Small, Text_Embedding_3_SmallSchema, Text_Embedding_3_Small_Options, type Text_Embedding_3_Small_OptionsType, Text_Embedding_Ada002, Text_Embedding_Ada002Schema, Text_Embedding_Ada002_Options, type Text_Embedding_Ada002_OptionsType, dimensions, encodingFormat, frequencyPenalty, logProbs, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topLogProbs, topP };
+export { BaseChatModel, BaseChatModelOptions, type BaseChatModelOptionsType, BaseEmbeddingModel, BaseEmbeddingModelOptions, type BaseEmbeddingModelOptionsType, BaseOSeriesChatModel, ChatModelBaseConfigDef, ChatModelBaseConfigSchema, ChatModelOSeriesConfigDef, ChatModelOSeriesConfigSchema, ChatModelResponseFormatConfigDef, ChatModelResponseFormatConfigSchema, ChatModelResponseSchemaConfigDef, ChatModelResponseSchemaConfigSchema, EmbeddingModelBaseConfigDef, EmbeddingModelBaseConfigSchema, EmbeddingModelDimensionsConfigDef, EmbeddingModelDimensionsConfigSchema, GPT_3_5_Turbo, GPT_3_5_TurboLiteral, GPT_3_5_TurboOptions, type GPT_3_5_TurboOptionsType, GPT_3_5_TurboSchema, GPT_3_5_Turbo_0125, GPT_3_5_Turbo_0125Literal, GPT_3_5_Turbo_0125Options, type GPT_3_5_Turbo_0125OptionsType, GPT_3_5_Turbo_0125Schema, GPT_3_5_Turbo_1106, GPT_3_5_Turbo_1106Literal, GPT_3_5_Turbo_1106Options, type GPT_3_5_Turbo_1106OptionsType, GPT_3_5_Turbo_1106Schema, GPT_4, GPT_4Literal, GPT_4Options, type GPT_4OptionsType, GPT_4Schema, GPT_4_0125_Preview, GPT_4_0125_PreviewLiteral, GPT_4_0125_PreviewOptions, type GPT_4_0125_PreviewOptionsType, GPT_4_0125_PreviewSchema, GPT_4_0613, GPT_4_0613Literal, GPT_4_0613Options, type GPT_4_0613OptionsType, GPT_4_0613Schema, GPT_4_1106_Preview, GPT_4_1106_PreviewLiteral, GPT_4_1106_PreviewOptions, type GPT_4_1106_PreviewOptionsType, GPT_4_1106_PreviewSchema, GPT_4_Turbo, GPT_4_TurboLiteral, GPT_4_TurboOptions, type GPT_4_TurboOptionsType, GPT_4_TurboSchema, GPT_4_Turbo_2024_04_09, GPT_4_Turbo_2024_04_09Literal, GPT_4_Turbo_2024_04_09Options, type GPT_4_Turbo_2024_04_09OptionsType, GPT_4_Turbo_2024_04_09Schema, GPT_4_Turbo_Preview, GPT_4_Turbo_PreviewLiteral, GPT_4_Turbo_PreviewOptions, type GPT_4_Turbo_PreviewOptionsType, GPT_4_Turbo_PreviewSchema, GPT_4o, GPT_4oLiteral, GPT_4oOptions, type GPT_4oOptionsType, GPT_4oSchema, GPT_4o_2024_08_06, GPT_4o_2024_08_06Literal, GPT_4o_2024_08_06Options, type GPT_4o_2024_08_06OptionsType, GPT_4o_2024_08_06Schema, GPT_4o_Mini, GPT_4o_MiniLiteral, GPT_4o_MiniOptions, type GPT_4o_MiniOptionsType, GPT_4o_MiniSchema, O1_Mini, O1_MiniLiteral, O1_MiniOptions, type O1_MiniOptionsType, O1_MiniSchema, O1_Preview, O1_PreviewLiteral, O1_PreviewOptions, type O1_PreviewOptionsType, O1_PreviewSchema, OpenAI, OpenAIChatModelConfigs, OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelOSSeriesRoles, OpenAIChatModelOSSeriesRolesMap, OpenAIChatModelRoles, OpenAIChatModelRolesMap, OpenAIChatModelTextModalities, OpenAIChatModelTextModalitiesEnum, OpenAIChatModelTextToolModalities, OpenAIChatModelTextToolModalitiesEnum, OpenAIChatOSeriesRequest, type OpenAIChatOSeriesRequestType, OpenAIChatRequest, OpenAIChatRequestAssistantMessage, type OpenAIChatRequestAssistantMessageType, OpenAIChatRequestImageContent, type OpenAIChatRequestImageContentType, OpenAIChatRequestMessage, type OpenAIChatRequestMessageType, OpenAIChatRequestResponseFormat, type OpenAIChatRequestResponseFormatType, OpenAIChatRequestSystemMessage, type OpenAIChatRequestSystemMessageType, OpenAIChatRequestTextContent, type OpenAIChatRequestTextContentType, OpenAIChatRequestTool, OpenAIChatRequestToolCallContent, type OpenAIChatRequestToolCallContentType, OpenAIChatRequestToolChoiceEnum, type OpenAIChatRequestToolChoiceEnumType, OpenAIChatRequestToolChoiceFunction, type OpenAIChatRequestToolChoiceFunctionType, OpenAIChatRequestToolMessage, type OpenAIChatRequestToolMessageType, type OpenAIChatRequestToolType, type OpenAIChatRequestType, OpenAIChatRequestUserMessage, type OpenAIChatRequestUserMessageType, OpenAICompleteChatResponse, type OpenAICompleteChatResponseType, OpenAIEmbeddingModelConfigs, OpenAIEmbeddingModelModalities, OpenAIEmbeddingModelModalitiesEnum, OpenAIEmbeddingRequest, OpenAIEmbeddingRequestInput, type OpenAIEmbeddingRequestInputType, type OpenAIEmbeddingRequestType, OpenAIGetEmbeddingsResponse, OpenAIStreamChatResponse, type OpenAIStreamChatResponseType, OpenAIToolCallsCompleteChatResponse, OpenAIToolCallsStreamChatResponse, ProviderLiteral, Text_Embedding_3_Large, Text_Embedding_3_LargeLiteral, Text_Embedding_3_LargeSchema, Text_Embedding_3_Large_Options, type Text_Embedding_3_Large_OptionsType, Text_Embedding_3_Small, Text_Embedding_3_SmallLiteral, Text_Embedding_3_SmallSchema, Text_Embedding_3_Small_Options, type Text_Embedding_3_Small_OptionsType, Text_Embedding_Ada002, Text_Embedding_Ada002Literal, Text_Embedding_Ada002Schema, Text_Embedding_Ada002_Options, type Text_Embedding_Ada002_OptionsType, dimensions, encodingFormat, frequencyPenalty, logProbs, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topLogProbs, topP };
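
Beyond the new exports, the recurring change in both declaration files is that the o-series chat config now carries temperature: it sits next to maxTokens in the definition returned by OpenAIChatModelConfigs.oSeries and ChatModelOSeriesConfigDef, and the corresponding zod schema gains an optional temperature field. A small sketch of reading those values, with illustrative arguments that are not taken from the package:

import { OpenAIChatModelConfigs } from "@adaline/openai";

// Build the o-series config definition; both arguments are placeholders here,
// not values taken from the package.
const oSeries = OpenAIChatModelConfigs.oSeries(65536, 4);

// temperature is now part of the o-series definition, next to maxTokens.
const { min, max, step, default: defaultTemperature } = oSeries.def.temperature;
console.log(min, max, step, defaultTemperature);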